Dataset schema (one record per source file; column name, dtype, reported min/max or class count):

repo_name      stringlengths  6 .. 100
path           stringlengths  4 .. 294
copies         stringlengths  1 .. 5
size           stringlengths  4 .. 6
content        stringlengths  606 .. 896k
license        stringclasses  15 values
var_hash       int64          -9,223,186,179,200,150,000 .. 9,223,291,175B
doc_hash       int64          -9,223,304,365,658,930,000 .. 9,223,309,051B
line_mean      float64        3.5 .. 99.8
line_max       int64          13 .. 999
alpha_frac     float64        0.25 .. 0.97
autogenerated  bool           1 class
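Each record below is one source file carrying the metadata fields listed above. As a quick orientation, here is a minimal sketch of how such a dump could be filtered on its metadata columns before use; the Parquet file name and the choice of pandas are illustrative assumptions, not something the dump specifies.

# Minimal sketch (assumptions: rows exported to 'code_files.parquet'; pandas available).
import pandas as pd

df = pd.read_parquet("code_files.parquet")

# Keep human-written files under permissive licenses with moderate line lengths,
# using the per-file statistics exposed by the schema above.
kept = df[
    (~df["autogenerated"])
    & (df["license"].isin(["mit", "bsd-3-clause", "apache-2.0"]))
    & (df["line_max"] <= 200)
    & (df["alpha_frac"].between(0.25, 0.97))
]

print(len(kept), "of", len(df), "files kept")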
repo_name: autosportlabs/RaceCapture_App
path: autosportlabs/racecapture/views/configuration/rcp/configview.py
copies: 1
size: 20903
content:
# # Race Capture App # # Copyright (C) 2014-2017 Autosport Labs # # This file is part of the Race Capture App # # This is free software: you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # # See the GNU General Public License for more details. You should # have received a copy of the GNU General Public License along with # this code. If not, see <http://www.gnu.org/licenses/>. import os import kivy kivy.require('1.10.0') from kivy.app import Builder from kivy.uix.treeview import TreeViewLabel from kivy.properties import ObjectProperty, BooleanProperty from kivy.uix.popup import Popup from kivy.uix.screenmanager import Screen from kivy.clock import Clock from kivy import platform from kivy.logger import Logger from autosportlabs.help.helpmanager import HelpInfo from autosportlabs.racecapture.views.configuration.rcp.autocontrolconfigview import AutoControlConfigView from autosportlabs.racecapture.views.configuration.rcp.analogchannelsview import * from autosportlabs.racecapture.views.configuration.rcp.imuchannelsview import * from autosportlabs.racecapture.views.configuration.rcp.gpschannelsview import * from autosportlabs.racecapture.views.configuration.rcp.lapstatsview import * from autosportlabs.racecapture.views.configuration.rcp.timerchannelsview import * from autosportlabs.racecapture.views.configuration.rcp.gpiochannelsview import * from autosportlabs.racecapture.views.configuration.rcp.pwmchannelsview import * from autosportlabs.racecapture.views.configuration.rcp.trackconfigview import * from autosportlabs.racecapture.views.configuration.rcp.canchannelsview import * from autosportlabs.racecapture.views.configuration.rcp.obd2channelsview import * from autosportlabs.racecapture.views.configuration.rcp.canconfigview import * from autosportlabs.racecapture.views.configuration.rcp.telemetry.telemetryconfigview import * from autosportlabs.racecapture.views.configuration.rcp.wirelessconfigview import * from autosportlabs.racecapture.views.configuration.rcp.scriptview import * from autosportlabs.racecapture.views.file.loaddialogview import LoadDialog from autosportlabs.racecapture.views.file.savedialogview import SaveDialog from autosportlabs.racecapture.views.util.alertview import alertPopup, confirmPopup from autosportlabs.racecapture.config.rcpconfig import * from autosportlabs.uix.button.featurebutton import FeatureButton from autosportlabs.racecapture.theme.color import ColorScheme RCP_CONFIG_FILE_EXTENSION = '.rcp' class LinkedTreeViewLabel(TreeViewLabel): Builder.load_string(""" <LinkedTreeViewLabel>: font_size: dp(16) font_name: 'resource/fonts/Roboto-Light.ttf' """) view = None view_builder = None class ConfigView(Screen): Builder.load_string(""" <InfoFieldLabel@FieldLabel>: color: ColorScheme.get_dark_primary_text() halign: 'center' font_size: min(50, sp(50)) shorten: False <ConfigFeatureButton@FeatureButton>: tile_color: ColorScheme.get_dark_accent() icon_color: ColorScheme.get_accent() title_color: ColorScheme.get_accent() <ConfigView>: BoxLayout: orientation: 'horizontal' BoxLayout: size_hint_x: None width: max(dp(150), 200) orientation: 'vertical' ScrollContainer: id: scroller do_scroll_x:False TreeView: height: 
max(self.minimum_height, scroller.height) id: menu size_hint_y: None hide_root: True indent_level: dp(0) indent_start: dp(5) BoxLayout: id: button_panel padding: (dp(5), dp(0)) size_hint_y: None height: -10 BoxLayout: orientation: 'vertical' padding: (dp(0), dp(10)) BoxLayout: orientation: 'horizontal' size_hint_y: 0.45 spacing: dp(7) LabelIconButton: id: open title: 'Open' icon_size: self.height * 0.5 title_font_size: self.height * 0.35 icon: '\357\204\225' on_press: root.openConfig() LabelIconButton: id: save title: 'Save' icon_size: self.height * 0.5 title_font_size: self.height * 0.35 icon: '\357\203\207' on_press: root.saveConfig() BoxLayout: size_hint_y: 0.1 BoxLayout: orientation: 'horizontal' size_hint_y: 0.45 spacing: dp(7) LabelIconButton: id: read title: 'Read' icon_size: self.height * 0.5 title_font_size: self.height * 0.35 icon: '\357\202\223' on_press: root.readConfig() LabelIconButton: id: write title: 'Write' icon_size: self.height * 0.5 title_font_size: self.height * 0.35 icon: '\357\200\231' on_press: root.writeConfig() disabled: True BoxLayout: size_hint_x: 0.8 padding: [sp(10), sp(10), 0, 0] id: content orientation: 'vertical' Widget: size_hint_y: 0.15 BoxLayout: orientation: 'horizontal' size_hint_y: 0.3 padding: (dp(30), dp(15)) spacing: dp(30) ConfigFeatureButton: id: read title: 'Read' icon: u'\uf019' on_press: root.readConfig() ConfigFeatureButton: id: open title: 'Open' icon: u'\uf07c' on_press: root.openConfig() Widget: size_hint_x: None width: max(dp(150), 200) BoxLayout: orientation: 'horizontal' size_hint_y: 0.3 padding: (dp(30), dp(15)) spacing: dp(30) ConfigFeatureButton: id: first_time_setup title: 'First time Setup' icon: u'\uf138' on_press: root._on_first_time_setup() Widget: size_hint_x: None width: max(dp(150), 200) Widget: size_hint_y: 0.15 """) # file save/load loaded = BooleanProperty(False) writeStale = BooleanProperty(False) loadfile = ObjectProperty() savefile = ObjectProperty() text_input = ObjectProperty() track_manager = ObjectProperty() preset_manager = ObjectProperty() # List of config views configViews = [] menu = None rc_config = None script_view = None _settings = None base_dir = None _databus = None def __init__(self, **kwargs): super(ConfigView, self).__init__(**kwargs) self._status_pump = kwargs.get('status_pump') self._databus = kwargs.get('databus') self.rc_config = kwargs.get('rcpConfig', None) self.rc_api = kwargs.get('rc_api', None) self._settings = kwargs.get('settings') self.base_dir = kwargs.get('base_dir') self.register_event_type('on_config_updated') self.register_event_type('on_channels_updated') self.register_event_type('on_config_written') self.register_event_type('on_tracks_updated') self.register_event_type('on_config_modified') self.register_event_type('on_read_config') self.register_event_type('on_write_config') self.register_event_type('on_show_main_view') self._sn = '' if self.rc_config: self._sn = self.rc_config.versionConfig.serial self.ids.menu.bind(selected_node=self.on_select_node) def on_show_main_view(self, name): pass def on_config_written(self, *args): self.writeStale = False def on_config_modified(self, *args): self.writeStale = True def update_runtime_channels(self, system_channels): for view in self.configViews: channelWidgets = list(kvquery(view, __class__=ChannelNameSpinner)) for channelWidget in channelWidgets: channelWidget.dispatch('on_channels_updated', system_channels) def on_channels_updated(self, runtime_channels): self.update_runtime_channels(runtime_channels) def on_config_updated(self, config, 
force_reload=False): if config.versionConfig.serial != self._sn or force_reload: # New device or we need to redraw, reload everything # Our config object is the same object with new values, so we need to copy our value self._sn = copy(config.versionConfig.serial) self._clear() self.init_screen() else: self.rc_config = config self.update_config_views() def _clear(self): nodes = [] # Building an array because if we remove while iterating we end up skipping things for node in self.ids.menu.iterate_all_nodes(): nodes.append(node) for node in nodes: self.ids.menu.remove_node(node) self.ids.menu.clear_widgets() del(self.configViews[:]) self.ids.content.clear_widgets() def on_track_manager(self, instance, value): self.update_tracks() def on_enter(self): if not self.loaded and self.rc_config.loaded == True: self.init_screen() def on_loaded(self, instance, value): self.update_config_views() self.update_tracks() def on_writeStale(self, instance, value): self.updateControls() def _reset_stale(self): self.writeStale = False def update_config_views(self): config = self.rc_config if config and self.loaded: for view in self.configViews: view.dispatch('on_config_updated', config) self._reset_stale() def _on_first_time_setup(self): self.dispatch('on_show_main_view', 'setup') def init_screen(self): self.createConfigViews() def createConfigViews(self): def attach_node(text, n, view_builder): tree = self.ids.menu label = LinkedTreeViewLabel(text=text) label.view_builder = view_builder label.color_selected = ColorScheme.get_dark_primary() return tree.add_node(label, n) def create_scripting_view(capabilities): script_view = LuaScriptingView(capabilities, rc_api=self.rc_api) self.script_view = script_view return script_view runtime_channels = self._settings.runtimeChannels default_node = attach_node('Race Tracks', None, lambda: TrackConfigView(status_pump=self._status_pump, databus=self._databus, rc_api=self.rc_api, settings=self._settings, track_manager=self.track_manager)) if self.rc_config.capabilities.has_gps: attach_node('GPS', None, lambda: GPSChannelsView()) attach_node('Race Timing', None, lambda: LapStatsView()) if self.rc_config.capabilities.has_analog: attach_node('Analog Sensors', None, lambda: AnalogChannelsView(channels=runtime_channels, preset_manager=self.preset_manager)) if self.rc_config.capabilities.has_timer: attach_node('Pulse/RPM Sensors', None, lambda: PulseChannelsView(channels=runtime_channels)) if self.rc_config.capabilities.has_gpio: attach_node('Digital In/Out', None, lambda: GPIOChannelsView(channels=runtime_channels)) if self.rc_config.capabilities.has_imu: attach_node('Accel/Gyro', None, lambda: ImuChannelsView(rc_api=self.rc_api)) if self.rc_config.capabilities.has_pwm: attach_node('Pulse/Analog Out', None, lambda: AnalogPulseOutputChannelsView(channels=runtime_channels)) attach_node('CAN Bus', None, lambda: CANConfigView()) if self.rc_config.capabilities.has_can_channel: attach_node('CAN Mapping', None, lambda: CANChannelsView(settings=self._settings, preset_manager=self.preset_manager, channels=runtime_channels, base_dir=self.base_dir)) attach_node('OBDII', None, lambda: OBD2ChannelsView(channels=runtime_channels, base_dir=self.base_dir, preset_manager=self.preset_manager)) attach_node('Automatic Control', None, lambda: AutoControlConfigView(channels=runtime_channels)) attach_node('Wireless', None, lambda: WirelessConfigView(self.base_dir, self.rc_config, self.rc_config.capabilities)) attach_node('Telemetry', None, lambda: TelemetryConfigView(self.rc_config.capabilities)) if 
self.rc_config.capabilities.has_script: node_name = 'Scripting' else: node_name = 'Logs' attach_node(node_name, None, lambda: create_scripting_view(self.rc_config.capabilities)) if self.rc_api.is_firmware_update_supported(): from autosportlabs.racecapture.views.configuration.rcp.firmwareupdateview import FirmwareUpdateView attach_node('Firmware', None, lambda: FirmwareUpdateView(rc_api=self.rc_api, settings=self._settings)) self.ids.menu.select_node(default_node) self.update_runtime_channels(runtime_channels) self.update_tracks() self.ids.button_panel.height = max(dp(100), 150) self.loaded = True def show_node(self, node): view = node.view if not view: view = node.view_builder() self.configViews.append(view) view.bind(on_config_modified=self.on_config_modified) node.view = view if self.loaded: if self.rc_config: view.dispatch('on_config_updated', self.rc_config) if self.track_manager: view.dispatch('on_tracks_updated', self.track_manager) if view.get_parent_window() is None: Clock.schedule_once(lambda dt: self.ids.content.add_widget(view)) def on_select_node(self, instance, value): if not value: return # ensure that any keyboard is released try: self.ids.content.get_parent_window().release_keyboard() except: pass self.ids.content.clear_widgets() Clock.schedule_once(lambda dt: self.show_node(value)) def updateControls(self): Logger.debug("ConfigView: data is stale: " + str(self.writeStale)) write_button = self.ids.write write_button.disabled = not self.writeStale write_button.pulsing = self.writeStale Clock.schedule_once(lambda dt: HelpInfo.help_popup('rc_write_config', self, arrow_pos='left_mid'), 1.0) def update_tracks(self): track_manager = self.track_manager if track_manager and self.loaded: for view in self.configViews: view.dispatch('on_tracks_updated', track_manager) def on_tracks_updated(self, track_manager): self.track_manager = track_manager def on_read_config(self, instance, *args): pass def on_write_config(self, instance, *args): pass def readConfig(self): if self.writeStale == True: popup = None def _on_answer(instance, answer): if answer: self.dispatch('on_read_config', None) popup.dismiss() popup = confirmPopup('Confirm', 'Configuration Modified - Continue Loading?', _on_answer) else: self.dispatch('on_read_config', None) def writeConfig(self): if self.rc_config.loaded: self.dispatch('on_write_config', None) else: alertPopup('Warning', 'Please load or read a configuration before writing') def openConfig(self): if self.writeStale: popup = None def _on_answer(instance, answer): if answer: self.doOpenConfig() popup.dismiss() popup = confirmPopup('Confirm', 'Configuration Modified - Open Configuration?', _on_answer) else: self.doOpenConfig() def set_config_file_path(self, path): self._settings.userPrefs.set_pref('preferences', 'config_file_dir', path) def get_config_file_path(self): return self._settings.userPrefs.get_pref('preferences', 'config_file_dir') def doOpenConfig(self): content = LoadDialog(ok=self.load, cancel=self.dismiss_popup, filters=['*' + RCP_CONFIG_FILE_EXTENSION], user_path=self.get_config_file_path()) self._popup = Popup(title="Load file", content=content, size_hint=(0.9, 0.9)) self._popup.open() def saveConfig(self): if self.rc_config.loaded: content = SaveDialog(ok=self.save, cancel=self.dismiss_popup, filters=['*' + RCP_CONFIG_FILE_EXTENSION], user_path=self.get_config_file_path()) self._popup = Popup(title="Save file", content=content, size_hint=(0.9, 0.9)) self._popup.open() else: alertPopup('Warning', 'Please load or read a configuration before saving') 
def load(self, instance): self.set_config_file_path(instance.path) self.dismiss_popup() try: selection = instance.selection filename = selection[0] if len(selection) else None if filename: with open(filename) as stream: rcpConfigJsonString = stream.read() self.rc_config.fromJsonString(rcpConfigJsonString) self.rc_config.stale = True self.on_config_updated(self.rc_config, force_reload=True) self.on_config_modified() else: alertPopup('Error Loading', 'No config file selected') except Exception as detail: alertPopup('Error Loading', 'Failed to Load Configuration:\n\n' + str(detail)) Logger.exception('ConfigView: Error loading config: ' + str(detail)) def save(self, instance): def _do_save_config(filename): if not filename.endswith(RCP_CONFIG_FILE_EXTENSION): filename += RCP_CONFIG_FILE_EXTENSION with open(filename, 'w') as stream: configJson = self.rc_config.toJsonString() stream.write(configJson) self.set_config_file_path(instance.path) self.dismiss_popup() config_filename = instance.filename if len(config_filename): try: config_filename = os.path.join(instance.path, config_filename) if os.path.isfile(config_filename): def _on_answer(instance, answer): if answer: _do_save_config(config_filename) popup.dismiss() popup = confirmPopup('Confirm', 'File Exists - overwrite?', _on_answer) else: _do_save_config(config_filename) except Exception as detail: alertPopup('Error Saving', 'Failed to save:\n\n' + str(detail)) Logger.exception('ConfigView: Error Saving config: ' + str(detail)) def dismiss_popup(self, *args): self._popup.dismiss()
license: gpl-3.0
var_hash: -7,582,040,648,257,056,000
doc_hash: 1,295,893,086,367,705,300
line_mean: 38.891221
line_max: 181
alpha_frac: 0.584318
autogenerated: false
repo_name: jetty840/ReplicatorG
path: skein_engines/skeinforge-35/skeinforge_application/skeinforge_plugins/craft_plugins/export_plugins/static_plugins/gcode_small.py
copies: 6
size: 4301
content:
""" This page is in the table of contents. Gcode_small is an export plugin to remove the comments and the redundant z and feed rate parameters from a gcode file. An export plugin is a script in the export_plugins folder which has the getOutput function, the globalIsReplaceable variable and if it's output is not replaceable, the writeOutput function. It is meant to be run from the export tool. To ensure that the plugin works on platforms which do not handle file capitalization properly, give the plugin a lower case name. The getOutput function of this script takes a gcode text and returns that text without comments and redundant z and feed rate parameters. The writeOutput function of this script takes a gcode text and writes that text without comments and redundant z and feed rate parameters to a file. Many of the functions in this script are copied from gcodec in skeinforge_utilities. They are copied rather than imported so developers making new plugins do not have to learn about gcodec, the code here is all they need to learn. """ from __future__ import absolute_import import cStringIO import os __author__ = 'Enrique Perez ([email protected])' __date__ = '$Date: 2008/21/04 $' __license__ = 'GPL 3.0' # This is true if the output is text and false if it is binary." globalIsReplaceable = True def getOutput(gcodeText): 'Get the exported version of a gcode file.' return GcodeSmallSkein().getCraftedGcode(gcodeText) def getSplitLineBeforeBracketSemicolon(line): "Get the split line before a bracket or semicolon." bracketSemicolonIndex = min( line.find(';'), line.find('(') ) if bracketSemicolonIndex < 0: return line.split() return line[ : bracketSemicolonIndex ].split() def getStringFromCharacterSplitLine(character, splitLine): "Get the string after the first occurence of the character in the split line." indexOfCharacter = getIndexOfStartingWithSecond(character, splitLine) if indexOfCharacter < 0: return None return splitLine[indexOfCharacter][1 :] def getSummarizedFileName(fileName): "Get the fileName basename if the file is in the current working directory, otherwise return the original full name." if os.getcwd() == os.path.dirname(fileName): return os.path.basename(fileName) return fileName def getTextLines(text): "Get the all the lines of text of a text." return text.replace('\r', '\n').split('\n') def getIndexOfStartingWithSecond(letter, splitLine): "Get index of the first occurence of the given letter in the split line, starting with the second word. Return - 1 if letter is not found" for wordIndex in xrange( 1, len(splitLine) ): word = splitLine[ wordIndex ] firstLetter = word[0] if firstLetter == letter: return wordIndex return - 1 class GcodeSmallSkein: "A class to remove redundant z and feed rate parameters from a skein of extrusions." def __init__(self): self.lastFeedRateString = None self.lastZString = None self.output = cStringIO.StringIO() def getCraftedGcode( self, gcodeText ): "Parse gcode text and store the gcode." lines = getTextLines(gcodeText) for line in lines: self.parseLine(line) return self.output.getvalue() def parseLine(self, line): "Parse a gcode line." 
splitLine = getSplitLineBeforeBracketSemicolon(line) if len(splitLine) < 1: return firstWord = splitLine[0] if len(firstWord) < 1: return if firstWord[0] == '(': return if firstWord != 'G1': self.output.write(line + '\n') return eString = getStringFromCharacterSplitLine('E', splitLine ) xString = getStringFromCharacterSplitLine('X', splitLine ) yString = getStringFromCharacterSplitLine('Y', splitLine ) zString = getStringFromCharacterSplitLine('Z', splitLine ) feedRateString = getStringFromCharacterSplitLine('F', splitLine ) self.output.write('G1') if xString != None: self.output.write(' X' + xString ) if yString != None: self.output.write(' Y' + yString ) if zString != None and zString != self.lastZString: self.output.write(' Z' + zString ) if feedRateString != None and feedRateString != self.lastFeedRateString: self.output.write(' F' + feedRateString ) if eString != None: self.output.write(' E' + eString ) self.lastFeedRateString = feedRateString self.lastZString = zString self.output.write('\n')
license: gpl-2.0
var_hash: -935,177,455,998,550,000
doc_hash: -6,983,675,514,363,565,000
line_mean: 38.1
line_max: 365
alpha_frac: 0.747036
autogenerated: false
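The gcode_small record above documents its entry point: getOutput takes gcode text and returns it with comments and redundant Z and feed-rate parameters stripped. Below is a minimal usage sketch (Python 2, matching the module); the sample gcode lines and the expected result shown in comments are invented for illustration and assume gcode_small's definitions are in scope.

# Illustrative only: exercises getOutput() from gcode_small above (Python 2).
# The sample gcode lines are made up for the demo.
sample = '\n'.join([
    '(a comment that should be dropped)',
    'G1 X10.0 Y5.0 Z0.4 F1800.0 E1.2',
    'G1 X12.0 Y5.0 Z0.4 F1800.0 E1.4',  # Z and F repeat, so they are omitted
    'M104 S200',                        # non-G1 lines pass through unchanged
])

print getOutput(sample)
# Expected shape of the output:
#   G1 X10.0 Y5.0 Z0.4 F1800.0 E1.2
#   G1 X12.0 Y5.0 E1.4
#   M104 S200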
repo_name: sencha/chromium-spacewalk
path: chrome/test/chromedriver/third_party/googlecode/googlecode_upload.py
copies: 160
size: 8608
content:
#!/usr/bin/env python # # Copyright 2006, 2007 Google Inc. All Rights Reserved. # Author: [email protected] (David Anderson) # # Script for uploading files to a Google Code project. # # This is intended to be both a useful script for people who want to # streamline project uploads and a reference implementation for # uploading files to Google Code projects. # # To upload a file to Google Code, you need to provide a path to the # file on your local machine, a small summary of what the file is, a # project name, and a valid account that is a member or owner of that # project. You can optionally provide a list of labels that apply to # the file. The file will be uploaded under the same name that it has # in your local filesystem (that is, the "basename" or last path # component). Run the script with '--help' to get the exact syntax # and available options. # # Note that the upload script requests that you enter your # googlecode.com password. This is NOT your Gmail account password! # This is the password you use on googlecode.com for committing to # Subversion and uploading files. You can find your password by going # to http://code.google.com/hosting/settings when logged in with your # Gmail account. If you have already committed to your project's # Subversion repository, the script will automatically retrieve your # credentials from there (unless disabled, see the output of '--help' # for details). # # If you are looking at this script as a reference for implementing # your own Google Code file uploader, then you should take a look at # the upload() function, which is the meat of the uploader. You # basically need to build a multipart/form-data POST request with the # right fields and send it to https://PROJECT.googlecode.com/files . # Authenticate the request using HTTP Basic authentication, as is # shown below. # # Licensed under the terms of the Apache Software License 2.0: # http://www.apache.org/licenses/LICENSE-2.0 # # Questions, comments, feature requests and patches are most welcome. # Please direct all of these to the Google Code users group: # http://groups.google.com/group/google-code-hosting # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 """Google Code file uploader script. """ __author__ = '[email protected] (David Anderson)' import httplib import os.path import optparse import getpass import base64 import sys def upload(file, project_name, user_name, password, summary, labels=None): """Upload a file to a Google Code project's file server. Args: file: The local path to the file. project_name: The name of your project on Google Code. user_name: Your Google account name. password: The googlecode.com password for your account. Note that this is NOT your global Google Account password! summary: A small description for the file. labels: an optional list of label strings with which to tag the file. Returns: a tuple: http_status: 201 if the upload succeeded, something else if an error occured. http_reason: The human-readable string associated with http_status file_url: If the upload succeeded, the URL of the file on Google Code, None otherwise. """ # The login is the user part of [email protected]. If the login provided # is in the full user@domain form, strip it down. 
if user_name.endswith('@gmail.com'): user_name = user_name[:user_name.index('@gmail.com')] form_fields = [('summary', summary)] if labels is not None: form_fields.extend([('label', l.strip()) for l in labels]) content_type, body = encode_upload_request(form_fields, file) upload_host = '%s.googlecode.com' % project_name upload_uri = '/files' auth_token = base64.b64encode('%s:%s'% (user_name, password)) headers = { 'Authorization': 'Basic %s' % auth_token, 'User-Agent': 'Googlecode.com uploader v0.9.4', 'Content-Type': content_type, } server = httplib.HTTPSConnection(upload_host) server.request('POST', upload_uri, body, headers) resp = server.getresponse() server.close() if resp.status == 201: location = resp.getheader('Location', None) else: location = None return resp.status, resp.reason, location def encode_upload_request(fields, file_path): """Encode the given fields and file into a multipart form body. fields is a sequence of (name, value) pairs. file is the path of the file to upload. The file will be uploaded to Google Code with the same file name. Returns: (content_type, body) ready for httplib.HTTP instance """ BOUNDARY = '----------Googlecode_boundary_reindeer_flotilla' CRLF = '\r\n' body = [] # Add the metadata about the upload first for key, value in fields: body.extend( ['--' + BOUNDARY, 'Content-Disposition: form-data; name="%s"' % key, '', value, ]) # Now add the file itself file_name = os.path.basename(file_path) f = open(file_path, 'rb') file_content = f.read() f.close() body.extend( ['--' + BOUNDARY, 'Content-Disposition: form-data; name="filename"; filename="%s"' % file_name, # The upload server determines the mime-type, no need to set it. 'Content-Type: application/octet-stream', '', file_content, ]) # Finalize the form body body.extend(['--' + BOUNDARY + '--', '']) return 'multipart/form-data; boundary=%s' % BOUNDARY, CRLF.join(body) def upload_find_auth(file_path, project_name, summary, labels=None, user_name=None, password=None, tries=3): """Find credentials and upload a file to a Google Code project's file server. file_path, project_name, summary, and labels are passed as-is to upload. Args: file_path: The local path to the file. project_name: The name of your project on Google Code. summary: A small description for the file. labels: an optional list of label strings with which to tag the file. config_dir: Path to Subversion configuration directory, 'none', or None. user_name: Your Google account name. tries: How many attempts to make. """ if user_name is None or password is None: from netrc import netrc # Chromium edit: Works on windows without requiring HOME to be set. 
netrc_path = os.path.join(os.path.expanduser('~'), '.netrc') authenticators = netrc(netrc_path).authenticators("code.google.com") if authenticators: if user_name is None: user_name = authenticators[0] if password is None: password = authenticators[2] if user_name is None or password is None: raise RuntimeError('Missing user credentials for upload') return upload(file_path, project_name, user_name, password, summary, labels) def main(): parser = optparse.OptionParser(usage='googlecode-upload.py -s SUMMARY ' '-p PROJECT [options] FILE') parser.add_option('-s', '--summary', dest='summary', help='Short description of the file') parser.add_option('-p', '--project', dest='project', help='Google Code project name') parser.add_option('-u', '--user', dest='user', help='Your Google Code username') parser.add_option('-w', '--password', dest='password', help='Your Google Code password') parser.add_option('-l', '--labels', dest='labels', help='An optional list of comma-separated labels to attach ' 'to the file') options, args = parser.parse_args() if not options.summary: parser.error('File summary is missing.') elif not options.project: parser.error('Project name is missing.') elif len(args) < 1: parser.error('File to upload not provided.') elif len(args) > 1: parser.error('Only one file may be specified.') file_path = args[0] if options.labels: labels = options.labels.split(',') else: labels = None status, reason, url = upload_find_auth(file_path, options.project, options.summary, labels, options.user, options.password) if url: print 'The file was uploaded successfully.' print 'URL: %s' % url return 0 else: print 'An error occurred. Your file was not uploaded.' print 'Google Code upload server said: %s (%s)' % (reason, status) return 1 if __name__ == '__main__': sys.exit(main())
license: bsd-3-clause
var_hash: -1,247,416,561,687,579,400
doc_hash: 1,494,235,584,473,995,300
line_mean: 34.866667
line_max: 80
alpha_frac: 0.676928
autogenerated: false
repo_name: RCOSDP/waterbutler
path: waterbutler/core/streams/base64.py
copies: 4
size: 1100
content:
import base64
import asyncio


class Base64EncodeStream(asyncio.StreamReader):

    @staticmethod
    def calculate_encoded_size(size):
        size = 4 * size / 3
        if size % 4:
            size += 4 - size % 4
        return int(size)

    def __init__(self, stream, **kwargs):
        self.extra = b''
        self.stream = stream
        if stream.size is None:
            self._size = None
        else:
            self._size = Base64EncodeStream.calculate_encoded_size(stream.size)
        super().__init__(**kwargs)

    @property
    def size(self):
        return self._size

    async def read(self, n=-1):
        if n < 0:
            return (await super().read(n))

        nog = n
        padding = n % 3
        if padding:
            n += (3 - padding)

        chunk = self.extra + base64.b64encode((await self.stream.read(n)))

        if len(chunk) <= nog:
            self.extra = b''
            return chunk

        chunk, self.extra = chunk[:nog], chunk[nog:]
        return chunk

    def at_eof(self):
        return len(self.extra) == 0 and self.stream.at_eof()
license: apache-2.0
var_hash: 492,322,546,153,256,000
doc_hash: 3,693,391,259,085,914,600
line_mean: 21.916667
line_max: 79
alpha_frac: 0.527273
autogenerated: false
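The Base64EncodeStream record above predicts the encoded size up front and keeps leftover encoded bytes in self.extra so that reads of arbitrary size stay aligned with base64's 3-byte groups. The sketch below exercises that behaviour; FakeStream is a made-up stand-in for a WaterButler source stream (only the .size, .read() and .at_eof() surface used above), and Base64EncodeStream itself is assumed to be importable in the same scope.

# Illustrative check of Base64EncodeStream; FakeStream is a hypothetical stand-in.
import asyncio
import base64


class FakeStream:
    """Minimal source exposing the .size / .read() / .at_eof() surface used above."""

    def __init__(self, data):
        self.data = data
        self.size = len(data)
        self._pos = 0

    async def read(self, n=-1):
        if n < 0:
            n = len(self.data) - self._pos
        chunk = self.data[self._pos:self._pos + n]
        self._pos += len(chunk)
        return chunk

    def at_eof(self):
        return self._pos >= len(self.data)


async def demo():
    payload = b'hello base64 streaming world'
    encoder = Base64EncodeStream(FakeStream(payload))

    # The size predicted by calculate_encoded_size() matches the real encoded length.
    assert encoder.size == len(base64.b64encode(payload))

    # Reading in small chunks yields the same bytes as encoding everything at once.
    out = b''
    while not encoder.at_eof():
        out += await encoder.read(5)
    assert out == base64.b64encode(payload)


asyncio.run(demo())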
repo_name: Mashape/unirest-python
path: unirest/test/test_unirest.py
copies: 4
size: 5262
content:
# -*- coding:utf-8 -*-

import sys
import os
import unittest

sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))

import unirest


class UnirestTestCase(unittest.TestCase):

    def test_get(self):
        response = unirest.get('http://httpbin.org/get?name=Mark', params={"nick":"thefosk"})
        self.assertEqual(response.code, 200)
        self.assertEqual(len(response.body['args']), 2)
        self.assertEqual(response.body['args']['name'], "Mark")
        self.assertEqual(response.body['args']['nick'], "thefosk")

    def test_get2(self):
        response = unirest.get('http://httpbin.org/get?name=Mark', params={"nick":"the fosk"})
        self.assertEqual(response.code, 200)
        self.assertEqual(len(response.body['args']), 2)
        self.assertEqual(response.body['args']['name'], "Mark")
        self.assertEqual(response.body['args']['nick'], "the fosk")

    def test_get_unicode_param(self):
        response = unirest.get('http://httpbin.org/get?name=Shimada', params={"nick":u"しまりん"})
        self.assertEqual(response.code, 200)
        self.assertEqual(len(response.body['args']), 2)
        self.assertEqual(response.body['args']['name'], "Shimada")
        self.assertEqual(response.body['args']['nick'], u"しまりん")

    def test_get_none_param(self):
        response = unirest.get('http://httpbin.org/get?name=Mark', params={"nick":"thefosk", "age": None, "third":""})
        self.assertEqual(response.code, 200)
        self.assertEqual(len(response.body['args']), 3)
        self.assertEqual(response.body['args']['name'], "Mark")
        self.assertEqual(response.body['args']['nick'], "thefosk")
        self.assertEqual(response.body['args']['third'], "")

    def test_post(self):
        response = unirest.post('http://httpbin.org/post', params={"name":"Mark", "nick":"thefosk"})
        self.assertEqual(response.code, 200)
        self.assertEqual(len(response.body['args']), 0)
        self.assertEqual(len(response.body['form']), 2)
        self.assertEqual(response.body['form']['name'], "Mark")
        self.assertEqual(response.body['form']['nick'], "thefosk")

    def test_post_none_param(self):
        response = unirest.post('http://httpbin.org/post', params={"name":"Mark", "nick":"thefosk", "age": None, "third":""})
        self.assertEqual(response.code, 200)
        self.assertEqual(len(response.body['args']), 0)
        self.assertEqual(len(response.body['form']), 3)
        self.assertEqual(response.body['form']['name'], "Mark")
        self.assertEqual(response.body['form']['nick'], "thefosk")
        self.assertEqual(response.body['form']['third'], "")

    def test_delete(self):
        response = unirest.delete('http://httpbin.org/delete', params={"name":"Mark", "nick":"thefosk"})
        self.assertEqual(response.code, 200)
        self.assertEqual(response.body['form']['name'], "Mark")
        self.assertEqual(response.body['form']['nick'], "thefosk")

    def test_put(self):
        response = unirest.put('http://httpbin.org/put', params={"name":"Mark", "nick":"thefosk"})
        self.assertEqual(response.code, 200)
        self.assertEqual(len(response.body['args']), 0)
        self.assertEqual(len(response.body['form']), 2)
        self.assertEqual(response.body['form']['name'], "Mark")
        self.assertEqual(response.body['form']['nick'], "thefosk")

    def test_patch(self):
        response = unirest.patch('http://httpbin.org/patch', params={"name":"Mark", "nick":"thefosk"})
        self.assertEqual(response.code, 200)
        self.assertEqual(len(response.body['args']), 0)
        self.assertEqual(len(response.body['form']), 2)
        self.assertEqual(response.body['form']['name'], "Mark")
        self.assertEqual(response.body['form']['nick'], "thefosk")

    def test_post_entity(self):
        response = unirest.post('http://httpbin.org/post', headers={'Content-Type':'text/plain'}, params="hello this is custom data")
        self.assertEqual(response.code, 200)
        self.assertEqual(response.body['data'], "hello this is custom data")

    def test_gzip(self):
        response = unirest.get('http://httpbin.org/gzip', params={"name":"Mark"})
        self.assertEqual(response.code, 200)
        self.assertTrue(response.body['gzipped'])

    def test_basicauth(self):
        response = unirest.get('http://httpbin.org/get', auth=('marco', 'password'))
        self.assertEqual(response.code, 200)
        self.assertEqual(response.body['headers']['Authorization'], "Basic bWFyY286cGFzc3dvcmQ=")

    def test_defaultheaders(self):
        unirest.default_header('custom','custom header')
        response = unirest.get('http://httpbin.org/get')
        self.assertEqual(response.code, 200)
        self.assertTrue('Custom' in response.body['headers']);
        self.assertEqual(response.body['headers']['Custom'], "custom header")
        # Make another request
        response = unirest.get('http://httpbin.org/get')
        self.assertEqual(response.code, 200)
        self.assertTrue('Custom' in response.body['headers']);
        self.assertTrue(response.body['headers']['Custom'], "custom header")
        # Clear the default headers
        unirest.clear_default_headers()
        response = unirest.get('http://httpbin.org/get')
        self.assertEqual(response.code, 200)
        self.assertFalse('Custom' in response.body['headers']);

    def test_timeout(self):
        unirest.timeout(3)
        response = unirest.get('http://httpbin.org/delay/1')
        self.assertEqual(response.code, 200)
        unirest.timeout(1)
        try:
            response = unirest.get('http://httpbin.org/delay/3')
            self.fail("The timeout didn't work")
        except:
            pass


if __name__ == '__main__':
    unittest.main()
license: mit
var_hash: 2,354,023,879,691,795,000
doc_hash: 8,017,739,870,531,157,000
line_mean: 40.634921
line_max: 127
alpha_frac: 0.691575
autogenerated: false
repo_name: pabloborrego93/edx-platform
path: common/lib/xmodule/xmodule/annotator_token.py
copies: 211
size: 1542
content:
""" This file contains a function used to retrieve the token for the annotation backend without having to create a view, but just returning a string instead. It can be called from other files by using the following: from xmodule.annotator_token import retrieve_token """ import datetime from firebase_token_generator import create_token def retrieve_token(userid, secret): ''' Return a token for the backend of annotations. It uses the course id to retrieve a variable that contains the secret token found in inheritance.py. It also contains information of when the token was issued. This will be stored with the user along with the id for identification purposes in the backend. ''' # the following five lines of code allows you to include the default timezone in the iso format # for more information: http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone dtnow = datetime.datetime.now() dtutcnow = datetime.datetime.utcnow() delta = dtnow - dtutcnow newhour, newmin = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60, 60) newtime = "%s%+02d:%02d" % (dtnow.isoformat(), newhour, newmin) # uses the issued time (UTC plus timezone), the consumer key and the user's email to maintain a # federated system in the annotation backend server custom_data = {"issuedAt": newtime, "consumerKey": secret, "userId": userid, "ttl": 86400} newtoken = create_token(secret, custom_data) return newtoken
license: agpl-3.0
var_hash: 339,092,060,702,395,500
doc_hash: 1,716,267,926,539,015,200
line_mean: 47.1875
line_max: 141
alpha_frac: 0.732815
autogenerated: false
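retrieve_token above signs the issue time, consumer key, user id and a 24-hour ttl into a Firebase-style token. A minimal call sketch follows, with made-up arguments, assuming the module and its firebase_token_generator dependency are importable.

# Illustrative only: the user id and secret are invented for the example.
from xmodule.annotator_token import retrieve_token

token = retrieve_token("[email protected]", "shared-annotation-secret")

# The returned string is a signed token whose payload carries issuedAt (local time
# with UTC offset), consumerKey, userId and ttl; the backend verifies it with the
# same shared secret.
print(token)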
repo_name: pabloborrego93/edx-platform
path: pavelib/tests.py
copies: 9
size: 11010
content:
""" Unit test tasks """ import re import os import sys from paver.easy import sh, task, cmdopts, needs from pavelib.utils.test import suites from pavelib.utils.envs import Env from pavelib.utils.timer import timed from pavelib.utils.passthrough_opts import PassthroughTask from optparse import make_option try: from pygments.console import colorize except ImportError: colorize = lambda color, text: text __test__ = False # do not collect @needs( 'pavelib.prereqs.install_prereqs', 'pavelib.utils.test.utils.clean_reports_dir', ) @cmdopts([ ("system=", "s", "System to act on"), ("test-id=", "t", "Test id"), ("fail-fast", "x", "Fail suite on first failed test"), ("fasttest", "a", "Run without collectstatic"), make_option( '-c', '--cov-args', default='', help='adds as args to coverage for the test run' ), ('skip-clean', 'C', 'skip cleaning repository before running tests'), ('processes=', 'p', 'number of processes to use running tests'), make_option('-r', '--randomize', action='store_true', help='run the tests in a random order'), make_option('--no-randomize', action='store_false', dest='randomize', help="don't run the tests in a random order"), make_option("--verbose", action="store_const", const=2, dest="verbosity"), make_option("-q", "--quiet", action="store_const", const=0, dest="verbosity"), make_option("-v", "--verbosity", action="count", dest="verbosity", default=1), make_option( '--disable-migrations', action='store_true', dest='disable_migrations', help="Create tables directly from apps' models. Can also be used by exporting DISABLE_MIGRATIONS=1." ), make_option( '--enable-migrations', action='store_false', dest='disable_migrations', help="Create tables by applying migrations." ), ("fail_fast", None, "deprecated in favor of fail-fast"), ("test_id=", None, "deprecated in favor of test-id"), ('cov_args=', None, 'deprecated in favor of cov-args'), make_option( "-e", "--extra_args", default="", help="deprecated, pass extra options directly in the paver commandline" ), ('skip_clean', None, 'deprecated in favor of skip-clean'), ], share_with=['pavelib.utils.test.utils.clean_reports_dir']) @PassthroughTask @timed def test_system(options, passthrough_options): """ Run tests on our djangoapps for lms and cms """ system = getattr(options, 'system', None) test_id = getattr(options, 'test_id', None) if test_id: if not system: system = test_id.split('/')[0] if system in ['common', 'openedx']: system = 'lms' options.test_system['test_id'] = test_id if test_id or system: system_tests = [suites.SystemTestSuite( system, passthrough_options=passthrough_options, **options.test_system )] else: system_tests = [] for syst in ('cms', 'lms'): system_tests.append(suites.SystemTestSuite( syst, passthrough_options=passthrough_options, **options.test_system )) test_suite = suites.PythonTestSuite( 'python tests', subsuites=system_tests, passthrough_options=passthrough_options, **options.test_system ) test_suite.run() @needs( 'pavelib.prereqs.install_prereqs', 'pavelib.utils.test.utils.clean_reports_dir', ) @cmdopts([ ("lib=", "l", "lib to test"), ("test-id=", "t", "Test id"), ("failed", "f", "Run only failed tests"), ("fail-fast", "x", "Run only failed tests"), make_option( '-c', '--cov-args', default='', help='adds as args to coverage for the test run' ), ('skip-clean', 'C', 'skip cleaning repository before running tests'), make_option("--verbose", action="store_const", const=2, dest="verbosity"), make_option("-q", "--quiet", action="store_const", const=0, dest="verbosity"), make_option("-v", "--verbosity", 
action="count", dest="verbosity", default=1), ('cov_args=', None, 'deprecated in favor of cov-args'), make_option( '-e', '--extra_args', default='', help='deprecated, pass extra options directly in the paver commandline' ), ("fail_fast", None, "deprecated in favor of fail-fast"), ('skip_clean', None, 'deprecated in favor of skip-clean'), ("test_id=", None, "deprecated in favor of test-id"), ], share_with=['pavelib.utils.test.utils.clean_reports_dir']) @PassthroughTask @timed def test_lib(options, passthrough_options): """ Run tests for common/lib/ and pavelib/ (paver-tests) """ lib = getattr(options, 'lib', None) test_id = getattr(options, 'test_id', lib) if test_id: if '/' in test_id: lib = '/'.join(test_id.split('/')[0:3]) else: lib = 'common/lib/' + test_id.split('.')[0] options.test_lib['test_id'] = test_id lib_tests = [suites.LibTestSuite( lib, passthrough_options=passthrough_options, **options.test_lib )] else: lib_tests = [ suites.LibTestSuite( d, passthrough_options=passthrough_options, **options.test_lib ) for d in Env.LIB_TEST_DIRS ] test_suite = suites.PythonTestSuite( 'python tests', subsuites=lib_tests, passthrough_options=passthrough_options, **options.test_lib ) test_suite.run() @needs( 'pavelib.prereqs.install_prereqs', 'pavelib.utils.test.utils.clean_reports_dir', ) @cmdopts([ ("failed", "f", "Run only failed tests"), ("fail-fast", "x", "Run only failed tests"), make_option( '-c', '--cov-args', default='', help='adds as args to coverage for the test run' ), make_option("--verbose", action="store_const", const=2, dest="verbosity"), make_option("-q", "--quiet", action="store_const", const=0, dest="verbosity"), make_option("-v", "--verbosity", action="count", dest="verbosity", default=1), make_option( '--disable-migrations', action='store_true', dest='disable_migrations', help="Create tables directly from apps' models. Can also be used by exporting DISABLE_MIGRATIONS=1." ), ('cov_args=', None, 'deprecated in favor of cov-args'), make_option( '-e', '--extra_args', default='', help='deprecated, pass extra options directly in the paver commandline' ), ("fail_fast", None, "deprecated in favor of fail-fast"), ]) @PassthroughTask @timed def test_python(options, passthrough_options): """ Run all python tests """ python_suite = suites.PythonTestSuite( 'Python Tests', passthrough_options=passthrough_options, **options.test_python ) python_suite.run() @needs( 'pavelib.prereqs.install_prereqs', 'pavelib.utils.test.utils.clean_reports_dir', ) @cmdopts([ ("suites", "s", "List of unit test suites to run. 
(js, lib, cms, lms)"), make_option( '-c', '--cov-args', default='', help='adds as args to coverage for the test run' ), make_option("--verbose", action="store_const", const=2, dest="verbosity"), make_option("-q", "--quiet", action="store_const", const=0, dest="verbosity"), make_option("-v", "--verbosity", action="count", dest="verbosity", default=1), ('cov_args=', None, 'deprecated in favor of cov-args'), make_option( '-e', '--extra_args', default='', help='deprecated, pass extra options directly in the paver commandline' ), ]) @PassthroughTask @timed def test(options, passthrough_options): """ Run all tests """ # Subsuites to be added to the main suite python_suite = suites.PythonTestSuite( 'Python Tests', passthrough_options=passthrough_options, **options.test ) js_suite = suites.JsTestSuite('JS Tests', mode='run', with_coverage=True) # Main suite to be run all_unittests_suite = suites.TestSuite('All Tests', subsuites=[js_suite, python_suite]) all_unittests_suite.run() @task @needs('pavelib.prereqs.install_coverage_prereqs') @cmdopts([ ("compare-branch=", "b", "Branch to compare against, defaults to origin/master"), ("compare_branch=", None, "deprecated in favor of compare-branch"), ]) @timed def coverage(): """ Build the html, xml, and diff coverage reports """ report_dir = Env.REPORT_DIR rcfile = Env.PYTHON_COVERAGERC if not (report_dir / '.coverage').isfile(): # This may be that the coverage files were generated using -p, # try to combine them to the one file that we need. sh("coverage combine --rcfile={}".format(rcfile)) if not os.path.getsize(report_dir / '.coverage') > 50: # Check if the .coverage data file is larger than the base file, # because coverage combine will always at least make the "empty" data # file even when there isn't any data to be combined. err_msg = colorize( 'red', "No coverage info found. Run `paver test` before running " "`paver coverage`.\n" ) sys.stderr.write(err_msg) return # Generate the coverage.py XML report sh("coverage xml --rcfile={}".format(rcfile)) # Generate the coverage.py HTML report sh("coverage html --rcfile={}".format(rcfile)) diff_coverage() # pylint: disable=no-value-for-parameter @task @needs('pavelib.prereqs.install_coverage_prereqs') @cmdopts([ ("compare-branch=", "b", "Branch to compare against, defaults to origin/master"), ("compare_branch=", None, "deprecated in favor of compare-branch"), ], share_with=['coverage']) @timed def diff_coverage(options): """ Build the diff coverage reports """ compare_branch = options.get('compare_branch', 'origin/master') # Find all coverage XML files (both Python and JavaScript) xml_reports = [] for filepath in Env.REPORT_DIR.walk(): if bool(re.match(r'^coverage.*\.xml$', filepath.basename())): xml_reports.append(filepath) if not xml_reports: err_msg = colorize( 'red', "No coverage info found. Run `paver test` before running " "`paver coverage`.\n" ) sys.stderr.write(err_msg) else: xml_report_str = ' '.join(xml_reports) diff_html_path = os.path.join(Env.REPORT_DIR, 'diff_coverage_combined.html') # Generate the diff coverage reports (HTML and console) sh( "diff-cover {xml_report_str} --compare-branch={compare_branch} " "--html-report {diff_html_path}".format( xml_report_str=xml_report_str, compare_branch=compare_branch, diff_html_path=diff_html_path, ) ) print "\n"
license: agpl-3.0
var_hash: -4,413,130,887,316,799,000
doc_hash: -4,368,902,828,853,117,000
line_mean: 32.876923
line_max: 120
alpha_frac: 0.606812
autogenerated: false
repo_name: demiangomez/Parallel.GAMIT
path: com/ScanArchive.py
copies: 1
size: 66663
content:
""" Project: Parallel.Archive Date: 02/16/2017 Author: Demian D. Gomez Main routines to load the RINEX files to the database, load station information, run PPP on the archive files and obtain the OTL coefficients usage: pyScanArchive.py [-h] [-rinex] [-otl] [-stninfo [argument [argument ...]]] [-ppp [argument [argument ...]]] [-rehash [argument [argument ...]]] [-np] all|net.stnm [all|net.stnm ...] Archive operations Main Program positional arguments: all|net.stnm List of networks/stations to process given in [net].[stnm] format or just [stnm] (separated by spaces; if [stnm] is not unique in the database, all stations with that name will be processed). Use keyword 'all' to process all stations in the database. If [net].all is given, all stations from network [net] will be processed. Alternatevily, a file with the station list can be provided. optional arguments: -h, --help show this help message and exit -rinex, --rinex Scan the current archive for RINEX 2/3 files. -otl, --ocean_loading Calculate ocean loading coefficients. -stninfo [argument [argument ...]], --station_info [argument [argument ...]] Insert station information to the database. If no arguments are given, then scan the archive for station info files and use their location (folder) to determine the network to use during insertion. Only stations in the station list will be processed. If a filename is provided, then scan that file only, in which case a second argument specifies the network to use during insertion. Eg: -stninfo ~/station.info arg. In cases where multiple networks are being processed, the network argument will be used to desambiguate station code conflicts. Eg: pyScanArchive all -stninfo ~/station.info arg -> if a station named igm1 exists in networks 'igs' and 'arg', only 'arg.igm1' will get the station information insert. Use keyword 'stdin' to read the station information data from the pipeline. -ppp [argument [argument ...]], --ppp [argument [argument ...]] Run ppp on the rinex files in the database. Append [date_start] and (optionally) [date_end] to limit the range of the processing. Allowed formats are yyyy.doy or yyyy/mm/dd. Append keyword 'hash' to the end to check the PPP hash values against the station information records. If hash doesn't match, recalculate the PPP solutions. -rehash [argument [argument ...]], --rehash [argument [argument ...]] Check PPP hash against station information hash. Rehash PPP solutions to match the station information hash without recalculating the PPP solution. Optionally append [date_start] and (optionally) [date_end] to limit the rehashing time window. Allowed formats are yyyy.doy or yyyy/mm/dd. -np, --noparallel Execute command without parallelization. 
""" import pyArchiveStruct import dbConnection import pyDate import pyRinex import pyRinexName import traceback import datetime import os import pyOTL import pyStationInfo import sys import pySp3 import pyBrdc import pyClk import pyPPP from tqdm import tqdm import argparse import numpy import pyOptions import Utils import platform import pyJobServer from Utils import print_columns from Utils import process_date from Utils import ecef2lla import pyEvents import scandir import json import shutil import glob import uuid from decimal import Decimal import zipfile error_message = False class Encoder(json.JSONEncoder): def default(self, o): if isinstance(o, Decimal): return float(o) if isinstance(o, datetime.datetime): return datetime.datetime.strftime(o, '%Y-%m-%d %H:%M:%S') return super(Encoder, self).default(o) class callback_class(): def __init__(self, pbar): self.errors = None self.pbar = pbar def callbackfunc(self, args): msg = args self.errors = msg self.pbar.update(1) def callback_handle(job): global error_message if job.result is not None or job.exception: error_message = True if job.result: msg = job.result else: msg = job.exception tqdm.write(' -- There were unhandled errors during this batch. ' 'Please check errors_pyArchiveService.log for details') f = open('errors_pyScanArchive.log', 'a') f.write('ON ' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + ' an unhandled error occurred:\n') f.write(msg + '\n') f.write('END OF ERROR =================== \n\n') f.close() def verify_rinex_date_multiday(cnn, date, rinexinfo, Config): # function to verify if rinex is multiday or if the file is from the date it was stored in the archive # returns true if parent process can continue with insert # returns false if file had to be moved from the archive (date != rinex.date or multiday file) # check if rinex is a multiday file (rinex with more than one day of observations) if rinexinfo.multiday: # move all the files to the repository, delete the crinex from the archive, log the event rnxlist = [] for rnx in rinexinfo.multiday_rnx_list: rnxlist.append(rnx.rinex) # some other file, move it to the repository retry_folder = os.path.join(Config.repository_data_in_retry, 'multidays_found/' + rnx.date.yyyy() + '/' + rnx.date.ddd()) rnx.compress_local_copyto(retry_folder) # if the file corresponding to this session is found, assign its object to rinexinfo event = pyEvents.Event( Description='%s was a multi-day rinex file. The following rinex files where generated and moved to the repository/data_in_retry: %s.' % ( rinexinfo.origin_file, ','.join(rnxlist)), NetworkCode=rinexinfo.NetworkCode, EventType='warn', StationCode=rinexinfo.StationCode, Year=int(rinexinfo.date.year), DOY=int(rinexinfo.date.doy)) cnn.insert_event(event) # remove crinex from archive os.remove(rinexinfo.origin_file) return False # compare the date of the rinex with the date in the archive if not date == rinexinfo.date: # move the file out of the archive because it's in the wrong spot (wrong folder, wrong name, etc) # let pyArchiveService fix the issue retry_folder = os.path.join(Config.repository_data_in_retry, 'wrong_date_found/' + date.yyyy() + '/' + date.ddd()) # move the crinex out of the archive rinexinfo.move_origin_file(retry_folder) event = pyEvents.Event( Description='The date in the archive for ' + rinexinfo.rinex + ' (' + date.yyyyddd() + ') does not agree with the mean session date (' + rinexinfo.date.yyyyddd() + '). 
The file was moved to the repository/data_in_retry.', NetworkCode=rinexinfo.NetworkCode, EventType='warn', StationCode=rinexinfo.StationCode, Year=int(rinexinfo.date.year), DOY=int(rinexinfo.date.doy)) cnn.insert_event(event) return False return True def try_insert(NetworkCode, StationCode, year, doy, rinex): try: # try to open a connection to the database cnn = dbConnection.Cnn("gnss_data.cfg") Config = pyOptions.ReadOptions("gnss_data.cfg") # get the rejection directory ready data_reject = os.path.join(Config.repository_data_reject, 'bad_rinex/%i/%03i' % (year, doy)) # get the rinex file name rnx_name = pyRinexName.RinexNameFormat(rinex) except Exception: return traceback.format_exc() + ' processing rinex: %s (%s.%s %s %s) using node %s' \ % (rinex, NetworkCode, StationCode, str(year), str(doy), platform.node()) try: filename = rnx_name.to_rinex_format(pyRinexName.TYPE_RINEX, no_path=True) # build the archive level sql string # the file has not to exist in the RINEX table (check done using filename) rs = cnn.query( 'SELECT * FROM rinex WHERE "NetworkCode" = \'%s\' AND "StationCode" = \'%s\' AND "Filename" = \'%s\'' % (NetworkCode, StationCode, filename)) if rs.ntuples() == 0: # no record found, possible new rinex file for this day with pyRinex.ReadRinex(NetworkCode, StationCode, rinex) as rinexinfo: date = pyDate.Date(year=year, doy=doy) # verify that the rinex is from this date and that is not a multiday file if verify_rinex_date_multiday(cnn, date, rinexinfo, Config): try: # create the insert statement cnn.insert('rinex', rinexinfo.record) event = pyEvents.Event( Description='Archived crinex file %s added to the database.' % (rinex), EventType='info', StationCode=StationCode, NetworkCode=NetworkCode, Year=date.year, DOY=date.doy) cnn.insert_event(event) except dbConnection.dbErrInsert: # insert duplicate values: a rinex file with different name but same interval and completion % # discard file cnn.begin_transac() event = pyEvents.Event( Description='Crinex file %s was removed from the archive (and not added to db) because ' 'it matched the interval and completion of an already existing file.' 
% rinex, EventType='info', StationCode=StationCode, NetworkCode=NetworkCode, Year=date.year, DOY=date.doy) cnn.insert_event(event) rinexinfo.move_origin_file(os.path.join(Config.repository_data_reject, 'duplicate_insert/%i/%03i' % (year, doy))) cnn.commit_transac() except (pyRinex.pyRinexExceptionBadFile, pyRinex.pyRinexExceptionSingleEpoch) as e: try: filename = Utils.move(rinex, os.path.join(data_reject, os.path.basename(rinex))) except OSError: # permission denied: could not move file out of the archive->return error in an orderly fashion return traceback.format_exc() + ' processing rinex: %s (%s.%s %s %s) using node %s' \ % (rinex, NetworkCode, StationCode, str(year), str(doy), platform.node()) e.event['Description'] = 'During %s, file moved to %s: %s' \ % (os.path.basename(rinex), filename, e.event['Description']) e.event['StationCode'] = StationCode e.event['NetworkCode'] = NetworkCode e.event['Year'] = year e.event['DOY'] = doy cnn.insert_event(e.event) return except pyRinex.pyRinexException as e: if cnn.active_transaction: cnn.rollback_transac() e.event['Description'] = e.event['Description'] + ' during ' + rinex e.event['StationCode'] = StationCode e.event['NetworkCode'] = NetworkCode e.event['Year'] = year e.event['DOY'] = doy cnn.insert_event(e.event) return except Exception: if cnn.active_transaction: cnn.rollback_transac() return traceback.format_exc() + ' processing rinex: %s (%s.%s %s %s) using node %s' \ % (rinex, NetworkCode, StationCode, str(year), str(doy), platform.node()) def obtain_otl(NetworkCode, StationCode): errors = '' x = [] y = [] z = [] try: cnn = dbConnection.Cnn("gnss_data.cfg") Config = pyOptions.ReadOptions("gnss_data.cfg") pyArchive = pyArchiveStruct.RinexStruct(cnn) # assumes that the files in the db are correct. We take 10 records from the time span (evenly spaced) count = cnn.query('SELECT count(*) as cc FROM rinex_proc as r WHERE "NetworkCode" = \'%s\' AND ' '"StationCode" = \'%s\'' % (NetworkCode, StationCode)) count = count.dictresult() if count[0]['cc'] >= 10: stn = cnn.query('SELECT * FROM (SELECT *, row_number() OVER ' '(PARTITION BY "NetworkCode", "StationCode") as rnum, ' 'count(*) OVER (PARTITION BY "NetworkCode", "StationCode") as cc FROM rinex_proc) as rinex ' 'WHERE rinex."NetworkCode" = \'%s\' AND rinex."StationCode" = \'%s\' ' 'AND rinex.rnum %% (rinex.cc/10) = 0 ORDER BY rinex."ObservationSTime"' % (NetworkCode, StationCode)) elif count[0]['cc'] < 10: stn = cnn.query('SELECT * FROM (SELECT row_number() OVER() as rnum, r.* FROM rinex_proc as r ' 'WHERE "NetworkCode" = \'%s\' AND "StationCode" = \'%s\' ' 'ORDER BY "ObservationSTime") AS rr ' % (NetworkCode, StationCode)) else: return 'Station %s.%s had no rinex files in the archive. Please check the database for problems.' 
\ % (NetworkCode, StationCode) tblrinex = stn.dictresult() for dbRinex in tblrinex: # obtain the path to the crinex file = pyArchive.build_rinex_path(NetworkCode, StationCode, dbRinex['ObservationYear'], dbRinex['ObservationDOY']) with pyRinex.ReadRinex(dbRinex['NetworkCode'], dbRinex['StationCode'], os.path.join(Config.archive_path, file)) as Rinex: # read the crinex try: # run ppp without otl and met and in non-strict mode with pyPPP.RunPPP(Rinex, '', Config.options, Config.sp3types, Config.sp3altrn, Rinex.antOffset, strict=False, apply_met=False, clock_interpolation=True) as ppp: ppp.exec_ppp() x.append(ppp.x) y.append(ppp.y) z.append(ppp.z) errors = errors + 'PPP -> %s.%s: %.3f %.3f %.3f\n' \ % (NetworkCode, StationCode, ppp.x, ppp.y, ppp.z) except (pySp3.pySp3Exception, pyClk.pyClkException, pyPPP.pyRunPPPException): # try autonomous solution try: brdc = pyBrdc.GetBrdcOrbits(Config.brdc_path, Rinex.date, Rinex.rootdir) Rinex.auto_coord(brdc, chi_limit=1000) x.append(Rinex.x) y.append(Rinex.y) z.append(Rinex.z) except Exception as e: errors = errors + str(e) + '\n' continue except (IOError, pyRinex.pyRinexException, pyRinex.pyRinexExceptionBadFile) as e: # problem loading this file, try another one errors = errors + str(e) + '\n' continue except Exception: return traceback.format_exc() + ' processing: %s.%s using node %s' \ % (NetworkCode, StationCode, platform.node()) # average the x y z values if len(x) > 0: if len(x) > 1: x = numpy.array(x) y = numpy.array(y) z = numpy.array(z) x = numpy.mean(x[abs(x - numpy.mean(x)) < 2 * numpy.std(x)]) y = numpy.mean(y[abs(y - numpy.mean(y)) < 2 * numpy.std(y)]) z = numpy.mean(z[abs(z - numpy.mean(z)) < 2 * numpy.std(z)]) else: x = x[0] y = y[0] z = z[0] lat, lon, h = ecef2lla([x,y,z]) # calculate the otl parameters if the auto_coord returned a valid position errors = errors + 'Mean -> %s.%s: %.3f %.3f %.3f\n' % (NetworkCode, StationCode, x, y, z) otl = pyOTL.OceanLoading(StationCode, Config.options['grdtab'], Config.options['otlgrid']) coeff = otl.calculate_otl_coeff(x=x, y=y, z=z) # update record in the database cnn.query('UPDATE stations SET "auto_x" = %.3f, "auto_y" = %.3f, "auto_z" = %.3f, ' '"lat" = %.8f, "lon" = %.8f, "height" = %.3f, ' '"Harpos_coeff_otl" = \'%s\' ' 'WHERE "NetworkCode" = \'%s\' AND "StationCode" = \'%s\'' % (x, y, z, lat[0], lon[0], h[0], coeff, NetworkCode, StationCode)) else: outmsg = 'Could not obtain a coordinate/otl coefficients for ' + NetworkCode + ' ' + StationCode + \ ' after 20 tries. Maybe there where few valid RINEX files or could not find an ephemeris file. ' \ 'Debug info and errors follow:\n' + errors return outmsg except pyOTL.pyOTLException as e: return "Error while calculating OTL for %s.%s: %s\n" % (NetworkCode, StationCode, str(e)) + \ 'Debug info and errors follow: \n' + errors except Exception: # print 'problem!' 
+ traceback.format_exc() outmsg = traceback.format_exc() + ' processing otl: %s.%s using node %s\n' \ % (NetworkCode, StationCode, platform.node()) \ + 'Debug info and errors follow: \n' + errors return outmsg def insert_stninfo(NetworkCode, StationCode, stninfofile): errors = [] try: cnn = dbConnection.Cnn("gnss_data.cfg") except Exception: return traceback.format_exc() + ' insert_stninfo: ' + NetworkCode + ' ' + StationCode + \ ' using node ' + platform.node() try: stnInfo = pyStationInfo.StationInfo(cnn, NetworkCode, StationCode, allow_empty=True) stninfo = stnInfo.parse_station_info(stninfofile) except pyStationInfo.pyStationInfoException: return traceback.format_exc() + ' insert_stninfo: ' + NetworkCode + ' ' + StationCode + \ ' using node ' + platform.node() # DDG: 18-Feb-2019 used to have some code here to force the insertion of receivers and antennas # this is not done anymore, and receivers and antennas should exists in the corresponding tables before inserting # otherwise, a python exception will be raised. # ready to insert stuff to station info table for stn in stninfo: if stn.get('StationCode').lower() == StationCode: try: stnInfo.InsertStationInfo(stn) except pyStationInfo.pyStationInfoException as e: errors.append(str(e)) except Exception: errors.append(traceback.format_exc() + ' insert_stninfo: ' + NetworkCode + ' ' + StationCode + ' using node ' + platform.node()) continue if not errors: return else: return '\n\n'.join(errors) def remove_from_archive(cnn, record, Rinex, Config): # do not make very complex things here, just move it out from the archive retry_folder = os.path.join(Config.repository_data_in_retry, 'inconsistent_ppp_solution/' + Rinex.date.yyyy() + '/' + Rinex.date.ddd()) pyArchive = pyArchiveStruct.RinexStruct(cnn) pyArchive.remove_rinex(record, retry_folder) event = pyEvents.Event( Description='After running PPP it was found that the rinex file %s does not belong to this station. ' 'This file was removed from the rinex table and moved to the repository/data_in_retry to add it ' 'to the corresponding station.' 
% Rinex.origin_file, NetworkCode=record['NetworkCode'], StationCode=record['StationCode'], EventType='warn', Year=int(Rinex.date.year), DOY=int(Rinex.date.doy)) cnn.insert_event(event) def execute_ppp(record, rinex_path, h_tolerance): NetworkCode = record['NetworkCode'] StationCode = record['StationCode'] year = record['ObservationYear'] doy = record['ObservationDOY'] try: # try to open a connection to the database cnn = dbConnection.Cnn("gnss_data.cfg") Config = pyOptions.ReadOptions("gnss_data.cfg") except Exception: return traceback.format_exc() + ' processing rinex: %s.%s %s %s using node %s' \ % (NetworkCode, StationCode, str(year), str(doy), platform.node()) # create a temp folder in production to put the orbit in # we need to check the RF of the orbit to see if we have this solution in the DB try: # check to see if record exists for this file in ppp_soln # DDG: now read the frame from the config file frame, _ = Utils.determine_frame(Config.options['frames'], pyDate.Date(year=year, doy=doy)) ppp_soln = cnn.query('SELECT * FROM ppp_soln WHERE "NetworkCode" = \'%s\' AND "StationCode" = \'%s\' AND ' '"Year" = %s AND "DOY" = %s AND "ReferenceFrame" = \'%s\'' % (NetworkCode, StationCode, year, doy, frame)) if ppp_soln.ntuples() == 0: # load the stations record to get the OTL params rs_stn = cnn.query('SELECT * FROM stations WHERE "NetworkCode" = \'%s\' AND "StationCode" = \'%s\'' % (NetworkCode, StationCode)) stn = rs_stn.dictresult() # RINEX FILE TO BE PROCESSED with pyRinex.ReadRinex(NetworkCode, StationCode, rinex_path) as Rinex: if not verify_rinex_date_multiday(cnn, Rinex.date, Rinex, Config): # the file is a multiday file. These files are not supposed to be in the archive, but, due to a bug # in ScanArchive (now fixed - 2017-10-26) some multiday files are still in the rinex table # the file is moved out of the archive (into the retry folder and the rinex record is deleted event = pyEvents.Event(EventType='warn', Description='RINEX record in database belonged to a multiday file. ' 'The record has been removed from the database. 
' 'See previous associated event.', StationCode=StationCode, NetworkCode=NetworkCode, Year=int(Rinex.date.year), DOY=int(Rinex.date.doy)) cnn.insert_event(event) cnn.begin_transac() cnn.query( 'DELETE FROM gamit_soln WHERE "NetworkCode" = \'%s\' AND "StationCode" = \'%s\' ' 'AND "Year" = %i AND "DOY" = %i' % (record['NetworkCode'], record['StationCode'], record['ObservationYear'], record['ObservationDOY'])) cnn.query( 'DELETE FROM ppp_soln WHERE "NetworkCode" = \'%s\' AND "StationCode" = \'%s\' ' 'AND "Year" = %i AND "DOY" = %i' % (record['NetworkCode'], record['StationCode'], record['ObservationYear'], record['ObservationDOY'])) cnn.query( 'DELETE FROM rinex WHERE "NetworkCode" = \'%s\' AND "StationCode" = \'%s\' ' 'AND "ObservationYear" = %i AND "ObservationDOY" = %i AND "Filename" = \'%s\'' % (record['NetworkCode'], record['StationCode'], record['ObservationYear'], record['ObservationDOY'], record['Filename'])) cnn.commit_transac() return stninfo = pyStationInfo.StationInfo(cnn, NetworkCode, StationCode, Rinex.date, h_tolerance=h_tolerance) Rinex.normalize_header(stninfo, x=stn[0]['auto_x'], y=stn[0]['auto_y'], z=stn[0]['auto_z']) with pyPPP.RunPPP(Rinex, stn[0]['Harpos_coeff_otl'], Config.options, Config.sp3types, Config.sp3altrn, stninfo.to_dharp(stninfo.currentrecord).AntennaHeight, hash=stninfo.currentrecord.hash) as ppp: ppp.exec_ppp() # verify that the solution is from the station it claims to be Result, match, _ = ppp.verify_spatial_coherence(cnn, StationCode) if Result: if match[0]['NetworkCode'] == NetworkCode and match[0]['StationCode'] == StationCode: # the match agrees with the station-day that we THINK we are processing # this check should not be necessary if the rinex went through Archive Service, since we # already match rinex vs station # but it's still here to prevent that a rinex imported by ScanArchive (which assumes the # rinex files belong to the network/station of the folder) doesn't get into the PPP table # if it's not of the station it claims to be. 
# insert record in DB cnn.insert('ppp_soln', ppp.record) # DDG: Eric's request to generate a date of PPP solution event = pyEvents.Event(Description='A new PPP solution was created for frame ' + ppp.frame, NetworkCode=NetworkCode, StationCode=StationCode, Year=int(year), DOY=int(doy)) cnn.insert_event(event) else: remove_from_archive(cnn, record, Rinex, Config) else: remove_from_archive(cnn, record, Rinex, Config) except (pyRinex.pyRinexException, pyRinex.pyRinexExceptionBadFile, pyRinex.pyRinexExceptionSingleEpoch) as e: e.event['StationCode'] = StationCode e.event['NetworkCode'] = NetworkCode e.event['Year'] = int(year) e.event['DOY'] = int(doy) cnn.insert_event(e.event) except pyPPP.pyRunPPPException as e: e.event['StationCode'] = StationCode e.event['NetworkCode'] = NetworkCode e.event['Year'] = int(year) e.event['DOY'] = int(doy) cnn.insert_event(e.event) except pyStationInfo.pyStationInfoException as e: e.event['StationCode'] = StationCode e.event['NetworkCode'] = NetworkCode e.event['Year'] = int(year) e.event['DOY'] = int(doy) cnn.insert_event(e.event) except Exception: return traceback.format_exc() + ' processing rinex: %s.%s %s %s using node %s' \ % (NetworkCode, StationCode, str(year), str(doy), platform.node()) def post_scan_rinex_job(cnn, Archive, rinex_file, rinexpath, master_list, JobServer, ignore): valid, result = Archive.parse_archive_keys(rinex_file, key_filter=('network', 'station', 'year', 'doy')) if valid: NetworkCode = result['network'] StationCode = result['station'] year = result['year'] doy = result['doy'] # check the master_list if NetworkCode + '.' + StationCode in master_list or ignore: # check existence of network in the db rs = cnn.query('SELECT * FROM networks WHERE "NetworkCode" = \'%s\'' % NetworkCode) if rs.ntuples() == 0: cnn.insert('networks', NetworkCode=NetworkCode, NetworkName='UNK') # check existence of station in the db rs = cnn.query('SELECT * FROM stations WHERE "NetworkCode" = \'%s\' AND "StationCode" = \'%s\'' % (NetworkCode, StationCode)) if rs.ntuples() == 0: # run grdtab to get the OTL parameters in HARPOS format and insert then in the db # use the current rinex to get an approximate coordinate cnn.insert('stations', NetworkCode=NetworkCode, StationCode=StationCode) JobServer.submit(NetworkCode, StationCode, year, doy, rinexpath) def scan_rinex(cnn, JobServer, pyArchive, archive_path, master_list, ignore): master_list = [item['NetworkCode'] + '.' + item['StationCode'] for item in master_list] print " >> Analyzing the archive's structure..." pbar = tqdm(ncols=80, unit='crz', disable=None) depfuncs = (verify_rinex_date_multiday,) modules = ('dbConnection', 'pyDate', 'pyRinex', 'shutil', 'platform', 'datetime', 'traceback', 'pyOptions', 'pyEvents', 'Utils', 'os', 'pyRinexName') JobServer.create_cluster(try_insert, dependencies=depfuncs, modules=modules, callback=callback_handle) if ignore[0] == 1: ignore = True else: ignore = False for path, _, files in scandir.walk(archive_path): for sfile in files: # DDG issue #15: match the name of the file to a valid rinex filename try: _ = pyRinexName.RinexNameFormat(sfile) # only examine valid rinex compressed files rnx = os.path.join(path, sfile).rsplit(archive_path + '/')[1] path2rnx = os.path.join(path, sfile) pbar.set_postfix(crinex=rnx) pbar.update() post_scan_rinex_job(cnn, pyArchive, rnx, path2rnx, master_list, JobServer, ignore) except pyRinexName.RinexNameException: pass JobServer.wait() # handle any output messages during this batch if error_message: tqdm.write(' -- There were unhandled errors. 
Please check errors_pyScanArchive.log for details') pbar.close() def process_otl(cnn, JobServer, master_list): print "" print " >> Calculating coordinates and OTL for new stations..." master_list = [item['NetworkCode'] + '.' + item['StationCode'] for item in master_list] rs = cnn.query('SELECT "NetworkCode", "StationCode" FROM stations ' 'WHERE auto_x is null OR auto_y is null OR auto_z is null OR "Harpos_coeff_otl" is null ' 'AND "NetworkCode" not like \'?%\' ' 'AND "NetworkCode" || \'.\' || "StationCode" IN (\'' + '\',\''.join(master_list) + '\')') records = rs.dictresult() pbar = tqdm(total=len(records), ncols=80, disable=None) depfuncs = (ecef2lla,) modules = ('dbConnection', 'pyRinex', 'pyArchiveStruct', 'pyOTL', 'pyPPP', 'numpy', 'platform', 'pySp3', 'traceback', 'pyOptions', 'pyBrdc', 'pyClk') JobServer.create_cluster(obtain_otl, depfuncs, callback_handle, progress_bar=pbar, modules=modules) for record in records: NetworkCode = record['NetworkCode'] StationCode = record['StationCode'] JobServer.submit(NetworkCode, StationCode) JobServer.wait() pbar.close() def scan_station_info(JobServer, pyArchive, archive_path, master_list): print " >> Searching for station info files in the archive..." stninfo, path2stninfo = pyArchive.scan_archive_struct_stninfo(archive_path) print " >> Processing Station Info files..." master_list = [item['NetworkCode'] + '.' + item['StationCode'] for item in master_list] pbar = tqdm(total=len(stninfo), ncols=80, disable=None) modules = ('dbConnection', 'pyStationInfo', 'sys', 'datetime', 'pyDate', 'platform', 'traceback') JobServer.create_cluster(insert_stninfo, callback=callback_handle, progress_bar=pbar, modules=modules) for stninfofile, stninfopath in zip(stninfo,path2stninfo): valid, result = pyArchive.parse_archive_keys(stninfofile, key_filter=('network', 'station')) if valid: NetworkCode = result['network'] StationCode = result['station'] if NetworkCode + '.' + StationCode in master_list: # we were able to get the network and station code, add it to the database JobServer.submit(NetworkCode, StationCode, stninfopath) JobServer.wait() pbar.close() def scan_station_info_man(cnn, pyArchive, stn_info_path, stations, stn_info_net, stdin=None): # input "stations" has a list in net.stnm format print " >> Manual scan of station info files in " + stn_info_path NetworkCode = stn_info_net if stdin: stn_info_obj = pyStationInfo.StationInfo(cnn) stn_list = stn_info_obj.parse_station_info(stdin) for Station in tqdm(stations, total=len(stations), disable=None): # input "stations" has a list in net.stnm format if Station['StationCode'] in [stn['StationCode'].lower() for stn in stn_list]: tqdm.write(" >> Processing %s using network code %s" % (Station['StationCode'], NetworkCode)) out = insert_stninfo(NetworkCode, Station['StationCode'], stdin) if out: tqdm.write(out) else: tqdm.write(' >> Station %s.%s was not found in the station info file %s' % (Station['NetworkCode'], Station['StationCode'], 'standard input')) else: if os.path.isfile(stn_info_path): path2stninfo = [stn_info_path] else: _, path2stninfo = pyArchive.scan_archive_struct_stninfo(stn_info_path) print " >> Found %i station information files." 
% (len(path2stninfo)) for stninfopath in path2stninfo: stn_info_obj = pyStationInfo.StationInfo(cnn) stn_list = stn_info_obj.parse_station_info(stninfopath) for Station in tqdm(stations, total=len(stations), disable=None): # input "stations" has a list in net.stnm format if Station['StationCode'] in [stn['StationCode'].lower() for stn in stn_list]: tqdm.write(" >> Processing %s using network code %s" % (Station['StationCode'], NetworkCode)) out = insert_stninfo(NetworkCode, Station['StationCode'], stninfopath) if out: tqdm.write(out) else: tqdm.write(' >> Station %s.%s was not found in the station info file %s' % (Station['NetworkCode'], Station['StationCode'], stninfopath)) return def hash_check(cnn, master_list, sdate, edate, rehash=False, h_tolerant=0): print " >> Running hash check to the PPP solutions..." master_list = [item['NetworkCode'] + '.' + item['StationCode'] for item in master_list] ppp_soln = cnn.query('SELECT * FROM ppp_soln ' 'WHERE "NetworkCode" || \'.\' || "StationCode" IN (\'' + '\',\''.join(master_list) + '\') ' 'AND "Year" || \' \' || to_char("DOY", \'fm000\') ' 'BETWEEN \'' + sdate.yyyyddd() + '\' AND \'' + (edate+1).yyyyddd() + '\' ' 'ORDER BY "Year", "DOY", "NetworkCode", "StationCode"') tbl = ppp_soln.dictresult() archive = pyArchiveStruct.RinexStruct(cnn) # check the hash values if specified if not rehash: print ' -- Checking hash values.' else: print ' -- Rehashing all records. This may take a while...' for soln in tqdm(tbl, ncols=80, disable=None): # load station info object try: # lookup for the rinex_proc record rinex = archive.get_rinex_record(NetworkCode=soln['NetworkCode'], StationCode=soln['StationCode'], ObservationYear=soln['Year'], ObservationDOY=soln['DOY']) if not rinex: # if no records, print warning tqdm.write(" -- Could not find RINEX for %s.%s %i %03i. PPP solution will be deleted." % (soln['NetworkCode'], soln['StationCode'], soln['Year'], soln['DOY'])) cnn.delete('ppp_soln', soln) else: # select the first record rinex = rinex[0] dd = rinex['ObservationSTime'] + (rinex['ObservationETime'] - rinex['ObservationSTime']) / 2 stninfo = pyStationInfo.StationInfo(cnn, soln['NetworkCode'], soln['StationCode'], pyDate.Date(datetime=dd), h_tolerance=h_tolerant) if stninfo.currentrecord.hash != soln['hash']: if not rehash: tqdm.write(" -- Hash value for %s.%s %i %03i does not match with Station Information hash. " "PPP coordinate will be recalculated." % (soln['NetworkCode'], soln['StationCode'], soln['Year'], soln['DOY'])) cnn.delete('ppp_soln', soln) else: tqdm.write(" -- %s.%s %i %03i has been rehashed." % (soln['NetworkCode'], soln['StationCode'], soln['Year'], soln['DOY'])) cnn.update('ppp_soln', soln, hash=stninfo.currentrecord.hash) except pyStationInfo.pyStationInfoException as e: tqdm.write(str(e)) except Exception: raise if not rehash: print ' -- Done checking hash values.' else: print ' -- Done rehashing PPP records.' def process_ppp(cnn, Config, pyArchive, archive_path, JobServer, master_list, sdate, edate, h_tolerance): print " >> Running PPP on the RINEX files in the archive..." master_list = [item['NetworkCode'] + '.' 
+ item['StationCode'] for item in master_list] # for each rinex in the db, run PPP and get a coordinate rs_rnx = cnn.query('SELECT rinex.* FROM rinex_proc as rinex ' 'LEFT JOIN ppp_soln ON ' 'rinex."NetworkCode" = ppp_soln."NetworkCode" AND ' 'rinex."StationCode" = ppp_soln."StationCode" AND ' 'rinex."ObservationYear" = ppp_soln."Year" AND ' 'rinex."ObservationDOY" = ppp_soln."DOY" ' 'WHERE ppp_soln."NetworkCode" is null AND ' 'rinex."NetworkCode" || \'.\' || rinex."StationCode" IN (\'' + '\',\''.join(master_list) + '\') ' 'AND rinex."ObservationSTime" BETWEEN \'' + sdate.yyyymmdd() + '\' AND \'' + (edate+1).yyyymmdd() + '\' ' 'ORDER BY "ObservationSTime"') tblrinex = rs_rnx.dictresult() pbar = tqdm(total=len(tblrinex), ncols=80, disable=None) modules = ('dbConnection', 'pyRinex', 'pyPPP', 'pyStationInfo', 'pyDate', 'pySp3', 'os', 'platform', 'pyArchiveStruct', 'traceback', 'pyOptions', 'pyEvents', 'Utils') depfuncs = (remove_from_archive, verify_rinex_date_multiday) JobServer.create_cluster(execute_ppp, depfuncs, callback=callback_handle, progress_bar=pbar, modules=modules) for record in tblrinex: rinex_path = pyArchive.build_rinex_path(record['NetworkCode'], record['StationCode'], record['ObservationYear'], record['ObservationDOY']) # add the base dir rinex_path = os.path.join(archive_path, rinex_path) JobServer.submit(record, rinex_path, h_tolerance) JobServer.wait() pbar.close() # print a summary of the events generated by the run print_scan_archive_summary(cnn) def print_scan_archive_summary(cnn): # find the last event in the executions table exec_date = cnn.query_float('SELECT max(exec_date) as mx FROM executions WHERE script = \'ScanArchive.py\'') info = cnn.query_float('SELECT count(*) as cc FROM events WHERE "EventDate" >= \'%s\' AND "EventType" = \'info\'' % exec_date[0][0]) erro = cnn.query_float('SELECT count(*) as cc FROM events WHERE "EventDate" >= \'%s\' AND "EventType" = \'error\'' % exec_date[0][0]) warn = cnn.query_float('SELECT count(*) as cc FROM events WHERE "EventDate" >= \'%s\' AND "EventType" = \'warn\'' % exec_date[0][0]) print ' >> Summary of events for this run:' print ' -- info : %i' % info[0][0] print ' -- errors : %i' % erro[0][0] print ' -- warnings: %i' % warn[0][0] def export_station(cnn, stnlist, pyArchive, archive_path, dataless): # loop collecting the necessary information print " >> Collecting the information for each station in the list..." 
pbar1 = tqdm(total=len(stnlist), ncols=160, position=0, disable=None) for stn in tqdm(stnlist, ncols=80, disable=None): NetworkCode = stn['NetworkCode'] StationCode = stn['StationCode'] rs_stn = cnn.query('SELECT * FROM stations WHERE "NetworkCode" = \'%s\' AND "StationCode" = \'%s\'' % (NetworkCode, StationCode)) stn = rs_stn.dictresult()[0] pbar1.set_postfix(Station='%s.%s' % (NetworkCode, StationCode)) pbar1.update() export_dic = dict() # list of rinex files rinex_lst = cnn.query('SELECT * FROM rinex WHERE "NetworkCode" = \'%s\' AND "StationCode" = \'%s\' ' 'ORDER BY "ObservationYear", "ObservationDOY"' % (NetworkCode, StationCode)) rinex_lst = rinex_lst.dictresult() # rinex_lst = pyArchive.get_rinex_record(NetworkCode=NetworkCode, StationCode=StationCode) # list of metadata stninfo = pyStationInfo.StationInfo(cnn, NetworkCode, StationCode, allow_empty=True) export_dic['NetworkCode'] = NetworkCode export_dic['StationCode'] = StationCode export_dic['StationInfo'] = stninfo if stn['lat'] and stn['auto_x'] and stn['Harpos_coeff_otl']: export_dic['lat'] = stn['lat'] export_dic['lon'] = stn['lon'] export_dic['height'] = stn['height'] export_dic['x'] = stn['auto_x'] export_dic['y'] = stn['auto_y'] export_dic['z'] = stn['auto_z'] export_dic['otl'] = stn['Harpos_coeff_otl'] export_dic['dome'] = stn['dome'] export_dic['max_dist'] = stn['max_dist'] else: tqdm.write(' -- Warning! %s.%s has incomplete station data' % (NetworkCode, StationCode)) # create dir for the rinex files dest = 'production/export/%s.%s' % (NetworkCode, StationCode) if not os.path.isdir(dest): os.makedirs(dest) rinex_dict = [] pbar2 = tqdm(total=len(rinex_lst), ncols=160, position=1, disable=None) for rnx in rinex_lst: # make a copy of each file rnx_path = pyArchive.build_rinex_path(NetworkCode=NetworkCode, StationCode=StationCode, ObservationYear=rnx['ObservationYear'], ObservationDOY=rnx['ObservationDOY'], filename=rnx['Filename']) try: if not dataless: # only copy the files if dataless == False shutil.copy(os.path.join(archive_path, rnx_path), os.path.join(dest, os.path.basename(rnx_path))) rinex_dict = rinex_dict + [rnx] except IOError: tqdm.write(' -- Warning! File not found in archive: %s' % (os.path.join(archive_path, rnx_path))) pbar2.set_postfix(Date='%s %03s' % (rnx['ObservationYear'], rnx['ObservationDOY'])) pbar2.update() pbar2.close() export_dic['files'] = len(rinex_dict) export_dic['rinex'] = rinex_dict with open(os.path.join(dest, '%s.%s.json') % (NetworkCode, StationCode), 'w') as file: json.dump(export_dic, file, indent=4, sort_keys=True, cls=Encoder) # make the zip file with the station with zipfile.ZipFile('%s.%s.zip' % (NetworkCode, StationCode), "w", zipfile.ZIP_DEFLATED, allowZip64=True) as zf: for root, _, filenames in os.walk(dest): for name in filenames: name = os.path.join(root, name) name = os.path.normpath(name) zf.write(name, os.path.basename(name)) shutil.rmtree(dest) pbar1.close() print "" def import_station(cnn, args): files = args[1:] network = args[0] archive = pyArchiveStruct.RinexStruct(cnn) print " >> Processing input files..." 
for ff in tqdm(files, ncols=160, disable=None): filename = os.path.basename(ff) if filename.endswith('.zip'): fileparts = filename.split('.') NetworkCode = fileparts[0].lower() StationCode = fileparts[1].lower() path = 'production/archive/' + str(uuid.uuid4()) try: # process each station file zipfile.ZipFile(ff).extractall(path) jfile = glob.glob(os.path.join(path, '*.json')) # DDG: may want to consider other compression formats rnx_files = [f for f_ in [glob.glob(os.path.join(path, e)) for e in ['*.gz', '*d.Z']] for f in f_] station = json.load(open(jfile[0], 'r')) spatial = pyPPP.PPPSpatialCheck([station['lat']], [station['lon']], [station['height']]) result, match, closest_stn = spatial.verify_spatial_coherence(cnn, StationCode) if result: tqdm.write(' -- Found external station %s.%s in network %s (distance %.3f)' % (NetworkCode, StationCode, match[0]['NetworkCode'], match[0]['distance'])) # ask the user what to do with the data r = raw_input('\n Insert data to this station?: y/n ') if r.lower() == 'y': try_insert_files(cnn, archive, station, match[0]['NetworkCode'], StationCode, rnx_files) else: if len(match) == 1: tqdm.write(' -- External station %s.%s not found. Possible match is %s.%s: %.3f m' % (NetworkCode, StationCode, match[0]['NetworkCode'], match[0]['StationCode'], match[0]['distance'])) # ask the user what to do with the data r = raw_input('\n Insert new station %s with network code %s ' 'or add this data to station %s.%s?: (i)nsert new/(a)dd ' % (StationCode, network, match[0]['NetworkCode'], match[0]['StationCode'])) if r.lower() == 'i': if insert_station(cnn, network, station): try_insert_files(cnn, archive, station, network, StationCode, rnx_files) else: # if data is added to existing station, replace the StationCode with the matched # StationCode the rinexobj will apply the naming convention to the file try_insert_files(cnn, archive, station, match[0]['NetworkCode'], match[0]['StationCode'], rnx_files) elif len(match) > 1: tqdm.write(' -- External station %s.%s not found. Possible matches are %s' % (NetworkCode, StationCode, ', '.join(['%s.%s: %.3f m' % (m['NetworkCode'], m['StationCode'], m['distance']) for m in match]))) options = ', '.join(['%s.%s (%i)' % (m['NetworkCode'], m['StationCode'], i+1) for i, m in enumerate(match)]) r = raw_input('\n Insert new station %s with network code %s ' 'or add this data as %s: (i)nsert new/(number)' % (StationCode, network, options)) if r.lower() == 'i': if insert_station(cnn, network, station): try_insert_files(cnn, archive, station, network, StationCode, rnx_files) else: try: i = int(r) try_insert_files(cnn, archive, station, match[i]['NetworkCode'], match[i]['StationCode'], rnx_files) except ValueError: tqdm.write(' -- Selected value is not numeric!') else: tqdm.write(' -- External station %s.%s not found. Closest station is %s.%s: %.3f m' % (NetworkCode, StationCode, closest_stn[0]['NetworkCode'], closest_stn[0]['StationCode'], closest_stn[0]['distance'])) # ask the user what to do with the data r = raw_input('\n Insert new station with default station network %s?: y/n ' % network) if r.lower() == 'y': if insert_station(cnn, network, station): # now that station was created, insert files try_insert_files(cnn, archive, station, network, StationCode, rnx_files) # delete all files once we're done. 
shutil.rmtree(path) except zipfile.BadZipfile: tqdm.write(' -- Bad zipfile detected: %s' % ff) def insert_station(cnn, network, station): # check that another station with same name doesn't exist in this network rstn = cnn.query_float('SELECT * FROM stations WHERE "NetworkCode" = \'%s\' AND ' '"StationCode" = \'%s\'' % (network, station['StationCode'])) if len(rstn) > 0: tqdm.write(' -- Station code %s already exists in network %s. Cannot insert station' % (station['StationCode'], network)) return False else: # check if network exists if not cnn.query_float('SELECT * FROM networks WHERE "NetworkCode" = \'%s\'' % network): cnn.insert('networks', NetworkCode=network) # insert the station and metadata in the json file cnn.insert('stations', NetworkCode=network, StationCode=station['StationCode'], auto_x=station['x'], auto_y=station['y'], auto_z=station['z'], Harpos_coeff_otl=station['otl'], lat=station['lat'], lon=station['lon'], height=station['height'], max_dist=station['max_dist'] if 'max_dist' in station.keys() else None, dome=station['dome'] if 'dome' in station.keys() else None) return True def try_insert_files(cnn, archive, station, NetworkCode, StationCode, rinex): import_stninfo = station['StationInfo'] stninfo = pyStationInfo.StationInfo(cnn, NetworkCode, StationCode, allow_empty=True) if rinex: # a station file with rinex data in it. Attempt to insert the data and the associated station information for rnx in rinex: with pyRinex.ReadRinex(NetworkCode, StationCode, rnx) as rinexinfo: inserted = archive.insert_rinex(rinexobj=rinexinfo) if not inserted: # display an error message tqdm.write(' -- %s.%s (%s) not imported: already existed in database.' % (NetworkCode, StationCode, os.path.basename(rnx))) else: tqdm.write(' -- %s.%s (%s) successfully imported into database.' % (NetworkCode, StationCode, os.path.basename(rnx))) try: pyStationInfo.StationInfo(cnn, NetworkCode, StationCode, rinexinfo.date) except pyStationInfo.pyStationInfoException: # station info not in db! import the corresponding station info stninfo_inserted = False for record in import_stninfo: import_record = pyStationInfo.StationInfoRecord(NetworkCode, StationCode, record) # DDG: to avoid problems with files that contain two different station info records, we check # that import_record.DateEnd.datetime() is not less than the first observation of the rinex if rinexinfo.datetime_firstObs >= import_record.DateStart.datetime() and \ not import_record.DateEnd.datetime() <= rinexinfo.datetime_firstObs: if rinexinfo.datetime_lastObs > import_record.DateEnd.datetime(): tqdm.write(' WARNING! RINEX file %s has an end data past the station info record. ' 'Maybe this file has a receiver/antenna change in the middle.' % os.path.basename(rnx)) # the record we are looking for try: stninfo.InsertStationInfo(import_record) stninfo_inserted = True except pyStationInfo.pyStationInfoException as e: tqdm.write(' ' + str(e)) if not stninfo_inserted: tqdm.write(' Could not find a valid station info in the database or in the station ' 'package. 
File remains in database without metadata.') else: # a station file without rinex data # attempt to merge the station information for record in import_stninfo: import_record = pyStationInfo.StationInfoRecord(NetworkCode, StationCode, record) try: stninfo.InsertStationInfo(import_record) tqdm.write(' -- Successful insert: %s -> %s' + str(import_record['DateStart']), str(import_record['DateEnd'])) except pyStationInfo.pyStationInfoException as e: tqdm.write(' -- ' + str(e)) def get_rinex_file(cnn, stnlist, date, Archive_path): archive = pyArchiveStruct.RinexStruct(cnn) print " >> Getting stations from db..." for stn in tqdm(stnlist, ncols=80, disable=None): NetworkCode = stn['NetworkCode'] StationCode = stn['StationCode'] rinex = archive.build_rinex_path(NetworkCode, StationCode, date.year, date.doy) if rinex is not None: rinex = os.path.join(Archive_path, rinex) with pyRinex.ReadRinex(NetworkCode, StationCode, rinex, False) as Rinex: # type: pyRinex.ReadRinex StationInfo = pyStationInfo.StationInfo(cnn, NetworkCode, StationCode, Rinex.date) Rinex.normalize_header(StationInfo) Rinex.compress_local_copyto('./') else: tqdm.write(" -- %s not found for %s.%s" % (date.yyyyddd(), NetworkCode, StationCode)) def main(): parser = argparse.ArgumentParser(description='Archive operations Main Program') parser.add_argument('stnlist', type=str, nargs='+', metavar='all|net.stnm', help="List of networks/stations to process given in [net].[stnm] format or just [stnm] " "(separated by spaces; if [stnm] is not unique in the database, all stations with that " "name will be processed). Use keyword 'all' to process all stations in the database. " "If [net].all is given, all stations from network [net] will be processed. " "Alternatively, a file with the station list can be provided.") parser.add_argument('-rinex', '--rinex', metavar='{ignore_stnlist}', type=int, nargs=1, default=None, help="Scan the current archive for RINEX 2/3 files and add them to the database if missing. " "Station list will be used to filter specific networks and stations if {ignore_stnlist} = " "0. For example: ScanArchive [net].all -rinex 0 will process all the stations in network " "[net], but networks and stations have to exist in the database. " "If ScanArchive [net].all -rinex 1 the station list will be ignored and everything in the " "archive will be checked (and added to the db if missing) even if networks and stations " "don't exist. Networks and stations will be added if they don't exist.") parser.add_argument('-otl', '--ocean_loading', action='store_true', help="Calculate ocean loading coefficients using FES2004. To calculate FES2014b coefficients, " "use OTL_FES2014b.py") parser.add_argument('-stninfo', '--station_info', nargs='*', metavar='argument', help="Insert station information to the database. " "If no arguments are given, then scan the archive for station info files and use their " "location (folder) to determine the network to use during insertion. " "Only stations in the station list will be processed. " "If a filename is provided, then scan that file only, in which case a second argument " "specifies the network to use during insertion. Eg: -stninfo ~/station.info arg. " "In cases where multiple networks are being processed, the network argument will be used " "to desambiguate station code conflicts. " "Eg: ScanArchive all -stninfo ~/station.info arg -> if a station named igm1 exists in " "networks 'igs' and 'arg', only 'arg.igm1' will get the station information insert. 
" "Use keyword 'stdin' to read the station information data from the pipeline.") parser.add_argument('-export', '--export_station', nargs='?', metavar='[dataless seed]', default=None, const=False, help="Export a station from the local database that can be imported into another " "Parallel.GAMIT system using the -import option." "One file is created per station in the current directory. If the [dataless seed] switch " "is passed (e.g. -export true), then the export seed is created without data " "(only metadata included, i.e. station info, station record, etc).") parser.add_argument('-import', '--import_station', nargs='+', type=str, metavar=('{default net}', '{zipfiles}'), help="Import a station from zipfiles produced by another Parallel.GAMIT system. " "Wildcards are accepted to import multiple zipfiles. If station does not exist, use " "{default net} to specify the network where station should be added to. If {default net} " "does not exit, it will be created. Station list is ignored.") parser.add_argument('-get', '--get_from_archive', nargs=1, metavar='{date}', help="Get the specified station from the archive and copy it to the current directory. Fix it " "to match the station information in the database.") parser.add_argument('-ppp', '--ppp', nargs='*', metavar='argument', help="Run ppp on the rinex files in the database. Append [date_start] and (optionally) " "[date_end] to limit the range of the processing. Allowed formats are yyyy_doy, wwww-d, " "fyear or yyyy/mm/dd. Append keyword 'hash' to the end to check the PPP hash values " "against the station information records. If hash doesn't match, recalculate the PPP " "solutions.") parser.add_argument('-rehash', '--rehash', nargs='*', metavar='argument', help="Check PPP hash against station information hash. Rehash PPP solutions to match the " "station information hash without recalculating the PPP solution. Optionally append " "[date_start] and (optionally) [date_end] to limit the rehashing time window. " "Allowed formats are yyyy.doy or yyyy/mm/dd.") parser.add_argument('-tol', '--stninfo_tolerant', nargs=1, type=int, metavar='{hours}', default=[0], help="Specify a tolerance (in hours) for station information gaps (only use for early " "survey data). Default is zero.") parser.add_argument('-np', '--noparallel', action='store_true', help="Execute command without parallelization.") args = parser.parse_args() if args.station_info is not None and (not len(args.station_info) in (0, 2)): parser.error('-stninfo requires 0 or 2 arguments. 
{} given.'.format(len(args.station_info))) Config = pyOptions.ReadOptions("gnss_data.cfg") # type: pyOptions.ReadOptions cnn = dbConnection.Cnn("gnss_data.cfg") # create the execution log cnn.insert('executions', script='ScanArchive.py') # get the station list stnlist = Utils.process_stnlist(cnn, args.stnlist) pyArchive = pyArchiveStruct.RinexStruct(cnn) JobServer = pyJobServer.JobServer(Config, run_parallel=not args.noparallel, software_sync=[Config.options['ppp_remote_local']]) # type: pyJobServer.JobServer ######################################### if args.rinex is not None: scan_rinex(cnn, JobServer, pyArchive, Config.archive_path, stnlist, args.rinex) ######################################### if args.ocean_loading: process_otl(cnn, JobServer, stnlist) ######################################### if args.station_info is not None: if len(args.station_info) == 0: scan_station_info(JobServer, pyArchive, Config.archive_path, stnlist) else: stn_info_stdin = [] if args.station_info[0] == 'stdin': for line in sys.stdin: stn_info_stdin.append(line) scan_station_info_man(cnn, pyArchive, args.station_info[0], stnlist, args.station_info[1], stn_info_stdin) ######################################### if args.rehash is not None: dates = [] try: dates = process_date(args.rehash) except ValueError as e: parser.error(str(e)) hash_check(cnn, stnlist, dates[0], dates[1], rehash=True, h_tolerant=args.stninfo_tolerant[0]) ######################################### if args.ppp is not None: # check other possible arguments dates = [] do_hash = True if 'hash' in args.ppp else False date_args = [date for date in args.ppp if date != 'hash'] try: dates = process_date(date_args) except ValueError as e: parser.error(str(e)) if do_hash: hash_check(cnn, stnlist, dates[0], dates[1], rehash=False, h_tolerant=args.stninfo_tolerant[0]) process_ppp(cnn, Config, pyArchive, Config.archive_path, JobServer, stnlist, dates[0], dates[1], args.stninfo_tolerant[0]) ######################################### if args.export_station is not None: export_station(cnn, stnlist, pyArchive, Config.archive_path, args.export_station) ######################################### if args.import_station: import_station(cnn, args.import_station) ######################################### if args.get_from_archive: dates = process_date(args.get_from_archive) get_rinex_file(cnn, stnlist, dates[0], Config.archive_path) # remove the production dir # if os.path.isdir('production'): # rmtree('production') JobServer.close_cluster() if __name__ == '__main__': main()
gpl-3.0
8,272,388,707,743,091,000
-8,394,534,001,297,085,000
43.921833
149
0.54861
false
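An aside on the record above: obtain_otl() averages the per-day PPP (or autonomous) coordinates after rejecting values that fall more than two standard deviations from the mean. The snippet below is a minimal, self-contained sketch of that trimming step, not code from the archived file; the coordinate values are invented for illustration.

import numpy

def trimmed_mean(values, n_sigma=2):
    """Average only the values within n_sigma standard deviations of the raw mean."""
    v = numpy.array(values, dtype=float)
    if v.size < 2 or v.std() == 0:
        return float(v.mean())
    keep = numpy.abs(v - v.mean()) < n_sigma * v.std()
    return float(v[keep].mean())

if __name__ == '__main__':
    # nine consistent X coordinates (metres) plus one 50 m blunder that gets rejected
    x = [1938000.12, 1938000.13, 1938000.11, 1938000.12, 1938000.14,
         1938000.12, 1938000.13, 1938000.11, 1938000.12, 1938050.00]
    print('Mean X: %.3f m' % trimmed_mean(x))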
ekasitk/sahara
sahara/utils/hacking/logging_checks.py
6
4253
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import re # NOTE(Kezar): this checks was copied from cinder/nova and should be one day # appear at general hacking checks. So we need to try remember it and remove it # when it'll be happened. # FIXME(Kezar): may be it will be better to right in the way that introduced in # keystone but it will need additional work and total checks refactoring. log_translation_LI = re.compile( r"(.)*LOG\.(info)\(\s*(_\(|'|\")") log_translation_LE = re.compile( r"(.)*LOG\.(exception)\(\s*(_\(|'|\")") log_translation_LW = re.compile( r"(.)*LOG\.(warning)\(\s*(_\(|'|\")") log_translation_LC = re.compile( r"(.)*LOG\.(critical)\(\s*('|\")") accepted_log_level = re.compile( r"^LOG\.(debug|info|exception|warning|error|critical)\(") def validate_log_translations(logical_line, filename): """Check if log levels has translations and if it's correct. S369 S370 S371 S372 """ # NOTE(Kezar): sahara/tests included because we don't require translations # in tests. sahara/openstack/common included because it's part imported # from oslo and we don't change it forever and ever. sahara/db/templates # provide separate cli interface so we don't want to translate it. ignore_dirs = ["sahara/db/templates", "sahara/tests", "sahara/openstack/common"] for directory in ignore_dirs: if directory in filename: return # Translations are not required in the test directory. # This will not catch all instances of violations, just direct # misuse of the form LOG.info('Message'). msg = "S369: LOG.info messages require translations `_LI()`!" if log_translation_LI.search(logical_line): yield (0, msg) msg = ("S370: LOG.exception and LOG.error messages require " "translations `_LE()`!") if log_translation_LE.search(logical_line): yield (0, msg) msg = "S371: LOG.warning messages require translations `_LW()`!" if log_translation_LW.search(logical_line): yield (0, msg) msg = "S372: LOG.critical messages require translations `_LC()`!" if log_translation_LC.search(logical_line): yield (0, msg) def no_translate_debug_logs(logical_line, filename): """Check for 'LOG.debug(_(' As per our translation policy, https://wiki.openstack.org/wiki/LoggingStandards#Log_Translation we shouldn't translate debug level logs. * This check assumes that 'LOG' is a logger. * Use filename so we can start enforcing this in specific folders instead of needing to do so all at once. S373 """ msg = "S373 Don't translate debug level logs" if logical_line.startswith("LOG.debug(_("): yield(0, msg) def accepted_log_levels(logical_line, filename): """In Sahara we use only 5 log levels. This check is needed because we don't want new contributors to use deprecated log levels. S373 """ # NOTE(Kezar): sahara/tests included because we don't require translations # in tests. sahara/openstack/common included because it's part imported # from oslo and we don't change it forever and ever. sahara/db/templates # provide separate cli interface so we don't want to translate it. 
ignore_dirs = ["sahara/db/templates", "sahara/tests", "sahara/openstack/common"] for directory in ignore_dirs: if directory in filename: return msg = ("S373 You used deprecated log level. Accepted log levels are " "debug|info|warning|error|critical") if logical_line.startswith("LOG."): if not accepted_log_level.search(logical_line): yield(0, msg)
apache-2.0
-5,571,801,921,149,250,000
2,470,664,393,164,711,000
35.982609
79
0.666118
false
codasus/django-blogages
blogages/django/contrib/gis/gdal/tests/test_ds.py
233
10504
import os, os.path, unittest from django.contrib.gis.gdal import DataSource, Envelope, OGRGeometry, OGRException, OGRIndexError, GDAL_VERSION from django.contrib.gis.gdal.field import OFTReal, OFTInteger, OFTString from django.contrib.gis.geometry.test_data import get_ds_file, TestDS # List of acceptable data sources. ds_list = (TestDS('test_point', nfeat=5, nfld=3, geom='POINT', gtype=1, driver='ESRI Shapefile', fields={'dbl' : OFTReal, 'int' : OFTInteger, 'str' : OFTString,}, extent=(-1.35011,0.166623,-0.524093,0.824508), # Got extent from QGIS srs_wkt='GEOGCS["GCS_WGS_1984",DATUM["WGS_1984",SPHEROID["WGS_1984",6378137,298.257223563]],PRIMEM["Greenwich",0],UNIT["Degree",0.017453292519943295]]', field_values={'dbl' : [float(i) for i in range(1, 6)], 'int' : range(1, 6), 'str' : [str(i) for i in range(1, 6)]}, fids=range(5)), TestDS('test_vrt', ext='vrt', nfeat=3, nfld=3, geom='POINT', gtype='Point25D', driver='VRT', fields={'POINT_X' : OFTString, 'POINT_Y' : OFTString, 'NUM' : OFTString}, # VRT uses CSV, which all types are OFTString. extent=(1.0, 2.0, 100.0, 523.5), # Min/Max from CSV field_values={'POINT_X' : ['1.0', '5.0', '100.0'], 'POINT_Y' : ['2.0', '23.0', '523.5'], 'NUM' : ['5', '17', '23']}, fids=range(1,4)), TestDS('test_poly', nfeat=3, nfld=3, geom='POLYGON', gtype=3, driver='ESRI Shapefile', fields={'float' : OFTReal, 'int' : OFTInteger, 'str' : OFTString,}, extent=(-1.01513,-0.558245,0.161876,0.839637), # Got extent from QGIS srs_wkt='GEOGCS["GCS_WGS_1984",DATUM["WGS_1984",SPHEROID["WGS_1984",6378137,298.257223563]],PRIMEM["Greenwich",0],UNIT["Degree",0.017453292519943295]]'), ) bad_ds = (TestDS('foo'), ) class DataSourceTest(unittest.TestCase): def test01_valid_shp(self): "Testing valid SHP Data Source files." for source in ds_list: # Loading up the data source ds = DataSource(source.ds) # Making sure the layer count is what's expected (only 1 layer in a SHP file) self.assertEqual(1, len(ds)) # Making sure GetName works self.assertEqual(source.ds, ds.name) # Making sure the driver name matches up self.assertEqual(source.driver, str(ds.driver)) # Making sure indexing works try: ds[len(ds)] except OGRIndexError: pass else: self.fail('Expected an IndexError!') def test02_invalid_shp(self): "Testing invalid SHP files for the Data Source." for source in bad_ds: self.assertRaises(OGRException, DataSource, source.ds) def test03a_layers(self): "Testing Data Source Layers." print "\nBEGIN - expecting out of range feature id error; safe to ignore.\n" for source in ds_list: ds = DataSource(source.ds) # Incrementing through each layer, this tests DataSource.__iter__ for layer in ds: # Making sure we get the number of features we expect self.assertEqual(len(layer), source.nfeat) # Making sure we get the number of fields we expect self.assertEqual(source.nfld, layer.num_fields) self.assertEqual(source.nfld, len(layer.fields)) # Testing the layer's extent (an Envelope), and it's properties if source.driver == 'VRT' and (GDAL_VERSION > (1, 7, 0) and GDAL_VERSION < (1, 7, 3)): # There's a known GDAL regression with retrieving the extent # of a VRT layer in versions 1.7.0-1.7.2: # http://trac.osgeo.org/gdal/ticket/3783 pass else: self.assertEqual(True, isinstance(layer.extent, Envelope)) self.assertAlmostEqual(source.extent[0], layer.extent.min_x, 5) self.assertAlmostEqual(source.extent[1], layer.extent.min_y, 5) self.assertAlmostEqual(source.extent[2], layer.extent.max_x, 5) self.assertAlmostEqual(source.extent[3], layer.extent.max_y, 5) # Now checking the field names. 
flds = layer.fields for f in flds: self.assertEqual(True, f in source.fields) # Negative FIDs are not allowed. self.assertRaises(OGRIndexError, layer.__getitem__, -1) self.assertRaises(OGRIndexError, layer.__getitem__, 50000) if hasattr(source, 'field_values'): fld_names = source.field_values.keys() # Testing `Layer.get_fields` (which uses Layer.__iter__) for fld_name in fld_names: self.assertEqual(source.field_values[fld_name], layer.get_fields(fld_name)) # Testing `Layer.__getitem__`. for i, fid in enumerate(source.fids): feat = layer[fid] self.assertEqual(fid, feat.fid) # Maybe this should be in the test below, but we might as well test # the feature values here while in this loop. for fld_name in fld_names: self.assertEqual(source.field_values[fld_name][i], feat.get(fld_name)) print "\nEND - expecting out of range feature id error; safe to ignore." def test03b_layer_slice(self): "Test indexing and slicing on Layers." # Using the first data-source because the same slice # can be used for both the layer and the control values. source = ds_list[0] ds = DataSource(source.ds) sl = slice(1, 3) feats = ds[0][sl] for fld_name in ds[0].fields: test_vals = [feat.get(fld_name) for feat in feats] control_vals = source.field_values[fld_name][sl] self.assertEqual(control_vals, test_vals) def test03c_layer_references(self): "Test to make sure Layer access is still available without the DataSource." source = ds_list[0] # See ticket #9448. def get_layer(): # This DataSource object is not accessible outside this # scope. However, a reference should still be kept alive # on the `Layer` returned. ds = DataSource(source.ds) return ds[0] # Making sure we can call OGR routines on the Layer returned. lyr = get_layer() self.assertEqual(source.nfeat, len(lyr)) self.assertEqual(source.gtype, lyr.geom_type.num) def test04_features(self): "Testing Data Source Features." for source in ds_list: ds = DataSource(source.ds) # Incrementing through each layer for layer in ds: # Incrementing through each feature in the layer for feat in layer: # Making sure the number of fields, and the geometry type # are what's expected. self.assertEqual(source.nfld, len(list(feat))) self.assertEqual(source.gtype, feat.geom_type) # Making sure the fields match to an appropriate OFT type. for k, v in source.fields.items(): # Making sure we get the proper OGR Field instance, using # a string value index for the feature. self.assertEqual(True, isinstance(feat[k], v)) # Testing Feature.__iter__ for fld in feat: self.assertEqual(True, fld.name in source.fields.keys()) def test05_geometries(self): "Testing Geometries from Data Source Features." for source in ds_list: ds = DataSource(source.ds) # Incrementing through each layer and feature. for layer in ds: for feat in layer: g = feat.geom # Making sure we get the right Geometry name & type self.assertEqual(source.geom, g.geom_name) self.assertEqual(source.gtype, g.geom_type) # Making sure the SpatialReference is as expected. if hasattr(source, 'srs_wkt'): self.assertEqual(source.srs_wkt, g.srs.wkt) def test06_spatial_filter(self): "Testing the Layer.spatial_filter property." ds = DataSource(get_ds_file('cities', 'shp')) lyr = ds[0] # When not set, it should be None. self.assertEqual(None, lyr.spatial_filter) # Must be set a/an OGRGeometry or 4-tuple. self.assertRaises(TypeError, lyr._set_spatial_filter, 'foo') # Setting the spatial filter with a tuple/list with the extent of # a buffer centering around Pueblo. 
self.assertRaises(ValueError, lyr._set_spatial_filter, range(5)) filter_extent = (-105.609252, 37.255001, -103.609252, 39.255001) lyr.spatial_filter = (-105.609252, 37.255001, -103.609252, 39.255001) self.assertEqual(OGRGeometry.from_bbox(filter_extent), lyr.spatial_filter) feats = [feat for feat in lyr] self.assertEqual(1, len(feats)) self.assertEqual('Pueblo', feats[0].get('Name')) # Setting the spatial filter with an OGRGeometry for buffer centering # around Houston. filter_geom = OGRGeometry('POLYGON((-96.363151 28.763374,-94.363151 28.763374,-94.363151 30.763374,-96.363151 30.763374,-96.363151 28.763374))') lyr.spatial_filter = filter_geom self.assertEqual(filter_geom, lyr.spatial_filter) feats = [feat for feat in lyr] self.assertEqual(1, len(feats)) self.assertEqual('Houston', feats[0].get('Name')) # Clearing the spatial filter by setting it to None. Now # should indicate that there are 3 features in the Layer. lyr.spatial_filter = None self.assertEqual(3, len(lyr)) def suite(): s = unittest.TestSuite() s.addTest(unittest.makeSuite(DataSourceTest)) return s def run(verbosity=2): unittest.TextTestRunner(verbosity=verbosity).run(suite())
mit
2,620,528,229,399,978,000
-1,860,674,569,186,851,300
45.477876
171
0.576066
false
Daniex/horizon
openstack_dashboard/dashboards/admin/instances/views.py
8
7555
# Copyright 2012 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Copyright 2012 OpenStack Foundation # Copyright 2012 Nebula, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from django.core.urlresolvers import reverse from django.core.urlresolvers import reverse_lazy from django.utils.datastructures import SortedDict from django.utils.translation import ugettext_lazy as _ from horizon import exceptions from horizon import forms from horizon import tables from horizon.utils import memoized from openstack_dashboard import api from openstack_dashboard.dashboards.admin.instances \ import forms as project_forms from openstack_dashboard.dashboards.admin.instances \ import tables as project_tables from openstack_dashboard.dashboards.project.instances import views from openstack_dashboard.dashboards.project.instances.workflows \ import update_instance # re-use console from project.instances.views to make reflection work def console(args, **kvargs): return views.console(args, **kvargs) # re-use vnc from project.instances.views to make reflection work def vnc(args, **kvargs): return views.vnc(args, **kvargs) # re-use spice from project.instances.views to make reflection work def spice(args, **kvargs): return views.spice(args, **kvargs) # re-use rdp from project.instances.views to make reflection work def rdp(args, **kvargs): return views.rdp(args, **kvargs) class AdminUpdateView(views.UpdateView): workflow_class = update_instance.AdminUpdateInstance success_url = reverse_lazy("horizon:admin:instances:index") class AdminIndexView(tables.DataTableView): table_class = project_tables.AdminInstancesTable template_name = 'admin/instances/index.html' page_title = _("Instances") def has_more_data(self, table): return self._more def get_data(self): instances = [] marker = self.request.GET.get( project_tables.AdminInstancesTable._meta.pagination_param, None) search_opts = self.get_filters({'marker': marker, 'paginate': True}) # Gather our tenants to correlate against IDs try: tenants, has_more = api.keystone.tenant_list(self.request) except Exception: tenants = [] msg = _('Unable to retrieve instance project information.') exceptions.handle(self.request, msg) if 'project' in search_opts: ten_filter_ids = [t.id for t in tenants if t.name == search_opts['project']] del search_opts['project'] if len(ten_filter_ids) > 0: search_opts['tenant_id'] = ten_filter_ids[0] else: self._more = False return [] try: instances, self._more = api.nova.server_list( self.request, search_opts=search_opts, all_tenants=True) except Exception: self._more = False exceptions.handle(self.request, _('Unable to retrieve instance list.')) if instances: try: api.network.servers_update_addresses(self.request, instances, all_tenants=True) except Exception: exceptions.handle( self.request, message=_('Unable to retrieve IP addresses from Neutron.'), ignore=True) # Gather our flavors to correlate against IDs try: flavors = api.nova.flavor_list(self.request) 
except Exception: # If fails to retrieve flavor list, creates an empty list. flavors = [] full_flavors = SortedDict([(f.id, f) for f in flavors]) tenant_dict = SortedDict([(t.id, t) for t in tenants]) # Loop through instances to get flavor and tenant info. for inst in instances: flavor_id = inst.flavor["id"] try: if flavor_id in full_flavors: inst.full_flavor = full_flavors[flavor_id] else: # If the flavor_id is not in full_flavors list, # gets it via nova api. inst.full_flavor = api.nova.flavor_get( self.request, flavor_id) except Exception: msg = _('Unable to retrieve instance size information.') exceptions.handle(self.request, msg) tenant = tenant_dict.get(inst.tenant_id, None) inst.tenant_name = getattr(tenant, "name", None) return instances def get_filters(self, filters): filter_field = self.table.get_filter_field() filter_action = self.table._meta._filter_action if filter_action.is_api_filter(filter_field): filter_string = self.table.get_filter_string() if filter_field and filter_string: filters[filter_field] = filter_string return filters class LiveMigrateView(forms.ModalFormView): form_class = project_forms.LiveMigrateForm template_name = 'admin/instances/live_migrate.html' context_object_name = 'instance' success_url = reverse_lazy("horizon:admin:instances:index") page_title = _("Live Migrate") def get_context_data(self, **kwargs): context = super(LiveMigrateView, self).get_context_data(**kwargs) context["instance_id"] = self.kwargs['instance_id'] return context @memoized.memoized_method def get_hosts(self, *args, **kwargs): try: return api.nova.host_list(self.request) except Exception: redirect = reverse("horizon:admin:instances:index") msg = _('Unable to retrieve host information.') exceptions.handle(self.request, msg, redirect=redirect) @memoized.memoized_method def get_object(self, *args, **kwargs): instance_id = self.kwargs['instance_id'] try: return api.nova.server_get(self.request, instance_id) except Exception: redirect = reverse("horizon:admin:instances:index") msg = _('Unable to retrieve instance details.') exceptions.handle(self.request, msg, redirect=redirect) def get_initial(self): initial = super(LiveMigrateView, self).get_initial() _object = self.get_object() if _object: current_host = getattr(_object, 'OS-EXT-SRV-ATTR:host', '') initial.update({'instance_id': self.kwargs['instance_id'], 'current_host': current_host, 'hosts': self.get_hosts()}) return initial class DetailView(views.DetailView): redirect_url = 'horizon:admin:instances:index'
apache-2.0
3,531,301,513,903,614,500
-8,687,186,287,578,060,000
37.74359
79
0.619987
false
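The admin instances views above are only reachable once they are mapped to URLs. Below is a hedged sketch of that wiring in the style of an era-appropriate Horizon urls.py; the regexes and the 'live_migrate' name are illustrative, with only the 'index' name and the instance_id kwarg taken from the code above.

from django.conf.urls import patterns, url

from openstack_dashboard.dashboards.admin.instances import views

urlpatterns = patterns(
    '',
    # "index" matches the reverse() target horizon:admin:instances:index used above
    url(r'^$', views.AdminIndexView.as_view(), name='index'),
    # LiveMigrateView reads self.kwargs['instance_id'], so the URL must capture it
    url(r'^(?P<instance_id>[^/]+)/live_migrate$',
        views.LiveMigrateView.as_view(), name='live_migrate'),
)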
x303597316/hue
desktop/core/ext-py/Paste-2.0.1/tests/test_auth/test_auth_cookie.py
47
1527
# (c) 2005 Clark C. Evans # This module is part of the Python Paste Project and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php from six.moves import xrange import six from paste.auth import cookie from paste.wsgilib import raw_interactive, dump_environ from paste.response import header_value from paste.httpexceptions import * def build(application,setenv, *args, **kwargs): def setter(environ, start_response): save = environ['paste.auth.cookie'].append for (k,v) in setenv.items(): save(k) environ[k] = v return application(environ, start_response) return cookie.middleware(setter,*args,**kwargs) def test_noop(): app = build(dump_environ,{}) (status,headers,content,errors) = \ raw_interactive(app) assert not header_value(headers,'Set-Cookie') def test_basic(key='key', val='bingles'): app = build(dump_environ,{key:val}) (status,headers,content,errors) = \ raw_interactive(app) value = header_value(headers,'Set-Cookie') assert "Path=/;" in value assert "expires=" not in value cookie = value.split(";")[0] (status,headers,content,errors) = \ raw_interactive(app,{'HTTP_COOKIE': cookie}) expected = ("%s: %s" % (key,val.replace("\n","\n "))) if six.PY3: expected = expected.encode('utf8') assert expected in content def test_roundtrip(): roundtrip = str('').join(map(chr, xrange(256))) test_basic(roundtrip,roundtrip)
apache-2.0
7,509,943,101,151,447,000
-1,043,709,417,246,818,700
32.195652
71
0.659463
false
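The tests above pin down the cookie middleware's contract: any environ key whose name is appended to environ['paste.auth.cookie'] is serialized into a signed Set-Cookie header and restored on requests that present that cookie. A minimal sketch of using it outside the test harness, assuming only the API exercised above:

from paste.auth import cookie
from paste.wsgilib import raw_interactive
from paste.response import header_value

def app(environ, start_response):
    # Ask the middleware to persist REMOTE_USER across requests.
    environ['paste.auth.cookie'].append('REMOTE_USER')
    environ['REMOTE_USER'] = 'alice'
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'ok']

wrapped = cookie.middleware(app)
status, headers, body, errors = raw_interactive(wrapped)
print(header_value(headers, 'Set-Cookie'))  # signed cookie now carrying REMOTE_USER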
ntddk/pemu
scripts/tracetool/format/simpletrace_stap.py
84
2400
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Generate .stp file that outputs simpletrace binary traces (DTrace with SystemTAP only). """ __author__ = "Stefan Hajnoczi <redhat.com>" __copyright__ = "Copyright (C) 2014, Red Hat, Inc." __license__ = "GPL version 2 or (at your option) any later version" __maintainer__ = "Stefan Hajnoczi" __email__ = "[email protected]" from tracetool import out from tracetool.backend.dtrace import binary, probeprefix from tracetool.backend.simple import is_string from tracetool.format.stap import stap_escape def generate(events, backend): out('/* This file is autogenerated by tracetool, do not edit. */', '') for event_id, e in enumerate(events): if 'disable' in e.properties: continue out('probe %(probeprefix)s.simpletrace.%(name)s = %(probeprefix)s.%(name)s ?', '{', probeprefix=probeprefix(), name=e.name) # Calculate record size sizes = ['24'] # sizeof(TraceRecord) for type_, name in e.args: name = stap_escape(name) if is_string(type_): out(' try {', ' arg%(name)s_str = %(name)s ? user_string_n(%(name)s, 512) : "<null>"', ' } catch {}', ' arg%(name)s_len = strlen(arg%(name)s_str)', name=name) sizes.append('4 + arg%s_len' % name) else: sizes.append('8') sizestr = ' + '.join(sizes) # Generate format string and value pairs for record header and arguments fields = [('8b', str(event_id)), ('8b', 'gettimeofday_ns()'), ('4b', sizestr), ('4b', 'pid()')] for type_, name in e.args: name = stap_escape(name) if is_string(type_): fields.extend([('4b', 'arg%s_len' % name), ('.*s', 'arg%s_len, arg%s_str' % (name, name))]) else: fields.append(('8b', name)) # Emit the entire record in a single SystemTap printf() fmt_str = '%'.join(fmt for fmt, _ in fields) arg_str = ', '.join(arg for _, arg in fields) out(' printf("%%%(fmt_str)s", %(arg_str)s)', fmt_str=fmt_str, arg_str=arg_str) out('}') out()
gpl-2.0
2,217,547,594,159,243,000
2,700,635,556,823,368,700
32.802817
99
0.510417
false
CSC301H-Fall2013/JuakStore
site-packages/tests/modeltests/or_lookups/tests.py
150
7625
from __future__ import absolute_import from datetime import datetime from operator import attrgetter from django.db.models import Q from django.test import TestCase from .models import Article class OrLookupsTests(TestCase): def setUp(self): self.a1 = Article.objects.create( headline='Hello', pub_date=datetime(2005, 11, 27) ).pk self.a2 = Article.objects.create( headline='Goodbye', pub_date=datetime(2005, 11, 28) ).pk self.a3 = Article.objects.create( headline='Hello and goodbye', pub_date=datetime(2005, 11, 29) ).pk def test_filter_or(self): self.assertQuerysetEqual( Article.objects.filter(headline__startswith='Hello') | Article.objects.filter(headline__startswith='Goodbye'), [ 'Hello', 'Goodbye', 'Hello and goodbye' ], attrgetter("headline") ) self.assertQuerysetEqual( Article.objects.filter(headline__contains='Hello') | Article.objects.filter(headline__contains='bye'), [ 'Hello', 'Goodbye', 'Hello and goodbye' ], attrgetter("headline") ) self.assertQuerysetEqual( Article.objects.filter(headline__iexact='Hello') | Article.objects.filter(headline__contains='ood'), [ 'Hello', 'Goodbye', 'Hello and goodbye' ], attrgetter("headline") ) self.assertQuerysetEqual( Article.objects.filter(Q(headline__startswith='Hello') | Q(headline__startswith='Goodbye')), [ 'Hello', 'Goodbye', 'Hello and goodbye' ], attrgetter("headline") ) def test_stages(self): # You can shorten this syntax with code like the following, which is # especially useful if building the query in stages: articles = Article.objects.all() self.assertQuerysetEqual( articles.filter(headline__startswith='Hello') & articles.filter(headline__startswith='Goodbye'), [] ) self.assertQuerysetEqual( articles.filter(headline__startswith='Hello') & articles.filter(headline__contains='bye'), [ 'Hello and goodbye' ], attrgetter("headline") ) def test_pk_q(self): self.assertQuerysetEqual( Article.objects.filter(Q(pk=self.a1) | Q(pk=self.a2)), [ 'Hello', 'Goodbye' ], attrgetter("headline") ) self.assertQuerysetEqual( Article.objects.filter(Q(pk=self.a1) | Q(pk=self.a2) | Q(pk=self.a3)), [ 'Hello', 'Goodbye', 'Hello and goodbye' ], attrgetter("headline"), ) def test_pk_in(self): self.assertQuerysetEqual( Article.objects.filter(pk__in=[self.a1, self.a2, self.a3]), [ 'Hello', 'Goodbye', 'Hello and goodbye' ], attrgetter("headline"), ) self.assertQuerysetEqual( Article.objects.filter(pk__in=(self.a1, self.a2, self.a3)), [ 'Hello', 'Goodbye', 'Hello and goodbye' ], attrgetter("headline"), ) self.assertQuerysetEqual( Article.objects.filter(pk__in=[self.a1, self.a2, self.a3, 40000]), [ 'Hello', 'Goodbye', 'Hello and goodbye' ], attrgetter("headline"), ) def test_q_negated(self): # Q objects can be negated self.assertQuerysetEqual( Article.objects.filter(Q(pk=self.a1) | ~Q(pk=self.a2)), [ 'Hello', 'Hello and goodbye' ], attrgetter("headline") ) self.assertQuerysetEqual( Article.objects.filter(~Q(pk=self.a1) & ~Q(pk=self.a2)), [ 'Hello and goodbye' ], attrgetter("headline"), ) # This allows for more complex queries than filter() and exclude() # alone would allow self.assertQuerysetEqual( Article.objects.filter(Q(pk=self.a1) & (~Q(pk=self.a2) | Q(pk=self.a3))), [ 'Hello' ], attrgetter("headline"), ) def test_complex_filter(self): # The 'complex_filter' method supports framework features such as # 'limit_choices_to' which normally take a single dictionary of lookup # arguments but need to support arbitrary queries via Q objects too. 
self.assertQuerysetEqual( Article.objects.complex_filter({'pk': self.a1}), [ 'Hello' ], attrgetter("headline"), ) self.assertQuerysetEqual( Article.objects.complex_filter(Q(pk=self.a1) | Q(pk=self.a2)), [ 'Hello', 'Goodbye' ], attrgetter("headline"), ) def test_empty_in(self): # Passing "in" an empty list returns no results ... self.assertQuerysetEqual( Article.objects.filter(pk__in=[]), [] ) # ... but can return results if we OR it with another query. self.assertQuerysetEqual( Article.objects.filter(Q(pk__in=[]) | Q(headline__icontains='goodbye')), [ 'Goodbye', 'Hello and goodbye' ], attrgetter("headline"), ) def test_q_and(self): # Q arg objects are ANDed self.assertQuerysetEqual( Article.objects.filter(Q(headline__startswith='Hello'), Q(headline__contains='bye')), [ 'Hello and goodbye' ], attrgetter("headline") ) # Q arg AND order is irrelevant self.assertQuerysetEqual( Article.objects.filter(Q(headline__contains='bye'), headline__startswith='Hello'), [ 'Hello and goodbye' ], attrgetter("headline"), ) self.assertQuerysetEqual( Article.objects.filter(Q(headline__startswith='Hello') & Q(headline__startswith='Goodbye')), [] ) def test_q_exclude(self): self.assertQuerysetEqual( Article.objects.exclude(Q(headline__startswith='Hello')), [ 'Goodbye' ], attrgetter("headline") ) def test_other_arg_queries(self): # Try some arg queries with operations other than filter. self.assertEqual( Article.objects.get(Q(headline__startswith='Hello'), Q(headline__contains='bye')).headline, 'Hello and goodbye' ) self.assertEqual( Article.objects.filter(Q(headline__startswith='Hello') | Q(headline__contains='bye')).count(), 3 ) self.assertQuerysetEqual( Article.objects.filter(Q(headline__startswith='Hello'), Q(headline__contains='bye')).values(), [ {"headline": "Hello and goodbye", "id": self.a3, "pub_date": datetime(2005, 11, 29)}, ], lambda o: o, ) self.assertEqual( Article.objects.filter(Q(headline__startswith='Hello')).in_bulk([self.a1, self.a2]), {self.a1: Article.objects.get(pk=self.a1)} )
mit
-7,358,849,044,501,286,000
137,746,372,379,280,130
31.58547
125
0.521836
false
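The test suite above revolves around composing Q objects with |, & and ~. A condensed illustration of that composition, using the same Article model and relative import the tests use (querysets are lazy, so nothing is evaluated here):

from django.db.models import Q
from .models import Article  # same relative import as the tests

# OR of two lookups: matches 'Hello', 'Goodbye' and 'Hello and goodbye'
hello_or_bye = Article.objects.filter(
    Q(headline__startswith='Hello') | Q(headline__contains='bye'))

# Negation with ~ and nesting with parentheses
not_goodbye = Article.objects.filter(~Q(headline__iexact='Goodbye'))
combined = Article.objects.filter(
    Q(headline__contains='Hello') & (Q(pub_date__year=2005) | Q(pk__in=[])))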
jjx02230808/project0223
examples/decomposition/plot_kernel_pca.py
353
2011
""" ========== Kernel PCA ========== This example shows that Kernel PCA is able to find a projection of the data that makes data linearly separable. """ print(__doc__) # Authors: Mathieu Blondel # Andreas Mueller # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from sklearn.decomposition import PCA, KernelPCA from sklearn.datasets import make_circles np.random.seed(0) X, y = make_circles(n_samples=400, factor=.3, noise=.05) kpca = KernelPCA(kernel="rbf", fit_inverse_transform=True, gamma=10) X_kpca = kpca.fit_transform(X) X_back = kpca.inverse_transform(X_kpca) pca = PCA() X_pca = pca.fit_transform(X) # Plot results plt.figure() plt.subplot(2, 2, 1, aspect='equal') plt.title("Original space") reds = y == 0 blues = y == 1 plt.plot(X[reds, 0], X[reds, 1], "ro") plt.plot(X[blues, 0], X[blues, 1], "bo") plt.xlabel("$x_1$") plt.ylabel("$x_2$") X1, X2 = np.meshgrid(np.linspace(-1.5, 1.5, 50), np.linspace(-1.5, 1.5, 50)) X_grid = np.array([np.ravel(X1), np.ravel(X2)]).T # projection on the first principal component (in the phi space) Z_grid = kpca.transform(X_grid)[:, 0].reshape(X1.shape) plt.contour(X1, X2, Z_grid, colors='grey', linewidths=1, origin='lower') plt.subplot(2, 2, 2, aspect='equal') plt.plot(X_pca[reds, 0], X_pca[reds, 1], "ro") plt.plot(X_pca[blues, 0], X_pca[blues, 1], "bo") plt.title("Projection by PCA") plt.xlabel("1st principal component") plt.ylabel("2nd component") plt.subplot(2, 2, 3, aspect='equal') plt.plot(X_kpca[reds, 0], X_kpca[reds, 1], "ro") plt.plot(X_kpca[blues, 0], X_kpca[blues, 1], "bo") plt.title("Projection by KPCA") plt.xlabel("1st principal component in space induced by $\phi$") plt.ylabel("2nd component") plt.subplot(2, 2, 4, aspect='equal') plt.plot(X_back[reds, 0], X_back[reds, 1], "ro") plt.plot(X_back[blues, 0], X_back[blues, 1], "bo") plt.title("Original space after inverse transform") plt.xlabel("$x_1$") plt.ylabel("$x_2$") plt.subplots_adjust(0.02, 0.10, 0.98, 0.94, 0.04, 0.35) plt.show()
bsd-3-clause
-116,776,969,672,992,430
4,460,732,754,710,002,700
26.547945
76
0.668324
false
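A non-plotting distillation of the same example: fit Kernel PCA on the concentric-circles data, project it, and map the projection back with the learned inverse transform.

import numpy as np
from sklearn.datasets import make_circles
from sklearn.decomposition import KernelPCA

X, y = make_circles(n_samples=400, factor=.3, noise=.05, random_state=0)
kpca = KernelPCA(kernel="rbf", gamma=10, fit_inverse_transform=True)
X_kpca = kpca.fit_transform(X)            # coordinates in the kernel-induced space
X_back = kpca.inverse_transform(X_kpca)   # approximate pre-images in input space
print(X_kpca.shape, np.abs(X - X_back).mean())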
zibneuro/brainvispy
generators/uniformpointcloud.py
1
2947
import vtk import math import numpy as np from core.settings import Settings class UPoint: def __init__(self, p, dist_to_closest_point): self.p = p self.dist_to_closest_point = dist_to_closest_point def compute_energy(self): diff = self.dist_to_closest_point - Settings.inter_neuron_distance if diff >= 0: return diff return -5*diff class UniformPointCloud: def __init__(self, target_point): self.__target_point = np.array([target_point[0], target_point[1], target_point[2]]) self.__points = vtk.vtkPolyData() self.__points.SetPoints(vtk.vtkPoints()) self.__point_locator = vtk.vtkPointLocator() self.__point_locator.SetDataSet(self.__points) def add_single_point(self, p): self.__points.GetPoints().InsertNextPoint(p) self.__points.Modified() self.__point_locator.Update() def insert_best_point(self, point_candidates): if self.__points.GetNumberOfPoints() <= 0: point = self.__select_point_closest_to_target(point_candidates) else: point = self.__select_best_point(point_candidates) self.__points.GetPoints().InsertNextPoint(point) self.__points.Modified() self.__point_locator.Update() return point def __select_point_closest_to_target(self, points): closest_point = points[0] min_dist = self.__compute_distance_to_target(closest_point) for p in points[1:]: dist = self.__compute_distance_to_target(p) if dist < min_dist: min_dist = dist closest_point = p return closest_point def __select_best_point(self, points): evaluated_points = list() for p in points: evaluated_points.append(UPoint(p, self.__compute_distance_to_closest_point(p))) evaluated_points.sort(key = lambda point: point.compute_energy()) min_dist_to_target = self.__compute_distance_to_target(evaluated_points[0].p) best_point = evaluated_points[0].p return best_point list_end = max(len(evaluated_points)//20, 1) for evaluated_point in evaluated_points[1:list_end]: dist_to_target = self.__compute_distance_to_target(evaluated_point.p) if dist_to_target < min_dist_to_target: min_dist_to_target = dist_to_target best_point = evaluated_point.p return best_point def __compute_distance_to_target(self, p): return np.linalg.norm(p - self.__target_point) def __compute_distance_to_closest_point(self, p): # Make sure there are points in the point cloud if self.__points.GetNumberOfPoints() <= 0: return float("inf") # Find the point closest to 'p' ids = vtk.vtkIdList() self.__point_locator.FindClosestNPoints(1, p, ids) closest_point = self.__points.GetPoint(ids.GetId(0)) # Return the distance between 'p' and the closest point x = p[0] - closest_point[0] y = p[1] - closest_point[1] return math.sqrt(x*x + y*y) #return np.linalg.norm(p - closest_point)
bsd-3-clause
-4,174,260,639,565,716,000
-1,239,934,494,028,898,800
27.892157
87
0.660333
false
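A hypothetical usage sketch for UniformPointCloud follows. It assumes vtk and the repository's core.settings are importable, that the module import path mirrors the file path above, and that Settings.inter_neuron_distance is an attribute the caller may set; none of that is guaranteed by the file itself.

import numpy as np
from core.settings import Settings                           # assumed import path
from generators.uniformpointcloud import UniformPointCloud   # assumed import path

Settings.inter_neuron_distance = 1.0                    # assumed configurable attribute
cloud = UniformPointCloud(target_point=(0.0, 0.0, 0.0))
cloud.add_single_point((0.0, 0.0, 0.0))                 # seed the cloud

candidates = [np.random.uniform(-5.0, 5.0, 3) for _ in range(100)]
chosen = cloud.insert_best_point(candidates)            # lowest-energy candidate is kept
print(chosen)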
openmicroscopy/omero-marshal
omero_marshal/decode/decoders/screen.py
1
1831
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright (c) 2015 Glencoe Software, Inc. All rights reserved. # # This software is distributed under the terms described by the LICENCE file # you can find at the root of the distribution bundle. # If the file is missing please request a copy by contacting # [email protected]. # from ... import SCHEMA_VERSION from .annotation import AnnotatableDecoder from omero.model import ScreenI class Screen201501Decoder(AnnotatableDecoder): TYPE = 'http://www.openmicroscopy.org/Schemas/SPW/2015-01#Screen' OMERO_CLASS = ScreenI def decode(self, data): v = super(Screen201501Decoder, self).decode(data) self.set_property(v, 'name', data.get('Name')) self.set_property(v, 'description', data.get('Description')) self.set_property(v, 'protocolDescription', data.get('ProtocolDescription')) self.set_property(v, 'protocolIdentifier', data.get('ProtocolIdentifier')) self.set_property(v, 'reagentSetDescription', data.get('ReagentSetDescription')) self.set_property(v, 'reagentSetIdentifier', data.get('ReagentSetIdentifier')) self.set_property(v, 'type', data.get('Type')) for plate in data.get('Plates', list()): plate_decoder = self.ctx.get_decoder(plate['@type']) v.linkPlate(plate_decoder.decode(plate)) return v class Screen201606Decoder(Screen201501Decoder): TYPE = 'http://www.openmicroscopy.org/Schemas/OME/2016-06#Screen' if SCHEMA_VERSION == '2015-01': decoder = (Screen201501Decoder.TYPE, Screen201501Decoder) elif SCHEMA_VERSION == '2016-06': decoder = (Screen201606Decoder.TYPE, Screen201606Decoder) ScreenDecoder = decoder[1]
gpl-2.0
-2,836,658,489,345,531,000
6,291,544,312,766,475,000
34.901961
76
0.665756
false
tellesnobrega/storm_plugin
sahara/tests/unit/service/validation/test_cluster_template_create_validation.py
5
8632
# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from sahara.service import api from sahara.service.validations import cluster_templates as ct from sahara.tests.unit.service.validation import utils as u class TestClusterTemplateCreateValidation(u.ValidationTestCase): def setUp(self): super(TestClusterTemplateCreateValidation, self).setUp() self._create_object_fun = ct.check_cluster_template_create self.scheme = ct.CLUSTER_TEMPLATE_SCHEMA api.plugin_base.setup_plugins() def test_cluster_template_create_v_cluster_configs(self): self._assert_cluster_configs_validation() def test_cluster_template_create_v_ng(self): self._assert_create_object_validation( data={ 'name': "test-name", 'plugin_name': "vanilla", 'hadoop_version': "1.2.1", 'node_groups': [ {'name': 'a'} ] }, bad_req_i=(1, 'VALIDATION_ERROR', "{'name': 'a'} is not valid under " "any of the given schemas") ) self._assert_create_object_validation( data={ 'name': "test-name", 'plugin_name': "vanilla", 'hadoop_version': "1.2.1", 'node_groups': [ {'name': 'a', 'flavor_id': '42'} ] }, bad_req_i=(1, "VALIDATION_ERROR", "{'name': 'a', 'flavor_id': '42'} " "is not valid under any of the given schemas") ) self._assert_create_object_validation( data={ 'name': "test-name", 'plugin_name': "vanilla", 'hadoop_version': "1.2.1", 'node_groups': [ {'name': 'a', 'flavor_id': '42', 'node_processes': ['namenode']} ] }, bad_req_i=(1, "VALIDATION_ERROR", "{'node_processes': ['namenode'], " "'name': 'a', " "'flavor_id': '42'} " "is not valid under any of the given schemas") ) self._assert_create_object_validation( data={ 'name': "test-name", 'plugin_name': "vanilla", 'hadoop_version': "1.2.1", 'node_groups': [ { 'name': 'a', 'flavor_id': '42', 'node_processes': ['namenode'], 'count': 1 }, { "node_group_template_id": "550e8400-e29b-41d4-a716-" "446655440000", "name": "a", 'count': 2 } ] }, bad_req_i=(1, "INVALID_REFERENCE", "Duplicates in node group names are detected") ) def test_cluster_template_create_v_ng_templates(self): self._assert_create_object_validation( data={ 'name': "test-name", 'plugin_name': "vanilla", 'hadoop_version': "1.2.1", 'node_groups': [ { "node_group_template_id": "", "name": "test", } ] }, bad_req_i=(1, "VALIDATION_ERROR", "{'node_group_template_id': '', 'name': 'test'} " "is not valid under any of the given schemas") ) self._assert_create_object_validation( data={ 'name': "test-name", 'plugin_name': "vanilla", 'hadoop_version': "1.2.1", 'node_groups': [ { "node_group_template_id": "test", "name": "test", 'count': 3 } ] }, bad_req_i=(1, "VALIDATION_ERROR", "{'count': 3, " "'node_group_template_id': 'test', " "'name': 'test'} " "is not valid under any of the given schemas") ) def test_cluster_template_create_v_ng_templates_right(self): self._assert_create_object_validation( data={ 'name': "test-name", 'plugin_name': "vanilla", 'hadoop_version': "1.2.1", 'node_groups': [ { "node_group_template_id": "550e8400-e29b-41d4-a716-" "446655440000", "name": "test", 'count': 3 } ] }, ) def 
test_cluster_template_create_v_name_base(self): data = { 'name': "testname", 'plugin_name': "vanilla", 'hadoop_version': "1.2.1" } self._assert_valid_name_hostname_validation(data) def test_cluster_template_create_v_types(self): data = { 'name': "testname", 'plugin_name': "vanilla", 'hadoop_version': "1.2.1" } self._assert_types(data) def test_cluster_template_create_v_required(self): self._assert_create_object_validation( data={}, bad_req_i=(1, "VALIDATION_ERROR", u"'name' is a required property") ) self._assert_create_object_validation( data={ 'name': 'test-name' }, bad_req_i=(1, "VALIDATION_ERROR", u"'plugin_name' is a required property") ) self._assert_create_object_validation( data={ 'name': 'testname', 'plugin_name': 'vanilla' }, bad_req_i=(1, "VALIDATION_ERROR", u"'hadoop_version' is a required property") ) def test_cluster_template_create_v_right(self): self._assert_create_object_validation( data={ 'name': 'testname', 'plugin_name': 'vanilla', 'hadoop_version': '1.2.1' }) def test_cluster_template_create_v_plugin_name_exists(self): self._assert_create_object_validation( data={ 'name': "test-name", 'plugin_name': "wrong_plugin", 'hadoop_version': "1.2.1", }, bad_req_i=(1, 'INVALID_REFERENCE', "Sahara doesn't contain plugin " "with name 'wrong_plugin'") ) def test_cluster_template_create_v_unique_cl(self): data = { 'name': 'test', 'plugin_name': 'vanilla', 'hadoop_version': '1.2.1' } self._assert_create_object_validation( data=data, bad_req_i=(1, 'NAME_ALREADY_EXISTS', "Cluster template with name 'test' already exists") ) def test_cluster_template_wrong_neutron_mngmt_net(self): data = { 'name': 'test-template', 'plugin_name': 'vanilla', 'hadoop_version': '1.2.1', 'neutron_management_network': '53a36917-ab9f-4589' '-94ce-b6df85a68332' } self._assert_create_object_validation( data=data, bad_req_i=(1, 'INVALID_REFERENCE', "Network 53a36917-ab9f-4589-" "94ce-b6df85a68332 not found") ) def test_cluster_create_v_default_image_required_tags(self): self._assert_cluster_default_image_tags_validation()
apache-2.0
-7,209,164,706,042,769,000
-7,779,094,155,706,792,000
34.522634
77
0.446594
false
nwjs/chromium.src
tools/cr/cr/actions/runner.py
10
2365
# Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """A module for the Runner base class.""" import cr class Runner(cr.Action, cr.Plugin.Type): """Base class for implementing target runners. Runner implementations must implement the Kill, Run and Test methods. """ SELECTOR_ARG = '--runner' SELECTOR = 'CR_RUNNER' SELECTOR_HELP = 'Sets the runner to use to execute the target.' @classmethod def AddArguments(cls, command, parser): parser.add_argument( '--test', dest='CR_TEST_TYPE', choices=cr.Target.TEST_TYPES, default=None, help=""" Sets the test type to use, defaults to choosing based on the target. Set to 'no' to force it to not be a test. """ ) cls.AddSelectorArg(command, parser) @cr.Plugin.activemethod def Kill(self, targets, arguments): """Stops all running processes that match a target.""" raise NotImplementedError('Must be overridden.') @cr.Plugin.activemethod def Run(self, target, arguments): """Run a new copy of a runnable target.""" raise NotImplementedError('Must be overridden.') @cr.Plugin.activemethod def Test(self, target, arguments): """Run a test target.""" raise NotImplementedError('Must be overridden.') @cr.Plugin.activemethod def Invoke(self, targets, arguments): """Invoke a target. This dispatches to either Test or Run depending on the target type. """ for target in targets: if target.is_test: self.Test(target, arguments) else: self.Run(target, arguments) @cr.Plugin.activemethod def Restart(self, targets, arguments): """Force a target to restart if it is already running. Default implementation is to do a Kill Invoke sequence. Do not call the base version if you implement a more efficient one. """ self.Kill(targets, []) self.Invoke(targets, arguments) class SkipRunner(Runner): """A Runner the user chooses to bypass the run step of a command.""" @property def priority(self): return super(SkipRunner, self).priority - 1 def Kill(self, targets, arguments): pass def Run(self, target, arguments): pass def Test(self, target, arguments): pass
bsd-3-clause
6,173,870,010,270,057,000
-3,356,927,860,769,375,700
26.183908
72
0.669345
false
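A hypothetical sketch of a concrete runner plugin, mirroring the way SkipRunner above overrides the three abstract hooks; the import path is assumed from the file's location (tools/cr/cr/actions/runner.py) and is not confirmed by the code itself.

from cr.actions.runner import Runner  # assumed import path

class EchoRunner(Runner):
    """A toy runner that only reports what it would do."""

    def Kill(self, targets, arguments):
        for target in targets:
            print('would kill %s' % target)

    def Run(self, target, arguments):
        print('would run %s with %s' % (target, arguments))

    def Test(self, target, arguments):
        print('would test %s with %s' % (target, arguments))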
ma1co/Sony-PMCA-RE
pmca/spk/__init__.py
1
2713
"""Methods for reading and writing spk files""" import sys try: from Cryptodome.Cipher import AES from Cryptodome.PublicKey import RSA from Cryptodome.Util.number import bytes_to_long, long_to_bytes except ImportError: from Crypto.Cipher import AES from Crypto.PublicKey import RSA from Crypto.Util.number import bytes_to_long, long_to_bytes if sys.version_info >= (3,): long = int from . import constants from . import util from ..util import * SpkHeader = Struct('SpkHeader', [ ('magic', Struct.STR % 4), ('keyOffset', Struct.INT32), ]) spkHeaderMagic = b'1spk' SpkKeyHeader = Struct('SpkKeyHeader', [ ('keySize', Struct.INT32), ]) def parse(data): """Parses an spk file Returns: The contained apk data """ encryptedKey, encryptedData = parseContainer(data) key = decryptKey(encryptedKey) return decryptData(key, encryptedData) def dump(data): """Builds an spk file containing the apk data specified""" encryptedKey = constants.sampleSpkKey key = decryptKey(encryptedKey) encryptedData = encryptData(key, data) return dumpContainer(encryptedKey, encryptedData) def isSpk(data): return len(data) >= SpkHeader.size and SpkHeader.unpack(data).magic == spkHeaderMagic def parseContainer(data): """Parses an spk file Returns: ('encrypted key', 'encrypted apk data') """ header = SpkHeader.unpack(data) if header.magic != spkHeaderMagic: raise Exception('Wrong magic') keyHeaderOffset = SpkHeader.size + header.keyOffset keyHeader = SpkKeyHeader.unpack(data, keyHeaderOffset) keyOffset = keyHeaderOffset + SpkKeyHeader.size dataOffset = keyOffset + keyHeader.keySize return data[keyOffset:dataOffset], data[dataOffset:] def dumpContainer(encryptedKey, encryptedData): """Builds an spk file from the encrypted key and data specified""" return SpkHeader.pack(magic=spkHeaderMagic, keyOffset=0) + SpkKeyHeader.pack(keySize=len(encryptedKey)) + encryptedKey + encryptedData def decryptKey(encryptedKey): """Decrypts an RSA-encrypted key""" rsa = RSA.construct((long(constants.rsaModulus), long(constants.rsaExponent))) try: return rsa.encrypt(encryptedKey, 0)[0] except NotImplementedError: # pycryptodome return long_to_bytes(rsa._encrypt(bytes_to_long(encryptedKey))) def decryptData(key, encryptedData): """Decrypts the apk data using the specified AES key""" aes = AES.new(key, AES.MODE_ECB) return b''.join(util.unpad(aes.decrypt(c)) for c in util.chunk(encryptedData, constants.blockSize + constants.paddingSize)) def encryptData(key, data): """Encrypts the apk data using the specified AES key""" aes = AES.new(key, AES.MODE_ECB) return b''.join(aes.encrypt(util.pad(c, constants.paddingSize)) for c in util.chunk(data, constants.blockSize))
mit
7,462,263,044,723,836,000
-466,691,478,833,074,400
30.183908
135
0.75341
false
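A hedged round-trip sketch for the helpers above, assuming the package layout implied by the path field and a working pycryptodome or pycrypto install; the input file name is purely illustrative.

from pmca.spk import dump, parse, isSpk

with open('app.apk', 'rb') as f:          # hypothetical input file
    apk_data = f.read()

spk_data = dump(apk_data)                 # wrap the apk in an encrypted spk container
assert isSpk(spk_data)
assert parse(spk_data) == apk_data        # decrypting recovers the original bytes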
chinmaygarde/depot_tools
third_party/boto/services/service.py
70
6641
# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. import boto from boto.services.message import ServiceMessage from boto.services.servicedef import ServiceDef from boto.pyami.scriptbase import ScriptBase from boto.utils import get_ts import time import os import mimetypes class Service(ScriptBase): # Time required to process a transaction ProcessingTime = 60 def __init__(self, config_file=None, mimetype_files=None): ScriptBase.__init__(self, config_file) self.name = self.__class__.__name__ self.working_dir = boto.config.get('Pyami', 'working_dir') self.sd = ServiceDef(config_file) self.retry_count = self.sd.getint('retry_count', 5) self.loop_delay = self.sd.getint('loop_delay', 30) self.processing_time = self.sd.getint('processing_time', 60) self.input_queue = self.sd.get_obj('input_queue') self.output_queue = self.sd.get_obj('output_queue') self.output_domain = self.sd.get_obj('output_domain') if mimetype_files: mimetypes.init(mimetype_files) def split_key(key): if key.find(';') < 0: t = (key, '') else: key, type = key.split(';') label, mtype = type.split('=') t = (key, mtype) return t def read_message(self): boto.log.info('read_message') message = self.input_queue.read(self.processing_time) if message: boto.log.info(message.get_body()) key = 'Service-Read' message[key] = get_ts() return message # retrieve the source file from S3 def get_file(self, message): bucket_name = message['Bucket'] key_name = message['InputKey'] file_name = os.path.join(self.working_dir, message.get('OriginalFileName', 'in_file')) boto.log.info('get_file: %s/%s to %s' % (bucket_name, key_name, file_name)) bucket = boto.lookup('s3', bucket_name) key = bucket.new_key(key_name) key.get_contents_to_filename(os.path.join(self.working_dir, file_name)) return file_name # process source file, return list of output files def process_file(self, in_file_name, msg): return [] # store result file in S3 def put_file(self, bucket_name, file_path, key_name=None): boto.log.info('putting file %s as %s.%s' % (file_path, bucket_name, key_name)) bucket = boto.lookup('s3', bucket_name) key = bucket.new_key(key_name) key.set_contents_from_filename(file_path) return key def save_results(self, results, input_message, output_message): output_keys = [] for file, type in results: if 'OutputBucket' in input_message: output_bucket = input_message['OutputBucket'] else: output_bucket = input_message['Bucket'] key_name = os.path.split(file)[1] key = self.put_file(output_bucket, file, key_name) 
output_keys.append('%s;type=%s' % (key.name, type)) output_message['OutputKey'] = ','.join(output_keys) # write message to each output queue def write_message(self, message): message['Service-Write'] = get_ts() message['Server'] = self.name if 'HOSTNAME' in os.environ: message['Host'] = os.environ['HOSTNAME'] else: message['Host'] = 'unknown' message['Instance-ID'] = self.instance_id if self.output_queue: boto.log.info('Writing message to SQS queue: %s' % self.output_queue.id) self.output_queue.write(message) if self.output_domain: boto.log.info('Writing message to SDB domain: %s' % self.output_domain.name) item_name = '/'.join([message['Service-Write'], message['Bucket'], message['InputKey']]) self.output_domain.put_attributes(item_name, message) # delete message from input queue def delete_message(self, message): boto.log.info('deleting message from %s' % self.input_queue.id) self.input_queue.delete_message(message) # to clean up any files, etc. after each iteration def cleanup(self): pass def shutdown(self): on_completion = self.sd.get('on_completion', 'shutdown') if on_completion == 'shutdown': if self.instance_id: time.sleep(60) c = boto.connect_ec2() c.terminate_instances([self.instance_id]) def main(self, notify=False): self.notify('Service: %s Starting' % self.name) empty_reads = 0 while self.retry_count < 0 or empty_reads < self.retry_count: try: input_message = self.read_message() if input_message: empty_reads = 0 output_message = ServiceMessage(None, input_message.get_body()) input_file = self.get_file(input_message) results = self.process_file(input_file, output_message) self.save_results(results, input_message, output_message) self.write_message(output_message) self.delete_message(input_message) self.cleanup() else: empty_reads += 1 time.sleep(self.loop_delay) except Exception: boto.log.exception('Service Failed') empty_reads += 1 self.notify('Service: %s Shutting Down' % self.name) self.shutdown()
bsd-3-clause
-5,659,346,170,579,439,000
-7,453,082,512,295,332,000
40.248447
100
0.613763
false
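A hypothetical subclass sketch: the main hook a concrete service overrides is process_file(), which must return a list of (output_path, mimetype) tuples that save_results() then pushes back to S3. The config file name below is illustrative only.

import os
from boto.services.service import Service

class UpperCaseService(Service):
    def process_file(self, in_file_name, msg):
        # Read the downloaded input, upper-case it, and hand back one output file.
        out_file_name = os.path.join(self.working_dir, 'out_file.txt')
        with open(in_file_name) as src, open(out_file_name, 'w') as dst:
            dst.write(src.read().upper())
        return [(out_file_name, 'text/plain')]

# UpperCaseService(config_file='my_service.cfg').main()  # poll SQS and process jobs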
markdryan/dleyna-renderer
test/dbus/cap.py
4
8713
#!/usr/bin/python # cap # # Copyright (C) 2012 Intel Corporation. All rights reserved. # # This program is free software; you can redistribute it and/or modify it # under the terms and conditions of the GNU Lesser General Public License, # version 2.1, as published by the Free Software Foundation. # # This program is distributed in the hope it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License # for more details. # # You should have received a copy of the GNU Lesser General Public License # along with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. # # Mark Ryan <[email protected]> # from gi.repository import Gtk, Gdk, GdkPixbuf import cairo import dbus import dbus.service import dbus.mainloop.glib import tempfile class Renderer: def __init__(self, path): bus = dbus.SessionBus() obj = bus.get_object('com.intel.dleyna-renderer', path) self.__propsIF = dbus.Interface(obj, 'org.freedesktop.DBus.Properties') self.__hostIF = dbus.Interface(obj, 'com.intel.dLeynaRenderer.PushHost') self.__playerIF = dbus.Interface(obj, 'org.mpris.MediaPlayer2.Player') def get_prop(self, prop_name, iface = ""): return self.__propsIF.Get(iface, prop_name) def push_file(self, fname): try: self.__hostIF.RemoveFile(fname) except: pass self.__playerIF.Stop() uri = self.__hostIF.HostFile(fname) self.__playerIF.OpenUri(uri) self.__playerIF.Play() class Renderers: def __init__(self, cb): bus=dbus.SessionBus() obj = bus.get_object('com.intel.dleyna-renderer', '/com/intel/dLeynaRenderer') self.__manager = dbus.Interface(obj, 'com.intel.dLeynaRenderer.Manager') self.__cb = cb self.__manager.connect_to_signal("LostServer", self.__servers_changed) self.__manager.connect_to_signal("FoundServer", self.__servers_changed) def __servers_changed(self, server): self.__cb() def get_renderers(self): retval = [] for path in self.__manager.GetServers(): retval.append((path, Renderer(path))) return retval class UI: def delete_event(self, widget, event, data=None): return False def destroy(self, widget, data=None): Gtk.main_quit() def __create_renderers_store(self): servers_store = Gtk.ListStore(str, str) for server in self.__Renderers.get_renderers(): servers_store.append([server[0], server[1].get_prop("Identity")]) return servers_store def __reset_renderers(self): print "Renderers Changed" entry = self.__combo.get_child() servers_store = self.__create_renderers_store() self.__combo.set_model(servers_store) if len(servers_store) > 0: self.__combo.set_active(0) else: entry.set_text("") def draw_rect(self, widget, x, y): if self.__pixmap != None: ctx = cairo.Context(self.__pixmap) ctx.set_source_rgb(0, 0, 0) ctx.rectangle(x -3, y -3, 6, 6) ctx.fill() widget.queue_draw_area(x -3, y -3, 6, 6) def __mouse_button_pressed_cb(self, widget, event): self.draw_rect(widget, event.x, event.y) return True def __mouse_moved_cb(self, widget, event): if event.state & Gdk.ModifierType.BUTTON1_MASK: self.draw_rect(widget, event.x, event.y) event.request_motions() return True def __draw_cb(self, da, ctx): if self.__pixmap: ctx.set_source_surface(self.__pixmap, 0, 0) ctx.rectangle(0, 0, da.get_allocated_width(), da.get_allocated_height()) ctx.fill() @staticmethod def __blank_pixmap(width, height): new_pixmap = cairo.ImageSurface(cairo.FORMAT_RGB24, width, height) ctx = cairo.Context(new_pixmap) ctx.set_source_rgb(0xff, 0xff, 0xff) ctx.rectangle(0, 0, width, 
height) ctx.fill() return (new_pixmap, ctx) def __configured_cb(self, widget, event): allocation = widget.get_allocation() width = allocation.width height = allocation.height new_pixmap, ctx = UI.__blank_pixmap(width, height) if self.__pixmap: old_width = self.__pixmap.get_width() old_height = self.__pixmap.get_height() dest_x = (width - old_width) / 2 dest_y = (height - old_height) / 2 ctx.set_source_surface(self.__pixmap, dest_x, dest_y) ctx.rectangle(0, 0, width, height) ctx.fill() self.__pixmap = new_pixmap return True def push_cb(self, button): tree_iter = self.__combo.get_active_iter() if tree_iter != None: self.__pixmap.write_to_png(self.__tmp_file) model = self.__combo.get_model() ren = Renderer(model[tree_iter][0]) ren.push_file(self.__tmp_file) def pick_cb(self, button): dialog = Gtk.FileChooserDialog("Please choose a file", self.__window, Gtk.FileChooserAction.OPEN, (Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL, Gtk.STOCK_OPEN, Gtk.ResponseType.OK)) response = dialog.run() if response == Gtk.ResponseType.OK: print "Open clicked" pick_file = dialog.get_filename() tree_iter = self.__combo.get_active_iter() if tree_iter != None: model = self.__combo.get_model() ren = Renderer(model[tree_iter][0]) dialog.destroy() ren.push_file(pick_file) elif response == Gtk.ResponseType.CANCEL: print "Cancel clicked" dialog.destroy() def clear_cb(self, button): allocation = self.__area.get_allocation() self.__pixmap, ctx = UI.__blank_pixmap(allocation.width, allocation.height) self.__area.queue_draw_area(0,0, allocation.width, allocation.height) def __init__(self): self.__Renderers = Renderers(self.__reset_renderers) self.__tmp_file = tempfile.mktemp(".png") self.__pixmap = None window = Gtk.Window() window.set_default_size(640, 480) window.set_title("Create and Push!") container = Gtk.VBox(False, 0) area = Gtk.DrawingArea() area.set_events(Gdk.EventMask.BUTTON_PRESS_MASK | Gdk.EventMask.POINTER_MOTION_MASK | Gdk.EventMask.POINTER_MOTION_HINT_MASK) area.connect("button_press_event", self.__mouse_button_pressed_cb) area.connect("motion_notify_event", self.__mouse_moved_cb) area.connect("configure-event", self.__configured_cb) area.connect("draw", self.__draw_cb) container.pack_start(area, True, True, 4); button_bar = Gtk.HBox(False, 0) pick_button = Gtk.Button("Pick & Push"); pick_button.connect("clicked", self.pick_cb) push_button = Gtk.Button("Push"); push_button.connect("clicked", self.push_cb) clear_button = Gtk.Button("Clear"); clear_button.connect("clicked", self.clear_cb) servers_store = self.__create_renderers_store() self.__combo = Gtk.ComboBox.new_with_model_and_entry(servers_store) self.__combo.set_entry_text_column(1) if len(servers_store) > 0: self.__combo.set_active(0) self.__combo.get_child().set_property("editable", False) button_bar.pack_start(pick_button, True, True, 4) button_bar.pack_start(push_button, True, True, 4) button_bar.pack_start(clear_button, True, True, 4) button_bar.pack_start(self.__combo, True, True, 4) container.pack_start(button_bar, False, False, 4); window.add(container) window.show_all() window.connect("delete_event", self.delete_event) window.connect("destroy", self.destroy) self.__window = window self.__area = area if __name__ == "__main__": dbus.mainloop.glib.DBusGMainLoop(set_as_default=True) ui = UI() Gtk.main()
lgpl-2.1
853,544,185,855,282,200
1,461,543,017,434,681,300
35.763713
79
0.576724
false
chauhanhardik/populo
common/lib/xmodule/xmodule/modulestore/tests/test_libraries.py
157
7393
# -*- coding: utf-8 -*- """ Basic unit tests related to content libraries. Higher-level tests are in `cms/djangoapps/contentstore`. """ import ddt from bson.objectid import ObjectId from opaque_keys.edx.locator import LibraryLocator from xmodule.modulestore.exceptions import DuplicateCourseError from xmodule.modulestore.tests.factories import LibraryFactory, ItemFactory, check_mongo_calls from xmodule.modulestore.tests.utils import MixedSplitTestCase @ddt.ddt class TestLibraries(MixedSplitTestCase): """ Test for libraries. Mostly tests code found throughout split mongo, but also tests library_root_xblock.py """ def test_create_library(self): """ Test that we can create a library, and see how many mongo calls it uses to do so. Expected mongo calls, in order: find_one({'org': '...', 'run': 'library', 'course': '...'}) insert(definition: {'block_type': 'library', 'fields': {}}) insert_structure(bulk) insert_course_index(bulk) get_course_index(bulk) """ with check_mongo_calls(2, 3): LibraryFactory.create(modulestore=self.store) def test_duplicate_library(self): """ Make sure we cannot create duplicate libraries """ org, lib_code = ('DuplicateX', "DUP") LibraryFactory.create(org=org, library=lib_code, modulestore=self.store) with self.assertRaises(DuplicateCourseError): LibraryFactory.create(org=org, library=lib_code, modulestore=self.store) @ddt.data( "This is a test library!", u"Ωμέγα Βιβλιοθήκη", ) def test_str_repr(self, name): """ Test __unicode__() and __str__() methods of libraries """ library = LibraryFactory.create(metadata={"display_name": name}, modulestore=self.store) self.assertIn(name, unicode(library)) if not isinstance(name, unicode): self.assertIn(name, str(library)) def test_display_with_default_methods(self): """ Check that the display_x_with_default methods have been implemented, for compatibility with courses. """ org = 'TestOrgX' lib_code = 'LC101' library = LibraryFactory.create(org=org, library=lib_code, modulestore=self.store) self.assertEqual(library.display_org_with_default, org) self.assertEqual(library.display_number_with_default, lib_code) def test_block_with_children(self): """ Test that blocks used from a library can have children. 
""" library = LibraryFactory.create(modulestore=self.store) # In the library, create a vertical block with a child: vert_block = ItemFactory.create( category="vertical", parent_location=library.location, user_id=self.user_id, publish_item=False, modulestore=self.store, ) child_block = ItemFactory.create( category="html", parent_location=vert_block.location, user_id=self.user_id, publish_item=False, metadata={"data": "Hello world", }, modulestore=self.store, ) self.assertEqual(child_block.parent.replace(version_guid=None, branch=None), vert_block.location) def test_update_item(self): """ Test that update_item works for a block in a library """ library = LibraryFactory.create(modulestore=self.store) block = ItemFactory.create( category="html", parent_location=library.location, user_id=self.user_id, publish_item=False, metadata={"data": "Hello world", }, modulestore=self.store, ) block_key = block.location block.data = "NEW" old_version = self.store.get_item(block_key, remove_version=False, remove_branch=False).location.version_guid self.store.update_item(block, self.user_id) # Reload block from the modulestore block = self.store.get_item(block_key) self.assertEqual(block.data, "NEW") self.assertEqual(block.location, block_key) new_version = self.store.get_item(block_key, remove_version=False, remove_branch=False).location.version_guid self.assertNotEqual(old_version, new_version) def test_delete_item(self): """ Test to make sure delete_item() works on blocks in a library """ library = LibraryFactory.create(modulestore=self.store) lib_key = library.location.library_key block = ItemFactory.create( category="html", parent_location=library.location, user_id=self.user_id, publish_item=False, modulestore=self.store, ) library = self.store.get_library(lib_key) self.assertEqual(len(library.children), 1) self.store.delete_item(block.location, self.user_id) library = self.store.get_library(lib_key) self.assertEqual(len(library.children), 0) def test_get_library_non_existent(self): """ Test get_library() with non-existent key """ result = self.store.get_library(LibraryLocator("non", "existent")) self.assertEqual(result, None) def test_get_libraries(self): """ Test get_libraries() """ libraries = [LibraryFactory.create(modulestore=self.store) for _ in range(3)] lib_dict = dict([(lib.location.library_key, lib) for lib in libraries]) lib_list = self.store.get_libraries() self.assertEqual(len(lib_list), len(libraries)) for lib in lib_list: self.assertIn(lib.location.library_key, lib_dict) def test_strip(self): """ Test that library keys coming out of MixedModuleStore are stripped of branch and version info by default. 
""" # Create a library lib_key = LibraryFactory.create(modulestore=self.store).location.library_key # Re-load the library from the modulestore, explicitly including version information: lib = self.store.get_library(lib_key) self.assertEqual(lib.location.version_guid, None) self.assertEqual(lib.location.branch, None) self.assertEqual(lib.location.library_key.version_guid, None) self.assertEqual(lib.location.library_key.branch, None) def test_get_lib_version(self): """ Test that we can get version data about a library from get_library() """ # Create a library lib_key = LibraryFactory.create(modulestore=self.store).location.library_key # Re-load the library from the modulestore, explicitly including version information: lib = self.store.get_library(lib_key, remove_version=False, remove_branch=False) version = lib.location.library_key.version_guid self.assertIsInstance(version, ObjectId) def test_xblock_in_lib_have_published_version_returns_false(self): library = LibraryFactory.create(modulestore=self.store) block = ItemFactory.create( category="html", parent_location=library.location, user_id=self.user_id, publish_item=False, modulestore=self.store, ) self.assertFalse(self.store.has_published_version(block))
agpl-3.0
-3,050,397,469,555,729,400
-512,294,190,429,254,900
37.831579
117
0.635945
false
huggingface/transformers
examples/tensorflow/benchmarking/run_benchmark_tf.py
2
1915
#!/usr/bin/env python # coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Benchmarking the library on inference and training in TensorFlow""" from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments def main(): parser = HfArgumentParser(TensorFlowBenchmarkArguments) benchmark_args = parser.parse_args_into_dataclasses()[0] benchmark = TensorFlowBenchmark(args=benchmark_args) try: benchmark_args = parser.parse_args_into_dataclasses()[0] except ValueError as e: arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead." begin_error_msg = " ".join(str(e).split(" ")[:-1]) full_error_msg = "" depreciated_args = eval(str(e).split(" ")[-1]) wrong_args = [] for arg in depreciated_args: # arg[2:] removes '--' if arg[2:] in TensorFlowBenchmark.deprecated_args: # arg[5:] removes '--no_' full_error_msg += arg_error_msg.format(arg[5:]) else: wrong_args.append(arg) if len(wrong_args) > 0: full_error_msg = full_error_msg + begin_error_msg + str(wrong_args) raise ValueError(full_error_msg) benchmark.run() if __name__ == "__main__": main()
apache-2.0
-8,836,675,037,526,030,000
-5,758,236,106,585,437,000
38.895833
92
0.65953
false
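A hedged sketch of driving the same benchmark classes directly instead of through argparse; the argument names follow TensorFlowBenchmarkArguments as shipped alongside this script (the benchmark utilities are deprecated in later transformers releases), and the model name is just an example.

from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

args = TensorFlowBenchmarkArguments(
    models=["bert-base-uncased"],   # example checkpoint
    batch_sizes=[8],
    sequence_lengths=[128],
    inference=True,
    training=False,
)
results = TensorFlowBenchmark(args=args).run()
print(results)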
vineodd/PIMSim
GEM5Simulation/gem5/ext/pybind11/tests/test_methods_and_attributes.py
13
17017
import pytest from pybind11_tests import methods_and_attributes as m from pybind11_tests import ConstructorStats def test_methods_and_attributes(): instance1 = m.ExampleMandA() instance2 = m.ExampleMandA(32) instance1.add1(instance2) instance1.add2(instance2) instance1.add3(instance2) instance1.add4(instance2) instance1.add5(instance2) instance1.add6(32) instance1.add7(32) instance1.add8(32) instance1.add9(32) instance1.add10(32) assert str(instance1) == "ExampleMandA[value=320]" assert str(instance2) == "ExampleMandA[value=32]" assert str(instance1.self1()) == "ExampleMandA[value=320]" assert str(instance1.self2()) == "ExampleMandA[value=320]" assert str(instance1.self3()) == "ExampleMandA[value=320]" assert str(instance1.self4()) == "ExampleMandA[value=320]" assert str(instance1.self5()) == "ExampleMandA[value=320]" assert instance1.internal1() == 320 assert instance1.internal2() == 320 assert instance1.internal3() == 320 assert instance1.internal4() == 320 assert instance1.internal5() == 320 assert instance1.overloaded() == "()" assert instance1.overloaded(0) == "(int)" assert instance1.overloaded(1, 1.0) == "(int, float)" assert instance1.overloaded(2.0, 2) == "(float, int)" assert instance1.overloaded(3, 3) == "(int, int)" assert instance1.overloaded(4., 4.) == "(float, float)" assert instance1.overloaded_const(-3) == "(int) const" assert instance1.overloaded_const(5, 5.0) == "(int, float) const" assert instance1.overloaded_const(6.0, 6) == "(float, int) const" assert instance1.overloaded_const(7, 7) == "(int, int) const" assert instance1.overloaded_const(8., 8.) == "(float, float) const" assert instance1.overloaded_float(1, 1) == "(float, float)" assert instance1.overloaded_float(1, 1.) == "(float, float)" assert instance1.overloaded_float(1., 1) == "(float, float)" assert instance1.overloaded_float(1., 1.) 
== "(float, float)" assert instance1.value == 320 instance1.value = 100 assert str(instance1) == "ExampleMandA[value=100]" cstats = ConstructorStats.get(m.ExampleMandA) assert cstats.alive() == 2 del instance1, instance2 assert cstats.alive() == 0 assert cstats.values() == ["32"] assert cstats.default_constructions == 1 assert cstats.copy_constructions == 3 assert cstats.move_constructions >= 1 assert cstats.copy_assignments == 0 assert cstats.move_assignments == 0 def test_copy_method(): """Issue #443: calling copied methods fails in Python 3""" m.ExampleMandA.add2c = m.ExampleMandA.add2 m.ExampleMandA.add2d = m.ExampleMandA.add2b a = m.ExampleMandA(123) assert a.value == 123 a.add2(m.ExampleMandA(-100)) assert a.value == 23 a.add2b(m.ExampleMandA(20)) assert a.value == 43 a.add2c(m.ExampleMandA(6)) assert a.value == 49 a.add2d(m.ExampleMandA(-7)) assert a.value == 42 def test_properties(): instance = m.TestProperties() assert instance.def_readonly == 1 with pytest.raises(AttributeError): instance.def_readonly = 2 instance.def_readwrite = 2 assert instance.def_readwrite == 2 assert instance.def_property_readonly == 2 with pytest.raises(AttributeError): instance.def_property_readonly = 3 instance.def_property = 3 assert instance.def_property == 3 def test_static_properties(): assert m.TestProperties.def_readonly_static == 1 with pytest.raises(AttributeError) as excinfo: m.TestProperties.def_readonly_static = 2 assert "can't set attribute" in str(excinfo) m.TestProperties.def_readwrite_static = 2 assert m.TestProperties.def_readwrite_static == 2 assert m.TestProperties.def_property_readonly_static == 2 with pytest.raises(AttributeError) as excinfo: m.TestProperties.def_property_readonly_static = 3 assert "can't set attribute" in str(excinfo) m.TestProperties.def_property_static = 3 assert m.TestProperties.def_property_static == 3 # Static property read and write via instance instance = m.TestProperties() m.TestProperties.def_readwrite_static = 0 assert m.TestProperties.def_readwrite_static == 0 assert instance.def_readwrite_static == 0 instance.def_readwrite_static = 2 assert m.TestProperties.def_readwrite_static == 2 assert instance.def_readwrite_static == 2 # It should be possible to override properties in derived classes assert m.TestPropertiesOverride().def_readonly == 99 assert m.TestPropertiesOverride.def_readonly_static == 99 def test_static_cls(): """Static property getter and setters expect the type object as the their only argument""" instance = m.TestProperties() assert m.TestProperties.static_cls is m.TestProperties assert instance.static_cls is m.TestProperties def check_self(self): assert self is m.TestProperties m.TestProperties.static_cls = check_self instance.static_cls = check_self def test_metaclass_override(): """Overriding pybind11's default metaclass changes the behavior of `static_property`""" assert type(m.ExampleMandA).__name__ == "pybind11_type" assert type(m.MetaclassOverride).__name__ == "type" assert m.MetaclassOverride.readonly == 1 assert type(m.MetaclassOverride.__dict__["readonly"]).__name__ == "pybind11_static_property" # Regular `type` replaces the property instead of calling `__set__()` m.MetaclassOverride.readonly = 2 assert m.MetaclassOverride.readonly == 2 assert isinstance(m.MetaclassOverride.__dict__["readonly"], int) def test_no_mixed_overloads(): from pybind11_tests import debug_enabled with pytest.raises(RuntimeError) as excinfo: m.ExampleMandA.add_mixed_overloads1() assert (str(excinfo.value) == "overloading a method with both static and instance 
methods is not supported; " + ("compile in debug mode for more details" if not debug_enabled else "error while attempting to bind static method ExampleMandA.overload_mixed1" "(arg0: float) -> str") ) with pytest.raises(RuntimeError) as excinfo: m.ExampleMandA.add_mixed_overloads2() assert (str(excinfo.value) == "overloading a method with both static and instance methods is not supported; " + ("compile in debug mode for more details" if not debug_enabled else "error while attempting to bind instance method ExampleMandA.overload_mixed2" "(self: pybind11_tests.methods_and_attributes.ExampleMandA, arg0: int, arg1: int)" " -> str") ) @pytest.mark.parametrize("access", ["ro", "rw", "static_ro", "static_rw"]) def test_property_return_value_policies(access): if not access.startswith("static"): obj = m.TestPropRVP() else: obj = m.TestPropRVP ref = getattr(obj, access + "_ref") assert ref.value == 1 ref.value = 2 assert getattr(obj, access + "_ref").value == 2 ref.value = 1 # restore original value for static properties copy = getattr(obj, access + "_copy") assert copy.value == 1 copy.value = 2 assert getattr(obj, access + "_copy").value == 1 copy = getattr(obj, access + "_func") assert copy.value == 1 copy.value = 2 assert getattr(obj, access + "_func").value == 1 def test_property_rvalue_policy(): """When returning an rvalue, the return value policy is automatically changed from `reference(_internal)` to `move`. The following would not work otherwise.""" instance = m.TestPropRVP() o = instance.rvalue assert o.value == 1 os = m.TestPropRVP.static_rvalue assert os.value == 1 # https://bitbucket.org/pypy/pypy/issues/2447 @pytest.unsupported_on_pypy def test_dynamic_attributes(): instance = m.DynamicClass() assert not hasattr(instance, "foo") assert "foo" not in dir(instance) # Dynamically add attribute instance.foo = 42 assert hasattr(instance, "foo") assert instance.foo == 42 assert "foo" in dir(instance) # __dict__ should be accessible and replaceable assert "foo" in instance.__dict__ instance.__dict__ = {"bar": True} assert not hasattr(instance, "foo") assert hasattr(instance, "bar") with pytest.raises(TypeError) as excinfo: instance.__dict__ = [] assert str(excinfo.value) == "__dict__ must be set to a dictionary, not a 'list'" cstats = ConstructorStats.get(m.DynamicClass) assert cstats.alive() == 1 del instance assert cstats.alive() == 0 # Derived classes should work as well class PythonDerivedDynamicClass(m.DynamicClass): pass for cls in m.CppDerivedDynamicClass, PythonDerivedDynamicClass: derived = cls() derived.foobar = 100 assert derived.foobar == 100 assert cstats.alive() == 1 del derived assert cstats.alive() == 0 # https://bitbucket.org/pypy/pypy/issues/2447 @pytest.unsupported_on_pypy def test_cyclic_gc(): # One object references itself instance = m.DynamicClass() instance.circular_reference = instance cstats = ConstructorStats.get(m.DynamicClass) assert cstats.alive() == 1 del instance assert cstats.alive() == 0 # Two object reference each other i1 = m.DynamicClass() i2 = m.DynamicClass() i1.cycle = i2 i2.cycle = i1 assert cstats.alive() == 2 del i1, i2 assert cstats.alive() == 0 def test_noconvert_args(msg): a = m.ArgInspector() assert msg(a.f("hi")) == """ loading ArgInspector1 argument WITH conversion allowed. Argument value = hi """ assert msg(a.g("this is a", "this is b")) == """ loading ArgInspector1 argument WITHOUT conversion allowed. Argument value = this is a loading ArgInspector1 argument WITH conversion allowed. 
Argument value = this is b 13 loading ArgInspector2 argument WITH conversion allowed. Argument value = (default arg inspector 2) """ # noqa: E501 line too long assert msg(a.g("this is a", "this is b", 42)) == """ loading ArgInspector1 argument WITHOUT conversion allowed. Argument value = this is a loading ArgInspector1 argument WITH conversion allowed. Argument value = this is b 42 loading ArgInspector2 argument WITH conversion allowed. Argument value = (default arg inspector 2) """ # noqa: E501 line too long assert msg(a.g("this is a", "this is b", 42, "this is d")) == """ loading ArgInspector1 argument WITHOUT conversion allowed. Argument value = this is a loading ArgInspector1 argument WITH conversion allowed. Argument value = this is b 42 loading ArgInspector2 argument WITH conversion allowed. Argument value = this is d """ assert (a.h("arg 1") == "loading ArgInspector2 argument WITHOUT conversion allowed. Argument value = arg 1") assert msg(m.arg_inspect_func("A1", "A2")) == """ loading ArgInspector2 argument WITH conversion allowed. Argument value = A1 loading ArgInspector1 argument WITHOUT conversion allowed. Argument value = A2 """ assert m.floats_preferred(4) == 2.0 assert m.floats_only(4.0) == 2.0 with pytest.raises(TypeError) as excinfo: m.floats_only(4) assert msg(excinfo.value) == """ floats_only(): incompatible function arguments. The following argument types are supported: 1. (f: float) -> float Invoked with: 4 """ assert m.ints_preferred(4) == 2 assert m.ints_preferred(True) == 0 with pytest.raises(TypeError) as excinfo: m.ints_preferred(4.0) assert msg(excinfo.value) == """ ints_preferred(): incompatible function arguments. The following argument types are supported: 1. (i: int) -> int Invoked with: 4.0 """ # noqa: E501 line too long assert m.ints_only(4) == 2 with pytest.raises(TypeError) as excinfo: m.ints_only(4.0) assert msg(excinfo.value) == """ ints_only(): incompatible function arguments. The following argument types are supported: 1. (i: int) -> int Invoked with: 4.0 """ def test_bad_arg_default(msg): from pybind11_tests import debug_enabled with pytest.raises(RuntimeError) as excinfo: m.bad_arg_def_named() assert msg(excinfo.value) == ( "arg(): could not convert default argument 'a: UnregisteredType' in function " "'should_fail' into a Python object (type not registered yet?)" if debug_enabled else "arg(): could not convert default argument into a Python object (type not registered " "yet?). Compile in debug mode for more information." ) with pytest.raises(RuntimeError) as excinfo: m.bad_arg_def_unnamed() assert msg(excinfo.value) == ( "arg(): could not convert default argument 'UnregisteredType' in function " "'should_fail' into a Python object (type not registered yet?)" if debug_enabled else "arg(): could not convert default argument into a Python object (type not registered " "yet?). Compile in debug mode for more information." 
) def test_accepts_none(msg): a = m.NoneTester() assert m.no_none1(a) == 42 assert m.no_none2(a) == 42 assert m.no_none3(a) == 42 assert m.no_none4(a) == 42 assert m.no_none5(a) == 42 assert m.ok_none1(a) == 42 assert m.ok_none2(a) == 42 assert m.ok_none3(a) == 42 assert m.ok_none4(a) == 42 assert m.ok_none5(a) == 42 with pytest.raises(TypeError) as excinfo: m.no_none1(None) assert "incompatible function arguments" in str(excinfo.value) with pytest.raises(TypeError) as excinfo: m.no_none2(None) assert "incompatible function arguments" in str(excinfo.value) with pytest.raises(TypeError) as excinfo: m.no_none3(None) assert "incompatible function arguments" in str(excinfo.value) with pytest.raises(TypeError) as excinfo: m.no_none4(None) assert "incompatible function arguments" in str(excinfo.value) with pytest.raises(TypeError) as excinfo: m.no_none5(None) assert "incompatible function arguments" in str(excinfo.value) # The first one still raises because you can't pass None as a lvalue reference arg: with pytest.raises(TypeError) as excinfo: assert m.ok_none1(None) == -1 assert msg(excinfo.value) == """ ok_none1(): incompatible function arguments. The following argument types are supported: 1. (arg0: m.methods_and_attributes.NoneTester) -> int Invoked with: None """ # The rest take the argument as pointer or holder, and accept None: assert m.ok_none2(None) == -1 assert m.ok_none3(None) == -1 assert m.ok_none4(None) == -1 assert m.ok_none5(None) == -1 def test_str_issue(msg): """#283: __str__ called on uninitialized instance when constructor arguments invalid""" assert str(m.StrIssue(3)) == "StrIssue[3]" with pytest.raises(TypeError) as excinfo: str(m.StrIssue("no", "such", "constructor")) assert msg(excinfo.value) == """ __init__(): incompatible constructor arguments. The following argument types are supported: 1. m.methods_and_attributes.StrIssue(arg0: int) 2. m.methods_and_attributes.StrIssue() Invoked with: 'no', 'such', 'constructor' """ def test_unregistered_base_implementations(): a = m.RegisteredDerived() a.do_nothing() assert a.rw_value == 42 assert a.ro_value == 1.25 a.rw_value += 5 assert a.sum() == 48.25 a.increase_value() assert a.rw_value == 48 assert a.ro_value == 1.5 assert a.sum() == 49.5 assert a.rw_value_prop == 48 a.rw_value_prop += 1 assert a.rw_value_prop == 49 a.increase_value() assert a.ro_value_prop == 1.75 def test_custom_caster_destruction(): """Tests that returning a pointer to a type that gets converted with a custom type caster gets destroyed when the function has py::return_value_policy::take_ownership policy applied.""" cstats = m.destruction_tester_cstats() # This one *doesn't* have take_ownership: the pointer should be used but not destroyed: z = m.custom_caster_no_destroy() assert cstats.alive() == 1 and cstats.default_constructions == 1 assert z # take_ownership applied: this constructs a new object, casts it, then destroys it: z = m.custom_caster_destroy() assert z assert cstats.default_constructions == 2 # Same, but with a const pointer return (which should *not* inhibit destruction): z = m.custom_caster_destroy_const() assert z assert cstats.default_constructions == 3 # Make sure we still only have the original object (from ..._no_destroy()) alive: assert cstats.alive() == 1
gpl-3.0
5,428,050,152,751,483,000
-4,463,932,639,017,107,000
34.75
107
0.661926
false
junhuac/MQUIC
depot_tools/ENV/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/util/url.py
375
5760
from collections import namedtuple from ..exceptions import LocationParseError url_attrs = ['scheme', 'auth', 'host', 'port', 'path', 'query', 'fragment'] class Url(namedtuple('Url', url_attrs)): """ Datastructure for representing an HTTP URL. Used as a return value for :func:`parse_url`. """ slots = () def __new__(cls, scheme=None, auth=None, host=None, port=None, path=None, query=None, fragment=None): return super(Url, cls).__new__(cls, scheme, auth, host, port, path, query, fragment) @property def hostname(self): """For backwards-compatibility with urlparse. We're nice like that.""" return self.host @property def request_uri(self): """Absolute path including the query string.""" uri = self.path or '/' if self.query is not None: uri += '?' + self.query return uri @property def netloc(self): """Network location including host and port""" if self.port: return '%s:%d' % (self.host, self.port) return self.host @property def url(self): """ Convert self into a url This function should more or less round-trip with :func:`.parse_url`. The returned url may not be exactly the same as the url inputted to :func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls with a blank port will have : removed). Example: :: >>> U = parse_url('http://google.com/mail/') >>> U.url 'http://google.com/mail/' >>> Url('http', 'username:password', 'host.com', 80, ... '/path', 'query', 'fragment').url 'http://username:[email protected]:80/path?query#fragment' """ scheme, auth, host, port, path, query, fragment = self url = '' # We use "is not None" we want things to happen with empty strings (or 0 port) if scheme is not None: url += scheme + '://' if auth is not None: url += auth + '@' if host is not None: url += host if port is not None: url += ':' + str(port) if path is not None: url += path if query is not None: url += '?' + query if fragment is not None: url += '#' + fragment return url def __str__(self): return self.url def split_first(s, delims): """ Given a string and an iterable of delimiters, split on the first found delimiter. Return two split parts and the matched delimiter. If not found, then the first part is the full input string. Example:: >>> split_first('foo/bar?baz', '?/=') ('foo', 'bar?baz', '/') >>> split_first('foo/bar?baz', '123') ('foo/bar?baz', '', None) Scales linearly with number of delims. Not ideal for large number of delims. """ min_idx = None min_delim = None for d in delims: idx = s.find(d) if idx < 0: continue if min_idx is None or idx < min_idx: min_idx = idx min_delim = d if min_idx is None or min_idx < 0: return s, '', None return s[:min_idx], s[min_idx+1:], min_delim def parse_url(url): """ Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is performed to parse incomplete urls. Fields not provided will be None. Partly backwards-compatible with :mod:`urlparse`. Example:: >>> parse_url('http://google.com/mail/') Url(scheme='http', host='google.com', port=None, path='/mail/', ...) >>> parse_url('google.com:80') Url(scheme=None, host='google.com', port=80, path=None, ...) >>> parse_url('/foo?bar') Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...) """ # While this code has overlap with stdlib's urlparse, it is much # simplified for our needs and less annoying. # Additionally, this implementations does silly things to be optimal # on CPython. 
if not url: # Empty return Url() scheme = None auth = None host = None port = None path = None fragment = None query = None # Scheme if '://' in url: scheme, url = url.split('://', 1) # Find the earliest Authority Terminator # (http://tools.ietf.org/html/rfc3986#section-3.2) url, path_, delim = split_first(url, ['/', '?', '#']) if delim: # Reassemble the path path = delim + path_ # Auth if '@' in url: # Last '@' denotes end of auth part auth, url = url.rsplit('@', 1) # IPv6 if url and url[0] == '[': host, url = url.split(']', 1) host += ']' # Port if ':' in url: _host, port = url.split(':', 1) if not host: host = _host if port: # If given, ports must be integers. if not port.isdigit(): raise LocationParseError(url) port = int(port) else: # Blank ports are cool, too. (rfc3986#section-3.2.3) port = None elif not host and url: host = url if not path: return Url(scheme, auth, host, port, path, query, fragment) # Fragment if '#' in path: path, fragment = path.split('#', 1) # Query if '?' in path: path, query = path.split('?', 1) return Url(scheme, auth, host, port, path, query, fragment) def get_host(url): """ Deprecated. Use :func:`.parse_url` instead. """ p = parse_url(url) return p.scheme or 'http', p.hostname, p.port
mit
2,810,680,675,792,779,300
-5,246,577,895,927,291,000
26.169811
86
0.538194
false
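A minimal usage sketch for the `parse_url`/`split_first` helpers in the urllib3 record above. The import path is an assumption: in a plain urllib3 1.x install they live at `urllib3.util.url`; in the vendored copy shown here they sit under pip's bundled requests package.

# Sketch: exercising parse_url / split_first from the module above
# (urllib3 1.x era API; import path assumed).
from urllib3.util.url import parse_url, split_first

u = parse_url('http://user:[email protected]:8080/mail/inbox?folder=42#top')
assert u.scheme == 'http' and u.auth == 'user:pw'
assert u.host == 'example.com' and u.port == 8080
assert u.request_uri == '/mail/inbox?folder=42'
assert u.netloc == 'example.com:8080'
assert parse_url(u.url) == u          # .url round-trips through parse_url

# Best-effort parsing of incomplete URLs, as in the docstring examples.
assert parse_url('google.com:80').port == 80
assert parse_url('/foo?bar').query == 'bar'

# split_first returns the piece before the earliest delimiter found.
assert split_first('foo/bar?baz', '?/=') == ('foo', 'bar?baz', '/')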
TaskEvolution/Task-Coach-Evolution
taskcoach/taskcoachlib/thirdparty/timeline/timeline.py
1
16578
#! /usr/bin/env python import wx, wx.lib TimeLineSelectionEvent, EVT_TIMELINE_SELECTED = wx.lib.newevent.NewEvent() TimeLineActivationEvent, EVT_TIMELINE_ACTIVATED = wx.lib.newevent.NewEvent() class HotMap(object): ''' Keep track of which node is where. ''' def __init__(self, parent=None): self.parent = parent self.nodes = [] self.rects = {} self.children = {} super(HotMap, self).__init__() def append(self, node, rect): self.nodes.append(node) self.rects[node] = rect self.children[node] = HotMap(node) def __getitem__(self, node): return self.children[node] def findNodeAtPosition(self, position, parent=None): ''' Retrieve the node at the given position. ''' for node, rect in self.rects.items(): if rect.Contains(position): return self[node].findNodeAtPosition(position, node) return parent def firstNode(self): return self.nodes[0] if self.nodes else None def lastNode(self, parent=None): if self.nodes: last = self.nodes[-1] return self[last].lastNode(last) else: return parent def findNode(self, target): if target in self.nodes: return self for node in self.nodes: result = self[node].findNode(target) if result: return result return None def nextChild(self, target): index = self.nodes.index(target) index = min(index+1, len(self.nodes)-1) return self.nodes[index] def previousChild(self, target): index = self.nodes.index(target) index = max(index-1, 0) return self.nodes[index] def firstChild(self, target): children = self[target].nodes if children: return children[0] else: return target class TimeLine(wx.Panel): def __init__(self, *args, **kwargs): self.model = kwargs.pop('model', []) self.padding = kwargs.pop('padding', 3) self.adapter = kwargs.pop('adapter', DefaultAdapter()) self.selectedNode = None self.backgroundColour = wx.WHITE self._buffer = wx.EmptyBitmap(20, 20) # Have a default buffer ready self.DEFAULT_PEN = wx.Pen(wx.BLACK, 1, wx.SOLID) self.SELECTED_PEN = wx.Pen(wx.WHITE, 2, wx.SOLID) kwargs['style'] = wx.TAB_TRAVERSAL|wx.NO_BORDER|wx.FULL_REPAINT_ON_RESIZE|wx.WANTS_CHARS super(TimeLine, self).__init__(*args, **kwargs) self.Bind(wx.EVT_PAINT, self.OnPaint) self.Bind(wx.EVT_SIZE, self.OnSize ) self.Bind(wx.EVT_LEFT_UP, self.OnClickRelease) self.Bind(wx.EVT_LEFT_DCLICK, self.OnDoubleClick) self.Bind(wx.EVT_KEY_UP, self.OnKeyUp) self.OnSize(None) def SetBackgroundColour(self, colour): self.backgroundColour = colour def Refresh(self): self.UpdateDrawing() def OnPaint(self, event): dc = wx.BufferedPaintDC(self, self._buffer) def OnSize(self, event): # The buffer is initialized in here, so that the buffer is always # the same size as the Window. width, height = self.GetClientSizeTuple() if width <= 0 or height <= 0: return # Make new off-screen bitmap: this bitmap will always have the # current drawing in it, so it can be used to save the image to # a file, or whatever. 
self._buffer = wx.EmptyBitmap(width, height) self.UpdateDrawing() def OnClickRelease(self, event): event.Skip() self.SetFocus() point = event.GetPosition() node = self.hot_map.findNodeAtPosition(point) self.SetSelected(node, point) def OnDoubleClick(self, event): point = event.GetPosition() node = self.hot_map.findNodeAtPosition(point) if node: wx.PostEvent(self, TimeLineActivationEvent(node=node, point=point)) def OnKeyUp(self, event): event.Skip() if not self.hot_map: return if event.KeyCode == wx.WXK_HOME: self.SetSelected(self.hot_map.firstNode()) return elif event.KeyCode == wx.WXK_END: self.SetSelected(self.hot_map.lastNode()) return if not self.selectedNode: return if event.KeyCode == wx.WXK_RETURN: wx.PostEvent(self, TimeLineActivationEvent(node=self.selectedNode)) return hot_map = self.hot_map.findNode(self.selectedNode) if hot_map is None: newSelection = self.hot_map.firstNode() elif event.KeyCode == wx.WXK_DOWN: newSelection = hot_map.nextChild(self.selectedNode) elif event.KeyCode == wx.WXK_UP: newSelection = hot_map.previousChild(self.selectedNode) elif event.KeyCode == wx.WXK_RIGHT: newSelection = hot_map.firstChild(self.selectedNode) elif event.KeyCode == wx.WXK_LEFT and hot_map.parent: newSelection = hot_map.parent else: newSelection = self.selectedNode self.SetSelected(newSelection) def GetSelected(self): return self.selectedNode def SetSelected(self, node, point=None): ''' Set the given node selected in the timeline widget ''' if node == self.selectedNode: return self.selectedNode = node self.Refresh() if node: wx.PostEvent(self, TimeLineSelectionEvent(node=node, point=point)) def UpdateDrawing(self): dc = wx.BufferedDC(wx.ClientDC(self), self._buffer) self.Draw(dc) def Draw(self, dc): ''' Draw the timeline on the device context. ''' self.hot_map = HotMap() dc.BeginDrawing() brush = wx.Brush(self.backgroundColour) dc.SetBackground(brush) dc.Clear() dc.SetFont(self.FontForLabels(dc)) if self.model: bounds = self.adapter.bounds(self.model) self.min_start = float(min(bounds)) self.max_stop = float(max(bounds)) if self.max_stop - self.min_start < 100: self.max_stop += 100 self.length = self.max_stop - self.min_start self.width, self.height = dc.GetSize() labelHeight = dc.GetTextExtent('ABC')[1] + 2 # Leave room for time labels self.DrawParallelChildren(dc, self.model, labelHeight, self.height-labelHeight, self.hot_map) self.DrawNow(dc) dc.EndDrawing() def FontForLabels(self, dc): ''' Return the default GUI font, scaled for printing if necessary. 
''' font = wx.SystemSettings_GetFont(wx.SYS_DEFAULT_GUI_FONT) scale = dc.GetPPI()[0] / wx.ScreenDC().GetPPI()[0] font.SetPointSize(scale*font.GetPointSize()) return font def DrawBox(self, dc, node, y, h, hot_map, isSequentialNode=False, depth=0): if h < self.padding: return start, stop = self.adapter.start(node), self.adapter.stop(node) if start is None: start = self.min_start - 10 if stop is None: stop = self.max_stop + 10 start, stop = min(start, stop), max(start, stop) # Sanitize input x = self.scaleX(start) + 2*depth w = self.scaleWidth(stop - start) - 4*depth hot_map.append(node, (wx.Rect(int(x), int(y), int(w), int(h)))) self.DrawRectangle(dc, node, x, y, w, h, isSequentialNode, depth) if not isSequentialNode: self.DrawIconAndLabel(dc, node, x, y, w, h, depth) seqHeight = min(dc.GetTextExtent('ABC')[1] + 2, h) self.DrawSequentialChildren(dc, node, y+2, seqHeight-4, hot_map[node], depth+1) self.DrawParallelChildren(dc, node, y+seqHeight, h-seqHeight, hot_map[node], depth+1) def DrawRectangle(self, dc, node, x, y, w, h, isSequentialNode, depth): dc = wx.GCDC(dc) if isSequentialNode else dc dc.SetClippingRegion(x, y, w, h) dc.SetBrush(self.brushForNode(node, isSequentialNode, depth)) dc.SetPen(self.penForNode(node, isSequentialNode, depth)) rounding = 0 if isSequentialNode and (h < self.padding * 4 or w < self.padding * 4) else self.padding * 2 dc.DrawRoundedRectangle(x, y, w, h, rounding) dc.DestroyClippingRegion() def DrawIconAndLabel(self, dc, node, x, y, w, h, depth): ''' Draw the icon, if any, and the label, if any, of the node. ''' # Make sure the Icon and Label are visible: if x < 0: w -= abs(x) x = 0 dc.SetClippingRegion(x+1, y+1, w-2, h-2) # Don't draw outside the box icon = self.adapter.icon(node, node==self.selectedNode) if icon and h >= icon.GetHeight() and w >= icon.GetWidth(): iconWidth = icon.GetWidth() + 2 dc.DrawIcon(icon, x+2, y+2) else: iconWidth = 0 if h >= dc.GetTextExtent('ABC')[1]: dc.SetFont(self.fontForNode(dc, node, depth)) dc.SetTextForeground(self.textForegroundForNode(node, depth)) dc.DrawText(self.adapter.label(node), x + iconWidth + 2, y+2) dc.DestroyClippingRegion() def DrawParallelChildren(self, dc, parent, y, h, hot_map, depth=0): children = self.adapter.parallel_children(parent) if not children: return childY = y h -= len(children) # vertical space between children recursiveChildrenList = [self.adapter.parallel_children(child, recursive=True) \ for child in children] recursiveChildrenCounts = [len(recursiveChildren) for recursiveChildren in recursiveChildrenList] recursiveChildHeight = h / float(len(children) + sum(recursiveChildrenCounts)) for child, numberOfRecursiveChildren in zip(children, recursiveChildrenCounts): childHeight = recursiveChildHeight * (numberOfRecursiveChildren + 1) if childHeight >= self.padding: self.DrawBox(dc, child, childY, childHeight, hot_map, depth=depth) childY += childHeight + 1 def DrawSequentialChildren(self, dc, parent, y, h, hot_map, depth=0): for child in self.adapter.sequential_children(parent): self.DrawBox(dc, child, y, h, hot_map, isSequentialNode=True, depth=depth) def DrawNow(self, dc): alpha_dc = wx.GCDC(dc) alpha_dc.SetPen(wx.Pen(wx.Color(128, 200, 128, 128), width=3)) now = self.scaleX(self.adapter.now()) alpha_dc.DrawLine(now, 0, now, self.height) label = self.adapter.nowlabel() textWidth = alpha_dc.GetTextExtent(label)[0] alpha_dc.DrawText(label, now - (textWidth / 2), 0) def scaleX(self, x): return self.scaleWidth(x - self.min_start) def scaleWidth(self, width): return (width / self.length) * self.width 
def textForegroundForNode(self, node, depth=0): ''' Determine the text foreground color to use to display the label of the given node ''' if node == self.selectedNode: fg_color = wx.SystemSettings_GetColour(wx.SYS_COLOUR_HIGHLIGHTTEXT) else: fg_color = self.adapter.foreground_color(node, depth) if not fg_color: fg_color = wx.SystemSettings_GetColour(wx.SYS_COLOUR_WINDOWTEXT) return fg_color def fontForNode(self, dc, node, depth=0): ''' Determine the font to use to display the label of the given node, scaled for printing if necessary. ''' font = self.adapter.font(node, depth) font = font if font else wx.SystemSettings_GetFont(wx.SYS_DEFAULT_GUI_FONT) scale = dc.GetPPI()[0] / wx.ScreenDC().GetPPI()[0] font.SetPointSize(scale*font.GetPointSize()) return font def brushForNode(self, node, isSequentialNode=False, depth=0): ''' Create brush to use to display the given node ''' if node == self.selectedNode: color = wx.SystemSettings_GetColour(wx.SYS_COLOUR_HIGHLIGHT) else: color = self.adapter.background_color(node) if color: # The adapter returns a 3-tuple color = wx.Color(*color) else: red = (depth * 10)%255 green = 255-((depth * 10)%255) blue = 200 color = wx.Color(red, green, blue) if isSequentialNode: color.Set(color.Red(), color.Green(), color.Blue(), 128) return wx.Brush(color) def penForNode(self, node, isSequentialNode=False, depth=0): ''' Determine the pen to use to display the given node ''' pen = self.SELECTED_PEN if node == self.selectedNode else self.DEFAULT_PEN #style = wx.DOT if isSequentialNode else wx.SOLID #pen.SetStyle(style) return pen class DefaultAdapter(object): def parallel_children(self, node, recursive=False): children = node.parallel_children[:] if recursive: for child in node.parallel_children: children.extend(self.parallel_children(child, True)) return children def sequential_children(self, node): return node.sequential_children def children(self, node): return self.parallel_children(node) + self.sequential_children(node) def bounds(self, node): times = [node.start, node.stop] for child in self.children(node): times.extend(self.bounds(child)) return min(times), max(times) def start(self, node, recursive=False): starts = [node.start] if recursive: starts.extend([self.start(child, True) \ for child in self.children(node)]) return float(min(starts)) def stop(self, node, recursive=False): stops = [node.stop] if recursive: stops.extend([self.stop(child, True) \ for child in self.children(node)]) return float(max(stops)) def label(self, node): return node.path def background_color(self, node): return None def foreground_color(self, node, depth): return None def icon(self, node): return None def now(self): return 0 def nowlabel(self): return 'Now' class TestApp(wx.App): ''' Basic application for holding the viewing Frame ''' def __init__(self, size): self.size = size super(TestApp, self).__init__(0) def OnInit(self): ''' Initialise the application. 
''' wx.InitAllImageHandlers() self.frame = wx.Frame(None) self.frame.CreateStatusBar() model = self.get_model(self.size) self.timeline = TimeLine(self.frame, model=model) self.frame.Show(True) return True def get_model(self, size): parallel_children, sequential_children = [], [] if size > 0: parallel_children = [self.get_model(size-1) for i in range(size)] sequential_children = [Node('Seq 1', 30+10*size, 40+10*size, [], []), Node('Seq 2', 80-10*size, 90-10*size, [], [])] return Node('Node %d'%size, 0+5*size, 100-5*size, parallel_children, sequential_children) class Node(object): def __init__(self, path, start, stop, subnodes, events): self.path = path self.start = start self.stop = stop self.parallel_children = subnodes self.sequential_children = events def __repr__(self): return '%s(%r, %r, %r, %r, %r)'%(self.__class__.__name__, self.path, self.start, self.stop, self.parallel_children, self.sequential_children) usage = 'timeline.py [size]' def main(): """Mainloop for the application""" import sys size = 3 if len(sys.argv) > 1: if sys.argv[1] in ('-h', '--help'): print usage else: try: size = int(sys.argv[1]) except ValueError: print usage else: app = TestApp(size) app.MainLoop() if __name__ == "__main__": main()
gpl-3.0
-3,925,512,095,869,851,000
-3,901,000,360,021,019,000
36.591837
113
0.583002
false
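A small sketch of the data side of the timeline record above: how `Node` trees and `DefaultAdapter` fit together. It assumes a classic (2.8-era) wxPython install, since the module imports wx at load time, and that the file is saved locally as timeline.py; `TestApp` at the bottom of the record already shows how to host the widget in a frame.

# Sketch: the Node/DefaultAdapter pair that feeds the TimeLine widget above.
# Assumes classic wxPython is installed and the record is saved as timeline.py.
import wx.lib.newevent           # the module expects this submodule to be loadable
from timeline import DefaultAdapter, Node

# One parallel child holding one sequential event.
event = Node('task/leaf/event', 25, 30, [], [])
leaf = Node('task/leaf', 20, 60, [], [event])
root = Node('task', 0, 100, [leaf], [])

adapter = DefaultAdapter()
print(adapter.bounds(root))      # (0, 100): min start / max stop over the whole tree
print(adapter.label(leaf))       # 'task/leaf' (labels are just the node paths)
print(adapter.children(root))    # parallel children first, then sequential ones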
dbmi-pitt/DIKB-Micropublication
scripts/mp-scripts/Bio/EUtils/setup.py
1
1941
import sys from distutils.core import setup try: import EUtils except ImportError: import __init__ as EUtils def _dict(**kwargs): return kwargs d = _dict( name = "EUtils", version = EUtils.__version__, description = "Client interface to NCBI's EUtils/Entrez server", author = "Andrew Dalke", author_email = "[email protected]", maintainer = "Dalke Scientific Software, LLC", maintainer_email = "[email protected]", url = "http://www.dalkescientific.com/EUtils/", long_description = """\ EUtils is a client library for the Entrez databases at NCBI. NCBI provides the EUtils web service so that software can query Entrez directly, rather than going through the web interface and dealing with the hassles of web scraping. For more information see http://www.ncbi.nlm.nih.gov/entrez/query/static/eutils_help.html This package provides two levels of interface. The lowest one makes a programmatic interface to construct the query URL and make the request. The higher level ones support history tracking and parsing of query results. These greatly simplify working with the EUtils server. """, package_dir = {"": ".."}, packages = ["EUtils", "EUtils.DTDs"], classifiers = [ "Development Status :: 4 - Beta", "Intended Audience :: Developers", "Intended Audience :: Science/Research", "License :: Freely Distributable", "Natural Language :: English", "Operating System :: OS Independent", "Programming Language :: Python", "Topic :: Scientific/Engineering :: Bio-Informatics", # a '-'? ! "Topic :: Scientific/Engineering :: Medical Science Apps.", "Topic :: Software Development :: Libraries :: Python Modules", "Topic :: Internet", ], ) if sys.version_info < (2,2,4): del d["classifiers"] if __name__ == "__main__": setup(**d)
apache-2.0
7,258,721,842,337,729,000
-1,465,556,086,708,263,000
30.306452
72
0.659454
false
igabriel85/dmon-adp
adpformater/adpformater.py
1
1615
import pandas as pd class DataFormatter(): def __init__(self, dataloc): self.dataloc = dataloc def aggJsonToCsv(self): return "CSV file" def expTimestamp(self): return "Expand metric timestamp" def window(self): return "Window metrics" def pivot(self): return "Pivot values" def addID(self): return "Add new ID as index" def removeID(self): return "Remove selected column as index" def renameHeader(self): return "Rename headers" def normalize(self): return "Normalize data" def denormalize(self): return "Denormalize data" input_table = pd.read_csv("metrics.csv") for index, row in input_table.iterrows(): input_table = input_table.append([row]*9) input_table = input_table.sort_values(['row ID']) input_table = input_table.reset_index(drop=True) for index, rows in input_table.iterrows(): if int(index) > 59: print "Index to big!" time = rows[0].split(", ", 1) #In Knime row for timestamp is row(55) last one timeHour = time[1].split(":", 2) timeHourSeconds = timeHour[2].split(".", 1) timeHourSecondsDecimal = timeHour[2].split(".", 1) timeHourSecondsDecimal[0] = str(index) if len(timeHourSecondsDecimal[0]) == 1: timeHourSecondsDecimal[0] = '0%s' %timeHourSecondsDecimal[0] decimal = '.'.join(timeHourSecondsDecimal) timeHour[2] = decimal timenew = ':'.join(timeHour) time[1] = timenew finalString = ', '.join(time) input_table.set_value(index, 'row ID', finalString) input_table.to_csv('out.csv')
apache-2.0
-5,506,313,995,805,443,000
4,321,774,828,369,064,400
24.234375
81
0.636533
false
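The loop at the bottom of the record above fans every input row out into ten copies and then overwrites the seconds field of the first column's timestamp with the running row index. A compact sketch of the same idea against current pandas; the 'row ID' column name and the 'name, HH:MM:SS.f' timestamp layout are assumptions taken from that script.

# Sketch: the row-expansion / timestamp-rewrite idea from the script above,
# using an in-memory frame instead of metrics.csv (column layout assumed).
import pandas as pd

df = pd.DataFrame({'row ID': ['node1, 12:30:00.0', 'node1, 12:31:00.0'],
                   'cpu': [0.5, 0.7]})

# Ten copies of every row, grouped together by sorting on the ID column.
expanded = pd.concat([df] * 10).sort_values('row ID').reset_index(drop=True)

def rewrite_seconds(ts, index):
    # Replace the integer seconds with the zero-padded index, keep the decimals.
    prefix, hms = ts.split(', ', 1)
    hour, minute, seconds = hms.split(':', 2)
    return '%s, %s:%s:%02d.%s' % (prefix, hour, minute, index,
                                  seconds.split('.', 1)[1])

# The original script only supports 60 rows (it warns past index 59).
expanded['row ID'] = [rewrite_seconds(ts, i)
                      for i, ts in enumerate(expanded['row ID'])]
expanded.to_csv('out.csv', index=False)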
boomsbloom/dtm-fmri
DTM/for_gensim/lib/python2.7/site-packages/boto/dynamodb2/items.py
32
14656
from copy import deepcopy class NEWVALUE(object): # A marker for new data added. pass class Item(object): """ An object representing the item data within a DynamoDB table. An item is largely schema-free, meaning it can contain any data. The only limitation is that it must have data for the fields in the ``Table``'s schema. This object presents a dictionary-like interface for accessing/storing data. It also tries to intelligently track how data has changed throughout the life of the instance, to be as efficient as possible about updates. Empty items, or items that have no data, are considered falsey. """ def __init__(self, table, data=None, loaded=False): """ Constructs an (unsaved) ``Item`` instance. To persist the data in DynamoDB, you'll need to call the ``Item.save`` (or ``Item.partial_save``) on the instance. Requires a ``table`` parameter, which should be a ``Table`` instance. This is required, as DynamoDB's API is focus around all operations being table-level. It's also for persisting schema around many objects. Optionally accepts a ``data`` parameter, which should be a dictionary of the fields & values of the item. Alternatively, an ``Item`` instance may be provided from which to extract the data. Optionally accepts a ``loaded`` parameter, which should be a boolean. ``True`` if it was preexisting data loaded from DynamoDB, ``False`` if it's new data from the user. Default is ``False``. Example:: >>> users = Table('users') >>> user = Item(users, data={ ... 'username': 'johndoe', ... 'first_name': 'John', ... 'date_joined': 1248o61592, ... }) # Change existing data. >>> user['first_name'] = 'Johann' # Add more data. >>> user['last_name'] = 'Doe' # Delete data. >>> del user['date_joined'] # Iterate over all the data. >>> for field, val in user.items(): ... print "%s: %s" % (field, val) username: johndoe first_name: John date_joined: 1248o61592 """ self.table = table self._loaded = loaded self._orig_data = {} self._data = data self._dynamizer = table._dynamizer if isinstance(self._data, Item): self._data = self._data._data if self._data is None: self._data = {} if self._loaded: self._orig_data = deepcopy(self._data) def __getitem__(self, key): return self._data.get(key, None) def __setitem__(self, key, value): self._data[key] = value def __delitem__(self, key): if not key in self._data: return del self._data[key] def keys(self): return self._data.keys() def values(self): return self._data.values() def items(self): return self._data.items() def get(self, key, default=None): return self._data.get(key, default) def __iter__(self): for key in self._data: yield self._data[key] def __contains__(self, key): return key in self._data def __bool__(self): return bool(self._data) __nonzero__ = __bool__ def _determine_alterations(self): """ Checks the ``-orig_data`` against the ``_data`` to determine what changes to the data are present. Returns a dictionary containing the keys ``adds``, ``changes`` & ``deletes``, containing the updated data. """ alterations = { 'adds': {}, 'changes': {}, 'deletes': [], } orig_keys = set(self._orig_data.keys()) data_keys = set(self._data.keys()) # Run through keys we know are in both for changes. for key in orig_keys.intersection(data_keys): if self._data[key] != self._orig_data[key]: if self._is_storable(self._data[key]): alterations['changes'][key] = self._data[key] else: alterations['deletes'].append(key) # Run through additions. 
for key in data_keys.difference(orig_keys): if self._is_storable(self._data[key]): alterations['adds'][key] = self._data[key] # Run through deletions. for key in orig_keys.difference(data_keys): alterations['deletes'].append(key) return alterations def needs_save(self, data=None): """ Returns whether or not the data has changed on the ``Item``. Optionally accepts a ``data`` argument, which accepts the output from ``self._determine_alterations()`` if you've already called it. Typically unnecessary to do. Default is ``None``. Example: >>> user.needs_save() False >>> user['first_name'] = 'Johann' >>> user.needs_save() True """ if data is None: data = self._determine_alterations() needs_save = False for kind in ['adds', 'changes', 'deletes']: if len(data[kind]): needs_save = True break return needs_save def mark_clean(self): """ Marks an ``Item`` instance as no longer needing to be saved. Example: >>> user.needs_save() False >>> user['first_name'] = 'Johann' >>> user.needs_save() True >>> user.mark_clean() >>> user.needs_save() False """ self._orig_data = deepcopy(self._data) def mark_dirty(self): """ DEPRECATED: Marks an ``Item`` instance as needing to be saved. This method is no longer necessary, as the state tracking on ``Item`` has been improved to automatically detect proper state. """ return def load(self, data): """ This is only useful when being handed raw data from DynamoDB directly. If you have a Python datastructure already, use the ``__init__`` or manually set the data instead. Largely internal, unless you know what you're doing or are trying to mix the low-level & high-level APIs. """ self._data = {} for field_name, field_value in data.get('Item', {}).items(): self[field_name] = self._dynamizer.decode(field_value) self._loaded = True self._orig_data = deepcopy(self._data) def get_keys(self): """ Returns a Python-style dict of the keys/values. Largely internal. """ key_fields = self.table.get_key_fields() key_data = {} for key in key_fields: key_data[key] = self[key] return key_data def get_raw_keys(self): """ Returns a DynamoDB-style dict of the keys/values. Largely internal. """ raw_key_data = {} for key, value in self.get_keys().items(): raw_key_data[key] = self._dynamizer.encode(value) return raw_key_data def build_expects(self, fields=None): """ Builds up a list of expecations to hand off to DynamoDB on save. Largely internal. """ expects = {} if fields is None: fields = list(self._data.keys()) + list(self._orig_data.keys()) # Only uniques. fields = set(fields) for key in fields: expects[key] = { 'Exists': True, } value = None # Check for invalid keys. if not key in self._orig_data and not key in self._data: raise ValueError("Unknown key %s provided." % key) # States: # * New field (only in _data) # * Unchanged field (in both _data & _orig_data, same data) # * Modified field (in both _data & _orig_data, different data) # * Deleted field (only in _orig_data) orig_value = self._orig_data.get(key, NEWVALUE) current_value = self._data.get(key, NEWVALUE) if orig_value == current_value: # Existing field unchanged. value = current_value else: if key in self._data: if not key in self._orig_data: # New field. expects[key]['Exists'] = False else: # Existing field modified. value = orig_value else: # Existing field deleted. value = orig_value if value is not None: expects[key]['Value'] = self._dynamizer.encode(value) return expects def _is_storable(self, value): # We need to prevent ``None``, empty string & empty set from # heading to DDB, but allow false-y values like 0 & False make it. 
if not value: if not value in (0, 0.0, False): return False return True def prepare_full(self): """ Runs through all fields & encodes them to be handed off to DynamoDB as part of an ``save`` (``put_item``) call. Largely internal. """ # This doesn't save on its own. Rather, we prepare the datastructure # and hand-off to the table to handle creation/update. final_data = {} for key, value in self._data.items(): if not self._is_storable(value): continue final_data[key] = self._dynamizer.encode(value) return final_data def prepare_partial(self): """ Runs through **ONLY** the changed/deleted fields & encodes them to be handed off to DynamoDB as part of an ``partial_save`` (``update_item``) call. Largely internal. """ # This doesn't save on its own. Rather, we prepare the datastructure # and hand-off to the table to handle creation/update. final_data = {} fields = set() alterations = self._determine_alterations() for key, value in alterations['adds'].items(): final_data[key] = { 'Action': 'PUT', 'Value': self._dynamizer.encode(self._data[key]) } fields.add(key) for key, value in alterations['changes'].items(): final_data[key] = { 'Action': 'PUT', 'Value': self._dynamizer.encode(self._data[key]) } fields.add(key) for key in alterations['deletes']: final_data[key] = { 'Action': 'DELETE', } fields.add(key) return final_data, fields def partial_save(self): """ Saves only the changed data to DynamoDB. Extremely useful for high-volume/high-write data sets, this allows you to update only a handful of fields rather than having to push entire items. This prevents many accidental overwrite situations as well as saves on the amount of data to transfer over the wire. Returns ``True`` on success, ``False`` if no save was performed or the write failed. Example:: >>> user['last_name'] = 'Doh!' # Only the last name field will be sent to DynamoDB. >>> user.partial_save() """ key = self.get_keys() # Build a new dict of only the data we're changing. final_data, fields = self.prepare_partial() if not final_data: return False # Remove the key(s) from the ``final_data`` if present. # They should only be present if this is a new item, in which # case we shouldn't be sending as part of the data to update. for fieldname, value in key.items(): if fieldname in final_data: del final_data[fieldname] try: # It's likely also in ``fields``, so remove it there too. fields.remove(fieldname) except KeyError: pass # Build expectations of only the fields we're planning to update. expects = self.build_expects(fields=fields) returned = self.table._update_item(key, final_data, expects=expects) # Mark the object as clean. self.mark_clean() return returned def save(self, overwrite=False): """ Saves all data to DynamoDB. By default, this attempts to ensure that none of the underlying data has changed. If any fields have changed in between when the ``Item`` was constructed & when it is saved, this call will fail so as not to cause any data loss. If you're sure possibly overwriting data is acceptable, you can pass an ``overwrite=True``. If that's not acceptable, you may be able to use ``Item.partial_save`` to only write the changed field data. Optionally accepts an ``overwrite`` parameter, which should be a boolean. If you provide ``True``, the item will be forcibly overwritten within DynamoDB, even if another process changed the data in the meantime. (Default: ``False``) Returns ``True`` on success, ``False`` if no save was performed. Example:: >>> user['last_name'] = 'Doh!' # All data on the Item is sent to DynamoDB. 
>>> user.save() # If it fails, you can overwrite. >>> user.save(overwrite=True) """ if not self.needs_save() and not overwrite: return False final_data = self.prepare_full() expects = None if overwrite is False: # Build expectations about *all* of the data. expects = self.build_expects() returned = self.table._put_item(final_data, expects=expects) # Mark the object as clean. self.mark_clean() return returned def delete(self): """ Deletes the item's data to DynamoDB. Returns ``True`` on success. Example:: # Buh-bye now. >>> user.delete() """ key_data = self.get_keys() return self.table.delete_item(**key_data)
mit
3,846,824,656,394,053,600
-8,741,851,861,048,472,000
29.985201
80
0.543532
false
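A condensed lifecycle sketch pulling together the docstring examples from the Item record above. The table name, key schema and field values are illustrative, and working AWS credentials plus an existing DynamoDB table are assumed.

# Sketch: typical Item lifecycle with boto.dynamodb2 (illustrative names;
# assumes AWS credentials and an existing 'users' table keyed on 'username').
from boto.dynamodb2.table import Table
from boto.dynamodb2.items import Item

users = Table('users')

user = Item(users, data={'username': 'johndoe', 'first_name': 'John'})
user.save()                   # full put_item, guarded by build_expects()

user['last_name'] = 'Doe'     # only this change is tracked...
assert user.needs_save()
user.partial_save()           # ...and only it is sent (update_item)

del user['first_name']        # deletions are tracked the same way
user.partial_save()

fresh = users.get_item(username='johndoe')   # returns a loaded Item
assert fresh['last_name'] == 'Doe'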
IllusionRom-deprecated/android_platform_tools_idea
python/lib/Lib/site-packages/django/contrib/localflavor/nl/forms.py
311
2796
""" NL-specific Form helpers """ import re from django.core.validators import EMPTY_VALUES from django.forms import ValidationError from django.forms.fields import Field, Select from django.utils.translation import ugettext_lazy as _ from django.utils.encoding import smart_unicode pc_re = re.compile('^\d{4}[A-Z]{2}$') sofi_re = re.compile('^\d{9}$') numeric_re = re.compile('^\d+$') class NLZipCodeField(Field): """ A Dutch postal code field. """ default_error_messages = { 'invalid': _('Enter a valid postal code'), } def clean(self, value): super(NLZipCodeField, self).clean(value) if value in EMPTY_VALUES: return u'' value = value.strip().upper().replace(' ', '') if not pc_re.search(value): raise ValidationError(self.error_messages['invalid']) if int(value[:4]) < 1000: raise ValidationError(self.error_messages['invalid']) return u'%s %s' % (value[:4], value[4:]) class NLProvinceSelect(Select): """ A Select widget that uses a list of provinces of the Netherlands as its choices. """ def __init__(self, attrs=None): from nl_provinces import PROVINCE_CHOICES super(NLProvinceSelect, self).__init__(attrs, choices=PROVINCE_CHOICES) class NLPhoneNumberField(Field): """ A Dutch telephone number field. """ default_error_messages = { 'invalid': _('Enter a valid phone number'), } def clean(self, value): super(NLPhoneNumberField, self).clean(value) if value in EMPTY_VALUES: return u'' phone_nr = re.sub('[\-\s\(\)]', '', smart_unicode(value)) if len(phone_nr) == 10 and numeric_re.search(phone_nr): return value if phone_nr[:3] == '+31' and len(phone_nr) == 12 and \ numeric_re.search(phone_nr[3:]): return value raise ValidationError(self.error_messages['invalid']) class NLSoFiNumberField(Field): """ A Dutch social security number (SoFi/BSN) field. http://nl.wikipedia.org/wiki/Sofinummer """ default_error_messages = { 'invalid': _('Enter a valid SoFi number'), } def clean(self, value): super(NLSoFiNumberField, self).clean(value) if value in EMPTY_VALUES: return u'' if not sofi_re.search(value): raise ValidationError(self.error_messages['invalid']) if int(value) == 0: raise ValidationError(self.error_messages['invalid']) checksum = 0 for i in range(9, 1, -1): checksum += int(value[9-i]) * i checksum -= int(value[-1]) if checksum % 11 != 0: raise ValidationError(self.error_messages['invalid']) return value
apache-2.0
6,702,166,754,058,011,000
-6,355,327,777,432,141,000
26.683168
79
0.59907
false
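A short sketch of the NL form fields above in action. It assumes a configured Django settings module (the error messages use lazy translations), and the sample values are chosen to satisfy the regexes and checksum in the record.

# Sketch: exercising the NL-specific fields defined above
# (requires a configured Django settings module).
from django.forms import ValidationError
from django.contrib.localflavor.nl.forms import (
    NLZipCodeField, NLPhoneNumberField, NLSoFiNumberField)

zipcode = NLZipCodeField(required=False)
assert zipcode.clean('1234ab') == u'1234 AB'     # normalised to '#### XX'
assert zipcode.clean('') == u''                  # empty allowed when not required

phone = NLPhoneNumberField(required=False)
assert phone.clean('012-3456789') == '012-3456789'   # 10 digits after stripping -, (), spaces

sofi = NLSoFiNumberField(required=False)
assert sofi.clean('123456782') == '123456782'        # passes the 11-test checksum

try:
    zipcode.clean('0123AB')       # postcodes below 1000 are rejected
except ValidationError:
    pass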
tdyas/pants
src/python/pants/backend/jvm/register.py
1
12485
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). """Support for both Java and Scala.""" from pants.backend.jvm.artifact import Artifact from pants.backend.jvm.ossrh_publication_metadata import ( Developer, License, OSSRHPublicationMetadata, Scm, ) from pants.backend.jvm.repository import Repository as repo from pants.backend.jvm.scala_artifact import ScalaArtifact from pants.backend.jvm.subsystems.jar_dependency_management import JarDependencyManagementSetup from pants.backend.jvm.subsystems.scala_platform import ScalaPlatform from pants.backend.jvm.subsystems.scoverage_platform import ScoveragePlatform from pants.backend.jvm.subsystems.shader import Shading from pants.backend.jvm.target_types import ( AnnotationProcessor, JarLibrary, JavaAgent, JavacPlugin, JavaLibrary, JunitTests, JvmApp, JvmBenchmark, JvmBinary, JvmCredentials, JvmPrepCommand, ManagedJarDependencies, NetrcCredentials, ScalacPlugin, ScalaLibrary, UnpackedJars, ) from pants.backend.jvm.targets.annotation_processor import ( AnnotationProcessor as AnnotationProcessorV1, ) from pants.backend.jvm.targets.benchmark import Benchmark as BenchmarkV1 from pants.backend.jvm.targets.credentials import LiteralCredentials as LiteralCredentialsV1 from pants.backend.jvm.targets.credentials import NetrcCredentials as NetrcCredentialsV1 from pants.backend.jvm.targets.jar_library import JarLibrary as JarLibraryV1 from pants.backend.jvm.targets.java_agent import JavaAgent as JavaAgentV1 from pants.backend.jvm.targets.java_library import JavaLibrary as JavaLibraryV1 from pants.backend.jvm.targets.javac_plugin import JavacPlugin as JavacPluginV1 from pants.backend.jvm.targets.junit_tests import JUnitTests as JUnitTestsV1 from pants.backend.jvm.targets.jvm_app import JvmApp as JvmAppV1 from pants.backend.jvm.targets.jvm_binary import Duplicate, JarRules from pants.backend.jvm.targets.jvm_binary import JvmBinary as JvmBinaryV1 from pants.backend.jvm.targets.jvm_binary import Skip from pants.backend.jvm.targets.jvm_prep_command import JvmPrepCommand as JvmPrepCommandV1 from pants.backend.jvm.targets.managed_jar_dependencies import ( ManagedJarDependencies as ManagedJarDependenciesV1, ) from pants.backend.jvm.targets.managed_jar_dependencies import ManagedJarLibraries from pants.backend.jvm.targets.scala_exclude import ScalaExclude from pants.backend.jvm.targets.scala_jar_dependency import ScalaJarDependency from pants.backend.jvm.targets.scala_library import ScalaLibrary as ScalaLibraryV1 from pants.backend.jvm.targets.scalac_plugin import ScalacPlugin as ScalacPluginV1 from pants.backend.jvm.targets.unpacked_jars import UnpackedJars as UnpackedJarsV1 from pants.backend.jvm.tasks.analysis_extraction import AnalysisExtraction from pants.backend.jvm.tasks.benchmark_run import BenchmarkRun from pants.backend.jvm.tasks.binary_create import BinaryCreate from pants.backend.jvm.tasks.bootstrap_jvm_tools import BootstrapJvmTools from pants.backend.jvm.tasks.bundle_create import BundleCreate from pants.backend.jvm.tasks.check_published_deps import CheckPublishedDeps from pants.backend.jvm.tasks.checkstyle import Checkstyle from pants.backend.jvm.tasks.classmap import ClassmapTask from pants.backend.jvm.tasks.consolidate_classpath import ConsolidateClasspath from pants.backend.jvm.tasks.coursier_resolve import CoursierResolve from pants.backend.jvm.tasks.detect_duplicates import DuplicateDetector from pants.backend.jvm.tasks.ivy_imports import IvyImports from 
pants.backend.jvm.tasks.ivy_outdated import IvyOutdated from pants.backend.jvm.tasks.jar_create import JarCreate from pants.backend.jvm.tasks.jar_publish import JarPublish from pants.backend.jvm.tasks.javadoc_gen import JavadocGen from pants.backend.jvm.tasks.junit_run import JUnitRun from pants.backend.jvm.tasks.jvm_compile.javac.javac_compile import JavacCompile from pants.backend.jvm.tasks.jvm_compile.jvm_classpath_publisher import RuntimeClasspathPublisher from pants.backend.jvm.tasks.jvm_compile.rsc.rsc_compile import RscCompile from pants.backend.jvm.tasks.jvm_dependency_check import JvmDependencyCheck from pants.backend.jvm.tasks.jvm_dependency_usage import JvmDependencyUsage from pants.backend.jvm.tasks.jvm_platform_analysis import JvmPlatformExplain, JvmPlatformValidate from pants.backend.jvm.tasks.jvm_run import JvmRun from pants.backend.jvm.tasks.nailgun_task import NailgunKillall from pants.backend.jvm.tasks.prepare_resources import PrepareResources from pants.backend.jvm.tasks.prepare_services import PrepareServices from pants.backend.jvm.tasks.provide_tools_jar import ProvideToolsJar from pants.backend.jvm.tasks.run_jvm_prep_command import ( RunBinaryJvmPrepCommand, RunCompileJvmPrepCommand, RunTestJvmPrepCommand, ) from pants.backend.jvm.tasks.scala_repl import ScalaRepl from pants.backend.jvm.tasks.scaladoc_gen import ScaladocGen from pants.backend.jvm.tasks.scalafix_task import ScalaFixCheck, ScalaFixFix from pants.backend.jvm.tasks.scalafmt_task import ScalaFmtCheckFormat, ScalaFmtFormat from pants.backend.jvm.tasks.scalastyle_task import ScalastyleTask from pants.backend.jvm.tasks.unpack_jars import UnpackJars from pants.backend.project_info.tasks.export_dep_as_jar import ExportDepAsJar from pants.build_graph.app_base import Bundle, DirectoryReMapper from pants.build_graph.build_file_aliases import BuildFileAliases from pants.goal.goal import Goal from pants.goal.task_registrar import TaskRegistrar as task from pants.java.jar.exclude import Exclude from pants.java.jar.jar_dependency import JarDependencyParseContextWrapper def build_file_aliases(): return BuildFileAliases( targets={ "annotation_processor": AnnotationProcessorV1, "benchmark": BenchmarkV1, "credentials": LiteralCredentialsV1, "jar_library": JarLibraryV1, "java_agent": JavaAgentV1, "java_library": JavaLibraryV1, "javac_plugin": JavacPluginV1, "junit_tests": JUnitTestsV1, "jvm_app": JvmAppV1, "jvm_binary": JvmBinaryV1, "jvm_prep_command": JvmPrepCommandV1, "managed_jar_dependencies": ManagedJarDependenciesV1, "netrc_credentials": NetrcCredentialsV1, "scala_library": ScalaLibraryV1, "scalac_plugin": ScalacPluginV1, "unpacked_jars": UnpackedJarsV1, }, objects={ "artifact": Artifact, "scala_artifact": ScalaArtifact, "ossrh": OSSRHPublicationMetadata, "license": License, "scm": Scm, "developer": Developer, "github": Scm.github, "DirectoryReMapper": DirectoryReMapper, "Duplicate": Duplicate, "exclude": Exclude, "scala_jar": ScalaJarDependency, "scala_exclude": ScalaExclude, "jar_rules": JarRules, "repository": repo, "Skip": Skip, "shading_relocate": Shading.create_relocate, "shading_exclude": Shading.create_exclude, "shading_keep": Shading.create_keep, "shading_zap": Shading.create_zap, "shading_relocate_package": Shading.create_relocate_package, "shading_exclude_package": Shading.create_exclude_package, "shading_keep_package": Shading.create_keep_package, "shading_zap_package": Shading.create_zap_package, }, context_aware_object_factories={ "bundle": Bundle, "jar": JarDependencyParseContextWrapper, 
"managed_jar_libraries": ManagedJarLibraries, }, ) def global_subsystems(): return ( ScalaPlatform, ScoveragePlatform, ) # TODO https://github.com/pantsbuild/pants/issues/604 register_goals def register_goals(): ng_killall = task(name="ng-killall", action=NailgunKillall) ng_killall.install() Goal.by_name("invalidate").install(ng_killall, first=True) Goal.by_name("clean-all").install(ng_killall, first=True) task(name="jar-dependency-management", action=JarDependencyManagementSetup).install("bootstrap") task(name="jvm-platform-explain", action=JvmPlatformExplain).install("jvm-platform-explain") task(name="jvm-platform-validate", action=JvmPlatformValidate).install("jvm-platform-validate") task(name="bootstrap-jvm-tools", action=BootstrapJvmTools).install("bootstrap") task(name="provide-tools-jar", action=ProvideToolsJar).install("bootstrap") # Compile task(name="rsc", action=RscCompile).install("compile") task(name="javac", action=JavacCompile).install("compile") # Analysis extraction. task(name="zinc", action=AnalysisExtraction).install("analysis") # Dependency resolution. task(name="coursier", action=CoursierResolve).install("resolve") task(name="ivy-imports", action=IvyImports).install("imports") task(name="unpack-jars", action=UnpackJars).install() task(name="ivy", action=IvyOutdated).install("outdated") # Resource preparation. task(name="prepare", action=PrepareResources).install("resources") task(name="services", action=PrepareServices).install("resources") task(name="export-classpath", action=RuntimeClasspathPublisher).install() # This goal affects the contents of the runtime_classpath, and should not be # combined with any other goals on the command line. task(name="export-dep-as-jar", action=ExportDepAsJar).install() task(name="jvm", action=JvmDependencyUsage).install("dep-usage") task(name="classmap", action=ClassmapTask).install("classmap") # Generate documentation. task(name="javadoc", action=JavadocGen).install("doc") task(name="scaladoc", action=ScaladocGen).install("doc") # Bundling. task(name="create", action=JarCreate).install("jar") detect_duplicates = task(name="dup", action=DuplicateDetector) task(name="jvm", action=BinaryCreate).install("binary") detect_duplicates.install("binary") task(name="consolidate-classpath", action=ConsolidateClasspath).install("bundle") task(name="jvm", action=BundleCreate).install("bundle") detect_duplicates.install("bundle") task(name="detect-duplicates", action=DuplicateDetector).install() # Publishing. task(name="check-published-deps", action=CheckPublishedDeps).install("check-published-deps") task(name="jar", action=JarPublish).install("publish") # Testing. task(name="junit", action=JUnitRun).install("test") task(name="bench", action=BenchmarkRun).install("bench") # Linting. task(name="scalafix", action=ScalaFixCheck).install("lint") task(name="scalafmt", action=ScalaFmtCheckFormat, serialize=False).install("lint") task(name="scalastyle", action=ScalastyleTask, serialize=False).install("lint") task(name="checkstyle", action=Checkstyle, serialize=False).install("lint") task(name="jvm-dep-check", action=JvmDependencyCheck, serialize=False).install("lint") # Formatting. # Scalafix has to go before scalafmt in order not to # further change Scala files after scalafmt. task(name="scalafix", action=ScalaFixFix).install("fmt") task(name="scalafmt", action=ScalaFmtFormat, serialize=False).install("fmt") # Running. 
task(name="jvm", action=JvmRun, serialize=False).install("run") task(name="jvm-dirty", action=JvmRun, serialize=False).install("run-dirty") task(name="scala", action=ScalaRepl, serialize=False).install("repl") task(name="scala-dirty", action=ScalaRepl, serialize=False).install("repl-dirty") task(name="test-jvm-prep-command", action=RunTestJvmPrepCommand).install("test", first=True) task(name="binary-jvm-prep-command", action=RunBinaryJvmPrepCommand).install( "binary", first=True ) task(name="compile-jvm-prep-command", action=RunCompileJvmPrepCommand).install( "compile", first=True ) def target_types(): return [ AnnotationProcessor, JvmBenchmark, JvmCredentials, JarLibrary, JavaAgent, JavaLibrary, JavacPlugin, JunitTests, JvmApp, JvmBinary, JvmPrepCommand, ManagedJarDependencies, NetrcCredentials, ScalaLibrary, ScalacPlugin, UnpackedJars, ]
apache-2.0
367,951,564,733,665,700
-361,434,650,812,537,540
42.961268
100
0.743132
false
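The module above is the standard shape of a Pants v1 backend entry point: `build_file_aliases()`, `register_goals()` and `target_types()` are discovered by the plugin system. Below is a stripped-down sketch of what a hypothetical custom backend's register.py would look like; every `mybackend` name is an illustrative placeholder.

# Sketch: minimal register.py for a hypothetical custom Pants v1 backend.
# MyLibraryV1 / MyLintTask / mybackend are placeholders, not real modules.
from pants.build_graph.build_file_aliases import BuildFileAliases
from pants.goal.task_registrar import TaskRegistrar as task

from mybackend.targets import MyLibraryV1   # hypothetical target type
from mybackend.tasks import MyLintTask      # hypothetical task


def build_file_aliases():
    # Exposes `my_library(...)` as a symbol usable in BUILD files.
    return BuildFileAliases(targets={"my_library": MyLibraryV1})


def register_goals():
    # Installs the task under the existing `lint` goal, like the JVM linters above.
    task(name="my-lint", action=MyLintTask).install("lint")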
morenopc/edx-platform
common/test/acceptance/pages/studio/unit.py
13
3855
""" Unit page in Studio """ from bok_choy.page_object import PageObject from bok_choy.promise import EmptyPromise, Promise from . import BASE_URL from .container import ContainerPage class UnitPage(PageObject): """ Unit page in Studio """ def __init__(self, browser, unit_locator): super(UnitPage, self).__init__(browser) self.unit_locator = unit_locator @property def url(self): """URL to the pages UI in a course.""" return "{}/unit/{}".format(BASE_URL, self.unit_locator) def is_browser_on_page(self): def _is_finished_loading(): # Wait until all components have been loaded number_of_leaf_xblocks = len(self.q(css='{} .xblock-student_view'.format(Component.BODY_SELECTOR)).results) number_of_container_xblocks = len(self.q(css='{} .wrapper-xblock'.format(Component.BODY_SELECTOR)).results) is_done = len(self.q(css=Component.BODY_SELECTOR).results) == number_of_leaf_xblocks + number_of_container_xblocks return (is_done, is_done) # First make sure that an element with the view-unit class is present on the page, # and then wait to make sure that the xblocks are all there return ( self.q(css='body.view-unit').present and Promise(_is_finished_loading, 'Finished rendering the xblocks in the unit.').fulfill() ) @property def components(self): """ Return a list of components loaded on the unit page. """ return self.q(css=Component.BODY_SELECTOR).map( lambda el: Component(self.browser, el.get_attribute('data-locator'))).results def edit_draft(self): """ Started editing a draft of this unit. """ EmptyPromise( lambda: self.q(css='.create-draft').present, 'Wait for edit draft link to be present' ).fulfill() self.q(css='.create-draft').first.click() EmptyPromise( lambda: self.q(css='.editing-draft-alert').present, 'Wait for draft mode to be activated' ).fulfill() class Component(PageObject): """ A PageObject representing an XBlock child on the Studio UnitPage (including the editing controls). """ url = None BODY_SELECTOR = '.component' NAME_SELECTOR = '.component-header' def __init__(self, browser, locator): super(Component, self).__init__(browser) self.locator = locator def is_browser_on_page(self): return self.q(css='{}[data-locator="{}"]'.format(self.BODY_SELECTOR, self.locator)).present def _bounded_selector(self, selector): """ Return `selector`, but limited to this particular `CourseOutlineChild` context """ return '{}[data-locator="{}"] {}'.format( self.BODY_SELECTOR, self.locator, selector ) @property def name(self): titles = self.q(css=self._bounded_selector(self.NAME_SELECTOR)).text if titles: return titles[0] else: return None @property def preview_selector(self): return self._bounded_selector('.xblock-student_view') def edit(self): self.q(css=self._bounded_selector('.edit-button')).first.click() EmptyPromise( lambda: self.q(css='.xblock-studio_view').present, 'Wait for the Studio editor to be present' ).fulfill() return self @property def editor_selector(self): return '.xblock-studio_view' def go_to_container(self): """ Open the container page linked to by this component, and return an initialized :class:`.ContainerPage` for that xblock. """ return ContainerPage(self.browser, self.locator).visit()
agpl-3.0
623,634,289,406,184,200
8,316,220,358,996,145,000
30.341463
126
0.606226
false
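A hedged sketch of how the Studio page objects above are driven from a bok-choy acceptance test. The package import root, the browser fixture and the unit locator are all placeholders supplied by the surrounding test harness.

# Sketch: driving UnitPage/Component from a bok-choy test (harness assumed).
# The import root and locator format are assumptions based on the record path.
from common.test.acceptance.pages.studio.unit import UnitPage

def exercise_unit_page(browser, unit_locator):
    unit = UnitPage(browser, unit_locator)
    unit.visit()                        # blocks until every xblock has rendered
    unit.edit_draft()                   # switch the unit into draft editing mode

    component = unit.components[0]      # one Component page object per xblock
    component.edit()                    # opens the Studio editor for that block
    return component.go_to_container()  # initialized ContainerPage for the xblock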
timokoola/timoechobot
docutils/parsers/rst/languages/zh_cn.py
128
4007
# -*- coding: utf-8 -*- # $Id: zh_cn.py 7119 2011-09-02 13:00:23Z milde $ # Author: Panjunyong <[email protected]> # Copyright: This module has been placed in the public domain. # New language mappings are welcome. Before doing a new translation, please # read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be # translated for each language: one in docutils/languages, the other in # docutils/parsers/rst/languages. """ Simplified Chinese language mappings for language-dependent features of reStructuredText. """ __docformat__ = 'reStructuredText' directives = { # language-dependent: fixed u'注意': 'attention', u'小心': 'caution', u'code (translation required)': 'code', u'危险': 'danger', u'错误': 'error', u'提示': 'hint', u'重要': 'important', u'注解': 'note', u'技巧': 'tip', u'警告': 'warning', u'忠告': 'admonition', u'侧框': 'sidebar', u'主题': 'topic', u'line-block (translation required)': 'line-block', u'parsed-literal (translation required)': 'parsed-literal', u'醒目': 'rubric', u'铭文': 'epigraph', u'要点': 'highlights', u'pull-quote (translation required)': 'pull-quote', u'复合': 'compound', u'容器': 'container', #u'questions (translation required)': 'questions', u'表格': 'table', u'csv表格': 'csv-table', u'列表表格': 'list-table', #u'qa (translation required)': 'questions', #u'faq (translation required)': 'questions', u'元数据': 'meta', u'math (translation required)': 'math', #u'imagemap (translation required)': 'imagemap', u'图片': 'image', u'图例': 'figure', u'包含': 'include', u'原文': 'raw', u'代替': 'replace', u'统一码': 'unicode', u'日期': 'date', u'类型': 'class', u'角色': 'role', u'默认角色': 'default-role', u'标题': 'title', u'目录': 'contents', u'章节序号': 'sectnum', u'题头': 'header', u'页脚': 'footer', #u'footnotes (translation required)': 'footnotes', #u'citations (translation required)': 'citations', u'target-notes (translation required)': 'target-notes', u'restructuredtext-test-directive': 'restructuredtext-test-directive'} """Simplified Chinese name to registered (in directives/__init__.py) directive name mapping.""" roles = { # language-dependent: fixed u'缩写': 'abbreviation', u'简称': 'acronym', u'code (translation required)': 'code', u'index (translation required)': 'index', u'i (translation required)': 'index', u'下标': 'subscript', u'上标': 'superscript', u'title-reference (translation required)': 'title-reference', u'title (translation required)': 'title-reference', u't (translation required)': 'title-reference', u'pep-reference (translation required)': 'pep-reference', u'pep (translation required)': 'pep-reference', u'rfc-reference (translation required)': 'rfc-reference', u'rfc (translation required)': 'rfc-reference', u'强调': 'emphasis', u'加粗': 'strong', u'字面': 'literal', u'math (translation required)': 'math', u'named-reference (translation required)': 'named-reference', u'anonymous-reference (translation required)': 'anonymous-reference', u'footnote-reference (translation required)': 'footnote-reference', u'citation-reference (translation required)': 'citation-reference', u'substitution-reference (translation required)': 'substitution-reference', u'target (translation required)': 'target', u'uri-reference (translation required)': 'uri-reference', u'uri (translation required)': 'uri-reference', u'url (translation required)': 'uri-reference', u'raw (translation required)': 'raw',} """Mapping of Simplified Chinese role names to canonical role names for interpreted text."""
apache-2.0
4,568,722,987,611,402,000
6,490,859,907,313,821,000
35.721154
79
0.626342
false
simbtrix/screenmix
screenmix/reinforcement/shapeSelection.py
1
5607
''' Created on 13.05.2016 @author: mkennert ''' from kivy.properties import ObjectProperty, StringProperty from kivy.uix.gridlayout import GridLayout from kivy.uix.scrollview import ScrollView from ownComponents.design import Design from ownComponents.ownButton import OwnButton from ownComponents.ownGraph import OwnGraph from plot.line import LinePlot class ShapeSelection(GridLayout): ''' the shape-selection-component make it possible to change the cross-section-shape ''' #reinforcement-editor information = ObjectProperty() okStr = StringProperty('ok') cancelStr = StringProperty('cancel') rectStr = StringProperty('rectangle') # constructor def __init__(self, **kwargs): super(ShapeSelection, self).__init__(**kwargs) self.padding = Design.padding self.cols, self.spacing = 2, Design.spacing self.create_gui() ''' create the gui ''' def create_gui(self): self.create_graphs() self.create_selection() ''' create all graphs ''' def create_graphs(self): self.create_graph_rectangle() # default-shape Rectangle self.add_widget(self.graphRectangle) self.focusGraph = self.graphRectangle ################################################################### # here you can add more shapes. # # implement a graph which represent the shape # ################################################################### ''' create the plot graph ''' def create_graph_rectangle(self): self.graphRectangle = OwnGraph( x_ticks_major=0.1, y_ticks_major=0.05, y_grid_label=True, x_grid_label=True, xmin=0, xmax=0.5, ymin=0, ymax=0.25) self.p = LinePlot(color=[1, 1, 1, 1], points=self.draw_rectangle()) self.graphRectangle.add_plot(self.p) ''' draw the plot ''' def draw_rectangle(self): c, h, w = 1e-2, 0.23, 0.45 return [(c, c), (c, h), (w, h), (w, c), (c, c)] ''' create the right area where you can select the shape ''' def create_selection(self): self.create_btns() self.contentRight = GridLayout(cols=1) # self.contentRight.add_widget(self.focusShape) self.btns = GridLayout(cols=1, spacing=Design.spacing, size_hint_y=None) # self.contentRight.add_widget(self.btns) # Make sure the height is such that there is something to scroll. self.btns.bind(minimum_height=self.btns.setter('height')) self.btns.add_widget(self.plot) ################################################################### # here you can add more shapes. 
# # implement the button in the create_btns method # ################################################################### layout = GridLayout(cols=2, spacing=Design.spacing) layout.add_widget(self.btnOK) layout.add_widget(self.btnCancel) self.btns.add_widget(layout) self.shapes = ScrollView() self.shapes.add_widget(self.btns) self.contentRight.add_widget(self.shapes) self.add_widget(self.contentRight) ''' create and bind all btns from the gui ''' def create_btns(self): self.btnOK = OwnButton(text=self.okStr) self.btnOK.bind(on_press=self.finished) self.btnCancel = OwnButton(text=self.cancelStr) self.btnCancel.bind(on_press=self.cancel) # default-shape=rectangle self.focusShape = OwnButton(text=self.rectStr) self.focusShape.bind(on_press=self.show_shapes_btn) # btns self.plot = OwnButton(text=self.rectStr) self.plot.bind(on_press=self.show_rectangle) ####################################################################### # here you can add more shapes # # Attention: make sure that the buttons habe the properties # # size_hint_y=None, height=self.btnSize and a bind-method # # like the show_rectangle-method # ####################################################################### ''' show Rectangle-Graph ''' def show_rectangle(self, btn): self.remove_widget(self.focusGraph) self.add_widget(self.graphRectangle, 1) self.focusGraph = self.graphRectangle self.focusShape.text = btn.text ####################################################### # if you want add new shapes make sure, that the shape# # has a show-method like the show_rectangle # ####################################################### ''' show the btns where you can select the shape ''' def show_shapes_btn(self, btn): self.contentRight.remove_widget(self.focusShape) self.contentRight.add_widget(self.shapes) ''' finished the totally selection and call the finished_shape_selection of the information ''' def finished(self, btn): self.information.finished_shape_selection(self.focusShape) ''' cancel the shape selection ''' def cancel(self, btn): self.information.cancel_shape_selection()
gpl-3.0
-8,734,538,699,464,355,000
9,174,130,480,050,781,000
31.981818
80
0.522918
false
lanfker/tdma_imac
.waf-1.6.7-0a94702c61504c487a251b8d0a04ca9a/waflib/Tools/glib2.py
3
8308
#! /usr/bin/env python # encoding: utf-8 # WARNING! Do not edit! http://waf.googlecode.com/svn/docs/wafbook/single.html#_obtaining_the_waf_file import os from waflib import Task,Utils,Options,Errors,Logs from waflib.TaskGen import taskgen_method,before_method,after_method,feature def add_marshal_file(self,filename,prefix): if not hasattr(self,'marshal_list'): self.marshal_list=[] self.meths.append('process_marshal') self.marshal_list.append((filename,prefix)) def process_marshal(self): for f,prefix in getattr(self,'marshal_list',[]): node=self.path.find_resource(f) if not node: raise Errors.WafError('file not found %r'%f) h_node=node.change_ext('.h') c_node=node.change_ext('.c') task=self.create_task('glib_genmarshal',node,[h_node,c_node]) task.env.GLIB_GENMARSHAL_PREFIX=prefix self.source=self.to_nodes(getattr(self,'source',[])) self.source.append(c_node) class glib_genmarshal(Task.Task): def run(self): bld=self.inputs[0].__class__.ctx get=self.env.get_flat cmd1="%s %s --prefix=%s --header > %s"%(get('GLIB_GENMARSHAL'),self.inputs[0].srcpath(),get('GLIB_GENMARSHAL_PREFIX'),self.outputs[0].abspath()) ret=bld.exec_command(cmd1) if ret:return ret c='''#include "%s"\n'''%self.outputs[0].name self.outputs[1].write(c) cmd2="%s %s --prefix=%s --body >> %s"%(get('GLIB_GENMARSHAL'),self.inputs[0].srcpath(),get('GLIB_GENMARSHAL_PREFIX'),self.outputs[1].abspath()) return bld.exec_command(cmd2) vars=['GLIB_GENMARSHAL_PREFIX','GLIB_GENMARSHAL'] color='BLUE' ext_out=['.h'] def add_enums_from_template(self,source='',target='',template='',comments=''): if not hasattr(self,'enums_list'): self.enums_list=[] self.meths.append('process_enums') self.enums_list.append({'source':source,'target':target,'template':template,'file-head':'','file-prod':'','file-tail':'','enum-prod':'','value-head':'','value-prod':'','value-tail':'','comments':comments}) def add_enums(self,source='',target='',file_head='',file_prod='',file_tail='',enum_prod='',value_head='',value_prod='',value_tail='',comments=''): if not hasattr(self,'enums_list'): self.enums_list=[] self.meths.append('process_enums') self.enums_list.append({'source':source,'template':'','target':target,'file-head':file_head,'file-prod':file_prod,'file-tail':file_tail,'enum-prod':enum_prod,'value-head':value_head,'value-prod':value_prod,'value-tail':value_tail,'comments':comments}) def process_enums(self): for enum in getattr(self,'enums_list',[]): task=self.create_task('glib_mkenums') env=task.env inputs=[] source_list=self.to_list(enum['source']) if not source_list: raise Errors.WafError('missing source '+str(enum)) source_list=[self.path.find_resource(k)for k in source_list] inputs+=source_list env['GLIB_MKENUMS_SOURCE']=[k.abspath()for k in source_list] if not enum['target']: raise Errors.WafError('missing target '+str(enum)) tgt_node=self.path.find_or_declare(enum['target']) if tgt_node.name.endswith('.c'): self.source.append(tgt_node) env['GLIB_MKENUMS_TARGET']=tgt_node.abspath() options=[] if enum['template']: template_node=self.path.find_resource(enum['template']) options.append('--template %s'%(template_node.abspath())) inputs.append(template_node) params={'file-head':'--fhead','file-prod':'--fprod','file-tail':'--ftail','enum-prod':'--eprod','value-head':'--vhead','value-prod':'--vprod','value-tail':'--vtail','comments':'--comments'} for param,option in params.items(): if enum[param]: options.append('%s %r'%(option,enum[param])) env['GLIB_MKENUMS_OPTIONS']=' '.join(options) task.set_inputs(inputs) task.set_outputs(tgt_node) class glib_mkenums(Task.Task): 
run_str='${GLIB_MKENUMS} ${GLIB_MKENUMS_OPTIONS} ${GLIB_MKENUMS_SOURCE} > ${GLIB_MKENUMS_TARGET}' color='PINK' ext_out=['.h'] def add_settings_schemas(self,filename_list): if not hasattr(self,'settings_schema_files'): self.settings_schema_files=[] if not isinstance(filename_list,list): filename_list=[filename_list] self.settings_schema_files.extend(filename_list) def add_settings_enums(self,namespace,filename_list): if hasattr(self,'settings_enum_namespace'): raise Errors.WafError("Tried to add gsettings enums to '%s' more than once"%self.name) self.settings_enum_namespace=namespace if type(filename_list)!='list': filename_list=[filename_list] self.settings_enum_files=filename_list def r_change_ext(self,ext): name=self.name k=name.rfind('.') if k>=0: name=name[:k]+ext else: name=name+ext return self.parent.find_or_declare([name]) def process_settings(self): enums_tgt_node=[] install_files=[] settings_schema_files=getattr(self,'settings_schema_files',[]) if settings_schema_files and not self.env['GLIB_COMPILE_SCHEMAS']: raise Errors.WafError("Unable to process GSettings schemas - glib-compile-schemas was not found during configure") if hasattr(self,'settings_enum_files'): enums_task=self.create_task('glib_mkenums') source_list=self.settings_enum_files source_list=[self.path.find_resource(k)for k in source_list] enums_task.set_inputs(source_list) enums_task.env['GLIB_MKENUMS_SOURCE']=[k.abspath()for k in source_list] target=self.settings_enum_namespace+'.enums.xml' tgt_node=self.path.find_or_declare(target) enums_task.set_outputs(tgt_node) enums_task.env['GLIB_MKENUMS_TARGET']=tgt_node.abspath() enums_tgt_node=[tgt_node] install_files.append(tgt_node) options='--comments "<!-- @comment@ -->" --fhead "<schemalist>" --vhead " <@type@ id=\\"%s.@EnumName@\\">" --vprod " <value nick=\\"@valuenick@\\" value=\\"@valuenum@\\"/>" --vtail " </@type@>" --ftail "</schemalist>" '%(self.settings_enum_namespace) enums_task.env['GLIB_MKENUMS_OPTIONS']=options for schema in settings_schema_files: schema_task=self.create_task('glib_validate_schema') schema_node=self.path.find_resource(schema) if not schema_node: raise Errors.WafError("Cannot find the schema file '%s'"%schema) install_files.append(schema_node) source_list=enums_tgt_node+[schema_node] schema_task.set_inputs(source_list) schema_task.env['GLIB_COMPILE_SCHEMAS_OPTIONS']=[("--schema-file="+k.abspath())for k in source_list] target_node=r_change_ext(schema_node,'.xml.valid') schema_task.set_outputs(target_node) schema_task.env['GLIB_VALIDATE_SCHEMA_OUTPUT']=target_node.abspath() def compile_schemas_callback(bld): if not bld.is_install:return Logs.pprint('YELLOW','Updating GSettings schema cache') command=Utils.subst_vars("${GLIB_COMPILE_SCHEMAS} ${GSETTINGSSCHEMADIR}",bld.env) ret=self.bld.exec_command(command) if self.bld.is_install: if not self.env['GSETTINGSSCHEMADIR']: raise Errors.WafError('GSETTINGSSCHEMADIR not defined (should have been set up automatically during configure)') if install_files: self.bld.install_files(self.env['GSETTINGSSCHEMADIR'],install_files) if not hasattr(self.bld,'_compile_schemas_registered'): self.bld.add_post_fun(compile_schemas_callback) self.bld._compile_schemas_registered=True class glib_validate_schema(Task.Task): run_str='rm -f ${GLIB_VALIDATE_SCHEMA_OUTPUT} && ${GLIB_COMPILE_SCHEMAS} --dry-run ${GLIB_COMPILE_SCHEMAS_OPTIONS} && touch ${GLIB_VALIDATE_SCHEMA_OUTPUT}' color='PINK' def configure(conf): conf.find_program('glib-genmarshal',var='GLIB_GENMARSHAL') 
conf.find_perl_program('glib-mkenums',var='GLIB_MKENUMS') conf.find_program('glib-compile-schemas',var='GLIB_COMPILE_SCHEMAS',mandatory=False) def getstr(varname): return getattr(Options.options,varname,getattr(conf.env,varname,'')) gsettingsschemadir=getstr('GSETTINGSSCHEMADIR') if not gsettingsschemadir: datadir=getstr('DATADIR') if not datadir: prefix=conf.env['PREFIX'] datadir=os.path.join(prefix,'share') gsettingsschemadir=os.path.join(datadir,'glib-2.0','schemas') conf.env['GSETTINGSSCHEMADIR']=gsettingsschemadir def options(opt): opt.add_option('--gsettingsschemadir',help='GSettings schema location [Default: ${datadir}/glib-2.0/schemas]',default='',dest='GSETTINGSSCHEMADIR') taskgen_method(add_marshal_file) before_method('process_source')(process_marshal) taskgen_method(add_enums_from_template) taskgen_method(add_enums) before_method('process_source')(process_enums) taskgen_method(add_settings_schemas) taskgen_method(add_settings_enums) feature('glib2')(process_settings)
gpl-2.0
-7,860,704,455,975,797,000
-3,244,093,164,101,952,000
46.752874
257
0.720751
false
suryakencana/niimanga
niimanga/ctasks/batoto.py
1
34700
""" # Copyright (c) 06 2015 | surya # 26/06/15 [email protected] # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # mangaeden.py """ import shutil from concurrent.futures import ThreadPoolExecutor from niimanga.libs.exceptions import HtmlError from niimanga.libs.utils import LocalDateTime from niimanga.models.master import ISOLang from os import path, makedirs from niimanga.libs import utils from niimanga.models.manga import Manga, Chapter from .celery import load_ini from niimanga.models.meta.base import initialize_sql, DBSession import re import requests from requests.packages.urllib3.connection import ConnectionError from requests_futures.sessions import FuturesSession from sqlalchemy.exc import IntegrityError import transaction INI = load_ini() initialize_sql(INI) def _chapter_slug(str_, slug_manga): name = str_ # print(name[name.index("C"):]) no = re.search(r"\d+(\.\d+)?", name[name.index("C"):]).group(0) # print(no) return no, utils.slugist('{1}-chapter-{0}'.format(no.zfill(3), slug_manga)) def build_to_sys(site, source): try: lt = LocalDateTime.now() """ dict( thumb=self.netlocs[3] + "/".join([image_thumb.split('/')[-2], image_thumb.split('/')[-1]]), origin=origin_url, name=title, # time=self.parseDate.human_to_date_stamp(time), time=time, last_chapter=last_title, last_url=last_url, site=self.netlocs[1] ) """ # list latest # scrap series info # url = "/".join([site.netlocs[2], source.get('origin')]) url = source.get('origin') # print(url) respcontent = site.get_html(url) series_info = site.series_info(respcontent) # series == manga qry = Manga.query manga = qry.filter(Manga.slug == utils.slugist( "-".join([site.netlocs[4], source.get('name', None)]) )).first() if manga is None: with transaction.manager: manga = Manga( site.netlocs[4], series_info.get('name', []), 0, ", ".join(series_info.get('tags', [])), ", ".join(series_info.get('authors', [])), ", ".join(series_info.get('artists', [])), ', '.join(series_info.get('aka', [])), ",".join(series_info.get('description', None)), 1 if 'ongoing' in series_info.get('status', '').lower() else 2 if 'completed' in series_info.get('status', '').lower() else 0 ) # manga.id = utils.guid() manga.origin = source.get('origin', '') manga.chapter_updated = lt.from_time_stamp(source.get('time', 'now')) ext = series_info.get('thumb_url', '').lower().rsplit('.', 1)[-1] manga.thumb = '.'.join(['cover', ext]) manga.category = 'ja' DBSession.add(manga) DBSession.flush() manga = qry.filter(Manga.slug == utils.slugist( "-".join([site.netlocs[4], source.get('name', None)]) )).first() manga_id, manga_thumb, manga_slug = manga.id, manga.thumb, manga.slug ini_path = path.join( path.dirname( path.dirname(__file__) ), '/'.join(['rak', 'manga', manga_id]) ) r = requests.get(source.get('thumb')) path_img = '/'.join([ini_path, manga_thumb]) print(path_img) if not path.exists(ini_path): makedirs(ini_path) with open(path_img, 
"wb") as code: code.write(r.content) chapters_info = series_info.get('chapters', []) for i, ch in enumerate(chapters_info[0:2]): print(ch.get('name', '')) # batoto slug slug_bt = ch.get('name', '') if ':' in slug_bt: slug_bt = slug_bt.split(':') slug_bt.pop(0) slug_bt = '-'.join(slug_bt) slug_chapter = ' '.join([manga_slug, slug_bt]) # cek chapter sudah didownload chapter = Chapter.query.filter(Chapter.slug == utils.slugist(slug_chapter)).first() if chapter is None: v = utils.parse_number(ch.get('name', ''), "Vol") v = 0 if v is None else v c = utils.parse_number(ch.get('name', ''), "Ch") c = 0 if c is None else c with transaction.manager: chapter = Chapter( slug_bt, c, v ) time = lt.human_to_date(ch.get('time', 'now')) # chapter.id = utils.guid() ch_manga = Manga.query.get(manga_id) ch_manga.chapter_count += 1 chapter.lang = ISOLang.query.filter(ISOLang.iso == 'en').first() chapter.updated = time chapter.manga = ch_manga # s = 1000v + c # chapter.sortorder = (1000*float(v)) + float(c) chapter.sortorder = float(c) chapter.slug = slug_chapter DBSession.add(chapter) DBSession.flush() chapter = Chapter.query.filter(Chapter.slug == utils.slugist(slug_chapter)).first() # batoto html = site.get_html(ch.get('url')) # # ambil image dan download locally di folder chapter.id chapter_info = site.chapter_info(html) try: # series info # chapter info and images session = FuturesSession(executor=ThreadPoolExecutor(max_workers=10)) for page in chapter_info.get('pages', []): ini_chapter = '/'.join([ini_path, chapter.id]) print(page) r = session.get(page).result() if r.status_code != 200: raise HtmlError('cannot fetch') path_img = '/'.join([ini_chapter, page.split('/')[-1]]) print(path_img) if not path.exists(ini_chapter): makedirs(ini_chapter) with open(path_img, "wb") as code: code.write(r.content) except ConnectionError as Conn: print(Conn) chapter = Chapter.query.get(chapter.id) DBSession.delete(chapter) shutil.rmtree(ini_chapter) except AttributeError as e: print(e.message) except KeyError as e: print(e.message) except ValueError as e: print(e.message) def build_from_latestDB(): try: """ dict( thumb=self.netlocs[3] + "/".join([image_thumb.split('/')[-2], image_thumb.split('/')[-1]]), origin=origin_url, name=title, # time=self.parseDate.human_to_date_stamp(time), time=time, last_chapter=last_title, last_url=last_url, site=self.netlocs[1] ) """ trans = transaction.begin() # manga = Manga( # u'bt', # u'Fairy Tail', # 0, # u'comedy, shounen, adventure', # u'Mushi shi', # u'Hiro antsuki', # u'False Love', # u'Nisekoi' # ) # manga.category = u'ja' # manga.save_it() manga = Manga.query.filter(Manga.slug == u'bt-fairy-tail').first() # DBSession.delete(manga) chapters = [ { "name": "Ch.123: Without Fearing Spiciness", "url": "http://bato.to/read/_/327794/shokugeki-no-soma_ch123_by_casanova" }, { "name": "Ch.122: \"M\u00e1\" and \"L\u00e0\"", "url": "http://bato.to/read/_/327793/shokugeki-no-soma_ch122_by_casanova" }, { "name": "Ch.121: Spicy Flavor Worship", "url": "http://bato.to/read/_/325643/shokugeki-no-soma_ch121_by_casanova" }, { "name": "Ch.120: What Is It!!?", "url": "http://bato.to/read/_/324650/shokugeki-no-soma_ch120_by_casanova" }, { "name": "Ch.119: The Distance from the Elite Ten", "url": "http://bato.to/read/_/323145/shokugeki-no-soma_ch119_by_casanova" }, { "name": "Ch.118: Tootsuki Elite Ten", "url": "http://bato.to/read/_/321978/shokugeki-no-soma_ch118_by_casanova" }, { "name": "Ch.117.3 Read Online", "url": "http://bato.to/read/_/321119/shokugeki-no-soma_ch117.3_by_casanova" }, { "name": 
"Ch.117: Imposingly", "url": "http://bato.to/read/_/321118/shokugeki-no-soma_ch117_by_casanova" }, { "name": "Ch.116.5: A Magnificent Banquet", "url": "http://bato.to/read/_/318818/shokugeki-no-soma_ch116.5_by_casanova" }, { "name": "Ch.116: The Fruit Called Growth", "url": "http://bato.to/read/_/318387/shokugeki-no-soma_ch116_by_casanova" }, { "name": "Ch.115: Tear Through", "url": "http://bato.to/read/_/316969/shokugeki-no-soma_ch115_by_casanova" }, { "name": "Ch.114: Yuihara (Revamped)", "url": "http://bato.to/read/_/316564/shokugeki-no-soma_ch114_by_casanova" }, { "name": "Ch.113: Forgotten Vegetables", "url": "http://bato.to/read/_/314647/shokugeki-no-soma_ch113_by_casanova" }, { "name": "Ch.112: The Guidepost for Growth", "url": "http://bato.to/read/_/314279/shokugeki-no-soma_ch112_by_casanova" }, { "name": "Ch.111: Main Course", "url": "http://bato.to/read/_/312126/shokugeki-no-soma_ch111_by_casanova" }, { "name": "Ch.110: The Magician, Once Again---!", "url": "http://bato.to/read/_/311083/shokugeki-no-soma_ch110_by_casanova" }, { "name": "Ch.109: Those Who Shed Light", "url": "http://bato.to/read/_/309853/shokugeki-no-soma_ch109_by_casanova" }, { "name": "Ch.108: Choosing a Path", "url": "http://bato.to/read/_/308448/shokugeki-no-soma_ch108_by_casanova" }, { "name": "Ch.107: Ideals and Distance", "url": "http://bato.to/read/_/306749/shokugeki-no-soma_ch107_by_casanova" }, { "name": "Ch.106: A Busy Restaurant with Many Problems", "url": "http://bato.to/read/_/305011/shokugeki-no-soma_ch106_by_casanova" }, { "name": "Ch.105: Stagiaire", "url": "http://bato.to/read/_/303297/shokugeki-no-soma_ch105_by_casanova" }, { "name": "Ch.104: New \"Jewel\"", "url": "http://bato.to/read/_/302063/shokugeki-no-soma_ch104_by_casanova" }, { "name": "Ch.103: Specialty", "url": "http://bato.to/read/_/300229/shokugeki-no-soma_ch103_by_casanova" }, { "name": "Ch.102: Souma's Strength", "url": "http://bato.to/read/_/299255/shokugeki-no-soma_ch102_by_casanova" }, { "name": "Ch.101: A Fine Tempered Sword", "url": "http://bato.to/read/_/295858/shokugeki-no-soma_ch101_by_casanova" }, { "name": "Ch.100: A Sharp Blade", "url": "http://bato.to/read/_/294443/shokugeki-no-soma_ch100_by_casanova" }, { "name": "Ch.99: The Fangs That Cut Through The Battlefield", "url": "http://bato.to/read/_/293409/shokugeki-no-soma_ch99_by_casanova" }, { "name": "Ch.98 (full color): The \"Things\" They've Accumulated", "url": "http://bato.to/read/_/292819/shokugeki-no-soma_ch98--full-color-_by_casanova" }, { "name": "Ch.98: The \"Things\" They've Accumulated", "url": "http://bato.to/read/_/290601/shokugeki-no-soma_ch98_by_casanova" }, { "name": "Ch.97 (full color): Moonlight Memories", "url": "http://bato.to/read/_/292818/shokugeki-no-soma_ch97--full-color-_by_casanova" }, { "name": "Ch.97: Moonlight Memories", "url": "http://bato.to/read/_/289696/shokugeki-no-soma_ch97_by_casanova" }, { "name": "Ch.96 (full color): The Answer He Reached", "url": "http://bato.to/read/_/292817/shokugeki-no-soma_ch96--full-color-_by_casanova" }, { "name": "Ch.96: The Answer He Reached", "url": "http://bato.to/read/_/287642/shokugeki-no-soma_ch96_by_casanova" }, { "name": "Ch.95 (full color): A Battle Surrounding the \"Season\"", "url": "http://bato.to/read/_/292816/shokugeki-no-soma_ch95--full-color-_by_casanova" }, { "name": "Ch.95: A Battle Surrounding the \"Season\"", "url": "http://bato.to/read/_/286562/shokugeki-no-soma_ch95_by_casanova" }, { "name": "Ch.94: Seizing the Season", "url": 
"http://bato.to/read/_/284514/shokugeki-no-soma_ch94_by_casanova" }, { "name": "Ch.93: The \"Sword\" That Announces Autumn", "url": "http://bato.to/read/_/282575/shokugeki-no-soma_ch93_by_casanova" }, { "name": "Ch.92: Firestarter", "url": "http://bato.to/read/_/280599/shokugeki-no-soma_ch92_by_casanova" }, { "name": "Ch.91: Beats Eating Each Other", "url": "http://bato.to/read/_/279908/shokugeki-no-soma_ch91_by_casanova" }, { "name": "Ch.90: Iron Will, Heart of Steel", "url": "http://bato.to/read/_/278692/shokugeki-no-soma_ch90_by_casanova" }, { "name": "Ch.89: Morning Will Come Again", "url": "http://bato.to/read/_/277091/shokugeki-no-soma_ch89_by_casanova" }, { "name": "Ch.88: ~DREAMLAND~", "url": "http://bato.to/read/_/275550/shokugeki-no-soma_ch88_by_casanova" }, { "name": "Ch.87: Secret Plan", "url": "http://bato.to/read/_/274593/shokugeki-no-soma_ch87_by_casanova" }, { "name": "Ch.86: Garniture", "url": "http://bato.to/read/_/272508/shokugeki-no-soma_ch86_by_casanova" }, { "name": "Ch.85.2 Read Online", "url": "http://bato.to/read/_/271777/shokugeki-no-soma_ch85.2_by_casanova" }, { "name": "Ch.85.1 Read Online", "url": "http://bato.to/read/_/271776/shokugeki-no-soma_ch85.1_by_casanova" }, { "name": "Ch.85: The First Bite's Secret", "url": "http://bato.to/read/_/271775/shokugeki-no-soma_ch85_by_casanova" }, { "name": "Ch.84: Hidden Assignment", "url": "http://bato.to/read/_/270967/shokugeki-no-soma_ch84_by_casanova" }, { "name": "Ch.83: The Chaser And The Chased", "url": "http://bato.to/read/_/268312/shokugeki-no-soma_ch83_by_casanova" }, { "name": "Ch.82: Starting Line", "url": "http://bato.to/read/_/265163/shokugeki-no-soma_ch82_by_casanova" }, { "name": "Ch.81: The Observer Arrives", "url": "http://bato.to/read/_/263615/shokugeki-no-soma_ch81_by_casanova" }, { "name": "Ch.80: The Conditions for the Challenge", "url": "http://bato.to/read/_/262016/shokugeki-no-soma_ch80_by_casanova" }, { "name": "Ch.79: The Last \"Card\"", "url": "http://bato.to/read/_/259695/shokugeki-no-soma_ch79_by_casanova" }, { "name": "Ch.78: A Paper-Thin Difference Between Offense and Defense", "url": "http://bato.to/read/_/258287/shokugeki-no-soma_ch78_by_casanova" }, { "name": "Ch.77: Pursuer", "url": "http://bato.to/read/_/256463/shokugeki-no-soma_ch77_by_casanova" }, { "name": "Ch.76: Duel Etiquette", "url": "http://bato.to/read/_/254889/shokugeki-no-soma_ch76_by_casanova" }, { "name": "Ch.75: Beneath The Mask", "url": "http://bato.to/read/_/252716/shokugeki-no-soma_ch75_by_casanova" }, { "name": "Ch.74: Sensitive Monster", "url": "http://bato.to/read/_/250870/shokugeki-no-soma_ch74_by_casanova" }, { "name": "Ch.73: Minding The Details", "url": "http://bato.to/read/_/248966/shokugeki-no-soma_ch73_by_casanova" }, { "name": "Ch.72: The \"Jewels\" Generation", "url": "http://bato.to/read/_/247956/shokugeki-no-soma_ch72_by_casanova" }, { "name": "Ch.71: \"Courage\" and \"Resolution\"", "url": "http://bato.to/read/_/246285/shokugeki-no-soma_ch71_by_casanova" }, { "name": "Ch.70: Polar Opposites", "url": "http://bato.to/read/_/245239/shokugeki-no-soma_ch70_by_casanova" }, { "name": "Ch.69: Kitchen's Dictator", "url": "http://bato.to/read/_/243801/shokugeki-no-soma_ch69_by_casanova" }, { "name": "Ch.68: The \"Port City\" Match", "url": "http://bato.to/read/_/241781/shokugeki-no-soma_ch68_by_casanova" }, { "name": "Ch.67: Blending Light And Shadow", "url": "http://bato.to/read/_/239555/shokugeki-no-soma_ch67_by_casanova" }, { "name": "Ch.66: What Fills That Box", "url": 
"http://bato.to/read/_/237502/shokugeki-no-soma_ch66_by_casanova" }, { "name": "Ch.65: The Theory of Bento Evolution", "url": "http://bato.to/read/_/236405/shokugeki-no-soma_ch65_by_casanova" }, { "name": "Ch.64: On the Edge", "url": "http://bato.to/read/_/234698/shokugeki-no-soma_ch64_by_casanova" }, { "name": "Ch.63: Plan", "url": "http://bato.to/read/_/232844/shokugeki-no-soma_ch63_by_casanova" }, { "name": "Ch.62: A Meeting of Strong People", "url": "http://bato.to/read/_/230838/shokugeki-no-soma_ch62_by_casanova" }, { "name": "Ch.61: Putting Your Heart Into It", "url": "http://bato.to/read/_/228801/shokugeki-no-soma_ch61_by_casanova" }, { "name": "Ch.60: The Warriors' Banquet", "url": "http://bato.to/read/_/227472/shokugeki-no-soma_ch60_by_casanova" }, { "name": "Ch.59: Their Respective Weapons", "url": "http://bato.to/read/_/225853/shokugeki-no-soma_ch59_by_casanova" }, { "name": "Ch.58: Holy Aroma", "url": "http://bato.to/read/_/224397/shokugeki-no-soma_ch58_by_casanova" }, { "name": "Ch.57: Her Memories", "url": "http://bato.to/read/_/222875/shokugeki-no-soma_ch57_by_casanova" }, { "name": "Ch.56: Tuscan Moon", "url": "http://bato.to/read/_/222555/shokugeki-no-soma_ch56_by_casanova" }, { "name": "Ch.55: A Hole Drilled with Knowledge", "url": "http://bato.to/read/_/221797/shokugeki-no-soma_ch55_by_casanova" }, { "name": "Ch.54: A Recital of Blossoming Individuals", "url": "http://bato.to/read/_/219111/shokugeki-no-soma_ch54_by_casanova" }, { "name": "Ch.53: The Man Who Came From A Cold Country", "url": "http://bato.to/read/_/215047/shokugeki-no-soma_ch53_by_casanova" }, { "name": "Ch.52.5: Natsuyumi no Erina", "url": "http://bato.to/read/_/213824/shokugeki-no-soma_ch52.5_by_casanova" }, { "name": "Ch.52: Those Who Serve the Best", "url": "http://bato.to/read/_/211649/shokugeki-no-soma_ch52_by_casanova" }, { "name": "Ch.51: The Witch's Dining Table", "url": "http://bato.to/read/_/211213/shokugeki-no-soma_ch51_by_casanova" }, { "name": "Ch.50: Those Beyond Ordinary", "url": "http://bato.to/read/_/210069/shokugeki-no-soma_ch50_by_casanova" }, { "name": "Ch.49: Wolf Pack", "url": "http://bato.to/read/_/208381/shokugeki-no-soma_ch49_by_casanova" }, { "name": "Ch.48: The Known Unknown", "url": "http://bato.to/read/_/207413/shokugeki-no-soma_ch48_by_casanova" }, { "name": "Ch.47: Battle Memories", "url": "http://bato.to/read/_/205556/shokugeki-no-soma_ch47_by_casanova" }, { "name": "Ch.46: The Dragon Lies Down and then Ascends to the Sky", "url": "http://bato.to/read/_/203799/shokugeki-no-soma_ch46_by_casanova" }, { "name": "Ch.45: The Accompanist of Aromas and Stimuli", "url": "http://bato.to/read/_/202784/shokugeki-no-soma_ch45_by_casanova" }, { "name": "Ch.44: An Unexpected Straight", "url": "http://bato.to/read/_/201764/shokugeki-no-soma_ch44_by_casanova" }, { "name": "Ch.43: The Cook Who Has Travelled Thousands of Miles", "url": "http://bato.to/read/_/200010/shokugeki-no-soma_ch43_by_casanova" }, { "name": "Ch.42: Wake Up Kiss", "url": "http://bato.to/read/_/199003/shokugeki-no-soma_ch42_by_casanova" }, { "name": "Ch.41: The Man Who was Called an \"Asura\"", "url": "http://bato.to/read/_/196809/shokugeki-no-soma_ch41_by_casanova" }, { "name": "Ch.40: Return", "url": "http://bato.to/read/_/195573/shokugeki-no-soma_ch40_by_casanova" }, { "name": "Ch.39: The Chosen Ones", "url": "http://bato.to/read/_/192744/shokugeki-no-soma_ch39_by_casanova" }, { "name": "Ch.38: Sensual Karaage (4)", "url": "http://bato.to/read/_/192097/shokugeki-no-soma_ch38_by_casanova" }, { "name": "Ch.37: Sensual 
Karaage (3)", "url": "http://bato.to/read/_/190617/shokugeki-no-soma_ch37_by_casanova" }, { "name": "Ch.36v2: Sensual Kaarage (2)", "url": "http://bato.to/read/_/189007/shokugeki-no-soma_ch36v2_by_casanova" }, { "name": "Ch.35.5: Mid-Summer's Nikumi-san", "url": "http://bato.to/read/_/188961/shokugeki-no-soma_ch35.5_by_casanova" }, { "name": "Ch.35: Sensual Karaage (1)", "url": "http://bato.to/read/_/186597/shokugeki-no-soma_ch35_by_casanova" }, { "name": "Ch.34: The Fate Surrounding Tootsuki", "url": "http://bato.to/read/_/185446/shokugeki-no-soma_ch34_by_casanova" }, { "name": "Ch.33: To the People that will Eventually Fight", "url": "http://bato.to/read/_/184581/shokugeki-no-soma_ch33_by_casanova" }, { "name": "Ch.32: Dancing Cook", "url": "http://bato.to/read/_/183357/shokugeki-no-soma_ch32_by_casanova" }, { "name": "Ch.31: Metamorphose", "url": "http://bato.to/read/_/182129/shokugeki-no-soma_ch31_by_casanova" }, { "name": "Ch.30: A Set Trap", "url": "http://bato.to/read/_/180945/shokugeki-no-soma_ch30_by_casanova" }, { "name": "Ch.29: The Eggs Before Dawn", "url": "http://bato.to/read/_/179806/shokugeki-no-soma_ch29_by_casanova" }, { "name": "Ch.28: Everyone Must Not Fall Asleep", "url": "http://bato.to/read/_/178134/shokugeki-no-soma_ch28_by_casanova" }, { "name": "Ch.27: The Bitterness of Defeat", "url": "http://bato.to/read/_/177135/shokugeki-no-soma_ch27_by_casanova" }, { "name": "Ch.26: Memories of a Dish", "url": "http://bato.to/read/_/176297/shokugeki-no-soma_ch26_by_casanova" }, { "name": "Ch.25: Those Remnants", "url": "http://bato.to/read/_/174116/shokugeki-no-soma_ch25_by_casanova" }, { "name": "Ch.24: The Magician that Came from the East", "url": "http://bato.to/read/_/173475/shokugeki-no-soma_ch24_by_casanova" }, { "name": "Ch.23: Proof of Existence", "url": "http://bato.to/read/_/171105/shokugeki-no-soma_ch23_by_casanova" }, { "name": "Ch.22: Alumni", "url": "http://bato.to/read/_/170355/shokugeki-no-soma_ch22_by_casanova" }, { "name": "Ch.21: The Supreme Recette", "url": "http://bato.to/read/_/167841/shokugeki-no-soma_ch21_by_casanova" }, { "name": "Ch.20: Verdict", "url": "http://bato.to/read/_/166990/shokugeki-no-soma_ch20_by_casanova" }, { "name": "Ch.19: Sparkling Soul", "url": "http://bato.to/read/_/165823/shokugeki-no-soma_ch19_by_casanova" }, { "name": "Ch.18: The Seed of Ideas", "url": "http://bato.to/read/_/165444/shokugeki-no-soma_ch18_by_casanova" }, { "name": "Ch.17: The Coating that Colors the Mountain", "url": "http://bato.to/read/_/164819/shokugeki-no-soma_ch17_by_casanova" }, { "name": "Vol.3 Ch.16.5 Read Online", "url": "http://bato.to/read/_/213776/shokugeki-no-soma_v3_ch16.5_by_casanova" }, { "name": "Ch.16: Concerto of Ideas and Creation", "url": "http://bato.to/read/_/162138/shokugeki-no-soma_ch16_by_casanova" }, { "name": "Ch.15: Friction and Elite", "url": "http://bato.to/read/_/161276/shokugeki-no-soma_ch15_by_casanova" }, { "name": "Vol.2 Ch.14.5: Volume 2 Extra and Recipes", "url": "http://bato.to/read/_/209555/shokugeki-no-soma_v2_ch14.5_by_casanova" }, { "name": "Ch.14: Megumi's Garden", "url": "http://bato.to/read/_/160292/shokugeki-no-soma_ch14_by_casanova" }, { "name": "Ch.13: Quiet Don, An Eloquent Don", "url": "http://bato.to/read/_/159427/shokugeki-no-soma_ch13_by_casanova" }, { "name": "Ch.12: Enter the Battlefield", "url": "http://bato.to/read/_/158233/shokugeki-no-soma_ch12_by_casanova" }, { "name": "Ch.11: The Night Before the Showdown", "url": "http://bato.to/read/_/157118/shokugeki-no-soma_ch11_by_casanova" }, { "name": "Ch.10: 
The Meat Invader", "url": "http://bato.to/read/_/155824/shokugeki-no-soma_ch10_by_casanova" }, { "name": "Ch.9: The Ice Queen and the Spring Storm", "url": "http://bato.to/read/_/154910/shokugeki-no-soma_ch9_by_casanova" }, { "name": "Ch.8: A Dish that Calls for Spring", "url": "http://bato.to/read/_/153806/shokugeki-no-soma_ch8_by_casanova" }, { "name": "Ch.7: Lawless Area", "url": "http://bato.to/read/_/153114/shokugeki-no-soma_ch7_by_casanova" }, { "name": "Ch.6: Maria of the Polar Star", "url": "http://bato.to/read/_/149043/shokugeki-no-soma_ch6_by_casanova" }, { "name": "Ch.5: The Chef That Doesn't Smile", "url": "http://bato.to/read/_/147981/shokugeki-no-soma_ch5_by_casanova" }, { "name": "Ch.4.5: Kurase-san's Diary + Recipe 1", "url": "http://bato.to/read/_/199090/shokugeki-no-soma_ch4.5_by_casanova" }, { "name": "Ch.4: The Demon King Talks About \"Gems\"", "url": "http://bato.to/read/_/146795/shokugeki-no-soma_ch4_by_casanova" }, { "name": "Ch.3: \"Transforming Furikake\"", "url": "http://bato.to/read/_/146229/shokugeki-no-soma_ch3_by_casanova" }, { "name": "Ch.2: God's Tounge", "url": "http://bato.to/read/_/144856/shokugeki-no-soma_ch2_by_casanova" }, { "name": "Ch.1: The Endless Wilderness", "url": "http://bato.to/read/_/143718/shokugeki-no-soma_ch1_by_casanova" }, { "name": "Ch.0: [Oneshot]", "url": "http://bato.to/read/_/182841/shokugeki-no-soma_by_utopia" } ] for i, ch in enumerate(chapters): # eden # url = "/".join([site.netlocs[2], ch.get('url')]) # html = site.get_html(url) # site.chapter_info(html) v = utils.parse_number(ch.get('name', ''), "Vol") v = 0 if v is None else v c = utils.parse_number(ch.get('name', ''), "Ch") c = 0 if c is None else c try: chapter = Chapter( ch.get('name', '').split(':')[-1], c, v ) chapter.id = utils.guid() chapter.slug = " ".join([manga.slug, ch.get('name', '').split(':')[0]]) chapter.manga = manga # s = 1000v + c chapter.sortorder = (1000*float(v)) + float(c) chapter.save_it() print(chapter.id) ini_path = path.join( path.dirname( path.dirname(__file__) ), '/'.join(['rak', 'manga', chapter.id]) ) print(ini_path) except IntegrityError as IE: print(IE.message) # if 'violates unique constraint' in IE.message: # c += float(c / 100) # chapter = Chapter( # ch.get('name', '').split(':')[-1], # manga.slug, # c, # v # ) # chapter.manga = manga # # s = 1000v + c # print("{0}: {1}".format(v, c)) # chapter.sortorder = (1000*float(v)) + float(c) # chapter.save_it() trans.commit() except AttributeError as e: print(e.message) except KeyError as e: print(e.message) except ValueError as e: print(e.message)
lgpl-3.0
-4,328,923,296,292,903,400
830,053,487,725,327,400
40.656663
107
0.459597
false
idlead/scikit-learn
examples/linear_model/plot_sgd_comparison.py
112
1819
""" ================================== Comparing various online solvers ================================== An example showing how different online solvers perform on the hand-written digits dataset. """ # Author: Rob Zinkov <rob at zinkov dot com> # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from sklearn import datasets from sklearn.model_selection import train_test_split from sklearn.linear_model import SGDClassifier, Perceptron from sklearn.linear_model import PassiveAggressiveClassifier from sklearn.linear_model import LogisticRegression heldout = [0.95, 0.90, 0.75, 0.50, 0.01] rounds = 20 digits = datasets.load_digits() X, y = digits.data, digits.target classifiers = [ ("SGD", SGDClassifier()), ("ASGD", SGDClassifier(average=True)), ("Perceptron", Perceptron()), ("Passive-Aggressive I", PassiveAggressiveClassifier(loss='hinge', C=1.0)), ("Passive-Aggressive II", PassiveAggressiveClassifier(loss='squared_hinge', C=1.0)), ("SAG", LogisticRegression(solver='sag', tol=1e-1, C=1.e4 / X.shape[0])) ] xx = 1. - np.array(heldout) for name, clf in classifiers: print("training %s" % name) rng = np.random.RandomState(42) yy = [] for i in heldout: yy_ = [] for r in range(rounds): X_train, X_test, y_train, y_test = \ train_test_split(X, y, test_size=i, random_state=rng) clf.fit(X_train, y_train) y_pred = clf.predict(X_test) yy_.append(1 - np.mean(y_pred == y_test)) yy.append(np.mean(yy_)) plt.plot(xx, yy, label=name) plt.legend(loc="upper right") plt.xlabel("Proportion train") plt.ylabel("Test Error Rate") plt.show()
bsd-3-clause
-2,918,654,585,510,330,000
4,118,872,429,450,374,700
30.362069
79
0.60033
false
jianran/spark
examples/src/main/python/ml/vector_assembler_example.py
123
1649
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from __future__ import print_function

# $example on$
from pyspark.ml.linalg import Vectors
from pyspark.ml.feature import VectorAssembler
# $example off$
from pyspark.sql import SparkSession

if __name__ == "__main__":
    spark = SparkSession\
        .builder\
        .appName("VectorAssemblerExample")\
        .getOrCreate()

    # $example on$
    dataset = spark.createDataFrame(
        [(0, 18, 1.0, Vectors.dense([0.0, 10.0, 0.5]), 1.0)],
        ["id", "hour", "mobile", "userFeatures", "clicked"])

    assembler = VectorAssembler(
        inputCols=["hour", "mobile", "userFeatures"],
        outputCol="features")

    output = assembler.transform(dataset)
    print("Assembled columns 'hour', 'mobile', 'userFeatures' to vector column 'features'")
    output.select("features", "clicked").show(truncate=False)
    # $example off$

    spark.stop()
apache-2.0
-6,465,602,279,955,291,000
-6,414,654,118,121,173,000
34.847826
91
0.702244
false
lindsayad/sympy
sympy/matrices/tests/test_densearith.py
80
1844
from sympy.matrices.densetools import eye
from sympy.matrices.densearith import add, sub, mulmatmat, mulmatscaler
from sympy import ZZ


def test_add():
    a = [[ZZ(3), ZZ(7), ZZ(4)], [ZZ(2), ZZ(4), ZZ(5)], [ZZ(6), ZZ(2), ZZ(3)]]
    b = [[ZZ(5), ZZ(4), ZZ(9)], [ZZ(3), ZZ(7), ZZ(1)], [ZZ(12), ZZ(13), ZZ(14)]]
    c = [[ZZ(12)], [ZZ(17)], [ZZ(21)]]
    d = [[ZZ(3)], [ZZ(4)], [ZZ(5)]]
    e = [[ZZ(12), ZZ(78)], [ZZ(56), ZZ(79)]]
    f = [[ZZ.zero, ZZ.zero], [ZZ.zero, ZZ.zero]]

    assert add(a, b, ZZ) == [[ZZ(8), ZZ(11), ZZ(13)], [ZZ(5), ZZ(11), ZZ(6)], [ZZ(18), ZZ(15), ZZ(17)]]
    assert add(c, d, ZZ) == [[ZZ(15)], [ZZ(21)], [ZZ(26)]]
    assert add(e, f, ZZ) == e


def test_sub():
    a = [[ZZ(3), ZZ(7), ZZ(4)], [ZZ(2), ZZ(4), ZZ(5)], [ZZ(6), ZZ(2), ZZ(3)]]
    b = [[ZZ(5), ZZ(4), ZZ(9)], [ZZ(3), ZZ(7), ZZ(1)], [ZZ(12), ZZ(13), ZZ(14)]]
    c = [[ZZ(12)], [ZZ(17)], [ZZ(21)]]
    d = [[ZZ(3)], [ZZ(4)], [ZZ(5)]]
    e = [[ZZ(12), ZZ(78)], [ZZ(56), ZZ(79)]]
    f = [[ZZ.zero, ZZ.zero], [ZZ.zero, ZZ.zero]]

    assert sub(a, b, ZZ) == [[ZZ(-2), ZZ(3), ZZ(-5)], [ZZ(-1), ZZ(-3), ZZ(4)], [ZZ(-6), ZZ(-11), ZZ(-11)]]
    assert sub(c, d, ZZ) == [[ZZ(9)], [ZZ(13)], [ZZ(16)]]
    assert sub(e, f, ZZ) == e


def test_mulmatmat():
    a = [[ZZ(3), ZZ(4)], [ZZ(5), ZZ(6)]]
    b = [[ZZ(1), ZZ(2)], [ZZ(7), ZZ(8)]]
    c = eye(2, ZZ)
    d = [[ZZ(6)], [ZZ(7)]]

    assert mulmatmat(a, b, ZZ) == [[ZZ(31), ZZ(38)], [ZZ(47), ZZ(58)]]
    assert mulmatmat(b, d, ZZ) == [[ZZ(20)], [ZZ(98)]]


def test_mulmatscaler():
    a = eye(3, ZZ)
    b = [[ZZ(3), ZZ(7), ZZ(4)], [ZZ(2), ZZ(4), ZZ(5)], [ZZ(6), ZZ(2), ZZ(3)]]

    assert mulmatscaler(a, ZZ(4), ZZ) == [[ZZ(4), ZZ(0), ZZ(0)], [ZZ(0), ZZ(4), ZZ(0)], [ZZ(0), ZZ(0), ZZ(4)]]
    assert mulmatscaler(b, ZZ(1), ZZ) == [[ZZ(3), ZZ(7), ZZ(4)], [ZZ(2), ZZ(4), ZZ(5)], [ZZ(6), ZZ(2), ZZ(3)]]
bsd-3-clause
8,075,311,277,220,899,000
4,674,834,617,968,175,000
37.416667
110
0.444685
false
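As a cross-check of the expected values in the test above, the same 2x2 product can be verified with sympy's public Matrix API; this snippet is an illustration and not part of the test file.

from sympy import Matrix

# Same product as test_mulmatmat, expressed with the public Matrix class
assert Matrix([[3, 4], [5, 6]]) * Matrix([[1, 2], [7, 8]]) == Matrix([[31, 38], [47, 58]])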
hs634/algorithms
python/company/dropbox.py
1
1392
__author__ = 'hs634'


class Crawler(Thread):
    def __init__(self, q, seen, index, lock, wlock, worker_pool_size):
        self.queue = q
        self.seen = seen
        self.index = index
        self.worker_pool_size = worker_pool_size
        self.qandslock = lock
        self.worker_lock = wlock

    def crawl(self, start_url, index):
        cur_page = fetch_page(start_url)
        cur_links = fetch_links(cur_page)
        with self.qandslock:
            for link in cur_links:
                self.queue.enqueue()

        with self.worker_lock:
            self.status = "Free"
            self.worker_available.notify()


class Controller():
    def __init__(self, index):
        self.queue = Queue()
        self.seen = {}
        self.qandslock = Lock()
        self.worker_lock = Lock()
        self.url_available = Condition(self.qandslock)
        self.worker_available = Condition(self.worker_lock)
        self.index = index
        self.worker_pool = [Crawler() for __ in range(worker_pool_size)]

    def run(self, start_url):
        worker = get_next_worker()
        with self.qandslock:
            while self.queue.isEmpty():
                self.url_available.wait()
            next_url = self.queue.dequeue()

        with self.worker_lock:
            while worker_unavailabe():
                self.worker_available.wait()
            worker.crawl(start_url)
mit
6,725,673,783,468,313,000
-7,150,520,821,295,601,000
28.617021
72
0.563218
false
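The crawler sketch in the record above never imports the names it uses (Thread, Queue, Lock, Condition) and leaves helpers such as fetch_page undefined, so it is pseudocode rather than runnable code. A hedged sketch of the standard-library pieces it appears to assume:

# Assumption for illustration: the sketch targets CPython's threading/queue modules.
from threading import Thread, Lock, Condition
from queue import Queue  # Queue.Queue on Python 2

queue_lock = Lock()
url_available = Condition(queue_lock)   # signalled when the URL frontier gains an entry
frontier = Queue()                      # shared work queue the crawler threads would drain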
anandpdoshi/erpnext
erpnext/config/projects.py
2
1504
from __future__ import unicode_literals
from frappe import _

def get_data():
    return [
        {
            "label": _("Projects"),
            "icon": "icon-star",
            "items": [
                {
                    "type": "doctype",
                    "name": "Project",
                    "description": _("Project master."),
                },
                {
                    "type": "doctype",
                    "name": "Task",
                    "description": _("Project activity / task."),
                },
                {
                    "type": "report",
                    "route": "Gantt/Task",
                    "doctype": "Task",
                    "name": "Gantt Chart",
                    "description": _("Gantt chart of all tasks.")
                },
            ]
        },
        {
            "label": _("Time Tracking"),
            "items": [
                {
                    "type": "doctype",
                    "name": "Timesheet",
                    "description": _("Timesheet for tasks."),
                },
                {
                    "type": "doctype",
                    "name": "Activity Type",
                    "description": _("Types of activities for Time Logs"),
                },
                {
                    "type": "doctype",
                    "name": "Activity Cost",
                    "description": _("Cost of various activities"),
                },
            ]
        },
        {
            "label": _("Reports"),
            "icon": "icon-list",
            "items": [
                {
                    "type": "report",
                    "is_query_report": True,
                    "name": "Daily Timesheet Summary",
                    "doctype": "Timesheet"
                },
                {
                    "type": "report",
                    "is_query_report": True,
                    "name": "Project wise Stock Tracking",
                    "doctype": "Project"
                },
            ]
        },
        {
            "label": _("Help"),
            "icon": "icon-facetime-video",
            "items": [
                {
                    "type": "help",
                    "label": _("Managing Projects"),
                    "youtube_id": "egxIGwtoKI4"
                },
            ]
        },
    ]
agpl-3.0
-2,153,673,394,908,588,500
2,185,785,828,661,438,700
18.282051
59
0.478059
false
junbochen/pylearn2
pylearn2/scripts/papers/jia_huang_wkshp_11/evaluate.py
44
3208
from __future__ import print_function
from optparse import OptionParser
import warnings
try:
    from sklearn.metrics import classification_report
except ImportError:
    classification_report = None
    warnings.warn("couldn't find sklearn.metrics.classification_report")

try:
    from sklearn.metrics import confusion_matrix
except ImportError:
    confusion_matrix = None
    warnings.warn("couldn't find sklearn.metrics.metrics.confusion_matrix")
from galatea.s3c.feature_loading import get_features
from pylearn2.utils import serial
from pylearn2.datasets.cifar10 import CIFAR10
from pylearn2.datasets.cifar100 import CIFAR100
import numpy as np


def test(model, X, y):
    print("Evaluating svm")
    y_pred = model.predict(X)
    #try:
    if True:
        acc = (y == y_pred).mean()
        print("Accuracy ", acc)
    """except:
        print("something went wrong")
        print('y:')
        print(y)
        print('y_pred:')
        print(y_pred)
        print('extra info')
        print(type(y))
        print(type(y_pred))
        print(y.dtype)
        print(y_pred.dtype)
        print(y.shape)
        print(y_pred.shape)
        raise
    """
#


def get_test_labels(cifar10, cifar100, stl10):
    assert cifar10 + cifar100 + stl10 == 1

    if stl10:
        print('loading entire stl-10 test set just to get the labels')
        stl10 = serial.load("${PYLEARN2_DATA_PATH}/stl10/stl10_32x32/test.pkl")
        return stl10.y
    if cifar10:
        print('loading entire cifar10 test set just to get the labels')
        cifar10 = CIFAR10(which_set = 'test')
        return np.asarray(cifar10.y)
    if cifar100:
        print('loading entire cifar100 test set just to get the fine labels')
        cifar100 = CIFAR100(which_set = 'test')
        return np.asarray(cifar100.y_fine)
    assert False


def main(model_path, test_path, dataset, **kwargs):
    model = serial.load(model_path)

    cifar100 = dataset == 'cifar100'
    cifar10 = dataset == 'cifar10'
    stl10 = dataset == 'stl10'
    assert cifar10 + cifar100 + stl10 == 1

    y = get_test_labels(cifar10, cifar100, stl10)
    X = get_features(test_path, False, False)
    if stl10:
        num_examples = 8000
    if cifar10 or cifar100:
        num_examples = 10000
    if not X.shape[0] == num_examples:
        raise AssertionError('Expected %d examples but got %d' % (num_examples, X.shape[0]))
    assert y.shape[0] == num_examples

    test(model, X, y)


if __name__ == '__main__':
    """
    Useful for quick tests.
    Usage: python train_bilinear.py
    """

    parser = OptionParser()
    parser.add_option("-m", "--model",
                      action="store", type="string", dest="model_path")
    parser.add_option("-t", "--test",
                      action="store", type="string", dest="test")
    parser.add_option("-o", action="store", dest="output", default = None,
                      help="path to write the report to")
    parser.add_option('--dataset', type='string', dest = 'dataset',
                      action='store', default = None)

    #(options, args) = parser.parse_args()
    #assert options.output

    main(model_path='final_model.pkl',
         test_path='test_features.npy',
         dataset = 'cifar100',
         )
bsd-3-clause
802,493,983,236,724,700
2,085,718,367,719,209,200
28.163636
110
0.625623
false
laurentgo/pants
tests/python/pants_test/net/http/test_fetcher.py
14
6703
# coding=utf-8 # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from __future__ import (absolute_import, division, generators, nested_scopes, print_function, unicode_literals, with_statement) import os from contextlib import closing import mox import pytest import requests from six import StringIO from pants.net.http.fetcher import Fetcher from pants.util.contextutil import temporary_file class FetcherTest(mox.MoxTestBase): def setUp(self): super(FetcherTest, self).setUp() self.requests = self.mox.CreateMockAnything() self.response = self.mox.CreateMock(requests.Response) self.fetcher = Fetcher(requests_api=self.requests) self.listener = self.mox.CreateMock(Fetcher.Listener) def expect_get(self, url, chunk_size_bytes, timeout_secs, listener=True): self.requests.get(url, stream=True, timeout=timeout_secs).AndReturn(self.response) self.response.status_code = 200 self.response.headers = {'content-length': '11'} if listener: self.listener.status(200, content_length=11) chunks = ['0123456789', 'a'] self.response.iter_content(chunk_size=chunk_size_bytes).AndReturn(chunks) return chunks def test_get(self): for chunk in self.expect_get('http://bar', chunk_size_bytes=1024, timeout_secs=60): self.listener.recv_chunk(chunk) self.listener.finished() self.response.close() self.mox.ReplayAll() self.fetcher.fetch('http://bar', self.listener, chunk_size_bytes=1024, timeout_secs=60) def test_checksum_listener(self): digest = self.mox.CreateMockAnything() for chunk in self.expect_get('http://baz', chunk_size_bytes=1, timeout_secs=37): self.listener.recv_chunk(chunk) digest.update(chunk) self.listener.finished() digest.hexdigest().AndReturn('42') self.response.close() self.mox.ReplayAll() checksum_listener = Fetcher.ChecksumListener(digest=digest) self.fetcher.fetch('http://baz', checksum_listener.wrap(self.listener), chunk_size_bytes=1, timeout_secs=37) self.assertEqual('42', checksum_listener.checksum) def test_download_listener(self): downloaded = '' for chunk in self.expect_get('http://foo', chunk_size_bytes=1048576, timeout_secs=3600): self.listener.recv_chunk(chunk) downloaded += chunk self.listener.finished() self.response.close() self.mox.ReplayAll() with closing(StringIO()) as fp: self.fetcher.fetch('http://foo', Fetcher.DownloadListener(fp).wrap(self.listener), chunk_size_bytes=1024 * 1024, timeout_secs=60 * 60) self.assertEqual(downloaded, fp.getvalue()) def test_size_mismatch(self): self.requests.get('http://foo', stream=True, timeout=60).AndReturn(self.response) self.response.status_code = 200 self.response.headers = {'content-length': '11'} self.listener.status(200, content_length=11) self.response.iter_content(chunk_size=1024).AndReturn(['a', 'b']) self.listener.recv_chunk('a') self.listener.recv_chunk('b') self.response.close() self.mox.ReplayAll() with pytest.raises(self.fetcher.Error): self.fetcher.fetch('http://foo', self.listener, chunk_size_bytes=1024, timeout_secs=60) def test_get_error_transient(self): self.requests.get('http://foo', stream=True, timeout=60).AndRaise(requests.ConnectionError) self.mox.ReplayAll() with pytest.raises(self.fetcher.TransientError): self.fetcher.fetch('http://foo', self.listener, chunk_size_bytes=1024, timeout_secs=60) def test_get_error_permanent(self): self.requests.get('http://foo', stream=True, timeout=60).AndRaise(requests.TooManyRedirects) self.mox.ReplayAll() with pytest.raises(self.fetcher.PermanentError) as e: self.fetcher.fetch('http://foo', self.listener, 
chunk_size_bytes=1024, timeout_secs=60) self.assertTrue(e.value.response_code is None) def test_http_error(self): self.requests.get('http://foo', stream=True, timeout=60).AndReturn(self.response) self.response.status_code = 404 self.listener.status(404) self.response.close() self.mox.ReplayAll() with pytest.raises(self.fetcher.PermanentError) as e: self.fetcher.fetch('http://foo', self.listener, chunk_size_bytes=1024, timeout_secs=60) self.assertEqual(404, e.value.response_code) def test_iter_content_error(self): self.requests.get('http://foo', stream=True, timeout=60).AndReturn(self.response) self.response.status_code = 200 self.response.headers = {} self.listener.status(200, content_length=None) self.response.iter_content(chunk_size=1024).AndRaise(requests.Timeout) self.response.close() self.mox.ReplayAll() with pytest.raises(self.fetcher.TransientError): self.fetcher.fetch('http://foo', self.listener, chunk_size_bytes=1024, timeout_secs=60) def expect_download(self, path_or_fd=None): downloaded = '' for chunk in self.expect_get('http://1', chunk_size_bytes=13, timeout_secs=13, listener=False): downloaded += chunk self.response.close() self.mox.ReplayAll() path = self.fetcher.download('http://1', path_or_fd=path_or_fd, chunk_size_bytes=13, timeout_secs=13) return downloaded, path def test_download(self): downloaded, path = self.expect_download() try: with open(path) as fp: self.assertEqual(downloaded, fp.read()) finally: os.unlink(path) def test_download_fd(self): with temporary_file() as fd: downloaded, path = self.expect_download(path_or_fd=fd) self.assertEqual(path, fd.name) fd.close() with open(path) as fp: self.assertEqual(downloaded, fp.read()) def test_download_path(self): with temporary_file() as fd: fd.close() downloaded, path = self.expect_download(path_or_fd=fd.name) self.assertEqual(path, fd.name) with open(path) as fp: self.assertEqual(downloaded, fp.read())
apache-2.0
-2,744,288,190,413,257,700
-1,074,524,491,286,832,000
32.019704
99
0.626138
false
vefimova/rally
rally/plugins/openstack/context/quotas/nova_quotas.py
15
2762
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

from rally.common import log as logging


LOG = logging.getLogger(__name__)


class NovaQuotas(object):
    """Management of Nova quotas."""

    QUOTAS_SCHEMA = {
        "type": "object",
        "additionalProperties": False,
        "properties": {
            "instances": {
                "type": "integer",
                "minimum": -1
            },
            "cores": {
                "type": "integer",
                "minimum": -1
            },
            "ram": {
                "type": "integer",
                "minimum": -1
            },
            "floating_ips": {
                "type": "integer",
                "minimum": -1
            },
            "fixed_ips": {
                "type": "integer",
                "minimum": -1
            },
            "metadata_items": {
                "type": "integer",
                "minimum": -1
            },
            "injected_files": {
                "type": "integer",
                "minimum": -1
            },
            "injected_file_content_bytes": {
                "type": "integer",
                "minimum": -1
            },
            "injected_file_path_bytes": {
                "type": "integer",
                "minimum": -1
            },
            "key_pairs": {
                "type": "integer",
                "minimum": -1
            },
            "security_groups": {
                "type": "integer",
                "minimum": -1
            },
            "security_group_rules": {
                "type": "integer",
                "minimum": -1
            },
            "server_groups": {
                "type": "integer",
                "minimum": -1
            },
            "server_group_members": {
                "type": "integer",
                "minimum": -1
            }
        }
    }

    def __init__(self, clients):
        self.clients = clients

    def update(self, tenant_id, **kwargs):
        self.clients.nova().quotas.update(tenant_id, **kwargs)

    def delete(self, tenant_id):
        # Reset quotas to defaults and tag database objects as deleted
        self.clients.nova().quotas.delete(tenant_id)
apache-2.0
4,229,815,340,361,052,000
8,690,283,096,594,602,000
28.073684
78
0.450036
false
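QUOTAS_SCHEMA in the record above is a plain JSON Schema fragment, so it can be exercised directly with the jsonschema package; the snippet below is an illustration under that assumption and is not part of the Rally code.

import jsonschema  # assumed to be installed alongside Rally

sample = {"instances": 10, "cores": 20, "ram": -1}
jsonschema.validate(sample, NovaQuotas.QUOTAS_SCHEMA)   # passes silently
# jsonschema.validate({"cores": "twenty"}, NovaQuotas.QUOTAS_SCHEMA)  # would raise ValidationError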
CenterForOpenScience/modular-file-renderer
mfr/extensions/tabular/libs/xlrd_tools.py
2
1632
import xlrd
from collections import OrderedDict

from ..exceptions import TableTooBigError
from ..utilities import header_population
from mfr.extensions.tabular.compat import range, basestring


def xlsx_xlrd(fp):
    """Read and convert a xlsx file to JSON format using the xlrd library
    :param fp: File pointer object
    :return: tuple of table headers and data
    """
    max_size = 10000

    wb = xlrd.open_workbook(fp.name)

    sheets = OrderedDict()

    for sheet in wb.sheets():
        if sheet.ncols > max_size or sheet.nrows > max_size:
            raise TableTooBigError('Table is too large to render.', '.xlsx',
                                   nbr_cols=sheet.ncols, nbr_rows=sheet.nrows)

        if sheet.ncols < 1 or sheet.nrows < 1:
            sheets[sheet.name] = ([], [])
            continue

        fields = sheet.row_values(0) if sheet.nrows else []

        fields = [
            str(value)
            if not isinstance(value, basestring) and value is not None
            else value or 'Unnamed: {0}'.format(index + 1)
            for index, value in enumerate(fields)
        ]

        data = []
        for i in range(1, sheet.nrows):
            row = []
            for cell in sheet.row(i):
                if cell.ctype == xlrd.XL_CELL_DATE:
                    value = xlrd.xldate.xldate_as_datetime(cell.value, wb.datemode).isoformat()
                else:
                    value = cell.value
                row.append(value)
            data.append(dict(zip(fields, row)))

        header = header_population(fields)
        sheets[sheet.name] = (header, data)

    return sheets
apache-2.0
-3,722,949,182,600,889,000
7,715,062,670,691,411,000
30.384615
95
0.57598
false
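A minimal usage sketch for the xlsx_xlrd converter in the record above; the file name is a placeholder, and the call assumes the mfr package context the module normally runs in.

# Hypothetical file path; xlsx_xlrd only needs an object exposing a .name attribute.
with open('workbook.xlsx', 'rb') as fp:
    sheets = xlsx_xlrd(fp)                    # OrderedDict: sheet name -> (headers, rows)
    for name, (headers, rows) in sheets.items():
        print(name, len(rows))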
BehavioralInsightsTeam/edx-platform
common/lib/xmodule/xmodule/tests/test_editing_module.py
13
2787
""" Tests for editing descriptors""" import unittest import os import logging from mock import Mock from pkg_resources import resource_string from opaque_keys.edx.locator import BlockUsageLocator, CourseLocator from xmodule.editing_module import TabsEditingDescriptor from xblock.field_data import DictFieldData from xblock.fields import ScopeIds from xmodule.tests import get_test_descriptor_system log = logging.getLogger(__name__) class TabsEditingDescriptorTestCase(unittest.TestCase): """ Testing TabsEditingDescriptor""" shard = 1 def setUp(self): super(TabsEditingDescriptorTestCase, self).setUp() system = get_test_descriptor_system() system.render_template = Mock(return_value="<div>Test Template HTML</div>") self.tabs = [ { 'name': "Test_css", 'template': "tabs/codemirror-edit.html", 'current': True, 'css': { 'scss': [ resource_string( __name__, '../../test_files/test_tabseditingdescriptor.scss' ) ], 'css': [ resource_string( __name__, '../../test_files/test_tabseditingdescriptor.css' ) ] } }, { 'name': "Subtitles", 'template': "video/subtitles.html", }, { 'name': "Settings", 'template': "tabs/video-metadata-edit-tab.html" } ] TabsEditingDescriptor.tabs = self.tabs self.descriptor = system.construct_xblock_from_class( TabsEditingDescriptor, scope_ids=ScopeIds(None, None, None, BlockUsageLocator(CourseLocator('org', 'course', 'run', branch='revision'), 'category', 'name')), field_data=DictFieldData({}), ) def test_get_css(self): """test get_css""" css = self.descriptor.get_css() test_files_dir = os.path.dirname(__file__).replace('xmodule/tests', 'test_files') test_css_file = os.path.join(test_files_dir, 'test_tabseditingdescriptor.scss') with open(test_css_file) as new_css: added_css = new_css.read() self.assertEqual(css['scss'].pop(), added_css) self.assertEqual(css['css'].pop(), added_css) def test_get_context(self): """"test get_context""" rendered_context = self.descriptor.get_context() self.assertListEqual(rendered_context['tabs'], self.tabs)
agpl-3.0
-8,833,884,035,399,161,000
5,590,484,806,306,066,000
34.730769
106
0.527449
false
Lautitia/newfies-dialer
newfies/newfies_dialer/settings.py
3
17942
# # Newfies-Dialer License # http://www.newfies-dialer.org # # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this file, # You can obtain one at http://mozilla.org/MPL/2.0/. # # Copyright (C) 2011-2014 Star2Billing S.L. # # The Initial Developer of the Original Code is # Arezqui Belaid <[email protected]> # import os import djcelery djcelery.setup_loader() # Django settings for project. DEBUG = False TEMPLATE_DEBUG = False ADMINS = ( ('Your Name', '[email protected]'), ) MANAGERS = ADMINS SERVER_EMAIL = '[email protected]' APPLICATION_DIR = os.path.dirname(globals()['__file__']) + '/../' DATABASES = { 'default': { # 'postgresql_psycopg2','postgresql','sqlite3','oracle', 'django.db.backends.mysql' 'ENGINE': 'django.db.backends.sqlite3', # Database name or path to database file if using sqlite3. 'NAME': APPLICATION_DIR + '/database/newfies-dialer.db', 'USER': '', # Not used with sqlite3. 'PASSWORD': '', # Not used with sqlite3. 'HOST': '', # Not used with sqlite3. 'PORT': '', # Not used with sqlite3. # 'OPTIONS': { # 'init_command': 'SET storage_engine=INNODB', # } } } CACHES = { 'default': { 'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache', 'LOCATION': '/var/tmp/django_cache', } } # Local time zone for this installation. Choices can be found here: # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name # although not all choices may be available on all operating systems. # On Unix systems, a value of None will cause Django to use the same # timezone as the operating system. # If running in a Windows environment this must be set to the same as your # system time zone. TIME_ZONE = 'America/Chicago' # set use of timezone true or false USE_TZ = True # Language code for this installation. All choices can be found here: # http://www.i18nguy.com/unicode/language-identifiers.html LANGUAGE_CODE = 'en-us' SITE_ID = 1 # If you set this to False, Django will make some optimizations so as not # to load the internationalization machinery. USE_I18N = True # If you set this to False, Django will not format dates, numbers and # calendars according to the current locale USE_L10N = False DATETIME_FORMAT = 'Y-m-d H:i:s' DATE_FORMAT = 'Y-m-d' # Absolute path to the directory static files should be collected to. # Don't put anything in this directory yourself; store your static files # in apps' "static/" subdirectories and in STATICFILES_DIRS. # Example: "/home/media/media.lawrence.com/static/" STATIC_ROOT = os.path.join(APPLICATION_DIR, 'static') # URL prefix for static files. # Example: "http://media.lawrence.com/static/" STATIC_URL = '/static/' # Absolute path to the directory that holds media. # Example: "/home/media/media.lawrence.com/" MEDIA_ROOT = os.path.join(APPLICATION_DIR, 'usermedia') # URL that handles the media served from MEDIA_ROOT. Make sure to use a # trailing slash if there is a path component (optional in other cases). # Examples: "http://media.lawrence.com", "http://example.com/media/" MEDIA_URL = '/usermedia/' # Additional locations of static files STATICFILES_DIRS = ( # Put strings here, like "/home/html/static" or "C:/www/django/static". # Always use forward slashes, even on Windows. # Don't forget to use absolute paths, not relative paths. # os.path.join(APPLICATION_DIR, "resources"), ("newfies", os.path.join(APPLICATION_DIR, "resources")), ) # List of finder classes that know how to find static files in # various locations. 
STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', #'django.contrib.staticfiles.finders.DefaultStorageFinder', 'dajaxice.finders.DajaxiceFinder', 'djangobower.finders.BowerFinder', ) # Make this unique, and don't share it with anybody. SECRET_KEY = 'ujau$^uei_ak=@-v8va(&@q_sc0^1nn*qpwyc-776n&qoam@+v' # List of callables that know how to import templates from various sources. TEMPLATE_LOADERS = ( 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', 'django.template.loaders.eggs.Loader', ) MIDDLEWARE_CLASSES = ( #'raven.contrib.django.middleware.SentryResponseErrorIdMiddleware', #'raven.contrib.django.middleware.Sentry404CatchMiddleware', 'corsheaders.middleware.CorsMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.locale.LocaleMiddleware', 'django.middleware.common.CommonMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', #'pagination.middleware.PaginationMiddleware', 'linaro_django_pagination.middleware.PaginationMiddleware', 'common.filter_persist_middleware.FilterPersistMiddleware', 'audiofield.middleware.threadlocals.ThreadLocals', ) TEMPLATE_CONTEXT_PROCESSORS = ( "django.contrib.auth.context_processors.auth", "django.core.context_processors.debug", "django.core.context_processors.i18n", "django.core.context_processors.media", "django.core.context_processors.static", "django.core.context_processors.csrf", "django.core.context_processors.tz", "django.contrib.messages.context_processors.messages", "context_processors.newfies_version", #needed by Sentry "django.core.context_processors.request", ) SESSION_SERIALIZER = 'django.contrib.sessions.serializers.PickleSerializer' ROOT_URLCONF = 'newfies_dialer.urls' TEMPLATE_DIRS = ( # Put strings here, like "/home/html/django_templates" or # "C:/www/django/templates". # Always use forward slashes, even on Windows. # Don't forget to use absolute paths, not relative paths. 
os.path.join(APPLICATION_DIR, 'templates'), ) INTERNAL_IPS = ('127.0.0.1',) ALLOWED_HOSTS = ['127.0.0.1'] DAJAXICE_MEDIA_PREFIX = "dajaxice" #DAJAXICE_MEDIA_PREFIX = "dajax" # http://domain.com/dajax/ #DAJAXICE_CACHE_CONTROL = 10 * 24 * 60 * 60 INSTALLED_APPS = ( #admin tool apps 'admin_tools', 'admin_tools.theming', 'admin_tools.menu', 'admin_tools.dashboard', 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.sites', 'django.contrib.staticfiles', # 'django.contrib.markup', 'django_countries', 'admin_tools_stats', 'genericadmin', 'mailer', 'south', 'djcelery', 'audiofield', 'tagging', 'adminsortable', 'dajaxice', 'dajax', 'dateutil', #'pagination', 'linaro_django_pagination', #'memcache_status', 'country_dialcode', 'common', 'sms', 'sms_module', 'dialer_contact', 'dialer_audio', 'dialer_campaign', 'dialer_cdr', 'dialer_gateway', 'dialer_settings', 'user_profile', 'notification', 'survey', 'dnc', #'agent', #'callcenter', 'appointment', 'mod_mailer', #'raven.contrib.django', 'frontend_notification', 'django_nvd3', 'rest_framework', 'rest_framework.authtoken', 'corsheaders', 'djangobower', 'activelink', 'bootstrap3_datetime', ) # Django extensions try: import gunicorn except ImportError: pass else: INSTALLED_APPS = INSTALLED_APPS + ('gunicorn',) # Redisboard try: import redisboard except ImportError: pass else: INSTALLED_APPS = INSTALLED_APPS + ('redisboard',) # Debug Toolbar try: import debug_toolbar except ImportError: pass else: INSTALLED_APPS = INSTALLED_APPS + ('debug_toolbar', ) #INSTALLED_APPS = INSTALLED_APPS + ('debug_toolbar', 'template_timings_panel',) MIDDLEWARE_CLASSES = MIDDLEWARE_CLASSES + \ ('debug_toolbar.middleware.DebugToolbarMiddleware',) DEBUG_TOOLBAR_PANELS = [ 'debug_toolbar.panels.versions.VersionsPanel', 'debug_toolbar.panels.timer.TimerPanel', 'debug_toolbar.panels.settings.SettingsPanel', 'debug_toolbar.panels.headers.HeadersPanel', 'debug_toolbar.panels.request.RequestPanel', 'debug_toolbar.panels.sql.SQLPanel', # StaticFilesPanel broken https://github.com/django-debug-toolbar/django-debug-toolbar/issues/503 # 'debug_toolbar.panels.staticfiles.StaticFilesPanel', 'debug_toolbar.panels.templates.TemplatesPanel', 'debug_toolbar.panels.cache.CachePanel', 'debug_toolbar.panels.signals.SignalsPanel', 'debug_toolbar.panels.logging.LoggingPanel', 'debug_toolbar.panels.redirects.RedirectsPanel', #'template_timings_panel.panels.TemplateTimings.TemplateTimings', ] DEBUG_TOOLBAR_CONFIG = { 'INTERCEPT_REDIRECTS': False, 'HIDE_DJANGO_SQL': False, 'ENABLE_STACKTRACES': True, 'SQL_WARNING_THRESHOLD': 100, # milliseconds } DEBUG_TOOLBAR_PATCH_SETTINGS = False # Django extensions try: import django_extensions except ImportError: pass else: INSTALLED_APPS = INSTALLED_APPS + ('django_extensions',) # Nose try: import nose except ImportError: pass else: INSTALLED_APPS = INSTALLED_APPS + ('django_nose',) TEST_RUNNER = 'utils.test_runner.MyRunner' # Dilla try: import django_dilla except ImportError: pass else: INSTALLED_APPS = INSTALLED_APPS + ('dilla',) #No of records per page #======================= PAGE_SIZE = 10 # AUTH MODULE SETTINGS AUTH_PROFILE_MODULE = 'user_profile.UserProfile' #AUTH_USER_MODEL = 'user_profile.UserProfile' LOGIN_REDIRECT_URL = '/' LOGIN_URL = '/pleaselog/' #DILLA SETTINGS #============== DICTIONARY = "/usr/share/dict/words" DILLA_USE_LOREM_IPSUM = False # set to True ignores dictionary DILLA_APPS = [ 'auth', #'dialer_gateway', 'voip_app', 'dialer_campaign', 
'dialer_cdr', ] DILLA_SPAMLIBS = [ #'voip_app.voip_app_custom_spamlib', #'dialer_campaign.dialer_campaign_custom_spamlib', 'dialer_cdr.dialer_cdr_custom_spamlib', ] # To use Dilla # > python manage.py run_dilla --cycles=100 #MEMCACHE #======== #CACHES = { # 'default': { # 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache', # 'LOCATION': '127.0.0.1:11211', # 'KEY_PREFIX': 'newfies_', # } #} #REST FRAMEWORK #============== REST_FRAMEWORK = { #'DEFAULT_PERMISSION_CLASSES': ('rest_framework.permissions.IsAdminUser',), 'PAGINATE_BY': 10, 'DEFAULT_AUTHENTICATION_CLASSES': ( 'rest_framework.authentication.SessionAuthentication', 'rest_framework.authentication.BasicAuthentication', 'rest_framework.authentication.TokenAuthentication', ), 'DEFAULT_PERMISSION_CLASSES': ( 'rest_framework.permissions.IsAuthenticated', #'rest_framework.permissions.DjangoModelPermissions', ), #'DEFAULT_THROTTLE_CLASSES': ( # 'rest_framework.throttling.SimpleRateThrottle', #), #'DEFAULT_THROTTLE_RATES': { # 'anon': '100/day', # 'user': '1000/day' #} } #REDIS-CACHE #=========== CACHES = { 'default': { 'BACKEND': 'redis_cache.RedisCache', 'LOCATION': '127.0.0.1:6379', #'OPTIONS': { # 'DB': 1, # 'PASSWORD': 'yadayada', # 'PARSER_CLASS': 'redis.connection.HiredisParser' #}, }, } #CELERY SETTINGS #=============== ## Broker settings BROKER_URL = "redis://localhost:6379/0" #BROKER_URL = 'amqp://guest:guest@localhost:5672//' REDIS_HOST = 'localhost' REDIS_PORT = 6379 REDIS_DB = 0 #REDIS_CONNECT_RETRY = True ## Using the database to store task state and results. CELERY_RESULT_BACKEND = "redis://localhost:6379/0" CELERY_TASK_RESULT_EXPIRES = 18000 # 5 hours. #CELERY_REDIS_CONNECT_RETRY = True CELERY_TIMEZONE = 'Europe/Madrid' CELERY_ENABLE_UTC = True REDIS_DB = 0 # REDIS_CONNECT_RETRY = True CELERY_DEFAULT_QUEUE = 'newfies' CELERY_DEFAULT_EXCHANGE = "newfies_tasks" CELERY_DEFAULT_EXCHANGE_TYPE = "topic" CELERY_DEFAULT_ROUTING_KEY = "task.newfies" CELERY_QUEUES = { 'newfies': { 'binding_key': '#', }, } from kombu import Queue CELERY_DEFAULT_QUEUE = 'default' #Define list of Queues and their routing keys CELERY_QUEUES = ( Queue('default', routing_key='task.#'), Queue('sms_tasks', routing_key='sms_module.#'), Queue('appointment', routing_key='appointment.#'), ) CELERY_DEFAULT_EXCHANGE = 'tasks' CELERY_DEFAULT_EXCHANGE_TYPE = 'topic' CELERY_DEFAULT_ROUTING_KEY = 'task.default' # python manage.py celeryd -EB -l info --purge --queue=sms_tasks # Define tasks and which queue they will use with their routing key CELERY_ROUTES = { 'sms_module.tasks.sms_campaign_running': { 'queue': 'sms_tasks', 'routing_key': 'sms_module.sms_campaign_running', }, } """ from datetime import timedelta from celery.schedules import crontab CELERYBEAT_SCHEDULE = { "runs-every-second": { "task": "dialer_campaign.tasks.campaign_running", "schedule": timedelta(seconds=1), #"args": (50) }, } """ #LANGUAGES #=========== gettext = lambda s: s LANGUAGES = ( ('en', gettext('English')), ('fr', gettext('French')), ('es', gettext('Spanish')), ('pt', gettext('Portuguese')), ('zh', gettext('Chinese')), ('tr', gettext('Turkish')), ('ja', gettext('Japanese')), ) LOCALE_PATHS = ( os.path.join(APPLICATION_DIR, 'locale'), ) LANGUAGE_COOKIE_NAME = 'newfies_dialer_language' #DJANGO-ADMIN-TOOL #================= ADMIN_TOOLS_MENU = 'custom_admin_tools.menu.CustomMenu' ADMIN_TOOLS_INDEX_DASHBOARD = \ 'custom_admin_tools.dashboard.CustomIndexDashboard' ADMIN_TOOLS_APP_INDEX_DASHBOARD = \ 'custom_admin_tools.dashboard.CustomAppIndexDashboard' ADMIN_MEDIA_PREFIX = 
'/static/admin/' #EMAIL BACKEND #============= # Use only in Debug mode. Not in production #EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend' MAILER_EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend' # ADD 'dummy','plivo','twilio','esl' NEWFIES_DIALER_ENGINE = 'esl' #TASTYPIE API #============ API_ALLOWED_IP = ['127.0.0.1', 'localhost'] #SENTRY SETTINGS #=============== #SENTRY_DSN = 'http://asdada:asdasd@localhost:9000/1' #DIALER #====== MAX_CALLS_PER_SECOND = 20 # By default configured to 20 calls per second # Frontend widget values CHANNEL_TYPE_VALUE = 1 # 0-Keep original, 1-Mono, 2-Stereo # 0-Keep original, 8000-8000Hz, 16000-16000Hz, 22050-22050Hz, # 44100-44100Hz, 48000-48000Hz, 96000-96000Hz FREQ_TYPE_VALUE = 8000 # 0-Keep original, 1-Convert to MP3, 2-Convert to WAV, 3-Convert to OGG CONVERT_TYPE_VALUE = 2 AUDIO_DEBUG = False #ESL #=== ESL_HOSTNAME = '127.0.0.1' ESL_PORT = '8021' ESL_SECRET = 'ClueCon' ESL_SCRIPT = '&lua(/usr/share/newfies-lua/newfies.lua)' #TEXT-TO-SPEECH #============== TTS_ENGINE = 'FLITE' # FLITE, CEPSTRAL, ACAPELA ACCOUNT_LOGIN = 'EVAL_XXXX' APPLICATION_LOGIN = 'EVAL_XXXXXXX' APPLICATION_PASSWORD = 'XXXXXXXX' SERVICE_URL = 'http://vaas.acapela-group.com/Services/Synthesizer' QUALITY = '22k' # 22k, 8k, 8ka, 8kmu ACAPELA_GENDER = 'W' ACAPELA_INTONATION = 'NORMAL' #DEBUG DIALER #============ DIALERDEBUG = False DIALERDEBUG_PHONENUMBER = 1000 #Survey in dev #============= SURVEYDEV = False AMD = False #Demo mode #========= #This will disable certain save, to avoid changing password DEMO_MODE = False #IPYTHON #======= IPYTHON_ARGUMENTS = [ '--ext', 'django_extensions.management.notebook_extension', '--profile=nbserver', '--debug' ] #GENERAL #======= # PREFIX_LIMIT_MIN & PREFIX_LIMIT_MAX are used to know # how many digits are used to match against the dialcode prefix database PREFIX_LIMIT_MIN = 2 PREFIX_LIMIT_MAX = 5 # List of phonenumber prefix to ignore, this will be remove prior analysis PREFIX_TO_IGNORE = "+,0,00,000,0000,00000,011,55555,99999" #CORS (Cross-Origin Resource Sharing) #==================================== #if True, the whitelist will not be used and all origins will be accepted CORS_ORIGIN_ALLOW_ALL = True #specify a list of origin hostnames that are authorized to make a cross-site HTTP request #CORS_ORIGIN_WHITELIST = () #specify a regex list of origin hostnames that are authorized to make a cross-site HTTP request #CORS_ORIGIN_REGEX_WHITELIST = ('^http?://(\w+\.)?google\.com$', ) #specify the allowed HTTP methods that can be used when making the actual request CORS_ALLOW_METHODS = ( 'GET', 'POST', 'PUT', 'PATCH', 'DELETE', 'OPTIONS' ) #specify which non-standard HTTP headers can be used when making the actual request CORS_ALLOW_HEADERS = ( 'x-requested-with', 'content-type', 'accept', 'origin', 'authorization', 'x-csrftoken', ) CORS_ORIGIN_WHITELIST = ( 'hostname.example.com', ) #specify which HTTP headers are to be exposed to the browser CORS_EXPOSE_HEADERS = () #specify whether or not cookies are allowed to be included CORS_ALLOW_CREDENTIALS = False # Django-bower # ------------ # Specifie path to components root (you need to use absolute path) BOWER_COMPONENTS_ROOT = os.path.join(APPLICATION_DIR, 'components') BOWER_PATH = '/usr/bin/bower' BOWER_INSTALLED_APPS = ( 'jquery#2.0.3', 'jquery-ui#~1.10.3', 'bootstrap#3.0.3', 'bootstrap-switch#2.0.0', 'bootbox#4.1.0', 'd3#3.3.6', 'nvd3#1.1.12-beta', 'components-font-awesome#4.0.3', ) #Need to build documentation 
with Django 1.6 LOGGING_CONFIG = None # DAJAXICE setting # Not Include XmlHttpRequest.js inside dajaxice.core.js DAJAXICE_XMLHTTPREQUEST_JS_IMPORT = False #IMPORT LOCAL SETTINGS #===================== try: from settings_local import * except ImportError: pass
mpl-2.0
-1,999,607,677,689,003,800
-9,155,254,103,839,635,000
26.645609
105
0.672779
false
drm00/beets
beets/importer.py
14
53270
# This file is part of beets. # Copyright 2015, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. from __future__ import (division, absolute_import, print_function, unicode_literals) """Provides the basic, interface-agnostic workflow for importing and autotagging music files. """ import os import re import pickle import itertools from collections import defaultdict from tempfile import mkdtemp from bisect import insort, bisect_left from contextlib import contextmanager import shutil import time from beets import logging from beets import autotag from beets import library from beets import dbcore from beets import plugins from beets import util from beets import config from beets.util import pipeline, sorted_walk, ancestry from beets.util import syspath, normpath, displayable_path from enum import Enum from beets import mediafile action = Enum('action', ['SKIP', 'ASIS', 'TRACKS', 'MANUAL', 'APPLY', 'MANUAL_ID', 'ALBUMS']) QUEUE_SIZE = 128 SINGLE_ARTIST_THRESH = 0.25 VARIOUS_ARTISTS = u'Various Artists' PROGRESS_KEY = 'tagprogress' HISTORY_KEY = 'taghistory' # Global logger. log = logging.getLogger('beets') class ImportAbort(Exception): """Raised when the user aborts the tagging operation. """ pass # Utilities. def _open_state(): """Reads the state file, returning a dictionary.""" try: with open(config['statefile'].as_filename()) as f: return pickle.load(f) except Exception as exc: # The `pickle` module can emit all sorts of exceptions during # unpickling, including ImportError. We use a catch-all # exception to avoid enumerating them all (the docs don't even have a # full list!). log.debug(u'state file could not be read: {0}', exc) return {} def _save_state(state): """Writes the state dictionary out to disk.""" try: with open(config['statefile'].as_filename(), 'w') as f: pickle.dump(state, f) except IOError as exc: log.error(u'state file could not be written: {0}', exc) # Utilities for reading and writing the beets progress file, which # allows long tagging tasks to be resumed when they pause (or crash). def progress_read(): state = _open_state() return state.setdefault(PROGRESS_KEY, {}) @contextmanager def progress_write(): state = _open_state() progress = state.setdefault(PROGRESS_KEY, {}) yield progress _save_state(state) def progress_add(toppath, *paths): """Record that the files under all of the `paths` have been imported under `toppath`. """ with progress_write() as state: imported = state.setdefault(toppath, []) for path in paths: # Normally `progress_add` will be called with the path # argument increasing. This is because of the ordering in # `albums_in_dir`. We take advantage of that to make the # code faster if imported and imported[len(imported) - 1] <= path: imported.append(path) else: insort(imported, path) def progress_element(toppath, path): """Return whether `path` has been imported in `toppath`. 
""" state = progress_read() if toppath not in state: return False imported = state[toppath] i = bisect_left(imported, path) return i != len(imported) and imported[i] == path def has_progress(toppath): """Return `True` if there exist paths that have already been imported under `toppath`. """ state = progress_read() return toppath in state def progress_reset(toppath): with progress_write() as state: if toppath in state: del state[toppath] # Similarly, utilities for manipulating the "incremental" import log. # This keeps track of all directories that were ever imported, which # allows the importer to only import new stuff. def history_add(paths): """Indicate that the import of the album in `paths` is completed and should not be repeated in incremental imports. """ state = _open_state() if HISTORY_KEY not in state: state[HISTORY_KEY] = set() state[HISTORY_KEY].add(tuple(paths)) _save_state(state) def history_get(): """Get the set of completed path tuples in incremental imports. """ state = _open_state() if HISTORY_KEY not in state: return set() return state[HISTORY_KEY] # Abstract session class. class ImportSession(object): """Controls an import action. Subclasses should implement methods to communicate with the user or otherwise make decisions. """ def __init__(self, lib, loghandler, paths, query): """Create a session. `lib` is a Library object. `loghandler` is a logging.Handler. Either `paths` or `query` is non-null and indicates the source of files to be imported. """ self.lib = lib self.logger = self._setup_logging(loghandler) self.paths = paths self.query = query self.seen_idents = set() self._is_resuming = dict() # Normalize the paths. if self.paths: self.paths = map(normpath, self.paths) def _setup_logging(self, loghandler): logger = logging.getLogger(__name__) logger.propagate = False if not loghandler: loghandler = logging.NullHandler() logger.handlers = [loghandler] return logger def set_config(self, config): """Set `config` property from global import config and make implied changes. """ # FIXME: Maybe this function should not exist and should instead # provide "decision wrappers" like "should_resume()", etc. iconfig = dict(config) self.config = iconfig # Incremental and progress are mutually exclusive. if iconfig['incremental']: iconfig['resume'] = False # When based on a query instead of directories, never # save progress or try to resume. if self.query is not None: iconfig['resume'] = False iconfig['incremental'] = False # Copy, move, and link are mutually exclusive. if iconfig['move']: iconfig['copy'] = False iconfig['link'] = False elif iconfig['link']: iconfig['copy'] = False iconfig['move'] = False # Only delete when copying. if not iconfig['copy']: iconfig['delete'] = False self.want_resume = config['resume'].as_choice([True, False, 'ask']) def tag_log(self, status, paths): """Log a message about a given album to the importer log. The status should reflect the reason the album couldn't be tagged. """ self.logger.info(u'{0} {1}', status, displayable_path(paths)) def log_choice(self, task, duplicate=False): """Logs the task's current choice if it should be logged. If ``duplicate``, then this is a secondary choice after a duplicate was detected and a decision was made. """ paths = task.paths if duplicate: # Duplicate: log all three choices (skip, keep both, and trump). 
if task.should_remove_duplicates: self.tag_log('duplicate-replace', paths) elif task.choice_flag in (action.ASIS, action.APPLY): self.tag_log('duplicate-keep', paths) elif task.choice_flag is (action.SKIP): self.tag_log('duplicate-skip', paths) else: # Non-duplicate: log "skip" and "asis" choices. if task.choice_flag is action.ASIS: self.tag_log('asis', paths) elif task.choice_flag is action.SKIP: self.tag_log('skip', paths) def should_resume(self, path): raise NotImplementedError def choose_match(self, task): raise NotImplementedError def resolve_duplicate(self, task, found_duplicates): raise NotImplementedError def choose_item(self, task): raise NotImplementedError def run(self): """Run the import task. """ self.logger.info(u'import started {0}', time.asctime()) self.set_config(config['import']) # Set up the pipeline. if self.query is None: stages = [read_tasks(self)] else: stages = [query_tasks(self)] # In pretend mode, just log what would otherwise be imported. if self.config['pretend']: stages += [log_files(self)] else: if self.config['group_albums'] and \ not self.config['singletons']: # Split directory tasks into one task for each album. stages += [group_albums(self)] if self.config['autotag']: stages += [lookup_candidates(self), user_query(self)] else: stages += [import_asis(self)] stages += [apply_choices(self)] # Plugin stages. for stage_func in plugins.import_stages(): stages.append(plugin_stage(self, stage_func)) stages += [manipulate_files(self)] pl = pipeline.Pipeline(stages) # Run the pipeline. plugins.send('import_begin', session=self) try: if config['threaded']: pl.run_parallel(QUEUE_SIZE) else: pl.run_sequential() except ImportAbort: # User aborted operation. Silently stop. pass # Incremental and resumed imports def already_imported(self, toppath, paths): """Returns true if the files belonging to this task have already been imported in a previous session. """ if self.is_resuming(toppath) \ and all(map(lambda p: progress_element(toppath, p), paths)): return True if self.config['incremental'] \ and tuple(paths) in self.history_dirs: return True return False @property def history_dirs(self): if not hasattr(self, '_history_dirs'): self._history_dirs = history_get() return self._history_dirs def is_resuming(self, toppath): """Return `True` if user wants to resume import of this path. You have to call `ask_resume` first to determine the return value. """ return self._is_resuming.get(toppath, False) def ask_resume(self, toppath): """If import of `toppath` was aborted in an earlier session, ask user if she wants to resume the import. Determines the return value of `is_resuming(toppath)`. """ if self.want_resume and has_progress(toppath): # Either accept immediately or prompt for input to decide. if self.want_resume is True or \ self.should_resume(toppath): log.warn(u'Resuming interrupted import of {0}', util.displayable_path(toppath)) self._is_resuming[toppath] = True else: # Clear progress; we're starting from the top. progress_reset(toppath) # The importer task class. class BaseImportTask(object): """An abstract base class for importer tasks. Tasks flow through the importer pipeline. Each stage can update them. """ def __init__(self, toppath, paths, items): """Create a task. The primary fields that define a task are: * `toppath`: The user-specified base directory that contains the music for this task. If the task has *no* user-specified base (for example, when importing based on an -L query), this can be None. This is used for tracking progress and history. 
* `paths`: A list of *specific* paths where the music for this task came from. These paths can be directories, when their entire contents are being imported, or files, when the task comprises individual tracks. This is used for progress/history tracking and for displaying the task to the user. * `items`: A list of `Item` objects representing the music being imported. These fields should not change after initialization. """ self.toppath = toppath self.paths = paths self.items = items class ImportTask(BaseImportTask): """Represents a single set of items to be imported along with its intermediate state. May represent an album or a single item. The import session and stages call the following methods in the given order. * `lookup_candidates()` Sets the `common_artist`, `common_album`, `candidates`, and `rec` attributes. `candidates` is a list of `AlbumMatch` objects. * `choose_match()` Uses the session to set the `match` attribute from the `candidates` list. * `find_duplicates()` Returns a list of albums from `lib` with the same artist and album name as the task. * `apply_metadata()` Sets the attributes of the items from the task's `match` attribute. * `add()` Add the imported items and album to the database. * `manipulate_files()` Copy, move, and write files depending on the session configuration. * `finalize()` Update the import progress and cleanup the file system. """ def __init__(self, toppath, paths, items): super(ImportTask, self).__init__(toppath, paths, items) self.choice_flag = None self.cur_album = None self.cur_artist = None self.candidates = [] self.rec = None self.should_remove_duplicates = False self.is_album = True def set_choice(self, choice): """Given an AlbumMatch or TrackMatch object or an action constant, indicates that an action has been selected for this task. """ # Not part of the task structure: assert choice not in (action.MANUAL, action.MANUAL_ID) assert choice != action.APPLY # Only used internally. if choice in (action.SKIP, action.ASIS, action.TRACKS, action.ALBUMS): self.choice_flag = choice self.match = None else: self.choice_flag = action.APPLY # Implicit choice. self.match = choice def save_progress(self): """Updates the progress state to indicate that this album has finished. """ if self.toppath: progress_add(self.toppath, *self.paths) def save_history(self): """Save the directory in the history for incremental imports. """ if self.paths: history_add(self.paths) # Logical decisions. @property def apply(self): return self.choice_flag == action.APPLY @property def skip(self): return self.choice_flag == action.SKIP # Convenient data. def chosen_ident(self): """Returns identifying metadata about the current choice. For albums, this is an (artist, album) pair. For items, this is (artist, title). May only be called when the choice flag is ASIS (in which case the data comes from the files' current metadata) or APPLY (data comes from the choice). """ if self.choice_flag is action.ASIS: return (self.cur_artist, self.cur_album) elif self.choice_flag is action.APPLY: return (self.match.info.artist, self.match.info.album) def imported_items(self): """Return a list of Items that should be added to the library. If the tasks applies an album match the method only returns the matched items. """ if self.choice_flag == action.ASIS: return list(self.items) elif self.choice_flag == action.APPLY: return self.match.mapping.keys() else: assert False def apply_metadata(self): """Copy metadata from match info to the items. 
""" autotag.apply_metadata(self.match.info, self.match.mapping) def duplicate_items(self, lib): duplicate_items = [] for album in self.find_duplicates(lib): duplicate_items += album.items() return duplicate_items def remove_duplicates(self, lib): duplicate_items = self.duplicate_items(lib) log.debug(u'removing {0} old duplicated items', len(duplicate_items)) for item in duplicate_items: item.remove() if lib.directory in util.ancestry(item.path): log.debug(u'deleting duplicate {0}', util.displayable_path(item.path)) util.remove(item.path) util.prune_dirs(os.path.dirname(item.path), lib.directory) def finalize(self, session): """Save progress, clean up files, and emit plugin event. """ # Update progress. if session.want_resume: self.save_progress() if session.config['incremental']: self.save_history() self.cleanup(copy=session.config['copy'], delete=session.config['delete'], move=session.config['move']) if not self.skip: self._emit_imported(session.lib) def cleanup(self, copy=False, delete=False, move=False): """Remove and prune imported paths. """ # Do not delete any files or prune directories when skipping. if self.skip: return items = self.imported_items() # When copying and deleting originals, delete old files. if copy and delete: new_paths = [os.path.realpath(item.path) for item in items] for old_path in self.old_paths: # Only delete files that were actually copied. if old_path not in new_paths: util.remove(syspath(old_path), False) self.prune(old_path) # When moving, prune empty directories containing the original files. elif move: for old_path in self.old_paths: self.prune(old_path) def _emit_imported(self, lib): plugins.send('album_imported', lib=lib, album=self.album) def handle_created(self, session): """Send the `import_task_created` event for this task. Return a list of tasks that should continue through the pipeline. By default, this is a list containing only the task itself, but plugins can replace the task with new ones. """ tasks = plugins.send('import_task_created', session=session, task=self) if not tasks: tasks = [self] else: # The plugins gave us a list of lists of tasks. Flatten it. tasks = [t for inner in tasks for t in inner] return tasks def lookup_candidates(self): """Retrieve and store candidates for this album. """ artist, album, candidates, recommendation = \ autotag.tag_album(self.items) self.cur_artist = artist self.cur_album = album self.candidates = candidates self.rec = recommendation def find_duplicates(self, lib): """Return a list of albums from `lib` with the same artist and album name as the task. """ artist, album = self.chosen_ident() if artist is None: # As-is import with no artist. Skip check. return [] duplicates = [] task_paths = set(i.path for i in self.items if i) duplicate_query = dbcore.AndQuery(( dbcore.MatchQuery('albumartist', artist), dbcore.MatchQuery('album', album), )) for album in lib.albums(duplicate_query): # Check whether the album is identical in contents, in which # case it is not a duplicate (will be replaced). album_paths = set(i.path for i in album.items()) if album_paths != task_paths: duplicates.append(album) return duplicates def align_album_level_fields(self): """Make the some album fields equal across `self.items` """ changes = {} if self.choice_flag == action.ASIS: # Taking metadata "as-is". Guess whether this album is VA. 
plur_albumartist, freq = util.plurality( [i.albumartist or i.artist for i in self.items] ) if freq == len(self.items) or \ (freq > 1 and float(freq) / len(self.items) >= SINGLE_ARTIST_THRESH): # Single-artist album. changes['albumartist'] = plur_albumartist changes['comp'] = False else: # VA. changes['albumartist'] = VARIOUS_ARTISTS changes['comp'] = True elif self.choice_flag == action.APPLY: # Applying autotagged metadata. Just get AA from the first # item. if not self.items[0].albumartist: changes['albumartist'] = self.items[0].artist if not self.items[0].mb_albumartistid: changes['mb_albumartistid'] = self.items[0].mb_artistid # Apply new metadata. for item in self.items: item.update(changes) def manipulate_files(self, move=False, copy=False, write=False, link=False, session=None): items = self.imported_items() # Save the original paths of all items for deletion and pruning # in the next step (finalization). self.old_paths = [item.path for item in items] for item in items: if move or copy or link: # In copy and link modes, treat re-imports specially: # move in-library files. (Out-of-library files are # copied/moved as usual). old_path = item.path if (copy or link) and self.replaced_items[item] and \ session.lib.directory in util.ancestry(old_path): item.move() # We moved the item, so remove the # now-nonexistent file from old_paths. self.old_paths.remove(old_path) else: # A normal import. Just copy files and keep track of # old paths. item.move(copy, link) if write and self.apply: item.try_write() with session.lib.transaction(): for item in self.imported_items(): item.store() plugins.send('import_task_files', session=session, task=self) def add(self, lib): """Add the items as an album to the library and remove replaced items. """ self.align_album_level_fields() with lib.transaction(): self.record_replaced(lib) self.remove_replaced(lib) self.album = lib.add_album(self.imported_items()) self.reimport_metadata(lib) def record_replaced(self, lib): """Records the replaced items and albums in the `replaced_items` and `replaced_albums` dictionaries. """ self.replaced_items = defaultdict(list) self.replaced_albums = defaultdict(list) replaced_album_ids = set() for item in self.imported_items(): dup_items = list(lib.items( dbcore.query.BytesQuery('path', item.path) )) self.replaced_items[item] = dup_items for dup_item in dup_items: if (not dup_item.album_id or dup_item.album_id in replaced_album_ids): continue replaced_album = dup_item.get_album() if replaced_album: replaced_album_ids.add(dup_item.album_id) self.replaced_albums[replaced_album.path] = replaced_album def reimport_metadata(self, lib): """For reimports, preserves metadata for reimported items and albums. 
""" if self.is_album: replaced_album = self.replaced_albums.get(self.album.path) if replaced_album: self.album.added = replaced_album.added self.album.update(replaced_album._values_flex) self.album.store() log.debug( u'Reimported album: added {0}, flexible ' u'attributes {1} from album {2} for {3}', self.album.added, replaced_album._values_flex.keys(), replaced_album.id, displayable_path(self.album.path) ) for item in self.imported_items(): dup_items = self.replaced_items[item] for dup_item in dup_items: if dup_item.added and dup_item.added != item.added: item.added = dup_item.added log.debug( u'Reimported item added {0} ' u'from item {1} for {2}', item.added, dup_item.id, displayable_path(item.path) ) item.update(dup_item._values_flex) log.debug( u'Reimported item flexible attributes {0} ' u'from item {1} for {2}', dup_item._values_flex.keys(), dup_item.id, displayable_path(item.path) ) item.store() def remove_replaced(self, lib): """Removes all the items from the library that have the same path as an item from this task. """ for item in self.imported_items(): for dup_item in self.replaced_items[item]: log.debug(u'Replacing item {0}: {1}', dup_item.id, displayable_path(item.path)) dup_item.remove() log.debug(u'{0} of {1} items replaced', sum(bool(l) for l in self.replaced_items.values()), len(self.imported_items())) def choose_match(self, session): """Ask the session which match should apply and apply it. """ choice = session.choose_match(self) self.set_choice(choice) session.log_choice(self) def reload(self): """Reload albums and items from the database. """ for item in self.imported_items(): item.load() self.album.load() # Utilities. def prune(self, filename): """Prune any empty directories above the given file. If this task has no `toppath` or the file path provided is not within the `toppath`, then this function has no effect. Similarly, if the file still exists, no pruning is performed, so it's safe to call when the file in question may not have been removed. """ if self.toppath and not os.path.exists(filename): util.prune_dirs(os.path.dirname(filename), self.toppath, clutter=config['clutter'].as_str_seq()) class SingletonImportTask(ImportTask): """ImportTask for a single track that is not associated to an album. """ def __init__(self, toppath, item): super(SingletonImportTask, self).__init__(toppath, [item.path], [item]) self.item = item self.is_album = False self.paths = [item.path] def chosen_ident(self): assert self.choice_flag in (action.ASIS, action.APPLY) if self.choice_flag is action.ASIS: return (self.item.artist, self.item.title) elif self.choice_flag is action.APPLY: return (self.match.info.artist, self.match.info.title) def imported_items(self): return [self.item] def apply_metadata(self): autotag.apply_item_metadata(self.item, self.match.info) def _emit_imported(self, lib): for item in self.imported_items(): plugins.send('item_imported', lib=lib, item=item) def lookup_candidates(self): candidates, recommendation = autotag.tag_item(self.item) self.candidates = candidates self.rec = recommendation def find_duplicates(self, lib): """Return a list of items from `lib` that have the same artist and title as the task. """ artist, title = self.chosen_ident() found_items = [] query = dbcore.AndQuery(( dbcore.MatchQuery('artist', artist), dbcore.MatchQuery('title', title), )) for other_item in lib.items(query): # Existing items not considered duplicates. 
if other_item.path != self.item.path: found_items.append(other_item) return found_items duplicate_items = find_duplicates def add(self, lib): with lib.transaction(): self.record_replaced(lib) self.remove_replaced(lib) lib.add(self.item) self.reimport_metadata(lib) def infer_album_fields(self): raise NotImplementedError def choose_match(self, session): """Ask the session which match should apply and apply it. """ choice = session.choose_item(self) self.set_choice(choice) session.log_choice(self) def reload(self): self.item.load() # FIXME The inheritance relationships are inverted. This is why there # are so many methods which pass. More responsibility should be delegated to # the BaseImportTask class. class SentinelImportTask(ImportTask): """A sentinel task marks the progress of an import and does not import any items itself. If only `toppath` is set the task indicates the end of a top-level directory import. If the `paths` argument is also given, the task indicates the progress in the `toppath` import. """ def __init__(self, toppath, paths): super(SentinelImportTask, self).__init__(toppath, paths, ()) # TODO Remove the remaining attributes eventually self.should_remove_duplicates = False self.is_album = True self.choice_flag = None def save_history(self): pass def save_progress(self): if self.paths is None: # "Done" sentinel. progress_reset(self.toppath) else: # "Directory progress" sentinel for singletons progress_add(self.toppath, *self.paths) def skip(self): return True def set_choice(self, choice): raise NotImplementedError def cleanup(self, **kwargs): pass def _emit_imported(self, session): pass class ArchiveImportTask(SentinelImportTask): """An import task that represents the processing of an archive. `toppath` must be a `zip`, `tar`, or `rar` archive. Archive tasks serve two purposes: - First, it will unarchive the files to a temporary directory and return it. The client should read tasks from the resulting directory and send them through the pipeline. - Second, it will clean up the temporary directory when it proceeds through the pipeline. The client should send the archive task after sending the rest of the music tasks to make this work. """ def __init__(self, toppath): super(ArchiveImportTask, self).__init__(toppath, ()) self.extracted = False @classmethod def is_archive(cls, path): """Returns true if the given path points to an archive that can be handled. """ if not os.path.isfile(path): return False for path_test, _ in cls.handlers(): if path_test(path): return True return False @classmethod def handlers(cls): """Returns a list of archive handlers. Each handler is a `(path_test, ArchiveClass)` tuple. `path_test` is a function that returns `True` if the given path can be handled by `ArchiveClass`. `ArchiveClass` is a class that implements the same interface as `tarfile.TarFile`. """ if not hasattr(cls, '_handlers'): cls._handlers = [] from zipfile import is_zipfile, ZipFile cls._handlers.append((is_zipfile, ZipFile)) from tarfile import is_tarfile, TarFile cls._handlers.append((is_tarfile, TarFile)) try: from rarfile import is_rarfile, RarFile except ImportError: pass else: cls._handlers.append((is_rarfile, RarFile)) return cls._handlers def cleanup(self, **kwargs): """Removes the temporary directory the archive was extracted to. """ if self.extracted: log.debug(u'Removing extracted directory: {0}', displayable_path(self.toppath)) shutil.rmtree(self.toppath) def extract(self): """Extracts the archive to a temporary directory and sets `toppath` to that directory. 
""" for path_test, handler_class in self.handlers(): if path_test(self.toppath): break try: extract_to = mkdtemp() archive = handler_class(self.toppath, mode='r') archive.extractall(extract_to) finally: archive.close() self.extracted = True self.toppath = extract_to class ImportTaskFactory(object): """Generate album and singleton import tasks for all media files indicated by a path. """ def __init__(self, toppath, session): """Create a new task factory. `toppath` is the user-specified path to search for music to import. `session` is the `ImportSession`, which controls how tasks are read from the directory. """ self.toppath = toppath self.session = session self.skipped = 0 # Skipped due to incremental/resume. self.imported = 0 # "Real" tasks created. self.is_archive = ArchiveImportTask.is_archive(syspath(toppath)) def tasks(self): """Yield all import tasks for music found in the user-specified path `self.toppath`. Any necessary sentinel tasks are also produced. During generation, update `self.skipped` and `self.imported` with the number of tasks that were not produced (due to incremental mode or resumed imports) and the number of concrete tasks actually produced, respectively. If `self.toppath` is an archive, it is adjusted to point to the extracted data. """ # Check whether this is an archive. if self.is_archive: archive_task = self.unarchive() if not archive_task: return # Search for music in the directory. for dirs, paths in self.paths(): if self.session.config['singletons']: for path in paths: tasks = self._create(self.singleton(path)) for task in tasks: yield task yield self.sentinel(dirs) else: tasks = self._create(self.album(paths, dirs)) for task in tasks: yield task # Produce the final sentinel for this toppath to indicate that # it is finished. This is usually just a SentinelImportTask, but # for archive imports, send the archive task instead (to remove # the extracted directory). if self.is_archive: yield archive_task else: yield self.sentinel() def _create(self, task): """Handle a new task to be emitted by the factory. Emit the `import_task_created` event and increment the `imported` count if the task is not skipped. Return the same task. If `task` is None, do nothing. """ if task: tasks = task.handle_created(self.session) self.imported += len(tasks) return tasks return [] def paths(self): """Walk `self.toppath` and yield `(dirs, files)` pairs where `files` are individual music files and `dirs` the set of containing directories where the music was found. This can either be a recursive search in the ordinary case, a single track when `toppath` is a file, a single directory in `flat` mode. """ if not os.path.isdir(syspath(self.toppath)): yield [self.toppath], [self.toppath] elif self.session.config['flat']: paths = [] for dirs, paths_in_dir in albums_in_dir(self.toppath): paths += paths_in_dir yield [self.toppath], paths else: for dirs, paths in albums_in_dir(self.toppath): yield dirs, paths def singleton(self, path): """Return a `SingletonImportTask` for the music file. """ if self.session.already_imported(self.toppath, [path]): log.debug(u'Skipping previously-imported path: {0}', displayable_path(path)) self.skipped += 1 return None item = self.read_item(path) if item: return SingletonImportTask(self.toppath, item) else: return None def album(self, paths, dirs=None): """Return a `ImportTask` with all media files from paths. `dirs` is a list of parent directories used to record already imported albums. 
""" if not paths: return None if dirs is None: dirs = list(set(os.path.dirname(p) for p in paths)) if self.session.already_imported(self.toppath, dirs): log.debug(u'Skipping previously-imported path: {0}', displayable_path(dirs)) self.skipped += 1 return None items = map(self.read_item, paths) items = [item for item in items if item] if items: return ImportTask(self.toppath, dirs, items) else: return None def sentinel(self, paths=None): """Return a `SentinelImportTask` indicating the end of a top-level directory import. """ return SentinelImportTask(self.toppath, paths) def unarchive(self): """Extract the archive for this `toppath`. Extract the archive to a new directory, adjust `toppath` to point to the extracted directory, and return an `ArchiveImportTask`. If extraction fails, return None. """ assert self.is_archive if not (self.session.config['move'] or self.session.config['copy']): log.warn(u"Archive importing requires either " "'copy' or 'move' to be enabled.") return log.debug(u'Extracting archive: {0}', displayable_path(self.toppath)) archive_task = ArchiveImportTask(self.toppath) try: archive_task.extract() except Exception as exc: log.error(u'extraction failed: {0}', exc) return # Now read albums from the extracted directory. self.toppath = archive_task.toppath log.debug(u'Archive extracted to: {0}', self.toppath) return archive_task def read_item(self, path): """Return an `Item` read from the path. If an item cannot be read, return `None` instead and log an error. """ try: return library.Item.from_path(path) except library.ReadError as exc: if isinstance(exc.reason, mediafile.FileTypeError): # Silently ignore non-music files. pass elif isinstance(exc.reason, mediafile.UnreadableFileError): log.warn(u'unreadable file: {0}', displayable_path(path)) else: log.error(u'error reading {0}: {1}', displayable_path(path), exc) # Full-album pipeline stages. def read_tasks(session): """A generator yielding all the albums (as ImportTask objects) found in the user-specified list of paths. In the case of a singleton import, yields single-item tasks instead. """ skipped = 0 for toppath in session.paths: # Check whether we need to resume the import. session.ask_resume(toppath) # Generate tasks. task_factory = ImportTaskFactory(toppath, session) for t in task_factory.tasks(): yield t skipped += task_factory.skipped if not task_factory.imported: log.warn(u'No files imported from {0}', displayable_path(toppath)) # Show skipped directories (due to incremental/resume). if skipped: log.info(u'Skipped {0} paths.', skipped) def query_tasks(session): """A generator that works as a drop-in-replacement for read_tasks. Instead of finding files from the filesystem, a query is used to match items from the library. """ if session.config['singletons']: # Search for items. for item in session.lib.items(session.query): task = SingletonImportTask(None, item) for task in task.handle_created(session): yield task else: # Search for albums. for album in session.lib.albums(session.query): log.debug(u'yielding album {0}: {1} - {2}', album.id, album.albumartist, album.album) items = list(album.items()) # Clear IDs from re-tagged items so they appear "fresh" when # we add them back to the library. for item in items: item.id = None item.album_id = None task = ImportTask(None, [album.item_dir()], items) for task in task.handle_created(session): yield task @pipeline.mutator_stage def lookup_candidates(session, task): """A coroutine for performing the initial MusicBrainz lookup for an album. 
It accepts lists of Items and yields (items, cur_artist, cur_album, candidates, rec) tuples. If no match is found, all of the yielded parameters (except items) are None. """ if task.skip: # FIXME This gets duplicated a lot. We need a better # abstraction. return plugins.send('import_task_start', session=session, task=task) log.debug(u'Looking up: {0}', displayable_path(task.paths)) task.lookup_candidates() @pipeline.stage def user_query(session, task): """A coroutine for interfacing with the user about the tagging process. The coroutine accepts an ImportTask objects. It uses the session's `choose_match` method to determine the `action` for this task. Depending on the action additional stages are executed and the processed task is yielded. It emits the ``import_task_choice`` event for plugins. Plugins have acces to the choice via the ``taks.choice_flag`` property and may choose to change it. """ if task.skip: return task # Ask the user for a choice. task.choose_match(session) plugins.send('import_task_choice', session=session, task=task) # As-tracks: transition to singleton workflow. if task.choice_flag is action.TRACKS: # Set up a little pipeline for dealing with the singletons. def emitter(task): for item in task.items: task = SingletonImportTask(task.toppath, item) for new_task in task.handle_created(session): yield new_task yield SentinelImportTask(task.toppath, task.paths) ipl = pipeline.Pipeline([ emitter(task), lookup_candidates(session), user_query(session), ]) return pipeline.multiple(ipl.pull()) # As albums: group items by albums and create task for each album if task.choice_flag is action.ALBUMS: ipl = pipeline.Pipeline([ iter([task]), group_albums(session), lookup_candidates(session), user_query(session) ]) return pipeline.multiple(ipl.pull()) resolve_duplicates(session, task) return task def resolve_duplicates(session, task): """Check if a task conflicts with items or albums already imported and ask the session to resolve this. """ if task.choice_flag in (action.ASIS, action.APPLY): ident = task.chosen_ident() found_duplicates = task.find_duplicates(session.lib) if ident in session.seen_idents or found_duplicates: session.resolve_duplicate(task, found_duplicates) session.log_choice(task, True) session.seen_idents.add(ident) @pipeline.mutator_stage def import_asis(session, task): """Select the `action.ASIS` choice for all tasks. This stage replaces the initial_lookup and user_query stages when the importer is run without autotagging. """ if task.skip: return log.info('{}', displayable_path(task.paths)) task.set_choice(action.ASIS) @pipeline.mutator_stage def apply_choices(session, task): """A coroutine for applying changes to albums and singletons during the autotag process. """ if task.skip: return # Change metadata. if task.apply: task.apply_metadata() plugins.send('import_task_apply', session=session, task=task) task.add(session.lib) @pipeline.mutator_stage def plugin_stage(session, func, task): """A coroutine (pipeline stage) that calls the given function with each non-skipped import task. These stages occur between applying metadata changes and moving/copying/writing files. """ if task.skip: return func(session, task) # Stage may modify DB, so re-load cached item data. # FIXME Importer plugins should not modify the database but instead # the albums and items attached to tasks. 
task.reload() @pipeline.stage def manipulate_files(session, task): """A coroutine (pipeline stage) that performs necessary file manipulations *after* items have been added to the library and finalizes each task. """ if not task.skip: if task.should_remove_duplicates: task.remove_duplicates(session.lib) task.manipulate_files( move=session.config['move'], copy=session.config['copy'], write=session.config['write'], link=session.config['link'], session=session, ) # Progress, cleanup, and event. task.finalize(session) @pipeline.stage def log_files(session, task): """A coroutine (pipeline stage) to log each file to be imported. """ if isinstance(task, SingletonImportTask): log.info(u'Singleton: {0}', displayable_path(task.item['path'])) elif task.items: log.info(u'Album: {0}', displayable_path(task.paths[0])) for item in task.items: log.info(u' {0}', displayable_path(item['path'])) def group_albums(session): """A pipeline stage that groups the items of each task into albums using their metadata. Groups are identified using their artist and album fields. The pipeline stage emits new album tasks for each discovered group. """ def group(item): return (item.albumartist or item.artist, item.album) task = None while True: task = yield task if task.skip: continue tasks = [] sorted_items = sorted(task.items, key=group) for _, items in itertools.groupby(sorted_items, group): items = list(items) task = ImportTask(task.toppath, [i.path for i in items], items) tasks += task.handle_created(session) tasks.append(SentinelImportTask(task.toppath, task.paths)) task = pipeline.multiple(tasks) MULTIDISC_MARKERS = (r'dis[ck]', r'cd') MULTIDISC_PAT_FMT = r'^(.*%s[\W_]*)\d' def albums_in_dir(path): """Recursively searches the given directory and returns an iterable of (paths, items) where paths is a list of directories and items is a list of Items that is probably an album. Specifically, any folder containing any media files is an album. """ collapse_pat = collapse_paths = collapse_items = None ignore = config['ignore'].as_str_seq() for root, dirs, files in sorted_walk(path, ignore=ignore, logger=log): items = [os.path.join(root, f) for f in files] # If we're currently collapsing the constituent directories in a # multi-disc album, check whether we should continue collapsing # and add the current directory. If so, just add the directory # and move on to the next directory. If not, stop collapsing. if collapse_paths: if (not collapse_pat and collapse_paths[0] in ancestry(root)) or \ (collapse_pat and collapse_pat.match(os.path.basename(root))): # Still collapsing. collapse_paths.append(root) collapse_items += items continue else: # Collapse finished. Yield the collapsed directory and # proceed to process the current one. if collapse_items: yield collapse_paths, collapse_items collapse_pat = collapse_paths = collapse_items = None # Check whether this directory looks like the *first* directory # in a multi-disc sequence. There are two indicators: the file # is named like part of a multi-disc sequence (e.g., "Title Disc # 1") or it contains no items but only directories that are # named in this way. start_collapsing = False for marker in MULTIDISC_MARKERS: marker_pat = re.compile(MULTIDISC_PAT_FMT % marker, re.I) match = marker_pat.match(os.path.basename(root)) # Is this directory the root of a nested multi-disc album? if dirs and not items: # Check whether all subdirectories have the same prefix. 
start_collapsing = True subdir_pat = None for subdir in dirs: # The first directory dictates the pattern for # the remaining directories. if not subdir_pat: match = marker_pat.match(subdir) if match: subdir_pat = re.compile( br'^%s\d' % re.escape(match.group(1)), re.I ) else: start_collapsing = False break # Subsequent directories must match the pattern. elif not subdir_pat.match(subdir): start_collapsing = False break # If all subdirectories match, don't check other # markers. if start_collapsing: break # Is this directory the first in a flattened multi-disc album? elif match: start_collapsing = True # Set the current pattern to match directories with the same # prefix as this one, followed by a digit. collapse_pat = re.compile( br'^%s\d' % re.escape(match.group(1)), re.I ) break # If either of the above heuristics indicated that this is the # beginning of a multi-disc album, initialize the collapsed # directory and item lists and check the next directory. if start_collapsing: # Start collapsing; continue to the next iteration. collapse_paths = [root] collapse_items = items continue # If it's nonempty, yield it. if items: yield [root], items # Clear out any unfinished collapse. if collapse_paths and collapse_items: yield collapse_paths, collapse_items
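A minimal standalone sketch of the multi-disc heuristic that albums_in_dir applies above, assuming only the MULTIDISC_MARKERS and MULTIDISC_PAT_FMT constants shown in the code; the directory names are made-up examples, not beets fixtures.

import re

# Constants copied from the importer code above; the helper and sample
# directory names below are illustrative only.
MULTIDISC_MARKERS = (r'dis[ck]', r'cd')
MULTIDISC_PAT_FMT = r'^(.*%s[\W_]*)\d'

def multidisc_prefix(dirname):
    """Return the shared prefix if ``dirname`` looks like one disc of a set."""
    for marker in MULTIDISC_MARKERS:
        match = re.match(MULTIDISC_PAT_FMT % marker, dirname, re.I)
        if match:
            return match.group(1)
    return None

for name in ('Album Disc 1', 'Album CD2', 'Bonus Material'):
    print(name, '->', multidisc_prefix(name))
# 'Album Disc 1' -> 'Album Disc '; 'Album CD2' -> 'Album CD'; the last prints None.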
mit
8,966,345,090,055,130,000
-6,078,753,196,533,049,000
34.069124
79
0.593711
false
alexbruy/QGIS
python/plugins/processing/algs/qgis/RectanglesOvalsDiamondsVariable.py
1
12223
# -*- coding: utf-8 -*- """ *************************************************************************** RectanglesOvalsDiamondsVariable.py --------------------- Date : April 2016 Copyright : (C) 2016 by Alexander Bruy Email : alexander dot bruy at gmail dot com *************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * *************************************************************************** """ __author__ = 'Alexander Bruy' __date__ = 'August 2012' __copyright__ = '(C) 2012, Victor Olaya' # This will get replaced with a git SHA1 when you do a git archive323 __revision__ = '$Format:%H$' import os import math from qgis.PyQt.QtGui import QIcon from qgis.core import QgsWkbTypes, QgsFeature, QgsGeometry, QgsPoint from processing.core.GeoAlgorithm import GeoAlgorithm from processing.core.ProcessingLog import ProcessingLog from processing.core.GeoAlgorithmExecutionException import GeoAlgorithmExecutionException from processing.core.parameters import ParameterVector from processing.core.parameters import ParameterSelection from processing.core.parameters import ParameterTableField from processing.core.parameters import ParameterNumber from processing.core.outputs import OutputVector from processing.tools import dataobjects, vector class RectanglesOvalsDiamondsVariable(GeoAlgorithm): INPUT_LAYER = 'INPUT_LAYER' SHAPE = 'SHAPE' WIDTH = 'WIDTH' HEIGHT = 'HEIGHT' ROTATION = 'ROTATION' SEGMENTS = 'SEGMENTS' OUTPUT_LAYER = 'OUTPUT_LAYER' def defineCharacteristics(self): self.name, self.i18n_name = self.trAlgorithm('Rectangles, ovals, diamonds (variable)') self.group, self.i18n_group = self.trAlgorithm('Vector geometry tools') self.shapes = [self.tr('Rectangles'), self.tr('Diamonds'), self.tr('Ovals')] self.addParameter(ParameterVector(self.INPUT_LAYER, self.tr('Input layer'), [ParameterVector.VECTOR_TYPE_POINT])) self.addParameter(ParameterSelection(self.SHAPE, self.tr('Buffer shape'), self.shapes)) self.addParameter(ParameterTableField(self.WIDTH, self.tr('Width field'), self.INPUT_LAYER, ParameterTableField.DATA_TYPE_NUMBER)) self.addParameter(ParameterTableField(self.HEIGHT, self.tr('Height field'), self.INPUT_LAYER, ParameterTableField.DATA_TYPE_NUMBER)) self.addParameter(ParameterTableField(self.ROTATION, self.tr('Rotation field'), self.INPUT_LAYER, ParameterTableField.DATA_TYPE_NUMBER, True)) self.addParameter(ParameterNumber(self.SEGMENTS, self.tr('Number of segments'), 1, 999999999, 36)) self.addOutput(OutputVector(self.OUTPUT_LAYER, self.tr('Output'))) def processAlgorithm(self, progress): layer = dataobjects.getObjectFromUri( self.getParameterValue(self.INPUT_LAYER)) shape = self.getParameterValue(self.SHAPE) width = self.getParameterValue(self.WIDTH) height = self.getParameterValue(self.HEIGHT) rotation = self.getParameterValue(self.ROTATION) segments = self.getParameterValue(self.SEGMENTS) writer = self.getOutputFromName( self.OUTPUT_LAYER).getVectorWriter( layer.fields().toList(), QgsWkbTypes.Polygon, layer.crs()) outFeat = QgsFeature() features = vector.features(layer) total = 100.0 / len(features) if shape == 0: self.rectangles(writer, features, width, height, rotation) elif shape == 1: self.diamonds(writer, features, width, height, rotation) else: self.ovals(writer, features, width, height, rotation, 
segments) del writer def rectangles(self, writer, features, width, height, rotation): ft = QgsFeature() if rotation is not None: for current, feat in enumerate(features): w = feat[width] h = feat[height] angle = feat[rotation] if not w or not h or not angle: ProcessingLog.addToLog(ProcessingLog.LOG_WARNING, self.tr('Feature {} has empty ' 'width, height or angle. ' 'Skipping...'.format(feat.id()))) continue xOffset = w / 2.0 yOffset = h / 2.0 phi = angle * math.pi / 180 point = feat.geometry().asPoint() x = point.x() y = point.y() points = [(-xOffset, -yOffset), (-xOffset, yOffset), (xOffset, yOffset), (xOffset, -yOffset)] polygon = [[QgsPoint(i[0] * math.cos(phi) + i[1] * math.sin(phi) + x, -i[0] * math.sin(phi) + i[1] * math.cos(phi) + y) for i in points]] ft.setGeometry(QgsGeometry.fromPolygon(polygon)) ft.setAttributes(feat.attributes()) writer.addFeature(ft) else: for current, feat in enumerate(features): w = feat[width] h = feat[height] if not w or not h: ProcessingLog.addToLog(ProcessingLog.LOG_WARNING, self.tr('Feature {} has empty ' 'width or height. ' 'Skipping...'.format(feat.id()))) continue xOffset = w / 2.0 yOffset = h / 2.0 point = feat.geometry().asPoint() x = point.x() y = point.y() points = [(-xOffset, -yOffset), (-xOffset, yOffset), (xOffset, yOffset), (xOffset, -yOffset)] polygon = [[QgsPoint(i[0] + x, i[1] + y) for i in points]] ft.setGeometry(QgsGeometry.fromPolygon(polygon)) ft.setAttributes(feat.attributes()) writer.addFeature(ft) def diamonds(self, writer, features, width, height, rotation): ft = QgsFeature() if rotation is not None: for current, feat in enumerate(features): w = feat[width] h = feat[height] angle = feat[rotation] if not w or not h or not angle: ProcessingLog.addToLog(ProcessingLog.LOG_WARNING, self.tr('Feature {} has empty ' 'width, height or angle. ' 'Skipping...'.format(feat.id()))) continue xOffset = w / 2.0 yOffset = h / 2.0 phi = angle * math.pi / 180 point = feat.geometry().asPoint() x = point.x() y = point.y() points = [(0.0, -yOffset), (-xOffset, 0.0), (0.0, yOffset), (xOffset, 0.0)] polygon = [[QgsPoint(i[0] * math.cos(phi) + i[1] * math.sin(phi) + x, -i[0] * math.sin(phi) + i[1] * math.cos(phi) + y) for i in points]] ft.setGeometry(QgsGeometry.fromPolygon(polygon)) ft.setAttributes(feat.attributes()) writer.addFeature(ft) else: for current, feat in enumerate(features): w = feat[width] h = feat[height] if not w or not h: ProcessingLog.addToLog(ProcessingLog.LOG_WARNING, self.tr('Feature {} has empty ' 'width or height. ' 'Skipping...'.format(feat.id()))) continue xOffset = w / 2.0 yOffset = h / 2.0 point = feat.geometry().asPoint() x = point.x() y = point.y() points = [(0.0, -yOffset), (-xOffset, 0.0), (0.0, yOffset), (xOffset, 0.0)] polygon = [[QgsPoint(i[0] + x, i[1] + y) for i in points]] ft.setGeometry(QgsGeometry.fromPolygon(polygon)) ft.setAttributes(feat.attributes()) writer.addFeature(ft) def ovals(self, writer, features, width, height, rotation, segments): ft = QgsFeature() if rotation is not None: for current, feat in enumerate(features): w = feat[width] h = feat[height] angle = feat[rotation] if not w or not h or not angle: ProcessingLog.addToLog(ProcessingLog.LOG_WARNING, self.tr('Feature {} has empty ' 'width, height or angle. 
' 'Skipping...'.format(feat.id()))) continue xOffset = w / 2.0 yOffset = h / 2.0 phi = angle * math.pi / 180 point = feat.geometry().asPoint() x = point.x() y = point.y() points = [] for t in [(2 * math.pi) / segments * i for i in xrange(segments)]: points.append((xOffset * math.cos(t), yOffset * math.sin(t))) polygon = [[QgsPoint(i[0] * math.cos(phi) + i[1] * math.sin(phi) + x, -i[0] * math.sin(phi) + i[1] * math.cos(phi) + y) for i in points]] ft.setGeometry(QgsGeometry.fromPolygon(polygon)) ft.setAttributes(feat.attributes()) writer.addFeature(ft) else: for current, feat in enumerate(features): w = feat[width] h = feat[height] if not w or not h: ProcessingLog.addToLog(ProcessingLog.LOG_WARNING, self.tr('Feature {} has empty ' 'width or height. ' 'Skipping...'.format(feat.id()))) continue xOffset = w / 2.0 yOffset = h / 2.0 point = feat.geometry().asPoint() x = point.x() y = point.y() points = [] for t in [(2 * math.pi) / segments * i for i in xrange(segments)]: points.append((xOffset * math.cos(t), yOffset * math.sin(t))) polygon = [[QgsPoint(i[0] + x, i[1] + y) for i in points]] ft.setGeometry(QgsGeometry.fromPolygon(polygon)) ft.setAttributes(feat.attributes()) writer.addFeature(ft)
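The rectangles(), diamonds() and ovals() methods above all apply the same planar rotation to each offset before translating it to the feature's point: x' = dx*cos(phi) + dy*sin(phi), y' = -dx*sin(phi) + dy*cos(phi). Below is a pure-Python sketch of that corner computation with no QGIS dependency; the centre, size and angle values are arbitrary examples.

import math

def rectangle_corners(cx, cy, width, height, angle_degrees):
    # Mirrors the offset/rotation arithmetic in rectangles() above;
    # the arguments are illustrative, not taken from a real layer.
    x_off, y_off = width / 2.0, height / 2.0
    phi = math.radians(angle_degrees)
    offsets = [(-x_off, -y_off), (-x_off, y_off), (x_off, y_off), (x_off, -y_off)]
    return [(dx * math.cos(phi) + dy * math.sin(phi) + cx,
             -dx * math.sin(phi) + dy * math.cos(phi) + cy)
            for dx, dy in offsets]

print(rectangle_corners(0.0, 0.0, 4.0, 2.0, 90.0))
# At 90 degrees the 4 x 2 rectangle occupies a 2 x 4 footprint around the origin.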
gpl-2.0
4,627,420,295,768,039,000
-5,272,095,321,783,112,000
42.810036
109
0.462161
false
antonve/s4-project-mooc
lms/djangoapps/bulk_email/migrations/0010_auto__chg_field_optout_course_id__add_field_courseemail_template_name_.py
120
8430
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Changing field 'Optout.course_id' db.alter_column('bulk_email_optout', 'course_id', self.gf('xmodule_django.models.CourseKeyField')(max_length=255)) # Adding field 'CourseEmail.template_name' db.add_column('bulk_email_courseemail', 'template_name', self.gf('django.db.models.fields.CharField')(max_length=255, null=True), keep_default=False) # Adding field 'CourseEmail.from_addr' db.add_column('bulk_email_courseemail', 'from_addr', self.gf('django.db.models.fields.CharField')(max_length=255, null=True), keep_default=False) # Changing field 'CourseEmail.course_id' db.alter_column('bulk_email_courseemail', 'course_id', self.gf('xmodule_django.models.CourseKeyField')(max_length=255)) # Adding field 'CourseEmailTemplate.name' db.add_column('bulk_email_courseemailtemplate', 'name', self.gf('django.db.models.fields.CharField')(max_length=255, unique=True, null=True), keep_default=False) # Changing field 'CourseAuthorization.course_id' db.alter_column('bulk_email_courseauthorization', 'course_id', self.gf('xmodule_django.models.CourseKeyField')(unique=True, max_length=255)) def backwards(self, orm): # Changing field 'Optout.course_id' db.alter_column('bulk_email_optout', 'course_id', self.gf('django.db.models.fields.CharField')(max_length=255)) # Deleting field 'CourseEmail.template_name' db.delete_column('bulk_email_courseemail', 'template_name') # Deleting field 'CourseEmail.from_addr' db.delete_column('bulk_email_courseemail', 'from_addr') # Changing field 'CourseEmail.course_id' db.alter_column('bulk_email_courseemail', 'course_id', self.gf('django.db.models.fields.CharField')(max_length=255)) # Deleting field 'CourseEmailTemplate.name' db.delete_column('bulk_email_courseemailtemplate', 'name') # Changing field 'CourseAuthorization.course_id' db.alter_column('bulk_email_courseauthorization', 'course_id', self.gf('django.db.models.fields.CharField')(max_length=255, unique=True)) models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'bulk_email.courseauthorization': { 'Meta': {'object_name': 'CourseAuthorization'}, 'course_id': ('xmodule_django.models.CourseKeyField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}), 'email_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}) }, 'bulk_email.courseemail': { 'Meta': {'object_name': 'CourseEmail'}, 'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'from_addr': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}), 'html_message': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'sender': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}), 'slug': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), 'subject': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}), 'template_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}), 'text_message': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'to_option': ('django.db.models.fields.CharField', [], {'default': "'myself'", 'max_length': '64'}) }, 'bulk_email.courseemailtemplate': { 'Meta': {'object_name': 'CourseEmailTemplate'}, 'html_template': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'unique': 'True', 'null': 'True'}), 'plain_template': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}) }, 'bulk_email.optout': { 'Meta': {'unique_together': "(('user', 'course_id'),)", 'object_name': 'Optout'}, 'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 
'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) } } complete_apps = ['bulk_email']
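Every field in the South migration above is referenced through self.gf('dotted.path.To.Field'), i.e. the class is looked up from its dotted import path at run time. Below is a hypothetical, simplified sketch of such a lookup; it is not South's actual implementation, and the example field path is shown only for illustration.

import importlib

def resolve_dotted(path):
    # Split 'package.module.ClassName' into module path and attribute,
    # import the module and return the attribute.
    module_path, _, attr = path.rpartition('.')
    return getattr(importlib.import_module(module_path), attr)

# Example usage (requires Django on the path, so left as a comment):
# CharField = resolve_dotted('django.db.models.fields.CharField')
# field = CharField(max_length=255, null=True)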
agpl-3.0
-2,816,542,608,386,154,500
8,395,411,155,169,564,000
66.44
182
0.574733
false
drtuxwang/system-config
bin/fls.py
1
5121
#!/usr/bin/env python3 """ Show full list of files. """ import argparse import glob import os import signal import sys from typing import Iterator, List, Union import file_mod class Options: """ Options class """ def __init__(self) -> None: self._args: argparse.Namespace = None self.parse(sys.argv) def get_files(self) -> List[str]: """ Return list of files. """ return self._files def get_order(self) -> str: """ Return display order. """ return self._args.order def get_recursive_flag(self) -> bool: """ Return recursive flag. """ return self._args.recursive_flag def get_reverse_flag(self) -> bool: """ Return reverse flag. """ return self._args.reverse_flag def _parse_args(self, args: List[str]) -> None: parser = argparse.ArgumentParser( description='Show full list of files.', ) parser.add_argument( '-R', dest='recursive_flag', action='store_true', help='Show directories recursively.' ) parser.add_argument( '-s', action='store_const', const='size', dest='order', default='name', help='Sort by size of file.' ) parser.add_argument( '-t', action='store_const', const='mtime', dest='order', default='name', help='Sort by modification time of file.' ) parser.add_argument( '-c', action='store_const', const='ctime', dest='order', default='name', help='Sort by meta data change time of file.' ) parser.add_argument( '-r', dest='reverse_flag', action='store_true', help='Reverse order.' ) parser.add_argument( 'files', nargs='*', metavar='file', help='File or directory.' ) self._args = parser.parse_args(args) def parse(self, args: List[str]) -> None: """ Parse arguments """ self._parse_args(args[1:]) if self._args.files: self._files = self._args.files else: self._files = sorted(os.listdir()) class Main: """ Main class """ def __init__(self) -> None: try: self.config() sys.exit(self.run()) except (EOFError, KeyboardInterrupt): sys.exit(114) except SystemExit as exception: sys.exit(exception) @staticmethod def config() -> None: """ Configure program """ if hasattr(signal, 'SIGPIPE'): signal.signal(signal.SIGPIPE, signal.SIG_DFL) if os.name == 'nt': argv = [] for arg in sys.argv: files = glob.glob(arg) # Fixes Windows globbing bug if files: argv.extend(files) else: argv.append(arg) sys.argv = argv def _list(self, options: Options, files: List[str]) -> None: file_stats = [] for file in files: if os.path.islink(file): file_stats.append(file_mod.FileStat(file, size=0)) elif os.path.isdir(file): file_stats.append(file_mod.FileStat(file + os.sep)) elif os.path.isfile(file): file_stats.append(file_mod.FileStat(file)) for file_stat in self._sorted(options, file_stats): print("{0:10d} [{1:s}] {2:s}".format( file_stat.get_size(), file_stat.get_time_local(), file_stat.get_file() )) if (options.get_recursive_flag() and file_stat.get_file().endswith(os.sep)): self._list(options, sorted( glob.glob(file_stat.get_file() + '.*') + glob.glob(file_stat.get_file() + '*') )) @staticmethod def _sorted( options: Options, file_stats: List[file_mod.FileStat], ) -> Union[Iterator[file_mod.FileStat], List[file_mod.FileStat]]: order = options.get_order() if order == 'ctime': file_stats = sorted(file_stats, key=lambda s: s.get_time_change()) elif order == 'mtime': file_stats = sorted(file_stats, key=lambda s: s.get_time()) elif order == 'size': file_stats = sorted(file_stats, key=lambda s: s.get_size()) if options.get_reverse_flag(): return reversed(file_stats) return file_stats def run(self) -> int: """ Start program """ options = Options() self._list(options, options.get_files()) return 0 if __name__ == '__main__': if '--pydoc' in 
sys.argv: help(__name__) else: Main()
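Main._sorted() above picks a sort key from the -s/-t/-c options and reverses the result when -r is given. Below is a minimal sketch of the same ordering logic using os.stat directly instead of file_mod.FileStat; the example call is illustrative.

import os

def sort_paths(paths, order='name', reverse=False):
    # Key functions corresponding to the name/size/mtime/ctime orders above;
    # file_mod.FileStat is replaced by plain os.stat for this sketch.
    keys = {
        'name': lambda p: p,
        'size': lambda p: os.stat(p).st_size,
        'mtime': lambda p: os.stat(p).st_mtime,
        'ctime': lambda p: os.stat(p).st_ctime,
    }
    return sorted(paths, key=keys[order], reverse=reverse)

# Example: sort_paths(os.listdir(), order='size', reverse=True)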
gpl-2.0
-5,916,014,528,890,073,000
5,309,432,544,980,224,000
25.261538
78
0.486428
false
stelfrich/openmicroscopy
components/tools/OmeroWeb/omeroweb/settings.py
1
40171
#!/usr/bin/env python # -*- coding: utf-8 -*- # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # Django settings for OMERO.web project. # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # Copyright (c) 2008-2014 University of Dundee. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # # Author: Aleksandra Tarkowska <A(dot)Tarkowska(at)dundee(dot)ac(dot)uk>, 2008. # # Version: 1.0 # import os.path import sys import platform import logging import omero import omero.config import omero.clients import tempfile import re import json from omero_ext import portalocker logger = logging.getLogger(__name__) # LOGS # NEVER DEPLOY a site into production with DEBUG turned on. # Debuging mode. # A boolean that turns on/off debug mode. # handler404 and handler500 works only when False if 'OMERO_HOME' in os.environ: OMERO_HOME = os.environ.get('OMERO_HOME') else: OMERO_HOME = os.path.join(os.path.dirname(__file__), '..', '..', '..') OMERO_HOME = os.path.normpath(OMERO_HOME) INSIGHT_JARS = os.path.join(OMERO_HOME, "lib", "insight").replace('\\', '/') WEBSTART = False if os.path.isdir(INSIGHT_JARS): WEBSTART = True # Logging LOGDIR = os.path.join(OMERO_HOME, 'var', 'log').replace('\\', '/') if not os.path.isdir(LOGDIR): try: os.makedirs(LOGDIR) except Exception, x: exctype, value = sys.exc_info()[:2] raise exctype(value) # DEBUG: Never deploy a site into production with DEBUG turned on. 
# Logging levels: logging.DEBUG, logging.INFO, logging.WARNING, logging.ERROR # logging.CRITICAL # FORMAT: 2010-01-01 00:00:00,000 INFO [omeroweb.webadmin.webadmin_utils] # (proc.1308 ) getGuestConnection:20 Open connection is not available STANDARD_LOGFORMAT = ( '%(asctime)s %(levelname)5.5s [%(name)40.40s]' ' (proc.%(process)5.5d) %(funcName)s:%(lineno)d %(message)s') if platform.system() in ("Windows",): LOGGING_CLASS = 'logging.handlers.RotatingFileHandler' else: LOGGING_CLASS = 'omero_ext.cloghandler.ConcurrentRotatingFileHandler' LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'formatters': { 'standard': { 'format': STANDARD_LOGFORMAT }, }, 'handlers': { 'default': { 'level': 'DEBUG', 'class': LOGGING_CLASS, 'filename': os.path.join( LOGDIR, 'OMEROweb.log').replace('\\', '/'), 'maxBytes': 1024*1024*5, # 5 MB 'backupCount': 10, 'formatter': 'standard', }, 'request_handler': { 'level': 'DEBUG', 'class': LOGGING_CLASS, 'filename': os.path.join( LOGDIR, 'OMEROweb_request.log').replace('\\', '/'), 'maxBytes': 1024*1024*5, # 5 MB 'backupCount': 10, 'formatter': 'standard', }, 'null': { 'level': 'DEBUG', 'class': 'django.utils.log.NullHandler', }, 'console': { 'level': 'DEBUG', 'class': 'logging.StreamHandler', 'formatter': 'standard' }, 'mail_admins': { 'level': 'ERROR', 'class': 'django.utils.log.AdminEmailHandler' } }, 'loggers': { 'django.request': { # Stop SQL debug from logging to main logger 'handlers': ['request_handler', 'mail_admins'], 'level': 'DEBUG', 'propagate': False }, 'django': { 'handlers': ['null'], 'level': 'DEBUG', 'propagate': True }, '': { 'handlers': ['default'], 'level': 'DEBUG', 'propagate': True } } } # Load custom settings from etc/grid/config.xml # Tue 2 Nov 2010 11:03:18 GMT -- ticket:3228 from omero.util.concurrency import get_event CONFIG_XML = os.path.join(OMERO_HOME, 'etc', 'grid', 'config.xml') count = 10 event = get_event("websettings") while True: try: CUSTOM_SETTINGS = dict() if os.path.exists(CONFIG_XML): CONFIG_XML = omero.config.ConfigXml(CONFIG_XML, read_only=True) CUSTOM_SETTINGS = CONFIG_XML.as_map() CONFIG_XML.close() break except portalocker.LockException: # logger.error("Exception while loading configuration retrying...", # exc_info=True) exctype, value = sys.exc_info()[:2] count -= 1 if not count: raise exctype(value) else: event.wait(1) # Wait a total of 10 seconds except: # logger.error("Exception while loading configuration...", # exc_info=True) exctype, value = sys.exc_info()[:2] raise exctype(value) del event del count del get_event WSGI = "wsgi" WSGITCP = "wsgi-tcp" WSGI_TYPES = (WSGI, WSGITCP) FASTCGITCP = "fastcgi-tcp" FASTCGI_TYPES = (FASTCGITCP, ) DEVELOPMENT = "development" DEFAULT_SERVER_TYPE = FASTCGITCP ALL_SERVER_TYPES = (WSGI, WSGITCP, FASTCGITCP, DEVELOPMENT) DEFAULT_SESSION_ENGINE = 'omeroweb.filesessionstore' SESSION_ENGINE_VALUES = ('omeroweb.filesessionstore', 'django.contrib.sessions.backends.db', 'django.contrib.sessions.backends.file', 'django.contrib.sessions.backends.cache', 'django.contrib.sessions.backends.cached_db') def parse_boolean(s): s = s.strip().lower() if s in ('true', '1', 't'): return True return False def parse_paths(s): return [os.path.normpath(path) for path in json.loads(s)] def check_server_type(s): if s not in ALL_SERVER_TYPES: raise ValueError( "Unknown server type: %s. Valid values are: %s" % (s, ALL_SERVER_TYPES)) return s def check_session_engine(s): if s not in SESSION_ENGINE_VALUES: raise ValueError( "Unknown session engine: %s. 
Valid values are: %s" % (s, SESSION_ENGINE_VALUES)) return s def identity(x): return x def str_slash(s): if s is not None: s = str(s) if s and not s.endswith("/"): s += "/" return s class LeaveUnset(Exception): pass def leave_none_unset(s): if s is None: raise LeaveUnset() return s def leave_none_unset_int(s): s = leave_none_unset(s) if s is not None: return int(s) CUSTOM_HOST = CUSTOM_SETTINGS.get("Ice.Default.Host", "localhost") # DO NOT EDIT! INTERNAL_SETTINGS_MAPPING = { "omero.qa.feedback": ["FEEDBACK_URL", "http://qa.openmicroscopy.org.uk", str, None], "omero.web.upgrades.url": ["UPGRADES_URL", None, leave_none_unset, None], "omero.web.check_version": ["CHECK_VERSION", "true", parse_boolean, None], # Allowed hosts: # https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts "omero.web.allowed_hosts": ["ALLOWED_HOSTS", '["*"]', json.loads, None], # WEBSTART "omero.web.webstart_template": ["WEBSTART_TEMPLATE", None, identity, None], "omero.web.webstart_jar": ["WEBSTART_JAR", "omero.insight.jar", str, None], "omero.web.webstart_icon": ["WEBSTART_ICON", "webstart/img/icon-omero-insight.png", str, None], "omero.web.webstart_heap": ["WEBSTART_HEAP", "1024m", str, None], "omero.web.webstart_host": ["WEBSTART_HOST", CUSTOM_HOST, str, None], "omero.web.webstart_port": ["WEBSTART_PORT", "4064", str, None], "omero.web.webstart_class": ["WEBSTART_CLASS", "org.openmicroscopy.shoola.Main", str, None], "omero.web.webstart_title": ["WEBSTART_TITLE", "OMERO.insight", str, None], "omero.web.webstart_vendor": ["WEBSTART_VENDOR", "The Open Microscopy Environment", str, None], "omero.web.webstart_homepage": ["WEBSTART_HOMEPAGE", "http://www.openmicroscopy.org", str, None], "omero.web.webstart_admins_only": ["WEBSTART_ADMINS_ONLY", "false", parse_boolean, None], # Internal email notification for omero.web.admins, # loaded from config.xml directly "omero.mail.from": ["SERVER_EMAIL", None, identity, ("The email address that error messages come from, such as those" " sent to :property:`omero.web.admins`. Requires EMAIL properties" " below.")], "omero.mail.host": ["EMAIL_HOST", None, identity, "The SMTP server host to use for sending email."], "omero.mail.password": ["EMAIL_HOST_PASSWORD", None, identity, "Password to use for the SMTP server."], "omero.mail.username": ["EMAIL_HOST_USER", None, identity, "Username to use for the SMTP server."], "omero.mail.port": ["EMAIL_PORT", 25, identity, "Port to use for the SMTP server."], "omero.web.admins.email_subject_prefix": ["EMAIL_SUBJECT_PREFIX", "[OMERO.web - admin notification]", str, "Subject-line prefix for email messages"], "omero.mail.smtp.starttls.enable": ["EMAIL_USE_TLS", "false", parse_boolean, ("Whether to use a TLS (secure) connection when talking to the SMTP" " server.")], } CUSTOM_SETTINGS_MAPPINGS = { # Deployment configuration "omero.web.debug": ["DEBUG", "false", parse_boolean, "A boolean that turns on/off debug mode."], "omero.web.admins": ["ADMINS", '[]', json.loads, ("A list of people who get code error notifications whenever the " "application identifies a broken link or raises an unhandled " "exception that results in an internal server error. This gives " "the administrators immediate notification of any errors, " "see :doc:`/sysadmins/mail`. " "Example:``'[[\"Full Name\", \"email address\"]]'``.")], "omero.web.application_server": ["APPLICATION_SERVER", DEFAULT_SERVER_TYPE, check_server_type, ("OMERO.web is configured to use FastCGI TCP by default. 
If you are " "using a non-standard web server configuration you may wish to " "change this before generating your web server configuration. " "Available options: \"fastcgi-tcp\", \"wsgi-tcp\", \"wsgi\"")], "omero.web.application_server.host": ["APPLICATION_SERVER_HOST", "127.0.0.1", str, "Upstream application host"], "omero.web.application_server.port": ["APPLICATION_SERVER_PORT", "4080", str, "Upstream application port"], "omero.web.application_server.max_requests": ["APPLICATION_SERVER_MAX_REQUESTS", 400, int, None], "omero.web.prefix": ["FORCE_SCRIPT_NAME", None, leave_none_unset, ("Used as the value of the SCRIPT_NAME environment variable in any" " HTTP request.")], "omero.web.use_x_forwarded_host": ["USE_X_FORWARDED_HOST", "false", parse_boolean, ("Specifies whether to use the X-Forwarded-Host header in preference " "to the Host header. This should only be enabled if a proxy which " "sets this header is in use.")], "omero.web.static_url": ["STATIC_URL", "/static/", str_slash, ("URL to use when referring to static files. Example: ``'/static/'``" " or ``'http://static.example.com/'``. Used as the base path for" " asset definitions (the Media class) and the staticfiles app. It" " must end in a slash if set to a non-empty value.")], "omero.web.session_engine": ["SESSION_ENGINE", DEFAULT_SESSION_ENGINE, check_session_engine, ("Controls where Django stores session data. See :djangodoc:" "`Configuring the session engine for more details <ref/settings" "/#session-engine>`.")], "omero.web.session_expire_at_browser_close": ["SESSION_EXPIRE_AT_BROWSER_CLOSE", "true", parse_boolean, ("A boolean that determines whether to expire the session when the " "user closes their browser. See :djangodoc:`Django Browser-length " "sessions vs. persistent sessions documentation <topics/http/" "sessions/#browser-length-vs-persistent-sessions>` for more " "details.")], "omero.web.caches": ["CACHES", ('{"default": {"BACKEND":' ' "django.core.cache.backends.dummy.DummyCache"}}'), json.loads, ("OMERO.web offers alternative session backends to automatically" " delete stale data using the cache session store backend, see " ":djangodoc:`Django cached session documentation <topics/http/" "sessions/#using-cached-sessions>` for more details.")], "omero.web.session_cookie_age": ["SESSION_COOKIE_AGE", 86400, int, "The age of session cookies, in seconds."], "omero.web.session_cookie_domain": ["SESSION_COOKIE_DOMAIN", None, leave_none_unset, "The domain to use for session cookies"], "omero.web.session_cookie_name": ["SESSION_COOKIE_NAME", None, leave_none_unset, "The name to use for session cookies"], "omero.web.logdir": ["LOGDIR", LOGDIR, str, "A path to the custom log directory."], # Public user "omero.web.public.enabled": ["PUBLIC_ENABLED", "false", parse_boolean, "Enable and disable the OMERO.web public user functionality."], "omero.web.public.url_filter": ["PUBLIC_URL_FILTER", r'^/(?!webadmin)', re.compile, ("Set a URL filter for which the OMERO.web public user is allowed to" " navigate. 
The idea is that you can create the public pages" " yourself (see OMERO.web framework since we do not provide public" " pages.")], "omero.web.public.server_id": ["PUBLIC_SERVER_ID", 1, int, "Server to authenticate against."], "omero.web.public.user": ["PUBLIC_USER", None, leave_none_unset, "Username to use during authentication."], "omero.web.public.password": ["PUBLIC_PASSWORD", None, leave_none_unset, "Password to use during authentication."], "omero.web.public.cache.enabled": ["PUBLIC_CACHE_ENABLED", "false", parse_boolean, None], "omero.web.public.cache.key": ["PUBLIC_CACHE_KEY", "omero.web.public.cache.key", str, None], "omero.web.public.cache.timeout": ["PUBLIC_CACHE_TIMEOUT", 60 * 60 * 24, int, None], # Application configuration "omero.web.server_list": ["SERVER_LIST", '[["%s", 4064, "omero"]]' % CUSTOM_HOST, json.loads, "A list of servers the Web client can connect to."], "omero.web.ping_interval": ["PING_INTERVAL", 60000, int, "description"], "omero.web.webgateway_cache": ["WEBGATEWAY_CACHE", None, leave_none_unset, None], # VIEWER # the following parameters configure when to show/hide the 'Volume viewer' # icon in the Image metadata panel "omero.web.open_astex_max_side": ["OPEN_ASTEX_MAX_SIDE", 400, int, None], "omero.web.open_astex_min_side": ["OPEN_ASTEX_MIN_SIDE", 20, int, None], "omero.web.open_astex_max_voxels": ["OPEN_ASTEX_MAX_VOXELS", 27000000, int, None], # 300 x 300 x 300 # PIPELINE 1.3.20 # Pipeline is an asset packaging library for Django, providing both CSS # and JavaScript concatenation and compression, built-in JavaScript # template support, and optional data-URI image and font embedding. "omero.web.pipeline_js_compressor": ["PIPELINE_JS_COMPRESSOR", None, identity, ("Compressor class to be applied to JavaScript files. If empty or " "None, JavaScript files won't be compressed.")], "omero.web.pipeline_css_compressor": ["PIPELINE_CSS_COMPRESSOR", None, identity, ("Compressor class to be applied to CSS files. If empty or None," " CSS files won't be compressed.")], "omero.web.pipeline_staticfile_storage": ["STATICFILES_STORAGE", "pipeline.storage.PipelineStorage", str, ("The file storage engine to use when collecting static files with" " the collectstatic management command. See `the documentation " "<http://django-pipeline.readthedocs.org/en/latest/storages.html>`_" " for more details.")], # Customisation "omero.web.login_logo": ["LOGIN_LOGO", None, leave_none_unset, ("Customize webclient login page with your own logo. Logo images " "should ideally be 150 pixels high or less and will appear above " "the OMERO logo. You will need to host the image somewhere else " "and link to it with" " ``\"http://www.openmicroscopy.org/site/logo.jpg\"``.")], "omero.web.login_view": ["LOGIN_VIEW", "weblogin", str, None], "omero.web.staticfile_dirs": ["STATICFILES_DIRS", '[]', json.loads, ("Defines the additional locations the staticfiles app will traverse" " if the FileSystemFinder finder is enabled, e.g. if you use the" " collectstatic or findstatic management command or use the static" " file serving view.")], "omero.web.template_dirs": ["TEMPLATE_DIRS", '[]', json.loads, ("List of locations of the template source files, in search order. " "Note that these paths should use Unix-style forward slashes, even" " on Windows.")], "omero.web.index_template": ["INDEX_TEMPLATE", None, identity, ("Define template used as an index page ``http://your_host/omero/``." "If None user is automatically redirected to the login page." "For example use 'webstart/start.html'. 
")], "omero.web.login_redirect": ["LOGIN_REDIRECT", '{}', json.loads, ("Redirect to the given location after logging in. It only supports " "arguments for :djangodoc:`Django reverse function" " <ref/urlresolvers/#django.core.urlresolvers.reverse>`. " "For example: ``'{\"redirect\": [\"webindex\"], \"viewname\":" " \"load_template\", \"args\":[\"userdata\"], \"query_string\":" " \"experimenter=-1\"}'``")], "omero.web.apps": ["ADDITIONAL_APPS", '[]', json.loads, ("Add additional Django applications. For example, see" " :doc:`/developers/Web/CreateApp`")], "omero.web.databases": ["DATABASES", '{}', json.loads, None], "omero.web.page_size": ["PAGE", 200, int, ("Number of images displayed within a dataset or 'orphaned'" " container to prevent from loading them all at once.")], "omero.web.ui.top_links": ["TOP_LINKS", ('[' '["Data", "webindex", {"title": "Browse Data via Projects, Tags' ' etc"}],' '["History", "history", {"title": "History"}],' '["Help", "http://help.openmicroscopy.org/",' '{"title":"Open OMERO user guide in a new tab", "target":"new"}]' ']'), json.loads, ("Add links to the top header: links are ``['Link Text', 'link'," " options]``, where " "the url is reverse('link') OR simply 'link' (for external urls). " "E.g. ``'[[\"Webtest\", \"webtest_index\"], [\"Homepage\"," " \"http://...\", {\"title\": \"Homepage\", \"target\": \"new\"}" " ]]'``")], "omero.web.ui.right_plugins": ["RIGHT_PLUGINS", ('[["Acquisition",' ' "webclient/data/includes/right_plugin.acquisition.js.html",' ' "metadata_tab"],' # '["ROIs", "webtest/webclient_plugins/right_plugin.rois.js.html", # "image_roi_tab"],' '["Preview", "webclient/data/includes/right_plugin.preview.js.html"' ', "preview_tab"]]'), json.loads, ("Add plugins to the right-hand panel. " "Plugins are ``['Label', 'include.js', 'div_id']``. " "The javascript loads data into ``$('#div_id')``.")], "omero.web.ui.center_plugins": ["CENTER_PLUGINS", ('[' # '["Split View", # "webtest/webclient_plugins/center_plugin.splitview.js.html", # "split_view_panel"],' ']'), json.loads, ("Add plugins to the center panels. Plugins are " "``['Channel overlay'," " 'webtest/webclient_plugins/center_plugin.overlay.js.html'," " 'channel_overlay_panel']``. " "The javascript loads data into ``$('#div_id')``.")], } DEPRECATED_SETTINGS_MAPPINGS = { # Deprecated settings, description should indicate the replacement. 
"omero.web.force_script_name": ["FORCE_SCRIPT_NAME", None, leave_none_unset, ("Use omero.web.prefix instead.")], "omero.web.server_email": ["SERVER_EMAIL", None, identity, ("Use omero.mail.from instead.")], "omero.web.email_host": ["EMAIL_HOST", None, identity, ("Use omero.mail.host instead.")], "omero.web.email_host_password": ["EMAIL_HOST_PASSWORD", None, identity, ("Use omero.mail.password instead.")], "omero.web.email_host_user": ["EMAIL_HOST_USER", None, identity, ("Use omero.mail.username instead.")], "omero.web.email_port": ["EMAIL_PORT", None, identity, ("Use omero.mail.port instead.")], "omero.web.email_subject_prefix": ["EMAIL_SUBJECT_PREFIX", "[OMERO.web]", str, ("Default email subject is no longer configurable.")], "omero.web.email_use_tls": ["EMAIL_USE_TLS", "false", parse_boolean, ("Use omero.mail.smtp.* instead to set up" " javax.mail.Session properties.")], "omero.web.plate_download.enabled": ["PLATE_DOWNLOAD_ENABLED", "false", parse_boolean, ("Use omero.policy.binary_access instead to restrict download.")], "omero.web.viewer.initial_zoom_level": ["VIEWER_INITIAL_ZOOM_LEVEL", None, leave_none_unset_int, ("Use omero.client.viewer.initial_zoom_level instead.")], "omero.web.send_broken_link_emails": ["SEND_BROKEN_LINK_EMAILS", "false", parse_boolean, ("Replaced by django.middleware.common.BrokenLinkEmailsMiddleware." "To get notification set :property:`omero.web.admins` property.") ], } del CUSTOM_HOST # DEVELOPMENT_SETTINGS_MAPPINGS - WARNING: For each setting developer MUST open # a ticket that needs to be resolved before a release either by moving the # setting to CUSTOM_SETTINGS_MAPPINGS or by removing the setting at all. DEVELOPMENT_SETTINGS_MAPPINGS = {} def map_deprecated_settings(settings): m = {} for key, values in settings.items(): try: global_name = values[0] m[global_name] = (CUSTOM_SETTINGS[key], key) if len(values) < 5: # Not using default (see process_custom_settings) values.append(False) except KeyError: if len(values) < 5: values.append(True) return m def process_custom_settings( module, settings='CUSTOM_SETTINGS_MAPPINGS', deprecated=None): logging.info('Processing custom settings for module %s' % module.__name__) if deprecated: deprecated_map = map_deprecated_settings( getattr(module, deprecated, {})) else: deprecated_map = {} for key, values in getattr(module, settings, {}).items(): # Django may import settings.py more than once, see: # http://blog.dscpl.com.au/2010/03/improved-wsgi-script-for-use-with.html # In that case, the custom settings have already been processed. 
if len(values) == 5: continue global_name, default_value, mapping, description = values try: global_value = CUSTOM_SETTINGS[key] values.append(False) except KeyError: global_value = default_value values.append(True) try: using_default = values[-1] if global_name in deprecated_map: dep_value, dep_key = deprecated_map[global_name] if using_default: logging.warning( 'Setting %s is deprecated, use %s', dep_key, key) global_value = dep_value else: logging.error( '%s and its deprecated key %s are both set, using %s', key, dep_key, key) setattr(module, global_name, mapping(global_value)) except ValueError: raise ValueError( "Invalid %s JSON: %r" % (global_name, global_value)) except LeaveUnset: pass process_custom_settings(sys.modules[__name__], 'INTERNAL_SETTINGS_MAPPING') process_custom_settings(sys.modules[__name__], 'CUSTOM_SETTINGS_MAPPINGS', 'DEPRECATED_SETTINGS_MAPPINGS') process_custom_settings(sys.modules[__name__], 'DEVELOPMENT_SETTINGS_MAPPINGS') if not DEBUG: # from CUSTOM_SETTINGS_MAPPINGS # noqa LOGGING['loggers']['django.request']['level'] = 'INFO' LOGGING['loggers']['django']['level'] = 'INFO' LOGGING['loggers']['']['level'] = 'INFO' # TEMPLATE_DEBUG: A boolean that turns on/off template debug mode. If this is # True, the fancy error page will display a detailed report for any # TemplateSyntaxError. This report contains # the relevant snippet of the template, with the appropriate line highlighted. # Note that Django only displays fancy error pages if DEBUG is True, # alternatively error is handled by: # handler404 = "omeroweb.feedback.views.handler404" # handler500 = "omeroweb.feedback.views.handler500" TEMPLATE_DEBUG = DEBUG # from CUSTOM_SETTINGS_MAPPINGS # noqa def report_settings(module): from django.views.debug import cleanse_setting custom_settings_mappings = getattr(module, 'CUSTOM_SETTINGS_MAPPINGS', {}) for key in sorted(custom_settings_mappings): values = custom_settings_mappings[key] global_name, default_value, mapping, description, using_default = \ values source = using_default and "default" or key global_value = getattr(module, global_name, None) if global_name.isupper(): logger.debug( "%s = %r (source:%s)", global_name, cleanse_setting(global_name, global_value), source) deprecated_settings = getattr(module, 'DEPRECATED_SETTINGS_MAPPINGS', {}) for key in sorted(deprecated_settings): values = deprecated_settings[key] global_name, default_value, mapping, description, using_default = \ values global_value = getattr(module, global_name, None) if global_name.isupper() and not using_default: logger.debug( "%s = %r (deprecated:%s, %s)", global_name, cleanse_setting(global_name, global_value), key, description) report_settings(sys.modules[__name__]) SITE_ID = 1 # Local time zone for this installation. Choices can be found here: # http://www.postgresql.org/docs/8.1/static/datetime-keywords.html#DATETIME-TIMEZONE-SET-TABLE # although not all variations may be possible on all operating systems. # If running in a Windows environment this must be set to the same as your # system time zone. TIME_ZONE = 'Europe/London' FIRST_DAY_OF_WEEK = 0 # 0-Monday, ... 6-Sunday # LANGUAGE_CODE: A string representing the language code for this # installation. This should be in standard language format. For example, U.S. # English is "en-us". LANGUAGE_CODE = 'en-gb' # SECRET_KEY: A secret key for this particular Django installation. Used to # provide a seed in secret-key hashing algorithms. Set this to a random string # -- the longer, the better. 
django-admin.py startproject creates one # automatically. # Make this unique, and don't share it with anybody. SECRET_KEY = '@@k%g#7=%4b6ib7yr1tloma&g0s2nni6ljf!m0h&x9c712c7yj' # USE_I18N: A boolean that specifies whether Django's internationalization # system should be enabled. # This provides an easy way to turn it off, for performance. If this is set to # False, Django will make some optimizations so as not to load the # internationalization machinery. USE_I18N = True # MIDDLEWARE_CLASSES: A tuple of middleware classes to use. # See https://docs.djangoproject.com/en/1.6/topics/http/middleware/. MIDDLEWARE_CLASSES = ( 'django.middleware.common.BrokenLinkEmailsMiddleware', 'django.middleware.common.CommonMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', ) # ROOT_URLCONF: A string representing the full Python import path to your root # URLconf. # For example: "mydjangoapps.urls". Can be overridden on a per-request basis # by setting the attribute urlconf on the incoming HttpRequest object. ROOT_URLCONF = 'omeroweb.urls' # STATICFILES_FINDERS: The list of finder backends that know how to find # static files in various locations. The default will find files stored in the # STATICFILES_DIRS setting (using # django.contrib.staticfiles.finders.FileSystemFinder) and in a static # subdirectory of each app (using # django.contrib.staticfiles.finders.AppDirectoriesFinder) STATICFILES_FINDERS = ( "django.contrib.staticfiles.finders.FileSystemFinder", "django.contrib.staticfiles.finders.AppDirectoriesFinder", ) # STATIC_URL: URL to use when referring to static files located in # STATIC_ROOT. # Example: "/site_media/static/" or "http://static.example.com/". # If not None, this will be used as the base path for media definitions and # the staticfiles app. It must end in a slash if set to a non-empty value. # This var is configurable by omero.web.static_url STATIC_URL = '/static/' # STATIC_ROOT: The absolute path to the directory where collectstatic will # collect static files for deployment. If the staticfiles contrib app is # enabled (default) the collectstatic management command will collect static # files into this directory. STATIC_ROOT = os.path.join(os.path.dirname(__file__), 'static').replace('\\', '/') # STATICFILES_DIRS: This setting defines the additional locations the # staticfiles app will traverse if the FileSystemFinder finder is enabled, # e.g. if you use the collectstatic or findstatic management command or use # the static file serving view. if WEBSTART: # from CUSTOM_SETTINGS_MAPPINGS STATICFILES_DIRS += (("webstart/jars", INSIGHT_JARS),) # noqa # TEMPLATE_CONTEXT_PROCESSORS: A tuple of callables that are used to populate # the context in RequestContext. These callables take a request object as # their argument and return a dictionary of items to be merged into the # context. TEMPLATE_CONTEXT_PROCESSORS = ( "django.core.context_processors.debug", "django.core.context_processors.i18n", "django.core.context_processors.media", "django.core.context_processors.static", "django.contrib.messages.context_processors.messages", "omeroweb.custom_context_processor.url_suffix" ) # TEMPLATE_LOADERS: A tuple of template loader classes, specified as strings. # Each Loader class knows how to import templates from a particular source. # Optionally, a tuple can be used instead of a string. 
The first item in the # tuple should be the Loader's module, subsequent items are passed to the # Loader during initialization. TEMPLATE_LOADERS = ( 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', ) # INSTALLED_APPS: A tuple of strings designating all applications that are # enabled in this Django installation. Each string should be a full Python # path to a Python package that contains a Django application, as created by # django-admin.py startapp. INSTALLED_APPS = ( 'django.contrib.staticfiles', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.sites', 'omeroweb.feedback', 'omeroweb.webadmin', 'omeroweb.webclient', 'omeroweb.webgateway', 'omeroweb.webredirect', 'omeroweb.webstart', 'pipeline', ) # ADDITONAL_APPS: We import any settings.py from apps. This allows them to # modify settings. # We're also processing any CUSTOM_SETTINGS_MAPPINGS defined there. for app in ADDITIONAL_APPS: # from CUSTOM_SETTINGS_MAPPINGS # noqa # Previously the app was added to INSTALLED_APPS as 'omeroweb.app', which # then required the app to reside within or be symlinked from within # omeroweb, instead of just having to be somewhere on the python path. # To allow apps to just be on the path, but keep it backwards compatible, # try to import as omeroweb.app, if it works, keep that in INSTALLED_APPS, # otherwise add it to INSTALLED_APPS just with its own name. try: __import__('omeroweb.%s' % app) INSTALLED_APPS += ('omeroweb.%s' % app,) except ImportError: INSTALLED_APPS += (app,) try: logger.debug( 'Attempting to import additional app settings for app: %s' % app) module = __import__('%s.settings' % app) process_custom_settings(module.settings) report_settings(module.settings) except ImportError: logger.debug("Couldn't import settings from app: %s" % app) logger.debug('INSTALLED_APPS=%s' % [INSTALLED_APPS]) PIPELINE_CSS = { 'webgateway_viewer': { 'source_filenames': ( 'webgateway/css/reset.css', 'webgateway/css/ome.body.css', 'webclient/css/dusty.css', 'webgateway/css/ome.viewport.css', 'webgateway/css/ome.toolbar.css', 'webgateway/css/ome.gs_slider.css', 'webgateway/css/base.css', 'webgateway/css/ome.snippet_header_logo.css', 'webgateway/css/ome.postit.css', 'webgateway/css/ome.rangewidget.css', '3rdparty/farbtastic-1.2/farbtastic.css', 'webgateway/css/ome.colorbtn.css', '3rdparty/JQuerySpinBtn-1.3a/JQuerySpinBtn.css', '3rdparty/jquery-ui-1.10.4/themes/base/jquery-ui.all.css', 'webgateway/css/omero_image.css', '3rdparty/panojs-2.0.0/panojs.css', ), 'output_filename': 'omeroweb.viewer.min.css', }, } PIPELINE_JS = { 'webgateway_viewer': { 'source_filenames': ( '3rdparty/jquery-1.11.1.js', '3rdparty/jquery-migrate-1.2.1.js', '3rdparty/jquery-ui-1.10.4/js/jquery-ui.1.10.4.js', 'webgateway/js/ome.popup.js', '3rdparty/aop-1.3.js', '3rdparty/raphael-2.1.0/raphael.js', '3rdparty/raphael-2.1.0/scale.raphael.js', '3rdparty/panojs-2.0.0/utils.js', '3rdparty/panojs-2.0.0/PanoJS.js', '3rdparty/panojs-2.0.0/controls.js', '3rdparty/panojs-2.0.0/pyramid_Bisque.js', '3rdparty/panojs-2.0.0/pyramid_imgcnv.js', '3rdparty/panojs-2.0.0/pyramid_Zoomify.js', '3rdparty/panojs-2.0.0/control_thumbnail.js', '3rdparty/panojs-2.0.0/control_info.js', '3rdparty/panojs-2.0.0/control_svg.js', '3rdparty/panojs-2.0.0/control_roi.js', '3rdparty/panojs-2.0.0/control_scalebar.js', '3rdparty/hammer-2.0.2/hammer.min.js', 'webgateway/js/ome.gs_utils.js', 'webgateway/js/ome.viewportImage.js', 'webgateway/js/ome.gs_slider.js', 'webgateway/js/ome.viewport.js', 
'webgateway/js/omero_image.js', 'webgateway/js/ome.roidisplay.js', 'webgateway/js/ome.scalebardisplay.js', 'webgateway/js/ome.smartdialog.js', '3rdparty/JQuerySpinBtn-1.3a/JQuerySpinBtn.js', 'webgateway/js/ome.colorbtn.js', 'webgateway/js/ome.postit.js', '3rdparty/jquery.selectboxes-2.2.6.js', 'webgateway/js/ome.rangewidget.js', '3rdparty/farbtastic-1.2/farbtastic.js', '3rdparty/jquery.mousewheel-3.0.6.js', ), 'output_filename': 'omeroweb.viewer.min.js', } } CSRF_FAILURE_VIEW = "omeroweb.feedback.views.csrf_failure" # FEEDBACK - DO NOT MODIFY! # FEEDBACK_URL: Is now configurable for testing purpuse only. Used in # feedback.sendfeedback.SendFeedback class in order to submit errors or # comment messages to http://qa.openmicroscopy.org.uk. # FEEDBACK_APP: 6 = OMERO.web FEEDBACK_APP = 6 # IGNORABLE_404_STARTS: # Default: ('/cgi-bin/', '/_vti_bin', '/_vti_inf') # IGNORABLE_404_ENDS: # Default: ('mail.pl', 'mailform.pl', 'mail.cgi', 'mailform.cgi', # 'favicon.ico', '.php') # SESSION_FILE_PATH: If you're using file-based session storage, this sets the # directory in which Django will store session data. When the default value # (None) is used, Django will use the standard temporary directory for the # system. SESSION_FILE_PATH = tempfile.gettempdir() # FILE_UPLOAD_TEMP_DIR: The directory to store data temporarily while # uploading files. FILE_UPLOAD_TEMP_DIR = tempfile.gettempdir() # # FILE_UPLOAD_MAX_MEMORY_SIZE: The maximum size (in bytes) that an upload # will be before it gets streamed to the file system. FILE_UPLOAD_MAX_MEMORY_SIZE = 2621440 # default 2621440 (i.e. 2.5 MB). # DEFAULT_IMG: Used in # webclient.webclient_gateway.OmeroWebGateway.defaultThumbnail in order to # load default image while thumbnail can't be retrieved from the server. DEFAULT_IMG = os.path.join( os.path.dirname(__file__), 'webgateway', 'static', 'webgateway', 'img', 'image128.png').replace('\\', '/') # # DEFAULT_USER: Used in # webclient.webclient_gateway.OmeroWebGateway.getExperimenterDefaultPhoto in # order to load default avatar while experimenter photo can't be retrieved # from the server. DEFAULT_USER = os.path.join( os.path.dirname(__file__), 'webgateway', 'static', 'webgateway', 'img', 'personal32.png').replace('\\', '/') # MANAGERS: A tuple in the same format as ADMINS that specifies who should get # broken-link notifications when # SEND_BROKEN_LINK_EMAILS=True. MANAGERS = ADMINS # from CUSTOM_SETTINGS_MAPPINGS # noqa # https://docs.djangoproject.com/en/1.6/releases/1.6/#default-session-serialization-switched-to-json # JSON serializer, which is now the default, cannot handle # omeroweb.connector.Connector object SESSION_SERIALIZER = 'django.contrib.sessions.serializers.PickleSerializer' # Load server list and freeze from connector import Server def load_server_list(): for s in SERVER_LIST: # from CUSTOM_SETTINGS_MAPPINGS # noqa server = (len(s) > 2) and unicode(s[2]) or None Server(host=unicode(s[0]), port=int(s[1]), server=server) Server.freeze() load_server_list()
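The settings module above drives most of its configuration through CUSTOM_SETTINGS_MAPPINGS: each omero.web.* key maps to a settings name, a string default, a parser and a description, and process_custom_settings() applies the parser to either the configured value or the default. Below is a trimmed-down sketch of that pattern; the keys, defaults and target dict are illustrative, not the real OMERO set.

import json

def parse_boolean(s):
    # Same idea as the parse_boolean helper defined above.
    return s.strip().lower() in ('true', '1', 't')

# Illustrative mapping only: config key -> (settings name, default, parser).
MAPPINGS = {
    'example.web.debug': ('DEBUG', 'false', parse_boolean),
    'example.web.admins': ('ADMINS', '[]', json.loads),
}

def apply_settings(custom_settings, target):
    for key, (name, default, parser) in MAPPINGS.items():
        raw = custom_settings.get(key, default)
        target[name] = parser(raw)

settings = {}
apply_settings({'example.web.debug': 'true'}, settings)
print(settings)  # {'DEBUG': True, 'ADMINS': []}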
gpl-2.0
3,387,988,354,743,281,700
-6,497,584,234,419,030,000
36.648547
100
0.617435
false
maxalbert/tohu
tohu/v7/derived_generators/fstr.py
1
1341
import inspect import re from .apply import Apply class fstr(Apply): """ Helper function for easy formatting of tohu generators. Usage example: >>> g1 = Integer(100, 200) >>> g2 = Integer(300, 400) >>> g3 = g1 + g2 >>> h = fstr('{g1} + {g2} = {g3}') >>> print(next(h)) 122 + 338 = 460 >>> print(next(h)) 165 + 325 = 490 """ def __init__(self, spec): # FIXME: this pattern is not yet compatible with the full f-string spec. # For example, it doesn't recognise double '{{' and '}}' (for escaping). # Also it would be awesome if we could parse arbitrary expressions inside # the curly braces. # TODO: re-implement this using the `string.Formatter` class from the standard library. pattern = "{([^}:]+)(:.*)?}" gen_names = [gen_name for (gen_name, _) in re.findall(pattern, spec)] # TODO: do we ever need to store and pass in the original namespace when spawning generators? namespace = inspect.currentframe().f_back.f_globals namespace.update(inspect.currentframe().f_back.f_locals) gens = {name: namespace[name] for name in gen_names} def format_items(**kwargs): return spec.format(**kwargs) super().__init__(format_items, **gens)
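The fstr helper above extracts generator names from the format string with the pattern "{([^}:]+)(:.*)?}" and then resolves them from the caller's namespace. Below is a standalone sketch of just the name-extraction step; the spec string is the docstring's own example and the helper name is made up.

import re

PATTERN = r"{([^}:]+)(:.*)?}"   # same pattern as used in fstr.__init__ above

def extract_names(spec):
    # re.findall returns (name, format_spec) tuples; keep only the names.
    return [name for name, _fmt in re.findall(PATTERN, spec)]

print(extract_names('{g1} + {g2} = {g3}'))  # ['g1', 'g2', 'g3']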
mit
464,294,291,272,987,300
3,949,657,202,349,462,500
30.186047
101
0.58091
false
liangazhou/django-rdp
packages/eclipse/plugins/org.python.pydev.jython_4.4.0.201510052309/Lib/encodings/mac_romanian.py
593
13917
""" Python Character Mapping Codec mac_romanian generated from 'MAPPINGS/VENDORS/APPLE/ROMANIAN.TXT' with gencodec.py. """#" import codecs ### Codec APIs class Codec(codecs.Codec): def encode(self,input,errors='strict'): return codecs.charmap_encode(input,errors,encoding_table) def decode(self,input,errors='strict'): return codecs.charmap_decode(input,errors,decoding_table) class IncrementalEncoder(codecs.IncrementalEncoder): def encode(self, input, final=False): return codecs.charmap_encode(input,self.errors,encoding_table)[0] class IncrementalDecoder(codecs.IncrementalDecoder): def decode(self, input, final=False): return codecs.charmap_decode(input,self.errors,decoding_table)[0] class StreamWriter(Codec,codecs.StreamWriter): pass class StreamReader(Codec,codecs.StreamReader): pass ### encodings module API def getregentry(): return codecs.CodecInfo( name='mac-romanian', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, ) ### Decoding Table decoding_table = ( u'\x00' # 0x00 -> CONTROL CHARACTER u'\x01' # 0x01 -> CONTROL CHARACTER u'\x02' # 0x02 -> CONTROL CHARACTER u'\x03' # 0x03 -> CONTROL CHARACTER u'\x04' # 0x04 -> CONTROL CHARACTER u'\x05' # 0x05 -> CONTROL CHARACTER u'\x06' # 0x06 -> CONTROL CHARACTER u'\x07' # 0x07 -> CONTROL CHARACTER u'\x08' # 0x08 -> CONTROL CHARACTER u'\t' # 0x09 -> CONTROL CHARACTER u'\n' # 0x0A -> CONTROL CHARACTER u'\x0b' # 0x0B -> CONTROL CHARACTER u'\x0c' # 0x0C -> CONTROL CHARACTER u'\r' # 0x0D -> CONTROL CHARACTER u'\x0e' # 0x0E -> CONTROL CHARACTER u'\x0f' # 0x0F -> CONTROL CHARACTER u'\x10' # 0x10 -> CONTROL CHARACTER u'\x11' # 0x11 -> CONTROL CHARACTER u'\x12' # 0x12 -> CONTROL CHARACTER u'\x13' # 0x13 -> CONTROL CHARACTER u'\x14' # 0x14 -> CONTROL CHARACTER u'\x15' # 0x15 -> CONTROL CHARACTER u'\x16' # 0x16 -> CONTROL CHARACTER u'\x17' # 0x17 -> CONTROL CHARACTER u'\x18' # 0x18 -> CONTROL CHARACTER u'\x19' # 0x19 -> CONTROL CHARACTER u'\x1a' # 0x1A -> CONTROL CHARACTER u'\x1b' # 0x1B -> CONTROL CHARACTER u'\x1c' # 0x1C -> CONTROL CHARACTER u'\x1d' # 0x1D -> CONTROL CHARACTER u'\x1e' # 0x1E -> CONTROL CHARACTER u'\x1f' # 0x1F -> CONTROL CHARACTER u' ' # 0x20 -> SPACE u'!' # 0x21 -> EXCLAMATION MARK u'"' # 0x22 -> QUOTATION MARK u'#' # 0x23 -> NUMBER SIGN u'$' # 0x24 -> DOLLAR SIGN u'%' # 0x25 -> PERCENT SIGN u'&' # 0x26 -> AMPERSAND u"'" # 0x27 -> APOSTROPHE u'(' # 0x28 -> LEFT PARENTHESIS u')' # 0x29 -> RIGHT PARENTHESIS u'*' # 0x2A -> ASTERISK u'+' # 0x2B -> PLUS SIGN u',' # 0x2C -> COMMA u'-' # 0x2D -> HYPHEN-MINUS u'.' # 0x2E -> FULL STOP u'/' # 0x2F -> SOLIDUS u'0' # 0x30 -> DIGIT ZERO u'1' # 0x31 -> DIGIT ONE u'2' # 0x32 -> DIGIT TWO u'3' # 0x33 -> DIGIT THREE u'4' # 0x34 -> DIGIT FOUR u'5' # 0x35 -> DIGIT FIVE u'6' # 0x36 -> DIGIT SIX u'7' # 0x37 -> DIGIT SEVEN u'8' # 0x38 -> DIGIT EIGHT u'9' # 0x39 -> DIGIT NINE u':' # 0x3A -> COLON u';' # 0x3B -> SEMICOLON u'<' # 0x3C -> LESS-THAN SIGN u'=' # 0x3D -> EQUALS SIGN u'>' # 0x3E -> GREATER-THAN SIGN u'?' 
# 0x3F -> QUESTION MARK u'@' # 0x40 -> COMMERCIAL AT u'A' # 0x41 -> LATIN CAPITAL LETTER A u'B' # 0x42 -> LATIN CAPITAL LETTER B u'C' # 0x43 -> LATIN CAPITAL LETTER C u'D' # 0x44 -> LATIN CAPITAL LETTER D u'E' # 0x45 -> LATIN CAPITAL LETTER E u'F' # 0x46 -> LATIN CAPITAL LETTER F u'G' # 0x47 -> LATIN CAPITAL LETTER G u'H' # 0x48 -> LATIN CAPITAL LETTER H u'I' # 0x49 -> LATIN CAPITAL LETTER I u'J' # 0x4A -> LATIN CAPITAL LETTER J u'K' # 0x4B -> LATIN CAPITAL LETTER K u'L' # 0x4C -> LATIN CAPITAL LETTER L u'M' # 0x4D -> LATIN CAPITAL LETTER M u'N' # 0x4E -> LATIN CAPITAL LETTER N u'O' # 0x4F -> LATIN CAPITAL LETTER O u'P' # 0x50 -> LATIN CAPITAL LETTER P u'Q' # 0x51 -> LATIN CAPITAL LETTER Q u'R' # 0x52 -> LATIN CAPITAL LETTER R u'S' # 0x53 -> LATIN CAPITAL LETTER S u'T' # 0x54 -> LATIN CAPITAL LETTER T u'U' # 0x55 -> LATIN CAPITAL LETTER U u'V' # 0x56 -> LATIN CAPITAL LETTER V u'W' # 0x57 -> LATIN CAPITAL LETTER W u'X' # 0x58 -> LATIN CAPITAL LETTER X u'Y' # 0x59 -> LATIN CAPITAL LETTER Y u'Z' # 0x5A -> LATIN CAPITAL LETTER Z u'[' # 0x5B -> LEFT SQUARE BRACKET u'\\' # 0x5C -> REVERSE SOLIDUS u']' # 0x5D -> RIGHT SQUARE BRACKET u'^' # 0x5E -> CIRCUMFLEX ACCENT u'_' # 0x5F -> LOW LINE u'`' # 0x60 -> GRAVE ACCENT u'a' # 0x61 -> LATIN SMALL LETTER A u'b' # 0x62 -> LATIN SMALL LETTER B u'c' # 0x63 -> LATIN SMALL LETTER C u'd' # 0x64 -> LATIN SMALL LETTER D u'e' # 0x65 -> LATIN SMALL LETTER E u'f' # 0x66 -> LATIN SMALL LETTER F u'g' # 0x67 -> LATIN SMALL LETTER G u'h' # 0x68 -> LATIN SMALL LETTER H u'i' # 0x69 -> LATIN SMALL LETTER I u'j' # 0x6A -> LATIN SMALL LETTER J u'k' # 0x6B -> LATIN SMALL LETTER K u'l' # 0x6C -> LATIN SMALL LETTER L u'm' # 0x6D -> LATIN SMALL LETTER M u'n' # 0x6E -> LATIN SMALL LETTER N u'o' # 0x6F -> LATIN SMALL LETTER O u'p' # 0x70 -> LATIN SMALL LETTER P u'q' # 0x71 -> LATIN SMALL LETTER Q u'r' # 0x72 -> LATIN SMALL LETTER R u's' # 0x73 -> LATIN SMALL LETTER S u't' # 0x74 -> LATIN SMALL LETTER T u'u' # 0x75 -> LATIN SMALL LETTER U u'v' # 0x76 -> LATIN SMALL LETTER V u'w' # 0x77 -> LATIN SMALL LETTER W u'x' # 0x78 -> LATIN SMALL LETTER X u'y' # 0x79 -> LATIN SMALL LETTER Y u'z' # 0x7A -> LATIN SMALL LETTER Z u'{' # 0x7B -> LEFT CURLY BRACKET u'|' # 0x7C -> VERTICAL LINE u'}' # 0x7D -> RIGHT CURLY BRACKET u'~' # 0x7E -> TILDE u'\x7f' # 0x7F -> CONTROL CHARACTER u'\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS u'\xc5' # 0x81 -> LATIN CAPITAL LETTER A WITH RING ABOVE u'\xc7' # 0x82 -> LATIN CAPITAL LETTER C WITH CEDILLA u'\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE u'\xd1' # 0x84 -> LATIN CAPITAL LETTER N WITH TILDE u'\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS u'\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS u'\xe1' # 0x87 -> LATIN SMALL LETTER A WITH ACUTE u'\xe0' # 0x88 -> LATIN SMALL LETTER A WITH GRAVE u'\xe2' # 0x89 -> LATIN SMALL LETTER A WITH CIRCUMFLEX u'\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS u'\xe3' # 0x8B -> LATIN SMALL LETTER A WITH TILDE u'\xe5' # 0x8C -> LATIN SMALL LETTER A WITH RING ABOVE u'\xe7' # 0x8D -> LATIN SMALL LETTER C WITH CEDILLA u'\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE u'\xe8' # 0x8F -> LATIN SMALL LETTER E WITH GRAVE u'\xea' # 0x90 -> LATIN SMALL LETTER E WITH CIRCUMFLEX u'\xeb' # 0x91 -> LATIN SMALL LETTER E WITH DIAERESIS u'\xed' # 0x92 -> LATIN SMALL LETTER I WITH ACUTE u'\xec' # 0x93 -> LATIN SMALL LETTER I WITH GRAVE u'\xee' # 0x94 -> LATIN SMALL LETTER I WITH CIRCUMFLEX u'\xef' # 0x95 -> LATIN SMALL LETTER I WITH DIAERESIS u'\xf1' # 0x96 -> LATIN SMALL LETTER N WITH TILDE u'\xf3' # 0x97 -> 
LATIN SMALL LETTER O WITH ACUTE u'\xf2' # 0x98 -> LATIN SMALL LETTER O WITH GRAVE u'\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX u'\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS u'\xf5' # 0x9B -> LATIN SMALL LETTER O WITH TILDE u'\xfa' # 0x9C -> LATIN SMALL LETTER U WITH ACUTE u'\xf9' # 0x9D -> LATIN SMALL LETTER U WITH GRAVE u'\xfb' # 0x9E -> LATIN SMALL LETTER U WITH CIRCUMFLEX u'\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS u'\u2020' # 0xA0 -> DAGGER u'\xb0' # 0xA1 -> DEGREE SIGN u'\xa2' # 0xA2 -> CENT SIGN u'\xa3' # 0xA3 -> POUND SIGN u'\xa7' # 0xA4 -> SECTION SIGN u'\u2022' # 0xA5 -> BULLET u'\xb6' # 0xA6 -> PILCROW SIGN u'\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S u'\xae' # 0xA8 -> REGISTERED SIGN u'\xa9' # 0xA9 -> COPYRIGHT SIGN u'\u2122' # 0xAA -> TRADE MARK SIGN u'\xb4' # 0xAB -> ACUTE ACCENT u'\xa8' # 0xAC -> DIAERESIS u'\u2260' # 0xAD -> NOT EQUAL TO u'\u0102' # 0xAE -> LATIN CAPITAL LETTER A WITH BREVE u'\u0218' # 0xAF -> LATIN CAPITAL LETTER S WITH COMMA BELOW # for Unicode 3.0 and later u'\u221e' # 0xB0 -> INFINITY u'\xb1' # 0xB1 -> PLUS-MINUS SIGN u'\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO u'\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO u'\xa5' # 0xB4 -> YEN SIGN u'\xb5' # 0xB5 -> MICRO SIGN u'\u2202' # 0xB6 -> PARTIAL DIFFERENTIAL u'\u2211' # 0xB7 -> N-ARY SUMMATION u'\u220f' # 0xB8 -> N-ARY PRODUCT u'\u03c0' # 0xB9 -> GREEK SMALL LETTER PI u'\u222b' # 0xBA -> INTEGRAL u'\xaa' # 0xBB -> FEMININE ORDINAL INDICATOR u'\xba' # 0xBC -> MASCULINE ORDINAL INDICATOR u'\u03a9' # 0xBD -> GREEK CAPITAL LETTER OMEGA u'\u0103' # 0xBE -> LATIN SMALL LETTER A WITH BREVE u'\u0219' # 0xBF -> LATIN SMALL LETTER S WITH COMMA BELOW # for Unicode 3.0 and later u'\xbf' # 0xC0 -> INVERTED QUESTION MARK u'\xa1' # 0xC1 -> INVERTED EXCLAMATION MARK u'\xac' # 0xC2 -> NOT SIGN u'\u221a' # 0xC3 -> SQUARE ROOT u'\u0192' # 0xC4 -> LATIN SMALL LETTER F WITH HOOK u'\u2248' # 0xC5 -> ALMOST EQUAL TO u'\u2206' # 0xC6 -> INCREMENT u'\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK u'\xbb' # 0xC8 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK u'\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS u'\xa0' # 0xCA -> NO-BREAK SPACE u'\xc0' # 0xCB -> LATIN CAPITAL LETTER A WITH GRAVE u'\xc3' # 0xCC -> LATIN CAPITAL LETTER A WITH TILDE u'\xd5' # 0xCD -> LATIN CAPITAL LETTER O WITH TILDE u'\u0152' # 0xCE -> LATIN CAPITAL LIGATURE OE u'\u0153' # 0xCF -> LATIN SMALL LIGATURE OE u'\u2013' # 0xD0 -> EN DASH u'\u2014' # 0xD1 -> EM DASH u'\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK u'\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK u'\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK u'\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK u'\xf7' # 0xD6 -> DIVISION SIGN u'\u25ca' # 0xD7 -> LOZENGE u'\xff' # 0xD8 -> LATIN SMALL LETTER Y WITH DIAERESIS u'\u0178' # 0xD9 -> LATIN CAPITAL LETTER Y WITH DIAERESIS u'\u2044' # 0xDA -> FRACTION SLASH u'\u20ac' # 0xDB -> EURO SIGN u'\u2039' # 0xDC -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK u'\u203a' # 0xDD -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK u'\u021a' # 0xDE -> LATIN CAPITAL LETTER T WITH COMMA BELOW # for Unicode 3.0 and later u'\u021b' # 0xDF -> LATIN SMALL LETTER T WITH COMMA BELOW # for Unicode 3.0 and later u'\u2021' # 0xE0 -> DOUBLE DAGGER u'\xb7' # 0xE1 -> MIDDLE DOT u'\u201a' # 0xE2 -> SINGLE LOW-9 QUOTATION MARK u'\u201e' # 0xE3 -> DOUBLE LOW-9 QUOTATION MARK u'\u2030' # 0xE4 -> PER MILLE SIGN u'\xc2' # 0xE5 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX u'\xca' # 0xE6 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX u'\xc1' # 0xE7 -> LATIN CAPITAL LETTER A WITH ACUTE u'\xcb' # 
0xE8 -> LATIN CAPITAL LETTER E WITH DIAERESIS u'\xc8' # 0xE9 -> LATIN CAPITAL LETTER E WITH GRAVE u'\xcd' # 0xEA -> LATIN CAPITAL LETTER I WITH ACUTE u'\xce' # 0xEB -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX u'\xcf' # 0xEC -> LATIN CAPITAL LETTER I WITH DIAERESIS u'\xcc' # 0xED -> LATIN CAPITAL LETTER I WITH GRAVE u'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE u'\xd4' # 0xEF -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX u'\uf8ff' # 0xF0 -> Apple logo u'\xd2' # 0xF1 -> LATIN CAPITAL LETTER O WITH GRAVE u'\xda' # 0xF2 -> LATIN CAPITAL LETTER U WITH ACUTE u'\xdb' # 0xF3 -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX u'\xd9' # 0xF4 -> LATIN CAPITAL LETTER U WITH GRAVE u'\u0131' # 0xF5 -> LATIN SMALL LETTER DOTLESS I u'\u02c6' # 0xF6 -> MODIFIER LETTER CIRCUMFLEX ACCENT u'\u02dc' # 0xF7 -> SMALL TILDE u'\xaf' # 0xF8 -> MACRON u'\u02d8' # 0xF9 -> BREVE u'\u02d9' # 0xFA -> DOT ABOVE u'\u02da' # 0xFB -> RING ABOVE u'\xb8' # 0xFC -> CEDILLA u'\u02dd' # 0xFD -> DOUBLE ACUTE ACCENT u'\u02db' # 0xFE -> OGONEK u'\u02c7' # 0xFF -> CARON ) ### Encoding table encoding_table=codecs.charmap_build(decoding_table)
apache-2.0
7,329,480,286,181,651,000
3,325,641,647,006,376,000
44.332248
118
0.547747
false
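The codec module above follows the standard charmap pattern: a 256-entry decoding table, `codecs.charmap_build` for the reverse table, and a `getregentry()` hook picked up by the `encodings` package. A toy sketch of the same pattern wired up by hand with `codecs.register`; the one-character table and the 'toy_charmap' name are invented for illustration:

import codecs

# 0x00-0x7F decode as ASCII; 0x80 decodes to U+0103 (a with breve); the rest is left undefined.
decoding_table = ''.join(chr(i) for i in range(0x80)) + '\u0103' + '\ufffe' * 127
encoding_table = codecs.charmap_build(decoding_table)

class ToyCodec(codecs.Codec):
    def encode(self, input, errors='strict'):
        return codecs.charmap_encode(input, errors, encoding_table)
    def decode(self, input, errors='strict'):
        return codecs.charmap_decode(input, errors, decoding_table)

def _search(name):
    if name == 'toy_charmap':
        return codecs.CodecInfo(name='toy_charmap',
                                encode=ToyCodec().encode,
                                decode=ToyCodec().decode)
    return None

codecs.register(_search)
print(b'abc\x80'.decode('toy_charmap'))   # -> 'abcă'
print('abc\u0103'.encode('toy_charmap'))  # -> b'abc\x80'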
jrwdunham/old
onlinelinguisticdatabase/model/form.py
1
8233
# Copyright 2016 Joel Dunham # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Form model""" from sqlalchemy import Column, Sequence, ForeignKey from sqlalchemy.types import Integer, Unicode, UnicodeText, Date, DateTime from sqlalchemy.orm import relation from onlinelinguisticdatabase.model.meta import Base, now class FormFile(Base): __tablename__ = 'formfile' id = Column(Integer, Sequence('formfile_seq_id', optional=True), primary_key=True) form_id = Column(Integer, ForeignKey('form.id')) file_id = Column(Integer, ForeignKey('file.id')) datetime_modified = Column(DateTime, default=now) class FormTag(Base): __tablename__ = 'formtag' id = Column(Integer, Sequence('formtag_seq_id', optional=True), primary_key=True) form_id = Column(Integer, ForeignKey('form.id')) tag_id = Column(Integer, ForeignKey('tag.id')) datetime_modified = Column(DateTime(), default=now) class CollectionForm(Base): __tablename__ = 'collectionform' id = Column(Integer, Sequence('collectionform_seq_id', optional=True), primary_key=True) collection_id = Column(Integer, ForeignKey('collection.id')) form_id = Column(Integer, ForeignKey('form.id')) datetime_modified = Column(DateTime(), default=now) class Form(Base): __tablename__ = "form" def __repr__(self): return "<Form (%s)>" % self.id id = Column(Integer, Sequence('form_seq_id', optional=True), primary_key=True) UUID = Column(Unicode(36)) transcription = Column(Unicode(510), nullable=False) phonetic_transcription = Column(Unicode(510)) narrow_phonetic_transcription = Column(Unicode(510)) morpheme_break = Column(Unicode(510)) morpheme_gloss = Column(Unicode(510)) comments = Column(UnicodeText) speaker_comments = Column(UnicodeText) grammaticality = Column(Unicode(255)) date_elicited = Column(Date) datetime_entered = Column(DateTime) datetime_modified = Column(DateTime, default=now) syntactic_category_string = Column(Unicode(510)) morpheme_break_ids = Column(UnicodeText) morpheme_gloss_ids = Column(UnicodeText) break_gloss_category = Column(Unicode(1023)) syntax = Column(Unicode(1023)) semantics = Column(Unicode(1023)) status = Column(Unicode(40), default=u'tested') # u'tested' vs. 
u'requires testing' elicitor_id = Column(Integer, ForeignKey('user.id', ondelete='SET NULL')) elicitor = relation('User', primaryjoin='Form.elicitor_id==User.id') enterer_id = Column(Integer, ForeignKey('user.id', ondelete='SET NULL')) enterer = relation('User', primaryjoin='Form.enterer_id==User.id') modifier_id = Column(Integer, ForeignKey('user.id', ondelete='SET NULL')) modifier = relation('User', primaryjoin='Form.modifier_id==User.id') verifier_id = Column(Integer, ForeignKey('user.id', ondelete='SET NULL')) verifier = relation('User', primaryjoin='Form.verifier_id==User.id') speaker_id = Column(Integer, ForeignKey('speaker.id', ondelete='SET NULL')) speaker = relation('Speaker') elicitationmethod_id = Column(Integer, ForeignKey('elicitationmethod.id', ondelete='SET NULL')) elicitation_method = relation('ElicitationMethod') syntacticcategory_id = Column(Integer, ForeignKey('syntacticcategory.id', ondelete='SET NULL')) syntactic_category = relation('SyntacticCategory', backref='forms') source_id = Column(Integer, ForeignKey('source.id', ondelete='SET NULL')) source = relation('Source') translations = relation('Translation', backref='form', cascade='all, delete, delete-orphan') files = relation('File', secondary=FormFile.__table__, backref='forms') collections = relation('Collection', secondary=CollectionForm.__table__, backref='forms') tags = relation('Tag', secondary=FormTag.__table__, backref='forms') def get_dict(self): """Return a Python dictionary representation of the Form. This facilitates JSON-stringification, cf. utils.JSONOLDEncoder. Relational data are truncated, e.g., form_dict['elicitor'] is a dict with keys for 'id', 'first_name' and 'last_name' (cf. get_mini_user_dict above) and lacks keys for other attributes such as 'username', 'personal_page_content', etc. """ return { 'id': self.id, 'UUID': self.UUID, 'transcription': self.transcription, 'phonetic_transcription': self.phonetic_transcription, 'narrow_phonetic_transcription': self.narrow_phonetic_transcription, 'morpheme_break': self.morpheme_break, 'morpheme_gloss': self.morpheme_gloss, 'comments': self.comments, 'speaker_comments': self.speaker_comments, 'grammaticality': self.grammaticality, 'date_elicited': self.date_elicited, 'datetime_entered': self.datetime_entered, 'datetime_modified': self.datetime_modified, 'syntactic_category_string': self.syntactic_category_string, 'morpheme_break_ids': self.json_loads(self.morpheme_break_ids), 'morpheme_gloss_ids': self.json_loads(self.morpheme_gloss_ids), 'break_gloss_category': self.break_gloss_category, 'syntax': self.syntax, 'semantics': self.semantics, 'status': self.status, 'elicitor': self.get_mini_user_dict(self.elicitor), 'enterer': self.get_mini_user_dict(self.enterer), 'modifier': self.get_mini_user_dict(self.modifier), 'verifier': self.get_mini_user_dict(self.verifier), 'speaker': self.get_mini_speaker_dict(self.speaker), 'elicitation_method': self.get_mini_elicitation_method_dict(self.elicitation_method), 'syntactic_category': self.get_mini_syntactic_category_dict(self.syntactic_category), 'source': self.get_mini_source_dict(self.source), 'translations': self.get_translations_list(self.translations), 'tags': self.get_tags_list(self.tags), 'files': self.get_files_list(self.files) } def extract_word_pos_sequences(self, unknown_category, morpheme_splitter, extract_morphemes=False): """Return the unique word-based pos sequences, as well as (possibly) the morphemes, implicit in the form. 
:param str unknown_category: the string used in syntactic category strings when a morpheme-gloss pair is unknown :param morpheme_splitter: callable that splits a strings into its morphemes and delimiters :param bool extract_morphemes: determines whether we return a list of morphemes implicit in the form. :returns: 2-tuple: (set of pos/delimiter sequences, list of morphemes as (pos, (mb, mg)) tuples). """ if not self.syntactic_category_string: return None, None pos_sequences = set() morphemes = [] sc_words = self.syntactic_category_string.split() mb_words = self.morpheme_break.split() mg_words = self.morpheme_gloss.split() for sc_word, mb_word, mg_word in zip(sc_words, mb_words, mg_words): pos_sequence = tuple(morpheme_splitter(sc_word)) if unknown_category not in pos_sequence: pos_sequences.add(pos_sequence) if extract_morphemes: morpheme_sequence = morpheme_splitter(mb_word)[::2] gloss_sequence = morpheme_splitter(mg_word)[::2] for pos, morpheme, gloss in zip(pos_sequence[::2], morpheme_sequence, gloss_sequence): morphemes.append((pos, (morpheme, gloss))) return pos_sequences, morphemes
apache-2.0
5,270,046,313,243,040,000
3,284,502,338,156,558,300
47.429412
120
0.667314
false
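extract_word_pos_sequences() in the record above leaves the morpheme_splitter contract implicit: a callable returning morphemes and delimiters interleaved, so that [::2] picks out the morphemes. A hedged sketch of one such splitter; the delimiter set and the sample words below are made up and are not taken from the OLD project:

import re

def morpheme_splitter(word, delimiters='-='):
    # The capturing group keeps the delimiters, so the result alternates
    # morpheme, delimiter, morpheme, ... and parts[::2] are the morphemes.
    return re.split('([{}])'.format(re.escape(delimiters)), word)

sc_word = 'agra-num'       # syntactic category string for one word
mb_word = 'nitsit-wa'      # morpheme break
mg_word = 'person-3'       # morpheme gloss

pos_sequence = tuple(morpheme_splitter(sc_word))   # ('agra', '-', 'num')
morphemes = morpheme_splitter(mb_word)[::2]        # ['nitsit', 'wa']
glosses = morpheme_splitter(mg_word)[::2]          # ['person', '3']
print(list(zip(pos_sequence[::2], zip(morphemes, glosses))))
# [('agra', ('nitsit', 'person')), ('num', ('wa', '3'))]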
kirca/OpenUpgrade
addons/mail/mail_mail.py
183
18372
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2010-today OpenERP SA (<http://www.openerp.com>) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/> # ############################################################################## import base64 import logging from email.utils import formataddr from urlparse import urljoin from openerp import api, tools from openerp import SUPERUSER_ID from openerp.addons.base.ir.ir_mail_server import MailDeliveryException from openerp.osv import fields, osv from openerp.tools.safe_eval import safe_eval as eval from openerp.tools.translate import _ _logger = logging.getLogger(__name__) class mail_mail(osv.Model): """ Model holding RFC2822 email messages to send. This model also provides facilities to queue and send new email messages. """ _name = 'mail.mail' _description = 'Outgoing Mails' _inherits = {'mail.message': 'mail_message_id'} _order = 'id desc' _rec_name = 'subject' _columns = { 'mail_message_id': fields.many2one('mail.message', 'Message', required=True, ondelete='cascade', auto_join=True), 'state': fields.selection([ ('outgoing', 'Outgoing'), ('sent', 'Sent'), ('received', 'Received'), ('exception', 'Delivery Failed'), ('cancel', 'Cancelled'), ], 'Status', readonly=True, copy=False), 'auto_delete': fields.boolean('Auto Delete', help="Permanently delete this email after sending it, to save space"), 'references': fields.text('References', help='Message references, such as identifiers of previous messages', readonly=1), 'email_to': fields.text('To', help='Message recipients (emails)'), 'recipient_ids': fields.many2many('res.partner', string='To (Partners)'), 'email_cc': fields.char('Cc', help='Carbon copy message recipients'), 'body_html': fields.text('Rich-text Contents', help="Rich-text/HTML message"), 'headers': fields.text('Headers', copy=False), # Auto-detected based on create() - if 'mail_message_id' was passed then this mail is a notification # and during unlink() we will not cascade delete the parent and its attachments 'notification': fields.boolean('Is Notification', help='Mail has been created to notify people of an existing mail.message'), } _defaults = { 'state': 'outgoing', } def default_get(self, cr, uid, fields, context=None): # protection for `default_type` values leaking from menu action context (e.g. 
for invoices) # To remove when automatic context propagation is removed in web client if context and context.get('default_type') and context.get('default_type') not in self._all_columns['type'].column.selection: context = dict(context, default_type=None) return super(mail_mail, self).default_get(cr, uid, fields, context=context) def create(self, cr, uid, values, context=None): # notification field: if not set, set if mail comes from an existing mail.message if 'notification' not in values and values.get('mail_message_id'): values['notification'] = True return super(mail_mail, self).create(cr, uid, values, context=context) def unlink(self, cr, uid, ids, context=None): # cascade-delete the parent message for all mails that are not created for a notification ids_to_cascade = self.search(cr, uid, [('notification', '=', False), ('id', 'in', ids)]) parent_msg_ids = [m.mail_message_id.id for m in self.browse(cr, uid, ids_to_cascade, context=context)] res = super(mail_mail, self).unlink(cr, uid, ids, context=context) self.pool.get('mail.message').unlink(cr, uid, parent_msg_ids, context=context) return res def mark_outgoing(self, cr, uid, ids, context=None): return self.write(cr, uid, ids, {'state': 'outgoing'}, context=context) def cancel(self, cr, uid, ids, context=None): return self.write(cr, uid, ids, {'state': 'cancel'}, context=context) @api.cr_uid def process_email_queue(self, cr, uid, ids=None, context=None): """Send immediately queued messages, committing after each message is sent - this is not transactional and should not be called during another transaction! :param list ids: optional list of emails ids to send. If passed no search is performed, and these ids are used instead. :param dict context: if a 'filters' key is present in context, this value will be used as an additional filter to further restrict the outgoing messages to send (by default all 'outgoing' messages are sent). """ if context is None: context = {} if not ids: filters = [('state', '=', 'outgoing')] if 'filters' in context: filters.extend(context['filters']) ids = self.search(cr, uid, filters, context=context) res = None try: # Force auto-commit - this is meant to be called by # the scheduler, and we can't allow rolling back the status # of previously sent emails! res = self.send(cr, uid, ids, auto_commit=True, context=context) except Exception: _logger.exception("Failed processing mail queue") return res def _postprocess_sent_message(self, cr, uid, mail, context=None, mail_sent=True): """Perform any post-processing necessary after sending ``mail`` successfully, including deleting it completely along with its attachment if the ``auto_delete`` flag of the mail was set. Overridden by subclasses for extra post-processing behaviors. 
:param browse_record mail: the mail that was just sent :return: True """ if mail_sent and mail.auto_delete: # done with SUPERUSER_ID to avoid giving large unlink access rights self.unlink(cr, SUPERUSER_ID, [mail.id], context=context) return True #------------------------------------------------------ # mail_mail formatting, tools and send mechanism #------------------------------------------------------ def _get_partner_access_link(self, cr, uid, mail, partner=None, context=None): """Generate URLs for links in mails: partner has access (is user): link to action_mail_redirect action that will redirect to doc or Inbox """ if context is None: context = {} if partner and partner.user_ids: base_url = self.pool.get('ir.config_parameter').get_param(cr, SUPERUSER_ID, 'web.base.url') mail_model = mail.model or 'mail.thread' url = urljoin(base_url, self.pool[mail_model]._get_access_link(cr, uid, mail, partner, context=context)) return "<span class='oe_mail_footer_access'><small>%(access_msg)s <a style='color:inherit' href='%(portal_link)s'>%(portal_msg)s</a></small></span>" % { 'access_msg': _('about') if mail.record_name else _('access'), 'portal_link': url, 'portal_msg': '%s %s' % (context.get('model_name', ''), mail.record_name) if mail.record_name else _('your messages'), } else: return None def send_get_mail_subject(self, cr, uid, mail, force=False, partner=None, context=None): """If subject is void, set the subject as 'Re: <Resource>' or 'Re: <mail.parent_id.subject>' :param boolean force: force the subject replacement """ if (force or not mail.subject) and mail.record_name: return 'Re: %s' % (mail.record_name) elif (force or not mail.subject) and mail.parent_id and mail.parent_id.subject: return 'Re: %s' % (mail.parent_id.subject) return mail.subject def send_get_mail_body(self, cr, uid, mail, partner=None, context=None): """Return a specific ir_email body. The main purpose of this method is to be inherited to add custom content depending on some module.""" body = mail.body_html # generate access links for notifications or emails linked to a specific document with auto threading link = None if mail.notification or (mail.model and mail.res_id and not mail.no_auto_thread): link = self._get_partner_access_link(cr, uid, mail, partner, context=context) if link: body = tools.append_content_to_html(body, link, plaintext=False, container_tag='div') return body def send_get_mail_to(self, cr, uid, mail, partner=None, context=None): """Forge the email_to with the following heuristic: - if 'partner', recipient specific (Partner Name <email>) - else fallback on mail.email_to splitting """ if partner: email_to = [formataddr((partner.name, partner.email))] else: email_to = tools.email_split(mail.email_to) return email_to def send_get_email_dict(self, cr, uid, mail, partner=None, context=None): """Return a dictionary for specific email values, depending on a partner, or generic to the whole recipients given by mail.email_to. 
:param browse_record mail: mail.mail browse_record :param browse_record partner: specific recipient partner """ body = self.send_get_mail_body(cr, uid, mail, partner=partner, context=context) body_alternative = tools.html2plaintext(body) res = { 'body': body, 'body_alternative': body_alternative, 'subject': self.send_get_mail_subject(cr, uid, mail, partner=partner, context=context), 'email_to': self.send_get_mail_to(cr, uid, mail, partner=partner, context=context), } return res def send(self, cr, uid, ids, auto_commit=False, raise_exception=False, context=None): """ Sends the selected emails immediately, ignoring their current state (mails that have already been sent should not be passed unless they should actually be re-sent). Emails successfully delivered are marked as 'sent', and those that fail to be deliver are marked as 'exception', and the corresponding error mail is output in the server logs. :param bool auto_commit: whether to force a commit of the mail status after sending each mail (meant only for scheduler processing); should never be True during normal transactions (default: False) :param bool raise_exception: whether to raise an exception if the email sending process has failed :return: True """ context = dict(context or {}) ir_mail_server = self.pool.get('ir.mail_server') ir_attachment = self.pool['ir.attachment'] for mail in self.browse(cr, SUPERUSER_ID, ids, context=context): try: # TDE note: remove me when model_id field is present on mail.message - done here to avoid doing it multiple times in the sub method if mail.model: model_id = self.pool['ir.model'].search(cr, SUPERUSER_ID, [('model', '=', mail.model)], context=context)[0] model = self.pool['ir.model'].browse(cr, SUPERUSER_ID, model_id, context=context) else: model = None if model: context['model_name'] = model.name # load attachment binary data with a separate read(), as prefetching all # `datas` (binary field) could bloat the browse cache, triggerring # soft/hard mem limits with temporary data. attachment_ids = [a.id for a in mail.attachment_ids] attachments = [(a['datas_fname'], base64.b64decode(a['datas'])) for a in ir_attachment.read(cr, SUPERUSER_ID, attachment_ids, ['datas_fname', 'datas'])] # specific behavior to customize the send email for notified partners email_list = [] if mail.email_to: email_list.append(self.send_get_email_dict(cr, uid, mail, context=context)) for partner in mail.recipient_ids: email_list.append(self.send_get_email_dict(cr, uid, mail, partner=partner, context=context)) # headers headers = {} bounce_alias = self.pool['ir.config_parameter'].get_param(cr, uid, "mail.bounce.alias", context=context) catchall_domain = self.pool['ir.config_parameter'].get_param(cr, uid, "mail.catchall.domain", context=context) if bounce_alias and catchall_domain: if mail.model and mail.res_id: headers['Return-Path'] = '%s-%d-%s-%d@%s' % (bounce_alias, mail.id, mail.model, mail.res_id, catchall_domain) else: headers['Return-Path'] = '%s-%d@%s' % (bounce_alias, mail.id, catchall_domain) if mail.headers: try: headers.update(eval(mail.headers)) except Exception: pass # Writing on the mail object may fail (e.g. lock on user) which # would trigger a rollback *after* actually sending the email. 
# To avoid sending twice the same email, provoke the failure earlier mail.write({'state': 'exception'}) mail_sent = False # build an RFC2822 email.message.Message object and send it without queuing res = None for email in email_list: msg = ir_mail_server.build_email( email_from=mail.email_from, email_to=email.get('email_to'), subject=email.get('subject'), body=email.get('body'), body_alternative=email.get('body_alternative'), email_cc=tools.email_split(mail.email_cc), reply_to=mail.reply_to, attachments=attachments, message_id=mail.message_id, references=mail.references, object_id=mail.res_id and ('%s-%s' % (mail.res_id, mail.model)), subtype='html', subtype_alternative='plain', headers=headers) try: res = ir_mail_server.send_email(cr, uid, msg, mail_server_id=mail.mail_server_id.id, context=context) except AssertionError as error: if error.message == ir_mail_server.NO_VALID_RECIPIENT: # No valid recipient found for this particular # mail item -> ignore error to avoid blocking # delivery to next recipients, if any. If this is # the only recipient, the mail will show as failed. _logger.warning("Ignoring invalid recipients for mail.mail %s: %s", mail.message_id, email.get('email_to')) else: raise if res: mail.write({'state': 'sent', 'message_id': res}) mail_sent = True # /!\ can't use mail.state here, as mail.refresh() will cause an error # see revid:[email protected] in 6.1 if mail_sent: _logger.info('Mail with ID %r and Message-Id %r successfully sent', mail.id, mail.message_id) self._postprocess_sent_message(cr, uid, mail, context=context, mail_sent=mail_sent) except MemoryError: # prevent catching transient MemoryErrors, bubble up to notify user or abort cron job # instead of marking the mail as failed _logger.exception('MemoryError while processing mail with ID %r and Msg-Id %r. '\ 'Consider raising the --limit-memory-hard startup option', mail.id, mail.message_id) raise except Exception as e: _logger.exception('failed sending mail.mail %s', mail.id) mail.write({'state': 'exception'}) self._postprocess_sent_message(cr, uid, mail, context=context, mail_sent=False) if raise_exception: if isinstance(e, AssertionError): # get the args of the original error, wrap into a value and throw a MailDeliveryException # that is an except_orm, with name and value as arguments value = '. '.join(e.args) raise MailDeliveryException(_("Mail Delivery Failed"), value) raise if auto_commit is True: cr.commit() return True
agpl-3.0
6,940,290,362,195,018,000
3,078,485,573,848,912,400
52.252174
164
0.574897
false
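send() in the record above builds VERP-style bounce addresses from the `mail.bounce.alias` and `mail.catchall.domain` parameters. A standalone sketch of just that Return-Path heuristic, using the same format strings as the record; the alias, domain and ids below are made-up values:

def build_return_path(bounce_alias, catchall_domain, mail_id, model=None, res_id=None):
    # Mirrors the two format strings used in mail_mail.send() above.
    if model and res_id:
        return '%s-%d-%s-%d@%s' % (bounce_alias, mail_id, model, res_id, catchall_domain)
    return '%s-%d@%s' % (bounce_alias, mail_id, catchall_domain)

print(build_return_path('bounce', 'example.com', 42, 'res.partner', 7))
# bounce-42-res.partner-7@example.com
print(build_return_path('bounce', 'example.com', 42))
# bounce-42@example.com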
thomazs/geraldo
site/newsite/site-geraldo/django/views/defaults.py
24
3359
from django.core.exceptions import ObjectDoesNotExist
from django.template import Context, RequestContext, loader
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django import http

def shortcut(request, content_type_id, object_id):
    "Redirect to an object's page based on a content-type ID and an object ID."
    # Look up the object, making sure it's got a get_absolute_url() function.
    try:
        content_type = ContentType.objects.get(pk=content_type_id)
        obj = content_type.get_object_for_this_type(pk=object_id)
    except ObjectDoesNotExist:
        raise http.Http404, "Content type %s object %s doesn't exist" % (content_type_id, object_id)
    try:
        absurl = obj.get_absolute_url()
    except AttributeError:
        raise http.Http404, "%s objects don't have get_absolute_url() methods" % content_type.name

    # Try to figure out the object's domain, so we can do a cross-site redirect
    # if necessary.

    # If the object actually defines a domain, we're done.
    if absurl.startswith('http://') or absurl.startswith('https://'):
        return http.HttpResponseRedirect(absurl)

    object_domain = None

    # Otherwise, we need to introspect the object's relationships for a
    # relation to the Site object
    opts = obj._meta

    # First, look for an many-to-many relationship to sites
    for field in opts.many_to_many:
        if field.rel.to is Site:
            try:
                object_domain = getattr(obj, field.name).all()[0].domain
            except IndexError:
                pass
            if object_domain is not None:
                break

    # Next look for a many-to-one relationship to site
    if object_domain is None:
        for field in obj._meta.fields:
            if field.rel and field.rel.to is Site:
                try:
                    object_domain = getattr(obj, field.name).domain
                except Site.DoesNotExist:
                    pass
                if object_domain is not None:
                    break

    # Fall back to the current site (if possible)
    if object_domain is None:
        try:
            object_domain = Site.objects.get_current().domain
        except Site.DoesNotExist:
            pass

    # If all that malarkey found an object domain, use it; otherwise fall back
    # to whatever get_absolute_url() returned.
    if object_domain is not None:
        protocol = request.is_secure() and 'https' or 'http'
        return http.HttpResponseRedirect('%s://%s%s' % (protocol, object_domain, absurl))
    else:
        return http.HttpResponseRedirect(absurl)

def page_not_found(request, template_name='404.html'):
    """
    Default 404 handler.

    Templates: `404.html`
    Context:
        request_path
            The path of the requested URL (e.g., '/app/pages/bad_page/')
    """
    t = loader.get_template(template_name) # You need to create a 404.html template.
    return http.HttpResponseNotFound(t.render(RequestContext(request, {'request_path': request.path})))

def server_error(request, template_name='500.html'):
    """
    500 error handler.

    Templates: `500.html`
    Context: None
    """
    t = loader.get_template(template_name) # You need to create a 500.html template.
    return http.HttpResponseServerError(t.render(Context({})))
lgpl-3.0
4,443,825,121,961,874,400
2,945,698,846,152,905,700
36.741573
103
0.645728
false
wrouesnel/ansible
lib/ansible/modules/cloud/vmware/vmware_migrate_vmk.py
26
7038
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2015, Joseph Callen <jcallen () csc.com> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: vmware_migrate_vmk short_description: Migrate a VMK interface from VSS to VDS description: - Migrate a VMK interface from VSS to VDS version_added: 2.0 author: "Joseph Callen (@jcpowermac), Russell Teague (@mtnbikenc)" notes: - Tested on vSphere 5.5 requirements: - "python >= 2.6" - PyVmomi options: esxi_hostname: description: - ESXi hostname to be managed required: True device: description: - VMK interface name required: True current_switch_name: description: - Switch VMK interface is currently on required: True current_portgroup_name: description: - Portgroup name VMK interface is currently on required: True migrate_switch_name: description: - Switch name to migrate VMK interface to required: True migrate_portgroup_name: description: - Portgroup name to migrate VMK interface to required: True extends_documentation_fragment: vmware.documentation ''' EXAMPLES = ''' # Example from Ansible playbook - name: Migrate Management vmk local_action: module: vmware_migrate_vmk hostname: vcsa_host username: vcsa_user password: vcsa_pass esxi_hostname: esxi_hostname device: vmk1 current_switch_name: temp_vswitch current_portgroup_name: esx-mgmt migrate_switch_name: dvSwitch migrate_portgroup_name: Management ''' try: from pyVmomi import vim, vmodl HAS_PYVMOMI = True except ImportError: HAS_PYVMOMI = False from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.vmware import (vmware_argument_spec, find_dvs_by_name, find_hostsystem_by_name, connect_to_api, find_dvspg_by_name) class VMwareMigrateVmk(object): def __init__(self, module): self.module = module self.host_system = None self.migrate_switch_name = self.module.params['migrate_switch_name'] self.migrate_portgroup_name = self.module.params['migrate_portgroup_name'] self.device = self.module.params['device'] self.esxi_hostname = self.module.params['esxi_hostname'] self.current_portgroup_name = self.module.params['current_portgroup_name'] self.current_switch_name = self.module.params['current_switch_name'] self.content = connect_to_api(module) def process_state(self): try: vmk_migration_states = { 'migrate_vss_vds': self.state_migrate_vss_vds, 'migrate_vds_vss': self.state_migrate_vds_vss, 'migrated': self.state_exit_unchanged } vmk_migration_states[self.check_vmk_current_state()]() except vmodl.RuntimeFault as runtime_fault: self.module.fail_json(msg=runtime_fault.msg) except vmodl.MethodFault as method_fault: self.module.fail_json(msg=method_fault.msg) except Exception as e: self.module.fail_json(msg=str(e)) def state_exit_unchanged(self): self.module.exit_json(changed=False) def state_migrate_vds_vss(self): self.module.exit_json(changed=False, msg="Currently Not Implemented") def create_host_vnic_config(self, dv_switch_uuid, portgroup_key): host_vnic_config = vim.host.VirtualNic.Config() host_vnic_config.spec = vim.host.VirtualNic.Specification() host_vnic_config.changeOperation = "edit" host_vnic_config.device = self.device host_vnic_config.portgroup = "" host_vnic_config.spec.distributedVirtualPort = vim.dvs.PortConnection() host_vnic_config.spec.distributedVirtualPort.switchUuid = dv_switch_uuid 
host_vnic_config.spec.distributedVirtualPort.portgroupKey = portgroup_key return host_vnic_config def create_port_group_config(self): port_group_config = vim.host.PortGroup.Config() port_group_config.spec = vim.host.PortGroup.Specification() port_group_config.changeOperation = "remove" port_group_config.spec.name = self.current_portgroup_name port_group_config.spec.vlanId = -1 port_group_config.spec.vswitchName = self.current_switch_name port_group_config.spec.policy = vim.host.NetworkPolicy() return port_group_config def state_migrate_vss_vds(self): host_network_system = self.host_system.configManager.networkSystem dv_switch = find_dvs_by_name(self.content, self.migrate_switch_name) pg = find_dvspg_by_name(dv_switch, self.migrate_portgroup_name) config = vim.host.NetworkConfig() config.portgroup = [self.create_port_group_config()] config.vnic = [self.create_host_vnic_config(dv_switch.uuid, pg.key)] host_network_system.UpdateNetworkConfig(config, "modify") self.module.exit_json(changed=True) def check_vmk_current_state(self): self.host_system = find_hostsystem_by_name(self.content, self.esxi_hostname) for vnic in self.host_system.configManager.networkSystem.networkInfo.vnic: if vnic.device == self.device: # self.vnic = vnic if vnic.spec.distributedVirtualPort is None: if vnic.portgroup == self.current_portgroup_name: return "migrate_vss_vds" else: dvs = find_dvs_by_name(self.content, self.current_switch_name) if dvs is None: return "migrated" if vnic.spec.distributedVirtualPort.switchUuid == dvs.uuid: return "migrate_vds_vss" def main(): argument_spec = vmware_argument_spec() argument_spec.update(dict(esxi_hostname=dict(required=True, type='str'), device=dict(required=True, type='str'), current_switch_name=dict(required=True, type='str'), current_portgroup_name=dict(required=True, type='str'), migrate_switch_name=dict(required=True, type='str'), migrate_portgroup_name=dict(required=True, type='str'))) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False) if not HAS_PYVMOMI: module.fail_json(msg='pyvmomi required for this module') vmware_migrate_vmk = VMwareMigrateVmk(module) vmware_migrate_vmk.process_state() if __name__ == '__main__': main()
gpl-3.0
8,160,091,741,190,676,000
837,527,403,754,184,300
35.278351
105
0.631571
false
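process_state() in the record above dispatches on the current VMK state through a dict of bound methods rather than an if/elif chain. A stripped-down sketch of that dispatch pattern; the class below is a stand-in that only mimics two of the record's states, and its check method and print statements are invented:

class VmkMigrator(object):
    def __init__(self, currently_on_vss):
        self.currently_on_vss = currently_on_vss

    def check_vmk_current_state(self):
        # Stand-in for the real inspection of host networking config.
        return 'migrate_vss_vds' if self.currently_on_vss else 'migrated'

    def state_migrate_vss_vds(self):
        print('migrating vmk from VSS to VDS')

    def state_exit_unchanged(self):
        print('already on the target switch, nothing to do')

    def process_state(self):
        handlers = {
            'migrate_vss_vds': self.state_migrate_vss_vds,
            'migrated': self.state_exit_unchanged,
        }
        handlers[self.check_vmk_current_state()]()

VmkMigrator(currently_on_vss=True).process_state()   # -> migrating vmk from VSS to VDS
VmkMigrator(currently_on_vss=False).process_state()  # -> already on the target switch, nothing to do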
adamgilman/ems-costing
tests/tests_postageapp.py
1
1049
from vendors import PostageApp
import unittest

class TestPostageApp(unittest.TestCase):
    def setUp(self):
        self.vendor = PostageApp()

    def test_ZeroEmails(self):
        self.assertEqual(self.vendor.getPrice(0), 9)

    def test_Zero_10000(self):
        self.assertEqual(self.vendor.getPrice(1), 9)
        self.assertEqual(self.vendor.getPrice(10000), 9)
        self.assertEqual(self.vendor.getPrice(11000), 10)

    def test_40k_pm(self):
        self.assertEqual(self.vendor.getPrice(40000), 29)
        self.assertEqual(self.vendor.getPrice(41000), 30)

    def test_100k_pm(self):
        self.assertEqual(self.vendor.getPrice(100000), 79)
        self.assertEqual(self.vendor.getPrice(101333), 80)

    def test_400k_pm(self):
        self.assertEqual(self.vendor.getPrice(400000), 199)
        self.assertEqual(self.vendor.getPrice(401333), 200)

    def test_500k_pm(self):
        self.assertEqual(self.vendor.getPrice(500000), 274)

    def test_700k_pm(self):
        self.assertEqual(self.vendor.getPrice(700000), 424)
mit
3,161,236,778,834,867,700
9,003,849,459,244,296,000
30.787879
59
0.678742
false
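The tests above pin a tiered, volume-based price table (e.g. 10,000 emails -> 9, 40,000 -> 29). A generic sketch of a tier lookup that a getPrice()-style method could be built on; the table below only covers the base plans visible in the assertions and omits the per-1,000 overage the tests also exercise, so it is not PostageApp's actual tariff:

import bisect

# (max emails per month, base monthly price) - taken from the base assertions above.
TIERS = [(10000, 9), (40000, 29), (100000, 79), (400000, 199)]

def get_base_price(volume):
    thresholds = [limit for limit, _price in TIERS]
    index = bisect.bisect_left(thresholds, volume)
    if index == len(TIERS):          # beyond the largest plan: price from the top tier
        index = len(TIERS) - 1
    return TIERS[index][1]

print(get_base_price(0))      # 9
print(get_base_price(10000))  # 9
print(get_base_price(40001))  # 79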
pshahzeb/vsphere-storage-for-docker
esx_service/cli/vmdkops_admin.py
1
58143
#!/usr/bin/env python # Copyright 2016 VMware, Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Admin CLI for vmdk_opsd import argparse import os import subprocess import sys import signal import os.path import shutil import time import vmdk_ops # vmdkops python utils are in PY_LOC, so add to path. sys.path.insert(0, vmdk_ops.PY_LOC) import volume_kv as kv import cli_table import cli_xml import vsan_policy import vmdk_utils import vsan_info import local_sh import log_config import auth import auth_data_const import convert import auth_api import auth_data from auth_data import DB_REF from error_code import ErrorCode from error_code import error_code_to_message from error_code import generate_error_info # generic strings NOT_AVAILABLE = 'N/A' UNSET = "Unset" # Volume attributes VOL_SIZE = 'size' VOL_ALLOC = 'allocated' # Return this to shell # note: "1" is returned if a string is passed to sys.exit CLI_ERR_ARGS_PARSE = 3 CLI_ERR_OPERATION_FAILURE = 2 CLI_SUCCESS = 0 def main(): 'Main function for Admin CLI' log_config.configure() kv.init() if not vmdk_ops.is_service_available(): sys.exit('Unable to connect to the host-agent on this host, ensure the ESXi host agent is running before retrying.') args = parse_args() if not args: sys.exit(CLI_ERR_ARGS_PARSE) if args.func(args) != None: sys.exit(CLI_ERR_OPERATION_FAILURE) sys.exit(CLI_SUCCESS) # not really needed, putting here as an eye candy def commands(): """ This function returns a dictionary representation of a CLI specification that is used to generate a CLI parser. The dictionary is recursively walked in the `add_subparser()` function and appropriate calls are made to the `argparse` module to create a CLI parser that fits the specification. Each key in the top level of the dictionary is a command string. Each command may contain the following keys: * func - The callback function to be called when the command is issued. This key is always present unless there are subcommands, denoted by a 'cmds' key. * help - The help string that is printed when the `-h` or `--help` paramters are given without reference to a given command. (i.e. `./vmdkops_admin.py -h`). All top level help strings are printed in this instance. * args - A dictionary of any positional or optional arguments allowed for the given command. The args dictionary may contain the following keys: * help - The help for a given option which is displayed when the `-h` flag is given with mention to a given command. (i.e. `./vmdkops_admin.py volume ls -h`). Help for all options are shown for the command. * action - The action to take when the option is given. This is directly passed to argparse. Note that `store_true` just means pass the option to the callback as a boolean `True` value and don't require option parameters. (i.e. `./vmdkops_admin.py volume ls -l`). Other options for the action value can be found in the argparse documentation. https://docs.python.org/3/library/argparse.html#action * metavar - A way to refer to each expected argument in help documentation. 
This is directly passed to argparse. See https://docs.python.org/3/library/argparse.html#metavar * required - Whether or not the argument is required. This is directly passed to argparse. * type - A type conversion function that takes the option parameter and converts it to a given type before passing it to the func callback. It prints an error and exits if the given argument cannot be converted. See https://docs.python.org/3/library/argparse.html#type * choices - A list of choices that can be provided for the given option. This list is not directly passed to argparse. Instead a type conversion function is created that only allows one or more of the choices as a comma separated list to be supplied. An error identical to the one presented when using the 'choices' option in argparse is printed if an invalid choice is given. The rationale for not directly using the argparse choices option is that argparse requires space separated arguments of the form: `-l a b c`, rather than the defacto single argument, comma separated form: `-l a,b,c`, common to most unix programs. * cmds - A dictionary of subcommands where the key is the next word in the command line string. For example, in `vmdkops_admin.py tenant create`, `tenant` is the command, and `create` is the subcommand. Subcommands can have further subcommands, but currently there is only one level of subcommands in this specification. Each subcommand can contain the same attributes as top level commands: (func, help, args, cmds). These attributes have identical usage to the top-level keys, except they only apply when the subcommand is part of the command. For example the `--vm-list` argument only applies to `tenant create` or `tenant set` commands. It will be invalid in any other context. Note that the last subcommand in a chain is the one where the callback function is defined. For example, `tenant create` has a callback, but if a user runs the program like: `./vmdkops_admin.py tenant` they will get the following error: ``` usage: vmdkops_admin.py tenant [-h] {rm,create,volume,get} ... vmdkops_admin.py tenant: error: too few arguments ``` """ return { 'volume' : { 'help': "Manipulate volumes", 'cmds': { 'ls': { 'func': ls, 'help': 'List volumes', 'args': { '-c': { 'help': 'Display selected columns', 'choices': ['volume', 'datastore', 'vmgroup', 'capacity', 'used', 'fstype', 'policy', 'disk-format', 'attached-to', 'access', 'attach-as', 'created-by', 'created'], 'metavar': 'Col1,Col2,...' 
}, '--vmgroup' : { 'help': 'Displays volumes for a given vmgroup' } } }, 'set': { 'func': set_vol_opts, 'help': 'Edit settings for a given volume', 'args': { '--volume': { 'help': 'Volume to set options for, specified as "volume@datastore".', 'required': True }, '--vmgroup': { 'help': 'Name of the vmgroup the volume belongs to.', 'required': True }, '--options': { 'help': 'Options (specifically, access) to be set on the volume.', 'required': True } } } } }, 'policy': { 'help': 'Configure and display storage policy information', 'cmds': { 'create': { 'func': policy_create, 'help': 'Create a storage policy', 'args': { '--name': { 'help': 'The name of the policy', 'required': True }, '--content': { 'help': 'The VSAN policy string', 'required': True } } }, 'rm': { 'func': policy_rm, 'help': 'Remove a storage policy', 'args': { '--name': { 'help': 'Policy name', 'required': True } } }, 'ls': { 'func': policy_ls, 'help': 'List storage policies and volumes using those policies' }, 'update': { 'func': policy_update, 'help': ('Update the definition of a storage policy and all' 'VSAN objects using that policy'), 'args': { '--name': { 'help': 'The name of the policy', 'required': True }, '--content': { 'help': 'The VSAN policy string', 'required': True } } } } }, 'vmgroup': { # # vmgroup {create, update, rm , ls} - manipulates vmgroup # vmgroup vm {add, rm, ls} - manipulates VMs for a vmgroup # vmgroup access {add, set, rm, ls} - manipulates datastore access right for a vmgroup # # Internally, "vmgroup" is called "tenant". # We decided to keep the name of functions as "tenant_*" for now 'help': 'Administer and monitor volume access control', 'cmds': { 'create': { 'func': tenant_create, 'help': 'Create a new vmgroup', 'args': { '--name': { 'help': 'The name of the vmgroup', 'required': True }, '--description': { 'help': 'The description of the vmgroup', }, # a shortcut allowing to add VMs on vmgroup Create '--vm-list': { 'help': 'A list of VM names to place in this vmgroup', 'metavar': 'vm1, vm2, ...', 'type': comma_separated_string }, '--default-datastore': { 'help': 'Datastore to be used by default for volumes placement', 'required': True } } }, 'update': { 'func': tenant_update, 'help': 'Update an existing vmgroup', 'args': { '--name': { 'help': 'The name of the vmgroup', 'required': True }, '--new-name': { 'help': 'The new name of the vmgroup', }, '--description': { 'help': 'The new description of the vmgroup', }, '--default-datastore': { 'help': 'Datastore to be used by default for volumes placement', } } }, 'rm': { 'func': tenant_rm, 'help': 'Delete a vmgroup', 'args': { '--name': { 'help': 'The name of the vmgroup', 'required': True }, '--remove-volumes': { 'help': 'BE CAREFUL: Removes this vmgroup volumes when removing a vmgroup', 'action': 'store_true' }, '--force': { 'help': 'Force operation, ignore warnings', 'action': 'store_true' } } }, 'ls': { 'func': tenant_ls, 'help': 'List vmgroups and the VMs they are applied to' }, 'vm': { 'help': 'Add, removes and lists VMs in a vmgroup', 'cmds': { 'add': { 'help': 'Add a VM(s) to a vmgroup', 'func': tenant_vm_add, 'args': { '--name': { 'help': "Vmgroup to add the VM to", 'required': True }, '--vm-list': { 'help': "A list of VM names to add to this vmgroup", 'type': comma_separated_string, 'required': True } } }, 'rm': { 'help': 'Remove VM(s) from a vmgroup', 'func': tenant_vm_rm, 'args': { '--name': { 'help': "Vmgroup to remove the VM from", 'required': True }, '--vm-list': { 'help': "A list of VM names to rm from this vmgroup", 'type': 
comma_separated_string, 'required': True } } }, 'replace': { 'help': 'Replace VM(s) for a vmgroup', 'func': tenant_vm_replace, 'args': { '--name': { 'help': "Vmgroup to replace the VM for", 'required': True }, '--vm-list': { 'help': "A list of VM names to replace for this vmgroup", 'type': comma_separated_string, 'required': True } } }, 'ls': { 'help': "list VMs in a vmgroup", 'func': tenant_vm_ls, 'args': { '--name': { 'help': "Vmgroup to list the VMs for", 'required': True } } } } }, 'access': { 'help': 'Add or remove Datastore access and quotas for a vmgroup', 'cmds': { 'add': { 'func': tenant_access_add, 'help': 'Add a datastore access for a vmgroup', 'args': { '--name': { 'help': 'The name of the vmgroup', 'required': True }, '--datastore': { 'help': "Datastore which access is controlled", 'required': True }, '--allow-create': { 'help': 'Allow create and delete on datastore if set', 'action': 'store_true' }, '--volume-maxsize': { 'help': 'Maximum size of the volume that can be created', 'metavar': 'Num{MB,GB,TB} - e.g. 2TB' }, '--volume-totalsize': { 'help': 'Maximum total size of all volume that can be created on the datastore for this vmgroup', 'metavar': 'Num{MB,GB,TB} - e.g. 2TB' } } }, 'set': { 'func': tenant_access_set, 'help': 'Modify datastore access for a vmgroup', 'args': { '--name': { 'help': 'The name of the vmgroup', 'required': True }, '--datastore': { 'help': "Datastore name", 'required': True }, '--allow-create': { 'help': 'Allow create and delete on datastore if set to True; disallow create and delete on datastore if set to False', 'metavar': 'Value{True|False} - e.g. True' }, '--volume-maxsize': { 'help': 'Maximum size of the volume that can be created', 'metavar': 'Num{MB,GB,TB} - e.g. 2TB' }, '--volume-totalsize': { 'help': 'Maximum total size of all volume that can be created on the datastore for this vmgroup', 'metavar': 'Num{MB,GB,TB} - e.g. 
2TB' } } }, 'rm': { 'func': tenant_access_rm, 'help': "Remove all access to a datastore for a vmgroup", 'args': { '--name': { 'help': 'The name of the vmgroup', 'required': True }, '--datastore': { 'help': "Datstore which access is controlled", 'required': True } } }, 'ls': { 'func': tenant_access_ls, 'help': 'List all access info for a vmgroup', 'args': { '--name': { 'help': 'The name of the vmgroup', 'required': True } } } } } } }, 'config': { 'help': 'Init and manage Config DB to enable quotas and access control [EXPERIMENTAL]', 'cmds': { 'init': { 'func': config_init, 'help': 'Init ' + DB_REF + ' to allows quotas and access groups, aka vmgroups', 'args': { '--datastore': { 'help': DB_REF + ' will be placed on a shared datastore', }, '--local': { 'help': 'Allows local (SingleNode) Init', 'action': 'store_true' }, '--force': { 'help': 'Force operation, ignore warnings', 'action': 'store_true' } } }, 'rm': { 'func': config_rm, 'help': 'Remove ' + DB_REF, 'args': { '--local': { 'help': 'Remove only local link or local DB', 'action': 'store_true' }, '--unlink': { 'help': 'Remove the local link to shared DB', 'action': 'store_true' }, '--no-backup': { 'help': 'Do not create DB backup before removing', 'action': 'store_true' }, '--confirm': { 'help': 'Explicitly confirm the operation', 'action': 'store_true' } } }, 'mv': { 'func': config_mv, 'help': 'Relocate ' + DB_REF + ' from its current location [NOT SUPPORTED YET]', 'args': { '--force': { 'help': 'Force operation, ignore warnings', 'action': 'store_true' }, '--to': { 'help': 'Where to move the DB to.', 'required': True } } }, 'status': { 'func': config_status, 'help': 'Show the status of the Config DB' } } }, 'status': { 'func': status, 'help': 'Show the status of the vmdk_ops service', 'args': { '--fast': { 'help': 'Skip some of the data collection (port, version)', 'action': 'store_true' } } } } def printList(output_format, header, rows): """ Prints the output generated from header and rows in specified format """ if output_format == "xml": print(cli_xml.create(header, rows)) else: print(cli_table.create(header, rows)) def printMessage(output_format, message): """ Prints the message in specified output format """ if output_format == "xml": print(cli_xml.createMessage(message)) else: print(message) def create_parser(): """ Create a CLI parser via argparse based on the dictionary returned from commands() """ parser = argparse.ArgumentParser(description='vSphere Docker Volume Service admin CLI') parser.add_argument('--output-format', help='Specify output format. Supported format : xml. 
Default one is plaintext') add_subparser(parser, commands(), title='Manage VMDK-based Volumes for Docker') return parser def add_subparser(parser, cmds_dict, title="", description=""): """ Recursively add subcommand parsers based on a dictionary of commands """ subparsers = parser.add_subparsers(title=title, description=description, help="action") for cmd, attributes in cmds_dict.items(): subparser = subparsers.add_parser(cmd, help=attributes['help']) if 'func' in attributes: subparser.set_defaults(func=attributes['func']) if 'args' in attributes: for arg, opts in attributes['args'].items(): opts = build_argparse_opts(opts) subparser.add_argument(arg, **opts) if 'cmds' in attributes: add_subparser(subparser, attributes['cmds'], title=attributes['help']) def build_argparse_opts(opts): if 'choices' in opts: opts['type'] = make_list_of_values(opts['choices']) help_opts = opts['help'] opts['help'] = '{0}: Choices = {1}'.format(help_opts, opts['choices']) del opts['choices'] return opts def parse_args(): parser = create_parser() args = parser.parse_args() opts = vars(args) if args != argparse.Namespace() and 'func' in opts.keys(): return args else: parser.print_help() def comma_separated_string(string): return string.split(',') def make_list_of_values(allowed): """ Take a list of allowed values for an option and return a function that can be used to typecheck a string of given values and ensure they match the allowed values. This is required to support options that take comma separated lists such as --rights in 'tenant set --rights=create,delete,mount' """ def list_of_values(string): given = string.split(',') for g in given: if g not in allowed: msg = ( 'invalid choices: {0} (choices must be a comma separated list of ' 'only the following words \n {1}. ' 'No spaces are allowed between choices.)').format(g, repr(allowed).replace(' ', '')) raise argparse.ArgumentTypeError(msg) return given return list_of_values def ls(args): """ Print a table of all volumes and their datastores when called with no args. If args.l is True then show all metadata in a table. If args.c is not empty only display columns given in args.c (implies -l). 
""" tenant_reg = '*' if args.vmgroup: tenant_reg = args.vmgroup if args.c: (header, rows) = ls_dash_c(args.c, tenant_reg) else: header = all_ls_headers() rows = generate_ls_rows(tenant_reg) printList(args.output_format, header, rows) def ls_dash_c(columns, tenant_reg): """ Return only the columns requested in the format required for table construction """ all_headers = all_ls_headers() all_rows = generate_ls_rows(tenant_reg) indexes = [] headers = [] choices = commands()['volume']['cmds']['ls']['args']['-c']['choices'] for i, choice in enumerate(choices): if choice in columns: indexes.append(i) headers.append(all_headers[i]) rows = [] for row in all_rows: rows.append([row[i] for i in indexes]) return (headers, rows) def all_ls_headers(): """ Return a list of all header for ls -l """ return ['Volume', 'Datastore', 'VMGroup', 'Capacity', 'Used', 'Filesystem', 'Policy', 'Disk Format', 'Attached-to', 'Access', 'Attach-as', 'Created By', 'Created Date'] def generate_ls_rows(tenant_reg): """ Gather all volume metadata into rows that can be used to format a table """ rows = [] for v in vmdk_utils.get_volumes(tenant_reg): if 'tenant' not in v or v['tenant'] == auth_data_const.ORPHAN_TENANT: tenant = 'N/A' else: tenant = v['tenant'] path = os.path.join(v['path'], v['filename']) name = vmdk_utils.strip_vmdk_extension(v['filename']) metadata = get_metadata(path) attached_to = get_attached_to(metadata) policy = get_policy(metadata, path) size_info = get_vmdk_size_info(path) created, created_by = get_creation_info(metadata) diskformat = get_diskformat(metadata) fstype = get_fstype(metadata) access = get_access(metadata) attach_as = get_attach_as(metadata) rows.append([name, v['datastore'], tenant, size_info['capacity'], size_info['used'], fstype, policy, diskformat, attached_to, access, attach_as, created_by, created]) return rows def get_creation_info(metadata): """ Return the creation time and creation vm for a volume given its metadata """ try: return (metadata[kv.CREATED], metadata[kv.CREATED_BY]) except: return (NOT_AVAILABLE, NOT_AVAILABLE) def get_attached_to(metadata): """ Return which VM a volume is attached to based on its metadata. 
""" try: if kv.ATTACHED_VM_UUID in metadata: vm_name = vmdk_ops.vm_uuid2name(metadata[kv.ATTACHED_VM_UUID]) if vm_name: return vm_name # If vm name couldn't be retrieved through uuid, use name from KV elif kv.ATTACHED_VM_NAME in metadata: return metadata[kv.ATTACHED_VM_NAME] else: return metadata[kv.ATTACHED_VM_UUID] else: return kv.DETACHED except: return kv.DETACHED def get_attach_as(metadata): """ Return which mode a volume is attached as based on its metadata """ try: return metadata[kv.VOL_OPTS][kv.ATTACH_AS] except: return kv.DEFAULT_ATTACH_AS def get_access(metadata): """ Return the access mode of a volume based on its metadata """ try: return metadata[kv.VOL_OPTS][kv.ACCESS] except: return kv.DEFAULT_ACCESS def get_policy(metadata, path): """ Return the policy for a volume given its volume options """ try: return metadata[kv.VOL_OPTS][kv.VSAN_POLICY_NAME] except: pass if vsan_info.is_on_vsan(path): return kv.DEFAULT_VSAN_POLICY else: return NOT_AVAILABLE def get_diskformat(metadata): """ Return the Disk Format of the volume based on its metadata """ try: return metadata[kv.VOL_OPTS][kv.DISK_ALLOCATION_FORMAT] except: return NOT_AVAILABLE def get_fstype(metadata): """ Return the Filesystem Type of the volume based on its metadata """ try: return metadata[kv.VOL_OPTS][kv.FILESYSTEM_TYPE] except: return NOT_AVAILABLE def get_metadata(volPath): """ Take the absolute path to volume vmdk and return its metadata as a dict """ return kv.getAll(volPath) def get_vmdk_size_info(path): """ Get the capacity and used space for a given VMDK given its absolute path. Values are returned as strings in human readable form (e.g. 10MB) Using get_vol_info api from volume kv. The info returned by this api is in human readable form """ try: vol_info = kv.get_vol_info(path) if not vol_info: # race: volume is already gone return {'capacity': NOT_AVAILABLE, 'used': NOT_AVAILABLE} return {'capacity': vol_info[VOL_SIZE], 'used': vol_info[VOL_ALLOC]} except subprocess.CalledProcessError: sys.exit("Failed to retrieve volume info for {0}.".format(path) \ + " VMDK corrupted. Please remove and then retry") KB = 1024 MB = 1024*KB GB = 1024*MB TB = 1024*GB def human_readable(size_in_bytes): """ Take an integer size in bytes and convert it to MB, GB, or TB depending upon size. 
""" if size_in_bytes >= TB: return '{:.2f}TB'.format(size_in_bytes/TB) if size_in_bytes >= GB: return '{:.2f}GB'.format(size_in_bytes/GB) if size_in_bytes >= MB: return '{:.2f}MB'.format(size_in_bytes/MB) if size_in_bytes >= KB: return '{:.2f}KB'.format(size_in_bytes/KB) return '{0}B'.format(size_in_bytes) def policy_create(args): output = vsan_policy.create(args.name, args.content) if output: return err_out(output) else: printMessage(args.output_format, 'Successfully created policy: {0}'.format(args.name)) def policy_rm(args): output = vsan_policy.delete(args.name) if output: return err_out(output) else: printMessage(args.output_format, 'Successfully removed policy: {0}'.format(args.name)) def policy_ls(args): volumes = vsan_policy.list_volumes_and_policies() policies = vsan_policy.get_policies() header = ['Policy Name', 'Policy Content', 'Active'] rows = [] used_policies = {} for v in volumes: policy_name = v['policy'] if policy_name in used_policies: used_policies[policy_name] = used_policies[policy_name] + 1 else: used_policies[policy_name] = 1 for name, content in policies.items(): if name in used_policies: active = 'In use by {0} volumes'.format(used_policies[name]) else: active = 'Unused' rows.append([name, content.strip(), active]) printList(args.output_format, header, rows) def policy_update(args): output = vsan_policy.update(args.name, args.content) if output: return err_out(output) else: printMessage(args.output_format, 'Successfully updated policy {0}'.format(args.name)) def status(args): """Prints misc. status information. Returns an array of 1 element dicts""" result = [] # version is extracted from localcli... slow... result.append({"=== Service": ""}) version = "?" if args.fast else str(get_version()) result.append({"Version": version}) (service_status, pid) = get_service_status() result.append({"Status": str(service_status)}) if pid: result.append({"Pid": str(pid)}) port = "?" if args.fast else str(get_listening_port(pid)) result.append({"Port": port}) result.append({"LogConfigFile": log_config.LOG_CONFIG_FILE}) result.append({"LogFile": log_config.LOG_FILE}) result.append({"LogLevel": log_config.get_log_level()}) result.append({"=== Authorization Config DB": ""}) result += config_db_get_status() output_list = [] for r in result: output_list.append("{}: {}".format(list(r.keys())[0], list(r.values())[0])) printMessage(args.output_format,"\n".join(output_list)) return None def set_vol_opts(args): try: set_ok = vmdk_ops.set_vol_opts(args.volume, args.vmgroup, args.options) if set_ok: printMessage(args.output_format, 'Successfully updated settings for {0}'.format(args.volume)) else: return err_out('Failed to update {0} for {1}.'.format(args.options, args.volume)) except Exception as ex: return err_out('Failed to update {0} for {1} - {2}.'.format(args.options, args.volume, str(ex))) VMDK_OPSD = '/etc/init.d/vmdk-opsd' PS = 'ps -c | grep ' GREP_V_GREP = ' | grep -v grep' NOT_RUNNING_STATUS = ("Stopped", None) def get_service_status(): """ Determine whether the service is running and it's PID. Return the 2 tuple containing a status string and PID. 
If the service is not running, PID is None """ try: output = subprocess.check_output([VMDK_OPSD, "status"]).split() if output[2] == "not": return NOT_RUNNING_STATUS pidstr = output[3] pidstr = pidstr.decode('utf-8') pid = pidstr.split("=")[1] return ("Running", pid) except subprocess.CalledProcessError: return NOT_RUNNING_STATUS def get_listening_port(pid): """ Return the configured port that the service is listening on """ try: cmd = "{0}{1}{2}".format(PS, pid, GREP_V_GREP) output = subprocess.check_output(cmd, shell=True).split()[6] return output.decode('utf-8') except: return NOT_AVAILABLE def get_version(): """ Return the version of the installed VIB """ try: cmd = 'localcli software vib list | grep esx-vmdkops-service' version_str = subprocess.check_output(cmd, shell=True).split()[1] return version_str.decode('utf-8') except: return NOT_AVAILABLE def tenant_ls_headers(): """ Return column names for tenant ls command """ headers = ['Uuid', 'Name', 'Description', 'Default_datastore', 'VM_list'] return headers def generate_vm_list(vm_list): """ Generate vm names with given list of (vm_uuid, vm_name) from db""" # vm_list is a list of (vm_uuid, vm_name) from db # the return value is a comma separated string of VM names like this vm1,vm2 res = "" for vm_uuid, vm_name_from_db in vm_list: vm_name = vmdk_utils.get_vm_name_by_uuid(vm_uuid) # If the VM name cannot be resolved then use one from db # If it is not available from db then mark it as NOT_AVAILABLE if not vm_name: vm_name = vm_name_from_db if vm_name_from_db else NOT_AVAILABLE res += vm_name + "," if res: res = res[:-1] return res def generate_tenant_ls_rows(tenant_list): """ Generate output for tenant ls command """ rows = [] for tenant in tenant_list: uuid = tenant.id name = tenant.name description = tenant.description # "default_datastore_url" should always be set, and cannot be empty # it can only happen when DB has some corruption if not tenant.default_datastore_url: default_datastore = "" error_info = generate_error_info(ErrorCode.DS_DEFAULT_NOT_SET, name) return error_info, None else: default_datastore = vmdk_utils.get_datastore_name(tenant.default_datastore_url) if default_datastore is None: default_datastore = "" vm_list = generate_vm_list(tenant.vms) rows.append([uuid, name, description, default_datastore, vm_list]) return None, rows def tenant_create(args): """ Handle tenant create command """ desc = "" if args.description: desc = args.description error_info, tenant = auth_api._tenant_create(name=args.name, default_datastore=args.default_datastore, description=desc, vm_list=args.vm_list, privileges=[]) if error_info: return err_out(error_info.msg) elif args.name != auth_data_const.DEFAULT_TENANT: printMessage(args.output_format, "vmgroup '{}' is created. 
Do not forget to run 'vmgroup vm add' to add vm to vmgroup.".format(args.name)) else: printMessage(args.output_format, "vmgroup '{}' is created.".format(args.name)) def tenant_update(args): """ Handle tenant update command """ desc = "" if args.description: desc = args.description error_info = auth_api._tenant_update(name=args.name, new_name=args.new_name, description=desc, default_datastore=args.default_datastore) if error_info: return err_out(error_info.msg) else: printMessage(args.output_format, "vmgroup modify succeeded") def tenant_rm(args): """ Handle tenant rm command """ remove_volumes = False # If args "remove_volumes" is not specified in CLI # args.remove_volumes will be None if args.remove_volumes: remove_volumes = True error_info = auth_api._tenant_rm(args.name, remove_volumes, args.force) if error_info: return err_out(error_info.msg) else: msg = "vmgroup rm succeeded" printMessage(args.output_format, "All Volumes will be removed. " + msg if remove_volumes else msg) def tenant_ls(args): """ Handle tenant ls command """ error_info, tenant_list = auth_api._tenant_ls() if error_info: return err_out(error_info.msg) header = tenant_ls_headers() error_info, rows = generate_tenant_ls_rows(tenant_list) if error_info: return err_out(error_info.msg) else: printList(args.output_format, header, rows) def tenant_vm_add(args): """ Handle tenant vm add command """ error_info = auth_api._tenant_vm_add(args.name, args.vm_list) if error_info: return err_out(error_info.msg) else: printMessage(args.output_format, "vmgroup vm add succeeded") def tenant_vm_rm(args): """ Handle tenant vm rm command """ error_info = auth_api._tenant_vm_rm(args.name, args.vm_list) if error_info: return err_out(error_info.msg) else: printMessage(args.output_format, "vmgroup vm rm succeeded") def tenant_vm_replace(args): """ Handle tenant vm replace command """ error_info = auth_api._tenant_vm_replace(args.name, args.vm_list) if error_info: return err_out(error_info.msg) else: printMessage(args.output_format, "vmgroup vm replace succeeded") def tenant_vm_ls_headers(): """ Return column names for tenant vm ls command """ headers = ['Uuid', 'Name'] return headers def generate_tenant_vm_ls_rows(vms): """ Generate output for tenant vm ls command """ rows = [] for vm_uuid, vm_name_from_db in vms: vm_name = vmdk_utils.get_vm_name_by_uuid(vm_uuid) # If the VM name cannot be resolved then use one from db # If it is not available from db then mark it as NOT_AVAILABLE if not vm_name: vm_name = vm_name_from_db if vm_name_from_db else NOT_AVAILABLE rows.append([vm_uuid, vm_name]) return rows def tenant_vm_ls(args): """ Handle tenant vm ls command """ # Handling _DEFAULT tenant case separately to print info message # instead of printing empty list if (args.name == auth_data_const.DEFAULT_TENANT): return err_out("{0} tenant contains all VMs which were not added to other tenants".format(auth_data_const.DEFAULT_TENANT)) error_info, vms = auth_api._tenant_vm_ls(args.name) if error_info: return err_out(error_info.msg) header = tenant_vm_ls_headers() rows = generate_tenant_vm_ls_rows(vms) printList(args.output_format, header, rows) def tenant_access_add(args): """ Handle tenant access command """ volume_maxsize_in_MB = None volume_totalsize_in_MB = None if args.volume_maxsize: volume_maxsize_in_MB = convert.convert_to_MB(args.volume_maxsize) if args.volume_totalsize: volume_totalsize_in_MB = convert.convert_to_MB(args.volume_totalsize) error_info = auth_api._tenant_access_add(name=args.name, datastore=args.datastore, 
allow_create=args.allow_create, volume_maxsize_in_MB=volume_maxsize_in_MB, volume_totalsize_in_MB=volume_totalsize_in_MB ) if error_info: return err_out(error_info.msg) else: printMessage(args.output_format, "vmgroup access add succeeded") def tenant_access_set(args): """ Handle tenant access set command """ volume_maxsize_in_MB = None volume_totalsize_in_MB = None if args.volume_maxsize: volume_maxsize_in_MB = convert.convert_to_MB(args.volume_maxsize) if args.volume_totalsize: volume_totalsize_in_MB = convert.convert_to_MB(args.volume_totalsize) error_info = auth_api._tenant_access_set(name=args.name, datastore=args.datastore, allow_create=args.allow_create, volume_maxsize_in_MB=volume_maxsize_in_MB, volume_totalsize_in_MB=volume_totalsize_in_MB) if error_info: return err_out(error_info.msg) else: printMessage(args.output_format, "vmgroup access set succeeded") def tenant_access_rm(args): """ Handle tenant access rm command """ error_info = auth_api._tenant_access_rm(args.name, args.datastore) if error_info: return err_out(error_info.msg) else: printMessage(args.output_format, "vmgroup access rm succeeded") def tenant_access_ls_headers(): """ Return column names for tenant access ls command """ headers = ['Datastore', 'Allow_create', 'Max_volume_size', 'Total_size'] return headers def generate_tenant_access_ls_rows(privileges, name): """ Generate output for tenant access ls command """ rows = [] for p in privileges: if not p.datastore_url: datastore = "" error_info = generate_error_info(ErrorCode.DS_DEFAULT_NOT_SET, name) return error_info, None else: datastore = vmdk_utils.get_datastore_name(p.datastore_url) if datastore is None: datastore = "" allow_create = ("False", "True")[p.allow_create] # p[auth_data_const.COL_MAX_VOLUME_SIZE] is max_volume_size in MB max_vol_size = UNSET if p.max_volume_size == 0 else human_readable(p.max_volume_size * MB) # p[auth_data_const.COL_USAGE_QUOTA] is total_size in MB total_size = UNSET if p.usage_quota == 0 else human_readable(p.usage_quota * MB) rows.append([datastore, allow_create, max_vol_size, total_size]) return None, rows def tenant_access_ls(args): """ Handle tenant access ls command """ name = args.name error_info, privileges = auth_api._tenant_access_ls(name) if error_info: return err_out(error_info.msg) header = tenant_access_ls_headers() error_info, rows = generate_tenant_access_ls_rows(privileges, name) if error_info: return err_out(error_info.msg) else: printList(args.output_format, header, rows) # ==== CONFIG DB manipulation functions ==== def create_db_symlink(path, link_path): """Force-creates a symlink to path""" if os.path.islink(link_path): os.remove(link_path) try: os.symlink(path, link_path) except Exception as ex: print("Failed to create symlink at {} to {}".format(link_path, path)) sys.exit(ex) def db_move_to_backup(path): """ Saves a DB copy side by side. Basically, glorified copy to a unique file name. Returns target name """ target = "{}.bak_{}".format(path, time.asctime().replace(" ", "_")) # since we generate unique file name, no need to check if it exists shutil.move(path, target) return target def is_local_vmfs(datastore_name): """return True if datastore is local VMFS one""" # TODO - check for datastore being on local VMFS volume. 
# the code below is supposed to do it, but in ESX 6.5 it returns # " local = <unset>", so leaving it out for now # def vol_info_from_vim(datastore_name): # si = pyVim.connect.Connect() # host = pyVim.host.GetHostSystem(si) # fss = host.configManager.storageSystem.fileSystemVolumeInfo.mountInfo # vmfs_volume_info = [f.volume for f in fss if f.volume.name == datastore_name and # f.volume.type == "VMFS"] # return vmfs_volume_info and vmfs_volume_info.local return False def err_out(_msg, _info=None): """ A helper to print an error message with (optional) info if the vmdkops admin command fails. Returns the message. """ _msg = ("ERROR:" + _msg) if _info: _msg = _msg + (". Additional information: {}".format(_info)) print(_msg) return _msg def err_override(_msg, _info): """A helper to print messages with extra help about --force flag""" new_msg = "{}".format(_msg) + " . Add '--force' flag to force the request execution" return err_out(new_msg, _info) def config_elsewhere(datastore): """Returns a list of config DBs info on other datastore, or empty list""" # Actual implementation: scan vim datastores, check for dockvols/file_name # return None or list of (db_name, full_path) tuples for existing config DBs. others = [] for (ds_name, _, dockvol_path) in vmdk_utils.get_datastores(): full_path = os.path.join(dockvol_path, auth_data.CONFIG_DB_NAME) if ds_name != datastore and os.path.exists(full_path): others.append((ds_name, full_path)) return others def check_ds_local_args(args): """ checks consistency in --local and --datastore args, and datastore presence :Return: None for success, errmsg for error """ if args.datastore: ds_name = args.datastore if not os.path.exists(os.path.join("/vmfs/volumes", ds_name)): return err_out("No such datastore: {}".format(ds_name)) if args.datastore and args.local: return err_out("Error: only one of '--datastore' or '--local' can be set") if not args.datastore and not args.local: return err_out("Error: one of '--datastore' or '--local' has to be set") return None def config_init(args): """ Init Config DB to allow quotas and access groups (vmgroups) :return: None for success, string for error """ err = check_ds_local_args(args) if err: return err output_list = [] output_list.append("Warning: this feature is EXPERIMENTAL") if args.datastore: ds_name = args.datastore db_path = auth_data.AuthorizationDataManager.ds_to_db_path(ds_name) else: db_path = auth_data.AUTH_DB_PATH link_path = auth_data.AUTH_DB_PATH # where was the DB, now is a link # Check the existing config mode with auth_data.AuthorizationDataManager() as auth: try: auth.connect() info = auth.get_info() mode = auth.mode # for usage outside of the 'with' except auth_data.DbAccessError as ex: return err_out(str(ex)) if mode == auth_data.DBMode.NotConfigured: pass elif mode == auth_data.DBMode.MultiNode or mode == auth_data.DBMode.SingleNode: return err_out(DB_REF + " is already initialized. Use 'rm --local' or 'rm --unlink' to reset", info) else: return err_out("Fatal: Internal error - unknown mode: {}".format(mode)) if args.datastore: # Check that the target datastore is NOT local VMFS, bail out if it is (--force to override). if is_local_vmfs(ds_name) and not args.force: return err_override("{} is a local datastore.".format(ds_name) + "Shared datastores are recommended.", "N/A") # Check other datastores, bail out if dockvols/DB exists there.
other_ds_config = config_elsewhere(ds_name) if len(other_ds_config) > 0 and not args.force: return err_override("Found " + DB_REF + "on other datastores.", other_ds_config) if not os.path.exists(db_path): output_list.append("Creating new DB at {}".format(db_path)) auth = auth_data.AuthorizationDataManager(db_path) err = auth.new_db() if err: return err_out("Init failed: %s" % str(err)) # Almost done - just create link and refresh the service if args.local: output_list.append("Warning: Local configuration will not survive ESXi reboot." + " See KB2043564 for details") else: output_list.append("Creating a symlink to {} at {}".format(db_path, link_path)) create_db_symlink(db_path, link_path) output_list.append("Updating {}".format(local_sh.LOCAL_SH_PATH)) local_sh.update_symlink_info(args.datastore) printMessage(args.output_format, "\n".join(output_list)) return None def config_rm(args): """ Remove Local Config DB or local link. We NEVER remove shared DB. :return: None for success, string for error """ # This asks for double confirmation, and removes the local link or DB (if any) # NEVER deletes the shared database - instead prints help if not args.local and not args.unlink: return err_out(""" DB removal is an irreversible operation. Please use '--local' flag for removing DB in SingleNode mode, and use '--unlink' to unlink from DB in MultiNode mode. Note that '--unlink' will not remove a shared DB, but simply configure the current ESXi host to stop using it. For removing shared DB, run 'vmdkops_admin config rm --unlink' on ESXi hosts using this DB, and then manually remove the actual DB file '{}' from shared storage. """.format(auth_data.CONFIG_DB_NAME)) if args.local and args.unlink: return err_out(""" Cannot use '--local' and '--unlink' together. Please use '--local' flag for removing DB in SingleNode mode, and use '--unlink' to unlink from DB in MultiNode mode. """ ) if not args.confirm: return err_out("Warning: For extra safety, removal operation requires '--confirm' flag.") # Check the existing config mode with auth_data.AuthorizationDataManager() as auth: try: auth.connect() info = auth.get_info() mode = auth.mode # for usage outside of the 'with' except auth_data.DbAccessError as ex: # the DB is broken and is being asked to be removed, so let's oblige printMessage(args.output_format, "Received error - removing configuration anyway. Err: \"{}\"".format(str(ex))) try: os.remove(auth_data.AUTH_DB_PATH) except: pass return None # mode is NotConfigured, path does not exist, nothing to remove if mode == auth_data.DBMode.NotConfigured: return None # mode is NotConfigured, path does not exist if mode == auth_data.DBMode.NotConfigured: printMessage(args.output_format, "Nothing to do - Mode={}.".format(str(mode))) link_path = auth_data.AUTH_DB_PATH # local DB or link if not os.path.lexists(link_path): return None if mode == auth_data.DBMode.MultiNode: if args.local: return err_out("'rm --local' is not supported when " + DB_REF + "is in MultiNode mode."
" Use 'rm --unlink' to remove the local link to shared DB.") else: output_list = [] try: os.remove(link_path) output_list.append("Removed link {}".format(link_path)) except Exception as ex: output_list.append("Failed to remove {}: {}".format(link_path, ex)) output_list.append("Updating {}".format(local_sh.LOCAL_SH_PATH)) printMessage(args.output_format, "\n".join(output_list)) local_sh.update_symlink_info(add=False) return None if mode == auth_data.DBMode.SingleNode: if args.unlink: return err_out("'rm --unlink' is not supported when " + DB_REF + "is in SingleNode mode." " Use 'rm --local' to remove local DB configuration.") else: if not args.no_backup: printMessage(args.output_format, "Moved {} to backup file {}".format(link_path, db_move_to_backup(link_path))) return None # All other cases printMessage(args.output_format, "Nothing to do - Mode={}.".format(str(mode))) def config_mv(args): """[Not Supported Yet] Relocate config DB from its current location :return: None for success, string for error """ if not args.force: return err_out(DB_REF + " move to {} ".format(args.to) + "requires '--force' flag to execute the request.") # TODO: # this is pure convenience code, so it is very low priority; still, here are the steps: # checks if target exists upfront, and fail if it does # cp the DB instance 'to' , and flip the symlink. # refresh service (not really needed as next vmci_command handlers will pick it up) # need --dryrun or --confirm # issue: works really with discovery only , as others need to find it out printMessage(args.output_format, "Sorry, configuration move ('config mv' command) is not supported yet") return None def config_db_get_status(): '''A helper to get config DB status. Returns an array of status info''' result = [] with auth_data.AuthorizationDataManager() as auth: try: auth.connect() except: pass # connect() will set the status regardless of success for (k, v) in auth.get_info().items(): result.append({k: v}) return result def config_status(args): """A subset of 'config' command - prints the DB config only""" output_list = [] for r in config_db_get_status(): output_list.append("{}: {}".format(list(r.keys())[0], list(r.values())[0])) printMessage(args.output_format, "\n".join(output_list)) return None # ==== Run it now ==== if __name__ == "__main__": main()
apache-2.0
898,416,034,834,417,700
1,802,559,131,751,987,000
38.232794
147
0.519839
false
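The admin CLI in the record above validates comma-separated option values by handing argparse a custom type callable (make_list_of_values; its docstring cites tenant set --rights=create,delete,mount). A minimal standalone sketch of that pattern follows; only the option name and allowed words are taken from that docstring example, everything else is illustrative.

import argparse

def make_list_of_values(allowed):
    # Build an argparse "type" callable that splits a comma separated string
    # and rejects any word that is not in the allowed list.
    def list_of_values(string):
        given = string.split(',')
        for g in given:
            if g not in allowed:
                raise argparse.ArgumentTypeError(
                    'invalid choice: {0} (allowed: {1})'.format(g, allowed))
        return given
    return list_of_values

parser = argparse.ArgumentParser()
parser.add_argument('--rights', type=make_list_of_values(['create', 'delete', 'mount']))
args = parser.parse_args(['--rights=create,mount'])
print(args.rights)  # ['create', 'mount']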
dyim42/zerorpc-python
tests/test_pubpush.py
102
3999
# -*- coding: utf-8 -*- # Open Source Initiative OSI - The MIT License (MIT):Licensing # # The MIT License (MIT) # Copyright (c) 2012 DotCloud Inc ([email protected]) # # Permission is hereby granted, free of charge, to any person obtaining a copy of # this software and associated documentation files (the "Software"), to deal in # the Software without restriction, including without limitation the rights to # use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies # of the Software, and to permit persons to whom the Software is furnished to do # so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import gevent import gevent.event import zerorpc from testutils import teardown, random_ipc_endpoint def test_pushpull_inheritance(): endpoint = random_ipc_endpoint() pusher = zerorpc.Pusher() pusher.bind(endpoint) trigger = gevent.event.Event() class Puller(zerorpc.Puller): def lolita(self, a, b): print 'lolita', a, b assert a + b == 3 trigger.set() puller = Puller() puller.connect(endpoint) gevent.spawn(puller.run) trigger.clear() pusher.lolita(1, 2) trigger.wait() print 'done' def test_pubsub_inheritance(): endpoint = random_ipc_endpoint() publisher = zerorpc.Publisher() publisher.bind(endpoint) trigger = gevent.event.Event() class Subscriber(zerorpc.Subscriber): def lolita(self, a, b): print 'lolita', a, b assert a + b == 3 trigger.set() subscriber = Subscriber() subscriber.connect(endpoint) gevent.spawn(subscriber.run) trigger.clear() # We need this retry logic to wait that the subscriber.run coroutine starts # reading (the published messages will go to /dev/null until then). for attempt in xrange(0, 10): publisher.lolita(1, 2) if trigger.wait(0.2): print 'done' return raise RuntimeError("The subscriber didn't receive any published message") def test_pushpull_composite(): endpoint = random_ipc_endpoint() trigger = gevent.event.Event() class Puller(object): def lolita(self, a, b): print 'lolita', a, b assert a + b == 3 trigger.set() pusher = zerorpc.Pusher() pusher.bind(endpoint) service = Puller() puller = zerorpc.Puller(service) puller.connect(endpoint) gevent.spawn(puller.run) trigger.clear() pusher.lolita(1, 2) trigger.wait() print 'done' def test_pubsub_composite(): endpoint = random_ipc_endpoint() trigger = gevent.event.Event() class Subscriber(object): def lolita(self, a, b): print 'lolita', a, b assert a + b == 3 trigger.set() publisher = zerorpc.Publisher() publisher.bind(endpoint) service = Subscriber() subscriber = zerorpc.Subscriber(service) subscriber.connect(endpoint) gevent.spawn(subscriber.run) trigger.clear() # We need this retry logic to wait that the subscriber.run coroutine starts # reading (the published messages will go to /dev/null until then). for attempt in xrange(0, 10): publisher.lolita(1, 2) if trigger.wait(0.2): print 'done' return raise RuntimeError("The subscriber didn't receive any published message")
mit
3,286,807,619,572,314,000
3,351,035,038,743,126,000
28.622222
81
0.667167
false
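The zerorpc tests in the record above exercise the push/pull and pub/sub streaming patterns. Distilled to its smallest form, the push/pull flow looks roughly like the sketch below; the ipc endpoint path is illustrative, and the method name simply mirrors the one the tests use.

import gevent
import gevent.event
import zerorpc

endpoint = 'ipc:///tmp/zerorpc_pushpull_demo'  # illustrative endpoint
done = gevent.event.Event()

class Puller(zerorpc.Puller):
    def lolita(self, a, b):
        # Runs when the pusher emits a 'lolita' message.
        assert a + b == 3
        done.set()

pusher = zerorpc.Pusher()
pusher.bind(endpoint)
puller = Puller()
puller.connect(endpoint)
gevent.spawn(puller.run)  # start draining pushed messages

pusher.lolita(1, 2)       # fire-and-forget: no return value travels back
done.wait()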
jusjusjus/pyedf
pyedf/score/score.py
1
6055
#! /usr/bin/python from __future__ import with_statement import logging from .state import State import numpy as np import os import re class Score(object): logger = logging.getLogger(name='Score') commentSymbol = '#' # used for comments in state.annot lineSeparator = ',' # used as separators in the line states_dict = dict() def __init__(self, filename=None, states=[], verbose=0): self.logger.debug("__init__(filename={}, num_states={})".format(filename, states)) self.verbose = verbose self.states = [] self.set_states(states) self.filename = filename if not self.filename is None: if not os.path.exists(self.filename): raise AttributeError("Score file %s does not exist." % (filename)) self.set_states(self.load(filename)) if self.verbose > 0: print("score: score file '%s' found." % (filename)) if self.verbose == 2: print("score: the states", self.states) def set_states(self, states): for state in states: self.states.append(state) def interpret_states(self): pass def isComment(self, line): line.strip(' ') if line[0] == self.commentSymbol: # if line starts with the commentSymbol, it is a comment .. return True # .. don't process it. else: return False # else: split the line at separators. def load(self, filename): self.logger.debug("load(filename='{}')".format(filename)) states = [] with open(filename, 'r') as score_file: for line in score_file: try: if self.isComment(line): continue line = line.strip('\n').strip('\r').strip(' ') x = line.split(self.lineSeparator) if len(x) > 0: # for example 1 start = x[0].strip(' ') if len(x) == 1: annot = '' duration = '' if len(x) == 2: annot = x[1] duration = '' elif len(x) > 2: # for example 3. duration = x[1].strip(' ') annot = x[2] if duration == '': duration = '-1' states.append( State(start=start, duration=duration, annot=annot) ) except Exception as e: self.logger.debug("# line not readable: {}\n{}".format(line, e)) return states def save(self, filename): print("# opening", filename, "to write ...") with open(filename, 'w') as score_file: string = '# start, duration, annotation\n'+self.__str__() score_file.write(string + '\n') def append(self, new_state=None, start=None, duration=None, annot=None): if new_state is None: new_state = State(start=start, duration=duration, annot=annot) self.states.append(new_state) def __str__(self): if hasattr(self, 'states'): return '\n'.join([str(state) for state in self.states]) else: return 'Score' def select_by_function(self, function, **kwargs): selection = [] for state in self.states: if function(state, **kwargs): selection.append(state) score_select = object.__new__(type(self)) score_select.__init__(states=selection) return score_select def intersect(self, other_score): intersection = [] for state in self.states: section_j = state.intersect(other_score.states) intersection.extend( section_j ) return type(self)(states=intersection) def duration(self, annot=None): duration = 0.0 if annot == None: duration = np.sum([state.duration for state in self.states]) else: for state in self.states: if state.annot == annot: duration += state.duration return duration def count(self, annot=None): if annot == None: count = len(self.states) else: count = 0 for state in self.states: if state.annot == annot: count += 1 return count def connect_adjacent_states(self, close=0.01): if len(self.states) == 0: return new_states = [] last_annot = self.states[0].annot last_duration = self.states[0].duration last_start = self.states[0] # will be interpreted as datetime.datetime for state in self.states[1:]: dt = 
np.abs((state-last_start.end).total_seconds()) if dt < close and last_annot == state.annot: last_duration += (state.end-last_start.end).total_seconds() else: new_state = State(start=last_start, duration=last_duration, annot=last_annot) new_states.append(new_state) last_annot = state.annot last_duration = state.duration last_start = state # will be interpreted as datetime.datetime new_state = State(start=last_start, duration=last_duration, annot=last_annot) new_states.append(new_state) self.logger.debug("Length of individual states: {} seconds.".format(sum(state.duration for state in self.states))) self.logger.debug("Length of connected states: {} seconds.".format(sum(state.duration for state in new_states))) self.set_states(new_states) score = Score if __name__ == "__main__": score_filename = '../../example/sample.csv' testscore = score(filename=score_filename) print(testscore)
gpl-3.0
-509,232,781,207,018,940
4,634,903,870,972,780,000
27.561321
122
0.523369
false
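The pyedf record above defines Score, which loads "start, duration, annotation" CSV lines into State objects and supports filtering, intersection, duration and count queries. A small hedged usage sketch follows; the import path is inferred from the repo layout shown, and both the file name and the 'REM' annotation are purely illustrative.

from pyedf.score.score import Score  # import path assumed from the repo layout above

score = Score(filename='example/sample.csv')  # any "start, duration, annotation" CSV
# Keep only states carrying a hypothetical 'REM' annotation, then query them.
rem_only = score.select_by_function(lambda state: state.annot == 'REM')
print(rem_only.count('REM'), rem_only.duration('REM'))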
Perferom/android_external_chromium_org
sync/tools/testserver/chromiumsync_test.py
154
30090
#!/usr/bin/env python # Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Tests exercising chromiumsync and SyncDataModel.""" import pickle import unittest import autofill_specifics_pb2 import bookmark_specifics_pb2 import chromiumsync import managed_user_specifics_pb2 import sync_pb2 import theme_specifics_pb2 class SyncDataModelTest(unittest.TestCase): def setUp(self): self.model = chromiumsync.SyncDataModel() # The Synced Bookmarks folder is not created by default self._expect_synced_bookmarks_folder = False def AddToModel(self, proto): self.model._entries[proto.id_string] = proto def GetChangesFromTimestamp(self, requested_types, timestamp): message = sync_pb2.GetUpdatesMessage() message.from_timestamp = timestamp for data_type in requested_types: getattr(message.requested_types, chromiumsync.SYNC_TYPE_TO_DESCRIPTOR[ data_type].name).SetInParent() return self.model.GetChanges( chromiumsync.UpdateSieve(message, self.model.migration_history)) def FindMarkerByNumber(self, markers, datatype): """Search a list of progress markers and find the one for a datatype.""" for marker in markers: if marker.data_type_id == datatype.number: return marker self.fail('Required marker not found: %s' % datatype.name) def testPermanentItemSpecs(self): specs = chromiumsync.SyncDataModel._PERMANENT_ITEM_SPECS declared_specs = set(['0']) for spec in specs: self.assertTrue(spec.parent_tag in declared_specs, 'parent tags must ' 'be declared before use') declared_specs.add(spec.tag) unique_datatypes = set([x.sync_type for x in specs]) self.assertEqual(unique_datatypes, set(chromiumsync.ALL_TYPES[1:]), 'Every sync datatype should have a permanent folder ' 'associated with it') def testSaveEntry(self): proto = sync_pb2.SyncEntity() proto.id_string = 'abcd' proto.version = 0 self.assertFalse(self.model._ItemExists(proto.id_string)) self.model._SaveEntry(proto) self.assertEqual(1, proto.version) self.assertTrue(self.model._ItemExists(proto.id_string)) self.model._SaveEntry(proto) self.assertEqual(2, proto.version) proto.version = 0 self.assertTrue(self.model._ItemExists(proto.id_string)) self.assertEqual(2, self.model._entries[proto.id_string].version) def testCreatePermanentItems(self): self.model._CreateDefaultPermanentItems(chromiumsync.ALL_TYPES) self.assertEqual(len(chromiumsync.ALL_TYPES) + 1, len(self.model._entries)) def ExpectedPermanentItemCount(self, sync_type): if sync_type == chromiumsync.BOOKMARK: if self._expect_synced_bookmarks_folder: return 4 else: return 3 else: return 1 def testGetChangesFromTimestampZeroForEachType(self): all_types = chromiumsync.ALL_TYPES[1:] for sync_type in all_types: self.model = chromiumsync.SyncDataModel() request_types = [sync_type] version, changes, remaining = ( self.GetChangesFromTimestamp(request_types, 0)) expected_count = self.ExpectedPermanentItemCount(sync_type) self.assertEqual(expected_count, version) self.assertEqual(expected_count, len(changes)) for change in changes: self.assertTrue(change.HasField('server_defined_unique_tag')) self.assertEqual(change.version, change.sync_timestamp) self.assertTrue(change.version <= version) # Test idempotence: another GetUpdates from ts=0 shouldn't recreate. 
version, changes, remaining = ( self.GetChangesFromTimestamp(request_types, 0)) self.assertEqual(expected_count, version) self.assertEqual(expected_count, len(changes)) self.assertEqual(0, remaining) # Doing a wider GetUpdates from timestamp zero shouldn't recreate either. new_version, changes, remaining = ( self.GetChangesFromTimestamp(all_types, 0)) if self._expect_synced_bookmarks_folder: self.assertEqual(len(chromiumsync.SyncDataModel._PERMANENT_ITEM_SPECS), new_version) else: self.assertEqual( len(chromiumsync.SyncDataModel._PERMANENT_ITEM_SPECS) -1, new_version) self.assertEqual(new_version, len(changes)) self.assertEqual(0, remaining) version, changes, remaining = ( self.GetChangesFromTimestamp(request_types, 0)) self.assertEqual(new_version, version) self.assertEqual(expected_count, len(changes)) self.assertEqual(0, remaining) def testBatchSize(self): for sync_type in chromiumsync.ALL_TYPES[1:]: specifics = chromiumsync.GetDefaultEntitySpecifics(sync_type) self.model = chromiumsync.SyncDataModel() request_types = [sync_type] for i in range(self.model._BATCH_SIZE*3): entry = sync_pb2.SyncEntity() entry.id_string = 'batch test %d' % i entry.specifics.CopyFrom(specifics) self.model._SaveEntry(entry) last_bit = self.ExpectedPermanentItemCount(sync_type) version, changes, changes_remaining = ( self.GetChangesFromTimestamp(request_types, 0)) self.assertEqual(self.model._BATCH_SIZE, version) self.assertEqual(self.model._BATCH_SIZE*2 + last_bit, changes_remaining) version, changes, changes_remaining = ( self.GetChangesFromTimestamp(request_types, version)) self.assertEqual(self.model._BATCH_SIZE*2, version) self.assertEqual(self.model._BATCH_SIZE + last_bit, changes_remaining) version, changes, changes_remaining = ( self.GetChangesFromTimestamp(request_types, version)) self.assertEqual(self.model._BATCH_SIZE*3, version) self.assertEqual(last_bit, changes_remaining) version, changes, changes_remaining = ( self.GetChangesFromTimestamp(request_types, version)) self.assertEqual(self.model._BATCH_SIZE*3 + last_bit, version) self.assertEqual(0, changes_remaining) # Now delete a third of the items. for i in xrange(self.model._BATCH_SIZE*3 - 1, 0, -3): entry = sync_pb2.SyncEntity() entry.id_string = 'batch test %d' % i entry.deleted = True self.model._SaveEntry(entry) # The batch counts shouldn't change. version, changes, changes_remaining = ( self.GetChangesFromTimestamp(request_types, 0)) self.assertEqual(self.model._BATCH_SIZE, len(changes)) self.assertEqual(self.model._BATCH_SIZE*2 + last_bit, changes_remaining) version, changes, changes_remaining = ( self.GetChangesFromTimestamp(request_types, version)) self.assertEqual(self.model._BATCH_SIZE, len(changes)) self.assertEqual(self.model._BATCH_SIZE + last_bit, changes_remaining) version, changes, changes_remaining = ( self.GetChangesFromTimestamp(request_types, version)) self.assertEqual(self.model._BATCH_SIZE, len(changes)) self.assertEqual(last_bit, changes_remaining) version, changes, changes_remaining = ( self.GetChangesFromTimestamp(request_types, version)) self.assertEqual(last_bit, len(changes)) self.assertEqual(self.model._BATCH_SIZE*4 + last_bit, version) self.assertEqual(0, changes_remaining) def testCommitEachDataType(self): for sync_type in chromiumsync.ALL_TYPES[1:]: specifics = chromiumsync.GetDefaultEntitySpecifics(sync_type) self.model = chromiumsync.SyncDataModel() my_cache_guid = '112358132134' parent = 'foobar' commit_session = {} # Start with a GetUpdates from timestamp 0, to populate permanent items. 
original_version, original_changes, changes_remaining = ( self.GetChangesFromTimestamp([sync_type], 0)) def DoCommit(original=None, id_string='', name=None, parent=None, position=0): proto = sync_pb2.SyncEntity() if original is not None: proto.version = original.version proto.id_string = original.id_string proto.parent_id_string = original.parent_id_string proto.name = original.name else: proto.id_string = id_string proto.version = 0 proto.specifics.CopyFrom(specifics) if name is not None: proto.name = name if parent: proto.parent_id_string = parent.id_string proto.insert_after_item_id = 'please discard' proto.position_in_parent = position proto.folder = True proto.deleted = False result = self.model.CommitEntry(proto, my_cache_guid, commit_session) self.assertTrue(result) return (proto, result) # Commit a new item. proto1, result1 = DoCommit(name='namae', id_string='Foo', parent=original_changes[-1], position=100) # Commit an item whose parent is another item (referenced via the # pre-commit ID). proto2, result2 = DoCommit(name='Secondo', id_string='Bar', parent=proto1, position=-100) # Commit a sibling of the second item. proto3, result3 = DoCommit(name='Third!', id_string='Baz', parent=proto1, position=-50) self.assertEqual(3, len(commit_session)) for p, r in [(proto1, result1), (proto2, result2), (proto3, result3)]: self.assertNotEqual(r.id_string, p.id_string) self.assertEqual(r.originator_client_item_id, p.id_string) self.assertEqual(r.originator_cache_guid, my_cache_guid) self.assertTrue(r is not self.model._entries[r.id_string], "Commit result didn't make a defensive copy.") self.assertTrue(p is not self.model._entries[r.id_string], "Commit result didn't make a defensive copy.") self.assertEqual(commit_session.get(p.id_string), r.id_string) self.assertTrue(r.version > original_version) self.assertEqual(result1.parent_id_string, proto1.parent_id_string) self.assertEqual(result2.parent_id_string, result1.id_string) version, changes, remaining = ( self.GetChangesFromTimestamp([sync_type], original_version)) self.assertEqual(3, len(changes)) self.assertEqual(0, remaining) self.assertEqual(original_version + 3, version) self.assertEqual([result1, result2, result3], changes) for c in changes: self.assertTrue(c is not self.model._entries[c.id_string], "GetChanges didn't make a defensive copy.") self.assertTrue(result2.position_in_parent < result3.position_in_parent) self.assertEqual(-100, result2.position_in_parent) # Now update the items so that the second item is the parent of the # first; with the first sandwiched between two new items (4 and 5). # Do this in a new commit session, meaning we'll reference items from # the first batch by their post-commit, server IDs. 
commit_session = {} old_cache_guid = my_cache_guid my_cache_guid = 'A different GUID' proto2b, result2b = DoCommit(original=result2, parent=original_changes[-1]) proto4, result4 = DoCommit(id_string='ID4', name='Four', parent=result2, position=-200) proto1b, result1b = DoCommit(original=result1, parent=result2, position=-150) proto5, result5 = DoCommit(id_string='ID5', name='Five', parent=result2, position=150) self.assertEqual(2, len(commit_session), 'Only new items in second ' 'batch should be in the session') for p, r, original in [(proto2b, result2b, proto2), (proto4, result4, proto4), (proto1b, result1b, proto1), (proto5, result5, proto5)]: self.assertEqual(r.originator_client_item_id, original.id_string) if original is not p: self.assertEqual(r.id_string, p.id_string, 'Ids should be stable after first commit') self.assertEqual(r.originator_cache_guid, old_cache_guid) else: self.assertNotEqual(r.id_string, p.id_string) self.assertEqual(r.originator_cache_guid, my_cache_guid) self.assertEqual(commit_session.get(p.id_string), r.id_string) self.assertTrue(r is not self.model._entries[r.id_string], "Commit result didn't make a defensive copy.") self.assertTrue(p is not self.model._entries[r.id_string], "Commit didn't make a defensive copy.") self.assertTrue(r.version > p.version) version, changes, remaining = ( self.GetChangesFromTimestamp([sync_type], original_version)) self.assertEqual(5, len(changes)) self.assertEqual(0, remaining) self.assertEqual(original_version + 7, version) self.assertEqual([result3, result2b, result4, result1b, result5], changes) for c in changes: self.assertTrue(c is not self.model._entries[c.id_string], "GetChanges didn't make a defensive copy.") self.assertTrue(result4.parent_id_string == result1b.parent_id_string == result5.parent_id_string == result2b.id_string) self.assertTrue(result4.position_in_parent < result1b.position_in_parent < result5.position_in_parent) def testUpdateSieve(self): # from_timestamp, legacy mode autofill = chromiumsync.SYNC_TYPE_FIELDS['autofill'] theme = chromiumsync.SYNC_TYPE_FIELDS['theme'] msg = sync_pb2.GetUpdatesMessage() msg.from_timestamp = 15412 msg.requested_types.autofill.SetInParent() msg.requested_types.theme.SetInParent() sieve = chromiumsync.UpdateSieve(msg) self.assertEqual(sieve._state, {chromiumsync.TOP_LEVEL: 15412, chromiumsync.AUTOFILL: 15412, chromiumsync.THEME: 15412}) response = sync_pb2.GetUpdatesResponse() sieve.SaveProgress(15412, response) self.assertEqual(0, len(response.new_progress_marker)) self.assertFalse(response.HasField('new_timestamp')) response = sync_pb2.GetUpdatesResponse() sieve.SaveProgress(15413, response) self.assertEqual(0, len(response.new_progress_marker)) self.assertTrue(response.HasField('new_timestamp')) self.assertEqual(15413, response.new_timestamp) # Existing tokens msg = sync_pb2.GetUpdatesMessage() marker = msg.from_progress_marker.add() marker.data_type_id = autofill.number marker.token = pickle.dumps((15412, 1)) marker = msg.from_progress_marker.add() marker.data_type_id = theme.number marker.token = pickle.dumps((15413, 1)) sieve = chromiumsync.UpdateSieve(msg) self.assertEqual(sieve._state, {chromiumsync.TOP_LEVEL: 15412, chromiumsync.AUTOFILL: 15412, chromiumsync.THEME: 15413}) response = sync_pb2.GetUpdatesResponse() sieve.SaveProgress(15413, response) self.assertEqual(1, len(response.new_progress_marker)) self.assertFalse(response.HasField('new_timestamp')) marker = response.new_progress_marker[0] self.assertEqual(marker.data_type_id, autofill.number) 
self.assertEqual(pickle.loads(marker.token), (15413, 1)) self.assertFalse(marker.HasField('timestamp_token_for_migration')) # Empty tokens indicating from timestamp = 0 msg = sync_pb2.GetUpdatesMessage() marker = msg.from_progress_marker.add() marker.data_type_id = autofill.number marker.token = pickle.dumps((412, 1)) marker = msg.from_progress_marker.add() marker.data_type_id = theme.number marker.token = '' sieve = chromiumsync.UpdateSieve(msg) self.assertEqual(sieve._state, {chromiumsync.TOP_LEVEL: 0, chromiumsync.AUTOFILL: 412, chromiumsync.THEME: 0}) response = sync_pb2.GetUpdatesResponse() sieve.SaveProgress(1, response) self.assertEqual(1, len(response.new_progress_marker)) self.assertFalse(response.HasField('new_timestamp')) marker = response.new_progress_marker[0] self.assertEqual(marker.data_type_id, theme.number) self.assertEqual(pickle.loads(marker.token), (1, 1)) self.assertFalse(marker.HasField('timestamp_token_for_migration')) response = sync_pb2.GetUpdatesResponse() sieve.SaveProgress(412, response) self.assertEqual(1, len(response.new_progress_marker)) self.assertFalse(response.HasField('new_timestamp')) marker = response.new_progress_marker[0] self.assertEqual(marker.data_type_id, theme.number) self.assertEqual(pickle.loads(marker.token), (412, 1)) self.assertFalse(marker.HasField('timestamp_token_for_migration')) response = sync_pb2.GetUpdatesResponse() sieve.SaveProgress(413, response) self.assertEqual(2, len(response.new_progress_marker)) self.assertFalse(response.HasField('new_timestamp')) marker = self.FindMarkerByNumber(response.new_progress_marker, theme) self.assertEqual(pickle.loads(marker.token), (413, 1)) self.assertFalse(marker.HasField('timestamp_token_for_migration')) marker = self.FindMarkerByNumber(response.new_progress_marker, autofill) self.assertEqual(pickle.loads(marker.token), (413, 1)) self.assertFalse(marker.HasField('timestamp_token_for_migration')) # Migration token timestamps (client gives timestamp, server returns token) # These are for migrating from the old 'timestamp' protocol to the # progressmarker protocol, and have nothing to do with the MIGRATION_DONE # error code. 
msg = sync_pb2.GetUpdatesMessage() marker = msg.from_progress_marker.add() marker.data_type_id = autofill.number marker.timestamp_token_for_migration = 15213 marker = msg.from_progress_marker.add() marker.data_type_id = theme.number marker.timestamp_token_for_migration = 15211 sieve = chromiumsync.UpdateSieve(msg) self.assertEqual(sieve._state, {chromiumsync.TOP_LEVEL: 15211, chromiumsync.AUTOFILL: 15213, chromiumsync.THEME: 15211}) response = sync_pb2.GetUpdatesResponse() sieve.SaveProgress(16000, response) # There were updates self.assertEqual(2, len(response.new_progress_marker)) self.assertFalse(response.HasField('new_timestamp')) marker = self.FindMarkerByNumber(response.new_progress_marker, theme) self.assertEqual(pickle.loads(marker.token), (16000, 1)) self.assertFalse(marker.HasField('timestamp_token_for_migration')) marker = self.FindMarkerByNumber(response.new_progress_marker, autofill) self.assertEqual(pickle.loads(marker.token), (16000, 1)) self.assertFalse(marker.HasField('timestamp_token_for_migration')) msg = sync_pb2.GetUpdatesMessage() marker = msg.from_progress_marker.add() marker.data_type_id = autofill.number marker.timestamp_token_for_migration = 3000 marker = msg.from_progress_marker.add() marker.data_type_id = theme.number marker.timestamp_token_for_migration = 3000 sieve = chromiumsync.UpdateSieve(msg) self.assertEqual(sieve._state, {chromiumsync.TOP_LEVEL: 3000, chromiumsync.AUTOFILL: 3000, chromiumsync.THEME: 3000}) response = sync_pb2.GetUpdatesResponse() sieve.SaveProgress(3000, response) # Already up to date self.assertEqual(2, len(response.new_progress_marker)) self.assertFalse(response.HasField('new_timestamp')) marker = self.FindMarkerByNumber(response.new_progress_marker, theme) self.assertEqual(pickle.loads(marker.token), (3000, 1)) self.assertFalse(marker.HasField('timestamp_token_for_migration')) marker = self.FindMarkerByNumber(response.new_progress_marker, autofill) self.assertEqual(pickle.loads(marker.token), (3000, 1)) self.assertFalse(marker.HasField('timestamp_token_for_migration')) def testCheckRaiseTransientError(self): testserver = chromiumsync.TestServer() http_code, raw_respon = testserver.HandleSetTransientError() self.assertEqual(http_code, 200) try: testserver.CheckTransientError() self.fail('Should have raised transient error exception') except chromiumsync.TransientError: self.assertTrue(testserver.transient_error) def testUpdateSieveStoreMigration(self): autofill = chromiumsync.SYNC_TYPE_FIELDS['autofill'] theme = chromiumsync.SYNC_TYPE_FIELDS['theme'] migrator = chromiumsync.MigrationHistory() msg = sync_pb2.GetUpdatesMessage() marker = msg.from_progress_marker.add() marker.data_type_id = autofill.number marker.token = pickle.dumps((15412, 1)) marker = msg.from_progress_marker.add() marker.data_type_id = theme.number marker.token = pickle.dumps((15413, 1)) sieve = chromiumsync.UpdateSieve(msg, migrator) sieve.CheckMigrationState() migrator.Bump([chromiumsync.BOOKMARK, chromiumsync.PASSWORD]) # v=2 sieve = chromiumsync.UpdateSieve(msg, migrator) sieve.CheckMigrationState() self.assertEqual(sieve._state, {chromiumsync.TOP_LEVEL: 15412, chromiumsync.AUTOFILL: 15412, chromiumsync.THEME: 15413}) migrator.Bump([chromiumsync.AUTOFILL, chromiumsync.PASSWORD]) # v=3 sieve = chromiumsync.UpdateSieve(msg, migrator) try: sieve.CheckMigrationState() self.fail('Should have raised.') except chromiumsync.MigrationDoneError, error: # We want this to happen. 
self.assertEqual([chromiumsync.AUTOFILL], error.datatypes) msg = sync_pb2.GetUpdatesMessage() marker = msg.from_progress_marker.add() marker.data_type_id = autofill.number marker.token = '' marker = msg.from_progress_marker.add() marker.data_type_id = theme.number marker.token = pickle.dumps((15413, 1)) sieve = chromiumsync.UpdateSieve(msg, migrator) sieve.CheckMigrationState() response = sync_pb2.GetUpdatesResponse() sieve.SaveProgress(15412, response) # There were updates self.assertEqual(1, len(response.new_progress_marker)) self.assertFalse(response.HasField('new_timestamp')) self.assertFalse(marker.HasField('timestamp_token_for_migration')) marker = self.FindMarkerByNumber(response.new_progress_marker, autofill) self.assertEqual(pickle.loads(marker.token), (15412, 3)) self.assertFalse(marker.HasField('timestamp_token_for_migration')) msg = sync_pb2.GetUpdatesMessage() marker = msg.from_progress_marker.add() marker.data_type_id = autofill.number marker.token = pickle.dumps((15412, 3)) marker = msg.from_progress_marker.add() marker.data_type_id = theme.number marker.token = pickle.dumps((15413, 1)) sieve = chromiumsync.UpdateSieve(msg, migrator) sieve.CheckMigrationState() migrator.Bump([chromiumsync.THEME, chromiumsync.AUTOFILL]) # v=4 migrator.Bump([chromiumsync.AUTOFILL]) # v=5 sieve = chromiumsync.UpdateSieve(msg, migrator) try: sieve.CheckMigrationState() self.fail("Should have raised.") except chromiumsync.MigrationDoneError, error: # We want this to happen. self.assertEqual(set([chromiumsync.THEME, chromiumsync.AUTOFILL]), set(error.datatypes)) msg = sync_pb2.GetUpdatesMessage() marker = msg.from_progress_marker.add() marker.data_type_id = autofill.number marker.token = '' marker = msg.from_progress_marker.add() marker.data_type_id = theme.number marker.token = pickle.dumps((15413, 1)) sieve = chromiumsync.UpdateSieve(msg, migrator) try: sieve.CheckMigrationState() self.fail("Should have raised.") except chromiumsync.MigrationDoneError, error: # We want this to happen. 
self.assertEqual([chromiumsync.THEME], error.datatypes) msg = sync_pb2.GetUpdatesMessage() marker = msg.from_progress_marker.add() marker.data_type_id = autofill.number marker.token = '' marker = msg.from_progress_marker.add() marker.data_type_id = theme.number marker.token = '' sieve = chromiumsync.UpdateSieve(msg, migrator) sieve.CheckMigrationState() response = sync_pb2.GetUpdatesResponse() sieve.SaveProgress(15412, response) # There were updates self.assertEqual(2, len(response.new_progress_marker)) self.assertFalse(response.HasField('new_timestamp')) self.assertFalse(marker.HasField('timestamp_token_for_migration')) marker = self.FindMarkerByNumber(response.new_progress_marker, autofill) self.assertEqual(pickle.loads(marker.token), (15412, 5)) self.assertFalse(marker.HasField('timestamp_token_for_migration')) marker = self.FindMarkerByNumber(response.new_progress_marker, theme) self.assertEqual(pickle.loads(marker.token), (15412, 4)) self.assertFalse(marker.HasField('timestamp_token_for_migration')) msg = sync_pb2.GetUpdatesMessage() marker = msg.from_progress_marker.add() marker.data_type_id = autofill.number marker.token = pickle.dumps((15412, 5)) marker = msg.from_progress_marker.add() marker.data_type_id = theme.number marker.token = pickle.dumps((15413, 4)) sieve = chromiumsync.UpdateSieve(msg, migrator) sieve.CheckMigrationState() def testCreateSyncedBookmarks(self): version1, changes, remaining = ( self.GetChangesFromTimestamp([chromiumsync.BOOKMARK], 0)) id_string = self.model._MakeCurrentId(chromiumsync.BOOKMARK, '<server tag>synced_bookmarks') self.assertFalse(self.model._ItemExists(id_string)) self._expect_synced_bookmarks_folder = True self.model.TriggerCreateSyncedBookmarks() self.assertTrue(self.model._ItemExists(id_string)) # Check that the version changed when the folder was created and the only # change was the folder creation. version2, changes, remaining = ( self.GetChangesFromTimestamp([chromiumsync.BOOKMARK], version1)) self.assertEqual(len(changes), 1) self.assertEqual(changes[0].id_string, id_string) self.assertNotEqual(version1, version2) self.assertEqual( self.ExpectedPermanentItemCount(chromiumsync.BOOKMARK), version2) # Ensure getting from timestamp 0 includes the folder. version, changes, remaining = ( self.GetChangesFromTimestamp([chromiumsync.BOOKMARK], 0)) self.assertEqual( self.ExpectedPermanentItemCount(chromiumsync.BOOKMARK), len(changes)) self.assertEqual(version2, version) def testAcknowledgeManagedUser(self): # Create permanent items. self.GetChangesFromTimestamp([chromiumsync.MANAGED_USER], 0) proto = sync_pb2.SyncEntity() proto.id_string = 'abcd' proto.version = 0 # Make sure the managed_user field exists. proto.specifics.managed_user.acknowledged = False self.assertTrue(proto.specifics.HasField('managed_user')) self.AddToModel(proto) version1, changes1, remaining1 = ( self.GetChangesFromTimestamp([chromiumsync.MANAGED_USER], 0)) for change in changes1: self.assertTrue(not change.specifics.managed_user.acknowledged) # Turn on managed user acknowledgement self.model.acknowledge_managed_users = True version2, changes2, remaining2 = ( self.GetChangesFromTimestamp([chromiumsync.MANAGED_USER], 0)) for change in changes2: self.assertTrue(change.specifics.managed_user.acknowledged) def testGetKey(self): [key1] = self.model.GetKeystoreKeys() [key2] = self.model.GetKeystoreKeys() self.assertTrue(len(key1)) self.assertEqual(key1, key2) # Trigger the rotation. 
A subsequent GetUpdates should return the nigori # node (whose timestamp was bumped by the rotation). version1, changes, remaining = ( self.GetChangesFromTimestamp([chromiumsync.NIGORI], 0)) self.model.TriggerRotateKeystoreKeys() version2, changes, remaining = ( self.GetChangesFromTimestamp([chromiumsync.NIGORI], version1)) self.assertNotEqual(version1, version2) self.assertEquals(len(changes), 1) self.assertEquals(changes[0].name, "Nigori") # The current keys should contain the old keys, with the new key appended. [key1, key3] = self.model.GetKeystoreKeys() self.assertEquals(key1, key2) self.assertNotEqual(key1, key3) self.assertTrue(len(key3) > 0) def testTriggerEnableKeystoreEncryption(self): version1, changes, remaining = ( self.GetChangesFromTimestamp([chromiumsync.EXPERIMENTS], 0)) keystore_encryption_id_string = ( self.model._ClientTagToId( chromiumsync.EXPERIMENTS, chromiumsync.KEYSTORE_ENCRYPTION_EXPERIMENT_TAG)) self.assertFalse(self.model._ItemExists(keystore_encryption_id_string)) self.model.TriggerEnableKeystoreEncryption() self.assertTrue(self.model._ItemExists(keystore_encryption_id_string)) # The creation of the experiment should be downloaded on the next # GetUpdates. version2, changes, remaining = ( self.GetChangesFromTimestamp([chromiumsync.EXPERIMENTS], version1)) self.assertEqual(len(changes), 1) self.assertEqual(changes[0].id_string, keystore_encryption_id_string) self.assertNotEqual(version1, version2) # Verify the experiment was created properly and is enabled. self.assertEqual(chromiumsync.KEYSTORE_ENCRYPTION_EXPERIMENT_TAG, changes[0].client_defined_unique_tag) self.assertTrue(changes[0].HasField("specifics")) self.assertTrue(changes[0].specifics.HasField("experiments")) self.assertTrue( changes[0].specifics.experiments.HasField("keystore_encryption")) self.assertTrue( changes[0].specifics.experiments.keystore_encryption.enabled) if __name__ == '__main__': unittest.main()
bsd-3-clause
-4,201,014,836,781,829,000
589,915,622,716,675,300
43.25
80
0.685078
false
Omegaphora/external_chromium_org
third_party/protobuf/python/google/protobuf/descriptor_database.py
230
4411
# Protocol Buffers - Google's data interchange format # Copyright 2008 Google Inc. All rights reserved. # http://code.google.com/p/protobuf/ # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Provides a container for DescriptorProtos.""" __author__ = '[email protected] (Matt Toia)' class DescriptorDatabase(object): """A container accepting FileDescriptorProtos and maps DescriptorProtos.""" def __init__(self): self._file_desc_protos_by_file = {} self._file_desc_protos_by_symbol = {} def Add(self, file_desc_proto): """Adds the FileDescriptorProto and its types to this database. Args: file_desc_proto: The FileDescriptorProto to add. """ self._file_desc_protos_by_file[file_desc_proto.name] = file_desc_proto package = file_desc_proto.package for message in file_desc_proto.message_type: self._file_desc_protos_by_symbol.update( (name, file_desc_proto) for name in _ExtractSymbols(message, package)) for enum in file_desc_proto.enum_type: self._file_desc_protos_by_symbol[ '.'.join((package, enum.name))] = file_desc_proto def FindFileByName(self, name): """Finds the file descriptor proto by file name. Typically the file name is a relative path ending to a .proto file. The proto with the given name will have to have been added to this database using the Add method or else an error will be raised. Args: name: The file name to find. Returns: The file descriptor proto matching the name. Raises: KeyError if no file by the given name was added. """ return self._file_desc_protos_by_file[name] def FindFileContainingSymbol(self, symbol): """Finds the file descriptor proto containing the specified symbol. The symbol should be a fully qualified name including the file descriptor's package and any containing messages. Some examples: 'some.package.name.Message' 'some.package.name.Message.NestedEnum' The file descriptor proto containing the specified symbol must be added to this database using the Add method or else an error will be raised. Args: symbol: The fully qualified symbol name. Returns: The file descriptor proto containing the symbol. Raises: KeyError if no file contains the specified symbol. 
""" return self._file_desc_protos_by_symbol[symbol] def _ExtractSymbols(desc_proto, package): """Pulls out all the symbols from a descriptor proto. Args: desc_proto: The proto to extract symbols from. package: The package containing the descriptor type. Yields: The fully qualified name found in the descriptor. """ message_name = '.'.join((package, desc_proto.name)) yield message_name for nested_type in desc_proto.nested_type: for symbol in _ExtractSymbols(nested_type, message_name): yield symbol for enum_type in desc_proto.enum_type: yield '.'.join((message_name, enum_type.name))
bsd-3-clause
-8,642,681,472,356,627,000
-2,843,256,702,011,600,000
35.758333
80
0.72818
false
arongdari/sparse-graph-prior
sgp/GraphUtil.py
1
1649
import numpy as np
import networkx as nx

from collections import defaultdict
from scipy.sparse import csr_matrix, csc_matrix, triu


def sparse_to_networkx(G):
    nnz = G.nonzero()
    _G = nx.Graph()
    _G.add_edges_from(zip(nnz[0], nnz[1]))
    return _G


def compute_growth_rate(G, n_repeat=10):
    """
    Compute the growth rate of graph G

    :param G: sparse matrix (csc_matrix or csr_matrix)
    :param n_repeat: int
    :return:
    """
    n_n = G.shape[0]
    nnz = G.nonzero()

    n_link = defaultdict(list)

    for si in range(n_repeat):
        rnd_nodes = np.arange(n_n, dtype=int)
        np.random.shuffle(rnd_nodes)
        node_dic = {i: n for i, n in enumerate(rnd_nodes)}

        row_idx = list(map(lambda x: node_dic[x], nnz[0]))
        col_idx = list(map(lambda x: node_dic[x], nnz[1]))

        rnd_row = csr_matrix((G.data, (row_idx, col_idx)), shape=G.shape)
        rnd_col = csc_matrix((G.data, (row_idx, col_idx)), shape=G.shape)

        n_link[0].append(0)
        for i in range(1, n_n):
            # counting triples by expanding tensor
            cnt = 0
            cnt += rnd_row.getrow(i)[:, :i].nnz
            cnt += rnd_col.getcol(i)[:i - 1, :].nnz
            n_link[i].append(cnt + n_link[i - 1][-1])

    return np.array([np.mean(n_link[x]) for x in range(n_n)])


def degree_distribution(G):
    d = defaultdict(int)
    # degree = triu(G).sum(0)
    degree = G.sum(0) + G.sum(1)
    degree /= 2
    max_d = degree.max()
    for _d in degree.tolist()[0]:
        d[int(_d)] += 1
    return d, [d[i] for i in range(int(max_d))]


def degree_one_nodes(G):
    return np.sum(G.sum(0) / 2 == 1)
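A small usage sketch for the helpers above (not part of the original file; the import path sgp.GraphUtil follows the repository layout recorded for this entry):

import numpy as np
from scipy.sparse import csr_matrix

from sgp.GraphUtil import sparse_to_networkx, compute_growth_rate, degree_distribution

# A tiny undirected 4-node cycle stored as a symmetric sparse adjacency matrix.
edges = [(0, 1), (1, 2), (2, 3), (0, 3)]
rows = [i for i, j in edges] + [j for i, j in edges]
cols = [j for i, j in edges] + [i for i, j in edges]
A = csr_matrix((np.ones(len(rows)), (rows, cols)), shape=(4, 4))

G = sparse_to_networkx(A)            # networkx view of the same edge set
growth = compute_growth_rate(A)      # mean cumulative edge count under random node orderings
hist_dict, counts = degree_distribution(A)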
mit
-2,232,984,693,569,199,900
-974,055,517,704,446,500
24.765625
73
0.562159
false
bjodah/aqchem
chempy/tests/test_reactionsystem.py
1
13409
# -*- coding: utf-8 -*- from __future__ import (absolute_import, division, print_function) from itertools import chain import pytest from ..util.testing import requires from ..util.parsing import parsing_library from ..units import default_units, units_library, allclose from ..chemistry import Substance, Reaction from ..reactionsystem import ReactionSystem @requires(parsing_library, 'numpy') def test_ReactionSystem(): import numpy as np kw = dict(substance_factory=Substance.from_formula) r1 = Reaction.from_string('H2O -> H+ + OH-', 'H2O H+ OH-', name='r1') rs = ReactionSystem([r1], 'H2O H+ OH-', **kw) r2 = Reaction.from_string('H2O -> 2 H+ + OH-', 'H2O H+ OH-', name='r2') with pytest.raises(ValueError): ReactionSystem([r2], 'H2O H+ OH-', **kw) with pytest.raises(ValueError): ReactionSystem([r1, r1], 'H2O H+ OH-', **kw) assert rs.as_substance_index('H2O') == 0 assert rs.as_substance_index(0) == 0 varied, varied_keys = rs.per_substance_varied({'H2O': 55.4, 'H+': 1e-7, 'OH-': 1e-7}, {'H+': [1e-8, 1e-9, 1e-10, 1e-11], 'OH-': [1e-3, 1e-2]}) assert varied_keys == ('H+', 'OH-') assert len(varied.shape) == 3 assert varied.shape[:-1] == (4, 2) assert varied.shape[-1] == 3 assert np.all(varied[..., 0] == 55.4) assert np.all(varied[:, 1, 2] == 1e-2) assert rs['r1'] is r1 rs.rxns.append(r2) assert rs['r2'] is r2 with pytest.raises(KeyError): rs['r3'] rs.rxns.append(Reaction({}, {}, 0, name='r2', checks=())) with pytest.raises(ValueError): rs['r2'] empty_rs = ReactionSystem([]) rs2 = empty_rs + rs assert rs2 == rs rs3 = rs + empty_rs assert rs3 == rs @requires(parsing_library) def test_ReactionSystem__missing_substances_from_keys(): r1 = Reaction({'H2O'}, {'H+', 'OH-'}) with pytest.raises(ValueError): ReactionSystem([r1], substances={'H2O': Substance.from_formula('H2O')}) kw = dict(missing_substances_from_keys=True, substance_factory=Substance.from_formula) rs = ReactionSystem([r1], substances={'H2O': Substance.from_formula('H2O')}, **kw) assert rs.substances['OH-'].composition == {0: -1, 1: 1, 8: 1} @requires(parsing_library) def test_ReactionSystem__check_balance(): rs1 = ReactionSystem.from_string('\n'.join(['2 NH3 -> N2 + 3 H2', 'N2H4 -> N2 + 2 H2'])) assert rs1.check_balance(strict=True) rs2 = ReactionSystem.from_string('\n'.join(['2 A -> B', 'B -> 2A']), substance_factory=Substance) assert not rs2.check_balance(strict=True) assert rs2.composition_balance_vectors() == ([], []) def test_ReactionSystem__per_reaction_effect_on_substance(): rs = ReactionSystem([Reaction({'H2': 2, 'O2': 1}, {'H2O': 2})]) assert rs.per_reaction_effect_on_substance('H2') == {0: -2} assert rs.per_reaction_effect_on_substance('O2') == {0: -1} assert rs.per_reaction_effect_on_substance('H2O') == {0: 2} def test_ReactionSystem__rates(): rs = ReactionSystem([Reaction({'H2O'}, {'H+', 'OH-'}, 11)]) assert rs.rates({'H2O': 3, 'H+': 5, 'OH-': 7}) == {'H2O': -11*3, 'H+': 11*3, 'OH-': 11*3} def test_ReactionSystem__rates__cstr(): k = 11 rs = ReactionSystem([Reaction({'H2O2': 2}, {'O2': 1, 'H2O': 2}, k)]) c0 = {'H2O2': 3, 'O2': 5, 'H2O': 53} fr = 7 fc = {'H2O2': 13, 'O2': 17, 'H2O': 23} r = k*c0['H2O2']**2 ref = { 'H2O2': -2*r + fr*fc['H2O2'] - fr*c0['H2O2'], 'O2': r + fr*fc['O2'] - fr*c0['O2'], 'H2O': 2*r + fr*fc['H2O'] - fr*c0['H2O'] } variables = dict(chain(c0.items(), [('fc_'+key, val) for key, val in fc.items()], [('fr', fr)])) assert rs.rates(variables, cstr_fr_fc=('fr', {sk: 'fc_'+sk for sk in rs.substances})) == ref @requires('numpy') def test_ReactionSystem__html_tables(): r1 = Reaction({'A': 2}, {'A'}, name='R1') r2 = 
Reaction({'A'}, {'A': 2}, name='R2') rs = ReactionSystem([r1, r2]) ut, unc = rs.unimolecular_html_table() assert unc == {0} from chempy.printing import html assert html(ut, with_name=False) == u'<table><tr><td>A</td><td ><a title="1: A → 2 A">R2</a></td></tr></table>' bt, bnc = rs.bimolecular_html_table() assert bnc == {1} assert html(bt, with_name=False) == ( u'<table><th></th><th>A</th>\n<tr><td>A</td><td ><a title="0: 2 A → A">R1</a></td></tr></table>') @requires(parsing_library, 'numpy') def test_ReactionSystem__substance_factory(): r1 = Reaction.from_string('H2O -> H+ + OH-', 'H2O H+ OH-') rs = ReactionSystem([r1], 'H2O H+ OH-', substance_factory=Substance.from_formula) assert rs.net_stoichs(['H2O']) == [-1] assert rs.net_stoichs(['H+']) == [1] assert rs.net_stoichs(['OH-']) == [1] assert rs.substances['H2O'].composition[8] == 1 assert rs.substances['OH-'].composition[0] == -1 assert rs.substances['H+'].charge == 1 @requires(units_library) def test_ReactionSystem__as_per_substance_array_dict(): mol = default_units.mol m = default_units.metre M = default_units.molar rs = ReactionSystem([], [Substance('H2O')]) c = rs.as_per_substance_array({'H2O': 1*M}, unit=M) assert c.dimensionality == M.dimensionality assert abs(c[0]/(1000*mol/m**3) - 1) < 1e-16 c = rs.as_per_substance_array({'H2O': 1}) with pytest.raises(KeyError): c = rs.as_per_substance_array({'H': 1}) assert rs.as_per_substance_dict([42]) == {'H2O': 42} @requires(parsing_library) def test_ReactionSystem__add(): rs1 = ReactionSystem.from_string('\n'.join(['2 H2O2 -> O2 + 2 H2O', 'H2 + O2 -> H2O2'])) rs2 = ReactionSystem.from_string('\n'.join(['2 NH3 -> N2 + 3 H2'])) rs3 = rs1 + rs2 assert rs1 == rs1 assert rs1 != rs2 assert rs3 != rs1 assert len(rs1.rxns) == 2 and len(rs2.rxns) == 1 and len(rs3.rxns) == 3 for k in 'H2O2 O2 H2O H2 NH3 N2'.split(): assert k in rs3.substances rs1 += rs2 assert len(rs1.rxns) == 3 and len(rs2.rxns) == 1 assert rs1 == rs3 rs4 = ReactionSystem.from_string("H2O -> H+ + OH-; 1e-4") rs4 += [Reaction({'H+', 'OH-'}, {'H2O'}, 1e10)] res = rs4.rates({'H2O': 1, 'H+': 1e-7, 'OH-': 1e-7}) for k in 'H2O H+ OH-'.split(): assert abs(res[k]) < 1e-16 rs5 = ReactionSystem.from_string("H3O+ -> H+ + H2O") rs6 = rs4 + rs5 rs7 = rs6 + (Reaction.from_string("H+ + H2O -> H3O+"),) assert len(rs7.rxns) == 4 rs1 = ReactionSystem.from_string('O2 + H2 -> H2O2') rs1.substances['H2O2'].data['D'] = 123 rs2 = ReactionSystem.from_string('H2O2 -> 2 OH') rs2.substances['H2O2'].data['D'] = 456 rs2.substances['OH'].data['D'] = 789 rs3 = rs2 + rs1 assert (rs3.substances['H2O2'].data['D'] == 123 and rs3.substances['OH'].data['D'] == 789) rs2 += rs1 assert (rs2.substances['H2O2'].data['D'] == 123 and rs2.substances['OH'].data['D'] == 789) @requires(parsing_library) def test_ReactionSystem__from_string(): rs = ReactionSystem.from_string('-> H + OH; Radiolytic(2.1e-7)', checks=()) assert rs.rxns[0].reac == {} assert rs.rxns[0].prod == {'H': 1, 'OH': 1} assert rs.rxns[0].param.args == [2.1e-7] ref = 2.1e-7 * 0.15 * 998 assert rs.rates({'doserate': .15, 'density': 998}) == {'H': ref, 'OH': ref} r2, = ReactionSystem.from_string("H2O + H2O + H+ -> H3O+ + H2O").rxns assert r2.reac == {'H2O': 2, 'H+': 1} assert r2.prod == {'H2O': 1, 'H3O+': 1} @requires(parsing_library, units_library) def test_ReactionSystem__from_string__units(): r3, = ReactionSystem.from_string('(H2O) -> e-(aq) + H+ + OH; Radiolytic(2.1e-7*mol/J)').rxns assert len(r3.reac) == 0 and r3.inact_reac == {'H2O': 1} assert r3.prod == {'e-(aq)': 1, 'H+': 1, 'OH': 1} from 
chempy.kinetics.rates import Radiolytic mol, J = default_units.mol, default_units.J assert r3.param == Radiolytic(2.1e-7*mol/J) assert r3.param != Radiolytic(2.0e-7*mol/J) assert r3.param != Radiolytic(2.1e-7) assert r3.order() == 0 k = 1e-4/default_units.second rs = ReactionSystem.from_string(""" H2O -> H+ + OH-; {} """.format(repr(k))) assert allclose(rs.rxns[0].param, k) def test_ReactionSystem__from_string___special_naming(): rs = ReactionSystem.from_string(""" H2O* + H2O -> 2 H2O H2O* -> OH + H """) # excited water for sk in 'H2O* H2O OH H'.split(): assert sk in rs.substances assert rs.substances['H2O*'].composition == {1: 2, 8: 1} assert rs.categorize_substances() == dict(accumulated={'OH', 'H', 'H2O'}, depleted={'H2O*'}, unaffected=set(), nonparticipating=set()) @requires(parsing_library) def test_ReactionSystem__from_string__string_rate_const(): rsys = ReactionSystem.from_string("H+ + OH- -> H2O; 'kf'") r2, = rsys.rxns assert r2.reac == {'OH-': 1, 'H+': 1} assert r2.prod == {'H2O': 1} r2str = r2.string(rsys.substances, with_param=True) assert r2str.endswith('; kf') @requires('numpy') def test_ReactionSystem__upper_conc_bounds(): rs = ReactionSystem.from_string('\n'.join(['2 NH3 -> N2 + 3 H2', 'N2H4 -> N2 + 2 H2'])) c0 = {'NH3': 5, 'N2': 7, 'H2': 11, 'N2H4': 2} _N = 5 + 14 + 4 _H = 15 + 22 + 8 ref = { 'NH3': min(_N, _H/3), 'N2': _N/2, 'H2': _H/2, 'N2H4': min(_N/2, _H/4), } res = rs.as_per_substance_dict(rs.upper_conc_bounds(c0)) assert res == ref @requires('numpy') def test_ReactionSystem__upper_conc_bounds__a_substance_no_composition(): rs = ReactionSystem.from_string(""" H2O -> e-(aq) + H2O+ H2O+ + e-(aq) -> H2O """) c0 = {'H2O': 55.0, 'e-(aq)': 2e-3, 'H2O+': 3e-3} _O = 55 + 3e-3 _H = 2*55 + 2*3e-3 ref = { 'H2O': min(_O, _H/2), 'e-(aq)': float('inf'), 'H2O+': min(_O, _H/2), } res = rs.as_per_substance_dict(rs.upper_conc_bounds(c0)) assert res == ref @requires(parsing_library) def test_ReactionSystem__identify_equilibria(): rsys = ReactionSystem.from_string(""" 2 H2 + O2 -> 2 H2O ; 1e-3 H2O -> H+ + OH- ; 1e-4/55.35 H+ + OH- -> H2O ; 1e10 2 H2O -> 2 H2 + O2 """) assert rsys.identify_equilibria() == [(0, 3), (1, 2)] @requires(parsing_library) def test_ReactionSystem__categorize_substances(): rsys1 = ReactionSystem.from_string(""" 2 H2 + O2 -> 2 H2O ; 1e-3 H2O -> H+ + OH- ; 1e-4/55.35 H+ + OH- -> H2O ; 1e10 2 H2O -> 2 H2 + O2 """) assert all(not s for s in rsys1.categorize_substances().values()) rsys2 = ReactionSystem.from_string('\n'.join(['2 NH3 -> N2 + 3 H2', 'N2H4 -> N2 + 2 H2'])) assert rsys2.categorize_substances() == dict(accumulated={'N2', 'H2'}, depleted={'NH3', 'N2H4'}, unaffected=set(), nonparticipating=set()) rsys3 = ReactionSystem.from_string("H+ + OH- -> H2O; 'kf'") assert rsys3.categorize_substances() == dict(accumulated={'H2O'}, depleted={'H+', 'OH-'}, unaffected=set(), nonparticipating=set()) rsys4 = ReactionSystem([Reaction({'H2': 2, 'O2': 1}, {'H2O': 2})], 'H2 O2 H2O N2 Ar') assert rsys4.categorize_substances() == dict(accumulated={'H2O'}, depleted={'H2', 'O2'}, unaffected=set(), nonparticipating={'N2', 'Ar'}) rsys5 = ReactionSystem.from_string(""" A -> B; MassAction(unique_keys=('k1',)) B + C -> A + C; MassAction(unique_keys=('k2',)) 2 B -> B + C; MassAction(unique_keys=('k3',)) """, substance_factory=lambda formula: Substance(formula)) assert rsys5.categorize_substances() == dict(accumulated={'C'}, depleted=set(), unaffected=set(), nonparticipating=set()) rsys6 = ReactionSystem.from_string("""H2O2 + Fe+3 + (H2O2) -> 2 H2O + O2 + Fe+3""") assert 
rsys6.rxns[0].order() == 2 # the additional H2O2 within parenthesis assert rsys6.categorize_substances() == dict(accumulated={'H2O', 'O2'}, depleted={'H2O2'}, unaffected={'Fe+3'}, nonparticipating=set()) @requires(parsing_library) def test_ReactionSystem__split(): a = """ 2 H2 + O2 -> 2 H2O ; 1e-3 H2O -> H+ + OH- ; 1e-4/55.35 H+ + OH- -> H2O ; 1e10 2 H2O -> 2 H2 + O2""" b = """ 2 N -> N2""" c = """ 2 ClBr -> Cl2 + Br2 """ rsys1 = ReactionSystem.from_string(a+b+c) res = rsys1.split() ref = list(map(ReactionSystem.from_string, [a, b, c])) for rs in chain(res, ref): rs.sort_substances_inplace() res1a, res1b, res1c = res ref1a, ref1b, ref1c = ref assert res1a == ref1a assert res1b == ref1b assert res1c == ref1c assert res1c != ref1a assert rsys1.categorize_substances() == dict( accumulated={'N2', 'Cl2', 'Br2'}, depleted={'N', 'ClBr'}, unaffected=set(), nonparticipating=set()) def test_ReactionSystem__subset(): r1 = Reaction({'NH3': 2}, {'N2': 1, 'H2': 3}) r2 = Reaction({'N2H4': 1}, {'N2': 1, 'H2': 2}) rs1 = ReactionSystem([r1, r2]) rs2 = rs1.subset(lambda r: 'N2H4' in r.keys()) assert len(rs1.rxns) == 2 and len(rs2.rxns) == 1 assert rs2 == ReactionSystem([r2])
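A short standalone sketch mirroring the chempy calls exercised in the tests above (assuming chempy is installed; the rate constants reuse the values from test_ReactionSystem__add):

from chempy.reactionsystem import ReactionSystem

rsys = ReactionSystem.from_string("""
H2O -> H+ + OH-; 1e-4
H+ + OH- -> H2O; 1e10
""")

rates = rsys.rates({'H2O': 1, 'H+': 1e-7, 'OH-': 1e-7})
# Forward (1e-4 * 1) and backward (1e10 * 1e-7 * 1e-7) rates cancel, so the
# net rate of every species is ~0, as asserted in test_ReactionSystem__add.
assert all(abs(v) < 1e-10 for v in rates.values())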
bsd-2-clause
2,084,343,621,056,759,800
-7,734,478,871,467,916,000
36.867232
115
0.560686
false
ianmiell/docker-selinux
docker_selinux.py
1
6584
"""ShutIt module. See http://shutit.tk """ from shutit_module import ShutItModule class docker_selinux(ShutItModule): def build(self, shutit): # Some useful API calls for reference see shutit's docs for more info and options: # shutit.send(send) - send a command # shutit.multisend(send,send_dict) - send a command, dict contains {expect1:response1,expect2:response2,...} # shutit.log(msg) - send a message to the log # shutit.run_script(script) - run the passed-in string as a script # shutit.send_file(path, contents) - send file to path on target with given contents as a string # shutit.send_host_file(path, hostfilepath) - send file from host machine to path on the target # shutit.send_host_dir(path, hostfilepath) - send directory and contents to path on the target # shutit.host_file_exists(filename, directory=False) - returns True if file exists on host # shutit.file_exists(filename, directory=False) - returns True if file exists on target # shutit.add_to_bashrc(line) - add a line to bashrc # shutit.get_url(filename, locations) - get a file via url from locations specified in a list # shutit.user_exists(user) - returns True if the user exists on the target # shutit.package_installed(package) - returns True if the package exists on the target # shutit.pause_point(msg='') - give control of the terminal to the user # shutit.step_through(msg='') - give control to the user and allow them to step through commands # shutit.send_and_get_output(send) - returns the output of the sent command # shutit.send_and_match_output(send, matches) - returns True if any lines in output match any of # the regexp strings in the matches list # shutit.install(package) - install a package # shutit.remove(package) - remove a package # shutit.login(user='root', command='su -') - log user in with given command, and set up prompt and expects # shutit.logout() - clean up from a login # shutit.set_password(password, user='') - set password for a given user on target # shutit.get_config(module_id,option,default=None,boolean=False) - get configuration value # shutit.get_ip_address() - returns the ip address of the target # shutit.add_line_to_file(line, filename) - add line (or lines in an array) to the filename vagrant_dir = shutit.cfg[self.module_id]['vagrant_dir'] setenforce = shutit.cfg[self.module_id]['setenforce'] compile_policy = shutit.cfg[self.module_id]['compile_policy'] shutit.install('linux-generic linux-image-generic linux-headers-generic linux-signed-generic') shutit.install('virtualbox') shutit.send('wget -qO- https://dl.bintray.com/mitchellh/vagrant/vagrant_1.7.2_x86_64.deb > /tmp/vagrant.deb') shutit.send('dpkg -i /tmp/vagrant.deb') shutit.send('rm /tmp/vagrant.deb') shutit.send('mkdir -p ' + vagrant_dir) shutit.send('cd ' + vagrant_dir) # If the Vagrantfile exists, we assume we've already init'd appropriately. shutit.send('rm -f Vagrantfile') shutit.send(r'''cat > Vagrantfile << END Vagrant.configure(2) do |config| config.vm.box = "jdiprizio/centos-docker-io" config.vm.provider "virtualbox" do |vb| vb.memory = "1024" end end END''') # Query the status - if it's powered off or not created, bring it up. if shutit.send_and_match_output('vagrant status',['.*poweroff.*','.*not created.*','.*aborted.*']): shutit.send('vagrant up') # It should be up now, ssh into it and get root. shutit.login(command='vagrant ssh') shutit.login(command='sudo su') # Ensure required software's installed. 
shutit.send('yum install -y wget selinux-policy-devel') shutit.send('rm -rf /root/selinux') # Ensure we have the latest version of docker. shutit.send('wget -qO- https://get.docker.com/builds/Linux/x86_64/docker-latest > docker') shutit.send('mv -f docker /usr/bin/docker') shutit.send('chmod +x /usr/bin/docker') # Optional code for enforcing> if setenforce: shutit.send('''sed -i 's/=permissive/=enforcing/' /etc/selinux/config''') else: shutit.send('''sed -i 's/=enforcing/=permissive/' /etc/selinux/config''') # Log out to ensure the prompt stack is stable. shutit.logout() shutit.logout(command='sudo reboot') # Give it time... shutit.send('sleep 30') # Go back in. shutit.login(command='vagrant ssh') # Get back to root. shutit.login(command='sudo su') # Remove any pre-existing containers. # Recycle docker service. shutit.send('systemctl stop docker') shutit.send('systemctl start docker') # Remove any pre-existing containers. shutit.send('docker rm -f selinuxdock || /bin/true') if compile_policy: # Ensure we've cleaned up the files we're adding here. shutit.send('mkdir -p /root/selinux') shutit.send('cd /root/selinux') shutit.send('rm -rf /root/selinux/docker_apache.tc /root/selinux/script.sh /root/selinux/docker_apache.te') shutit.add_line_to_file('''policy_module(docker_apache,1.0) virt_sandbox_domain_template(docker_apache) allow docker_apache_t self: capability { chown dac_override kill setgid setuid net_bind_service sys_chroot sys_nice sys_tty_config } ; allow docker_apache_t self:tcp_socket create_stream_socket_perms; allow docker_apache_t self:udp_socket create_socket_perms; corenet_tcp_bind_all_nodes(docker_apache_t) corenet_tcp_bind_http_port(docker_apache_t) corenet_udp_bind_all_nodes(docker_apache_t) corenet_udp_bind_http_port(docker_apache_t) sysnet_dns_name_resolve(docker_apache_t) '''.split('\n'),'/root/selinux/docker_apache.te') shutit.add_line_to_file('''make -f /usr/share/selinux/devel/Makefile docker_apache.pp semodule -i docker_apache.pp docker run -d --name selinuxdock --security-opt label:type:docker_apache_t httpd '''.split('\n'),'/root/selinux/script.sh') shutit.send('chmod +x ./script.sh') shutit.send('./script.sh') shutit.send('sleep 2 && docker logs selinuxdock') # Have a look at the log output. shutit.send('grep -w denied /var/log/audit/audit.log') shutit.pause_point('Have a shell:') # Log out. shutit.logout() shutit.logout() return True def get_config(self, shutit): shutit.get_config(self.module_id, 'vagrant_dir', '/tmp/vagrant_dir') shutit.get_config(self.module_id, 'setenforce', False, boolean=True) shutit.get_config(self.module_id, 'compile_policy', True, boolean=True) return True def module(): return docker_selinux( 'io.dockerinpractice.docker_selinux.docker_selinux', 1184271914.00, description='Test of docker selinux on a vagrant box', maintainer='[email protected]', depends=['shutit.tk.setup'] )
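For clarity, the generated script.sh above boils down to three commands: compile the policy module, install it, and start a confined container. A hedged Python sketch of the same steps via subprocess (illustration only, not part of the ShutIt module):

import subprocess

# Compile the docker_apache policy module, install it, then start an httpd
# container confined to the docker_apache_t type -- the same commands the
# module writes into /root/selinux/script.sh.
subprocess.check_call(
    ['make', '-f', '/usr/share/selinux/devel/Makefile', 'docker_apache.pp'],
    cwd='/root/selinux')
subprocess.check_call(['semodule', '-i', 'docker_apache.pp'], cwd='/root/selinux')
subprocess.check_call(
    ['docker', 'run', '-d', '--name', 'selinuxdock',
     '--security-opt', 'label:type:docker_apache_t', 'httpd'])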
mit
-6,673,611,457,716,883,000
-3,746,304,616,644,715,000
47.77037
134
0.715674
false
ycaihua/kbengine
kbe/src/lib/python/Lib/test/test_traceback.py
80
16912
"""Test cases for traceback module""" from io import StringIO import sys import unittest import re from test.support import run_unittest, Error, captured_output from test.support import TESTFN, unlink, cpython_only import traceback class SyntaxTracebackCases(unittest.TestCase): # For now, a very minimal set of tests. I want to be sure that # formatting of SyntaxErrors works based on changes for 2.1. def get_exception_format(self, func, exc): try: func() except exc as value: return traceback.format_exception_only(exc, value) else: raise ValueError("call did not raise exception") def syntax_error_with_caret(self): compile("def fact(x):\n\treturn x!\n", "?", "exec") def syntax_error_with_caret_2(self): compile("1 +\n", "?", "exec") def syntax_error_bad_indentation(self): compile("def spam():\n print(1)\n print(2)", "?", "exec") def syntax_error_with_caret_non_ascii(self): compile('Python = "\u1e54\xfd\u0163\u0125\xf2\xf1" +', "?", "exec") def syntax_error_bad_indentation2(self): compile(" print(2)", "?", "exec") def test_caret(self): err = self.get_exception_format(self.syntax_error_with_caret, SyntaxError) self.assertEqual(len(err), 4) self.assertTrue(err[1].strip() == "return x!") self.assertIn("^", err[2]) # third line has caret self.assertEqual(err[1].find("!"), err[2].find("^")) # in the right place err = self.get_exception_format(self.syntax_error_with_caret_2, SyntaxError) self.assertIn("^", err[2]) # third line has caret self.assertEqual(err[2].count('\n'), 1) # and no additional newline self.assertEqual(err[1].find("+"), err[2].find("^")) # in the right place err = self.get_exception_format(self.syntax_error_with_caret_non_ascii, SyntaxError) self.assertIn("^", err[2]) # third line has caret self.assertEqual(err[2].count('\n'), 1) # and no additional newline self.assertEqual(err[1].find("+"), err[2].find("^")) # in the right place def test_nocaret(self): exc = SyntaxError("error", ("x.py", 23, None, "bad syntax")) err = traceback.format_exception_only(SyntaxError, exc) self.assertEqual(len(err), 3) self.assertEqual(err[1].strip(), "bad syntax") def test_bad_indentation(self): err = self.get_exception_format(self.syntax_error_bad_indentation, IndentationError) self.assertEqual(len(err), 4) self.assertEqual(err[1].strip(), "print(2)") self.assertIn("^", err[2]) self.assertEqual(err[1].find(")"), err[2].find("^")) err = self.get_exception_format(self.syntax_error_bad_indentation2, IndentationError) self.assertEqual(len(err), 4) self.assertEqual(err[1].strip(), "print(2)") self.assertIn("^", err[2]) self.assertEqual(err[1].find("p"), err[2].find("^")) def test_base_exception(self): # Test that exceptions derived from BaseException are formatted right e = KeyboardInterrupt() lst = traceback.format_exception_only(e.__class__, e) self.assertEqual(lst, ['KeyboardInterrupt\n']) def test_format_exception_only_bad__str__(self): class X(Exception): def __str__(self): 1/0 err = traceback.format_exception_only(X, X()) self.assertEqual(len(err), 1) str_value = '<unprintable %s object>' % X.__name__ if X.__module__ in ('__main__', 'builtins'): str_name = X.__name__ else: str_name = '.'.join([X.__module__, X.__name__]) self.assertEqual(err[0], "%s: %s\n" % (str_name, str_value)) def test_without_exception(self): err = traceback.format_exception_only(None, None) self.assertEqual(err, ['None\n']) def test_encoded_file(self): # Test that tracebacks are correctly printed for encoded source files: # - correct line number (Issue2384) # - respect file encoding (Issue3975) import tempfile, sys, subprocess, 
os # The spawned subprocess has its stdout redirected to a PIPE, and its # encoding may be different from the current interpreter, on Windows # at least. process = subprocess.Popen([sys.executable, "-c", "import sys; print(sys.stdout.encoding)"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT) stdout, stderr = process.communicate() output_encoding = str(stdout, 'ascii').splitlines()[0] def do_test(firstlines, message, charset, lineno): # Raise the message in a subprocess, and catch the output try: output = open(TESTFN, "w", encoding=charset) output.write("""{0}if 1: import traceback; raise RuntimeError('{1}') """.format(firstlines, message)) output.close() process = subprocess.Popen([sys.executable, TESTFN], stdout=subprocess.PIPE, stderr=subprocess.STDOUT) stdout, stderr = process.communicate() stdout = stdout.decode(output_encoding).splitlines() finally: unlink(TESTFN) # The source lines are encoded with the 'backslashreplace' handler encoded_message = message.encode(output_encoding, 'backslashreplace') # and we just decoded them with the output_encoding. message_ascii = encoded_message.decode(output_encoding) err_line = "raise RuntimeError('{0}')".format(message_ascii) err_msg = "RuntimeError: {0}".format(message_ascii) self.assertIn(("line %s" % lineno), stdout[1], "Invalid line number: {0!r} instead of {1}".format( stdout[1], lineno)) self.assertTrue(stdout[2].endswith(err_line), "Invalid traceback line: {0!r} instead of {1!r}".format( stdout[2], err_line)) self.assertTrue(stdout[3] == err_msg, "Invalid error message: {0!r} instead of {1!r}".format( stdout[3], err_msg)) do_test("", "foo", "ascii", 3) for charset in ("ascii", "iso-8859-1", "utf-8", "GBK"): if charset == "ascii": text = "foo" elif charset == "GBK": text = "\u4E02\u5100" else: text = "h\xe9 ho" do_test("# coding: {0}\n".format(charset), text, charset, 4) do_test("#!shebang\n# coding: {0}\n".format(charset), text, charset, 5) do_test(" \t\f\n# coding: {0}\n".format(charset), text, charset, 5) # Issue #18960: coding spec should has no effect do_test("0\n# coding: GBK\n", "h\xe9 ho", 'utf-8', 5) class TracebackFormatTests(unittest.TestCase): def some_exception(self): raise KeyError('blah') @cpython_only def check_traceback_format(self, cleanup_func=None): from _testcapi import traceback_print try: self.some_exception() except KeyError: type_, value, tb = sys.exc_info() if cleanup_func is not None: # Clear the inner frames, not this one cleanup_func(tb.tb_next) traceback_fmt = 'Traceback (most recent call last):\n' + \ ''.join(traceback.format_tb(tb)) file_ = StringIO() traceback_print(tb, file_) python_fmt = file_.getvalue() # Call all _tb and _exc functions with captured_output("stderr") as tbstderr: traceback.print_tb(tb) tbfile = StringIO() traceback.print_tb(tb, file=tbfile) with captured_output("stderr") as excstderr: traceback.print_exc() excfmt = traceback.format_exc() excfile = StringIO() traceback.print_exc(file=excfile) else: raise Error("unable to create test traceback string") # Make sure that Python and the traceback module format the same thing self.assertEqual(traceback_fmt, python_fmt) # Now verify the _tb func output self.assertEqual(tbstderr.getvalue(), tbfile.getvalue()) # Now verify the _exc func output self.assertEqual(excstderr.getvalue(), excfile.getvalue()) self.assertEqual(excfmt, excfile.getvalue()) # Make sure that the traceback is properly indented. 
tb_lines = python_fmt.splitlines() self.assertEqual(len(tb_lines), 5) banner = tb_lines[0] location, source_line = tb_lines[-2:] self.assertTrue(banner.startswith('Traceback')) self.assertTrue(location.startswith(' File')) self.assertTrue(source_line.startswith(' raise')) def test_traceback_format(self): self.check_traceback_format() def test_traceback_format_with_cleared_frames(self): # Check that traceback formatting also works with a clear()ed frame def cleanup_tb(tb): tb.tb_frame.clear() self.check_traceback_format(cleanup_tb) def test_stack_format(self): # Verify _stack functions. Note we have to use _getframe(1) to # compare them without this frame appearing in the output with captured_output("stderr") as ststderr: traceback.print_stack(sys._getframe(1)) stfile = StringIO() traceback.print_stack(sys._getframe(1), file=stfile) self.assertEqual(ststderr.getvalue(), stfile.getvalue()) stfmt = traceback.format_stack(sys._getframe(1)) self.assertEqual(ststderr.getvalue(), "".join(stfmt)) cause_message = ( "\nThe above exception was the direct cause " "of the following exception:\n\n") context_message = ( "\nDuring handling of the above exception, " "another exception occurred:\n\n") boundaries = re.compile( '(%s|%s)' % (re.escape(cause_message), re.escape(context_message))) class BaseExceptionReportingTests: def get_exception(self, exception_or_callable): if isinstance(exception_or_callable, Exception): return exception_or_callable try: exception_or_callable() except Exception as e: return e def zero_div(self): 1/0 # In zero_div def check_zero_div(self, msg): lines = msg.splitlines() self.assertTrue(lines[-3].startswith(' File')) self.assertIn('1/0 # In zero_div', lines[-2]) self.assertTrue(lines[-1].startswith('ZeroDivisionError'), lines[-1]) def test_simple(self): try: 1/0 # Marker except ZeroDivisionError as _: e = _ lines = self.get_report(e).splitlines() self.assertEqual(len(lines), 4) self.assertTrue(lines[0].startswith('Traceback')) self.assertTrue(lines[1].startswith(' File')) self.assertIn('1/0 # Marker', lines[2]) self.assertTrue(lines[3].startswith('ZeroDivisionError')) def test_cause(self): def inner_raise(): try: self.zero_div() except ZeroDivisionError as e: raise KeyError from e def outer_raise(): inner_raise() # Marker blocks = boundaries.split(self.get_report(outer_raise)) self.assertEqual(len(blocks), 3) self.assertEqual(blocks[1], cause_message) self.check_zero_div(blocks[0]) self.assertIn('inner_raise() # Marker', blocks[2]) def test_context(self): def inner_raise(): try: self.zero_div() except ZeroDivisionError: raise KeyError def outer_raise(): inner_raise() # Marker blocks = boundaries.split(self.get_report(outer_raise)) self.assertEqual(len(blocks), 3) self.assertEqual(blocks[1], context_message) self.check_zero_div(blocks[0]) self.assertIn('inner_raise() # Marker', blocks[2]) def test_context_suppression(self): try: try: raise Exception except: raise ZeroDivisionError from None except ZeroDivisionError as _: e = _ lines = self.get_report(e).splitlines() self.assertEqual(len(lines), 4) self.assertTrue(lines[0].startswith('Traceback')) self.assertTrue(lines[1].startswith(' File')) self.assertIn('ZeroDivisionError from None', lines[2]) self.assertTrue(lines[3].startswith('ZeroDivisionError')) def test_cause_and_context(self): # When both a cause and a context are set, only the cause should be # displayed and the context should be muted. 
def inner_raise(): try: self.zero_div() except ZeroDivisionError as _e: e = _e try: xyzzy except NameError: raise KeyError from e def outer_raise(): inner_raise() # Marker blocks = boundaries.split(self.get_report(outer_raise)) self.assertEqual(len(blocks), 3) self.assertEqual(blocks[1], cause_message) self.check_zero_div(blocks[0]) self.assertIn('inner_raise() # Marker', blocks[2]) def test_cause_recursive(self): def inner_raise(): try: try: self.zero_div() except ZeroDivisionError as e: z = e raise KeyError from e except KeyError as e: raise z from e def outer_raise(): inner_raise() # Marker blocks = boundaries.split(self.get_report(outer_raise)) self.assertEqual(len(blocks), 3) self.assertEqual(blocks[1], cause_message) # The first block is the KeyError raised from the ZeroDivisionError self.assertIn('raise KeyError from e', blocks[0]) self.assertNotIn('1/0', blocks[0]) # The second block (apart from the boundary) is the ZeroDivisionError # re-raised from the KeyError self.assertIn('inner_raise() # Marker', blocks[2]) self.check_zero_div(blocks[2]) def test_syntax_error_offset_at_eol(self): # See #10186. def e(): raise SyntaxError('', ('', 0, 5, 'hello')) msg = self.get_report(e).splitlines() self.assertEqual(msg[-2], " ^") def e(): exec("x = 5 | 4 |") msg = self.get_report(e).splitlines() self.assertEqual(msg[-2], ' ^') class PyExcReportingTests(BaseExceptionReportingTests, unittest.TestCase): # # This checks reporting through the 'traceback' module, with both # format_exception() and print_exception(). # def get_report(self, e): e = self.get_exception(e) s = ''.join( traceback.format_exception(type(e), e, e.__traceback__)) with captured_output("stderr") as sio: traceback.print_exception(type(e), e, e.__traceback__) self.assertEqual(sio.getvalue(), s) return s class CExcReportingTests(BaseExceptionReportingTests, unittest.TestCase): # # This checks built-in reporting by the interpreter. # @cpython_only def get_report(self, e): from _testcapi import exception_print e = self.get_exception(e) with captured_output("stderr") as s: exception_print(e) return s.getvalue() class MiscTracebackCases(unittest.TestCase): # # Check non-printing functions in traceback module # def test_clear(self): def outer(): middle() def middle(): inner() def inner(): i = 1 1/0 try: outer() except: type_, value, tb = sys.exc_info() # Initial assertion: there's one local in the inner frame. inner_frame = tb.tb_next.tb_next.tb_next.tb_frame self.assertEqual(len(inner_frame.f_locals), 1) # Clear traceback frames traceback.clear_frames(tb) # Local variable dict should now be empty. self.assertEqual(len(inner_frame.f_locals), 0) def test_main(): run_unittest(__name__) if __name__ == "__main__": test_main()
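A minimal standalone sketch of the standard-library calls these tests exercise (format_exception and format_exception_only), independent of the test harness:

import sys
import traceback

def failing():
    return 1 / 0

try:
    failing()
except ZeroDivisionError:
    exc_type, exc_value, tb = sys.exc_info()
    # format_exception() returns the same lines print_exception() would write.
    lines = traceback.format_exception(exc_type, exc_value, tb)
    assert lines[0].startswith('Traceback (most recent call last):')
    assert lines[-1].startswith('ZeroDivisionError')
    # format_exception_only() gives just the final "ExceptionType: message" part.
    only = traceback.format_exception_only(exc_type, exc_value)
    assert only == ['ZeroDivisionError: division by zero\n']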
lgpl-3.0
-5,028,389,412,394,599,000
-7,880,621,656,023,481,000
36.498891
82
0.568236
false
kvar/ansible
lib/ansible/module_utils/ec2.py
20
28571
# This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible # still belong to the author of the module, and may assign their own license # to the complete work. # # Copyright (c), Michael DeHaan <[email protected]>, 2012-2013 # All rights reserved. # # Redistribution and use in source and binary forms, with or without modification, # are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import os import re import traceback from ansible.module_utils.ansible_release import __version__ from ansible.module_utils.basic import missing_required_lib, env_fallback from ansible.module_utils._text import to_native, to_text from ansible.module_utils.cloud import CloudRetry from ansible.module_utils.six import string_types, binary_type, text_type from ansible.module_utils.common.dict_transformations import ( camel_dict_to_snake_dict, snake_dict_to_camel_dict, _camel_to_snake, _snake_to_camel, ) BOTO_IMP_ERR = None try: import boto import boto.ec2 # boto does weird import stuff HAS_BOTO = True except ImportError: BOTO_IMP_ERR = traceback.format_exc() HAS_BOTO = False BOTO3_IMP_ERR = None try: import boto3 import botocore HAS_BOTO3 = True except Exception: BOTO3_IMP_ERR = traceback.format_exc() HAS_BOTO3 = False try: # Although this is to allow Python 3 the ability to use the custom comparison as a key, Python 2.7 also # uses this (and it works as expected). Python 2.6 will trigger the ImportError. from functools import cmp_to_key PY3_COMPARISON = True except ImportError: PY3_COMPARISON = False class AnsibleAWSError(Exception): pass def _botocore_exception_maybe(): """ Allow for boto3 not being installed when using these utils by wrapping botocore.exceptions instead of assigning from it directly. 
""" if HAS_BOTO3: return botocore.exceptions.ClientError return type(None) class AWSRetry(CloudRetry): base_class = _botocore_exception_maybe() @staticmethod def status_code_from_exception(error): return error.response['Error']['Code'] @staticmethod def found(response_code, catch_extra_error_codes=None): # This list of failures is based on this API Reference # http://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html # # TooManyRequestsException comes from inside botocore when it # does retrys, unfortunately however it does not try long # enough to allow some services such as API Gateway to # complete configuration. At the moment of writing there is a # botocore/boto3 bug open to fix this. # # https://github.com/boto/boto3/issues/876 (and linked PRs etc) retry_on = [ 'RequestLimitExceeded', 'Unavailable', 'ServiceUnavailable', 'InternalFailure', 'InternalError', 'TooManyRequestsException', 'Throttling' ] if catch_extra_error_codes: retry_on.extend(catch_extra_error_codes) not_found = re.compile(r'^\w+.NotFound') return response_code in retry_on or not_found.search(response_code) def boto3_conn(module, conn_type=None, resource=None, region=None, endpoint=None, **params): try: return _boto3_conn(conn_type=conn_type, resource=resource, region=region, endpoint=endpoint, **params) except ValueError as e: module.fail_json(msg="Couldn't connect to AWS: %s" % to_native(e)) except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError, botocore.exceptions.NoCredentialsError, botocore.exceptions.ConfigParseError) as e: module.fail_json(msg=to_native(e)) except botocore.exceptions.NoRegionError as e: module.fail_json(msg="The %s module requires a region and none was found in configuration, " "environment variables or module parameters" % module._name) def _boto3_conn(conn_type=None, resource=None, region=None, endpoint=None, **params): profile = params.pop('profile_name', None) if conn_type not in ['both', 'resource', 'client']: raise ValueError('There is an issue in the calling code. You ' 'must specify either both, resource, or client to ' 'the conn_type parameter in the boto3_conn function ' 'call') if params.get('config'): config = params.pop('config') config.user_agent_extra = 'Ansible/{0}'.format(__version__) else: config = botocore.config.Config( user_agent_extra='Ansible/{0}'.format(__version__), ) session = boto3.session.Session( profile_name=profile, ) if conn_type == 'resource': return session.resource(resource, config=config, region_name=region, endpoint_url=endpoint, **params) elif conn_type == 'client': return session.client(resource, config=config, region_name=region, endpoint_url=endpoint, **params) else: client = session.client(resource, region_name=region, endpoint_url=endpoint, **params) resource = session.resource(resource, region_name=region, endpoint_url=endpoint, **params) return client, resource boto3_inventory_conn = _boto3_conn def boto_exception(err): """ Extracts the error message from a boto exception. 
:param err: Exception from boto :return: Error message """ if hasattr(err, 'error_message'): error = err.error_message elif hasattr(err, 'message'): error = str(err.message) + ' ' + str(err) + ' - ' + str(type(err)) else: error = '%s: %s' % (Exception, err) return error def aws_common_argument_spec(): return dict( debug_botocore_endpoint_logs=dict(fallback=(env_fallback, ['ANSIBLE_DEBUG_BOTOCORE_LOGS']), default=False, type='bool'), ec2_url=dict(), aws_secret_key=dict(aliases=['ec2_secret_key', 'secret_key'], no_log=True), aws_access_key=dict(aliases=['ec2_access_key', 'access_key']), validate_certs=dict(default=True, type='bool'), security_token=dict(aliases=['access_token'], no_log=True), profile=dict(), ) def ec2_argument_spec(): spec = aws_common_argument_spec() spec.update( dict( region=dict(aliases=['aws_region', 'ec2_region']), ) ) return spec def get_aws_connection_info(module, boto3=False): # Check module args for credentials, then check environment vars # access_key ec2_url = module.params.get('ec2_url') access_key = module.params.get('aws_access_key') secret_key = module.params.get('aws_secret_key') security_token = module.params.get('security_token') region = module.params.get('region') profile_name = module.params.get('profile') validate_certs = module.params.get('validate_certs') if not ec2_url: if 'AWS_URL' in os.environ: ec2_url = os.environ['AWS_URL'] elif 'EC2_URL' in os.environ: ec2_url = os.environ['EC2_URL'] if not access_key: if os.environ.get('AWS_ACCESS_KEY_ID'): access_key = os.environ['AWS_ACCESS_KEY_ID'] elif os.environ.get('AWS_ACCESS_KEY'): access_key = os.environ['AWS_ACCESS_KEY'] elif os.environ.get('EC2_ACCESS_KEY'): access_key = os.environ['EC2_ACCESS_KEY'] elif HAS_BOTO and boto.config.get('Credentials', 'aws_access_key_id'): access_key = boto.config.get('Credentials', 'aws_access_key_id') elif HAS_BOTO and boto.config.get('default', 'aws_access_key_id'): access_key = boto.config.get('default', 'aws_access_key_id') else: # in case access_key came in as empty string access_key = None if not secret_key: if os.environ.get('AWS_SECRET_ACCESS_KEY'): secret_key = os.environ['AWS_SECRET_ACCESS_KEY'] elif os.environ.get('AWS_SECRET_KEY'): secret_key = os.environ['AWS_SECRET_KEY'] elif os.environ.get('EC2_SECRET_KEY'): secret_key = os.environ['EC2_SECRET_KEY'] elif HAS_BOTO and boto.config.get('Credentials', 'aws_secret_access_key'): secret_key = boto.config.get('Credentials', 'aws_secret_access_key') elif HAS_BOTO and boto.config.get('default', 'aws_secret_access_key'): secret_key = boto.config.get('default', 'aws_secret_access_key') else: # in case secret_key came in as empty string secret_key = None if not region: if 'AWS_REGION' in os.environ: region = os.environ['AWS_REGION'] elif 'AWS_DEFAULT_REGION' in os.environ: region = os.environ['AWS_DEFAULT_REGION'] elif 'EC2_REGION' in os.environ: region = os.environ['EC2_REGION'] else: if not boto3: if HAS_BOTO: # boto.config.get returns None if config not found region = boto.config.get('Boto', 'aws_region') if not region: region = boto.config.get('Boto', 'ec2_region') else: module.fail_json(msg=missing_required_lib('boto'), exception=BOTO_IMP_ERR) elif HAS_BOTO3: # here we don't need to make an additional call, will default to 'us-east-1' if the below evaluates to None. 
try: region = botocore.session.Session(profile=profile_name).get_config_variable('region') except botocore.exceptions.ProfileNotFound as e: pass else: module.fail_json(msg=missing_required_lib('boto3'), exception=BOTO3_IMP_ERR) if not security_token: if os.environ.get('AWS_SECURITY_TOKEN'): security_token = os.environ['AWS_SECURITY_TOKEN'] elif os.environ.get('AWS_SESSION_TOKEN'): security_token = os.environ['AWS_SESSION_TOKEN'] elif os.environ.get('EC2_SECURITY_TOKEN'): security_token = os.environ['EC2_SECURITY_TOKEN'] elif HAS_BOTO and boto.config.get('Credentials', 'aws_security_token'): security_token = boto.config.get('Credentials', 'aws_security_token') elif HAS_BOTO and boto.config.get('default', 'aws_security_token'): security_token = boto.config.get('default', 'aws_security_token') else: # in case secret_token came in as empty string security_token = None if HAS_BOTO3 and boto3: boto_params = dict(aws_access_key_id=access_key, aws_secret_access_key=secret_key, aws_session_token=security_token) boto_params['verify'] = validate_certs if profile_name: boto_params = dict(aws_access_key_id=None, aws_secret_access_key=None, aws_session_token=None) boto_params['profile_name'] = profile_name else: boto_params = dict(aws_access_key_id=access_key, aws_secret_access_key=secret_key, security_token=security_token) # only set profile_name if passed as an argument if profile_name: boto_params['profile_name'] = profile_name boto_params['validate_certs'] = validate_certs for param, value in boto_params.items(): if isinstance(value, binary_type): boto_params[param] = text_type(value, 'utf-8', 'strict') return region, ec2_url, boto_params def get_ec2_creds(module): ''' for compatibility mode with old modules that don't/can't yet use ec2_connect method ''' region, ec2_url, boto_params = get_aws_connection_info(module) return ec2_url, boto_params['aws_access_key_id'], boto_params['aws_secret_access_key'], region def boto_fix_security_token_in_profile(conn, profile_name): ''' monkey patch for boto issue boto/boto#2100 ''' profile = 'profile ' + profile_name if boto.config.has_option(profile, 'aws_security_token'): conn.provider.set_security_token(boto.config.get(profile, 'aws_security_token')) return conn def connect_to_aws(aws_module, region, **params): try: conn = aws_module.connect_to_region(region, **params) except(boto.provider.ProfileNotFoundError): raise AnsibleAWSError("Profile given for AWS was not found. Please fix and retry.") if not conn: if region not in [aws_module_region.name for aws_module_region in aws_module.regions()]: raise AnsibleAWSError("Region %s does not seem to be available for aws module %s. If the region definitely exists, you may need to upgrade " "boto or extend with endpoints_path" % (region, aws_module.__name__)) else: raise AnsibleAWSError("Unknown problem connecting to region %s for aws module %s." % (region, aws_module.__name__)) if params.get('profile_name'): conn = boto_fix_security_token_in_profile(conn, params['profile_name']) return conn def ec2_connect(module): """ Return an ec2 connection""" region, ec2_url, boto_params = get_aws_connection_info(module) # If we have a region specified, connect to its endpoint. 
if region: try: ec2 = connect_to_aws(boto.ec2, region, **boto_params) except (boto.exception.NoAuthHandlerFound, AnsibleAWSError, boto.provider.ProfileNotFoundError) as e: module.fail_json(msg=str(e)) # Otherwise, no region so we fallback to the old connection method elif ec2_url: try: ec2 = boto.connect_ec2_endpoint(ec2_url, **boto_params) except (boto.exception.NoAuthHandlerFound, AnsibleAWSError, boto.provider.ProfileNotFoundError) as e: module.fail_json(msg=str(e)) else: module.fail_json(msg="Either region or ec2_url must be specified") return ec2 def ansible_dict_to_boto3_filter_list(filters_dict): """ Convert an Ansible dict of filters to list of dicts that boto3 can use Args: filters_dict (dict): Dict of AWS filters. Basic Usage: >>> filters = {'some-aws-id': 'i-01234567'} >>> ansible_dict_to_boto3_filter_list(filters) { 'some-aws-id': 'i-01234567' } Returns: List: List of AWS filters and their values [ { 'Name': 'some-aws-id', 'Values': [ 'i-01234567', ] } ] """ filters_list = [] for k, v in filters_dict.items(): filter_dict = {'Name': k} if isinstance(v, string_types): filter_dict['Values'] = [v] else: filter_dict['Values'] = v filters_list.append(filter_dict) return filters_list def boto3_tag_list_to_ansible_dict(tags_list, tag_name_key_name=None, tag_value_key_name=None): """ Convert a boto3 list of resource tags to a flat dict of key:value pairs Args: tags_list (list): List of dicts representing AWS tags. tag_name_key_name (str): Value to use as the key for all tag keys (useful because boto3 doesn't always use "Key") tag_value_key_name (str): Value to use as the key for all tag values (useful because boto3 doesn't always use "Value") Basic Usage: >>> tags_list = [{'Key': 'MyTagKey', 'Value': 'MyTagValue'}] >>> boto3_tag_list_to_ansible_dict(tags_list) [ { 'Key': 'MyTagKey', 'Value': 'MyTagValue' } ] Returns: Dict: Dict of key:value pairs representing AWS tags { 'MyTagKey': 'MyTagValue', } """ if tag_name_key_name and tag_value_key_name: tag_candidates = {tag_name_key_name: tag_value_key_name} else: tag_candidates = {'key': 'value', 'Key': 'Value'} if not tags_list: return {} for k, v in tag_candidates.items(): if k in tags_list[0] and v in tags_list[0]: return dict((tag[k], tag[v]) for tag in tags_list) raise ValueError("Couldn't find tag key (candidates %s) in tag list %s" % (str(tag_candidates), str(tags_list))) def ansible_dict_to_boto3_tag_list(tags_dict, tag_name_key_name='Key', tag_value_key_name='Value'): """ Convert a flat dict of key:value pairs representing AWS resource tags to a boto3 list of dicts Args: tags_dict (dict): Dict representing AWS resource tags. tag_name_key_name (str): Value to use as the key for all tag keys (useful because boto3 doesn't always use "Key") tag_value_key_name (str): Value to use as the key for all tag values (useful because boto3 doesn't always use "Value") Basic Usage: >>> tags_dict = {'MyTagKey': 'MyTagValue'} >>> ansible_dict_to_boto3_tag_list(tags_dict) { 'MyTagKey': 'MyTagValue' } Returns: List: List of dicts containing tag keys and values [ { 'Key': 'MyTagKey', 'Value': 'MyTagValue' } ] """ tags_list = [] for k, v in tags_dict.items(): tags_list.append({tag_name_key_name: k, tag_value_key_name: to_native(v)}) return tags_list def get_ec2_security_group_ids_from_names(sec_group_list, ec2_connection, vpc_id=None, boto3=True): """ Return list of security group IDs from security group names. Note that security group names are not unique across VPCs. 
If a name exists across multiple VPCs and no VPC ID is supplied, all matching IDs will be returned. This will probably lead to a boto exception if you attempt to assign both IDs to a resource so ensure you wrap the call in a try block """ def get_sg_name(sg, boto3): if boto3: return sg['GroupName'] else: return sg.name def get_sg_id(sg, boto3): if boto3: return sg['GroupId'] else: return sg.id sec_group_id_list = [] if isinstance(sec_group_list, string_types): sec_group_list = [sec_group_list] # Get all security groups if boto3: if vpc_id: filters = [ { 'Name': 'vpc-id', 'Values': [ vpc_id, ] } ] all_sec_groups = ec2_connection.describe_security_groups(Filters=filters)['SecurityGroups'] else: all_sec_groups = ec2_connection.describe_security_groups()['SecurityGroups'] else: if vpc_id: filters = {'vpc-id': vpc_id} all_sec_groups = ec2_connection.get_all_security_groups(filters=filters) else: all_sec_groups = ec2_connection.get_all_security_groups() unmatched = set(sec_group_list).difference(str(get_sg_name(all_sg, boto3)) for all_sg in all_sec_groups) sec_group_name_list = list(set(sec_group_list) - set(unmatched)) if len(unmatched) > 0: # If we have unmatched names that look like an ID, assume they are import re sec_group_id_list[:] = [sg for sg in unmatched if re.match('sg-[a-fA-F0-9]+$', sg)] still_unmatched = [sg for sg in unmatched if not re.match('sg-[a-fA-F0-9]+$', sg)] if len(still_unmatched) > 0: raise ValueError("The following group names are not valid: %s" % ', '.join(still_unmatched)) sec_group_id_list += [str(get_sg_id(all_sg, boto3)) for all_sg in all_sec_groups if str(get_sg_name(all_sg, boto3)) in sec_group_name_list] return sec_group_id_list def _hashable_policy(policy, policy_list): """ Takes a policy and returns a list, the contents of which are all hashable and sorted. Example input policy: {'Version': '2012-10-17', 'Statement': [{'Action': 's3:PutObjectAcl', 'Sid': 'AddCannedAcl2', 'Resource': 'arn:aws:s3:::test_policy/*', 'Effect': 'Allow', 'Principal': {'AWS': ['arn:aws:iam::XXXXXXXXXXXX:user/username1', 'arn:aws:iam::XXXXXXXXXXXX:user/username2']} }]} Returned value: [('Statement', ((('Action', (u's3:PutObjectAcl',)), ('Effect', (u'Allow',)), ('Principal', ('AWS', ((u'arn:aws:iam::XXXXXXXXXXXX:user/username1',), (u'arn:aws:iam::XXXXXXXXXXXX:user/username2',)))), ('Resource', (u'arn:aws:s3:::test_policy/*',)), ('Sid', (u'AddCannedAcl2',)))), ('Version', (u'2012-10-17',)))] """ if isinstance(policy, list): for each in policy: tupleified = _hashable_policy(each, []) if isinstance(tupleified, list): tupleified = tuple(tupleified) policy_list.append(tupleified) elif isinstance(policy, string_types) or isinstance(policy, binary_type): policy = to_text(policy) # convert root account ARNs to just account IDs if policy.startswith('arn:aws:iam::') and policy.endswith(':root'): policy = policy.split(':')[4] return [policy] elif isinstance(policy, dict): sorted_keys = list(policy.keys()) sorted_keys.sort() for key in sorted_keys: tupleified = _hashable_policy(policy[key], []) if isinstance(tupleified, list): tupleified = tuple(tupleified) policy_list.append((key, tupleified)) # ensure we aren't returning deeply nested structures of length 1 if len(policy_list) == 1 and isinstance(policy_list[0], tuple): policy_list = policy_list[0] if isinstance(policy_list, list): if PY3_COMPARISON: policy_list.sort(key=cmp_to_key(py3cmp)) else: policy_list.sort() return policy_list def py3cmp(a, b): """ Python 2 can sort lists of mixed types. Strings < tuples. 
Without this function this fails on Python 3.""" try: if a > b: return 1 elif a < b: return -1 else: return 0 except TypeError as e: # check to see if they're tuple-string # always say strings are less than tuples (to maintain compatibility with python2) str_ind = to_text(e).find('str') tup_ind = to_text(e).find('tuple') if -1 not in (str_ind, tup_ind): if str_ind < tup_ind: return -1 elif tup_ind < str_ind: return 1 raise def compare_policies(current_policy, new_policy): """ Compares the existing policy and the updated policy Returns True if there is a difference between policies. """ return set(_hashable_policy(new_policy, [])) != set(_hashable_policy(current_policy, [])) def sort_json_policy_dict(policy_dict): """ Sort any lists in an IAM JSON policy so that comparison of two policies with identical values but different orders will return true Args: policy_dict (dict): Dict representing IAM JSON policy. Basic Usage: >>> my_iam_policy = {'Principle': {'AWS':["31","7","14","101"]} >>> sort_json_policy_dict(my_iam_policy) Returns: Dict: Will return a copy of the policy as a Dict but any List will be sorted { 'Principle': { 'AWS': [ '7', '14', '31', '101' ] } } """ def value_is_list(my_list): checked_list = [] for item in my_list: if isinstance(item, dict): checked_list.append(sort_json_policy_dict(item)) elif isinstance(item, list): checked_list.append(value_is_list(item)) else: checked_list.append(item) # Sort list. If it's a list of dictionaries, sort by tuple of key-value # pairs, since Python 3 doesn't allow comparisons such as `<` between dictionaries. checked_list.sort(key=lambda x: sorted(x.items()) if isinstance(x, dict) else x) return checked_list ordered_policy_dict = {} for key, value in policy_dict.items(): if isinstance(value, dict): ordered_policy_dict[key] = sort_json_policy_dict(value) elif isinstance(value, list): ordered_policy_dict[key] = value_is_list(value) else: ordered_policy_dict[key] = value return ordered_policy_dict def map_complex_type(complex_type, type_map): """ Allows to cast elements within a dictionary to a specific type Example of usage: DEPLOYMENT_CONFIGURATION_TYPE_MAP = { 'maximum_percent': 'int', 'minimum_healthy_percent': 'int' } deployment_configuration = map_complex_type(module.params['deployment_configuration'], DEPLOYMENT_CONFIGURATION_TYPE_MAP) This ensures all keys within the root element are casted and valid integers """ if complex_type is None: return new_type = type(complex_type)() if isinstance(complex_type, dict): for key in complex_type: if key in type_map: if isinstance(type_map[key], list): new_type[key] = map_complex_type( complex_type[key], type_map[key][0]) else: new_type[key] = map_complex_type( complex_type[key], type_map[key]) else: return complex_type elif isinstance(complex_type, list): for i in range(len(complex_type)): new_type.append(map_complex_type( complex_type[i], type_map)) elif type_map: return globals()['__builtins__'][type_map](complex_type) return new_type def compare_aws_tags(current_tags_dict, new_tags_dict, purge_tags=True): """ Compare two dicts of AWS tags. Dicts are expected to of been created using 'boto3_tag_list_to_ansible_dict' helper function. Two dicts are returned - the first is tags to be set, the second is any tags to remove. Since the AWS APIs differ these may not be able to be used out of the box. :param current_tags_dict: :param new_tags_dict: :param purge_tags: :return: tag_key_value_pairs_to_set: a dict of key value pairs that need to be set in AWS. 
If all tags are identical this dict will be empty :return: tag_keys_to_unset: a list of key names (type str) that need to be unset in AWS. If no tags need to be unset this list will be empty """ tag_key_value_pairs_to_set = {} tag_keys_to_unset = [] for key in current_tags_dict.keys(): if key not in new_tags_dict and purge_tags: tag_keys_to_unset.append(key) for key in set(new_tags_dict.keys()) - set(tag_keys_to_unset): if to_text(new_tags_dict[key]) != current_tags_dict.get(key): tag_key_value_pairs_to_set[key] = new_tags_dict[key] return tag_key_value_pairs_to_set, tag_keys_to_unset
gpl-3.0
-8,980,883,171,108,096,000
-7,977,248,612,771,281,000
38.138356
152
0.613349
false
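The record above appears to be Ansible's EC2 module-utils helpers (boto/boto3 connection plus tag and filter conversion). A minimal usage sketch of the pure helper functions it defines, assuming the file is importable as ansible.module_utils.ec2 and with made-up tag values:

from ansible.module_utils.ec2 import (
    ansible_dict_to_boto3_filter_list,
    boto3_tag_list_to_ansible_dict,
    compare_aws_tags,
)

# Ansible-style filter dict -> the Filters=[...] shape boto3 expects.
filters = ansible_dict_to_boto3_filter_list({'vpc-id': 'vpc-1234'})
# [{'Name': 'vpc-id', 'Values': ['vpc-1234']}]

# Flatten a boto3 tag list, then diff it against the desired tags.
current = boto3_tag_list_to_ansible_dict([{'Key': 'env', 'Value': 'prod'}])
to_set, to_unset = compare_aws_tags(current, {'env': 'staging'}, purge_tags=True)
# to_set == {'env': 'staging'}, to_unset == []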
armink/rt-thread
bsp/avr32uc3b0/rtconfig.py
18
1667
import os # toolchains options ARCH = 'avr32' CPU = 'uc3' PART = 'uc3b0256' BOARD = 'USERBOARD' CROSS_TOOL = 'gcc' if os.getenv('RTT_CC'): CROSS_TOOL = os.getenv('RTT_CC') if CROSS_TOOL == 'gcc': PLATFORM = 'gcc' EXEC_PATH = 'C:/Program Files/Atmel/AVR Tools/AVR Toolchain/bin' elif CROSS_TOOL == 'keil': print('================ERROR============================') print('Not support keil yet!') print('=================================================') exit(0) elif CROSS_TOOL == 'iar': print('================ERROR============================') print('Not support iar yet!') print('=================================================') exit(0) if os.getenv('RTT_EXEC_PATH'): EXEC_PATH = os.getenv('RTT_EXEC_PATH') #BUILD = 'debug' BUILD = 'release' if PLATFORM == 'gcc': # toolchains PREFIX = 'avr32-' CC = PREFIX + 'gcc' AS = PREFIX + 'gcc' AR = PREFIX + 'ar' LINK = PREFIX + 'gcc' TARGET_EXT = 'elf' SIZE = PREFIX + 'size' OBJDUMP = PREFIX + 'objdump' OBJCPY = PREFIX + 'objcopy' DEVICE = ' -mpart=' + PART CFLAGS = DEVICE + ' -DBOARD=' + BOARD + ' -fmessage-length=0 -ffunction-sections -masm-addr-pseudos' AFLAGS = ' -c -x assembler-with-cpp' + DEVICE LFLAGS = DEVICE + ' -Wl,--gc-sections --rodata-writable -Wl,--direct-data -LSOFTWARE_FRAMEWORK/UTILS/LIBS/NEWLIB_ADDONS -T avr32elf_uc3b0256.lds' CPATH = '' LPATH = '' if BUILD == 'debug': CFLAGS += ' -O0 -g3 -Wall' AFLAGS += ' -g3' else: CFLAGS += ' -O2 -Wall' POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' + SIZE + ' $TARGET \n'
apache-2.0
3,732,818,908,055,819,000
7,395,826,986,107,822,000
26.783333
149
0.512897
false
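The rtconfig.py record above is an RT-Thread SCons build configuration for an AVR32 UC3B0256 part; the toolchain and its install location come from the RTT_CC and RTT_EXEC_PATH environment variables. A small sketch of setting those variables from Python before invoking SCons (the toolchain path is a placeholder):

import os

os.environ['RTT_CC'] = 'gcc'                                   # selects the PLATFORM == 'gcc' branch
os.environ['RTT_EXEC_PATH'] = '/opt/avr32-gnu-toolchain/bin'   # placeholder install location
# rtconfig.py then derives the tool names from PREFIX, e.g. CC == 'avr32-gcc',
# and CFLAGS carries '-mpart=uc3b0256 -DBOARD=USERBOARD ...'.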
azumimuo/family-xbmc-addon
plugin.video.specto/resources/lib/resolvers/cloudyvideos.py
10
2259
# -*- coding: utf-8 -*- ''' Specto Add-on Copyright (C) 2015 lambda This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. ''' import re,urllib,time from resources.lib.libraries import client from resources.lib.libraries import jsunpack def resolve(url): try: url = url.replace('/embed-', '/') url = re.compile('//.+?/([\w]+)').findall(url)[0] page = 'http://cloudyvideos.com/%s' % url result = client.request(page, close=False) if '>File Not Found<' in result: raise Exception() post = {} f = client.parseDOM(result, 'Form', attrs = {'action': ''}) k = client.parseDOM(f, 'input', ret='name', attrs = {'type': 'hidden'}) for i in k: post.update({i: client.parseDOM(f, 'input', ret='value', attrs = {'name': i})[0]}) post = urllib.urlencode(post) for i in range(0, 5): try: result = client.request(page, post=post, close=False) url = re.compile("file *: *'(.+?)'").findall(result) if len(url) == 0: result = re.compile('(eval.*?\)\)\))').findall(result) result = [i for i in result if '|download|' in i][0] result = jsunpack.unpack(result) url = client.parseDOM(result, 'embed', ret='src') url += re.compile("file *: *[\'|\"](.+?)[\'|\"]").findall(result) url = [i for i in url if not i.endswith('.srt')] url = 'http://' + url[0].split('://', 1)[-1] return url except: time.sleep(1) except: return
gpl-2.0
4,380,324,018,455,343,600
-98,584,879,873,343,230
33.753846
102
0.563081
false
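cloudyvideos.py above is a Specto/Kodi host resolver that scrapes cloudyvideos.com and unpacks the page's packed JavaScript to recover the stream URL. A hypothetical call (the host is long defunct, and the resources.lib.* import assumes the add-on's directory layout is on the path):

from resources.lib.resolvers import cloudyvideos

stream = cloudyvideos.resolve('http://cloudyvideos.com/embed-abcd1234')
if stream:                    # resolve() returns None when anything fails
    print(stream)             # otherwise a direct http:// URL to the video file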
456838/usefulCode
YHamburgGit/freeline.py
11
1696
#!/usr/bin/python # -*- coding:utf-8 -*- from __future__ import print_function import sys from argparse import ArgumentParser from freeline_core.dispatcher import Dispatcher from freeline_core.init import init class Freeline(object): def __init__(self): self.dispatcher = Dispatcher() def call(self, args=None): if 'init' in args and args.init: print('init freeline project...') init() exit() self.dispatcher.call_command(args) def get_parser(): parser = ArgumentParser() parser.add_argument('-v', '--version', action='store_true', help='show version') parser.add_argument('-f', '--cleanBuild', action='store_true', help='force to execute a clean build') parser.add_argument('-w', '--wait', action='store_true', help='make application wait for debugger') parser.add_argument('-a', '--all', action='store_true', help="together with '-f', freeline will force to clean build all projects.") parser.add_argument('-c', '--clean', action='store_true', help='clean cache directory and workspace') parser.add_argument('-d', '--debug', action='store_true', help='show freeline debug output (NOT DEBUG APPLICATION)') # parser.add_argument('-i', '--init', action='store_true', help='init freeline project') parser.parse_args() return parser def main(): if sys.version_info > (3, 0): print('Freeline only support Python 2.7+ now. Please use the correct version of Python for freeline.') exit() parser = get_parser() args = parser.parse_args() freeline = Freeline() freeline.call(args=args) if __name__ == '__main__': main()
apache-2.0
6,171,471,032,156,372,000
-182,941,812,058,492,600
32.92
120
0.637382
false
indro/t2c
libs/external_libs/Pygments-0.11.1/pygments/styles/friendly.py
24
2508
# -*- coding: utf-8 -*- """ pygments.styles.friendly ~~~~~~~~~~~~~~~~~~~~~~~~ A modern style based on the VIM pyte theme. :copyright: 2006-2007 by Georg Brandl, Armin Ronacher. :license: BSD, see LICENSE for more details. """ from pygments.style import Style from pygments.token import Keyword, Name, Comment, String, Error, \ Number, Operator, Generic, Whitespace class FriendlyStyle(Style): """ A modern style based on the VIM pyte theme. """ background_color = "#f0f0f0" default_style = "" styles = { Whitespace: "#bbbbbb", Comment: "italic #60a0b0", Comment.Preproc: "noitalic #007020", Comment.Special: "noitalic bg:#fff0f0", Keyword: "bold #007020", Keyword.Pseudo: "nobold", Keyword.Type: "nobold #902000", Operator: "#666666", Operator.Word: "bold #007020", Name.Builtin: "#007020", Name.Function: "#06287e", Name.Class: "bold #0e84b5", Name.Namespace: "bold #0e84b5", Name.Exception: "#007020", Name.Variable: "#bb60d5", Name.Constant: "#60add5", Name.Label: "bold #002070", Name.Entity: "bold #d55537", Name.Attribute: "#4070a0", Name.Tag: "bold #062873", Name.Decorator: "bold #555555", String: "#4070a0", String.Doc: "italic", String.Interpol: "italic #70a0d0", String.Escape: "bold #4070a0", String.Regex: "#235388", String.Symbol: "#517918", String.Other: "#c65d09", Number: "#40a070", Generic.Heading: "bold #000080", Generic.Subheading: "bold #800080", Generic.Deleted: "#A00000", Generic.Inserted: "#00A000", Generic.Error: "#FF0000", Generic.Emph: "italic", Generic.Strong: "bold", Generic.Prompt: "bold #c65d09", Generic.Output: "#888", Generic.Traceback: "#04D", Error: "border:#FF0000" }
mit
1,664,418,439,217,473,000
-1,997,691,721,533,664,300
33.833333
67
0.448963
false
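friendly.py above defines the Pygments "friendly" colour scheme as a Style subclass. A minimal sketch of applying it through the standard Pygments API (the style also ships under the built-in name 'friendly'):

from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter
from pygments.styles.friendly import FriendlyStyle

code = "def add(a, b):\n    return a + b\n"
html = highlight(code, PythonLexer(), HtmlFormatter(style=FriendlyStyle))
css = HtmlFormatter(style=FriendlyStyle).get_style_defs('.highlight')  # matching stylesheet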
kuiwei/edx-platform
common/lib/xmodule/xmodule/tests/test_peer_grading.py
33
16061
import unittest import json import logging from mock import Mock, patch from webob.multidict import MultiDict from xblock.field_data import DictFieldData from xblock.fields import ScopeIds from opaque_keys.edx.locations import Location, SlashSeparatedCourseKey from xmodule.tests import get_test_system, get_test_descriptor_system from xmodule.tests.test_util_open_ended import DummyModulestore from xmodule.open_ended_grading_classes.peer_grading_service import MockPeerGradingService from xmodule.peer_grading_module import PeerGradingModule, PeerGradingDescriptor, MAX_ALLOWED_FEEDBACK_LENGTH from xmodule.modulestore.exceptions import ItemNotFoundError, NoPathToItem log = logging.getLogger(__name__) class PeerGradingModuleTest(unittest.TestCase, DummyModulestore): """ Test peer grading xmodule at the unit level. More detailed tests are difficult, as the module relies on an external grading service. """ course_id = SlashSeparatedCourseKey('edX', 'open_ended', '2012_Fall') problem_location = course_id.make_usage_key("peergrading", "PeerGradingSample") coe_location = course_id.make_usage_key("combinedopenended", "SampleQuestion") calibrated_dict = {'location': "blah"} coe_dict = {'location': coe_location.to_deprecated_string()} save_dict = MultiDict({ 'location': "blah", 'submission_id': 1, 'submission_key': "", 'score': 1, 'feedback': "", 'submission_flagged': False, 'answer_unknown': False, }) save_dict.extend(('rubric_scores[]', val) for val in (0, 1)) def get_module_system(self, descriptor): test_system = get_test_system(self.course_id) test_system.open_ended_grading_interface = None return test_system def setUp(self): """ Create a peer grading module from a test system @return: """ self.setup_modulestore(self.course_id.course) self.peer_grading = self.get_module_from_location(self.problem_location) self.coe = self.get_module_from_location(self.coe_location) def test_module_closed(self): """ Test if peer grading is closed @return: """ closed = self.peer_grading.closed() self.assertFalse(closed) def test_get_html(self): """ Test to see if the module can be rendered @return: """ _html = self.peer_grading.get_html() def test_get_data(self): """ Try getting data from the external grading service @return: """ success, _data = self.peer_grading.query_data_for_location(self.problem_location) self.assertTrue(success) def test_get_score_none(self): """ Test getting the score. """ score = self.peer_grading.get_score() # Score should be None. 
self.assertIsNone(score['score']) def test_get_max_score(self): """ Test getting the max score @return: """ max_score = self.peer_grading.max_score() self.assertEquals(max_score, None) def get_next_submission(self): """ Test to see if we can get the next mock submission @return: """ success, _next_submission = self.peer_grading.get_next_submission({'location': 'blah'}) self.assertEqual(success, True) def test_save_grade(self): """ Test if we can save the grade @return: """ response = self.peer_grading.save_grade(self.save_dict) self.assertEqual(response['success'], True) def test_is_student_calibrated(self): """ Check to see if the student has calibrated yet @return: """ response = self.peer_grading.is_student_calibrated(self.calibrated_dict) self.assertTrue(response['success']) def test_show_calibration_essay(self): """ Test showing the calibration essay @return: """ response = self.peer_grading.show_calibration_essay(self.calibrated_dict) self.assertTrue(response['success']) def test_save_calibration_essay(self): """ Test saving the calibration essay @return: """ response = self.peer_grading.save_calibration_essay(self.save_dict) self.assertTrue(response['success']) def test_peer_grading_problem(self): """ See if we can render a single problem @return: """ response = self.peer_grading.peer_grading_problem(self.coe_dict) self.assertTrue(response['success']) def test___find_corresponding_module_for_location_exceptions(self): """ Unit test for the exception cases of __find_corresponding_module_for_location Mainly for diff coverage @return: """ # pylint: disable=protected-access with self.assertRaises(ItemNotFoundError): self.peer_grading._find_corresponding_module_for_location( Location('org', 'course', 'run', 'category', 'name', 'revision') ) def test_get_instance_state(self): """ Get the instance state dict @return: """ self.peer_grading.get_instance_state() def test_save_grade_with_long_feedback(self): """ Test if feedback is too long save_grade() should return error message. """ feedback_fragment = "This is very long feedback." self.save_dict["feedback"] = feedback_fragment * ( (MAX_ALLOWED_FEEDBACK_LENGTH / len(feedback_fragment) + 1) ) response = self.peer_grading.save_grade(self.save_dict) # Should not succeed. self.assertEqual(response['success'], False) self.assertEqual( response['error'], "Feedback is too long, Max length is {0} characters.".format( MAX_ALLOWED_FEEDBACK_LENGTH ) ) def test_get_score_success_fails(self): """ Test if query_data_for_location not succeed, their score is None. """ score_dict = self.get_score(False, 0, 0) # Score dict should be None. self.assertIsNone(score_dict) def test_get_score(self): """ Test if the student has graded equal to required submissions, their score is 1.0. """ score_dict = self.get_score(True, 3, 3) # Score should be 1.0. self.assertEqual(score_dict["score"], 1.0) # Testing score after data is stored in student_data_for_location in xmodule. _score_dict = self.peer_grading.get_score() # Score should be 1.0. self.assertEqual(_score_dict["score"], 1.0) def test_get_score_zero(self): """ Test if the student has graded not equal to required submissions, their score is 0.0. """ score_dict = self.get_score(True, 2, 3) # Score should be 0.0. self.assertEqual(score_dict["score"], 0.0) def get_score(self, success, count_graded, count_required): self.peer_grading.use_for_single_location_local = True self.peer_grading.graded = True # Patch for external grading service. 
with patch('xmodule.peer_grading_module.PeerGradingModule.query_data_for_location') as mock_query_data_for_location: mock_query_data_for_location.return_value = ( success, {"count_graded": count_graded, "count_required": count_required} ) # Returning score dict. return self.peer_grading.get_score() class MockPeerGradingServiceProblemList(MockPeerGradingService): def get_problem_list(self, course_id, grader_id): return {'success': True, 'problem_list': [ { "num_graded": 3, "num_pending": 681, "num_required": 3, "location": course_id.make_usage_key('combinedopenended', 'SampleQuestion'), "problem_name": "Peer-Graded Essay" }, ]} class PeerGradingModuleScoredTest(unittest.TestCase, DummyModulestore): """ Test peer grading xmodule at the unit level. More detailed tests are difficult, as the module relies on an external grading service. """ course_id = SlashSeparatedCourseKey('edX', 'open_ended', '2012_Fall') problem_location = course_id.make_usage_key("peergrading", "PeerGradingScored") def get_module_system(self, descriptor): test_system = get_test_system(self.course_id) test_system.open_ended_grading_interface = None return test_system def setUp(self): """ Create a peer grading module from a test system @return: """ self.setup_modulestore(self.course_id.course) def test_metadata_load(self): peer_grading = self.get_module_from_location(self.problem_location) self.assertFalse(peer_grading.closed()) def test_problem_list(self): """ Test to see if a peer grading problem list can be correctly initialized. """ # Initialize peer grading module. peer_grading = self.get_module_from_location(self.problem_location) # Ensure that it cannot find any peer grading. html = peer_grading.peer_grading() self.assertNotIn("Peer-Graded", html) # Swap for our mock class, which will find peer grading. peer_grading.peer_gs = MockPeerGradingServiceProblemList() html = peer_grading.peer_grading() self.assertIn("Peer-Graded", html) class PeerGradingModuleLinkedTest(unittest.TestCase, DummyModulestore): """ Test peer grading that is linked to an open ended module. """ course_id = SlashSeparatedCourseKey('edX', 'open_ended', '2012_Fall') problem_location = course_id.make_usage_key("peergrading", "PeerGradingLinked") coe_location = course_id.make_usage_key("combinedopenended", "SampleQuestion") def get_module_system(self, descriptor): test_system = get_test_system(self.course_id) test_system.open_ended_grading_interface = None return test_system def setUp(self): """ Create a peer grading module from a test system. """ self.setup_modulestore(self.course_id.course) @property def field_data(self): """ Setup the proper field data for a peer grading module. """ return DictFieldData({ 'data': '<peergrading/>', 'location': self.problem_location, 'use_for_single_location': True, 'link_to_location': self.coe_location.to_deprecated_string(), 'graded': True, }) @property def scope_ids(self): """ Return the proper scope ids for the peer grading module. """ return ScopeIds(None, None, self.problem_location, self.problem_location) def _create_peer_grading_descriptor_with_linked_problem(self): # Initialize the peer grading module. system = get_test_descriptor_system() return system.construct_xblock_from_class( PeerGradingDescriptor, field_data=self.field_data, scope_ids=self.scope_ids ) def _create_peer_grading_with_linked_problem(self, location, valid_linked_descriptor=True): """ Create a peer grading problem with a linked location. """ # Mock the linked problem descriptor. 
linked_descriptor = Mock() linked_descriptor.location = location # Mock the peer grading descriptor. pg_descriptor = Mock() pg_descriptor.location = self.problem_location if valid_linked_descriptor: pg_descriptor.get_required_module_descriptors = lambda: [linked_descriptor, ] else: pg_descriptor.get_required_module_descriptors = lambda: [] test_system = self.get_module_system(pg_descriptor) # Initialize the peer grading module. peer_grading = PeerGradingModule( pg_descriptor, test_system, self.field_data, self.scope_ids, ) return peer_grading def _get_descriptor_with_invalid_link(self, exception_to_raise): """ Ensure that a peer grading descriptor with an invalid link will return an empty list. """ # Create a descriptor, and make loading an item throw an error. descriptor = self._create_peer_grading_descriptor_with_linked_problem() descriptor.system.load_item = Mock(side_effect=exception_to_raise) # Ensure that modules is a list of length 0. modules = descriptor.get_required_module_descriptors() self.assertIsInstance(modules, list) self.assertEqual(len(modules), 0) def test_descriptor_with_nopath(self): """ Test to see if a descriptor with a NoPathToItem error when trying to get its linked module behaves properly. """ self._get_descriptor_with_invalid_link(NoPathToItem) def test_descriptor_with_item_not_found(self): """ Test to see if a descriptor with an ItemNotFound error when trying to get its linked module behaves properly. """ self._get_descriptor_with_invalid_link(ItemNotFoundError) def test_invalid_link(self): """ Ensure that a peer grading problem with no linked locations stays in panel mode. """ # Setup the peer grading module with no linked locations. peer_grading = self._create_peer_grading_with_linked_problem(self.coe_location, valid_linked_descriptor=False) self.assertFalse(peer_grading.use_for_single_location_local) self.assertTrue(peer_grading.use_for_single_location) def test_linked_problem(self): """ Ensure that a peer grading problem with a linked location loads properly. """ # Setup the peer grading module with the proper linked location. peer_grading = self._create_peer_grading_with_linked_problem(self.coe_location) # Ensure that it is properly setup. self.assertTrue(peer_grading.use_for_single_location) def test_linked_ajax(self): """ Ensure that a peer grading problem with a linked location responds to ajax calls. """ # Setup the peer grading module with the proper linked location. peer_grading = self._create_peer_grading_with_linked_problem(self.coe_location) # If we specify a location, it will render the problem for that location. data = peer_grading.handle_ajax('problem', {'location': self.coe_location.to_deprecated_string()}) self.assertTrue(json.loads(data)['success']) # If we don't specify a location, it should use the linked location. data = peer_grading.handle_ajax('problem', {}) self.assertTrue(json.loads(data)['success']) def test_linked_score(self): """ Ensure that a peer grading problem with a linked location is properly scored. """ # Setup the peer grading module with the proper linked location. peer_grading = self._create_peer_grading_with_linked_problem(self.coe_location) score_dict = peer_grading.get_score() self.assertEqual(score_dict['score'], 1) self.assertEqual(score_dict['total'], 1) def test_get_next_submission(self): """ Ensure that a peer grading problem with a linked location can get a submission to score. """ # Setup the peer grading module with the proper linked location. 
peer_grading = self._create_peer_grading_with_linked_problem(self.coe_location) data = peer_grading.handle_ajax('get_next_submission', {'location': self.coe_location}) self.assertEqual(json.loads(data)['submission_id'], 1)
agpl-3.0
-8,866,350,184,400,153,000
-2,733,399,254,727,919,000
33.839479
124
0.626736
false
chrisnatali/networkx
networkx/drawing/tests/test_layout.py
43
1870
"""Unit tests for layout functions.""" import sys from nose import SkipTest from nose.tools import assert_equal import networkx as nx class TestLayout(object): numpy=1 # nosetests attribute, use nosetests -a 'not numpy' to skip test @classmethod def setupClass(cls): global numpy try: import numpy except ImportError: raise SkipTest('numpy not available.') def setUp(self): self.Gi=nx.grid_2d_graph(5,5) self.Gs=nx.Graph() self.Gs.add_path('abcdef') self.bigG=nx.grid_2d_graph(25,25) #bigger than 500 nodes for sparse def test_smoke_int(self): G=self.Gi vpos=nx.random_layout(G) vpos=nx.circular_layout(G) vpos=nx.spring_layout(G) vpos=nx.fruchterman_reingold_layout(G) vpos=nx.spectral_layout(G) vpos=nx.spectral_layout(self.bigG) vpos=nx.shell_layout(G) def test_smoke_string(self): G=self.Gs vpos=nx.random_layout(G) vpos=nx.circular_layout(G) vpos=nx.spring_layout(G) vpos=nx.fruchterman_reingold_layout(G) vpos=nx.spectral_layout(G) vpos=nx.shell_layout(G) def test_adjacency_interface_numpy(self): A=nx.to_numpy_matrix(self.Gs) pos=nx.drawing.layout._fruchterman_reingold(A) pos=nx.drawing.layout._fruchterman_reingold(A,dim=3) assert_equal(pos.shape,(6,3)) def test_adjacency_interface_scipy(self): try: import scipy except ImportError: raise SkipTest('scipy not available.') A=nx.to_scipy_sparse_matrix(self.Gs,dtype='d') pos=nx.drawing.layout._sparse_fruchterman_reingold(A) pos=nx.drawing.layout._sparse_spectral(A) pos=nx.drawing.layout._sparse_fruchterman_reingold(A,dim=3) assert_equal(pos.shape,(6,3))
bsd-3-clause
-3,808,977,403,832,460,300
6,271,881,260,821,423,000
29.655738
76
0.627273
false
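test_layout.py above smoke-tests the NetworkX layout functions. A small usage sketch of the same calls outside the test harness; each layout returns a dict mapping every node to its coordinates:

import networkx as nx

G = nx.grid_2d_graph(5, 5)
pos = nx.spring_layout(G)       # force-directed (Fruchterman-Reingold)
ring = nx.circular_layout(G)    # nodes evenly spaced on a circle
print(len(pos), pos[(0, 0)])    # 25 entries; each value is a length-2 coordinate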
AnishShah/tensorflow
tensorflow/compiler/tests/adadelta_test.py
16
5553
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Adadelta Optimizer.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.compiler.tests import xla_test from tensorflow.python.framework import constant_op from tensorflow.python.ops import resource_variable_ops from tensorflow.python.ops import variables from tensorflow.python.platform import test from tensorflow.python.training import adadelta class AdadeltaOptimizerTest(xla_test.XLATestCase): def testBasic(self): num_updates = 4 # number of ADADELTA steps to perform for dtype in self.float_types: with self.cached_session(), self.test_scope(): for grad in [0.2, 0.1, 0.01]: for lr in [1.0, 0.5, 0.1]: var0_init = [1.0, 2.0] var1_init = [3.0, 4.0] var0 = resource_variable_ops.ResourceVariable( var0_init, dtype=dtype) var1 = resource_variable_ops.ResourceVariable( var1_init, dtype=dtype) grads = constant_op.constant([grad, grad], dtype=dtype) accum = 0.0 accum_update = 0.0 # ADADELTA gradient optimizer rho = 0.95 epsilon = 1e-8 adadelta_opt = adadelta.AdadeltaOptimizer( learning_rate=lr, rho=rho, epsilon=epsilon) adadelta_update = adadelta_opt.apply_gradients( zip([grads, grads], [var0, var1])) self.evaluate(variables.global_variables_initializer()) opt_vars = adadelta_opt.variables() self.assertStartsWith(opt_vars[0].name, var0._shared_name) self.assertStartsWith(opt_vars[1].name, var0._shared_name) self.assertStartsWith(opt_vars[2].name, var1._shared_name) self.assertStartsWith(opt_vars[3].name, var1._shared_name) self.assertEqual(4, len(opt_vars)) # Assign slots slot = [None] * 2 slot_update = [None] * 2 self.assertEqual(["accum", "accum_update"], adadelta_opt.get_slot_names()) slot[0] = adadelta_opt.get_slot(var0, "accum") self.assertEquals(slot[0].get_shape(), var0.get_shape()) self.assertFalse(slot[0] in variables.trainable_variables()) slot_update[0] = adadelta_opt.get_slot(var0, "accum_update") self.assertEquals(slot_update[0].get_shape(), var0.get_shape()) self.assertFalse(slot_update[0] in variables.trainable_variables()) slot[1] = adadelta_opt.get_slot(var1, "accum") self.assertEquals(slot[1].get_shape(), var1.get_shape()) self.assertFalse(slot[1] in variables.trainable_variables()) slot_update[1] = adadelta_opt.get_slot(var1, "accum_update") self.assertEquals(slot_update[1].get_shape(), var1.get_shape()) self.assertFalse(slot_update[1] in variables.trainable_variables()) # Fetch params to validate initial values self.assertAllClose(var0_init, self.evaluate(var0)) self.assertAllClose(var1_init, self.evaluate(var1)) update = [None] * num_updates tot_update = 0 for step in range(num_updates): # Run adadelta update for comparison self.evaluate(adadelta_update) # Perform initial update without previous accum values accum = accum * rho + (grad**2) * (1 - rho) update[step] = ( np.sqrt(accum_update + epsilon) * 
(1. / np.sqrt(accum + epsilon)) * grad) accum_update = ( accum_update * rho + (update[step]**2) * (1.0 - rho)) tot_update += update[step] * lr # Check that the accumulators have been updated for slot_idx in range(2): self.assertAllCloseAccordingToType( np.array([accum, accum], dtype=dtype), self.evaluate(slot[slot_idx]), rtol=1e-5) self.assertAllCloseAccordingToType( np.array([accum_update, accum_update], dtype=dtype), self.evaluate(slot_update[slot_idx]), rtol=1e-5) # Check that the parameters have been updated self.assertAllCloseAccordingToType( np.array( [var0_init[0] - tot_update, var0_init[1] - tot_update], dtype=dtype), self.evaluate(var0), rtol=1e-5) self.assertAllCloseAccordingToType( np.array( [var1_init[0] - tot_update, var1_init[1] - tot_update], dtype=dtype), self.evaluate(var1), rtol=1e-5) if __name__ == "__main__": test.main()
apache-2.0
-4,673,924,313,791,500,000
-4,178,232,881,178,441,000
40.440299
80
0.585269
false
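adadelta_test.py above checks the XLA Adadelta optimizer against a NumPy reference computed inline in the test. A stand-alone sketch of that reference update rule, with rho and epsilon taken from the test and the remaining values made up:

import numpy as np

lr, rho, epsilon = 1.0, 0.95, 1e-8
var = np.array([1.0, 2.0])
grad = np.array([0.1, 0.1])
accum = np.zeros_like(var)           # running average of squared gradients
accum_update = np.zeros_like(var)    # running average of squared updates

for _ in range(4):                   # num_updates in the test
    accum = rho * accum + (1 - rho) * grad ** 2
    update = np.sqrt(accum_update + epsilon) / np.sqrt(accum + epsilon) * grad
    accum_update = rho * accum_update + (1 - rho) * update ** 2
    var -= lr * update               # parameters move by the scaled update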
LoHChina/nova
nova/api/openstack/compute/contrib/cells.py
31
13462
# Copyright 2011-2012 OpenStack Foundation # All Rights Reserved. # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """The cells extension.""" from oslo_config import cfg import oslo_messaging as messaging from oslo_utils import strutils from oslo_utils import timeutils import six from webob import exc from nova.api.openstack import common from nova.api.openstack import extensions from nova.cells import rpcapi as cells_rpcapi from nova import context as nova_context from nova import exception from nova.i18n import _ from nova import rpc CONF = cfg.CONF CONF.import_opt('name', 'nova.cells.opts', group='cells') CONF.import_opt('capabilities', 'nova.cells.opts', group='cells') authorize = extensions.extension_authorizer('compute', 'cells') def _filter_keys(item, keys): """Filters all model attributes except for keys item is a dict """ return {k: v for k, v in six.iteritems(item) if k in keys} def _fixup_cell_info(cell_info, keys): """If the transport_url is present in the cell, derive username, rpc_host, and rpc_port from it. """ if 'transport_url' not in cell_info: return # Disassemble the transport URL transport_url = cell_info.pop('transport_url') try: transport_url = rpc.get_transport_url(transport_url) except messaging.InvalidTransportURL: # Just go with None's for key in keys: cell_info.setdefault(key, None) return if not transport_url.hosts: return transport_host = transport_url.hosts[0] transport_field_map = {'rpc_host': 'hostname', 'rpc_port': 'port'} for key in keys: if key in cell_info: continue transport_field = transport_field_map.get(key, key) cell_info[key] = getattr(transport_host, transport_field) def _scrub_cell(cell, detail=False): keys = ['name', 'username', 'rpc_host', 'rpc_port'] if detail: keys.append('capabilities') cell_info = _filter_keys(cell, keys + ['transport_url']) _fixup_cell_info(cell_info, keys) cell_info['type'] = 'parent' if cell['is_parent'] else 'child' return cell_info class Controller(object): """Controller for Cell resources.""" def __init__(self, ext_mgr): self.cells_rpcapi = cells_rpcapi.CellsAPI() self.ext_mgr = ext_mgr def _get_cells(self, ctxt, req, detail=False): """Return all cells.""" # Ask the CellsManager for the most recent data items = self.cells_rpcapi.get_cell_info_for_neighbors(ctxt) items = common.limited(items, req) items = [_scrub_cell(item, detail=detail) for item in items] return dict(cells=items) @common.check_cells_enabled def index(self, req): """Return all cells in brief.""" ctxt = req.environ['nova.context'] authorize(ctxt) return self._get_cells(ctxt, req) @common.check_cells_enabled def detail(self, req): """Return all cells in detail.""" ctxt = req.environ['nova.context'] authorize(ctxt) return self._get_cells(ctxt, req, detail=True) @common.check_cells_enabled def info(self, req): """Return name and capabilities for this cell.""" context = req.environ['nova.context'] authorize(context) cell_capabs = {} my_caps = CONF.cells.capabilities for cap in my_caps: key, value = cap.split('=') cell_capabs[key] = value 
cell = {'name': CONF.cells.name, 'type': 'self', 'rpc_host': None, 'rpc_port': 0, 'username': None, 'capabilities': cell_capabs} return dict(cell=cell) @common.check_cells_enabled def capacities(self, req, id=None): """Return capacities for a given cell or all cells.""" # TODO(kaushikc): return capacities as a part of cell info and # cells detail calls in v3, along with capabilities if not self.ext_mgr.is_loaded('os-cell-capacities'): raise exc.HTTPNotFound() context = req.environ['nova.context'] authorize(context) try: capacities = self.cells_rpcapi.get_capacities(context, cell_name=id) except exception.CellNotFound: msg = (_("Cell %(id)s not found.") % {'id': id}) raise exc.HTTPNotFound(explanation=msg) return dict(cell={"capacities": capacities}) @common.check_cells_enabled def show(self, req, id): """Return data about the given cell name. 'id' is a cell name.""" context = req.environ['nova.context'] authorize(context) try: cell = self.cells_rpcapi.cell_get(context, id) except exception.CellNotFound as e: raise exc.HTTPNotFound(explanation=e.format_message()) return dict(cell=_scrub_cell(cell)) @common.check_cells_enabled def delete(self, req, id): """Delete a child or parent cell entry. 'id' is a cell name.""" context = req.environ['nova.context'] authorize(context) authorize(context, action="delete") # NOTE(eliqiao): back-compatible with db layer hard-code admin # permission checks. nova_context.require_admin_context(context) try: num_deleted = self.cells_rpcapi.cell_delete(context, id) except exception.CellsUpdateUnsupported as e: raise exc.HTTPForbidden(explanation=e.format_message()) if num_deleted == 0: raise exc.HTTPNotFound() return {} def _validate_cell_name(self, cell_name): """Validate cell name is not empty and doesn't contain '!' or '.'.""" if not cell_name: msg = _("Cell name cannot be empty") raise exc.HTTPBadRequest(explanation=msg) if '!' in cell_name or '.' in cell_name: msg = _("Cell name cannot contain '!' or '.'") raise exc.HTTPBadRequest(explanation=msg) def _validate_cell_type(self, cell_type): """Validate cell_type is 'parent' or 'child'.""" if cell_type not in ['parent', 'child']: msg = _("Cell type must be 'parent' or 'child'") raise exc.HTTPBadRequest(explanation=msg) def _normalize_cell(self, cell, existing=None): """Normalize input cell data. Normalizations include: * Converting cell['type'] to is_parent boolean. * Merging existing transport URL with transport information. """ # Start with the cell type conversion if 'type' in cell: self._validate_cell_type(cell['type']) cell['is_parent'] = cell['type'] == 'parent' del cell['type'] # Avoid cell type being overwritten to 'child' elif existing: cell['is_parent'] = existing['is_parent'] else: cell['is_parent'] = False # Now we disassemble the existing transport URL... 
transport_url = existing.get('transport_url') if existing else None transport_url = rpc.get_transport_url(transport_url) if 'rpc_virtual_host' in cell: transport_url.virtual_host = cell.pop('rpc_virtual_host') if not transport_url.hosts: transport_url.hosts.append(messaging.TransportHost()) transport_host = transport_url.hosts[0] if cell.get('rpc_port') is not None: try: cell['rpc_port'] = int(cell['rpc_port']) except ValueError: raise exc.HTTPBadRequest( explanation=_('rpc_port must be integer')) # Copy over the input fields transport_field_map = { 'username': 'username', 'password': 'password', 'hostname': 'rpc_host', 'port': 'rpc_port', } for key, input_field in transport_field_map.items(): # Only override the value if we're given an override if input_field in cell: setattr(transport_host, key, cell.pop(input_field)) # Now set the transport URL cell['transport_url'] = str(transport_url) @common.check_cells_enabled def create(self, req, body): """Create a child cell entry.""" context = req.environ['nova.context'] authorize(context) authorize(context, action="create") # NOTE(eliqiao): back-compatible with db layer hard-code admin # permission checks. nova_context.require_admin_context(context) if 'cell' not in body: msg = _("No cell information in request") raise exc.HTTPBadRequest(explanation=msg) cell = body['cell'] if 'name' not in cell: msg = _("No cell name in request") raise exc.HTTPBadRequest(explanation=msg) self._validate_cell_name(cell['name']) self._normalize_cell(cell) try: cell = self.cells_rpcapi.cell_create(context, cell) except exception.CellsUpdateUnsupported as e: raise exc.HTTPForbidden(explanation=e.format_message()) return dict(cell=_scrub_cell(cell)) @common.check_cells_enabled def update(self, req, id, body): """Update a child cell entry. 'id' is the cell name to update.""" context = req.environ['nova.context'] authorize(context) authorize(context, action="update") # NOTE(eliqiao): back-compatible with db layer hard-code admin # permission checks. nova_context.require_admin_context(context) if 'cell' not in body: msg = _("No cell information in request") raise exc.HTTPBadRequest(explanation=msg) cell = body['cell'] cell.pop('id', None) if 'name' in cell: self._validate_cell_name(cell['name']) try: # NOTE(Vek): There is a race condition here if multiple # callers are trying to update the cell # information simultaneously. Since this # operation is administrative in nature, and # will be going away in the future, I don't see # it as much of a problem... 
existing = self.cells_rpcapi.cell_get(context, id) except exception.CellNotFound: raise exc.HTTPNotFound() self._normalize_cell(cell, existing) try: cell = self.cells_rpcapi.cell_update(context, id, cell) except exception.CellNotFound: raise exc.HTTPNotFound() except exception.CellsUpdateUnsupported as e: raise exc.HTTPForbidden(explanation=e.format_message()) return dict(cell=_scrub_cell(cell)) @common.check_cells_enabled def sync_instances(self, req, body): """Tell all cells to sync instance info.""" context = req.environ['nova.context'] authorize(context) authorize(context, action="sync_instances") project_id = body.pop('project_id', None) deleted = body.pop('deleted', False) updated_since = body.pop('updated_since', None) if body: msg = _("Only 'updated_since', 'project_id' and 'deleted' are " "understood.") raise exc.HTTPBadRequest(explanation=msg) if isinstance(deleted, six.string_types): try: deleted = strutils.bool_from_string(deleted, strict=True) except ValueError as err: raise exc.HTTPBadRequest(explanation=six.text_type(err)) if updated_since: try: timeutils.parse_isotime(updated_since) except ValueError: msg = _('Invalid changes-since value') raise exc.HTTPBadRequest(explanation=msg) self.cells_rpcapi.sync_instances(context, project_id=project_id, updated_since=updated_since, deleted=deleted) class Cells(extensions.ExtensionDescriptor): """Enables cells-related functionality such as adding neighbor cells, listing neighbor cells, and getting the capabilities of the local cell. """ name = "Cells" alias = "os-cells" namespace = "http://docs.openstack.org/compute/ext/cells/api/v1.1" updated = "2013-05-14T00:00:00Z" def get_resources(self): coll_actions = { 'detail': 'GET', 'info': 'GET', 'sync_instances': 'POST', 'capacities': 'GET', } memb_actions = { 'capacities': 'GET', } res = extensions.ResourceExtension('os-cells', Controller(self.ext_mgr), collection_actions=coll_actions, member_actions=memb_actions) return [res]
apache-2.0
8,312,020,826,239,267,000
7,259,008,127,952,612,000
35.383784
78
0.600282
false
syhpoon/xyzcmd
libxyz/ui/size.py
1
1143
#-*- coding: utf8 -* # # Max E. Kuznecov ~syhpoon <[email protected]> 2008 # # This file is part of XYZCommander. # XYZCommander is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # XYZCommander is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser Public License for more details. # You should have received a copy of the GNU Lesser Public License # along with XYZCommander. If not, see <http://www.gnu.org/licenses/>. class Size(object): """ Simple widget size wrapper """ def __init__(self, rows, cols): self.rows = rows self.cols = cols #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def __str__(self): return "<Size: %d, %d>" % (self.rows, self.cols) #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def __repr__(self): return self.__str__()
gpl-3.0
-1,703,085,297,653,277,000
-4,770,466,625,592,704,000
32.617647
70
0.619423
false
junbochen/pylearn2
pylearn2/datasets/tests/test_hdf5.py
47
7240
""" HDF5 dataset tests. """ import numpy as np import os import tempfile from pylearn2.config import yaml_parse from pylearn2.testing.datasets import ( random_dense_design_matrix, random_one_hot_dense_design_matrix, random_one_hot_topological_dense_design_matrix) from pylearn2.testing.skip import skip_if_no_h5py def test_hdf5_design_matrix(): """Train using an HDF5 dataset.""" skip_if_no_h5py() import h5py # save random data to HDF5 handle, filename = tempfile.mkstemp() dataset = random_one_hot_dense_design_matrix(np.random.RandomState(1), num_examples=10, dim=5, num_classes=3) with h5py.File(filename, 'w') as f: f.create_dataset('X', data=dataset.get_design_matrix()) f.create_dataset('y', data=dataset.get_targets()) # instantiate Train object trainer = yaml_parse.load(design_matrix_yaml % {'filename': filename}) trainer.main_loop() # cleanup os.remove(filename) def test_hdf5_topo_view(): """Train using an HDF5 dataset with topo_view instead of X.""" skip_if_no_h5py() import h5py # save random data to HDF5 handle, filename = tempfile.mkstemp() dataset = random_one_hot_topological_dense_design_matrix( np.random.RandomState(1), num_examples=10, shape=(2, 2), channels=3, axes=('b', 0, 1, 'c'), num_classes=3) with h5py.File(filename, 'w') as f: f.create_dataset('topo_view', data=dataset.get_topological_view()) f.create_dataset('y', data=dataset.get_targets()) # instantiate Train object trainer = yaml_parse.load(topo_view_yaml % {'filename': filename}) trainer.main_loop() # cleanup os.remove(filename) def test_hdf5_convert_to_one_hot(): """Train using an HDF5 dataset with one-hot target conversion.""" skip_if_no_h5py() import h5py # save random data to HDF5 handle, filename = tempfile.mkstemp() dataset = random_dense_design_matrix(np.random.RandomState(1), num_examples=10, dim=5, num_classes=3) with h5py.File(filename, 'w') as f: f.create_dataset('X', data=dataset.get_design_matrix()) f.create_dataset('y', data=dataset.get_targets()) # instantiate Train object trainer = yaml_parse.load(convert_to_one_hot_yaml % {'filename': filename}) trainer.main_loop() # cleanup os.remove(filename) def test_hdf5_load_all(): """Train using an HDF5 dataset with all data loaded into memory.""" skip_if_no_h5py() import h5py # save random data to HDF5 handle, filename = tempfile.mkstemp() dataset = random_one_hot_dense_design_matrix(np.random.RandomState(1), num_examples=10, dim=5, num_classes=3) with h5py.File(filename, 'w') as f: f.create_dataset('X', data=dataset.get_design_matrix()) f.create_dataset('y', data=dataset.get_targets()) # instantiate Train object trainer = yaml_parse.load(load_all_yaml % {'filename': filename}) trainer.main_loop() # cleanup os.remove(filename) design_matrix_yaml = """ !obj:pylearn2.train.Train { dataset: &train !obj:pylearn2.datasets.hdf5.HDF5Dataset { filename: %(filename)s, X: X, y: y, }, model: !obj:pylearn2.models.mlp.MLP { layers: [ !obj:pylearn2.models.mlp.Sigmoid { layer_name: h0, dim: 10, irange: .005, }, !obj:pylearn2.models.mlp.Softmax { layer_name: y, n_classes: 3, irange: 0. 
} ], nvis: 5, }, algorithm: !obj:pylearn2.training_algorithms.sgd.SGD { batch_size: 5, learning_rate: .1, monitoring_dataset: { 'train' : *train, }, termination_criterion: !obj:pylearn2.termination_criteria.EpochCounter { max_epochs: 1, }, }, } """ topo_view_yaml = """ !obj:pylearn2.train.Train { dataset: &train !obj:pylearn2.datasets.hdf5.HDF5Dataset { filename: %(filename)s, topo_view: topo_view, y: y, }, model: !obj:pylearn2.models.mlp.MLP { layers: [ !obj:pylearn2.models.mlp.Sigmoid { layer_name: h0, dim: 10, irange: .005, }, !obj:pylearn2.models.mlp.Softmax { layer_name: y, n_classes: 3, irange: 0. } ], nvis: 12, }, algorithm: !obj:pylearn2.training_algorithms.sgd.SGD { batch_size: 5, learning_rate: .1, monitoring_dataset: { 'train' : *train, }, termination_criterion: !obj:pylearn2.termination_criteria.EpochCounter { max_epochs: 1, }, }, } """ convert_to_one_hot_yaml = """ !obj:pylearn2.train.Train { dataset: &train !obj:pylearn2.datasets.hdf5.HDF5Dataset { filename: %(filename)s, X: X, y: y, y_labels: 3 }, model: !obj:pylearn2.models.mlp.MLP { layers: [ !obj:pylearn2.models.mlp.Sigmoid { layer_name: h0, dim: 10, irange: .005, }, !obj:pylearn2.models.mlp.Softmax { layer_name: y, n_classes: 3, irange: 0. } ], nvis: 5, }, algorithm: !obj:pylearn2.training_algorithms.sgd.SGD { batch_size: 5, learning_rate: .1, monitoring_dataset: { 'train' : *train, }, termination_criterion: !obj:pylearn2.termination_criteria.EpochCounter { max_epochs: 1, }, }, } """ load_all_yaml = """ !obj:pylearn2.train.Train { dataset: &train !obj:pylearn2.datasets.hdf5.HDF5Dataset { filename: %(filename)s, X: X, y: y, load_all: 1, }, model: !obj:pylearn2.models.mlp.MLP { layers: [ !obj:pylearn2.models.mlp.Sigmoid { layer_name: h0, dim: 10, irange: .005, }, !obj:pylearn2.models.mlp.Softmax { layer_name: y, n_classes: 3, irange: 0. } ], nvis: 5, }, algorithm: !obj:pylearn2.training_algorithms.sgd.SGD { batch_size: 5, learning_rate: .1, monitoring_dataset: { 'train' : *train, }, termination_criterion: !obj:pylearn2.termination_criteria.EpochCounter { max_epochs: 1, }, }, } """
bsd-3-clause
-4,715,732,287,919,013,000
-9,179,292,473,290,366,000
27.96
79
0.509254
false
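test_hdf5.py above trains a small pylearn2 MLP from HDF5 files whose datasets are named 'X', 'y' or 'topo_view'. A sketch of producing such a file with h5py alone, shapes chosen to match the nvis: 5 / n_classes: 3 values in the YAML above:

import h5py
import numpy as np

X = np.random.rand(10, 5).astype('float32')                    # 10 examples, 5 features
y = np.eye(3, dtype='float32')[np.random.randint(0, 3, 10)]    # one-hot targets, 3 classes

with h5py.File('train.h5', 'w') as f:
    f.create_dataset('X', data=X)
    f.create_dataset('y', data=y)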
yoer/hue
desktop/core/ext-py/Pygments-1.3.1/scripts/reindent.py
194
9926
#! /usr/bin/env python # Released to the public domain, by Tim Peters, 03 October 2000. # -B option added by Georg Brandl, 2006. """reindent [-d][-r][-v] [ path ... ] -d (--dryrun) Dry run. Analyze, but don't make any changes to files. -r (--recurse) Recurse. Search for all .py files in subdirectories too. -B (--no-backup) Don't write .bak backup files. -v (--verbose) Verbose. Print informative msgs; else only names of changed files. -h (--help) Help. Print this usage information and exit. Change Python (.py) files to use 4-space indents and no hard tab characters. Also trim excess spaces and tabs from ends of lines, and remove empty lines at the end of files. Also ensure the last line ends with a newline. If no paths are given on the command line, reindent operates as a filter, reading a single source file from standard input and writing the transformed source to standard output. In this case, the -d, -r and -v flags are ignored. You can pass one or more file and/or directory paths. When a directory path, all .py files within the directory will be examined, and, if the -r option is given, likewise recursively for subdirectories. If output is not to standard output, reindent overwrites files in place, renaming the originals with a .bak extension. If it finds nothing to change, the file is left alone. If reindent does change a file, the changed file is a fixed-point for future runs (i.e., running reindent on the resulting .py file won't change it again). The hard part of reindenting is figuring out what to do with comment lines. So long as the input files get a clean bill of health from tabnanny.py, reindent should do a good job. """ __version__ = "1" import tokenize import os import sys verbose = 0 recurse = 0 dryrun = 0 no_backup = 0 def usage(msg=None): if msg is not None: print >> sys.stderr, msg print >> sys.stderr, __doc__ def errprint(*args): sep = "" for arg in args: sys.stderr.write(sep + str(arg)) sep = " " sys.stderr.write("\n") def main(): import getopt global verbose, recurse, dryrun, no_backup try: opts, args = getopt.getopt(sys.argv[1:], "drvhB", ["dryrun", "recurse", "verbose", "help", "no-backup"]) except getopt.error, msg: usage(msg) return for o, a in opts: if o in ('-d', '--dryrun'): dryrun += 1 elif o in ('-r', '--recurse'): recurse += 1 elif o in ('-v', '--verbose'): verbose += 1 elif o in ('-B', '--no-backup'): no_backup += 1 elif o in ('-h', '--help'): usage() return if not args: r = Reindenter(sys.stdin) r.run() r.write(sys.stdout) return for arg in args: check(arg) def check(file): if os.path.isdir(file) and not os.path.islink(file): if verbose: print "listing directory", file names = os.listdir(file) for name in names: fullname = os.path.join(file, name) if ((recurse and os.path.isdir(fullname) and not os.path.islink(fullname)) or name.lower().endswith(".py")): check(fullname) return if verbose: print "checking", file, "...", try: f = open(file) except IOError, msg: errprint("%s: I/O Error: %s" % (file, str(msg))) return r = Reindenter(f) f.close() if r.run(): if verbose: print "changed." if dryrun: print "But this is a dry run, so leaving it alone." else: print "reindented", file, (dryrun and "(dry run => not really)" or "") if not dryrun: if not no_backup: bak = file + ".bak" if os.path.exists(bak): os.remove(bak) os.rename(file, bak) if verbose: print "renamed", file, "to", bak f = open(file, "w") r.write(f) f.close() if verbose: print "wrote new", file else: if verbose: print "unchanged." 
class Reindenter: def __init__(self, f): self.find_stmt = 1 # next token begins a fresh stmt? self.level = 0 # current indent level # Raw file lines. self.raw = f.readlines() # File lines, rstripped & tab-expanded. Dummy at start is so # that we can use tokenize's 1-based line numbering easily. # Note that a line is all-blank iff it's "\n". self.lines = [line.rstrip('\n \t').expandtabs() + "\n" for line in self.raw] self.lines.insert(0, None) self.index = 1 # index into self.lines of next line # List of (lineno, indentlevel) pairs, one for each stmt and # comment line. indentlevel is -1 for comment lines, as a # signal that tokenize doesn't know what to do about them; # indeed, they're our headache! self.stats = [] def run(self): tokenize.tokenize(self.getline, self.tokeneater) # Remove trailing empty lines. lines = self.lines while lines and lines[-1] == "\n": lines.pop() # Sentinel. stats = self.stats stats.append((len(lines), 0)) # Map count of leading spaces to # we want. have2want = {} # Program after transformation. after = self.after = [] # Copy over initial empty lines -- there's nothing to do until # we see a line with *something* on it. i = stats[0][0] after.extend(lines[1:i]) for i in range(len(stats)-1): thisstmt, thislevel = stats[i] nextstmt = stats[i+1][0] have = getlspace(lines[thisstmt]) want = thislevel * 4 if want < 0: # A comment line. if have: # An indented comment line. If we saw the same # indentation before, reuse what it most recently # mapped to. want = have2want.get(have, -1) if want < 0: # Then it probably belongs to the next real stmt. for j in xrange(i+1, len(stats)-1): jline, jlevel = stats[j] if jlevel >= 0: if have == getlspace(lines[jline]): want = jlevel * 4 break if want < 0: # Maybe it's a hanging # comment like this one, # in which case we should shift it like its base # line got shifted. for j in xrange(i-1, -1, -1): jline, jlevel = stats[j] if jlevel >= 0: want = have + getlspace(after[jline-1]) - \ getlspace(lines[jline]) break if want < 0: # Still no luck -- leave it alone. want = have else: want = 0 assert want >= 0 have2want[have] = want diff = want - have if diff == 0 or have == 0: after.extend(lines[thisstmt:nextstmt]) else: for line in lines[thisstmt:nextstmt]: if diff > 0: if line == "\n": after.append(line) else: after.append(" " * diff + line) else: remove = min(getlspace(line), -diff) after.append(line[remove:]) return self.raw != self.after def write(self, f): f.writelines(self.after) # Line-getter for tokenize. def getline(self): if self.index >= len(self.lines): line = "" else: line = self.lines[self.index] self.index += 1 return line # Line-eater for tokenize. def tokeneater(self, type, token, (sline, scol), end, line, INDENT=tokenize.INDENT, DEDENT=tokenize.DEDENT, NEWLINE=tokenize.NEWLINE, COMMENT=tokenize.COMMENT, NL=tokenize.NL): if type == NEWLINE: # A program statement, or ENDMARKER, will eventually follow, # after some (possibly empty) run of tokens of the form # (NL | COMMENT)* (INDENT | DEDENT+)? self.find_stmt = 1 elif type == INDENT: self.find_stmt = 1 self.level += 1 elif type == DEDENT: self.find_stmt = 1 self.level -= 1 elif type == COMMENT: if self.find_stmt: self.stats.append((sline, -1)) # but we're still looking for a new stmt, so leave # find_stmt alone elif type == NL: pass elif self.find_stmt: # This is the first "real token" following a NEWLINE, so it # must be the first token of the next program statement, or an # ENDMARKER. 
self.find_stmt = 0 if line: # not endmarker self.stats.append((sline, self.level)) # Count number of leading blanks. def getlspace(line): i, n = 0, len(line) while i < n and line[i] == " ": i += 1 return i if __name__ == '__main__': main()
apache-2.0
205,902,687,794,405,150
4,192,364,401,829,222,000
33.109966
82
0.50816
false
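reindent.py above is the classic Tim Peters reindenting script and is Python 2 code. Besides its command-line interface, the Reindenter class can be driven directly; a Python 2 sketch, assuming the script is importable as reindent:

import sys
from StringIO import StringIO
import reindent

src = StringIO("def f():\n\treturn 1\n")   # tab-indented input
r = reindent.Reindenter(src)
if r.run():                                # True when the source needed changes
    r.write(sys.stdout)                    # emits the 4-space-indented version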
tomzhang/googletest
test/gtest_filter_unittest.py
2826
21261
#!/usr/bin/env python # # Copyright 2005 Google Inc. All Rights Reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Unit test for Google Test test filters. A user can specify which test(s) in a Google Test program to run via either the GTEST_FILTER environment variable or the --gtest_filter flag. This script tests such functionality by invoking gtest_filter_unittest_ (a program written with Google Test) with different environments and command line flags. Note that test sharding may also influence which tests are filtered. Therefore, we test that here also. """ __author__ = '[email protected] (Zhanyong Wan)' import os import re import sets import sys import gtest_test_utils # Constants. # Checks if this platform can pass empty environment variables to child # processes. We set an env variable to an empty string and invoke a python # script in a subprocess to print whether the variable is STILL in # os.environ. We then use 'eval' to parse the child's output so that an # exception is thrown if the input is anything other than 'True' nor 'False'. os.environ['EMPTY_VAR'] = '' child = gtest_test_utils.Subprocess( [sys.executable, '-c', 'import os; print \'EMPTY_VAR\' in os.environ']) CAN_PASS_EMPTY_ENV = eval(child.output) # Check if this platform can unset environment variables in child processes. # We set an env variable to a non-empty string, unset it, and invoke # a python script in a subprocess to print whether the variable # is NO LONGER in os.environ. # We use 'eval' to parse the child's output so that an exception # is thrown if the input is neither 'True' nor 'False'. os.environ['UNSET_VAR'] = 'X' del os.environ['UNSET_VAR'] child = gtest_test_utils.Subprocess( [sys.executable, '-c', 'import os; print \'UNSET_VAR\' not in os.environ']) CAN_UNSET_ENV = eval(child.output) # Checks if we should test with an empty filter. 
This doesn't # make sense on platforms that cannot pass empty env variables (Win32) # and on platforms that cannot unset variables (since we cannot tell # the difference between "" and NULL -- Borland and Solaris < 5.10) CAN_TEST_EMPTY_FILTER = (CAN_PASS_EMPTY_ENV and CAN_UNSET_ENV) # The environment variable for specifying the test filters. FILTER_ENV_VAR = 'GTEST_FILTER' # The environment variables for test sharding. TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS' SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX' SHARD_STATUS_FILE_ENV_VAR = 'GTEST_SHARD_STATUS_FILE' # The command line flag for specifying the test filters. FILTER_FLAG = 'gtest_filter' # The command line flag for including disabled tests. ALSO_RUN_DISABED_TESTS_FLAG = 'gtest_also_run_disabled_tests' # Command to run the gtest_filter_unittest_ program. COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_filter_unittest_') # Regex for determining whether parameterized tests are enabled in the binary. PARAM_TEST_REGEX = re.compile(r'/ParamTest') # Regex for parsing test case names from Google Test's output. TEST_CASE_REGEX = re.compile(r'^\[\-+\] \d+ tests? from (\w+(/\w+)?)') # Regex for parsing test names from Google Test's output. TEST_REGEX = re.compile(r'^\[\s*RUN\s*\].*\.(\w+(/\w+)?)') # The command line flag to tell Google Test to output the list of tests it # will run. LIST_TESTS_FLAG = '--gtest_list_tests' # Indicates whether Google Test supports death tests. SUPPORTS_DEATH_TESTS = 'HasDeathTest' in gtest_test_utils.Subprocess( [COMMAND, LIST_TESTS_FLAG]).output # Full names of all tests in gtest_filter_unittests_. PARAM_TESTS = [ 'SeqP/ParamTest.TestX/0', 'SeqP/ParamTest.TestX/1', 'SeqP/ParamTest.TestY/0', 'SeqP/ParamTest.TestY/1', 'SeqQ/ParamTest.TestX/0', 'SeqQ/ParamTest.TestX/1', 'SeqQ/ParamTest.TestY/0', 'SeqQ/ParamTest.TestY/1', ] DISABLED_TESTS = [ 'BarTest.DISABLED_TestFour', 'BarTest.DISABLED_TestFive', 'BazTest.DISABLED_TestC', 'DISABLED_FoobarTest.Test1', 'DISABLED_FoobarTest.DISABLED_Test2', 'DISABLED_FoobarbazTest.TestA', ] if SUPPORTS_DEATH_TESTS: DEATH_TESTS = [ 'HasDeathTest.Test1', 'HasDeathTest.Test2', ] else: DEATH_TESTS = [] # All the non-disabled tests. ACTIVE_TESTS = [ 'FooTest.Abc', 'FooTest.Xyz', 'BarTest.TestOne', 'BarTest.TestTwo', 'BarTest.TestThree', 'BazTest.TestOne', 'BazTest.TestA', 'BazTest.TestB', ] + DEATH_TESTS + PARAM_TESTS param_tests_present = None # Utilities. environ = os.environ.copy() def SetEnvVar(env_var, value): """Sets the env variable to 'value'; unsets it when 'value' is None.""" if value is not None: environ[env_var] = value elif env_var in environ: del environ[env_var] def RunAndReturnOutput(args = None): """Runs the test program and returns its output.""" return gtest_test_utils.Subprocess([COMMAND] + (args or []), env=environ).output def RunAndExtractTestList(args = None): """Runs the test program and returns its exit code and a list of tests run.""" p = gtest_test_utils.Subprocess([COMMAND] + (args or []), env=environ) tests_run = [] test_case = '' test = '' for line in p.output.split('\n'): match = TEST_CASE_REGEX.match(line) if match is not None: test_case = match.group(1) else: match = TEST_REGEX.match(line) if match is not None: test = match.group(1) tests_run.append(test_case + '.' 
+ test) return (tests_run, p.exit_code) def InvokeWithModifiedEnv(extra_env, function, *args, **kwargs): """Runs the given function and arguments in a modified environment.""" try: original_env = environ.copy() environ.update(extra_env) return function(*args, **kwargs) finally: environ.clear() environ.update(original_env) def RunWithSharding(total_shards, shard_index, command): """Runs a test program shard and returns exit code and a list of tests run.""" extra_env = {SHARD_INDEX_ENV_VAR: str(shard_index), TOTAL_SHARDS_ENV_VAR: str(total_shards)} return InvokeWithModifiedEnv(extra_env, RunAndExtractTestList, command) # The unit test. class GTestFilterUnitTest(gtest_test_utils.TestCase): """Tests the env variable or the command line flag to filter tests.""" # Utilities. def AssertSetEqual(self, lhs, rhs): """Asserts that two sets are equal.""" for elem in lhs: self.assert_(elem in rhs, '%s in %s' % (elem, rhs)) for elem in rhs: self.assert_(elem in lhs, '%s in %s' % (elem, lhs)) def AssertPartitionIsValid(self, set_var, list_of_sets): """Asserts that list_of_sets is a valid partition of set_var.""" full_partition = [] for slice_var in list_of_sets: full_partition.extend(slice_var) self.assertEqual(len(set_var), len(full_partition)) self.assertEqual(sets.Set(set_var), sets.Set(full_partition)) def AdjustForParameterizedTests(self, tests_to_run): """Adjust tests_to_run in case value parameterized tests are disabled.""" global param_tests_present if not param_tests_present: return list(sets.Set(tests_to_run) - sets.Set(PARAM_TESTS)) else: return tests_to_run def RunAndVerify(self, gtest_filter, tests_to_run): """Checks that the binary runs correct set of tests for a given filter.""" tests_to_run = self.AdjustForParameterizedTests(tests_to_run) # First, tests using the environment variable. # Windows removes empty variables from the environment when passing it # to a new process. This means it is impossible to pass an empty filter # into a process using the environment variable. However, we can still # test the case when the variable is not supplied (i.e., gtest_filter is # None). # pylint: disable-msg=C6403 if CAN_TEST_EMPTY_FILTER or gtest_filter != '': SetEnvVar(FILTER_ENV_VAR, gtest_filter) tests_run = RunAndExtractTestList()[0] SetEnvVar(FILTER_ENV_VAR, None) self.AssertSetEqual(tests_run, tests_to_run) # pylint: enable-msg=C6403 # Next, tests using the command line flag. if gtest_filter is None: args = [] else: args = ['--%s=%s' % (FILTER_FLAG, gtest_filter)] tests_run = RunAndExtractTestList(args)[0] self.AssertSetEqual(tests_run, tests_to_run) def RunAndVerifyWithSharding(self, gtest_filter, total_shards, tests_to_run, args=None, check_exit_0=False): """Checks that binary runs correct tests for the given filter and shard. Runs all shards of gtest_filter_unittest_ with the given filter, and verifies that the right set of tests were run. The union of tests run on each shard should be identical to tests_to_run, without duplicates. Args: gtest_filter: A filter to apply to the tests. total_shards: A total number of shards to split test run into. tests_to_run: A set of tests expected to run. args : Arguments to pass to the to the test binary. check_exit_0: When set to a true value, make sure that all shards return 0. """ tests_to_run = self.AdjustForParameterizedTests(tests_to_run) # Windows removes empty variables from the environment when passing it # to a new process. This means it is impossible to pass an empty filter # into a process using the environment variable. 
However, we can still # test the case when the variable is not supplied (i.e., gtest_filter is # None). # pylint: disable-msg=C6403 if CAN_TEST_EMPTY_FILTER or gtest_filter != '': SetEnvVar(FILTER_ENV_VAR, gtest_filter) partition = [] for i in range(0, total_shards): (tests_run, exit_code) = RunWithSharding(total_shards, i, args) if check_exit_0: self.assertEqual(0, exit_code) partition.append(tests_run) self.AssertPartitionIsValid(tests_to_run, partition) SetEnvVar(FILTER_ENV_VAR, None) # pylint: enable-msg=C6403 def RunAndVerifyAllowingDisabled(self, gtest_filter, tests_to_run): """Checks that the binary runs correct set of tests for the given filter. Runs gtest_filter_unittest_ with the given filter, and enables disabled tests. Verifies that the right set of tests were run. Args: gtest_filter: A filter to apply to the tests. tests_to_run: A set of tests expected to run. """ tests_to_run = self.AdjustForParameterizedTests(tests_to_run) # Construct the command line. args = ['--%s' % ALSO_RUN_DISABED_TESTS_FLAG] if gtest_filter is not None: args.append('--%s=%s' % (FILTER_FLAG, gtest_filter)) tests_run = RunAndExtractTestList(args)[0] self.AssertSetEqual(tests_run, tests_to_run) def setUp(self): """Sets up test case. Determines whether value-parameterized tests are enabled in the binary and sets the flags accordingly. """ global param_tests_present if param_tests_present is None: param_tests_present = PARAM_TEST_REGEX.search( RunAndReturnOutput()) is not None def testDefaultBehavior(self): """Tests the behavior of not specifying the filter.""" self.RunAndVerify(None, ACTIVE_TESTS) def testDefaultBehaviorWithShards(self): """Tests the behavior without the filter, with sharding enabled.""" self.RunAndVerifyWithSharding(None, 1, ACTIVE_TESTS) self.RunAndVerifyWithSharding(None, 2, ACTIVE_TESTS) self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) - 1, ACTIVE_TESTS) self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS), ACTIVE_TESTS) self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) + 1, ACTIVE_TESTS) def testEmptyFilter(self): """Tests an empty filter.""" self.RunAndVerify('', []) self.RunAndVerifyWithSharding('', 1, []) self.RunAndVerifyWithSharding('', 2, []) def testBadFilter(self): """Tests a filter that matches nothing.""" self.RunAndVerify('BadFilter', []) self.RunAndVerifyAllowingDisabled('BadFilter', []) def testFullName(self): """Tests filtering by full name.""" self.RunAndVerify('FooTest.Xyz', ['FooTest.Xyz']) self.RunAndVerifyAllowingDisabled('FooTest.Xyz', ['FooTest.Xyz']) self.RunAndVerifyWithSharding('FooTest.Xyz', 5, ['FooTest.Xyz']) def testUniversalFilters(self): """Tests filters that match everything.""" self.RunAndVerify('*', ACTIVE_TESTS) self.RunAndVerify('*.*', ACTIVE_TESTS) self.RunAndVerifyWithSharding('*.*', len(ACTIVE_TESTS) - 3, ACTIVE_TESTS) self.RunAndVerifyAllowingDisabled('*', ACTIVE_TESTS + DISABLED_TESTS) self.RunAndVerifyAllowingDisabled('*.*', ACTIVE_TESTS + DISABLED_TESTS) def testFilterByTestCase(self): """Tests filtering by test case name.""" self.RunAndVerify('FooTest.*', ['FooTest.Abc', 'FooTest.Xyz']) BAZ_TESTS = ['BazTest.TestOne', 'BazTest.TestA', 'BazTest.TestB'] self.RunAndVerify('BazTest.*', BAZ_TESTS) self.RunAndVerifyAllowingDisabled('BazTest.*', BAZ_TESTS + ['BazTest.DISABLED_TestC']) def testFilterByTest(self): """Tests filtering by test name.""" self.RunAndVerify('*.TestOne', ['BarTest.TestOne', 'BazTest.TestOne']) def testFilterDisabledTests(self): """Select only the disabled tests to run.""" 
self.RunAndVerify('DISABLED_FoobarTest.Test1', []) self.RunAndVerifyAllowingDisabled('DISABLED_FoobarTest.Test1', ['DISABLED_FoobarTest.Test1']) self.RunAndVerify('*DISABLED_*', []) self.RunAndVerifyAllowingDisabled('*DISABLED_*', DISABLED_TESTS) self.RunAndVerify('*.DISABLED_*', []) self.RunAndVerifyAllowingDisabled('*.DISABLED_*', [ 'BarTest.DISABLED_TestFour', 'BarTest.DISABLED_TestFive', 'BazTest.DISABLED_TestC', 'DISABLED_FoobarTest.DISABLED_Test2', ]) self.RunAndVerify('DISABLED_*', []) self.RunAndVerifyAllowingDisabled('DISABLED_*', [ 'DISABLED_FoobarTest.Test1', 'DISABLED_FoobarTest.DISABLED_Test2', 'DISABLED_FoobarbazTest.TestA', ]) def testWildcardInTestCaseName(self): """Tests using wildcard in the test case name.""" self.RunAndVerify('*a*.*', [ 'BarTest.TestOne', 'BarTest.TestTwo', 'BarTest.TestThree', 'BazTest.TestOne', 'BazTest.TestA', 'BazTest.TestB', ] + DEATH_TESTS + PARAM_TESTS) def testWildcardInTestName(self): """Tests using wildcard in the test name.""" self.RunAndVerify('*.*A*', ['FooTest.Abc', 'BazTest.TestA']) def testFilterWithoutDot(self): """Tests a filter that has no '.' in it.""" self.RunAndVerify('*z*', [ 'FooTest.Xyz', 'BazTest.TestOne', 'BazTest.TestA', 'BazTest.TestB', ]) def testTwoPatterns(self): """Tests filters that consist of two patterns.""" self.RunAndVerify('Foo*.*:*A*', [ 'FooTest.Abc', 'FooTest.Xyz', 'BazTest.TestA', ]) # An empty pattern + a non-empty one self.RunAndVerify(':*A*', ['FooTest.Abc', 'BazTest.TestA']) def testThreePatterns(self): """Tests filters that consist of three patterns.""" self.RunAndVerify('*oo*:*A*:*One', [ 'FooTest.Abc', 'FooTest.Xyz', 'BarTest.TestOne', 'BazTest.TestOne', 'BazTest.TestA', ]) # The 2nd pattern is empty. self.RunAndVerify('*oo*::*One', [ 'FooTest.Abc', 'FooTest.Xyz', 'BarTest.TestOne', 'BazTest.TestOne', ]) # The last 2 patterns are empty. self.RunAndVerify('*oo*::', [ 'FooTest.Abc', 'FooTest.Xyz', ]) def testNegativeFilters(self): self.RunAndVerify('*-BazTest.TestOne', [ 'FooTest.Abc', 'FooTest.Xyz', 'BarTest.TestOne', 'BarTest.TestTwo', 'BarTest.TestThree', 'BazTest.TestA', 'BazTest.TestB', ] + DEATH_TESTS + PARAM_TESTS) self.RunAndVerify('*-FooTest.Abc:BazTest.*', [ 'FooTest.Xyz', 'BarTest.TestOne', 'BarTest.TestTwo', 'BarTest.TestThree', ] + DEATH_TESTS + PARAM_TESTS) self.RunAndVerify('BarTest.*-BarTest.TestOne', [ 'BarTest.TestTwo', 'BarTest.TestThree', ]) # Tests without leading '*'. self.RunAndVerify('-FooTest.Abc:FooTest.Xyz:BazTest.*', [ 'BarTest.TestOne', 'BarTest.TestTwo', 'BarTest.TestThree', ] + DEATH_TESTS + PARAM_TESTS) # Value parameterized tests. self.RunAndVerify('*/*', PARAM_TESTS) # Value parameterized tests filtering by the sequence name. self.RunAndVerify('SeqP/*', [ 'SeqP/ParamTest.TestX/0', 'SeqP/ParamTest.TestX/1', 'SeqP/ParamTest.TestY/0', 'SeqP/ParamTest.TestY/1', ]) # Value parameterized tests filtering by the test name. self.RunAndVerify('*/0', [ 'SeqP/ParamTest.TestX/0', 'SeqP/ParamTest.TestY/0', 'SeqQ/ParamTest.TestX/0', 'SeqQ/ParamTest.TestY/0', ]) def testFlagOverridesEnvVar(self): """Tests that the filter flag overrides the filtering env. 
variable.""" SetEnvVar(FILTER_ENV_VAR, 'Foo*') args = ['--%s=%s' % (FILTER_FLAG, '*One')] tests_run = RunAndExtractTestList(args)[0] SetEnvVar(FILTER_ENV_VAR, None) self.AssertSetEqual(tests_run, ['BarTest.TestOne', 'BazTest.TestOne']) def testShardStatusFileIsCreated(self): """Tests that the shard file is created if specified in the environment.""" shard_status_file = os.path.join(gtest_test_utils.GetTempDir(), 'shard_status_file') self.assert_(not os.path.exists(shard_status_file)) extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file} try: InvokeWithModifiedEnv(extra_env, RunAndReturnOutput) finally: self.assert_(os.path.exists(shard_status_file)) os.remove(shard_status_file) def testShardStatusFileIsCreatedWithListTests(self): """Tests that the shard file is created with the "list_tests" flag.""" shard_status_file = os.path.join(gtest_test_utils.GetTempDir(), 'shard_status_file2') self.assert_(not os.path.exists(shard_status_file)) extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file} try: output = InvokeWithModifiedEnv(extra_env, RunAndReturnOutput, [LIST_TESTS_FLAG]) finally: # This assertion ensures that Google Test enumerated the tests as # opposed to running them. self.assert_('[==========]' not in output, 'Unexpected output during test enumeration.\n' 'Please ensure that LIST_TESTS_FLAG is assigned the\n' 'correct flag value for listing Google Test tests.') self.assert_(os.path.exists(shard_status_file)) os.remove(shard_status_file) if SUPPORTS_DEATH_TESTS: def testShardingWorksWithDeathTests(self): """Tests integration with death tests and sharding.""" gtest_filter = 'HasDeathTest.*:SeqP/*' expected_tests = [ 'HasDeathTest.Test1', 'HasDeathTest.Test2', 'SeqP/ParamTest.TestX/0', 'SeqP/ParamTest.TestX/1', 'SeqP/ParamTest.TestY/0', 'SeqP/ParamTest.TestY/1', ] for flag in ['--gtest_death_test_style=threadsafe', '--gtest_death_test_style=fast']: self.RunAndVerifyWithSharding(gtest_filter, 3, expected_tests, check_exit_0=True, args=[flag]) self.RunAndVerifyWithSharding(gtest_filter, 5, expected_tests, check_exit_0=True, args=[flag]) if __name__ == '__main__': gtest_test_utils.Main()
bsd-3-clause
-5,897,445,039,768,975,000
-503,857,296,269,181,200
32.587678
80
0.663468
false
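The gtest_filter_unittest record above drives a Google Test binary through the GTEST_FILTER environment variable or the --gtest_filter flag and recovers the tests that actually ran by matching "[ RUN ]" output lines. A hedged sketch of that extraction step using only the standard library; the binary path is a placeholder and the regex is simplified relative to the record's TEST_REGEX:

import re
import subprocess

RUN_LINE = re.compile(r"^\[\s*RUN\s*\]\s+(\S+)")

def run_and_extract_tests(binary, gtest_filter=None, env=None):
    # Invoke the test binary, optionally with --gtest_filter, and collect
    # the full test names printed on "[ RUN      ] Suite.Test" lines.
    args = [binary]
    if gtest_filter is not None:
        args.append("--gtest_filter=" + gtest_filter)
    proc = subprocess.run(args, capture_output=True, text=True, env=env)
    tests = [m.group(1) for m in map(RUN_LINE.match, proc.stdout.splitlines()) if m]
    return tests, proc.returncode

# Hypothetical usage (no such binary is shipped with this document):
# tests, exit_code = run_and_extract_tests("./gtest_filter_unittest_", "FooTest.*")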
Cloudef/mpv
waftools/checks/custom.py
1
4397
from waftools.inflectors import DependencyInflector from waftools.checks.generic import * from waflib import Utils import os __all__ = ["check_pthreads", "check_iconv", "check_lua", "check_oss_4front", "check_cocoa"] pthreads_program = load_fragment('pthreads.c') def check_pthread_flag(ctx, dependency_identifier): checks = [ check_cc(fragment = pthreads_program, cflags = '-pthread'), check_cc(fragment = pthreads_program, cflags = '-pthread', linkflags = '-pthread') ] for fn in checks: if fn(ctx, dependency_identifier): return True return False def check_pthreads(ctx, dependency_identifier): if ctx.dependency_satisfied('win32-internal-pthreads'): h = ctx.path.find_node('osdep/win32/include').abspath() # define IN_WINPTHREAD to workaround mingw stupidity (we never want it # to define features specific to its own pthread stuff) ctx.env.CFLAGS += ['-isystem', h, '-I', h, '-DIN_WINPTHREAD'] return True if check_pthread_flag(ctx, dependency_identifier): return True platform_cflags = { 'linux': '-D_REENTRANT', 'freebsd': '-D_THREAD_SAFE', 'netbsd': '-D_THREAD_SAFE', 'openbsd': '-D_THREAD_SAFE', }.get(ctx.env.DEST_OS, '') libs = ['pthreadGC2', 'pthread'] checkfn = check_cc(fragment=pthreads_program, cflags=platform_cflags) checkfn_nocflags = check_cc(fragment=pthreads_program) for fn in [checkfn, checkfn_nocflags]: if check_libs(libs, fn)(ctx, dependency_identifier): return True return False def check_iconv(ctx, dependency_identifier): iconv_program = load_fragment('iconv.c') libdliconv = " ".join(ctx.env.LIB_LIBDL + ['iconv']) libs = ['iconv', libdliconv] checkfn = check_cc(fragment=iconv_program) return check_libs(libs, checkfn)(ctx, dependency_identifier) def check_lua(ctx, dependency_identifier): lua_versions = [ ( '51', 'lua >= 5.1.0 lua < 5.2.0'), ( '51deb', 'lua5.1 >= 5.1.0'), # debian ( '51fbsd', 'lua-5.1 >= 5.1.0'), # FreeBSD ( '52', 'lua >= 5.2.0' ), ( '52deb', 'lua5.2 >= 5.2.0'), # debian ( '52fbsd', 'lua-5.2 >= 5.2.0'), # FreeBSD ( 'luajit', 'luajit >= 2.0.0' ), ] if ctx.options.LUA_VER: lua_versions = \ [lv for lv in lua_versions if lv[0] == ctx.options.LUA_VER] for lua_version, pkgconfig_query in lua_versions: if check_pkg_config(pkgconfig_query, uselib_store=lua_version) \ (ctx, dependency_identifier): # XXX: this is a bit of a hack, ask waf developers if I can copy # the uselib_store to 'lua' ctx.mark_satisfied(lua_version) ctx.add_optional_message(dependency_identifier, 'version found: ' + lua_version) return True return False def __get_osslibdir(): cmd = ['sh', '-c', '. 
/etc/oss.conf && echo $OSSLIBDIR'] p = Utils.subprocess.Popen(cmd, stdin=Utils.subprocess.PIPE, stdout=Utils.subprocess.PIPE, stderr=Utils.subprocess.PIPE) return p.communicate()[0].decode().rstrip() def check_oss_4front(ctx, dependency_identifier): oss_libdir = __get_osslibdir() # avoid false positive from native sys/soundcard.h if not oss_libdir: defkey = DependencyInflector(dependency_identifier).define_key() ctx.undefine(defkey) return False soundcard_h = os.path.join(oss_libdir, "include/sys/soundcard.h") include_dir = os.path.join(oss_libdir, "include") fn = check_cc(header_name=soundcard_h, defines=['PATH_DEV_DSP="/dev/dsp"', 'PATH_DEV_MIXER="/dev/mixer"'], cflags='-I{0}'.format(include_dir), fragment=load_fragment('oss_audio.c')) return fn(ctx, dependency_identifier) def check_cocoa(ctx, dependency_identifier): fn = check_cc( fragment = load_fragment('cocoa.m'), compile_filename = 'test.m', framework_name = ['Cocoa', 'IOKit', 'OpenGL', 'QuartzCore'], includes = ctx.srcnode.abspath(), linkflags = '-fobjc-arc') return fn(ctx, dependency_identifier)
gpl-2.0
5,454,516,490,788,092,000
8,670,836,820,289,340,000
37.911504
78
0.586764
false
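The mpv waftools record above reads OSSLIBDIR by sourcing /etc/oss.conf in a shell and echoing the variable (__get_osslibdir). A reduced sketch of that "ask sh for one variable" trick with subprocess.run; it assumes a POSIX sh is available and that the config file may simply be absent:

import subprocess

def read_shell_var(conf_path, var_name):
    # Source a shell config file and echo a single variable, as the
    # record's __get_osslibdir does for /etc/oss.conf and $OSSLIBDIR.
    cmd = ["sh", "-c", ". {0} && echo ${1}".format(conf_path, var_name)]
    proc = subprocess.run(cmd, capture_output=True, text=True)
    return proc.stdout.strip() if proc.returncode == 0 else ""

# Hypothetical usage; returns "" when the file does not exist:
# print(read_shell_var("/etc/oss.conf", "OSSLIBDIR"))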
anryko/ansible
test/units/modules/network/nxos/test_nxos_ospf.py
23
2029
# (c) 2016 Red Hat Inc. # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type from units.compat.mock import patch from ansible.modules.network.nxos import nxos_ospf from .nxos_module import TestNxosModule, set_module_args class TestNxosOspfModule(TestNxosModule): module = nxos_ospf def setUp(self): super(TestNxosOspfModule, self).setUp() self.mock_load_config = patch('ansible.modules.network.nxos.nxos_ospf.load_config') self.load_config = self.mock_load_config.start() self.mock_get_config = patch('ansible.modules.network.nxos.nxos_ospf.get_config') self.get_config = self.mock_get_config.start() def tearDown(self): super(TestNxosOspfModule, self).tearDown() self.mock_load_config.stop() self.mock_get_config.stop() def load_fixtures(self, commands=None, device=''): self.load_config.return_value = None def test_nxos_ospf_present(self): set_module_args(dict(ospf=1, state='present')) result = self.execute_module(changed=True) self.assertEqual(result['commands'], ['router ospf 1']) def test_nxos_ospf_absent(self): set_module_args(dict(ospf=1, state='absent')) result = self.execute_module(changed=False) self.assertEqual(result['commands'], [])
gpl-3.0
-228,766,770,894,842,530
4,787,824,306,915,042,000
35.232143
91
0.705274
false
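The nxos_ospf unit-test record above stubs the module's load_config/get_config helpers with mock.patch, starting the patches in setUp and stopping them in tearDown, then asserts on the generated commands. A self-contained sketch of the same start/stop pattern; the Device class and generate_commands function are invented stand-ins, not Ansible APIs:

import unittest
from unittest import mock

class Device:
    # Stand-in for the helpers the record patches (load_config/get_config).
    def load_config(self, commands):
        raise RuntimeError("would talk to real hardware")

def generate_commands(device, ospf, state):
    # Toy stand-in for the module logic the record exercises.
    commands = ["router ospf %s" % ospf] if state == "present" else []
    if commands:
        device.load_config(commands)
    return commands

class GenerateCommandsTest(unittest.TestCase):
    def setUp(self):
        # Patches are started here and stopped in tearDown, as in the record.
        self.mock_load = mock.patch.object(Device, "load_config", return_value=None)
        self.load_config = self.mock_load.start()

    def tearDown(self):
        self.mock_load.stop()

    def test_present(self):
        self.assertEqual(generate_commands(Device(), 1, "present"),
                         ["router ospf 1"])
        self.load_config.assert_called_once()

if __name__ == "__main__":
    unittest.main()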
anryko/ansible
lib/ansible/modules/cloud/vultr/vultr_plan_info.py
21
3763
#!/usr/bin/python # -*- coding: utf-8 -*- # # (c) 2018, Yanis Guenane <[email protected]> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import (absolute_import, division, print_function) __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = r''' --- module: vultr_plan_info short_description: Gather information about the Vultr plans available. description: - Gather information about plans available to boot servers. version_added: "2.9" author: "Yanis Guenane (@Spredzy)" extends_documentation_fragment: vultr ''' EXAMPLES = r''' - name: Gather Vultr plans information local_action: module: vultr_plan_info register: result - name: Print the gathered information debug: var: result.vultr_plan_info ''' RETURN = r''' --- vultr_api: description: Response from Vultr API with a few additions/modification returned: success type: complex contains: api_account: description: Account used in the ini file to select the key returned: success type: str sample: default api_timeout: description: Timeout used for the API requests returned: success type: int sample: 60 api_retries: description: Amount of max retries for the API requests returned: success type: int sample: 5 api_retry_max_delay: description: Exponential backoff delay in seconds between retries up to this max delay value. returned: success type: int sample: 12 version_added: '2.9' api_endpoint: description: Endpoint used for the API requests returned: success type: str sample: "https://api.vultr.com" vultr_plan_info: description: Response from Vultr API returned: success type: complex contains: plan: description: List of the plans available. returned: success type: list sample: [{ "available_locations": [ 1 ], "bandwidth": 40.0, "bandwidth_gb": 40960, "disk": 110, "id": 118, "name": "32768 MB RAM,110 GB SSD,40.00 TB BW", "plan_type": "DEDICATED", "price_per_month": 240.0, "ram": 32768, "vcpu_count": 8, "windows": false }] ''' from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.vultr import ( Vultr, vultr_argument_spec, ) class AnsibleVultrPlanInfo(Vultr): def __init__(self, module): super(AnsibleVultrPlanInfo, self).__init__(module, "vultr_plan_info") self.returns = { "VPSPLANID": dict(key='id', convert_to='int'), "available_locations": dict(), "bandwidth": dict(convert_to='float'), "bandwidth_gb": dict(convert_to='int'), "disk": dict(convert_to='int'), "name": dict(), "plan_type": dict(), "price_per_month": dict(convert_to='float'), "ram": dict(convert_to='int'), "vcpu_count": dict(convert_to='int'), "windows": dict(convert_to='bool') } def get_plans(self): return self.api_query(path="/v1/plans/list") def parse_plans_list(plans_list): return [plan for id, plan in plans_list.items()] def main(): argument_spec = vultr_argument_spec() module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True, ) plan_info = AnsibleVultrPlanInfo(module) result = plan_info.get_result(parse_plans_list(plan_info.get_plans())) module.exit_json(**result) if __name__ == '__main__': main()
gpl-3.0
-6,058,826,422,089,600,000
6,049,868,018,529,890,000
25.314685
99
0.61334
false
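The vultr_plan_info record above flattens the API's {plan_id: plan} mapping into a list (parse_plans_list) and declares per-field type conversions. A small sketch of both steps; the field names come from the record, while the sample values are invented:

def parse_plans_list(plans_by_id):
    # Flatten the API's {plan_id: plan} mapping into a plain list,
    # as the record's parse_plans_list helper does.
    return [plan for _plan_id, plan in plans_by_id.items()]

CONVERSIONS = {"VPSPLANID": int, "bandwidth": float, "bandwidth_gb": int, "disk": int}

def convert_fields(plan):
    # Apply per-field type conversions, a simplified stand-in for the
    # record's convert_to='int'/'float' declarations.
    return {key: CONVERSIONS.get(key, str)(value) for key, value in plan.items()}

sample = {"118": {"VPSPLANID": "118", "bandwidth": "40.0",
                  "bandwidth_gb": "40960", "disk": "110"}}
print([convert_fields(p) for p in parse_plans_list(sample)])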
kailIII/geraldo
site/newsite/django_1_0/django/dispatch/dispatcher.py
9
17129
"""Multiple-producer-multiple-consumer signal-dispatching dispatcher is the core of the PyDispatcher system, providing the primary API and the core logic for the system. Module attributes of note: Any -- Singleton used to signal either "Any Sender" or "Any Signal". See documentation of the _Any class. Anonymous -- Singleton used to signal "Anonymous Sender" See documentation of the _Anonymous class. Internal attributes: WEAKREF_TYPES -- tuple of types/classes which represent weak references to receivers, and thus must be de- referenced on retrieval to retrieve the callable object connections -- { senderkey (id) : { signal : [receivers...]}} senders -- { senderkey (id) : weakref(sender) } used for cleaning up sender references on sender deletion sendersBack -- { receiverkey (id) : [senderkey (id)...] } used for cleaning up receiver references on receiver deletion, (considerably speeds up the cleanup process vs. the original code.) """ import weakref from django.dispatch import saferef, robustapply, errors __author__ = "Patrick K. O'Brien <[email protected]>" __cvsid__ = "$Id: dispatcher.py,v 1.9 2005/09/17 04:55:57 mcfletch Exp $" __version__ = "$Revision: 1.9 $"[11:-2] class _Parameter: """Used to represent default parameter values.""" def __repr__(self): return self.__class__.__name__ class _Any(_Parameter): """Singleton used to signal either "Any Sender" or "Any Signal" The Any object can be used with connect, disconnect, send, or sendExact to signal that the parameter given Any should react to all senders/signals, not just a particular sender/signal. """ Any = _Any() class _Anonymous(_Parameter): """Singleton used to signal "Anonymous Sender" The Anonymous object is used to signal that the sender of a message is not specified (as distinct from being "any sender"). Registering callbacks for Anonymous will only receive messages sent without senders. Sending with anonymous will only send messages to those receivers registered for Any or Anonymous. Note: The default sender for connect is Any, while the default sender for send is Anonymous. This has the effect that if you do not specify any senders in either function then all messages are routed as though there was a single sender (Anonymous) being used everywhere. """ Anonymous = _Anonymous() WEAKREF_TYPES = (weakref.ReferenceType, saferef.BoundMethodWeakref) connections = {} senders = {} sendersBack = {} def connect(receiver, signal=Any, sender=Any, weak=True): """Connect receiver to sender for signal receiver -- a callable Python object which is to receive messages/signals/events. Receivers must be hashable objects. if weak is True, then receiver must be weak-referencable (more precisely saferef.safeRef() must be able to create a reference to the receiver). Receivers are fairly flexible in their specification, as the machinery in the robustApply module takes care of most of the details regarding figuring out appropriate subsets of the sent arguments to apply to a given receiver. Note: if receiver is itself a weak reference (a callable), it will be de-referenced by the system's machinery, so *generally* weak references are not suitable as receivers, though some use might be found for the facility whereby a higher-level library passes in pre-weakrefed receiver references. signal -- the signal to which the receiver should respond if Any, receiver will receive any signal from the indicated sender (which might also be Any, but is not necessarily Any). Otherwise must be a hashable Python object other than None (DispatcherError raised on None). 
sender -- the sender to which the receiver should respond if Any, receiver will receive the indicated signals from any sender. if Anonymous, receiver will only receive indicated signals from send/sendExact which do not specify a sender, or specify Anonymous explicitly as the sender. Otherwise can be any python object. weak -- whether to use weak references to the receiver By default, the module will attempt to use weak references to the receiver objects. If this parameter is false, then strong references will be used. returns None, may raise DispatcherTypeError """ if signal is None: raise errors.DispatcherTypeError( 'Signal cannot be None (receiver=%r sender=%r)' % (receiver, sender) ) if weak: receiver = saferef.safeRef(receiver, onDelete=_removeReceiver) senderkey = id(sender) signals = connections.setdefault(senderkey, {}) # Keep track of senders for cleanup. # Is Anonymous something we want to clean up? if sender not in (None, Anonymous, Any): def remove(object, senderkey=senderkey): _removeSender(senderkey=senderkey) # Skip objects that can not be weakly referenced, which means # they won't be automatically cleaned up, but that's too bad. try: weakSender = weakref.ref(sender, remove) senders[senderkey] = weakSender except: pass receiverID = id(receiver) # get current set, remove any current references to # this receiver in the set, including back-references if signals.has_key(signal): receivers = signals[signal] _removeOldBackRefs(senderkey, signal, receiver, receivers) else: receivers = signals[signal] = [] try: current = sendersBack.get(receiverID) if current is None: sendersBack[ receiverID ] = current = [] if senderkey not in current: current.append(senderkey) except: pass receivers.append(receiver) def disconnect(receiver, signal=Any, sender=Any, weak=True): """Disconnect receiver from sender for signal receiver -- the registered receiver to disconnect signal -- the registered signal to disconnect sender -- the registered sender to disconnect weak -- the weakref state to disconnect disconnect reverses the process of connect, the semantics for the individual elements are logically equivalent to a tuple of (receiver, signal, sender, weak) used as a key to be deleted from the internal routing tables. (The actual process is slightly more complex but the semantics are basically the same). Note: Using disconnect is not required to cleanup routing when an object is deleted, the framework will remove routes for deleted objects automatically. It's only necessary to disconnect if you want to stop routing to a live object. returns None, may raise DispatcherTypeError or DispatcherKeyError """ if signal is None: raise errors.DispatcherTypeError( 'Signal cannot be None (receiver=%r sender=%r)' % (receiver, sender) ) if weak: receiver = saferef.safeRef(receiver) senderkey = id(sender) try: signals = connections[senderkey] receivers = signals[signal] except KeyError: raise errors.DispatcherKeyError( """No receivers found for signal %r from sender %r""" %( signal, sender ) ) try: # also removes from receivers _removeOldBackRefs(senderkey, signal, receiver, receivers) except ValueError: raise errors.DispatcherKeyError( """No connection to receiver %s for signal %s from sender %s""" %( receiver, signal, sender ) ) _cleanupConnections(senderkey, signal) def getReceivers(sender=Any, signal=Any): """Get list of receivers from global tables This utility function allows you to retrieve the raw list of receivers from the connections table for the given sender and signal pair. 
Note: there is no guarantee that this is the actual list stored in the connections table, so the value should be treated as a simple iterable/truth value rather than, for instance a list to which you might append new records. Normally you would use liveReceivers(getReceivers(...)) to retrieve the actual receiver objects as an iterable object. """ existing = connections.get(id(sender)) if existing is not None: return existing.get(signal, []) return [] def liveReceivers(receivers): """Filter sequence of receivers to get resolved, live receivers This is a generator which will iterate over the passed sequence, checking for weak references and resolving them, then returning all live receivers. """ for receiver in receivers: if isinstance(receiver, WEAKREF_TYPES): # Dereference the weak reference. receiver = receiver() if receiver is not None: yield receiver else: yield receiver def getAllReceivers(sender=Any, signal=Any): """Get list of all receivers from global tables This gets all dereferenced receivers which should receive the given signal from sender, each receiver should be produced only once by the resulting generator """ receivers = {} # Get receivers that receive *this* signal from *this* sender. # Add receivers that receive *any* signal from *this* sender. # Add receivers that receive *this* signal from *any* sender. # Add receivers that receive *any* signal from *any* sender. l = [] i = id(sender) if i in connections: sender_receivers = connections[i] if signal in sender_receivers: l.extend(sender_receivers[signal]) if signal is not Any and Any in sender_receivers: l.extend(sender_receivers[Any]) if sender is not Any: i = id(Any) if i in connections: sender_receivers = connections[i] if sender_receivers is not None: if signal in sender_receivers: l.extend(sender_receivers[signal]) if signal is not Any and Any in sender_receivers: l.extend(sender_receivers[Any]) for receiver in l: try: if not receiver in receivers: if isinstance(receiver, WEAKREF_TYPES): receiver = receiver() # this should only (rough guess) be possible if somehow, deref'ing # triggered a wipe. if receiver is None: continue receivers[receiver] = 1 yield receiver except TypeError: # dead weakrefs raise TypeError on hash... pass def send(signal=Any, sender=Anonymous, *arguments, **named): """Send signal from sender to all connected receivers. signal -- (hashable) signal value, see connect for details sender -- the sender of the signal if Any, only receivers registered for Any will receive the message. if Anonymous, only receivers registered to receive messages from Anonymous or Any will receive the message Otherwise can be any python object (normally one registered with a connect if you actually want something to occur). arguments -- positional arguments which will be passed to *all* receivers. Note that this may raise TypeErrors if the receivers do not allow the particular arguments. Note also that arguments are applied before named arguments, so they should be used with care. named -- named arguments which will be filtered according to the parameters of the receivers to only provide those acceptable to the receiver. Return a list of tuple pairs [(receiver, response), ... ] if any receiver raises an error, the error propagates back through send, terminating the dispatch loop, so it is quite possible to not have all receivers called if a raises an error. """ # Call each receiver with whatever arguments it can accept. # Return a list of tuple pairs [(receiver, response), ... ]. 
responses = [] for receiver in getAllReceivers(sender, signal): response = robustapply.robustApply( receiver, signal=signal, sender=sender, *arguments, **named ) responses.append((receiver, response)) return responses def sendExact(signal=Any, sender=Anonymous, *arguments, **named ): """Send signal only to those receivers registered for exact message sendExact allows for avoiding Any/Anonymous registered handlers, sending only to those receivers explicitly registered for a particular signal on a particular sender. """ responses = [] for receiver in liveReceivers(getReceivers(sender, signal)): response = robustapply.robustApply( receiver, signal=signal, sender=sender, *arguments, **named ) responses.append((receiver, response)) return responses def _removeReceiver(receiver): """Remove receiver from connections.""" if not sendersBack: # During module cleanup the mapping will be replaced with None return False backKey = id(receiver) for senderkey in sendersBack.get(backKey,()): try: signals = connections[senderkey].keys() except KeyError,err: pass else: for signal in signals: try: receivers = connections[senderkey][signal] except KeyError: pass else: try: receivers.remove(receiver) except Exception, err: pass _cleanupConnections(senderkey, signal) try: del sendersBack[ backKey ] except KeyError: pass def _cleanupConnections(senderkey, signal): """Delete any empty signals for senderkey. Delete senderkey if empty.""" try: receivers = connections[senderkey][signal] except: pass else: if not receivers: # No more connected receivers. Therefore, remove the signal. try: signals = connections[senderkey] except KeyError: pass else: del signals[signal] if not signals: # No more signal connections. Therefore, remove the sender. _removeSender(senderkey) def _removeSender(senderkey): """Remove senderkey from connections.""" _removeBackrefs(senderkey) connections.pop(senderkey, None) senders.pop(senderkey, None) def _removeBackrefs(senderkey): """Remove all back-references to this senderkey""" for receiver_list in connections.pop(senderkey, {}).values(): for receiver in receiver_list: _killBackref(receiver, senderkey) def _removeOldBackRefs(senderkey, signal, receiver, receivers): """Kill old sendersBack references from receiver This guards against multiple registration of the same receiver for a given signal and sender leaking memory as old back reference records build up. Also removes old receiver instance from receivers """ try: index = receivers.index(receiver) # need to scan back references here and remove senderkey except ValueError: return False else: oldReceiver = receivers[index] del receivers[index] found = 0 signals = connections.get(signal) if signals is not None: for sig,recs in connections.get(signal,{}).iteritems(): if sig != signal: for rec in recs: if rec is oldReceiver: found = 1 break if not found: _killBackref(oldReceiver, senderkey) return True return False def _killBackref(receiver, senderkey): """Do the actual removal of back reference from receiver to senderkey""" receiverkey = id(receiver) receivers_list = sendersBack.get(receiverkey, ()) while senderkey in receivers_list: try: receivers_list.remove(senderkey) except: break if not receivers_list: try: del sendersBack[ receiverkey ] except KeyError: pass return True
lgpl-3.0
-7,324,174,809,743,811,000
457,071,753,636,265,600
33.60404
86
0.634363
false
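The dispatcher record above routes signals to receivers held through weak references, so merely connecting a handler never keeps its owner alive. A much-reduced sketch of that core idea with weakref.WeakMethod / weakref.ref from the standard library; it deliberately omits the record's Any/Anonymous routing and back-reference bookkeeping:

import weakref

class TinySignal:
    # Receivers are stored weakly, so connecting a handler does not keep
    # its owner alive -- the central idea of the record's dispatcher.
    def __init__(self):
        self._receivers = []

    def connect(self, receiver):
        ref_type = weakref.WeakMethod if hasattr(receiver, "__self__") else weakref.ref
        self._receivers.append(ref_type(receiver))

    def send(self, **named):
        responses = []
        for ref in list(self._receivers):
            receiver = ref()
            if receiver is None:              # owner was garbage collected
                self._receivers.remove(ref)
                continue
            responses.append((receiver, receiver(**named)))
        return responses

class Listener:
    def on_event(self, value):
        return value * 2

listener = Listener()
signal = TinySignal()
signal.connect(listener.on_event)
print(signal.send(value=21))   # [(<bound method ...>, 42)]
del listener                   # dropping the owner silently disconnects it
print(signal.send(value=21))   # []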
Dunkas12/BeepBoopBot
lib/youtube_dl/extractor/tubitv.py
32
3025
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( ExtractorError, int_or_none, sanitized_Request, urlencode_postdata, ) class TubiTvIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?tubitv\.com/video/(?P<id>[0-9]+)' _LOGIN_URL = 'http://tubitv.com/login' _NETRC_MACHINE = 'tubitv' _GEO_COUNTRIES = ['US'] _TEST = { 'url': 'http://tubitv.com/video/283829/the_comedian_at_the_friday', 'md5': '43ac06be9326f41912dc64ccf7a80320', 'info_dict': { 'id': '283829', 'ext': 'mp4', 'title': 'The Comedian at The Friday', 'description': 'A stand up comedian is forced to look at the decisions in his life while on a one week trip to the west coast.', 'uploader_id': 'bc168bee0d18dd1cb3b86c68706ab434', }, } def _login(self): (username, password) = self._get_login_info() if username is None: return self.report_login() form_data = { 'username': username, 'password': password, } payload = urlencode_postdata(form_data) request = sanitized_Request(self._LOGIN_URL, payload) request.add_header('Content-Type', 'application/x-www-form-urlencoded') login_page = self._download_webpage( request, None, False, 'Wrong login info') if not re.search(r'id="tubi-logout"', login_page): raise ExtractorError( 'Login failed (invalid username/password)', expected=True) def _real_initialize(self): self._login() def _real_extract(self, url): video_id = self._match_id(url) video_data = self._download_json( 'http://tubitv.com/oz/videos/%s/content' % video_id, video_id) title = video_data['title'] formats = self._extract_m3u8_formats( self._proto_relative_url(video_data['url']), video_id, 'mp4', 'm3u8_native') self._sort_formats(formats) thumbnails = [] for thumbnail_url in video_data.get('thumbnails', []): if not thumbnail_url: continue thumbnails.append({ 'url': self._proto_relative_url(thumbnail_url), }) subtitles = {} for sub in video_data.get('subtitles', []): sub_url = sub.get('url') if not sub_url: continue subtitles.setdefault(sub.get('lang', 'English'), []).append({ 'url': self._proto_relative_url(sub_url), }) return { 'id': video_id, 'title': title, 'formats': formats, 'subtitles': subtitles, 'thumbnails': thumbnails, 'description': video_data.get('description'), 'duration': int_or_none(video_data.get('duration')), 'uploader_id': video_data.get('publisher_id'), }
gpl-3.0
-1,345,150,819,346,666,800
-7,207,844,266,421,260,000
32.611111
140
0.550413
false
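The TubiTV extractor record above logs in by POSTing urlencoded credentials with an explicit Content-Type header, then checks the returned page for a marker that only appears when logged in. A stdlib-only sketch of the request shape; nothing is actually sent, and the URL and field names are copied from the record rather than verified:

import urllib.parse
import urllib.request

def build_login_request(username, password):
    # Same shape as the record's login step: urlencoded body plus an
    # explicit Content-Type header. Nothing is sent until urlopen() runs.
    payload = urllib.parse.urlencode(
        {"username": username, "password": password}).encode("utf-8")
    request = urllib.request.Request("http://tubitv.com/login", data=payload)
    request.add_header("Content-Type", "application/x-www-form-urlencoded")
    return request

req = build_login_request("user", "secret")
print(req.get_method(), req.full_url, req.data)
# A real caller would then check the response body for the logged-in marker,
# e.g. 'id="tubi-logout"', as the record does.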
rmariano/pywars
game/views.py
2
7077
import json from django.http import HttpResponse, JsonResponse from django.shortcuts import render, redirect from django.views.decorators.csrf import csrf_exempt from django.views.decorators.http import require_POST from django.contrib.auth.decorators import login_required from django.views.decorators.cache import cache_page from django.db.models import Q from django.core.exceptions import ObjectDoesNotExist from .forms import BotBufferForm from models import Challenge, Bot, UserProfile from game.tasks import validate_bot def index(request, match_id=None): return render(request, 'home.html', {'tab': 'arena', 'match_id': match_id}) def about(request): return render(request, 'about.html', {'tab': 'about'}) @login_required def scoreboard(request): #bots = Bot.objects.all().order_by('-points') users = UserProfile.objects.filter(current_bot__isnull=False, user__is_active=True).order_by('-score') users = ((user, request.user.profile.latest_match_id(user)) for user in users) challenges = Challenge.objects.filter(requested_by=request.user.profile, challenger_bot=request.user.profile.current_bot, played=False, canceled=False) # if challenges.count() > 0: # pending_challenges = True # else: # pending_challenges = False pending_challenged_bots = [c.challenged_bot for c in challenges] played_challenges = Challenge.objects.filter(requested_by=request.user.profile, played=True, canceled=False) challenged_bots = [c.challenged_bot for c in played_challenges] return render(request, 'scoreboard.html', {'tab': 'score', 'users': users, 'challenged_bots': challenged_bots, 'pending_challenged_bots': pending_challenged_bots}) @login_required def tournament(request): user_query = UserProfile.objects.filter(current_bot__isnull=False, user__is_active=True, user__is_superuser=False) for user in user_query.all(): user.score = user.points user.save() users = user_query.order_by('-score') return render(request, 'tournament.html', {'tab': 'tournament', 'users': users}) @login_required def mybots(request): user_prof = UserProfile.objects.get(user=request.user) if request.method == 'POST': form = BotBufferForm(request.POST) if not form.is_valid(): print "ERROR in form!" return new_code = form.cleaned_data['code'] user_prof.code = new_code if 'publish_buffer' in request.POST: bot = Bot() bot.owner = user_prof bot.code = new_code bot.save() validate_bot.delay(bot.id, new_code) user_prof.current_bot = bot user_prof.save() return redirect('/mybots') else: form = BotBufferForm(instance=user_prof) return render(request, "my_bots.html", { 'form': form, 'user_prof': user_prof, 'tab': 'mybots', 'my_bots': reversed(Bot.objects.filter(owner=user_prof)) }) @login_required @csrf_exempt @require_POST def challenge(request): if request.is_ajax(): challenge_bot_id = json.loads(request.body)['msg'] challenge_bot = Bot.objects.get(pk=challenge_bot_id) # get the user current bot user_prof = UserProfile.objects.get(user=request.user) if not user_prof.current_bot: print "Can not challenge if does not have a bot!" return HttpResponse("Error") if challenge_bot.owner == user_prof: print "[CHEATING!] - wrong challenge bot!" return HttpResponse("Error") # challenged bot must be the owners current bot if not challenge_bot.is_current_bot: print "[CHEATING!] - wrong challenge bot!, must be the owners current bot!." 
return HttpResponse("Error") print "Got a challenge for bot: ", challenge_bot # Get pending challenges for this user challenges = Challenge.objects.filter(requested_by=user_prof, played=False, canceled=False) if challenges.count() > 0: # has pending challenges, must wait. return HttpResponse("Can not challenge more than one bot at a time") # Check if these bots haven't already played. #played_challs = Challenge.objects.filter(challenger_bot=user_prof.current_bot, # challenged_bot=challenge_bot, played=True) #if played_challs.count() > 0: # # has already played against this bot, must upload a new one # return HttpResponse("Already played against this bot!. Upload a new one.") if (user_prof.current_bot.valid != Bot.READY or challenge_bot.valid != Bot.READY): return JsonResponse({'success': False, 'msg': 'One of the bot is not READY' }) new_challengue = Challenge() new_challengue.requested_by = user_prof new_challengue.challenger_bot = user_prof.current_bot new_challengue.challenged_bot = challenge_bot new_challengue.save() return JsonResponse({'success': True}) @login_required @cache_page(60) def main_match(request): return HttpResponse(None) @login_required def my_matches(request): matches = Challenge.objects.filter(Q(challenger_bot__owner=request.user) | Q(challenged_bot__owner=request.user)).filter(canceled=False).filter(played=True).order_by('-creation_date').select_related('challenger_bot__owner__user', 'challenged_bot__owner__user', 'winner_bot__owner__user') return render(request, 'mymatches.html', {'matches': matches, 'tab': 'my-matches'}) @login_required def get_match(request, match_id): try: challenge = Challenge.objects.get(pk=match_id) if challenge.canceled: return JsonResponse({'success': False}) else: return JsonResponse({'success': True, 'data': json.loads(challenge.result)}) except ObjectDoesNotExist: return JsonResponse({'success': False}) @login_required def get_bot_status(request, bot_id): try: bot = Bot.objects.get(pk=bot_id) return JsonResponse({'success': True, 'status': bot.valid, 'code': bot.code ,'reason': bot.invalid_reason}) except ObjectDoesNotExist: return JsonResponse({'success': False}) @login_required def random_test_match(request): return HttpResponse(None) @login_required def bot_code(request, bot_pk): if bot_pk == "0": user_prof = UserProfile.objects.get(user=request.user) return HttpResponse(user_prof.my_buffer) bot_code = Bot.objects.get(pk=bot_pk, owner=request.user).code return HttpResponse(bot_code) @login_required @cache_page(10) def get_playlist(request): challenges = Challenge.objects.filter(played=True, canceled=False).order_by('-creation_date')[:50] if not challenges: return JsonResponse({'success': False, 'data': []}) challs = [ [ch.id, ch.caption()] for ch in challenges ] return JsonResponse({'success': True, 'data': challs})
mit
3,248,928,748,393,762,000
-5,577,583,937,960,186,000
35.668394
252
0.657341
false
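The pywars views record above guards challenge creation with a series of checks (the challenger needs a current bot, cannot target their own or a non-current bot, may have only one pending challenge, and both bots must be READY). A framework-free sketch of that guard chain; SimpleNamespace objects stand in for the ORM models:

from types import SimpleNamespace

READY = "READY"

def can_challenge(user_profile, challenged_bot, pending_challenges):
    # Each failed rule short-circuits with a message, mirroring the
    # record's challenge() view without any Django machinery.
    if user_profile.current_bot is None:
        return False, "cannot challenge without a bot"
    if challenged_bot.owner is user_profile:
        return False, "cannot challenge your own bot"
    if not challenged_bot.is_current_bot:
        return False, "challenged bot must be the owner's current bot"
    if pending_challenges > 0:
        return False, "cannot challenge more than one bot at a time"
    if user_profile.current_bot.valid != READY or challenged_bot.valid != READY:
        return False, "one of the bots is not READY"
    return True, "ok"

me = SimpleNamespace(current_bot=SimpleNamespace(valid=READY))
rival = SimpleNamespace(owner=object(), is_current_bot=True, valid=READY)
print(can_challenge(me, rival, pending_challenges=0))   # (True, 'ok')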
flotre/sickbeard-vfvo
cherrypy/_cpchecker.py
39
14290
import os import warnings import cherrypy class Checker(object): """A checker for CherryPy sites and their mounted applications. on: set this to False to turn off the checker completely. When this object is called at engine startup, it executes each of its own methods whose names start with "check_". If you wish to disable selected checks, simply add a line in your global config which sets the appropriate method to False: [global] checker.check_skipped_app_config = False You may also dynamically add or replace check_* methods in this way. """ on = True def __init__(self): self._populate_known_types() def __call__(self): """Run all check_* methods.""" if self.on: oldformatwarning = warnings.formatwarning warnings.formatwarning = self.formatwarning try: for name in dir(self): if name.startswith("check_"): method = getattr(self, name) if method and callable(method): method() finally: warnings.formatwarning = oldformatwarning def formatwarning(self, message, category, filename, lineno, line=None): """Function to format a warning.""" return "CherryPy Checker:\n%s\n\n" % message # This value should be set inside _cpconfig. global_config_contained_paths = False def check_app_config_entries_dont_start_with_script_name(self): for sn, app in cherrypy.tree.apps.items(): if not isinstance(app, cherrypy.Application): continue if not app.config: continue if sn == '': continue sn_atoms = sn.strip("/").split("/") for key in app.config.keys(): key_atoms = key.strip("/").split("/") if key_atoms[:len(sn_atoms)] == sn_atoms: warnings.warn( "The application mounted at %r has config " \ "entries that start with its script name: %r" % (sn, key)) def check_site_config_entries_in_app_config(self): for sn, app in cherrypy.tree.apps.iteritems(): if not isinstance(app, cherrypy.Application): continue msg = [] for section, entries in app.config.iteritems(): if section.startswith('/'): for key, value in entries.iteritems(): for n in ("engine.", "server.", "tree.", "checker."): if key.startswith(n): msg.append("[%s] %s = %s" % (section, key, value)) if msg: msg.insert(0, "The application mounted at %r contains the following " "config entries, which are only allowed in site-wide " "config. Move them to a [global] section and pass them " "to cherrypy.config.update() instead of tree.mount()." % sn) warnings.warn(os.linesep.join(msg)) def check_skipped_app_config(self): for sn, app in cherrypy.tree.apps.items(): if not isinstance(app, cherrypy.Application): continue if not app.config: msg = "The Application mounted at %r has an empty config." % sn if self.global_config_contained_paths: msg += (" It looks like the config you passed to " "cherrypy.config.update() contains application-" "specific sections. You must explicitly pass " "application config via " "cherrypy.tree.mount(..., config=app_config)") warnings.warn(msg) return def check_app_config_brackets(self): for sn, app in cherrypy.tree.apps.items(): if not isinstance(app, cherrypy.Application): continue if not app.config: continue for key in app.config.keys(): if key.startswith("[") or key.endswith("]"): warnings.warn( "The application mounted at %r has config " \ "section names with extraneous brackets: %r. " "Config *files* need brackets; config *dicts* " "(e.g. passed to tree.mount) do not." % (sn, key)) def check_static_paths(self): # Use the dummy Request object in the main thread. 
request = cherrypy.request for sn, app in cherrypy.tree.apps.items(): if not isinstance(app, cherrypy.Application): continue request.app = app for section in app.config: # get_resource will populate request.config request.get_resource(section + "/dummy.html") conf = request.config.get if conf("tools.staticdir.on", False): msg = "" root = conf("tools.staticdir.root") dir = conf("tools.staticdir.dir") if dir is None: msg = "tools.staticdir.dir is not set." else: fulldir = "" if os.path.isabs(dir): fulldir = dir if root: msg = ("dir is an absolute path, even " "though a root is provided.") testdir = os.path.join(root, dir[1:]) if os.path.exists(testdir): msg += ("\nIf you meant to serve the " "filesystem folder at %r, remove " "the leading slash from dir." % testdir) else: if not root: msg = "dir is a relative path and no root provided." else: fulldir = os.path.join(root, dir) if not os.path.isabs(fulldir): msg = "%r is not an absolute path." % fulldir if fulldir and not os.path.exists(fulldir): if msg: msg += "\n" msg += ("%r (root + dir) is not an existing " "filesystem path." % fulldir) if msg: warnings.warn("%s\nsection: [%s]\nroot: %r\ndir: %r" % (msg, section, root, dir)) # -------------------------- Compatibility -------------------------- # obsolete = { 'server.default_content_type': 'tools.response_headers.headers', 'log_access_file': 'log.access_file', 'log_config_options': None, 'log_file': 'log.error_file', 'log_file_not_found': None, 'log_request_headers': 'tools.log_headers.on', 'log_to_screen': 'log.screen', 'show_tracebacks': 'request.show_tracebacks', 'throw_errors': 'request.throw_errors', 'profiler.on': ('cherrypy.tree.mount(profiler.make_app(' 'cherrypy.Application(Root())))'), } deprecated = {} def _compat(self, config): """Process config and warn on each obsolete or deprecated entry.""" for section, conf in config.items(): if isinstance(conf, dict): for k, v in conf.items(): if k in self.obsolete: warnings.warn("%r is obsolete. Use %r instead.\n" "section: [%s]" % (k, self.obsolete[k], section)) elif k in self.deprecated: warnings.warn("%r is deprecated. Use %r instead.\n" "section: [%s]" % (k, self.deprecated[k], section)) else: if section in self.obsolete: warnings.warn("%r is obsolete. Use %r instead." % (section, self.obsolete[section])) elif section in self.deprecated: warnings.warn("%r is deprecated. Use %r instead." % (section, self.deprecated[section])) def check_compatibility(self): """Process config and warn on each obsolete or deprecated entry.""" self._compat(cherrypy.config) for sn, app in cherrypy.tree.apps.items(): if not isinstance(app, cherrypy.Application): continue self._compat(app.config) # ------------------------ Known Namespaces ------------------------ # extra_config_namespaces = [] def _known_ns(self, app): ns = ["wsgi"] ns.extend(app.toolboxes.keys()) ns.extend(app.namespaces.keys()) ns.extend(app.request_class.namespaces.keys()) ns.extend(cherrypy.config.namespaces.keys()) ns += self.extra_config_namespaces for section, conf in app.config.items(): is_path_section = section.startswith("/") if is_path_section and isinstance(conf, dict): for k, v in conf.items(): atoms = k.split(".") if len(atoms) > 1: if atoms[0] not in ns: # Spit out a special warning if a known # namespace is preceded by "cherrypy." 
if (atoms[0] == "cherrypy" and atoms[1] in ns): msg = ("The config entry %r is invalid; " "try %r instead.\nsection: [%s]" % (k, ".".join(atoms[1:]), section)) else: msg = ("The config entry %r is invalid, because " "the %r config namespace is unknown.\n" "section: [%s]" % (k, atoms[0], section)) warnings.warn(msg) elif atoms[0] == "tools": if atoms[1] not in dir(cherrypy.tools): msg = ("The config entry %r may be invalid, " "because the %r tool was not found.\n" "section: [%s]" % (k, atoms[1], section)) warnings.warn(msg) def check_config_namespaces(self): """Process config and warn on each unknown config namespace.""" for sn, app in cherrypy.tree.apps.items(): if not isinstance(app, cherrypy.Application): continue self._known_ns(app) # -------------------------- Config Types -------------------------- # known_config_types = {} def _populate_known_types(self): import __builtin__ as builtins b = [x for x in vars(builtins).values() if type(x) is type(str)] def traverse(obj, namespace): for name in dir(obj): # Hack for 3.2's warning about body_params if name == 'body_params': continue vtype = type(getattr(obj, name, None)) if vtype in b: self.known_config_types[namespace + "." + name] = vtype traverse(cherrypy.request, "request") traverse(cherrypy.response, "response") traverse(cherrypy.server, "server") traverse(cherrypy.engine, "engine") traverse(cherrypy.log, "log") def _known_types(self, config): msg = ("The config entry %r in section %r is of type %r, " "which does not match the expected type %r.") for section, conf in config.items(): if isinstance(conf, dict): for k, v in conf.items(): if v is not None: expected_type = self.known_config_types.get(k, None) vtype = type(v) if expected_type and vtype != expected_type: warnings.warn(msg % (k, section, vtype.__name__, expected_type.__name__)) else: k, v = section, conf if v is not None: expected_type = self.known_config_types.get(k, None) vtype = type(v) if expected_type and vtype != expected_type: warnings.warn(msg % (k, section, vtype.__name__, expected_type.__name__)) def check_config_types(self): """Assert that config values are of the same type as default values.""" self._known_types(cherrypy.config) for sn, app in cherrypy.tree.apps.items(): if not isinstance(app, cherrypy.Application): continue self._known_types(app.config) # -------------------- Specific config warnings -------------------- # def check_localhost(self): """Warn if any socket_host is 'localhost'. See #711.""" for k, v in cherrypy.config.items(): if k == 'server.socket_host' and v == 'localhost': warnings.warn("The use of 'localhost' as a socket host can " "cause problems on newer systems, since 'localhost' can " "map to either an IPv4 or an IPv6 address. You should " "use '127.0.0.1' or '[::1]' instead.")
gpl-3.0
-6,261,900,069,971,831,000
-8,743,636,467,445,215,000
43.378882
84
0.464171
false
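The checker routine in the record above warns when tools.staticdir settings mix absolute and relative paths or point at missing directories. As a rough, standalone illustration only (the function name and simplified branches below are invented here, not CherryPy API), the core of that check looks like this:

import os
import warnings

def check_staticdir(section, root, dir):
    # mirror the warning rules from the checker above, in simplified form
    msg = ""
    if dir is None:
        msg = "tools.staticdir.dir is not set."
    elif os.path.isabs(dir):
        if root:
            msg = "dir is an absolute path, even though a root is provided."
    elif not root:
        msg = "dir is a relative path and no root provided."
    else:
        fulldir = os.path.join(root, dir)
        if not os.path.exists(fulldir):
            msg = "%r (root + dir) is not an existing filesystem path." % fulldir
    if msg:
        warnings.warn("%s\nsection: [%s]" % (msg, section))

check_staticdir("/static", root="/srv/app", dir="does-not-exist")  # emits a warning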
GrognardsFromHell/TemplePlus
tpdatasrc/co8infra/scr/Spell741 - Ice Breath Weapon.py
1
1381
from toee import * def OnBeginSpellCast( spell ): print "Frozen Breath OnBeginSpellCast" print "spell.target_list=", spell.target_list print "spell.caster=", spell.caster, " caster.level= ", spell.caster_level game.particles( "sp-evocation-conjure", spell.caster ) def OnSpellEffect ( spell ): print "Frozen Breath OnSpellEffect" remove_list = [] dam = dice_new( '1d6' ) dam.number = spell.spell_level if dam.number > 6: dam.number = 6 game.particles( 'sp-Cone of Cold', spell.caster ) npc = spell.caster spell.dc = spell.dc + 5 if npc.name == 14999: ## Old White Dragon dam.number = 8 spell.dc = 27 # range = 25 + 5 * int(spell.caster_level/2) range = 60 target_list = list(game.obj_list_cone( spell.caster, OLC_CRITTERS, range, -30, 60 )) target_list.remove(spell.caster) for obj in target_list: if obj.reflex_save_and_damage( spell.caster, spell.dc, D20_Save_Reduction_Half, D20STD_F_NONE, dam, D20DT_COLD, D20DAP_UNSPECIFIED, D20A_CAST_SPELL, spell.id ) > 0: # saving throw successful obj.float_mesfile_line( 'mes\\spell.mes', 30001 ) else: # saving throw unsuccessful obj.float_mesfile_line( 'mes\\spell.mes', 30002 ) spell.target_list.remove_list( remove_list ) spell.spell_end(spell.id) def OnBeginRound( spell ): print "Frozen Breath OnBeginRound" def OnEndSpellCast( spell ): print "Frozen Breath OnEndSpellCast"
mit
-6,563,472,845,608,669,000
-488,483,602,191,197,000
25.557692
85
0.703838
false
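The script above caps the breath weapon's damage dice at the caster level (maximum 6d6) and halves the damage on a successful reflex save. The toee module is engine-specific, so the sketch below reimplements just that arithmetic in plain Python; the function name and dice roller are invented for illustration:

import random

def roll_breath_damage(caster_level, save_made, cap=6, die=6):
    dice = min(caster_level, cap)                       # 1d6 per caster level, capped at 6d6
    total = sum(random.randint(1, die) for _ in range(dice))
    return total // 2 if save_made else total           # successful reflex save halves the damage

print(roll_breath_damage(caster_level=9, save_made=True))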
Nowheresly/odoo
addons/delivery/stock.py
38
10914
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import fields,osv from openerp.tools.translate import _ import openerp.addons.decimal_precision as dp # Overloaded stock_picking to manage carriers : class stock_picking(osv.osv): _inherit = 'stock.picking' def _cal_weight(self, cr, uid, ids, name, args, context=None): res = {} for picking in self.browse(cr, uid, ids, context=context): total_weight = total_weight_net = 0.00 for move in picking.move_lines: if move.state != 'cancel': total_weight += move.weight total_weight_net += move.weight_net res[picking.id] = { 'weight': total_weight, 'weight_net': total_weight_net, } return res def _get_picking_line(self, cr, uid, ids, context=None): result = {} for line in self.pool.get('stock.move').browse(cr, uid, ids, context=context): result[line.picking_id.id] = True return result.keys() _columns = { 'carrier_id':fields.many2one("delivery.carrier","Carrier"), 'volume': fields.float('Volume', copy=False), 'weight': fields.function(_cal_weight, type='float', string='Weight', digits_compute= dp.get_precision('Stock Weight'), multi='_cal_weight', store={ 'stock.picking': (lambda self, cr, uid, ids, c={}: ids, ['move_lines'], 40), 'stock.move': (_get_picking_line, ['state', 'picking_id', 'product_id','product_uom_qty','product_uom'], 40), }), 'weight_net': fields.function(_cal_weight, type='float', string='Net Weight', digits_compute= dp.get_precision('Stock Weight'), multi='_cal_weight', store={ 'stock.picking': (lambda self, cr, uid, ids, c={}: ids, ['move_lines'], 40), 'stock.move': (_get_picking_line, ['state', 'picking_id', 'product_id','product_uom_qty','product_uom'], 40), }), 'carrier_tracking_ref': fields.char('Carrier Tracking Ref', copy=False), 'number_of_packages': fields.integer('Number of Packages', copy=False), 'weight_uom_id': fields.many2one('product.uom', 'Unit of Measure', required=True,readonly="1",help="Unit of measurement for Weight",), } def _prepare_shipping_invoice_line(self, cr, uid, picking, invoice, context=None): """Prepare the invoice line to add to the shipping costs to the shipping's invoice. 
:param browse_record picking: the stock picking being invoiced :param browse_record invoice: the stock picking's invoice :return: dict containing the values to create the invoice line, or None to create nothing """ if picking.sale_id: delivery_line = picking.sale_id.order_line.filtered(lambda l: l.is_delivery and l.invoiced) if delivery_line: return None carrier_obj = self.pool.get('delivery.carrier') grid_obj = self.pool.get('delivery.grid') currency_obj = self.pool.get('res.currency') if not picking.carrier_id or \ any(inv_line.product_id.id == picking.carrier_id.product_id.id for inv_line in invoice.invoice_line): return None grid_id = carrier_obj.grid_get(cr, uid, [picking.carrier_id.id], picking.partner_id.id, context=context) if not grid_id: raise osv.except_osv(_('Warning!'), _('The carrier %s (id: %d) has no delivery grid!') \ % (picking.carrier_id.name, picking.carrier_id.id)) quantity = sum([line.product_uom_qty for line in picking.move_lines]) price = grid_obj.get_price_from_picking(cr, uid, grid_id, invoice.amount_untaxed, picking.weight, picking.volume, quantity, context=context) if invoice.company_id.currency_id.id != invoice.currency_id.id: price = currency_obj.compute(cr, uid, invoice.company_id.currency_id.id, invoice.currency_id.id, price, context=dict(context or {}, date=invoice.date_invoice)) account_id = picking.carrier_id.product_id.property_account_income.id if not account_id: account_id = picking.carrier_id.product_id.categ_id\ .property_account_income_categ.id taxes = picking.carrier_id.product_id.taxes_id partner = picking.partner_id or False fp = invoice.fiscal_position or partner.property_account_position if partner: account_id = self.pool.get('account.fiscal.position').map_account(cr, uid, fp, account_id) taxes_ids = self.pool.get('account.fiscal.position').map_tax(cr, uid, fp, taxes, context=context) else: taxes_ids = [x.id for x in taxes] return { 'name': picking.carrier_id.name, 'invoice_id': invoice.id, 'uos_id': picking.carrier_id.product_id.uos_id.id, 'product_id': picking.carrier_id.product_id.id, 'account_id': account_id, 'price_unit': price, 'quantity': 1, 'invoice_line_tax_id': [(6, 0, taxes_ids)], } def _invoice_create_line(self, cr, uid, moves, journal_id, inv_type='out_invoice', context=None): invoice_obj = self.pool.get('account.invoice') invoice_line_obj = self.pool.get('account.invoice.line') invoice_ids = super(stock_picking, self)._invoice_create_line(cr, uid, moves, journal_id, inv_type=inv_type, context=context) delivey_invoices = {} for move in moves: for invoice in move.picking_id.sale_id.invoice_ids: if invoice.id in invoice_ids: delivey_invoices[invoice] = move.picking_id if delivey_invoices: for invoice, picking in delivey_invoices.items(): invoice_line = self._prepare_shipping_invoice_line(cr, uid, picking, invoice, context=context) if invoice_line: invoice_line_obj.create(cr, uid, invoice_line) invoice_obj.button_compute(cr, uid, [invoice.id], context=context, set_total=(inv_type in ('in_invoice', 'in_refund'))) return invoice_ids def _get_default_uom(self, cr, uid, context=None): uom_categ_id = self.pool.get('ir.model.data').xmlid_to_res_id(cr, uid, 'product.product_uom_categ_kgm') return self.pool.get('product.uom').search(cr, uid, [('category_id', '=', uom_categ_id), ('factor', '=', 1)])[0] _defaults = { 'weight_uom_id': lambda self, cr, uid, c: self._get_default_uom(cr, uid, c), } class stock_move(osv.osv): _inherit = 'stock.move' def _cal_move_weight(self, cr, uid, ids, name, args, context=None): res = {} uom_obj = 
self.pool.get('product.uom') for move in self.browse(cr, uid, ids, context=context): weight = weight_net = 0.00 if move.product_id.weight > 0.00: converted_qty = move.product_qty weight = (converted_qty * move.product_id.weight) if move.product_id.weight_net > 0.00: weight_net = (converted_qty * move.product_id.weight_net) res[move.id] = { 'weight': weight, 'weight_net': weight_net, } return res _columns = { 'weight': fields.function(_cal_move_weight, type='float', string='Weight', digits_compute= dp.get_precision('Stock Weight'), multi='_cal_move_weight', store={ 'stock.move': (lambda self, cr, uid, ids, c=None: ids, ['product_id', 'product_uom_qty', 'product_uom'], 30), }), 'weight_net': fields.function(_cal_move_weight, type='float', string='Net weight', digits_compute= dp.get_precision('Stock Weight'), multi='_cal_move_weight', store={ 'stock.move': (lambda self, cr, uid, ids, c=None: ids, ['product_id', 'product_uom_qty', 'product_uom'], 30), }), 'weight_uom_id': fields.many2one('product.uom', 'Unit of Measure', required=True,readonly="1",help="Unit of Measure (Unit of Measure) is the unit of measurement for Weight",), } def action_confirm(self, cr, uid, ids, context=None): """ Pass the carrier to the picking from the sales order (Should also work in case of Phantom BoMs when on explosion the original move is deleted) """ procs_to_check = [] for move in self.browse(cr, uid, ids, context=context): if move.procurement_id and move.procurement_id.sale_line_id and move.procurement_id.sale_line_id.order_id.carrier_id: procs_to_check += [move.procurement_id] res = super(stock_move, self).action_confirm(cr, uid, ids, context=context) pick_obj = self.pool.get("stock.picking") for proc in procs_to_check: pickings = list(set([x.picking_id.id for x in proc.move_ids if x.picking_id and not x.picking_id.carrier_id])) if pickings: pick_obj.write(cr, uid, pickings, {'carrier_id': proc.sale_line_id.order_id.carrier_id.id}, context=context) return res def _get_default_uom(self, cr, uid, context=None): uom_categ_id = self.pool.get('ir.model.data').xmlid_to_res_id(cr, uid, 'product.product_uom_categ_kgm') return self.pool.get('product.uom').search(cr, uid, [('category_id', '=', uom_categ_id),('factor','=',1)])[0] _defaults = { 'weight_uom_id': lambda self, cr, uid, c: self._get_default_uom(cr, uid, c), } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
-7,427,434,891,477,411,000
3,370,493,688,671,387,000
49.06422
183
0.582829
false
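_cal_weight in the record above simply sums gross and net weight over a picking's non-cancelled moves. A plain-Python sketch of that aggregation (dicts stand in for ORM browse records; names invented here):

def cal_weight(moves):
    total_weight = total_weight_net = 0.0
    for move in moves:
        if move["state"] != "cancel":
            total_weight += move["weight"]
            total_weight_net += move["weight_net"]
    return {"weight": total_weight, "weight_net": total_weight_net}

moves = [{"state": "done", "weight": 2.5, "weight_net": 2.0},
         {"state": "cancel", "weight": 9.9, "weight_net": 9.0}]
print(cal_weight(moves))  # {'weight': 2.5, 'weight_net': 2.0}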
fboers/jumeg
examples/do_MLICA.py
1
5891
""" Compute ICA object based on filtered and downsampled data. Identify ECG and EOG artifacts using MLICA and compare results to correlation & ctps analysis. Apply ICA object to filtered and unfiltered data. Ahmad Hasasneh, Nikolas Kampel, Praveen Sripad, N. Jon Shah, and Juergen Dammers "Deep Learning Approach for Automatic Classification of Ocular and Cardiac Artifacts in MEG Data" Journal of Engineering, vol. 2018, Article ID 1350692,10 pages, 2018. https://doi.org/10.1155/2018/1350692 """ import os.path as op import matplotlib.pylab as plt plt.ion() import numpy as np import mne from jumeg.decompose.ica_replace_mean_std import ICA, ica_update_mean_std from keras.models import load_model from jumeg.jumeg_noise_reducer import noise_reducer from jumeg.jumeg_preprocessing import get_ics_cardiac, get_ics_ocular from jumeg.jumeg_plot import plot_performance_artifact_rejection from jumeg.jumeg_utils import get_jumeg_path # config MLICA_threshold = 0.8 n_components = 60 njobs = 4 # for downsampling tmin = 0 tmax = tmin + 15000 flow_ecg, fhigh_ecg = 8, 20 flow_eog, fhigh_eog = 1, 20 ecg_thresh, eog_thresh = 0.3, 0.3 ecg_ch = 'ECG 001' eog1_ch = 'EOG 001' eog2_ch = 'EOG 002' reject = {'mag': 5e-12} refnotch = [50., 100., 150., 200., 250., 300., 350., 400.] data_path = op.join(get_jumeg_path(), 'data') print(data_path) # example filname raw_fname = "/Volumes/megraid21/sripad/cau_fif_data/jumeg_test_data/" \ "109925_CAU01A_100715_0842_2_c,rfDC-raw.fif" # load the model for artifact rejection # the details of the model is provided in the x_validation_shuffle_v4_split_23.txt model_name = op.join(data_path, "dcnn_model.hdf5") model = load_model(model_name) # noise reducer raw_nr = noise_reducer(raw_fname, reflp=5., return_raw=True) raw_nr = noise_reducer(raw_fname, raw=raw_nr, refhp=0.1, noiseref=['RFG ...'], return_raw=True) # 50HZ and 60HZ notch filter to remove noise raw = noise_reducer(raw_fname, raw=raw_nr, refnotch=refnotch, return_raw=True) picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=False, stim=False, exclude='bads') raw_filtered = raw.copy().filter(0., 45., picks=picks, filter_length='auto', l_trans_bandwidth='auto', h_trans_bandwidth='auto', n_jobs=njobs, method='fir', phase='zero', fir_window='hamming') # downsample the data to 250 Hz, necessary for the model raw_ds = raw_filtered.copy().resample(250, npad='auto', window='boxcar', stim_picks=None, n_jobs=njobs, events=None) raw_ds_chop = raw_ds.copy().crop(tmin=tmin*4./1000, tmax=tmax*4./1000) # downsampled raw raw_filtered_chop = raw_filtered.copy().crop(tmin=tmin*4./1000, tmax=tmax*4./1000) raw_chop = raw.copy().crop(tmin=tmin*4./1000, tmax=tmax*4./1000) ica = ICA(method='fastica', n_components=n_components, random_state=42, max_pca_components=None, max_iter=5000, verbose=None) # do the ICA decomposition on downsampled raw ica.fit(raw_ds_chop, picks=picks, reject=reject, verbose=None) sources = ica.get_sources(raw_ds_chop)._data # extract temporal and spatial components mm = np.float32(np.dot(ica.mixing_matrix_[:, :].T, ica.pca_components_[:ica.n_components_])) # use [:, :15000] to make sure it's 15000 data points chop = sources[:, :15000] chop_reshaped = np.reshape(chop, (len(chop), len(chop[0]), 1)) model_scores = model.predict([mm, chop_reshaped], verbose=1) bads_MLICA = [] # print model_scores for idx in range(0, len(model_scores)): if model_scores[idx][0] > MLICA_threshold: bads_MLICA.append(idx) # visualisation # ica.exclude = bads_MLICA # ica.plot_sources(raw_ds_chop, block=True) # compare MLICA to results from 
correlation and ctps analysis ica.exclude = [] print('Identifying components..') # get ECG/EOG related components using JuMEG ic_ecg = get_ics_cardiac(raw_filtered_chop, ica, flow=flow_ecg, fhigh=fhigh_ecg, thresh=ecg_thresh, tmin=-0.5, tmax=0.5, name_ecg=ecg_ch, use_CTPS=True)[0] # returns both ICs and scores (take only ICs) ic_eog = get_ics_ocular(raw_filtered_chop, ica, flow=flow_eog, fhigh=fhigh_eog, thresh=eog_thresh, name_eog_hor=eog1_ch, name_eog_ver=eog2_ch, score_func='pearsonr') bads_corr_ctps = list(ic_ecg) + list(ic_eog) bads_corr_ctps = list(set(bads_corr_ctps)) # remove potential duplicates bads_corr_ctps.sort() # visualisation # ica.exclude = bads_corr_ctps # ica.plot_sources(raw_chop, block=True) print('Bad components from MLICA:', bads_MLICA) print('Bad components from correlation & ctps:', bads_corr_ctps) # apply MLICA result to filtered and unfiltered data # exclude bad components identified by MLICA ica.exclude = bads_MLICA fnout_fig = '109925_CAU01A_100715_0842_2_c,rfDC,0-45hz,ar-perf' ica_filtered_chop = ica_update_mean_std(raw_filtered_chop, ica, picks=picks, reject=reject) raw_filtered_chop_clean = ica_filtered_chop.apply(raw_filtered_chop, exclude=ica.exclude, n_pca_components=None) ica_unfiltered_chop = ica_update_mean_std(raw_chop, ica, picks=picks, reject=reject) raw_unfiltered_chop_clean = ica_unfiltered_chop.apply(raw_chop, exclude=ica.exclude, n_pca_components=None) # create copy of original data since apply_ica_replace_mean_std changes the input data in place (raw and ica) raw_copy = raw.copy().crop(tmin=tmin*4./1000, tmax=tmax*4./1000) plot_performance_artifact_rejection(raw_copy, ica_unfiltered_chop, fnout_fig, meg_clean=raw_unfiltered_chop_clean, show=False, verbose=False, name_ecg=ecg_ch, name_eog=eog2_ch)
bsd-3-clause
4,831,181,168,214,128,000
7,724,786,509,221,929,000
38.013245
109
0.672382
false
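The component-selection step above reduces to thresholding the classifier's per-component scores at MLICA_threshold. A minimal numpy-only sketch of that step (scores assumed to be an n_components x 1 array, as in the script):

import numpy as np

def select_bad_components(model_scores, threshold=0.8):
    scores = np.asarray(model_scores)[:, 0]             # column 0: artifact probability
    return np.flatnonzero(scores > threshold).tolist()

print(select_bad_components([[0.95], [0.10], [0.83]]))  # -> [0, 2]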
bfaviero/ok
nodisk.py
1
2852
import os
import subprocess

H_NAME = 'oauth-kerberos-server'
H_FOLDER = os.path.join('/cgroup', H_NAME)
CGROUP_NAME = 'thegroup'
CGROUP_FOLDER = os.path.join(H_FOLDER, CGROUP_NAME)

MOUNT_CMD_PATH = '/bin/mount'
UMOUNT_CMD_PATH = '/bin/umount'
MOUNTPOINT_CMD_PATH = '/bin/mountpoint'

def prevent_swapping():
    """prevents the calling process (and any children spawned after calling)
    from being swapped out in whole or in part

    This is done by creating a Linux cgroup which the calling process is added
    to, then setting the memory.swappiness value for the cgroup to 0. According
    to the cgroup documentation, this accomplishes the desired effect.

    The calling process must be root (have euid 0), but it is fine if the
    process drops privileges after calling this."""

    if os.geteuid() != 0:
        raise Exception("you must have effective uid 0 to run this")

    # setup cgroup folders if they don't already exist
    makedirs(H_FOLDER, 0o700, NO_ERROR_IF_EXISTING)  # only root

    # mount cgroup hierarchy, if it isn't already mounted
    if mountpoint(H_FOLDER) != 0:
        code = mount('-t', 'cgroup', '-o', 'memory', H_NAME, H_FOLDER)
        if code != 0:
            raise Exception("unable to create cgroup using mount")

    # make the cgroup if it doesn't exist
    makedirs(CGROUP_FOLDER, 0o700, NO_ERROR_IF_EXISTING)

    # set memory.swappiness to 0 for the cgroup
    f = open(os.path.join(CGROUP_FOLDER, 'memory.swappiness'), 'w')
    f.write('0')
    f.close()  # we don't need the file anymore, plus we want the write to be flushed

    # add our pid to the cgroup
    f = open(os.path.join(CGROUP_FOLDER, 'tasks'), 'w')
    f.write(str(os.getpid()))
    f.close()  # we don't need the file anymore, plus we want the write to be flushed

ERROR_IF_EXISTING = 0     # raise an error if leaf exists
NO_ERROR_IF_EXISTING = 1  # don't raise an error if leaf exists
def makedirs(path, mode=0o777, behavior=ERROR_IF_EXISTING):
    """this does the same thing as os.makedirs, but offers the option to change
    the behavior in the event that the leaf directory to be created already exists"""
    try:
        os.makedirs(path, mode)
    except OSError as e:
        # If we encountered error because file exists, everything is
        # fine. Otherwise, re-throw the exception
        if e.errno != 17 or behavior == ERROR_IF_EXISTING:
            raise e

def mount(*argv):
    """calls the mount command with the given arguments, returning whatever the
    mount command returns"""
    return subprocess.call([MOUNT_CMD_PATH] + list(argv))

def umount(*argv):
    """calls the umount command with the given arguments, returning whatever the
    umount command returns"""
    return subprocess.call([UMOUNT_CMD_PATH] + list(argv))

def mountpoint(dirname):
    """calls the mountpoint command with the -q (quiet) argument followed by the
    dirname argument, returning whatever the command returns"""
    return subprocess.call([MOUNTPOINT_CMD_PATH, '-q', dirname])
mit
-5,373,618,445,044,859,000
-4,657,148,058,795,582,000
33.780488
84
0.728612
false
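A hedged usage sketch for the module above (path nodisk.py): the call has to happen while the process still has euid 0, and it assumes a cgroup-v1 style /cgroup mount point, so it is illustrative rather than portable:

import os
import nodisk

if os.geteuid() == 0:
    nodisk.prevent_swapping()   # pins memory.swappiness to 0 for this process's cgroup
    # ... drop privileges afterwards, e.g. os.setuid(<unprivileged uid>) ...
else:
    raise SystemExit("run as root so the cgroup can be created and mounted")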
explosion/thinc
thinc/tests/layers/test_combinators.py
1
7655
import pytest import numpy from numpy.testing import assert_allclose from thinc.api import clone, concatenate, noop, add, map_list from thinc.api import Linear, Dropout, Model, NumpyOps from thinc.layers import chain, tuplify @pytest.fixture(params=[1, 2, 9]) def nB(request): return request.param @pytest.fixture(params=[1, 6]) def nI(request): return request.param @pytest.fixture(params=[1, 5, 3]) def nH(request): return request.param @pytest.fixture(params=[1, 2, 7, 9]) def nO(request): return request.param @pytest.fixture def model1(nH, nI): return Linear(nH, nI) @pytest.fixture def model2(nO, nH): return Linear(nO, nH) @pytest.fixture def model3(nO): return Linear(nO, nO) def test_tuplify_zero(): with pytest.raises(TypeError): tuplify() def test_tuplify_one(model1): with pytest.raises(TypeError): tuplify(model1) def test_tuplify_two(model1, model2): model = tuplify(model1, model2) assert len(model.layers) == 2 def test_tuplify_operator_two(model1, model2): with Model.define_operators({"&": tuplify}): model = model1 & model2 assert len(model.layers) == 2 def test_tuplify_dulicates_input(): model = tuplify(noop(), noop()) ones = numpy.ones([10]) out = model.predict(ones) assert out == (ones, ones) def test_tuplify_three(model1, model2, model3): model = tuplify(model1, model2, model3) assert len(model.layers) == 3 def test_tuplify_operator_three(model1, model2, model3): # Previously we 'flattened' these nested calls. We might opt to do so # again, especially for the operators. with Model.define_operators({"&": tuplify}): model = model1 & model2 & model3 assert len(model.layers) == 2 assert len(model.layers[0].layers) == 2 def test_chain_zero(): with pytest.raises(TypeError): chain() def test_chain_one(model1): with pytest.raises(TypeError): chain(model1) def test_chain_two(model1, model2): model = chain(model1, model2) assert len(model.layers) == 2 def test_chain_operator_two(model1, model2): with Model.define_operators({">>": chain}): model = model1 >> model2 assert len(model.layers) == 2 def test_chain_three(model1, model2, model3): model = chain(model1, model2, model3) assert len(model.layers) == 3 def test_chain_operator_three(model1, model2, model3): # Previously we 'flattened' these nested calls. We might opt to do so # again, especially for the operators. with Model.define_operators({">>": chain}): model = model1 >> model2 >> model3 assert len(model.layers) == 2 assert len(model.layers[0].layers) == 2 def test_chain_right_branch(model1, model2, model3): # Previously we 'flattened' these nested calls. We might opt to do so # again, especially for the operators. 
merge1 = chain(model1, model2) merge2 = chain(merge1, model3) assert len(merge1.layers) == 2 assert len(merge2.layers) == 2 @pytest.mark.parametrize("ops", [NumpyOps(), NumpyOps(use_blis=True)]) def test_chain(ops): data = numpy.asarray([[1, 2, 3, 4]], dtype="f") model = chain(Linear(1), Dropout(), Linear(1)) model.ops = ops model.initialize(data, data) Y, backprop = model(data, is_train=True) backprop(Y) # Layers with and without nO/nI model = chain(Linear(1), Dropout(), Linear(1, 1)) model.initialize(data, data) # Setting dim on model model = chain(Linear(1), Dropout(), Linear(1)) model.set_dim("nO", 1) model.initialize(data, None) model = chain(Linear(1, 1), Dropout(), Linear(1, 1)) model.set_dim("nI", 1) model.initialize(None, data) # Not enough arguments with pytest.raises(TypeError): chain(Linear()) with pytest.raises(TypeError): chain() def test_concatenate_one(model1): model = concatenate(model1) assert isinstance(model, Model) def test_concatenate_two(model1, model2): model = concatenate(model1, model2) assert len(model.layers) == 2 def test_concatenate_operator_two(model1, model2): with Model.define_operators({"|": concatenate}): model = model1 | model2 assert len(model.layers) == 2 def test_concatenate_three(model1, model2, model3): model = concatenate(model1, model2, model3) assert len(model.layers) == 3 def test_concatenate_operator_three(model1, model2, model3): with Model.define_operators({"|": concatenate}): model = model1 | model2 | model3 assert len(model.layers) == 3 def test_clone_changes_predictions(nH, nI): model1 = Linear(nH) model = clone(model1, 10) ones = numpy.ones((10, nI), dtype="f") model.initialize(X=ones) output_from_cloned = model.predict(ones) output_from_orig = model1.predict(ones) assert output_from_cloned.sum() != output_from_orig.sum() def test_clone_gives_distinct_ids(nH, nI): model = clone(Linear(nH), 5) assert len(model.layers) == 5 seen_ids = set() for node in model.walk(): assert node.id not in seen_ids seen_ids.add(node.id) assert len(seen_ids) == 6 def test_clone_noop(): model = clone(Linear(), 0) assert len(model.layers) == 0 assert model.name == "noop" def test_concatenate_noop(): model = concatenate() assert len(model.layers) == 0 assert model.name == "noop" def test_noop(): data = numpy.asarray([1, 2, 3], dtype="f") model = noop(Linear(), Linear()) model.initialize(data, data) Y, backprop = model(data, is_train=True) assert numpy.array_equal(Y, data) dX = backprop(Y) assert numpy.array_equal(dX, data) def test_add(): data = numpy.asarray([[1, 2, 3, 4]], dtype="f") model = add(Linear(), Linear()) model.initialize(data, data) Y, backprop = model(data, is_train=True) Y2 = sum(layer.predict(data) for layer in model.layers) assert numpy.array_equal(Y, Y2) dX = backprop(Y) assert dX.shape == data.shape # Test that nesting works model2 = add(model, Linear()) assert len(model2.layers) == 3 model.initialize(data, data) Y = model2.predict(data) Y2 = sum(layer.predict(data) for layer in model2.layers) assert numpy.array_equal(Y, Y2) def test_add_edge_cases(): data = numpy.asarray([[1, 2, 3, 4]], dtype="f") with pytest.raises(TypeError): add() model = add(Linear(), Linear()) model._layers = [] Y, backprop = model(data, is_train=True) assert numpy.array_equal(data, Y) dX = backprop(Y) assert numpy.array_equal(dX, data) def test_concatenate(): data = numpy.asarray([[1, 2, 3], [4, 5, 6]], dtype="f") model = concatenate(Linear(), Linear()) model.initialize(data, data) Y, backprop = model(data, is_train=True) assert Y.shape[1] == 
sum([layer.predict(data).shape[1] for layer in model.layers]) dX = backprop(Y) assert dX.shape == data.shape def test_map_list(): nI = 4 nO = 9 Xs = [ numpy.zeros((6, nI), dtype="f"), numpy.ones((3, nI), dtype="f") ] Y_shapes = [(x.shape[0], nO) for x in Xs] model = map_list(Linear()) model.initialize(X=Xs, Y=[numpy.zeros(shape, dtype="f") for shape in Y_shapes]) Ys, backprop = model(Xs, is_train=True) assert isinstance(Ys, list) assert len(Ys) == len(Xs) layer = model.layers[0] for X, Y in zip(Xs, Ys): assert_allclose(layer.predict(X), Y) dXs = backprop(Ys) assert isinstance(dXs, list) assert len(dXs) == len(Xs) assert dXs[0].shape == Xs[0].shape assert dXs[1].shape == Xs[1].shape
mit
8,739,076,560,107,319,000
-5,154,266,064,047,677,000
25.954225
86
0.640366
false
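For reference, the combinators exercised by these tests compose as follows: chain feeds one layer's output into the next, while concatenate joins sibling outputs feature-wise. A small usage sketch with the same thinc.api imports as the tests above (dimensions here are arbitrary):

import numpy
from thinc.api import chain, concatenate, Linear

X = numpy.zeros((8, 4), dtype="f")
model = chain(Linear(5), concatenate(Linear(3), Linear(2)))  # 4 -> 5 -> (3 + 2)
model.initialize(X=X)
Y = model.predict(X)
print(Y.shape)  # (8, 5)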
tersmitten/ansible
lib/ansible/modules/cloud/vmware/vmware_tag.py
7
8097
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright: (c) 2018, Ansible Project # Copyright: (c) 2018, Abhijeet Kasurde <[email protected]> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = { 'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community' } DOCUMENTATION = r''' --- module: vmware_tag short_description: Manage VMware tags description: - This module can be used to create / delete / update VMware tags. - Tag feature is introduced in vSphere 6 version, so this module is not supported in the earlier versions of vSphere. - All variables and VMware object names are case sensitive. version_added: '2.6' author: - Abhijeet Kasurde (@Akasurde) notes: - Tested on vSphere 6.5 requirements: - python >= 2.6 - PyVmomi - vSphere Automation SDK options: tag_name: description: - The name of tag to manage. required: True tag_description: description: - The tag description. - This is required only if C(state) is set to C(present). - This parameter is ignored, when C(state) is set to C(absent). - Process of updating tag only allows description change. required: False default: '' category_id: description: - The unique ID generated by vCenter should be used to. - User can get this unique ID from facts module. required: False state: description: - The state of tag. - If set to C(present) and tag does not exists, then tag is created. - If set to C(present) and tag exists, then tag is updated. - If set to C(absent) and tag exists, then tag is deleted. - If set to C(absent) and tag does not exists, no action is taken. required: False default: 'present' choices: [ 'present', 'absent' ] extends_documentation_fragment: vmware_rest_client.documentation ''' EXAMPLES = r''' - name: Create a tag vmware_tag: hostname: '{{ vcenter_hostname }}' username: '{{ vcenter_username }}' password: '{{ vcenter_password }}' validate_certs: no category_id: 'urn:vmomi:InventoryServiceCategory:e785088d-6981-4b1c-9fb8-1100c3e1f742:GLOBAL' tag_name: Sample_Tag_0002 tag_description: Sample Description state: present delegate_to: localhost - name: Update tag description vmware_tag: hostname: '{{ vcenter_hostname }}' username: '{{ vcenter_username }}' password: '{{ vcenter_password }}' tag_name: Sample_Tag_0002 tag_description: Some fancy description state: present delegate_to: localhost - name: Delete tag vmware_tag: hostname: '{{ vcenter_hostname }}' username: '{{ vcenter_username }}' password: '{{ vcenter_password }}' tag_name: Sample_Tag_0002 state: absent delegate_to: localhost ''' RETURN = r''' results: description: dictionary of tag metadata returned: on success type: dict sample: { "msg": "Tag 'Sample_Tag_0002' created.", "tag_id": "urn:vmomi:InventoryServiceTag:bff91819-f529-43c9-80ca-1c9dfda09441:GLOBAL" } ''' from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.vmware_rest_client import VmwareRestClient class VmwareTag(VmwareRestClient): def __init__(self, module): super(VmwareTag, self).__init__(module) self.global_tags = dict() # api_client to call APIs instead of individual service self.tag_service = self.api_client.tagging.Tag self.tag_name = self.params.get('tag_name') self.get_all_tags() self.category_service = self.api_client.tagging.Category def ensure_state(self): """ Manage internal states of tags """ desired_state = self.params.get('state') states = { 'present': { 'present': self.state_update_tag, 'absent': 
self.state_create_tag, }, 'absent': { 'present': self.state_delete_tag, 'absent': self.state_unchanged, } } states[desired_state][self.check_tag_status()]() def state_create_tag(self): """ Create tag """ tag_spec = self.tag_service.CreateSpec() tag_spec.name = self.tag_name tag_spec.description = self.params.get('tag_description') category_id = self.params.get('category_id', None) if category_id is None: self.module.fail_json(msg="'category_id' is required parameter while creating tag.") category_found = False for category in self.category_service.list(): category_obj = self.category_service.get(category) if category_id == category_obj.id: category_found = True break if not category_found: self.module.fail_json(msg="Unable to find category specified using 'category_id' - %s" % category_id) tag_spec.category_id = category_id tag_id = self.tag_service.create(tag_spec) if tag_id: self.module.exit_json(changed=True, results=dict(msg="Tag '%s' created." % tag_spec.name, tag_id=tag_id)) self.module.exit_json(changed=False, results=dict(msg="No tag created", tag_id='')) def state_unchanged(self): """ Return unchanged state """ self.module.exit_json(changed=False) def state_update_tag(self): """ Update tag """ changed = False tag_id = self.global_tags[self.tag_name]['tag_id'] results = dict(msg="Tag %s is unchanged." % self.tag_name, tag_id=tag_id) tag_update_spec = self.tag_service.UpdateSpec() tag_desc = self.global_tags[self.tag_name]['tag_description'] desired_tag_desc = self.params.get('tag_description') if tag_desc != desired_tag_desc: tag_update_spec.description = desired_tag_desc self.tag_service.update(tag_id, tag_update_spec) results['msg'] = 'Tag %s updated.' % self.tag_name changed = True self.module.exit_json(changed=changed, results=results) def state_delete_tag(self): """ Delete tag """ tag_id = self.global_tags[self.tag_name]['tag_id'] self.tag_service.delete(tag_id=tag_id) self.module.exit_json(changed=True, results=dict(msg="Tag '%s' deleted." % self.tag_name, tag_id=tag_id)) def check_tag_status(self): """ Check if tag exists or not Returns: 'present' if tag found, else 'absent' """ ret = 'present' if self.tag_name in self.global_tags else 'absent' return ret def get_all_tags(self): """ Retrieve all tag information """ for tag in self.tag_service.list(): tag_obj = self.tag_service.get(tag) self.global_tags[tag_obj.name] = dict(tag_description=tag_obj.description, tag_used_by=tag_obj.used_by, tag_category_id=tag_obj.category_id, tag_id=tag_obj.id ) def main(): argument_spec = VmwareRestClient.vmware_client_argument_spec() argument_spec.update( tag_name=dict(type='str', required=True), tag_description=dict(type='str', default='', required=False), category_id=dict(type='str', required=False), state=dict(type='str', choices=['present', 'absent'], default='present', required=False), ) module = AnsibleModule(argument_spec=argument_spec) vmware_tag = VmwareTag(module) vmware_tag.ensure_state() if __name__ == '__main__': main()
gpl-3.0
-8,923,983,244,012,545,000
-8,685,601,000,009,260,000
31.781377
117
0.592689
false
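ensure_state above dispatches on the pair (desired state, current state) through a nested dict rather than an if/else chain. A stripped-down, standalone sketch of that table-driven pattern (handlers replaced by lambdas for illustration):

def ensure_state(desired, current, actions):
    return actions[desired][current]()

actions = {
    "present": {"absent": lambda: "create tag",  "present": lambda: "update tag"},
    "absent":  {"present": lambda: "delete tag", "absent": lambda: "nothing to do"},
}
print(ensure_state("present", "absent", actions))  # -> create tag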
shadyueh/pyranking
env/lib/python2.7/site-packages/django/contrib/auth/handlers/modwsgi.py
537
1344
from django import db
from django.contrib import auth
from django.utils.encoding import force_bytes


def check_password(environ, username, password):
    """
    Authenticates against Django's auth database

    mod_wsgi docs specify None, True, False as return value depending
    on whether the user exists and authenticates.
    """

    UserModel = auth.get_user_model()
    # db connection state is managed similarly to the wsgi handler
    # as mod_wsgi may call these functions outside of a request/response cycle
    db.reset_queries()

    try:
        try:
            user = UserModel._default_manager.get_by_natural_key(username)
        except UserModel.DoesNotExist:
            return None
        if not user.is_active:
            return None
        return user.check_password(password)
    finally:
        db.close_old_connections()


def groups_for_user(environ, username):
    """
    Authorizes a user based on groups
    """

    UserModel = auth.get_user_model()
    db.reset_queries()

    try:
        try:
            user = UserModel._default_manager.get_by_natural_key(username)
        except UserModel.DoesNotExist:
            return []
        if not user.is_active:
            return []
        return [force_bytes(group.name) for group in user.groups.all()]
    finally:
        db.close_old_connections()
mit
-2,827,836,929,221,085,700
-6,825,178,338,438,233,000
27
78
0.645833
false
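These two hooks are normally imported and called by mod_wsgi itself (via the WSGIAuthUserScript and WSGIAuthGroupScript directives). Calling them directly requires a configured Django project; in the sketch below the settings module name is hypothetical:

import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mysite.settings")  # hypothetical project settings

from django.contrib.auth.handlers.modwsgi import check_password, groups_for_user

def demo(environ=None):
    environ = environ or {}
    ok = check_password(environ, "alice", "secret")   # True, False, or None for unknown users
    groups = groups_for_user(environ, "alice")        # group names as bytes, [] if unknown/inactive
    return ok, groups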
matejv/micropython-weatherstation
weatherstation.py
1
4667
from machine import I2C, Pin, Timer import socket import utime as time import dht from bmp180 import BMP180 # https://github.com/micropython-IMU/micropython-bmp180 from esp8266_i2c_lcd import I2cLcd # https://github.com/dhylands/python_lcd/ import clock, nethelper class WeatherStation: DHTPIN = 14 # DHT data pin BMPSCL = 5 # BMP I2C clock pin BMPSDA = 4 # BMP I2C data pin DISSCL = 12 # LCD I2C clock pin DISSDA = 13 # LCD I2C data pin DEFAULT_LCD_ADDR = 0x27 DEFAULT_INTERVAL = 10 MEASURE_TRIES = 3 SERVER_NAME = 'graphite.example.com' # hostname of your graphite server SERVER_PORT = 2003 def __init__(self): self.bmp = None self.dht = None self.lcd = None self.socket = None self.online = False self.interval = self.DEFAULT_INTERVAL self.init_lcd() self.init_net() self.init_bmp() self.init_dht() self.init_clock() self.init_socket() self.timer = Timer(-1) self.timer.init(period=self.interval*1000, mode=Timer.PERIODIC, callback=self.update) self.update(None) def update(self, timer): print('update') self.check_net() self.update_clock() self.measure() self.update_lcd() self.send_data() def stop(self): self.timer.deinit() def measure(self): print('measure') tries = self.MEASURE_TRIES while tries: try: self.dht.measure() except: tries -= 1 def update_lcd(self): print('update_lcd') if self.online: now = time.localtime() time_str = '%02d:%02d' % (now[3], now[4]) else: time_str = 'noNet' #self.lcd.clear() # this will cause flicker self.lcd.move_to(0, 0) # better to overwrite whole display self.lcd.putstr('T: %.1f\xdfC H: %.0f%% %s' % ( self.dht.temperature(), self.dht.humidity(), time_str )) def send_data(self): print('send_data') if not self.socket: print('no_socket') return data = 'weatherstation.temp.dht {tempdht:.1f} {ts}\nweatherstation.hum.dht {humdht:.0f} {ts}\nweatherstation.temp.bmp {tempbmp:.1f} {ts}\nweatherstation.pressure.bmp {pressurebmp:.1f} {ts}\nweatherstation.time {ts} {ts}\n'.format( tempdht=self.dht.temperature(), humdht=self.dht.humidity(), tempbmp=self.bmp.temperature, pressurebmp=self.bmp.pressure, ts=self.clock.get_ts() ) try: print('writing socket') self.socket.write(data) print('socket write complete') except: print('wtite failed') self.check_net(recheck=True) self.init_socket() def init_bmp(self): bus = I2C(scl=Pin(self.BMPSCL), sda=Pin(self.BMPSDA), freq=100000) self.bmp = BMP180(bus) def init_dht(self): self.dht = dht.DHT22(Pin(self.DHTPIN)) def init_lcd(self): i2c = I2C(scl=Pin(self.DISSCL), sda=Pin(self.DISSDA), freq=400000) self.lcd = I2cLcd(i2c, self.DEFAULT_LCD_ADDR, 2, 16) def init_net(self): self.net = nethelper.NetHelper() self.net.check() def check_net(self, recheck=False): info = self.net.check(recheck) if info and self.online: return True elif info and not self.online: import utime self.online = True self.got_online() self.lcd.clear() self.lcd.putstr('% 16s%s' % (info[1], info[0])) utime.sleep_ms(5000) self.lcd.clear() return True elif not info and self.online: import utime self.online = False self.lcd.clear() self.lcd.putstr('Reconnecting...') utime.sleep_ms(5000) self.lcd.clear() return False elif not info and not self.online: return False def got_online(self): self.init_socket() self.init_clock() def init_socket(self): print('init_socket') if self.online: addr_info = socket.getaddrinfo(self.SERVER_NAME, self.SERVER_PORT) addr = addr_info[0][-1] self.socket = socket.socket() self.socket.connect(addr) else: self.socket = None def init_clock(self): self.clock = clock.Clock() def update_clock(self): if self.online: self.clock.sync()
mit
6,552,055,342,110,769,000
-8,467,223,203,600,615,000
29.703947
238
0.552603
false
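send_data above writes Graphite's plaintext protocol: one "metric value timestamp" line per reading, sent to port 2003. A host-side CPython sketch of the same payload format (server name reused from the placeholder in the record):

import socket
import time

def send_metrics(host, port, readings):
    ts = int(time.time())
    payload = "".join("%s %s %d\n" % (name, value, ts) for name, value in readings.items())
    with socket.create_connection((host, port)) as s:
        s.sendall(payload.encode())

# send_metrics("graphite.example.com", 2003, {"weatherstation.temp.dht": 21.4})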
Tao-Ma/gpdb
src/test/tinc/tincrepo/mpp/models/mpp_tc.py
12
25269
""" Copyright (c) 2004-Present Pivotal Software, Inc. This program and the accompanying materials are made available under the terms of the under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import inspect import os import re import sys import time import tinctest from tinctest.runner import TINCTextTestResult from tinctest.lib.system import TINCSystem from gppylib.commands.base import Command from mpp.lib.datagen.databases import __databases__, TINCTestDatabase, TINCDatagenException from mpp.lib.gplog import GpLog from mpp.lib.gpstop import GpStop from mpp.lib.PSQL import PSQL import unittest2 as unittest class MPPTestCaseException(Exception): """ The exception that will be thrown for any errors or failures in MPPTestCase """ pass class MPPDUT(object): """ This class is used to find the Device Under Test. It provides instance variables for product name and version_string. It will only be used by MPPMetaClassType to dynamically change a class's MRO. It also provides a product_environment dictionary to store gpopt version if found. """ def __init__(self, product = None, version_string = None): # Valid products as of 11/25/13: gpdb, hawq self.product = product # version_string has this format: major#.minor#.service_pack#.version_number<hotfix_alphanumeral> # It can be incomplete: 4.3 or 4.2.1 self.version_string = version_string self.product_environment = {} # First, get the product version if (self.product is None) or (self.version_string is None): self._get_product_version() # Next, get gpopt (GP Optimizer Mode) version gpopt_version = self._get_gpopt_version() if gpopt_version: self.product_environment['gpopt'] = gpopt_version def _get_version_string_output(self): # Version string is the output of postgres --gp-version or postgress --version # Output in gpdb: "postgres (Greenplum Database) 4.3_PARISTX_ORCA build 43249" # Output in hawq: "postgres (HAWQ) 4.2.0 build 1" # Output in postgres: "postgres (PostgreSQL) 9.2.4" # The following command will fail if the DUT is postgres version_command = Command(name = 'get gp-version', cmdStr = 'postgres --gp-version') try: version_command.run(validateAfter = True) except Exception, e: tinctest.logger.debug("Failed while running get gp-version: %s" %e) version_command = Command(name = 'get version', cmdStr = 'postgres --version') version_command.run(validateAfter = True) return version_command.get_results().stdout def _get_product_version(self): version_string_information = '' try: version_string_information = self._get_version_string_output() except Exception, e: tinctest.logger.exception("Failure while getting version information: %s" %e) tinctest.logger.critical("Could not detect one of the supported products (gpdb, hawq or postgres) in your environment. Make sure your environment is set correctly.") raise MPPTestCaseException("Could not detect one of the supported products (gpdb, hawq or postgres) in your environment. 
Make sure your environment is set correctly.") match_object = re.search("\((.+)\)", version_string_information) database_match = match_object.group(0) if "HAWQ" in database_match: self.product = 'hawq' # Replace version_string_information to point to hawq-version version_command = Command(name = 'get hawq-version', cmdStr = 'postgres --hawq-version') version_command.run(validateAfter = True) version_string_information = version_command.get_results().stdout tinctest.logger.info("DUT is detected to be hawq. Version string: %s" %version_string_information) elif "Greenplum Database" in database_match: tinctest.logger.info("DUT is detected to be gpdb. Version string: %s" %version_string_information) self.product = 'gpdb' elif "PostgreSQL" in database_match: tinctest.logger.info("DUT is detected to be postgres. Version string: %s" %version_string_information) self.product = 'postgres' else: tinctest.logger.critical("Unexpected version string obtained: %s." %version_string_information) tinctest.logger.critical("Could not detect one of the supported products (gpdb, hawq or postgres) in your environment. Make sure your environment is set correctly.") raise MPPTestCaseException("Unexpected version string obtained: %s" %version_string_information) # At this point, version_string_information can be extracted to get the exact version # version_string_information for gpdb (--gp_version): "postgres (Greenplum Database) 4.3_PARISTX_ORCA build 43249" # version_string_information for hawq (--hawq_version): "postgres (HAWQ) 1.1.4.0 build dev" # version_string_information for postgres (--version): "postgres (PostgreSQL) 9.2.4" version_string_information_match_list = re.findall("\)\s(.*)", version_string_information) if version_string_information_match_list: # Remove everything after space and underscore version_part = re.sub(r'\s.*$', r'', version_string_information_match_list[0]) version_part = re.sub(r'_.*$', r'', version_part) # At this point, we have a version self.version_string = version_part else: tinctest.logger.critical("Unexpected version string obtained: %s." %version_string_information) tinctest.logger.critical("Could not detect one of the supported products (gpdb, hawq or postgres) in your environment. Make sure your environment is set correctly.") raise MPPTestCaseException("Unexpected version string obtained: %s" %version_string_information) def _get_gpopt_version(self): # Return gpopt_version. Return empty, if not found. gp_opt_version = "" try: # The following command will fail if the DUT doesn't have optimizer gp_opt_version_cmd_results = {} psql_stdout = PSQL.run_sql_command("select gp_opt_version()", flags = "-t -q", results=gp_opt_version_cmd_results).strip() if gp_opt_version_cmd_results['rc'] or gp_opt_version_cmd_results['stderr'] != "": # received an error return gp_opt_version # Output is in the format of: GPOPT version: 1.241, GPOS version: 1.90, Xerces version: 3.1.1-p1 # We want 1.241 from the above gp_opt_version = psql_stdout.split()[2].strip(",") except Exception, e: tinctest.logger.debug("Failed while running select gp_opt_version: %s" %e) return gp_opt_version def __str__(self): return "DUT: product: %s ; version: %s" % (self.product, self.version_string) class _MPPMetaClassType(type): """ MPPMetaClassType class overrides new and init methods of metaclass type. It is used to dynamically change a class's MRO for a DUT. It does this by iterating through the base classes and checking if there are any product-specific hidden models of those base classes. 
MPPTestCase and all of its derived classes are of type MPPMetaClassType. Product-specific hidden models have to follow these rules: - They have to reside in the same module as the base class. - They have to be prefixed and suffixed with two underscores (__) - They have to have the lower-case product name in the class name, following the prefix of __ - The product name has to be same as the one provided by DUT class. An example of product-specific hidden model: __gpdbSQLTestCase__ in the same module as SQLTestCase for gpdb DUT. """ # Class variable to keep track of DUT DUT = MPPDUT() tinctest.logger.info(DUT) def __new__(metaclass, clsname, bases, dct): # Add DUT to class's built-in dictionary dct['__product__'] = _MPPMetaClassType.DUT.product dct['__version_string__'] = _MPPMetaClassType.DUT.version_string dct['__product_environment__'] = _MPPMetaClassType.DUT.product_environment dct['change_mro'] = False dct['make_me_product_agnostic'] = classmethod(metaclass.make_me_product_agnostic) new_bases = () if (clsname.startswith('__') and clsname.endswith('__')) or (clsname is 'MPPTestCase'): # If here, our clsname is one of the product-specific hidden models or MPPTestCase # No need to check bases new_bases += bases else: # If here, we need to check each of our clsname's bases # and see if each has product-specific class for base in bases: new_base_name = '__' + _MPPMetaClassType.DUT.product + base.__name__ + '__' # Variable to track whether we found a match for the base try: """ Product-specific hidden models should always reside in the same module as the base class """ exec ('from ' + base.__module__ + ' import ' + new_base_name) new_bases += (eval(new_base_name),) except: new_bases += (base,) return super(_MPPMetaClassType, metaclass).__new__(metaclass, clsname, new_bases, dct) def __init__(cls, clsname, bases, dct): super(_MPPMetaClassType, cls).__init__(clsname, bases, dct) @staticmethod def make_me_product_agnostic(cls): # Change the class variable change_mro to let mro() method know that this class needs to prepend product specific model cls.change_mro = True # The line below (fakingly changing the cls' bases) retriggers mro() method cls.__bases__ = cls.__bases__ + tuple() def mro(cls): default_mro = super(_MPPMetaClassType, cls).mro() if hasattr(cls, "change_mro") and cls.change_mro: new_class_name = '__' + _MPPMetaClassType.DUT.product + cls.__name__ + '__' try: exec ('from ' + cls.__module__ + ' import ' + new_class_name) new_class_object = eval(new_class_name) default_mro.insert(0, new_class_object) return default_mro except: # No hidden class defined. Nothing to do pass return default_mro @tinctest.skipLoading("Test model. No tests loaded.") class MPPTestCase(tinctest.TINCTestCase): """ MPPTestCase model is a top-level executor for all MPP test cases. All MPP test cases (HAWQ, GPDB, etc.) should either directly or indirectly inherit from MPPTestCase. It inherits from TINCTestCase, and is a parent of SQLTestCase. When a test of this type fails, we do the following: -> if restart_on_fatal_failure is set to True, inspect logs for errors and restart the cluster. -> if gather_logs_on_failure is set to True, gather master and segment logs for the duration of the test case when this test case fails. @metadata: host: Host where the MPP database resides. Defaults to localhost. @metadata: db_name: Database where the test case will be executed. Defaults to system environment variable DBNAME. @metadata: username: Username to use to login to the database. 
Defaults to system environment variable USER. @metadata: password: Password to use to login to the database. If not given, it assumes that user has trust authentication. @metadata: gather_logs_on_fatal_failure: Gather master and segment logs in case of a fatal failure. @metadata: restart_on_fatal_failure: Boolean to determine if the cluster should be restarted on failure. If the metadata doesn't exist, it won't be restarted. @undocumented: defaultTestResult @undocumented: __metaclass__ """ # MPPTestCase class is of type MPPMetaClassType # MPPMetaClassType will take of reconfiguring the bases of all the derived classes that have product-specific hidden models __metaclass__ = _MPPMetaClassType #: Directory relative to the test module where all the output artifacts will be collected. Defaults to 'output/' out_dir = 'output/' #: Database name to be used for any connection to the test cluster. Defaults to None. This database will also be configured in setUpClass on MPPTestCase db_name = None def __init__(self, methodName, baseline_result = None): #: boolean that determines whether or not to restart the cluster on a fatal failure. Defaults to False. self.restart_on_fatal_failure = False #: boolean that determines whether or not to gather logs on failure. Defaults to False self.gather_logs_on_failure = False super(MPPTestCase, self).__init__(methodName, baseline_result) @classmethod def setUpClass(cls): """ setUpClass of MPPTestCase does the following: -> Create out directory for the class if it does not exist. This is thread safe in case an MPPTestCase is used concurrently within a ScenarioTestCase or ConcurrencyTestCase -> Configures the database specified at the class level variable 'db_name' """ tinctest.logger.trace_in() #QAINF-760 - we need to treat db_name in the class level doc string as a class level variable #rather than an instance level variable ds = cls.__doc__ if ds: lines = ds.splitlines() for line in lines: line = line.strip() if line.find('@db_name') != 0: continue line = line[1:] if len(line.split()) <= 1: break (key, cls.db_name) = line.split(' ', 1) break super(MPPTestCase, cls).setUpClass() if not os.path.exists(cls.get_out_dir()): TINCSystem.make_dirs(cls.get_out_dir(), ignore_exists_error = True) if cls.db_name: tinctest.logger.debug("Configure database %s from MPPTestCase setUpClass." % cls.db_name) cls.configure_database(cls.db_name) tinctest.logger.trace_out() @classmethod def get_out_dir(cls): """ Returns the absolute output directory for this test class. Joins cls.out_dir with the location where the test module exists. """ source_file = sys.modules[cls.__module__].__file__ source_dir = os.path.dirname(source_file) abs_out_dir = os.path.join(source_dir, cls.out_dir) return abs_out_dir @classmethod def get_source_dir(cls): """ Returns the directory at which this test class exists. """ source_file = sys.modules[cls.__module__].__file__ source_dir = os.path.dirname(source_file) return source_dir @classmethod def configure_database(cls,db_name): """ Configures the given database using datagen libraries. @param db_name: Name of the database to be configured. If there is no specific datagen available for this database, this will just create an empty database with the given name. @type db_name: string """ tinctest.logger.trace_in(db_name) if not __databases__.has_key(db_name): tinctest.logger.info("db_name %s is not defined in __databases__ dictionary." 
                                 % db_name)
            __databases__[db_name] = TINCTestDatabase(database_name=db_name)
            py_mod = sys.modules[cls.__module__]
            TINCTestCustomDatabase = None
            for obj in inspect.getmembers(py_mod, lambda member: inspect.isclass(member) and issubclass(member, TINCTestDatabase)):
                if obj[1]._infer_metadata().get('db_name', None) == db_name:
                    TINCTestCustomDatabase = obj[1]
                    break
            if TINCTestCustomDatabase:
                __databases__[db_name] = TINCTestCustomDatabase(database_name=db_name)
            else:
                tinctest.logger.warning("No CustomDatabase class provided for %s." % db_name)

            if __databases__[db_name]:
                tinctest.logger.info("Running setup of database %s." % db_name)
                try:
                    __databases__[db_name].setUp()
                except Exception, exp:
                    # if we are here, setup failed. handle errors accordingly.
                    __databases__[db_name].tearDown()
                    raise TINCDatagenException(exp)

        tinctest.logger.trace_out()

    def setUp(self):
        """
        setUp method in MPPTestCase does the following:
        -> Configures the database specified through the metadata 'db_name'.
           This will configure the database only if it was not already done in setUpClass.
        """
        tinctest.logger.trace_in()
        super(MPPTestCase, self).setUp()
        # Create the database if db_name metadata is specified and if it doesn't exist
        # TODO: Change TINCTestDatabase to take in PSQL options (part of story QAINF-191)
        if self.db_name and self.__class__.db_name and self.db_name == self.__class__.db_name:
            tinctest.logger.debug("No need to configure database %s in setUp, since it would have already been configured via setUpClass." % self.db_name)
        elif self.db_name:
            tinctest.logger.debug("Configure database %s from MPPTestCase setUp." % self.db_name)
            self.configure_database(self.db_name)
        tinctest.logger.trace_out()

    def defaultTestResult(self, stream=None, descriptions=None, verbosity=None):
        """
        TODO: This method should not be exposed as a public method. All result objects will be internal.

        Return a custom result object for MPPTestCase. We need a handle on whether the test
        errored out / failed to honor metadata like 'restart'.
        """
        if stream and descriptions and verbosity:
            return _MPPTestCaseResult(stream, descriptions, verbosity)
        else:
            return unittest.TestResult()

    def get_product_version(self):
        """
        This function is used by TINCTestCase to determine the current DUT version.
        It uses this information, along with @product_version, to determine if a
        test case should run in this particular DUT.

        @return: A two-tuple containing name and version of the product where test is executed
        @rtype: (string, string)
        """
        return (self.__class__.__product__, self.__class__.__version_string__)

    def _infer_metadata(self):
        """
        Read all the metadata and store them as instance variables.
        """
        super(MPPTestCase, self)._infer_metadata()
        self.host = self._metadata.get('host', 'localhost')
        self.db_name = self._metadata.get('db_name', self.__class__.db_name)
        self.username = self._metadata.get('username', None)
        self.password = self._metadata.get('password', None)
        if self._metadata.get('gather_logs_on_failure') and self._metadata.get('gather_logs_on_failure').lower() == 'true':
            self.gather_logs_on_failure = True
        if self._metadata.get('restart_on_fatal_failure') and self._metadata.get('restart_on_fatal_failure').lower() == 'true':
            self.restart_on_fatal_failure = True
        self.gpopt = self._metadata.get('gpopt', None)
        if self.gpopt:
            if 'gpopt' not in self.__class__.__product_environment__:
                self.skip = 'Test does not apply to the deployed system. Test Case GPOPT version - %s , Deployed system has no GPOPT' % self.gpopt
            elif tuple(self.gpopt.split('.')) > tuple(self.__class__.__product_environment__['gpopt'].split('.')):
                self.skip = 'Test does not apply to the deployed GPOPT version. Test Case GPOPT version - %s , Deployed version - %s' % (self.gpopt, self.__class__.__product_environment__['gpopt'])

    def install_cluster(self):
        """
        This function will install the cluster
        """
        pass

    def initialize_cluster(self):
        """
        This function will initialize the cluster
        """
        pass

    def configure_cluster(self):
        """
        This function will configure the cluster
        """
        pass

    def inspect_cluster(self):
        """
        This function will inspect the cluster from the start time of this test till now.
        Returns True if there are no errors in logs, False if there are errors in logs.

        @return: Returns True / False depending on whether errors were found in the log
        @rtype: boolean
        """
        tinctest.logger.trace_in()
        start_time = self.start_time
        if start_time == 0 or not start_time:
            return True
        end_time = self.end_time
        if end_time == 0 or not end_time:
            end_time = time.time()
        return_status = not GpLog.check_log_for_errors(start_time, end_time)
        tinctest.logger.trace_out(str(return_status))
        return return_status

    def gather_log(self):
        """
        This method will gather logs from all segments between start_time and end_time
        of the test and write it to an out file in the output directory.
        The default name of the log file will be <testmethodname>.logs
        """
        tinctest.logger.trace_in()
        start_time = self.start_time
        if start_time == 0 or not start_time:
            return
        end_time = self.end_time
        if end_time == 0 or not end_time:
            end_time = time.time()
        out_file = os.path.join(self.get_out_dir(), self._testMethodName + '.logs')
        GpLog.gather_log(start_time, end_time, out_file)
        tinctest.logger.trace_out()

    def delete_cluster(self):
        """
        This function will delete the cluster
        """
        pass

    def start_cluster(self):
        """
        This function will start the cluster
        """
        pass

    def stop_cluster(self):
        """
        This function will stop the cluster
        """
        pass

    def restart_cluster(self):
        """
        This function will restart the cluster
        """
        pass


class _MPPTestCaseResult(TINCTextTestResult):
    """
    A custom listener class for MPPTestCase. This is responsible for reacting
    appropriately to failures and errors of type MPPTestCase.

    Following is what this class does on failure:
    -> If restart_on_fatal_failure is set for the test, inspects the logs for
       fatal failure and restarts the cluster if there are any errors found.
    -> If gather_logs_on_failure is set for the test, gathers segment and master
       logs to the output directory.
    """
    def addFailure(self, test, err):
        try:
            # restart the cluster if restart_on_failure is set to True and inspect_cluster returns False
            if test.gather_logs_on_failure:
                test.gather_log()
            if test.restart_on_fatal_failure:
                if not test.inspect_cluster():
                    tinctest.logger.warning("Errors found in the logs for this test case. Restarting the cluster")
                    test.restart_cluster()
        except Exception, e:
            tinctest.logger.exception("Re-starting cluster failed - %s" % e)
        super(_MPPTestCaseResult, self).addFailure(test, err)


class __gpdbMPPTestCase__(MPPTestCase):
    """
    __gpdbMPPTestCase__ is a hidden class that overrides GPDB specific methods of MPPTestCase.
    This class should never be used as a parent or as an executor for any test cases.
    Presently, this class doesn't override any methods. It is here only for reference.
    """
    pass


class __hawqMPPTestCase__(MPPTestCase):
    """
    __hawqMPPTestCase__ is a hidden class that overrides HAWQ specific methods of MPPTestCase.
    This class should never be used as a parent or as an executor for any test cases.
    Presently, this class doesn't override any methods. It is here only for reference.
    """
    pass


class __postgresMPPTestCase__(MPPTestCase):
    """
    __postgresMPPTestCase__ is a hidden class that overrides postgres specific methods of MPPTestCase.
    This class should never be used as a parent or as an executor for any test cases.
    Presently, this class doesn't override any methods. It is here only for reference.
    """
    pass
apache-2.0
6,377,634,018,731,531,000
6,606,523,043,236,876,000
44.943636
197
0.639519
false
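The MPPTestCase code in the record above reads its configuration (db_name, gather_logs_on_failure, restart_on_fatal_failure, gpopt) from test metadata via _infer_metadata(). As a rough, non-authoritative sketch of how a subclass might supply that metadata — the docstring @tag syntax and the class name SampleQueryTests are illustrative assumptions, not taken from the record — it could look like this:

# Hypothetical sketch only: a subclass whose docstring metadata would be picked up
# by MPPTestCase._infer_metadata(). The tag names mirror the keys read above; the
# exact @tag syntax is assumed, not confirmed by the record.
class SampleQueryTests(MPPTestCase):
    """
    @db_name sample_db
    @gather_logs_on_failure True
    @restart_on_fatal_failure True
    """

    def test_simple_select(self):
        # setUp() would have configured 'sample_db' before this method runs.
        pass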
schmidsi/django-pyodbc
tests/order_with_respect_to/models.py
24
2132
""" Tests for the order_with_respect_to Meta attribute. """ from django.db import models class Question(models.Model): text = models.CharField(max_length=200) class Answer(models.Model): text = models.CharField(max_length=200) question = models.ForeignKey(Question) class Meta: order_with_respect_to = 'question' def __unicode__(self): return unicode(self.text) __test__ = {'API_TESTS': """ >>> q1 = Question(text="Which Beatle starts with the letter 'R'?") >>> q1.save() >>> q2 = Question(text="What is your name?") >>> q2.save() >>> Answer(text="John", question=q1).save() >>> Answer(text="Jonno",question=q2).save() >>> Answer(text="Paul", question=q1).save() >>> Answer(text="Paulo", question=q2).save() >>> Answer(text="George", question=q1).save() >>> Answer(text="Ringo", question=q1).save() The answers will always be ordered in the order they were inserted. >>> q1.answer_set.all() [<Answer: John>, <Answer: Paul>, <Answer: George>, <Answer: Ringo>] We can retrieve the answers related to a particular object, in the order they were created, once we have a particular object. >>> a1 = Answer.objects.filter(question=q1)[0] >>> a1 <Answer: John> >>> a2 = a1.get_next_in_order() >>> a2 <Answer: Paul> >>> a4 = list(Answer.objects.filter(question=q1))[-1] >>> a4 <Answer: Ringo> >>> a4.get_previous_in_order() <Answer: George> Determining (and setting) the ordering for a particular item is also possible. >>> id_list = [o.pk for o in q1.answer_set.all()] >>> a2.question.get_answer_order() == id_list True >>> a5 = Answer(text="Number five", question=q1) >>> a5.save() It doesn't matter which answer we use to check the order, it will always be the same. >>> a2.question.get_answer_order() == a5.question.get_answer_order() True The ordering can be altered: >>> id_list = [o.pk for o in q1.answer_set.all()] >>> x = id_list.pop() >>> id_list.insert(-1, x) >>> a5.question.get_answer_order() == id_list False >>> a5.question.set_answer_order(id_list) >>> q1.answer_set.all() [<Answer: John>, <Answer: Paul>, <Answer: George>, <Answer: Number five>, <Answer: Ringo>] """ }
bsd-3-clause
-3,246,507,871,356,857,000
-1,966,787,788,162,798,300
26.333333
90
0.66182
false
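The doctest in the record above exercises the helpers Django generates for order_with_respect_to (get_next_in_order, get_answer_order, set_answer_order). As a minimal sketch reusing the same Question/Answer models — the function name promote_latest_answer is only an illustration, not part of the record — the same reordering could be written in ordinary application code:

# Minimal sketch, assuming the Question/Answer models from the record above.
# Moves the most recently added answer to the front of its question's ordering.
def promote_latest_answer(question):
    order = list(question.get_answer_order())  # primary keys in their current order
    order.insert(0, order.pop())                # move the last pk to the front
    question.set_answer_order(order)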
mjirik/teigen
tests/teigen_test.py
1
6709
#! /usr/bin/env python
# -*- coding: utf-8 -*-

import logging
logger = logging.getLogger(__name__)

import unittest
import sys
import pytest
# import teigen
# import io3d

import os.path as op
path_to_script = op.dirname(op.abspath(__file__))


class MyTestCase(unittest.TestCase):
    # @pytest.mark.interactive
    def test_teigen_gui_interactive(self):
        import os.path as op
        params = None
        # params = io3d.misc.obj_from_file(op.expanduser("~/teigen_data/038/slice_parameters.yaml"))
        from PyQt5.QtWidgets import QApplication, QFileDialog
        # from teigen.dictwidgetqt import DictWidget
        import teigen.gui
        app = QApplication(sys.argv)
        cw = teigen.gui.TeigenWidget(config=params)
        cw.show()
        app.exec_()

    @pytest.mark.interactive
    def test_teigen_gui_interactive_with_parameters(self):
        """
        reproduces undetected collision bug
        :return:
        """
        import os.path as op
        params = None
        # params = io3d.misc.obj_from_file(op.expanduser("~/teigen_data/038/slice_parameters.yaml"))
        params = {
            "generator_id": 3,
            "areasampling": {
                "voxelsize_mm": [1., 1., 1.],
                "areasize_px": [20, 20, 20],
                "areasize_mm": [20, 20, 20],
            },
            "postprocessing": {
                "measurement_resolution": 15,
                "measurement_multiplier": -1,
                "add_noise": False
            },
            "generators": {
                "Unconnected tubes": {
                    "element_number": 3,
                    "random_generator_seed": 110,
                    "radius_distribution_mean": 15,
                    "radius_distribution_maximum": 20,
                    "orientation_anisotropic": False,
                }
            }
        }
        # tg.update_config(**conf)
        from PyQt5.QtWidgets import QApplication
        # from teigen.dictwidgetqt import DictWidget
        import teigen.gui
        app = QApplication(sys.argv)
        cw = teigen.gui.TeigenWidget(use_default_config=True, config=params)
        cw.show()
        app.exec_()

    # def test_teigen_gui(self):
    #     import PyQt4
    #     from PyQt4.QtGui import QApplication, QFileDialog
    #     # from teigen.dictwidgetqt import DictWidget
    #     import teigen
    #     import teigen.geometry3d
    #     import teigen.gui
    #     app = QApplication(sys.argv)
    #     cw = teigen.gui.TeigenWidget()
    #     cw.show()
    #     cw.deleteLater()
    #     app.deleteLater()

    @pytest.mark.interactive
    def test_teigen_without_save(self):
        import teigen.gui
        tg = teigen.gui.Teigen()
        conf = {
            "generator_id": 3,
            "areasampling": {
                "voxelsize_mm": [1., 1., 1.],
                "areasize_px": [110, 120, 130],
                "areasize_mm": [110, 120, 130],
            },
            "postprocessing": {
                "measurement_multiplier": -1,
                "add_noise": False
            },
            "generators": {
                "Unconnected cylinders": {
                    "element_number": 10
                }
            }
        }
        tg.update_config(**conf)
        tg.step1()

    @pytest.mark.interactive
    def test_teigen_big(self):
        import teigen.gui
        tg = teigen.gui.Teigen()
        conf = {
            "areasampling": {
                "voxelsize_mm": [1., 1., 1.],
                "areasize_px": [210, 720, 730],
                "areasize_mm": [210, 720, 730],
            },
            "postprocessing": {
                "measurement_multiplier": -1,
                "add_noise": False
            },
            "generators": {
                "Unconnected cylinders": {
                    "element_number": 10
                }
            }
        }
        tg.update_config(**conf)
        tg.step1()
        tg.step2()

    # def test_teigen_small(self):
    #     import teigen.gui
    #     tg = teigen.gui.Teigen()
    #     conf = {
    #         "areasampling": {
    #             "voxelsize_mm": [1., 1., 1.],
    #             "areasize_px": [110, 120, 130],
    #             "areasize_mm": [110, 120, 130],
    #         },
    #         "postprocessing": {
    #             "measurement_multiplier": -1,
    #         }
    #     }
    #     tg.update_config(**conf)
    #     tg.run()
    #     tg.save_volume()

    def test_teigen_prepare_parameters_and_measurement(self):
        """
        Check string like generator_id
        :return:
        """
        logger.debug("test prepare parameters and measurement")
        import teigen.gui
        tg = teigen.gui.Teigen()
        tg.use_default_config()
        conf = {
            "generator_id": "Unconnected tubes",
            "areasampling": {
                "voxelsize_mm": [1., 1., 1.],
                "areasize_px": [110, 120, 130],
                "areasize_mm": [110, 120, 130],
            },
            "postprocessing": {
                # "measurement_multiplier": -1,
                "add_noise": False
            },
            "generators": {
                "Unconnected tubes": {
                    "element_number": 1
                }
            }
        }
        tg.update_config(**conf)
        tg.step1()
        params = tg.get_config_and_measurement()
        tg.step2()
        logger.debug(params)

    def test_teigen_read_tube_skeleton_from_file(self):
        """
        Read tube skeleton from file
        :return:
        """
        logger.debug("test read tube skeleton from file")
        import teigen.gui
        tg = teigen.gui.Teigen()
        tg.use_default_config()
        conf = {
            "generator_id": "Unconnected tubes",
            "areasampling": {
                "voxelsize_mm": [1., 1., 1.],
                "areasize_px": [110, 120, 130],
                "areasize_mm": [110, 120, 130],
            },
            "postprocessing": {
                # "measurement_multiplier": -1,
                "add_noise": False
            },
            "generators": {
                "Unconnected tubes": {
                    "element_number": 1
                }
            }
        }
        tg.update_config(**conf)
        tg.set_loglevel("DEBUG")
        tg.step1_by_load_tube_skeleton(
            op.join(path_to_script, "data_vt.yaml"))
        # op.join(path_to_script, "vt_biodur.yaml"))
        params = tg.get_config_and_measurement()
        tg.step2()
        logger.debug(params)


if __name__ == '__main__':
    unittest.main()
apache-2.0
-2,101,904,281,500,670,200
3,261,219,896,339,333,000
28.555066
100
0.471158
false
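The tests in the record above drive teigen through nested configuration dicts and the step1()/step2() calls. A minimal non-interactive sketch of that flow follows, assuming only the teigen.gui.Teigen API already exercised by those tests (use_default_config, update_config, step1, step2); the concrete volume sizes and element count are illustrative values, not taken from the record.

# Minimal non-interactive sketch based on the API calls used in the tests above.
import teigen.gui

tg = teigen.gui.Teigen()
tg.use_default_config()
tg.update_config(
    generator_id="Unconnected tubes",
    areasampling={
        "voxelsize_mm": [1.0, 1.0, 1.0],
        "areasize_px": [60, 60, 60],
        "areasize_mm": [60, 60, 60],
    },
    generators={"Unconnected tubes": {"element_number": 5}},
)
tg.step1()  # first stage, as called in every test above
tg.step2()  # second stage, as exercised in test_teigen_big and the measurement tests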