Dataset columns:

  repo_name      string, length 5 to 100
  path           string, length 4 to 299
  copies         string, 990 distinct values
  size           string, length 4 to 7
  content        string, length 666 to 1.03M
  license        string, 15 distinct values
  hash           int64, range -9,223,351,895,964,839,000 to 9,223,297,778B
  line_mean      float64, range 3.17 to 100
  line_max       int64, range 7 to 1k
  alpha_frac     float64, range 0.25 to 0.98
  autogenerated  bool, 1 class
repo_name: luceatnobis/youtube-dl | path: youtube_dl/extractor/hotstar.py | copies: 33 | size: 3711
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    determine_ext,
    int_or_none,
)


class HotStarIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?hotstar\.com/(?:.+?[/-])?(?P<id>\d{10})'
    _TESTS = [{
        'url': 'http://www.hotstar.com/on-air-with-aib--english-1000076273',
        'info_dict': {
            'id': '1000076273',
            'ext': 'mp4',
            'title': 'On Air With AIB - English',
            'description': 'md5:c957d8868e9bc793ccb813691cc4c434',
            'timestamp': 1447227000,
            'upload_date': '20151111',
            'duration': 381,
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        }
    }, {
        'url': 'http://www.hotstar.com/sports/cricket/rajitha-sizzles-on-debut-with-329/2001477583',
        'only_matching': True,
    }, {
        'url': 'http://www.hotstar.com/1000000515',
        'only_matching': True,
    }]

    def _download_json(self, url_or_request, video_id, note='Downloading JSON metadata', fatal=True, query=None):
        json_data = super(HotStarIE, self)._download_json(
            url_or_request, video_id, note, fatal=fatal, query=query)
        if json_data['resultCode'] != 'OK':
            if fatal:
                raise ExtractorError(json_data['errorDescription'])
            return None
        return json_data['resultObj']

    def _real_extract(self, url):
        video_id = self._match_id(url)
        video_data = self._download_json(
            'http://account.hotstar.com/AVS/besc', video_id, query={
                'action': 'GetAggregatedContentDetails',
                'channel': 'PCTV',
                'contentId': video_id,
            })['contentInfo'][0]
        title = video_data['episodeTitle']

        if video_data.get('encrypted') == 'Y':
            raise ExtractorError('This video is DRM protected.', expected=True)

        formats = []
        for f in ('JIO',):
            format_data = self._download_json(
                'http://getcdn.hotstar.com/AVS/besc', video_id,
                'Downloading %s JSON metadata' % f, fatal=False, query={
                    'action': 'GetCDN',
                    'asJson': 'Y',
                    'channel': f,
                    'id': video_id,
                    'type': 'VOD',
                })
            if format_data:
                format_url = format_data.get('src')
                if not format_url:
                    continue
                ext = determine_ext(format_url)
                if ext == 'm3u8':
                    formats.extend(self._extract_m3u8_formats(
                        format_url, video_id, 'mp4', m3u8_id='hls', fatal=False))
                elif ext == 'f4m':
                    # produce broken files
                    continue
                else:
                    formats.append({
                        'url': format_url,
                        'width': int_or_none(format_data.get('width')),
                        'height': int_or_none(format_data.get('height')),
                    })
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'description': video_data.get('description'),
            'duration': int_or_none(video_data.get('duration')),
            'timestamp': int_or_none(video_data.get('broadcastDate')),
            'formats': formats,
            'episode': title,
            'episode_number': int_or_none(video_data.get('episodeNumber')),
            'series': video_data.get('contentTitle'),
        }
license: unlicense | hash: 6,372,708,783,486,775,000 | line_mean: 35.742574 | line_max: 113 | alpha_frac: 0.489356 | autogenerated: false
repo_name: hryamzik/ansible | path: lib/ansible/modules/cloud/amazon/rds_snapshot_facts.py | copies: 32 | size: 12499
#!/usr/bin/python # Copyright (c) 2014-2017 Ansible Project # Copyright (c) 2017, 2018 Will Thames # Copyright (c) 2017, 2018 Michael De La Rue # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'metadata_version': '1.1'} DOCUMENTATION = ''' --- module: rds_snapshot_facts version_added: "2.6" short_description: obtain facts about one or more RDS snapshots description: - obtain facts about one or more RDS snapshots. These can be for unclustered snapshots or snapshots of clustered DBs (Aurora) - Aurora snapshot facts may be obtained if no identifier parameters are passed or if one of the cluster parameters are passed. options: db_snapshot_identifier: description: - Name of an RDS (unclustered) snapshot. Mutually exclusive with I(db_instance_identifier), I(db_cluster_identifier), I(db_cluster_snapshot_identifier) required: false aliases: - snapshot_name db_instance_identifier: description: - RDS instance name for which to find snapshots. Mutually exclusive with I(db_snapshot_identifier), I(db_cluster_identifier), I(db_cluster_snapshot_identifier) required: false db_cluster_identifier: description: - RDS cluster name for which to find snapshots. Mutually exclusive with I(db_snapshot_identifier), I(db_instance_identifier), I(db_cluster_snapshot_identifier) required: false db_cluster_snapshot_identifier: description: - Name of an RDS cluster snapshot. Mutually exclusive with I(db_instance_identifier), I(db_snapshot_identifier), I(db_cluster_identifier) required: false snapshot_type: description: - Type of snapshot to find. By default both automated and manual snapshots will be returned. required: false choices: ['automated', 'manual', 'shared', 'public'] requirements: - "python >= 2.6" - "boto3" author: - "Will Thames (@willthames)" extends_documentation_fragment: - aws - ec2 ''' EXAMPLES = ''' # Get facts about an snapshot - rds_snapshot_facts: db_snapshot_identifier: snapshot_name register: new_database_facts # Get all RDS snapshots for an RDS instance - rds_snapshot_facts: db_instance_identifier: helloworld-rds-master ''' RETURN = ''' snapshots: description: List of non-clustered snapshots returned: When cluster parameters are not passed type: complex contains: allocated_storage: description: How many gigabytes of storage are allocated returned: always type: int sample: 10 availability_zone: description: The availability zone of the database from which the snapshot was taken returned: always type: string sample: us-west-2b db_instance_identifier: description: Database instance identifier returned: always type: string sample: hello-world-rds db_snapshot_arn: description: Snapshot ARN returned: always type: string sample: arn:aws:rds:us-west-2:111111111111:snapshot:rds:hello-world-rds-us1-2018-05-16-04-03 db_snapshot_identifier: description: Snapshot name returned: always type: string sample: rds:hello-world-rds-us1-2018-05-16-04-03 encrypted: description: Whether the snapshot was encrypted returned: always type: bool sample: true engine: description: Database engine returned: always type: string sample: postgres engine_version: description: Database engine version returned: always type: string sample: 9.5.10 iam_database_authentication_enabled: description: Whether database authentication through IAM is enabled returned: always type: bool sample: false instance_create_time: description: Time the Instance was created returned: always type: string sample: 
'2017-10-10T04:00:07.434000+00:00' kms_key_id: description: ID of the KMS Key encrypting the snapshot returned: always type: string sample: arn:aws:kms:us-west-2:111111111111:key/abcd1234-1234-aaaa-0000-1234567890ab license_model: description: License model returned: always type: string sample: postgresql-license master_username: description: Database master username returned: always type: string sample: dbadmin option_group_name: description: Database option group name returned: always type: string sample: default:postgres-9-5 percent_progress: description: Perecent progress of snapshot returned: always type: int sample: 100 snapshot_create_time: description: Time snapshot was created returned: always type: string sample: '2018-05-16T04:03:33.871000+00:00' snapshot_type: description: Type of snapshot returned: always type: string sample: automated status: description: Status of snapshot returned: always type: string sample: available storage_type: description: Storage type of underlying DB returned: always type: string sample: gp2 tags: description: Snapshot tags returned: always type: complex contains: {} vpc_id: description: ID of VPC containing the DB returned: always type: string sample: vpc-abcd1234 cluster_snapshots: description: List of cluster snapshots returned: always type: complex contains: allocated_storage: description: How many gigabytes of storage are allocated returned: always type: int sample: 1 availability_zones: description: The availability zones of the database from which the snapshot was taken returned: always type: list sample: - ca-central-1a - ca-central-1b cluster_create_time: description: Date and time the cluster was created returned: always type: string sample: '2018-05-17T00:13:40.223000+00:00' db_cluster_identifier: description: Database cluster identifier returned: always type: string sample: test-aurora-cluster db_cluster_snapshot_arn: description: ARN of the database snapshot returned: always type: string sample: arn:aws:rds:ca-central-1:111111111111:cluster-snapshot:test-aurora-snapshot db_cluster_snapshot_identifier: description: Snapshot identifier returned: always type: string sample: test-aurora-snapshot engine: description: Database engine returned: always type: string sample: aurora engine_version: description: Database engine version returned: always type: string sample: 5.6.10a iam_database_authentication_enabled: description: Whether database authentication through IAM is enabled returned: always type: bool sample: false kms_key_id: description: ID of the KMS Key encrypting the snapshot returned: always type: string sample: arn:aws:kms:ca-central-1:111111111111:key/abcd1234-abcd-1111-aaaa-0123456789ab license_model: description: License model returned: always type: string sample: aurora master_username: description: Database master username returned: always type: string sample: shertel percent_progress: description: Perecent progress of snapshot returned: always type: int sample: 0 port: description: Database port returned: always type: int sample: 0 snapshot_create_time: description: Date and time when the snapshot was created returned: always type: string sample: '2018-05-17T00:23:23.731000+00:00' snapshot_type: description: Type of snapshot returned: always type: string sample: manual status: description: Status of snapshot returned: always type: string sample: creating storage_encrypted: description: Whether the snapshot is encrypted returned: always type: bool sample: true tags: description: Tags of the snapshot returned: always type: 
complex contains: {} vpc_id: description: VPC of the database returned: always type: string sample: vpc-abcd1234 ''' from ansible.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code from ansible.module_utils.ec2 import AWSRetry, boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict try: import botocore except BaseException: pass # caught by imported HAS_BOTO3 def common_snapshot_facts(module, conn, method, prefix, params): paginator = conn.get_paginator(method) try: results = paginator.paginate(**params).build_full_result()['%ss' % prefix] except is_boto3_error_code('%sNotFound' % prefix): results = [] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, "trying to get snapshot information") for snapshot in results: try: snapshot['Tags'] = boto3_tag_list_to_ansible_dict(conn.list_tags_for_resource(ResourceName=snapshot['%sArn' % prefix], aws_retry=True)['TagList']) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, "Couldn't get tags for snapshot %s" % snapshot['%sIdentifier' % prefix]) return [camel_dict_to_snake_dict(snapshot, ignore_list=['Tags']) for snapshot in results] def cluster_snapshot_facts(module, conn): snapshot_name = module.params.get('db_cluster_snapshot_identifier') snapshot_type = module.params.get('snapshot_type') instance_name = module.params.get('db_cluster_instance_identifier') params = dict() if snapshot_name: params['DBClusterSnapshotIdentifier'] = snapshot_name if instance_name: params['DBClusterInstanceIdentifier'] = instance_name if snapshot_type: params['SnapshotType'] = snapshot_type if snapshot_type == 'public': params['IsPublic'] = True elif snapshot_type == 'shared': params['IsShared'] = True return common_snapshot_facts(module, conn, 'describe_db_cluster_snapshots', 'DBClusterSnapshot', params) def standalone_snapshot_facts(module, conn): snapshot_name = module.params.get('db_snapshot_identifier') snapshot_type = module.params.get('snapshot_type') instance_name = module.params.get('db_instance_identifier') params = dict() if snapshot_name: params['DBSnapshotIdentifier'] = snapshot_name if instance_name: params['DBInstanceIdentifier'] = instance_name if snapshot_type: params['SnapshotType'] = snapshot_type if snapshot_type == 'public': params['IsPublic'] = True elif snapshot_type == 'shared': params['IsShared'] = True return common_snapshot_facts(module, conn, 'describe_db_snapshots', 'DBSnapshot', params) def main(): argument_spec = dict( db_snapshot_identifier=dict(aliases=['snapshot_name']), db_instance_identifier=dict(), db_cluster_identifier=dict(), db_cluster_snapshot_identifier=dict(), snapshot_type=dict(choices=['automated', 'manual', 'shared', 'public']) ) module = AnsibleAWSModule( argument_spec=argument_spec, supports_check_mode=True, mutually_exclusive=[['db_snapshot_identifier', 'db_instance_identifier', 'db_cluster_identifier', 'db_cluster_snapshot_identifier']] ) conn = module.client('rds', retry_decorator=AWSRetry.jittered_backoff(retries=10)) results = dict() if not module.params['db_cluster_identifier'] and not module.params['db_cluster_snapshot_identifier']: results['snapshots'] = standalone_snapshot_facts(module, conn) if not module.params['db_snapshot_identifier'] and not module.params['db_instance_identifier']: results['cluster_snapshots'] = cluster_snapshot_facts(module, conn) module.exit_json(changed=False, **results) if __name__ == '__main__': main()
license: gpl-3.0 | hash: 497,075,691,523,704,770 | line_mean: 31.805774 | line_max: 157 | alpha_frac: 0.668533 | autogenerated: false
repo_name: dparaujo/projeto | path: app_academico/semestre/migrations/0001_initial.py | copies: 1 | size: 1044
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-10-30 23:47
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='TblAcademicoSemestre',
            fields=[
                ('codigo', models.AutoField(primary_key=True, serialize=False, verbose_name='C\xf3digo')),
                ('descricao', models.CharField(max_length=100, unique=True, verbose_name='Descri\xe7\xe3o')),
                ('data', models.DateField(auto_now_add=True, verbose_name='Data de cadastro')),
                ('hora', models.TimeField(auto_now_add=True, verbose_name='Hora de cadastro')),
                ('ativo', models.BooleanField(choices=[(True, 'Sim'), (False, 'N\xe3o')], verbose_name='Ativo')),
            ],
            options={
                'ordering': ['codigo'],
                'db_table': 'tbl_academico_semestre',
            },
        ),
    ]
license: gpl-3.0 | hash: -6,254,257,975,671,408,000 | line_mean: 33.8 | line_max: 113 | alpha_frac: 0.56705 | autogenerated: false
repo_name: mnull/taccoin | path: contrib/bitrpc/bitrpc.py | copies: 1 | size: 7836
from jsonrpc import ServiceProxy import sys import string # ===== BEGIN USER SETTINGS ===== # if you do not set these you will be prompted for a password for every command rpcuser = "" rpcpass = "" # ====== END USER SETTINGS ====== if rpcpass == "": access = ServiceProxy("http://127.0.0.1:9332") else: access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:9332") cmd = sys.argv[1].lower() if cmd == "backupwallet": try: path = raw_input("Enter destination path/filename: ") print access.backupwallet(path) except: print "\n---An error occurred---\n" elif cmd == "getaccount": try: addr = raw_input("Enter a taccoin address: ") print access.getaccount(addr) except: print "\n---An error occurred---\n" elif cmd == "getaccountaddress": try: acct = raw_input("Enter an account name: ") print access.getaccountaddress(acct) except: print "\n---An error occurred---\n" elif cmd == "getaddressesbyaccount": try: acct = raw_input("Enter an account name: ") print access.getaddressesbyaccount(acct) except: print "\n---An error occurred---\n" elif cmd == "getbalance": try: acct = raw_input("Enter an account (optional): ") mc = raw_input("Minimum confirmations (optional): ") try: print access.getbalance(acct, mc) except: print access.getbalance() except: print "\n---An error occurred---\n" elif cmd == "getblockbycount": try: height = raw_input("Height: ") print access.getblockbycount(height) except: print "\n---An error occurred---\n" elif cmd == "getblockcount": try: print access.getblockcount() except: print "\n---An error occurred---\n" elif cmd == "getblocknumber": try: print access.getblocknumber() except: print "\n---An error occurred---\n" elif cmd == "getconnectioncount": try: print access.getconnectioncount() except: print "\n---An error occurred---\n" elif cmd == "getdifficulty": try: print access.getdifficulty() except: print "\n---An error occurred---\n" elif cmd == "getgenerate": try: print access.getgenerate() except: print "\n---An error occurred---\n" elif cmd == "gethashespersec": try: print access.gethashespersec() except: print "\n---An error occurred---\n" elif cmd == "getinfo": try: print access.getinfo() except: print "\n---An error occurred---\n" elif cmd == "getnewaddress": try: acct = raw_input("Enter an account name: ") try: print access.getnewaddress(acct) except: print access.getnewaddress() except: print "\n---An error occurred---\n" elif cmd == "getreceivedbyaccount": try: acct = raw_input("Enter an account (optional): ") mc = raw_input("Minimum confirmations (optional): ") try: print access.getreceivedbyaccount(acct, mc) except: print access.getreceivedbyaccount() except: print "\n---An error occurred---\n" elif cmd == "getreceivedbyaddress": try: addr = raw_input("Enter a taccoin address (optional): ") mc = raw_input("Minimum confirmations (optional): ") try: print access.getreceivedbyaddress(addr, mc) except: print access.getreceivedbyaddress() except: print "\n---An error occurred---\n" elif cmd == "gettransaction": try: txid = raw_input("Enter a transaction ID: ") print access.gettransaction(txid) except: print "\n---An error occurred---\n" elif cmd == "getwork": try: data = raw_input("Data (optional): ") try: print access.gettransaction(data) except: print access.gettransaction() except: print "\n---An error occurred---\n" elif cmd == "help": try: cmd = raw_input("Command (optional): ") try: print access.help(cmd) except: print access.help() except: print "\n---An error occurred---\n" elif cmd == "listaccounts": try: mc = raw_input("Minimum confirmations (optional): 
") try: print access.listaccounts(mc) except: print access.listaccounts() except: print "\n---An error occurred---\n" elif cmd == "listreceivedbyaccount": try: mc = raw_input("Minimum confirmations (optional): ") incemp = raw_input("Include empty? (true/false, optional): ") try: print access.listreceivedbyaccount(mc, incemp) except: print access.listreceivedbyaccount() except: print "\n---An error occurred---\n" elif cmd == "listreceivedbyaddress": try: mc = raw_input("Minimum confirmations (optional): ") incemp = raw_input("Include empty? (true/false, optional): ") try: print access.listreceivedbyaddress(mc, incemp) except: print access.listreceivedbyaddress() except: print "\n---An error occurred---\n" elif cmd == "listtransactions": try: acct = raw_input("Account (optional): ") count = raw_input("Number of transactions (optional): ") frm = raw_input("Skip (optional):") try: print access.listtransactions(acct, count, frm) except: print access.listtransactions() except: print "\n---An error occurred---\n" elif cmd == "move": try: frm = raw_input("From: ") to = raw_input("To: ") amt = raw_input("Amount:") mc = raw_input("Minimum confirmations (optional): ") comment = raw_input("Comment (optional): ") try: print access.move(frm, to, amt, mc, comment) except: print access.move(frm, to, amt) except: print "\n---An error occurred---\n" elif cmd == "sendfrom": try: frm = raw_input("From: ") to = raw_input("To: ") amt = raw_input("Amount:") mc = raw_input("Minimum confirmations (optional): ") comment = raw_input("Comment (optional): ") commentto = raw_input("Comment-to (optional): ") try: print access.sendfrom(frm, to, amt, mc, comment, commentto) except: print access.sendfrom(frm, to, amt) except: print "\n---An error occurred---\n" elif cmd == "sendmany": try: frm = raw_input("From: ") to = raw_input("To (in format address1:amount1,address2:amount2,...): ") mc = raw_input("Minimum confirmations (optional): ") comment = raw_input("Comment (optional): ") try: print access.sendmany(frm,to,mc,comment) except: print access.sendmany(frm,to) except: print "\n---An error occurred---\n" elif cmd == "sendtoaddress": try: to = raw_input("To (in format address1:amount1,address2:amount2,...): ") amt = raw_input("Amount:") comment = raw_input("Comment (optional): ") commentto = raw_input("Comment-to (optional): ") try: print access.sendtoaddress(to,amt,comment,commentto) except: print access.sendtoaddress(to,amt) except: print "\n---An error occurred---\n" elif cmd == "setaccount": try: addr = raw_input("Address: ") acct = raw_input("Account:") print access.setaccount(addr,acct) except: print "\n---An error occurred---\n" elif cmd == "setgenerate": try: gen= raw_input("Generate? 
(true/false): ") cpus = raw_input("Max processors/cores (-1 for unlimited, optional):") try: print access.setgenerate(gen, cpus) except: print access.setgenerate(gen) except: print "\n---An error occurred---\n" elif cmd == "settxfee": try: amt = raw_input("Amount:") print access.settxfee(amt) except: print "\n---An error occurred---\n" elif cmd == "stop": try: print access.stop() except: print "\n---An error occurred---\n" elif cmd == "validateaddress": try: addr = raw_input("Address: ") print access.validateaddress(addr) except: print "\n---An error occurred---\n" elif cmd == "walletpassphrase": try: pwd = raw_input("Enter wallet passphrase: ") access.walletpassphrase(pwd, 60) print "\n---Wallet unlocked---\n" except: print "\n---An error occurred---\n" elif cmd == "walletpassphrasechange": try: pwd = raw_input("Enter old wallet passphrase: ") pwd2 = raw_input("Enter new wallet passphrase: ") access.walletpassphrasechange(pwd, pwd2) print print "\n---Passphrase changed---\n" except: print print "\n---An error occurred---\n" print else: print "Command not found or not supported"
license: mit | hash: 3,041,786,034,633,486,300 | line_mean: 23.185185 | line_max: 79 | alpha_frac: 0.66169 | autogenerated: false
repo_name: dgzurita/odoo | path: addons/base_import_module/tests/test_module/__openerp__.py | copies: 377 | size: 1290
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    'name': 'Test Module',
    'category': 'Website',
    'summary': 'Custom',
    'version': '1.0',
    'description': """
Test
    """,
    'author': 'OpenERP SA',
    'depends': ['website'],
    'data': [
        'test.xml',
    ],
    'installable': True,
    'application': True,
}
license: agpl-3.0 | hash: 6,663,191,059,552,638,000 | line_mean: 33.864865 | line_max: 78 | alpha_frac: 0.565891 | autogenerated: false
repo_name: cstipkovic/spidermonkey-research | path: testing/marionette/harness/marionette/runner/mixins/browsermob-proxy-py/docs/conf.py | copies: 2 | size: 7900
# -*- coding: utf-8 -*- # # BrowserMob Proxy documentation build configuration file, created by # sphinx-quickstart on Fri May 24 12:37:12 2013. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys, os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath('../')) # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.coverage', 'sphinx.ext.viewcode', 'sphinx.ext.autodoc'] autoclass_content = 'both' # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'BrowserMob Proxy' copyright = u'2014, David Burns' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '0.6.0' # The full version, including alpha/beta/rc tags. release = '0.6.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. 
If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'BrowserMobProxydoc' # -- Options for LaTeX output -------------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'BrowserMobProxy.tex', u'BrowserMob Proxy Documentation', u'David Burns', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). 
man_pages = [ ('index', 'browsermobproxy', u'BrowserMob Proxy Documentation', [u'David Burns'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------------ # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'BrowserMobProxy', u'BrowserMob Proxy Documentation', u'David Burns', 'BrowserMobProxy', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote'
license: mpl-2.0 | hash: -3,906,200,997,007,315,000 | line_mean: 31.510288 | line_max: 81 | alpha_frac: 0.706456 | autogenerated: false
repo_name: nicholedwight/nichole-theme | path: node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/pygments/styles/bw.py | copies: 364 | size: 1355
# -*- coding: utf-8 -*-
"""
    pygments.styles.bw
    ~~~~~~~~~~~~~~~~~~

    Simple black/white only style.

    :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
    Operator, Generic


class BlackWhiteStyle(Style):

    background_color = "#ffffff"
    default_style = ""

    styles = {
        Comment:            "italic",
        Comment.Preproc:    "noitalic",

        Keyword:            "bold",
        Keyword.Pseudo:     "nobold",
        Keyword.Type:       "nobold",

        Operator.Word:      "bold",

        Name.Class:         "bold",
        Name.Namespace:     "bold",
        Name.Exception:     "bold",
        Name.Entity:        "bold",
        Name.Tag:           "bold",

        String:             "italic",
        String.Interpol:    "bold",
        String.Escape:      "bold",

        Generic.Heading:    "bold",
        Generic.Subheading: "bold",
        Generic.Emph:       "italic",
        Generic.Strong:     "bold",
        Generic.Prompt:     "bold",

        Error:              "border:#FF0000"
    }
license: mit | hash: 2,146,287,778,005,858,800 | line_mean: 26.653061 | line_max: 70 | alpha_frac: 0.454613 | autogenerated: false
repo_name: n-west/gnuradio-volk | path: gr-analog/python/analog/qa_pll_refout.py | copies: 17 | size: 7816
#!/usr/bin/env python # # Copyright 2004,2010,2012,2013 Free Software Foundation, Inc. # # This file is part of GNU Radio # # GNU Radio is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # GNU Radio is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GNU Radio; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. # import math from gnuradio import gr, gr_unittest, analog, blocks class test_pll_refout(gr_unittest.TestCase): def setUp(self): self.tb = gr.top_block() def tearDown(self): self.tb = None def test_pll_refout(self): expected_result = ((1+0j), (1+6.4087357643e-10j), (0.999985277653+0.00542619498447j), (0.999868750572+0.0162021834403j), (0.99948567152+0.0320679470897j), (0.99860727787+0.0527590736747j), (0.996953129768+0.0780025869608j), (0.994203746319+0.107512556016j), (0.990011692047+0.140985429287j), (0.984013140202+0.178095817566j), (0.975838363171+0.218493551016j), (0.965121984482+0.261800557375j), (0.95151245594+0.307610183954j), (0.934681296349+0.355486690998j), (0.914401650429+0.404808044434j), (0.890356600285+0.455263823271j), (0.862329125404+0.506348133087j), (0.830152392387+0.557536482811j), (0.793714106083+0.608290970325j), (0.752960026264+0.658066213131j), (0.707896590233+0.706316053867j), (0.658591926098+0.752500295639j), (0.605175673962+0.796091973782j), (0.547837555408+0.836584687233j), (0.48682525754+0.873499393463j), (0.42244040966+0.906390726566j), (0.355197101831+0.934791445732j), (0.285494059324+0.958380460739j), (0.213591173291+0.976923108101j), (0.139945343137+0.990159213543j), (0.065038472414+0.997882783413j), (-0.0106285437942+0.999943494797j), (-0.0865436866879+0.996248066425j), (-0.162189796567+0.986759603024j), (-0.23705175519+0.971496999264j), (-0.310622543097+0.950533330441j), (-0.38240903616+0.923993110657j), (-0.451937526464+0.89204955101j), (-0.518758952618+0.854920566082j), (-0.582311093807+0.812966048717j), (-0.642372369766+0.76639264822j), (-0.698591887951+0.715520322323j), (-0.750654160976+0.660695314407j), (-0.798280358315+0.602286040783j), (-0.841228663921+0.540679454803j), (-0.87929558754+0.476276367903j), (-0.912315964699+0.409486919641j), (-0.940161883831+0.340728074312j), (-0.962742805481+0.270418733358j), (-0.980004072189+0.198977485299j), (-0.991925954819+0.126818284392j), (-0.99851256609+0.0545223206282j), (-0.999846458435-0.0175215266645j), (-0.996021270752-0.0891158208251j), (-0.987133920193-0.159895718098j), (-0.973306238651-0.2295101583j), (-0.954683184624-0.297624111176j), (-0.931430280209-0.363919824362j), (-0.903732538223-0.428097635508j), (-0.871792256832-0.489875763655j), (-0.835827112198-0.548992812634j), (-0.796068251133-0.605206847191j), (-0.752758979797-0.658296227455j), (-0.706152498722-0.70805978775j), (-0.656641483307-0.754202902317j), (-0.604367733002-0.79670548439j), (-0.549597978592-0.835429251194j), (-0.492602348328-0.870254516602j), (-0.433654457331-0.901079237461j), (-0.373029649258-0.927819430828j), (-0.31100410223-0.950408577919j), (-0.247853919864-0.968797445297j), (-0.183855071664-0.982953369617j), 
(-0.119282215834-0.992860376835j), (-0.0544078871608-0.998518764973j), (0.0104992967099-0.999944865704j), (0.0749994292855-0.997183561325j), (0.138844624162-0.990314185619j), (0.201967850327-0.979392170906j), (0.264124274254-0.964488625526j), (0.325075358152-0.945688128471j), (0.3845885396-0.92308807373j), (0.442438393831-0.89679890871j), (0.498407125473-0.866943061352j), (0.552284479141-0.833655714989j), (0.603869199753-0.797083437443j), (0.652970373631-0.757383465767j), (0.69940674305-0.714723825455j), (0.743007957935-0.66928255558j), (0.78350687027-0.62138313055j), (0.820889055729-0.571087777615j), (0.855021059513-0.51859331131j), (0.885780930519-0.46410369873j), (0.913058102131-0.407829582691j), (0.936754107475-0.349988251925j), (0.956783294678-0.290801793337j), (0.973072886467-0.230497643352j), (0.985563337803-0.169307261705j), (0.9942086339-0.1074674353j), (0.9989772439-0.0452152714133j)) sampling_freq = 10e3 freq = sampling_freq / 100 loop_bw = math.pi/100.0 maxf = 1 minf = -1 src = analog.sig_source_c(sampling_freq, analog.GR_COS_WAVE, freq, 1.0) pll = analog.pll_refout_cc(loop_bw, maxf, minf) head = blocks.head(gr.sizeof_gr_complex, int (freq)) dst = blocks.vector_sink_c() self.tb.connect(src, pll, head) self.tb.connect(head, dst) self.tb.run() dst_data = dst.data() self.assertComplexTuplesAlmostEqual(expected_result, dst_data, 4) if __name__ == '__main__': gr_unittest.run(test_pll_refout, "test_pll_refout.xml")
license: gpl-3.0 | hash: -2,602,156,135,745,435,600 | line_mean: 48.783439 | line_max: 79 | alpha_frac: 0.488741 | autogenerated: false
repo_name: saleemjaveds/https-github.com-openstack-nova | path: nova/tests/virt/hyperv/test_pathutils.py | copies: 12 | size: 2298
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os

import mock

from nova import test
from nova.virt.hyperv import constants
from nova.virt.hyperv import pathutils


class PathUtilsTestCase(test.NoDBTestCase):
    """Unit tests for the Hyper-V PathUtils class."""

    def setUp(self):
        self.fake_instance_dir = os.path.join('C:', 'fake_instance_dir')
        self.fake_instance_name = 'fake_instance_name'
        self._pathutils = pathutils.PathUtils()
        super(PathUtilsTestCase, self).setUp()

    def _mock_lookup_configdrive_path(self, ext):
        self._pathutils.get_instance_dir = mock.MagicMock(
            return_value=self.fake_instance_dir)

        def mock_exists(*args, **kwargs):
            path = args[0]
            return True if path[(path.rfind('.') + 1):] == ext else False
        self._pathutils.exists = mock_exists

        configdrive_path = self._pathutils.lookup_configdrive_path(
            self.fake_instance_name)
        return configdrive_path

    def test_lookup_configdrive_path(self):
        for format_ext in constants.DISK_FORMAT_MAP:
            configdrive_path = self._mock_lookup_configdrive_path(format_ext)
            fake_path = os.path.join(self.fake_instance_dir,
                                     'configdrive.' + format_ext)
            self.assertEqual(configdrive_path, fake_path)

    def test_lookup_configdrive_path_non_exist(self):
        self._pathutils.get_instance_dir = mock.MagicMock(
            return_value=self.fake_instance_dir)
        self._pathutils.exists = mock.MagicMock(return_value=False)
        configdrive_path = self._pathutils.lookup_configdrive_path(
            self.fake_instance_name)
        self.assertIsNone(configdrive_path)
license: apache-2.0 | hash: -9,181,039,940,068,839,000 | line_mean: 38.62069 | line_max: 78 | alpha_frac: 0.667537 | autogenerated: false
repo_name: ted-gould/nova | path: nova/virt/hyperv/migrationops.py | copies: 15 | size: 13161
# Copyright 2013 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Management class for migration / resize operations. """ import os from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import units from nova import exception from nova.i18n import _, _LE from nova import objects from nova.virt import configdrive from nova.virt.hyperv import imagecache from nova.virt.hyperv import utilsfactory from nova.virt.hyperv import vmops from nova.virt.hyperv import vmutils from nova.virt.hyperv import volumeops LOG = logging.getLogger(__name__) class MigrationOps(object): def __init__(self): self._hostutils = utilsfactory.get_hostutils() self._vmutils = utilsfactory.get_vmutils() self._vhdutils = utilsfactory.get_vhdutils() self._pathutils = utilsfactory.get_pathutils() self._volumeops = volumeops.VolumeOps() self._vmops = vmops.VMOps() self._imagecache = imagecache.ImageCache() def _migrate_disk_files(self, instance_name, disk_files, dest): # TODO(mikal): it would be nice if this method took a full instance, # because it could then be passed to the log messages below. same_host = False if dest in self._hostutils.get_local_ips(): same_host = True LOG.debug("Migration target is the source host") else: LOG.debug("Migration target host: %s", dest) instance_path = self._pathutils.get_instance_dir(instance_name) revert_path = self._pathutils.get_instance_migr_revert_dir( instance_name, remove_dir=True, create_dir=True) dest_path = None try: if same_host: # Since source and target are the same, we copy the files to # a temporary location before moving them into place dest_path = '%s_tmp' % instance_path if self._pathutils.exists(dest_path): self._pathutils.rmtree(dest_path) self._pathutils.makedirs(dest_path) else: dest_path = self._pathutils.get_instance_dir( instance_name, dest, remove_dir=True) for disk_file in disk_files: # Skip the config drive as the instance is already configured if os.path.basename(disk_file).lower() != 'configdrive.vhd': LOG.debug('Copying disk "%(disk_file)s" to ' '"%(dest_path)s"', {'disk_file': disk_file, 'dest_path': dest_path}) self._pathutils.copy(disk_file, dest_path) self._pathutils.move_folder_files(instance_path, revert_path) if same_host: self._pathutils.move_folder_files(dest_path, instance_path) except Exception: with excutils.save_and_reraise_exception(): self._cleanup_failed_disk_migration(instance_path, revert_path, dest_path) def _cleanup_failed_disk_migration(self, instance_path, revert_path, dest_path): try: if dest_path and self._pathutils.exists(dest_path): self._pathutils.rmtree(dest_path) if self._pathutils.exists(revert_path): self._pathutils.rename(revert_path, instance_path) except Exception as ex: # Log and ignore this exception LOG.exception(ex) LOG.error(_LE("Cannot cleanup migration files")) def _check_target_flavor(self, instance, flavor): new_root_gb = flavor.root_gb curr_root_gb = instance.root_gb if new_root_gb < curr_root_gb: raise exception.InstanceFaultRollback( 
vmutils.VHDResizeException( _("Cannot resize the root disk to a smaller size. " "Current size: %(curr_root_gb)s GB. Requested size: " "%(new_root_gb)s GB") % {'curr_root_gb': curr_root_gb, 'new_root_gb': new_root_gb})) def migrate_disk_and_power_off(self, context, instance, dest, flavor, network_info, block_device_info=None, timeout=0, retry_interval=0): LOG.debug("migrate_disk_and_power_off called", instance=instance) self._check_target_flavor(instance, flavor) self._vmops.power_off(instance, timeout, retry_interval) (disk_files, volume_drives) = self._vmutils.get_vm_storage_paths(instance.name) if disk_files: self._migrate_disk_files(instance.name, disk_files, dest) self._vmops.destroy(instance, destroy_disks=False) # disk_info is not used return "" def confirm_migration(self, migration, instance, network_info): LOG.debug("confirm_migration called", instance=instance) self._pathutils.get_instance_migr_revert_dir(instance.name, remove_dir=True) def _revert_migration_files(self, instance_name): instance_path = self._pathutils.get_instance_dir( instance_name, create_dir=False, remove_dir=True) revert_path = self._pathutils.get_instance_migr_revert_dir( instance_name) self._pathutils.rename(revert_path, instance_path) def _check_and_attach_config_drive(self, instance, vm_gen): if configdrive.required_by(instance): configdrive_path = self._pathutils.lookup_configdrive_path( instance.name) if configdrive_path: self._vmops.attach_config_drive(instance, configdrive_path, vm_gen) else: raise vmutils.HyperVException( _("Config drive is required by instance: %s, " "but it does not exist.") % instance.name) def finish_revert_migration(self, context, instance, network_info, block_device_info=None, power_on=True): LOG.debug("finish_revert_migration called", instance=instance) instance_name = instance.name self._revert_migration_files(instance_name) if self._volumeops.ebs_root_in_block_devices(block_device_info): root_vhd_path = None else: root_vhd_path = self._pathutils.lookup_root_vhd_path(instance_name) eph_vhd_path = self._pathutils.lookup_ephemeral_vhd_path(instance_name) image_meta = objects.ImageMeta.from_instance(instance) vm_gen = self._vmops.get_image_vm_generation(root_vhd_path, image_meta) self._vmops.create_instance(instance, network_info, block_device_info, root_vhd_path, eph_vhd_path, vm_gen) self._check_and_attach_config_drive(instance, vm_gen) if power_on: self._vmops.power_on(instance) def _merge_base_vhd(self, diff_vhd_path, base_vhd_path): base_vhd_copy_path = os.path.join(os.path.dirname(diff_vhd_path), os.path.basename(base_vhd_path)) try: LOG.debug('Copying base disk %(base_vhd_path)s to ' '%(base_vhd_copy_path)s', {'base_vhd_path': base_vhd_path, 'base_vhd_copy_path': base_vhd_copy_path}) self._pathutils.copyfile(base_vhd_path, base_vhd_copy_path) LOG.debug("Reconnecting copied base VHD " "%(base_vhd_copy_path)s and diff " "VHD %(diff_vhd_path)s", {'base_vhd_copy_path': base_vhd_copy_path, 'diff_vhd_path': diff_vhd_path}) self._vhdutils.reconnect_parent_vhd(diff_vhd_path, base_vhd_copy_path) LOG.debug("Merging base disk %(base_vhd_copy_path)s and " "diff disk %(diff_vhd_path)s", {'base_vhd_copy_path': base_vhd_copy_path, 'diff_vhd_path': diff_vhd_path}) self._vhdutils.merge_vhd(diff_vhd_path, base_vhd_copy_path) # Replace the differential VHD with the merged one self._pathutils.rename(base_vhd_copy_path, diff_vhd_path) except Exception: with excutils.save_and_reraise_exception(): if self._pathutils.exists(base_vhd_copy_path): self._pathutils.remove(base_vhd_copy_path) def 
_check_resize_vhd(self, vhd_path, vhd_info, new_size): curr_size = vhd_info['MaxInternalSize'] if new_size < curr_size: raise vmutils.VHDResizeException(_("Cannot resize a VHD " "to a smaller size")) elif new_size > curr_size: self._resize_vhd(vhd_path, new_size) def _resize_vhd(self, vhd_path, new_size): if vhd_path.split('.')[-1].lower() == "vhd": LOG.debug("Getting parent disk info for disk: %s", vhd_path) base_disk_path = self._vhdutils.get_vhd_parent_path(vhd_path) if base_disk_path: # A differential VHD cannot be resized. This limitation # does not apply to the VHDX format. self._merge_base_vhd(vhd_path, base_disk_path) LOG.debug("Resizing disk \"%(vhd_path)s\" to new max " "size %(new_size)s", {'vhd_path': vhd_path, 'new_size': new_size}) self._vhdutils.resize_vhd(vhd_path, new_size) def _check_base_disk(self, context, instance, diff_vhd_path, src_base_disk_path): base_vhd_path = self._imagecache.get_cached_image(context, instance) # If the location of the base host differs between source # and target hosts we need to reconnect the base disk if src_base_disk_path.lower() != base_vhd_path.lower(): LOG.debug("Reconnecting copied base VHD " "%(base_vhd_path)s and diff " "VHD %(diff_vhd_path)s", {'base_vhd_path': base_vhd_path, 'diff_vhd_path': diff_vhd_path}) self._vhdutils.reconnect_parent_vhd(diff_vhd_path, base_vhd_path) def finish_migration(self, context, migration, instance, disk_info, network_info, image_meta, resize_instance=False, block_device_info=None, power_on=True): LOG.debug("finish_migration called", instance=instance) instance_name = instance.name if self._volumeops.ebs_root_in_block_devices(block_device_info): root_vhd_path = None else: root_vhd_path = self._pathutils.lookup_root_vhd_path(instance_name) if not root_vhd_path: raise vmutils.HyperVException(_("Cannot find boot VHD " "file for instance: %s") % instance_name) root_vhd_info = self._vhdutils.get_vhd_info(root_vhd_path) src_base_disk_path = root_vhd_info.get("ParentPath") if src_base_disk_path: self._check_base_disk(context, instance, root_vhd_path, src_base_disk_path) if resize_instance: new_size = instance.root_gb * units.Gi self._check_resize_vhd(root_vhd_path, root_vhd_info, new_size) eph_vhd_path = self._pathutils.lookup_ephemeral_vhd_path(instance_name) if resize_instance: new_size = instance.get('ephemeral_gb', 0) * units.Gi if not eph_vhd_path: if new_size: eph_vhd_path = self._vmops.create_ephemeral_vhd(instance) else: eph_vhd_info = self._vhdutils.get_vhd_info(eph_vhd_path) self._check_resize_vhd(eph_vhd_path, eph_vhd_info, new_size) vm_gen = self._vmops.get_image_vm_generation(root_vhd_path, image_meta) self._vmops.create_instance(instance, network_info, block_device_info, root_vhd_path, eph_vhd_path, vm_gen) self._check_and_attach_config_drive(instance, vm_gen) if power_on: self._vmops.power_on(instance)
license: apache-2.0 | hash: -1,321,830,333,474,343,200 | line_mean: 43.313131 | line_max: 79 | alpha_frac: 0.571157 | autogenerated: false
repo_name: pratikmallya/hue | path: desktop/core/src/desktop/lib/metrics/file_reporter.py | copies: 19 | size: 2262
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import logging
import os
import tempfile
import threading

from pyformance.reporters.reporter import Reporter

from desktop.lib.metrics import global_registry

LOG = logging.getLogger(__name__)


class FileReporter(Reporter):
    def __init__(self, location, *args, **kwargs):
        super(FileReporter, self).__init__(*args, **kwargs)
        self.location = location

    def report_now(self, registry=None, timestamp=None):
        dirname = os.path.dirname(self.location)

        if not os.path.exists(dirname):
            try:
                os.makedirs(dirname)
            except OSError, e:
                LOG.error('failed to make the directory %s: %s' % (dirname, e))

        # Write the metrics to a temporary file, then atomically
        # rename the file to the real location.
        f = tempfile.NamedTemporaryFile(
            dir=dirname,
            delete=False)
        try:
            json.dump(self.registry.dump_metrics(), f)
            f.close()
            os.rename(f.name, self.location)
        except Exception:
            LOG.exception('failed to write metrics to file')
            os.remove(f.name)
            raise


_reporter = None


def start_file_reporter():
    from desktop.conf import METRICS

    global _reporter

    if _reporter is None:
        location = METRICS.LOCATION.get()
        interval = METRICS.COLLECTION_INTERVAL.get()

        if location is not None and interval is not None:
            _reporter = FileReporter(
                location,
                reporting_interval=interval / 1000.0,
                registry=global_registry())
            _reporter.start()
license: apache-2.0 | hash: -1,733,971,814,190,744,300 | line_mean: 28.376623 | line_max: 74 | alpha_frac: 0.702034 | autogenerated: false
repo_name: naparuba/opsbro | path: data/global-configuration/packs/mongodb/collectors/pymongo/mongo_replica_set_client.py | copies: 53 | size: 1955
# Copyright 2011-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.

"""Deprecated. See :doc:`/examples/high_availability`."""

import warnings

from pymongo import mongo_client


class MongoReplicaSetClient(mongo_client.MongoClient):
    """Deprecated alias for :class:`~pymongo.mongo_client.MongoClient`.

    :class:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient`
    will be removed in a future version of PyMongo.

    .. versionchanged:: 3.0
       :class:`~pymongo.mongo_client.MongoClient` is now the one and only
       client class for a standalone server, mongos, or replica set.
       It includes the functionality that had been split into
       :class:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient`: it
       can connect to a replica set, discover all its members, and monitor
       the set for stepdowns, elections, and reconfigs.

       The ``refresh`` method is removed from
       :class:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient`,
       as are the ``seeds`` and ``hosts`` properties.
    """
    def __init__(self, *args, **kwargs):
        warnings.warn('MongoReplicaSetClient is deprecated, use MongoClient'
                      ' to connect to a replica set',
                      DeprecationWarning, stacklevel=2)
        super(MongoReplicaSetClient, self).__init__(*args, **kwargs)

    def __repr__(self):
        return "MongoReplicaSetClient(%s)" % (self._repr_helper(),)
license: mit | hash: -5,526,254,158,968,015,000 | line_mean: 39.729167 | line_max: 76 | alpha_frac: 0.701279 | autogenerated: false
repo_name: Taapat/enigma2-openpli-fulan | path: lib/python/Tools/ASCIItranslit.py | copies: 85 | size: 4020
# -*- coding:utf-8 -*- ASCIItranslit = { 0x0022: "''", 0x002A: "_", 0x002F: "_", 0x003A: "_", 0x003C: "_", 0x003D: "_", 0x003E: "_", 0x003F: "_", 0x005C: "_", 0x007C: "_", 0x007F: "", 0x00A0: "_", 0x00A1: "!", 0x00A2: "c", 0x00A3: "lb", 0x00A4: "", 0x00A5: "yen", 0x00A6: "I", 0x00A7: "SS", 0x00A8: "'", 0x00A9: "(c)", 0x00AA: "a", 0x00AB: "<<", 0x00AC: "not", 0x00AD: "-", 0x00AE: "(R)", 0x00AF: "", 0x00B0: "^0", 0x00B1: "+-", 0x00B2: "^2", 0x00B3: "^3", 0x00B4: "'", 0x00B5: "u", 0x00B6: "P", 0x00B7: ".", 0x00B8: ",", 0x00B9: "^1", 0x00BA: "o", 0x00BB: ">>", 0x00BC: "1_4 ", 0x00BD: "1_2 ", 0x00BE: "3_4 ", 0x00BF: "_", 0x00C0: "`A", 0x00C1: "'A", 0x00C2: "^A", 0x00C3: "~A", 0x00C4: "Ae", 0x00C5: "A", 0x00C6: "AE", 0x00C7: "C", 0x00C8: "`E", 0x00C9: "'E", 0x00CA: "^E", 0x00CB: "E", 0x00CC: "`I", 0x00CD: "'I", 0x00CE: "^I", 0x00CF: "I", 0x00D0: "D", 0x00D1: "~N", 0x00D2: "`O", 0x00D3: "'O", 0x00D4: "^O", 0x00D5: "~O", 0x00D6: "Oe", 0x00D7: "x", 0x00D8: "O", 0x00D9: "`U", 0x00DA: "'U", 0x00DB: "^U", 0x00DC: "Ue", 0x00DD: "'Y", 0x00DE: "Th", 0x00DF: "ss", 0x00E0: "`a", 0x00E1: "'a", 0x00E2: "^a", 0x00E3: "~a", 0x00E4: "AE", 0x00E5: "a", 0x00E6: "ae", 0x00E7: "c", 0x00E8: "`e", 0x00E9: "'e", 0x00EA: "^e", 0x00EB: "e", 0x00EC: "`i", 0x00ED: "'i", 0x00EE: "^i", 0x00EF: "i", 0x00F0: "d", 0x00F1: "~n", 0x00F2: "`o", 0x00F3: "'o", 0x00F4: "^o", 0x00F5: "~o", 0x00F6: "oe", 0x00F7: "_", 0x00F8: "o", 0x00F9: "`u", 0x00FA: "'u", 0x00FB: "^u", 0x00FC: "ue", 0x00FD: "'y", 0x00FE: "th", 0x00FF: "Y", 0x0100: "A", 0x0101: "a", 0x0102: "A", 0x0103: "a", 0x0104: "A", 0x0105: "a", 0x0106: "'C", 0x0107: "'c", 0x0108: "^C", 0x0109: "^c", 0x010A: "C", 0x010B: "c", 0x010C: "C", 0x010D: "c", 0x010E: "D", 0x010F: "d", 0x0110: "D", 0x0111: "d", 0x0112: "E", 0x0113: "e", 0x0114: "E", 0x0115: "e", 0x0116: "E", 0x0117: "e", 0x0118: "E", 0x0119: "e", 0x011A: "E", 0x011B: "e", 0x011C: "^G", 0x011D: "^g", 0x011E: "G", 0x011F: "g", 0x0120: "G", 0x0121: "g", 0x0122: "G", 0x0123: "g", 0x0124: "^H", 0x0125: "^h", 0x0126: "H", 0x0127: "h", 0x0128: "~I", 0x0129: "~i", 0x012A: "I", 0x012B: "i", 0x012C: "I", 0x012D: "i", 0x012E: "I", 0x012F: "i", 0x0130: "I", 0x0131: "i", 0x0132: "IJ", 0x0133: "ij", 0x0134: "^J", 0x0135: "^j", 0x0136: "K", 0x0137: "k", 0x0138: "", 0x0139: "L", 0x013A: "l", 0x013B: "L", 0x013C: "l", 0x013D: "L", 0x013E: "l", 0x013F: "L", 0x0140: "l", 0x0141: "L", 0x0142: "l", 0x0143: "'N", 0x0144: "'n", 0x0145: "N", 0x0146: "n", 0x0147: "N", 0x0148: "n", 0x0149: "n", 0x014A: "_", 0x014B: "_", 0x014C: "O", 0x014D: "o", 0x014E: "O", 0x014F: "o", 0x0150: "''o", 0x0152: "OE", 0x0153: "oe", 0x0154: "'R", 0x0155: "'r", 0x0156: "R", 0x0157: "r", 0x0158: "R", 0x0159: "r", 0x015A: "'s", 0x015B: "'s", 0x015C: "^S", 0x015D: "^s", 0x015E: "S", 0x015F: "s", 0x0160: "S", 0x0161: "s", 0x0162: "T", 0x0163: "t", 0x0164: "T", 0x0165: "t", 0x0166: "T", 0x0167: "t", 0x0168: "~U", 0x0169: "~u", 0x016A: "U", 0x016B: "u", 0x016C: "U", 0x016D: "u", 0x016E: "U", 0x016F: "u", 0x0170: "''u", 0x0172: "U", 0x0173: "u", 0x0174: "^W", 0x0175: "^w", 0x0176: "^Y", 0x0177: "^y", 0x0178: "Y", 0x0179: "'Z", 0x017A: "'z", 0x017B: "Z", 0x017C: "z", 0x017D: "Z", 0x017E: "z", 0x017F: "s", 0x018F: "_", 0x0192: "f", 0x01C4: "DZ", 0x01C5: "DZ", 0x01C6: "DZ", 0x01C7: "LJ", 0x01C8: "Lj", 0x01C9: "lj", 0x01CA: "NJ", 0x01CB: "Nj", 0x01CC: "nj", 0x01F1: "DZ", 0x01F2: "Dz", 0x01F3: "dz", 0x0218: "S", 0x0219: "s", 0x021A: "T", 0x021B: "t", 0x0259: "_", 0x20AC: "EUR" } def legacyEncode(string): string2 = "" for z, char in enumerate(string.decode("utf-8")): i = 
ord(char) if i < 33: string2 += "_" elif i in ASCIItranslit: string2 += ASCIItranslit[i] else: try: string2 += char.encode('ascii', 'strict') except: string2 += "_" return string2.upper()
gpl-2.0
2,814,552,986,348,173,000
13.833948
50
0.495522
false
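A minimal usage sketch of the legacyEncode() helper in the record above (illustrative only: the function is Python 2 style, since it calls str.decode("utf-8"), and "asciitranslit" is an assumed module name for that file):

# -*- coding: utf-8 -*-
# Illustrative sketch; module name "asciitranslit" is an assumption.
from asciitranslit import legacyEncode

title = "Fußball läuft"        # UTF-8 byte string in a Python 2 source file
print legacyEncode(title)      # roughly "FUSSBALL_LAEUFT": space -> "_", ß -> "SS", ä -> "AE"

Characters below 0x21 and anything outside the table that cannot be ASCII-encoded collapse to "_", mapped characters use their ASCII replacement from ASCIItranslit, and the result is upper-cased.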
y12uc231/edx-platform
lms/djangoapps/instructor/features/common.py
47
4576
""" Define common steps for instructor dashboard acceptance tests. """ # pylint: disable=missing-docstring # pylint: disable=redefined-outer-name from __future__ import absolute_import from lettuce import world, step from mock import patch from nose.tools import assert_in # pylint: disable=no-name-in-module from courseware.tests.factories import StaffFactory, InstructorFactory @step(u'Given I am "([^"]*)" for a very large course') def make_staff_or_instructor_for_large_course(step, role): make_large_course(step, role) @patch.dict('courseware.access.settings.FEATURES', {"MAX_ENROLLMENT_INSTR_BUTTONS": 0}) def make_large_course(step, role): i_am_staff_or_instructor(step, role) @step(u'Given I am "([^"]*)" for a course') def i_am_staff_or_instructor(step, role): # pylint: disable=unused-argument ## In summary: makes a test course, makes a new Staff or Instructor user ## (depending on `role`), and logs that user in to the course # Store the role assert_in(role, ['instructor', 'staff']) # Clear existing courses to avoid conflicts world.clear_courses() # Create a new course course = world.CourseFactory.create( org='edx', number='999', display_name='Test Course' ) world.course_key = course.id world.role = 'instructor' # Log in as the an instructor or staff for the course if role == 'instructor': # Make & register an instructor for the course world.instructor = InstructorFactory(course_key=world.course_key) world.enroll_user(world.instructor, world.course_key) world.log_in( username=world.instructor.username, password='test', email=world.instructor.email, name=world.instructor.profile.name ) else: world.role = 'staff' # Make & register a staff member world.staff = StaffFactory(course_key=world.course_key) world.enroll_user(world.staff, world.course_key) world.log_in( username=world.staff.username, password='test', email=world.staff.email, name=world.staff.profile.name ) def go_to_section(section_name): # section name should be one of # course_info, membership, student_admin, data_download, analytics, send_email world.visit(u'/courses/{}'.format(world.course_key)) world.css_click(u'a[href="/courses/{}/instructor"]'.format(world.course_key)) world.css_click('a[data-section="{0}"]'.format(section_name)) @step(u'I click "([^"]*)"') def click_a_button(step, button): # pylint: disable=unused-argument if button == "Generate Grade Report": # Go to the data download section of the instructor dash go_to_section("data_download") # Click generate grade report button world.css_click('input[name="calculate-grades-csv"]') # Expect to see a message that grade report is being generated expected_msg = "Your grade report is being generated! You can view the status of the generation task in the 'Pending Instructor Tasks' section." world.wait_for_visible('#report-request-response') assert_in( expected_msg, world.css_text('#report-request-response'), msg="Could not find grade report generation success message." 
) elif button == "Grading Configuration": # Go to the data download section of the instructor dash go_to_section("data_download") world.css_click('input[name="dump-gradeconf"]') elif button == "List enrolled students' profile information": # Go to the data download section of the instructor dash go_to_section("data_download") world.css_click('input[name="list-profiles"]') elif button == "Download profile information as a CSV": # Go to the data download section of the instructor dash go_to_section("data_download") world.css_click('input[name="list-profiles-csv"]') else: raise ValueError("Unrecognized button option " + button) @step(u'I visit the "([^"]*)" tab') def click_a_button(step, tab_name): # pylint: disable=unused-argument # course_info, membership, student_admin, data_download, analytics, send_email tab_name_dict = { 'Course Info': 'course_info', 'Membership': 'membership', 'Student Admin': 'student_admin', 'Data Download': 'data_download', 'Analytics': 'analytics', 'Email': 'send_email', } go_to_section(tab_name_dict[tab_name])
agpl-3.0
-8,432,078,084,603,166,000
33.406015
152
0.654283
false
jmartinezchaine/OpenERP
openerp/workflow/wkf_logs.py
15
1523
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

#
# May be uncommented to logs workflows modifications
#
import openerp.netsvc as netsvc

def log(cr,ident,act_id,info=''):
    return
#    msg = """
#res_type: %r
#res_id: %d
#uid: %d
#act_id: %d
#info: %s
#""" % (ident[1], ident[2], ident[0], act_id, info)
#    cr.execute('insert into wkf_logs (res_type, res_id, uid, act_id, time, info) values (%s,%s,%s,%s,current_time,%s)', (ident[1],int(ident[2]),int(ident[0]),int(act_id),info))

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
-4,776,751,369,692,606,000
37.075
177
0.596848
false
lertech/extra-addons
website_disable_odoo/__init__.py
1
1185
# -*- encoding: utf-8 -*-
# Python source code encoding : https://www.python.org/dev/peps/pep-0263/
##############################################################################
#
#    OpenERP, Odoo Source Management Solution
#    Copyright (c) 2015 Antiun Ingeniería S.L. (http://www.antiun.com)
#    Antonio Espinosa <[email protected]>
#    Daniel Góme-Zurita <[email protected]>
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as published
#    by the Free Software Foundation, either version 3 of the License, or
#    (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
gpl-3.0
-4,967,695,546,326,101,000
50.434783
78
0.608622
false
wolverineav/neutron
neutron/db/portsecurity_db.py
3
2785
# Copyright 2013 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.api.v2 import attributes as attrs from neutron.common import utils from neutron.db import db_base_plugin_v2 from neutron.db import portsecurity_db_common from neutron.extensions import portsecurity as psec class PortSecurityDbMixin(portsecurity_db_common.PortSecurityDbCommon): # Register dict extend functions for ports and networks db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( attrs.NETWORKS, ['_extend_port_security_dict']) db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( attrs.PORTS, ['_extend_port_security_dict']) def _extend_port_security_dict(self, response_data, db_data): if ('port-security' in getattr(self, 'supported_extension_aliases', [])): super(PortSecurityDbMixin, self)._extend_port_security_dict( response_data, db_data) def _determine_port_security_and_has_ip(self, context, port): """Returns a tuple of booleans (port_security_enabled, has_ip). Port_security is the value associated with the port if one is present otherwise the value associated with the network is returned. has_ip is if the port is associated with an ip or not. """ has_ip = self._ip_on_port(port) # we don't apply security groups for dhcp, router if port.get('device_owner') and utils.is_port_trusted(port): return (False, has_ip) if attrs.is_attr_set(port.get(psec.PORTSECURITY)): port_security_enabled = port[psec.PORTSECURITY] # If port has an ip and security_groups are passed in # conveniently set port_security_enabled to true this way # user doesn't also have to pass in port_security_enabled=True # when creating ports. elif (has_ip and attrs.is_attr_set(port.get('security_groups'))): port_security_enabled = True else: port_security_enabled = self._get_network_security_binding( context, port['network_id']) return (port_security_enabled, has_ip) def _ip_on_port(self, port): return bool(port.get('fixed_ips'))
apache-2.0
-8,432,324,774,877,598,000
43.206349
78
0.682944
false
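As a plain-Python illustration of the decision order described in the docstring of _determine_port_security_and_has_ip above (this is not the Neutron API, just a standalone sketch with made-up inputs):

def port_security_and_has_ip(port, network_default, is_trusted):
    # mirrors the order of checks in _determine_port_security_and_has_ip
    has_ip = bool(port.get('fixed_ips'))
    if is_trusted:                               # dhcp/router ports: never apply port security
        return False, has_ip
    if 'port_security_enabled' in port:          # an explicit per-port value wins
        return port['port_security_enabled'], has_ip
    if has_ip and port.get('security_groups'):   # implied True when security groups are passed
        return True, has_ip
    return network_default, has_ip               # otherwise fall back to the network binding

# e.g. a port with an IP and security groups but no explicit flag:
print(port_security_and_has_ip(
    {'fixed_ips': [{'ip_address': '10.0.0.5'}], 'security_groups': ['default']},
    network_default=False, is_trusted=False))    # -> (True, True)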
route-nazionale/event_subscribe
event_subscribe/default_settings.py
1
2179
""" Django settings for event_subscribe project. For more information on this file, see https://docs.djangoproject.com/en/dev/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/dev/ref/settings/ """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) import os, locale BASE_DIR = os.path.dirname(os.path.dirname(__file__)) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/dev/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'o*4+4o2wia&n8_i02q9rxhhyjzzb_ueqcn=y!(ws2-z7pgydoi' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True TEMPLATE_DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = ( 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'base', 'subscribe', ) MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ) ROOT_URLCONF = 'event_subscribe.urls' WSGI_APPLICATION = 'event_subscribe.wsgi.application' # Database # https://docs.djangoproject.com/en/dev/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Internationalization # https://docs.djangoproject.com/en/dev/topics/i18n/ LANGUAGE_CODE = 'it' TIME_ZONE = 'Europe/Rome' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/dev/howto/static-files/ STATIC_URL = '/static/' # useful for strftime locale.setlocale(locale.LC_ALL, 'it_IT.UTF8')
agpl-3.0
5,728,406,010,177,067,000
24.045977
71
0.730151
false
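The settings module above ends by switching the process locale to Italian, noted as "useful for strftime"; a small illustrative check of the effect (assumes the it_IT.UTF8 locale is generated on the host):

import locale
import datetime

locale.setlocale(locale.LC_ALL, 'it_IT.UTF8')
# weekday and month names now come out in Italian, e.g. roughly 'venerdì 01 maggio 2015'
print(datetime.date(2015, 5, 1).strftime('%A %d %B %Y'))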
sadanandb/pmt
src/tactic/ui/widget/__init__.py
5
1116
###########################################################
#
# Copyright (c) 2005-2008, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#

# This module contains a collection of generalized utility widgets

from upload_wdg import *
from sobject_group_wdg import *
from calendar_wdg import *
from sobject_calendar_wdg import *
from data_export_wdg import *
from misc_input_wdg import *
from button_wdg import *
from button_new_wdg import *
from gear_menu_wdg import *
from chooser_wdg import *
from smart_select_wdg import *
from proxy_wdg import *
from checkin_wdg import *
from discussion_wdg import *
from text_wdg import *
from file_browser_wdg import *
from format_value_wdg import *
from embed_wdg import *
from swap_display_wdg import *
from reset_password_wdg import *
from title_wdg import *
from ckeditor_wdg import *
from video_wdg import *

#from color_input_wdg import *
#from preview_change_wdg import *
epl-1.0
-4,710,474,057,186,819,000
27.615385
66
0.713262
false
40223112/w16test
ref/gear.py
68
21704
import cherrypy import os import sys # 這個程式要計算正齒輪的齒面寬, 資料庫連結希望使用 pybean 與 SQLite # 導入 pybean 模組與所要使用的 Store 及 SQLiteWriter 方法 from pybean import Store, SQLiteWriter import math # 確定程式檔案所在目錄, 在 Windows 有最後的反斜線 _curdir = os.path.join(os.getcwd(), os.path.dirname(__file__)) # 將所在目錄設為系統搜尋目錄 sys.path.append(_curdir) if 'OPENSHIFT_REPO_DIR' in os.environ.keys(): # while program is executed in OpenShift download_root_dir = os.environ['OPENSHIFT_DATA_DIR'] data_dir = os.environ['OPENSHIFT_DATA_DIR'] else: # while program is executed in localhost download_root_dir = _curdir + "/local_data/" data_dir = _curdir + "/local_data/" # 這是 Gear 設計資料表的定義 ''' lewis.db 中有兩個資料表, steel 與 lewis CREATE TABLE steel ( serialno INTEGER, unsno TEXT, aisino TEXT, treatment TEXT, yield_str INTEGER, tensile_str INTEGER, stretch_ratio INTEGER, sectional_shr INTEGER, brinell INTEGER ); CREATE TABLE lewis ( serialno INTEGER PRIMARY KEY NOT NULL, gearno INTEGER, type1 NUMERIC, type4 NUMERIC, type3 NUMERIC, type2 NUMERIC ); ''' class Gear(object): def __init__(self): # hope to create downloads and images directories  if not os.path.isdir(download_root_dir+"downloads"): try: os.makedirs(download_root_dir+"downloads") except: print("mkdir error") if not os.path.isdir(download_root_dir+"images"): try: os.makedirs(download_root_dir+"images") except: print("mkdir error") if not os.path.isdir(download_root_dir+"tmp"): try: os.makedirs(download_root_dir+"tmp") except: print("mkdir error") @cherrypy.expose def default(self, attr='default', *args, **kwargs): raise cherrypy.HTTPRedirect("/") # 各組利用 index 引導隨後的程式執行 @cherrypy.expose def index(self, *args, **kwargs): # 進行資料庫檔案連結, 並且取出所有資料 try: # 利用 Store 建立資料庫檔案對應物件, 並且設定 frozen=True 表示不要開放動態資料表的建立 # 因為程式以 application 所在目錄執行, 因此利用相對目錄連結 lewis.db 資料庫檔案 SQLite連結 = Store(SQLiteWriter(_curdir+"/lewis.db", frozen=True)) #material = SQLite連結.find_one("steel","serialno = ?",[序號]) # str(SQLite連結.count("steel")) 將傳回 70, 表示資料庫中有 70 筆資料 material = SQLite連結.find("steel") # 所傳回的 material 為 iterator ''' outstring = "" for material_item in material: outstring += str(material_item.serialno) + ":" + material_item.unsno + "_" + material_item.treatment + "<br />" return outstring ''' except: return "抱歉! 資料庫無法連線<br />" outstring = ''' <form id=entry method=post action="gear_width"> 請填妥下列參數,以完成適當的齒尺寸大小設計。<br /> 馬達馬力:<input type=text name=horsepower id=horsepower value=100 size=10>horse power<br /> 馬達轉速:<input type=text name=rpm id=rpm value=1120 size=10>rpm<br /> 齒輪減速比: <input type=text name=ratio id=ratio value=4 size=10><br /> 齒形:<select name=toothtype id=toothtype> <option value=type1>壓力角20度,a=0.8,b=1.0 <option value=type2>壓力角20度,a=1.0,b=1.25 <option value=type3>壓力角25度,a=1.0,b=1.25 <option value=type4>壓力角25度,a=1.0,b=1.35 </select><br /> 安全係數:<input type=text name=safetyfactor id=safetyfactor value=3 size=10><br /> 齒輪材質:<select name=material_serialno id=material_serialno> ''' for material_item in material: outstring += "<option value=" + str(material_item.serialno) + ">UNS - " + \ material_item.unsno + " - " + material_item.treatment outstring += "</select><br />" outstring += "小齒輪齒數:<input type=text name=npinion id=npinion value=18 size=10><br />" outstring += "<input type=submit id=submit value=進行運算>" outstring += "</form>" return outstring The 5 problems (The following problems are ridiculously simple, but you'd be surprise to discover how many people struggle with them. To the point of not getting anything done at all. Seriously.) 
Problem 1 Write three functions that compute the sum of the numbers in a given list using a for-loop, a while-loop, and recursion. Problem 2 Write a function that combines two lists by alternatingly taking elements. For example: given the two lists [a, b, c] and [1, 2, 3], the function should return [a, 1, b, 2, c, 3]. Problem 3 Write a function that computes the list of the first 100 Fibonacci numbers. By definition, the first two numbers in the Fibonacci sequence are 0 and 1, and each subsequent number is the sum of the previous two. As an example, here are the first 10 Fibonnaci numbers: 0, 1, 1, 2, 3, 5, 8, 13, 21, and 34. Problem 4 Write a function that given a list of non negative integers, arranges them such that they form the largest possible number. For example, given [50, 2, 1, 9], the largest formed number is 95021. Problem 5 Write a program that outputs all possibilities to put + or - or nothing between the numbers 1, 2, ..., 9 (in this order) such that the result is always 100. For example: 1 + 2 + 34 – 5 + 67 – 8 + 9 = 100. Problem 1 Write three functions that compute the sum of the numbers in a given list using a for-loop, a while-loop, and recursion. def for_sum(mylist): sum = 0 for i in range(len(mylist)): sum += mylist[i] return sum mylist = [1, 4, 5, 3, 7] sum = for_sum(mylist) g.es("sum is:", sum) def while_sum(mylist): i = 0 sum = 0 while i < len(mylist): sum += mylist[i] i += 1 return sum mylist = [1, 4, 5, 3, 7] sum = while_sum(mylist) g.es("sum is:", sum) def recur_sum(mylist): if len(mylist) == 1: return mylist[0] else: g.es(mylist[0],"+ 遞迴加(", mylist[1:], ")") return mylist[0] + recur_sum(mylist[1:]) mylist = [1, 4, 5, 3, 7] sum = recur_sum(mylist) g.es("sum is:", sum) Problem 2 Write a function that combines two lists by alternatingly taking elements. For example: given the two lists [a, b, c] and [1, 2, 3], the function should return [a, 1, b, 2, c, 3]. Problem 3 Write a function that computes the list of the first 100 Fibonacci numbers. By definition, the first two numbers in the Fibonacci sequence are 0 and 1, and each subsequent number is the sum of the previous two. As an example, here are the first 10 Fibonnaci numbers: 0, 1, 1, 2, 3, 5, 8, 13, 21, and 34. Problem 4 Write a function that given a list of non negative integers, arranges them such that they form the largest possible number. For example, given [50, 2, 1, 9], the largest formed number is 95021. Problem 5 Write a program that outputs all possibilities to put + or - or nothing between the numbers 1, 2, ..., 9 (in this order) such that the result is always 100. For example: 1 + 2 + 34 – 5 + 67 – 8 + 9 = 100. @cherrypy.expose def interpolation(self, small_gear_no=18, gear_type=1): SQLite連結 = Store(SQLiteWriter(_curdir+"/lewis.db", frozen=True)) # 使用內插法求值 # 找出比目標齒數大的其中的最小的,就是最鄰近的大值 lewis_factor = SQLite連結.find_one("lewis","gearno > ?",[small_gear_no]) if(gear_type == 1): larger_formfactor = lewis_factor.type1 elif(gear_type == 2): larger_formfactor = lewis_factor.type2 elif(gear_type == 3): larger_formfactor = lewis_factor.type3 else: larger_formfactor = lewis_factor.type4 larger_toothnumber = lewis_factor.gearno # 找出比目標齒數小的其中的最大的,就是最鄰近的小值 lewis_factor = SQLite連結.find_one("lewis","gearno < ? 
order by gearno DESC",[small_gear_no]) if(gear_type == 1): smaller_formfactor = lewis_factor.type1 elif(gear_type == 2): smaller_formfactor = lewis_factor.type2 elif(gear_type == 3): smaller_formfactor = lewis_factor.type3 else: smaller_formfactor = lewis_factor.type4 smaller_toothnumber = lewis_factor.gearno calculated_factor = larger_formfactor + (small_gear_no - larger_toothnumber) * (larger_formfactor - smaller_formfactor) / (larger_toothnumber - smaller_toothnumber) # 只傳回小數點後五位數 return str(round(calculated_factor, 5)) # 改寫為齒面寬的設計函式 @cherrypy.expose def gear_width(self, horsepower=100, rpm=1000, ratio=4, toothtype=1, safetyfactor=2, material_serialno=1, npinion=18): SQLite連結 = Store(SQLiteWriter(_curdir+"/lewis.db", frozen=True)) outstring = "" # 根據所選用的齒形決定壓力角 if(toothtype == 1 or toothtype == 2): 壓力角 = 20 else: 壓力角 = 25 # 根據壓力角決定最小齒數 if(壓力角== 20): 最小齒數 = 18 else: 最小齒數 = 12 # 直接設最小齒數 if int(npinion) <= 最小齒數: npinion = 最小齒數 # 大於400的齒數則視為齒條(Rack) if int(npinion) >= 400: npinion = 400 # 根據所選用的材料查詢強度值 # 由 material之序號查 steel 表以得材料之降伏強度S單位為 kpsi 因此查得的值要成乘上1000 # 利用 Store 建立資料庫檔案對應物件, 並且設定 frozen=True 表示不要開放動態資料表的建立 #SQLite連結 = Store(SQLiteWriter("lewis.db", frozen=True)) # 指定 steel 資料表 steel = SQLite連結.new("steel") # 資料查詢 #material = SQLite連結.find_one("steel","unsno=? and treatment=?",[unsno, treatment]) material = SQLite連結.find_one("steel","serialno=?",[material_serialno]) # 列出 steel 資料表中的資料筆數 #print(SQLite連結.count("steel")) #print (material.yield_str) strengthstress = material.yield_str*1000 # 由小齒輪的齒數與齒形類別,查詢lewis form factor # 先查驗是否有直接對應值 on_table = SQLite連結.count("lewis","gearno=?",[npinion]) if on_table == 1: # 直接進入設計運算 #print("直接運算") #print(on_table) lewis_factor = SQLite連結.find_one("lewis","gearno=?",[npinion]) #print(lewis_factor.type1) # 根據齒形查出 formfactor 值 if(toothtype == 1): formfactor = lewis_factor.type1 elif(toothtype == 2): formfactor = lewis_factor.type2 elif(toothtype == 3): formfactor = lewis_factor.type3 else: formfactor = lewis_factor.type4 else: # 沒有直接對應值, 必須進行查表內插運算後, 再執行設計運算 #print("必須內插") #print(interpolation(npinion, gear_type)) formfactor = self.interpolation(npinion, toothtype) # 開始進行設計運算 ngear = int(npinion) * int(ratio) # 重要的最佳化設計---儘量用整數的diametralpitch # 先嘗試用整數算若 diametralpitch 找到100 仍無所獲則改用 0.25 作為增量再不行則宣告 fail counter = 0 i = 0.1 facewidth = 0 circularpitch = 0 while (facewidth <= 3 * circularpitch or facewidth >= 5 * circularpitch): diametralpitch = i #circularpitch = 3.14159/diametralpitch circularpitch = math.pi/diametralpitch pitchdiameter = int(npinion)/diametralpitch #pitchlinevelocity = 3.14159*pitchdiameter*rpm/12 pitchlinevelocity = math.pi*pitchdiameter * float(rpm)/12 transmittedload = 33000*float(horsepower)/pitchlinevelocity velocityfactor = 1200/(1200 + pitchlinevelocity) # formfactor is Lewis form factor # formfactor need to get from table 13-3 and determined ty teeth number and type of tooth # formfactor = 0.293 # 90 is the value get from table corresponding to material type facewidth = transmittedload*diametralpitch*float(safetyfactor)/velocityfactor/formfactor/strengthstress if(counter>5000): outstring += "超過5000次的設計運算,仍無法找到答案!<br />" outstring += "可能所選用的傳遞功率過大,或無足夠強度的材料可以使用!<br />" # 離開while迴圈 break i += 0.1 counter += 1 facewidth = round(facewidth, 4) if(counter<5000): # 先載入 cube 程式測試 #outstring = self.cube_weblink() # 再載入 gear 程式測試 outstring = self.gear_weblink() outstring += "進行"+str(counter)+"次重複運算後,得到合用的facewidth值為:"+str(facewidth) return outstring @cherrypy.expose def cube_weblink(self): outstring = '''<script type="text/javascript" 
src="/static/weblink/pfcUtils.js"></script> <script type="text/javascript" src="/static/weblink/wl_header.js"> document.writeln ("Error loading Pro/Web.Link header!"); </script> <script type="text/javascript" language="JavaScript"> // 若第三輸入為 false, 表示僅載入 session, 但是不顯示 // ret 為 model open return var ret = document.pwl.pwlMdlOpen("cube.prt", "v:/tmp", false); if (!ret.Status) { alert("pwlMdlOpen failed (" + ret.ErrorCode + ")"); } //將 ProE 執行階段設為變數 session var session = pfcGetProESession(); // 在視窗中打開零件檔案, 並且顯示出來 var window = session.OpenFile(pfcCreate("pfcModelDescriptor").CreateFromFileName("cube.prt")); var solid = session.GetModel("cube.prt",pfcCreate("pfcModelType").MDL_PART); var length,width,myf,myn,i,j,volume,count,d1Value,d2Value; // 將模型檔中的 length 變數設為 javascript 中的 length 變數 length = solid.GetParam("a1"); // 將模型檔中的 width 變數設為 javascript 中的 width 變數 width = solid.GetParam("a2"); //改變零件尺寸 //myf=20; //myn=20; volume=0; count=0; try { // 以下採用 URL 輸入對應變數 //createParametersFromArguments (); // 以下則直接利用 javascript 程式改變零件參數 for(i=0;i<=5;i++) { //for(j=0;j<=2;j++) //{ myf=20.0; myn=10.0+i*0.5; // 設定變數值, 利用 ModelItem 中的 CreateDoubleParamValue 轉換成 Pro/Web.Link 所需要的浮點數值 d1Value = pfcCreate ("MpfcModelItem").CreateDoubleParamValue(myf); d2Value = pfcCreate ("MpfcModelItem").CreateDoubleParamValue(myn); // 將處理好的變數值, 指定給對應的零件變數 length.Value = d1Value; width.Value = d2Value; //零件尺寸重新設定後, 呼叫 Regenerate 更新模型 solid.Regenerate(void null); //利用 GetMassProperty 取得模型的質量相關物件 properties = solid.GetMassProperty(void null); //volume = volume + properties.Volume; volume = properties.Volume; count = count + 1; alert("執行第"+count+"次,零件總體積:"+volume); // 將零件存為新檔案 var newfile = document.pwl.pwlMdlSaveAs("cube.prt", "v:/tmp", "cube"+count+".prt"); if (!newfile.Status) { alert("pwlMdlSaveAs failed (" + newfile.ErrorCode + ")"); } //} // 內圈 for 迴圈 } //外圈 for 迴圈 //alert("共執行:"+count+"次,零件總體積:"+volume); //alert("零件體積:"+properties.Volume); //alert("零件體積取整數:"+Math.round(properties.Volume)); } catch(err) { alert ("Exception occurred: "+pfcGetExceptionType (err)); } </script> ''' return outstring @cherrypy.expose def gear_weblink(self, facewidth=5, n=18): outstring = '''<script type="text/javascript" src="/static/weblink/pfcUtils.js"></script> <script type="text/javascript" src="/static/weblink/wl_header.js">// <![CDATA[ document.writeln ("Error loading Pro/Web.Link header!"); // ]]></script> <script type="text/javascript" language="JavaScript">// <![CDATA[ if (!pfcIsWindows()) netscape.security.PrivilegeManager.enablePrivilege("UniversalXPConnect"); // 若第三輸入為 false, 表示僅載入 session, 但是不顯示 // ret 為 model open return var ret = document.pwl.pwlMdlOpen("gear.prt", "v:/", false); if (!ret.Status) { alert("pwlMdlOpen failed (" + ret.ErrorCode + ")"); } //將 ProE 執行階段設為變數 session var session = pfcGetProESession(); // 在視窗中打開零件檔案, 並且顯示出來 var window = session.OpenFile(pfcCreate("pfcModelDescriptor").CreateFromFileName("gear.prt")); var solid = session.GetModel("gear.prt",pfcCreate("pfcModelType").MDL_PART); var length,width,myf,myn,i,j,volume,count,d1Value,d2Value; // 將模型檔中的 length 變數設為 javascript 中的 length 變數 length = solid.GetParam("n"); // 將模型檔中的 width 變數設為 javascript 中的 width 變數 width = solid.GetParam("face_width"); //改變零件尺寸 //myf=20; //myn=20; volume=0; count=0; try { // 以下採用 URL 輸入對應變數 //createParametersFromArguments (); // 以下則直接利用 javascript 程式改變零件參數 for(i=0;i<=5;i++) { //for(j=0;j<=2;j++) //{ myf=25+i*2; myn=10.0+i*0.5; // 設定變數值, 利用 ModelItem 中的 CreateDoubleParamValue 轉換成 Pro/Web.Link 所需要的浮點數值 //d1Value = pfcCreate 
("MpfcModelItem").CreateDoubleParamValue(myf); d1Value = pfcCreate ("MpfcModelItem").CreateIntParamValue(myf); d2Value = pfcCreate ("MpfcModelItem").CreateDoubleParamValue(myn); // 將處理好的變數值, 指定給對應的零件變數 length.Value = d1Value; width.Value = d2Value; //零件尺寸重新設定後, 呼叫 Regenerate 更新模型 solid.Regenerate(void null); //利用 GetMassProperty 取得模型的質量相關物件 properties = solid.GetMassProperty(void null); //volume = volume + properties.Volume; volume = properties.Volume; count = count + 1; alert("執行第"+count+"次,零件總體積:"+volume); // 將零件存為新檔案 var newfile = document.pwl.pwlMdlSaveAs("gear.prt", "v:/", "mygear_"+count+".prt"); if (!newfile.Status) { alert("pwlMdlSaveAs failed (" + newfile.ErrorCode + ")"); } //} // 內圈 for 迴圈 } //外圈 for 迴圈 //alert("共執行:"+count+"次,零件總體積:"+volume); //alert("零件體積:"+properties.Volume); //alert("零件體積取整數:"+Math.round(properties.Volume)); } catch(err) { alert ("Exception occurred: "+pfcGetExceptionType (err)); } // ]]></script> ''' return outstring root = Gear() # setup static, images and downloads directories application_conf = { '/static':{ 'tools.staticdir.on': True, 'tools.staticdir.dir': _curdir+"/static"}, '/images':{ 'tools.staticdir.on': True, 'tools.staticdir.dir': data_dir+"/images"}, '/downloads':{ 'tools.staticdir.on': True, 'tools.staticdir.dir': data_dir+"/downloads"} } # if inOpenshift ('OPENSHIFT_REPO_DIR' exists in environment variables) or not inOpenshift if __name__ == '__main__': if 'OPENSHIFT_REPO_DIR' in os.environ.keys(): # operate in OpenShift application = cherrypy.Application(root, config = application_conf) else: # operate in localhost cherrypy.quickstart(root, config = application_conf)
agpl-3.0
2,927,056,776,212,377,000
38.717213
307
0.58348
false
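The gear.py content above quotes the well-known "five programming problems" but only works Problem 1. Purely as hedged, illustrative sketches (not part of the original file), Problems 2-5 could be approached like this:

# Problem 2: combine two lists by alternating elements (assumes equal length).
def alternate(a, b):
    result = []
    for x, y in zip(a, b):
        result.extend([x, y])
    return result
# alternate(['a', 'b', 'c'], [1, 2, 3]) -> ['a', 1, 'b', 2, 'c', 3]

# Problem 3: the first 100 Fibonacci numbers, starting 0, 1.
def fibonacci(n=100):
    seq = [0, 1]
    while len(seq) < n:
        seq.append(seq[-1] + seq[-2])
    return seq[:n]

# Problem 4: arrange non-negative integers to form the largest possible number.
from functools import cmp_to_key

def largest_number(nums):
    digits = [str(n) for n in nums]
    # put x before y whenever the concatenation x+y beats y+x
    digits.sort(key=cmp_to_key(lambda x, y: (x + y < y + x) - (x + y > y + x)))
    return int(''.join(digits))
# largest_number([50, 2, 1, 9]) -> 95021

# Problem 5: every way of placing +, - or nothing between 1..9 that evaluates to 100.
from itertools import product

def hundreds():
    hits = []
    for ops in product(['', '+', '-'], repeat=8):
        expr = ''.join(d + op for d, op in zip('12345678', ops)) + '9'
        if eval(expr) == 100:
            hits.append(expr)
    return hits
# includes, for example, '1+2+34-5+67-8+9'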
sinkuri256/python-for-android
python3-alpha/python3-src/Lib/shlex.py
51
11100
"""A lexical analyzer class for simple shell-like syntaxes.""" # Module and documentation by Eric S. Raymond, 21 Dec 1998 # Input stacking and error message cleanup added by ESR, March 2000 # push_source() and pop_source() made explicit by ESR, January 2001. # Posix compliance, split(), string arguments, and # iterator interface by Gustavo Niemeyer, April 2003. import os.path import sys from collections import deque from io import StringIO __all__ = ["shlex", "split"] class shlex: "A lexical analyzer class for simple shell-like syntaxes." def __init__(self, instream=None, infile=None, posix=False): if isinstance(instream, str): instream = StringIO(instream) if instream is not None: self.instream = instream self.infile = infile else: self.instream = sys.stdin self.infile = None self.posix = posix if posix: self.eof = None else: self.eof = '' self.commenters = '#' self.wordchars = ('abcdfeghijklmnopqrstuvwxyz' 'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_') if self.posix: self.wordchars += ('ßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ' 'ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞ') self.whitespace = ' \t\r\n' self.whitespace_split = False self.quotes = '\'"' self.escape = '\\' self.escapedquotes = '"' self.state = ' ' self.pushback = deque() self.lineno = 1 self.debug = 0 self.token = '' self.filestack = deque() self.source = None if self.debug: print('shlex: reading from %s, line %d' \ % (self.instream, self.lineno)) def push_token(self, tok): "Push a token onto the stack popped by the get_token method" if self.debug >= 1: print("shlex: pushing token " + repr(tok)) self.pushback.appendleft(tok) def push_source(self, newstream, newfile=None): "Push an input source onto the lexer's input source stack." if isinstance(newstream, str): newstream = StringIO(newstream) self.filestack.appendleft((self.infile, self.instream, self.lineno)) self.infile = newfile self.instream = newstream self.lineno = 1 if self.debug: if newfile is not None: print('shlex: pushing to file %s' % (self.infile,)) else: print('shlex: pushing to stream %s' % (self.instream,)) def pop_source(self): "Pop the input source stack." self.instream.close() (self.infile, self.instream, self.lineno) = self.filestack.popleft() if self.debug: print('shlex: popping to %s, line %d' \ % (self.instream, self.lineno)) self.state = ' ' def get_token(self): "Get a token from the input stream (or from stack if it's nonempty)" if self.pushback: tok = self.pushback.popleft() if self.debug >= 1: print("shlex: popping token " + repr(tok)) return tok # No pushback. Get a token. raw = self.read_token() # Handle inclusions if self.source is not None: while raw == self.source: spec = self.sourcehook(self.read_token()) if spec: (newfile, newstream) = spec self.push_source(newstream, newfile) raw = self.get_token() # Maybe we got EOF instead? 
while raw == self.eof: if not self.filestack: return self.eof else: self.pop_source() raw = self.get_token() # Neither inclusion nor EOF if self.debug >= 1: if raw != self.eof: print("shlex: token=" + repr(raw)) else: print("shlex: token=EOF") return raw def read_token(self): quoted = False escapedstate = ' ' while True: nextchar = self.instream.read(1) if nextchar == '\n': self.lineno = self.lineno + 1 if self.debug >= 3: print("shlex: in state", repr(self.state), \ "I see character:", repr(nextchar)) if self.state is None: self.token = '' # past end of file break elif self.state == ' ': if not nextchar: self.state = None # end of file break elif nextchar in self.whitespace: if self.debug >= 2: print("shlex: I see whitespace in whitespace state") if self.token or (self.posix and quoted): break # emit current token else: continue elif nextchar in self.commenters: self.instream.readline() self.lineno = self.lineno + 1 elif self.posix and nextchar in self.escape: escapedstate = 'a' self.state = nextchar elif nextchar in self.wordchars: self.token = nextchar self.state = 'a' elif nextchar in self.quotes: if not self.posix: self.token = nextchar self.state = nextchar elif self.whitespace_split: self.token = nextchar self.state = 'a' else: self.token = nextchar if self.token or (self.posix and quoted): break # emit current token else: continue elif self.state in self.quotes: quoted = True if not nextchar: # end of file if self.debug >= 2: print("shlex: I see EOF in quotes state") # XXX what error should be raised here? raise ValueError("No closing quotation") if nextchar == self.state: if not self.posix: self.token = self.token + nextchar self.state = ' ' break else: self.state = 'a' elif self.posix and nextchar in self.escape and \ self.state in self.escapedquotes: escapedstate = self.state self.state = nextchar else: self.token = self.token + nextchar elif self.state in self.escape: if not nextchar: # end of file if self.debug >= 2: print("shlex: I see EOF in escape state") # XXX what error should be raised here? raise ValueError("No escaped character") # In posix shells, only the quote itself or the escape # character may be escaped within quotes. 
if escapedstate in self.quotes and \ nextchar != self.state and nextchar != escapedstate: self.token = self.token + self.state self.token = self.token + nextchar self.state = escapedstate elif self.state == 'a': if not nextchar: self.state = None # end of file break elif nextchar in self.whitespace: if self.debug >= 2: print("shlex: I see whitespace in word state") self.state = ' ' if self.token or (self.posix and quoted): break # emit current token else: continue elif nextchar in self.commenters: self.instream.readline() self.lineno = self.lineno + 1 if self.posix: self.state = ' ' if self.token or (self.posix and quoted): break # emit current token else: continue elif self.posix and nextchar in self.quotes: self.state = nextchar elif self.posix and nextchar in self.escape: escapedstate = 'a' self.state = nextchar elif nextchar in self.wordchars or nextchar in self.quotes \ or self.whitespace_split: self.token = self.token + nextchar else: self.pushback.appendleft(nextchar) if self.debug >= 2: print("shlex: I see punctuation in word state") self.state = ' ' if self.token: break # emit current token else: continue result = self.token self.token = '' if self.posix and not quoted and result == '': result = None if self.debug > 1: if result: print("shlex: raw token=" + repr(result)) else: print("shlex: raw token=EOF") return result def sourcehook(self, newfile): "Hook called on a filename to be sourced." if newfile[0] == '"': newfile = newfile[1:-1] # This implements cpp-like semantics for relative-path inclusion. if isinstance(self.infile, str) and not os.path.isabs(newfile): newfile = os.path.join(os.path.dirname(self.infile), newfile) return (newfile, open(newfile, "r")) def error_leader(self, infile=None, lineno=None): "Emit a C-compiler-like, Emacs-friendly error-message leader." if infile is None: infile = self.infile if lineno is None: lineno = self.lineno return "\"%s\", line %d: " % (infile, lineno) def __iter__(self): return self def __next__(self): token = self.get_token() if token == self.eof: raise StopIteration return token def split(s, comments=False, posix=True): lex = shlex(s, posix=posix) lex.whitespace_split = True if not comments: lex.commenters = '' return list(lex) if __name__ == '__main__': if len(sys.argv) == 1: lexer = shlex() else: file = sys.argv[1] lexer = shlex(open(file), file) while 1: tt = lexer.get_token() if tt: print("Token: " + repr(tt)) else: break
apache-2.0
-8,029,511,807,110,584,000
37.326389
76
0.4894
false
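For orientation, a short usage sketch of the lexer above, mirroring its own split() helper (standard-library behaviour):

import shlex

# split() honours quoting; with comments=True the default '#' commenter is kept
print(shlex.split('cp "my file.txt" /tmp  # trailing comment', comments=True))
# -> ['cp', 'my file.txt', '/tmp']

# the class can also be driven token by token
lexer = shlex.shlex("a 'b c' d", posix=True)
lexer.whitespace_split = True
print(list(lexer))  # -> ['a', 'b c', 'd']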
adrientetar/robofab
Lib/robofab/tools/glifExport.py
1
2477
"""Tool for exporting GLIFs from FontLab""" import FL import os from robofab.interface.all.dialogs import ProgressBar from robofab.glifLib import GlyphSet from robofab.tools.glifImport import GlyphPlaceholder from robofab.pens.flPen import drawFLGlyphOntoPointPen def exportGlyph(glyphName, flGlyph, glyphSet): """Export a FontLab glyph.""" glyph = GlyphPlaceholder() glyph.width = flGlyph.width glyph.unicodes = flGlyph.unicodes if flGlyph.note: glyph.note = flGlyph.note customdata = flGlyph.customdata if customdata: from io import StringIO from robofab.plistlib import readPlist, Data f = StringIO(customdata) try: glyph.lib = readPlist(f) except: # XXX ugh, plistlib can raise lots of things # Anyway, customdata does not contain valid plist data, # but we don't need to toss it! glyph.lib = {"org.robofab.fontlab.customdata": Data(customdata)} def drawPoints(pen): # whoohoo, nested scopes are cool. drawFLGlyphOntoPointPen(flGlyph, pen) glyphSet.writeGlyph(glyphName, glyph, drawPoints) def exportGlyphs(font, glyphs=None, dest=None, doProgress=True, bar=None): """Export all glyphs in a FontLab font""" if dest is None: dir, base = os.path.split(font.file_name) base = base.split(".")[0] + ".glyphs" dest = os.path.join(dir, base) if not os.path.exists(dest): os.makedirs(dest) glyphSet = GlyphSet(dest) if glyphs is None: indices = list(range(len(font))) else: indices = [] for glyphName in glyphs: indices.append(font.FindGlyph(glyphName)) barStart = 0 closeBar = False if doProgress: if not bar: bar = ProgressBar("Exporting Glyphs", len(indices)) closeBar = True else: barStart = bar.getCurrentTick() else: bar = None try: done = {} for i in range(len(indices)): #if not (i % 10) and not bar.tick(i + barStart): # raise KeyboardInterrupt index = indices[i] flGlyph = font[index] if flGlyph is None: continue glyphName = flGlyph.name if not glyphName: print("can't dump glyph #%s, it has no glyph name" % i) else: if glyphName in done: n = 1 while ("%s#%s" % (glyphName, n)) in done: n += 1 glyphName = "%s#%s" % (glyphName, n) done[glyphName] = None exportGlyph(glyphName, flGlyph, glyphSet) if bar and not i % 10: bar.tick(barStart + i) # Write out contents.plist glyphSet.writeContents() except KeyboardInterrupt: if bar: bar.close() bar = None if bar and closeBar: bar.close()
bsd-3-clause
-3,893,137,988,498,525,000
25.073684
74
0.692774
false
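A hedged sketch of how exportGlyphs() above is typically driven from inside FontLab's Python console (the fl object and the output path are assumptions; this cannot run outside FontLab):

# inside FontLab only -- FL and robofab must be on the path
from FL import fl
from robofab.tools.glifExport import exportGlyphs

# export every glyph of the current font next to the .vfb as <name>.glyphs
exportGlyphs(fl.font)

# or just a few glyphs into an explicit folder (made-up path)
exportGlyphs(fl.font, glyphs=["A", "B", "space"], dest="C:/temp/test.glyphs")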
hnjamba/onaclone
onadata/libs/utils/csv_reader.py
6
1612
# vim: ai ts=4 sts=4 et sw=4 encoding=utf-8
import csv


class CsvReader(object):
    """
    Typical usage::

        csv_reader = CsvReader(path)
        for d in csv_reader.iter_dicts():
            Variable.objects.create(**d)
    """

    def __init__(self, path):
        self.open(path)

    def open(self, path):
        self._file = open(path, 'rU')  # universal new-line mode
        # http://stackoverflow.com/questions/904041/reading-a-utf8-csv-file-wit
        # h-python/904085#904085
        self._csv_reader = csv.reader(self._file)

    def close(self):
        self._file.close()

    def __iter__(self):
        return self

    def next(self):
        """
        A CsvReader object is iterable (since we have defined __iter__ and
        next methods). Each iteration of this object returns a row of data.
        """
        row = self._csv_reader.next()
        return [cell for cell in row]

    def _set_headers(self):
        self._headers = self.next()

    def iter_dicts(self):
        self._set_headers()
        for row in self:
            result = {}
            for key, value in zip(self._headers, row):
                # note since we're reading this in from a csv file
                # value is going to be a string or unicode string, we
                # quite simply want to avoid including empty strings in our
                # dict.
                if value:
                    result[key] = value
            # we only want to yield rows where there is something in
            # the row.
            if result:
                yield result
        self.close()
bsd-2-clause
-3,126,621,194,137,224,700
26.793103
79
0.539702
false
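A hypothetical usage sketch of the reader above (the file name and columns are made up; iter_dicts drops empty cells):

# contacts.csv (made-up):
#   name,email,phone
#   Ada,ada@example.org,
#   Bob,,555-0100
reader = CsvReader('contacts.csv')
for row in reader.iter_dicts():
    print(row)
# -> {'name': 'Ada', 'email': 'ada@example.org'}
# -> {'name': 'Bob', 'phone': '555-0100'}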
georgejhunt/olpc-kernel
tools/perf/scripts/python/failed-syscalls-by-pid.py
1996
2233
# failed system call counts, by pid # (c) 2010, Tom Zanussi <[email protected]> # Licensed under the terms of the GNU GPL License version 2 # # Displays system-wide failed system call totals, broken down by pid. # If a [comm] arg is specified, only syscalls called by [comm] are displayed. import os import sys sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') from perf_trace_context import * from Core import * from Util import * usage = "perf script -s syscall-counts-by-pid.py [comm|pid]\n"; for_comm = None for_pid = None if len(sys.argv) > 2: sys.exit(usage) if len(sys.argv) > 1: try: for_pid = int(sys.argv[1]) except: for_comm = sys.argv[1] syscalls = autodict() def trace_begin(): print "Press control+C to stop and show the summary" def trace_end(): print_error_totals() def raw_syscalls__sys_exit(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, common_callchain, id, ret): if (for_comm and common_comm != for_comm) or \ (for_pid and common_pid != for_pid ): return if ret < 0: try: syscalls[common_comm][common_pid][id][ret] += 1 except TypeError: syscalls[common_comm][common_pid][id][ret] = 1 def syscalls__sys_exit(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, id, ret): raw_syscalls__sys_exit(**locals()) def print_error_totals(): if for_comm is not None: print "\nsyscall errors for %s:\n\n" % (for_comm), else: print "\nsyscall errors:\n\n", print "%-30s %10s\n" % ("comm [pid]", "count"), print "%-30s %10s\n" % ("------------------------------", \ "----------"), comm_keys = syscalls.keys() for comm in comm_keys: pid_keys = syscalls[comm].keys() for pid in pid_keys: print "\n%s [%d]\n" % (comm, pid), id_keys = syscalls[comm][pid].keys() for id in id_keys: print " syscall: %-16s\n" % syscall_name(id), ret_keys = syscalls[comm][pid][id].keys() for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True): print " err = %-20s %10d\n" % (strerror(ret), val),
gpl-2.0
6,483,761,255,902,136,000
27.628205
112
0.618898
false
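The tally in the script above relies on autodict from perf's Core helpers; a rough standalone stand-in for the nested counting it performs (illustrative only, using plain collections instead of perf's helper):

from collections import defaultdict

def nested():
    # auto-vivifying dict, similar in spirit to perf's autodict()
    return defaultdict(nested)

counts = nested()
comm, pid, syscall_id, ret = 'firefox', 1234, 2, -2   # made-up sample event
counts[comm][pid][syscall_id][ret] = counts[comm][pid][syscall_id].get(ret, 0) + 1
print(counts['firefox'][1234][2][-2])                 # -> 1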
ctrevino/DIGITS
digits/model/tasks/caffe_train.py
1
38589
# Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved. import os import re import time import math import subprocess import numpy as np from google.protobuf import text_format import caffe try: import caffe_pb2 except ImportError: # See issue #32 from caffe.proto import caffe_pb2 from train import TrainTask from digits.config import config_value from digits.status import Status from digits import utils, dataset from digits.utils import subclass, override, constants from digits.dataset import ImageClassificationDatasetJob # NOTE: Increment this everytime the pickled object changes PICKLE_VERSION = 2 @subclass class CaffeTrainTask(TrainTask): """ Trains a caffe model """ CAFFE_LOG = 'caffe_output.log' @staticmethod def upgrade_network(network): #TODO pass def __init__(self, network, **kwargs): """ Arguments: network -- a caffe NetParameter defining the network """ super(CaffeTrainTask, self).__init__(**kwargs) self.pickver_task_caffe_train = PICKLE_VERSION self.network = network self.current_iteration = 0 self.loaded_snapshot_file = None self.loaded_snapshot_epoch = None self.image_mean = None self.solver = None self.solver_file = constants.CAFFE_SOLVER_FILE self.train_val_file = constants.CAFFE_TRAIN_VAL_FILE self.snapshot_prefix = constants.CAFFE_SNAPSHOT_PREFIX self.deploy_file = constants.CAFFE_DEPLOY_FILE self.caffe_log_file = self.CAFFE_LOG def __getstate__(self): state = super(CaffeTrainTask, self).__getstate__() # Don't pickle these things if 'caffe_log' in state: del state['caffe_log'] if '_transformer' in state: del state['_transformer'] if '_caffe_net' in state: del state['_caffe_net'] return state def __setstate__(self, state): super(CaffeTrainTask, self).__setstate__(state) # Upgrade pickle file if state['pickver_task_caffe_train'] == 1: print 'upgrading %s' % self.job_id self.caffe_log_file = self.CAFFE_LOG self.pickver_task_caffe_train = PICKLE_VERSION # Make changes to self self.loaded_snapshot_file = None self.loaded_snapshot_epoch = None # These things don't get pickled self.image_mean = None ### Task overrides @override def name(self): return 'Train Caffe Model' @override def before_run(self): super(CaffeTrainTask, self).before_run() if isinstance(self.dataset, dataset.ImageClassificationDatasetJob): self.save_prototxt_files() else: raise NotImplementedError self.caffe_log = open(self.path(self.CAFFE_LOG), 'a') self.saving_snapshot = False self.receiving_train_output = False self.receiving_val_output = False self.last_train_update = None return True def save_prototxt_files(self): """ Save solver, train_val and deploy files to disk """ has_val_set = self.dataset.val_db_task() is not None ### Check what has been specified in self.network tops = [] bottoms = {} train_data_layer = None val_data_layer = None hidden_layers = caffe_pb2.NetParameter() loss_layers = [] accuracy_layers = [] for layer in self.network.layer: assert layer.type not in ['MemoryData', 'HDF5Data', 'ImageData'], 'unsupported data layer type' if layer.type == 'Data': for rule in layer.include: if rule.phase == caffe_pb2.TRAIN: assert train_data_layer is None, 'cannot specify two train data layers' train_data_layer = layer elif rule.phase == caffe_pb2.TEST: assert val_data_layer is None, 'cannot specify two test data layers' val_data_layer = layer elif layer.type == 'SoftmaxWithLoss': loss_layers.append(layer) elif layer.type == 'Accuracy': addThis = True if layer.accuracy_param.HasField('top_k'): if layer.accuracy_param.top_k >= len(self.get_labels()): self.logger.warning('Removing layer %s 
because top_k=%s while there are are only %s labels in this dataset' % (layer.name, layer.accuracy_param.top_k, len(self.get_labels()))) addThis = False if addThis: accuracy_layers.append(layer) else: hidden_layers.layer.add().CopyFrom(layer) if len(layer.bottom) == 1 and len(layer.top) == 1 and layer.bottom[0] == layer.top[0]: pass else: for top in layer.top: tops.append(top) for bottom in layer.bottom: bottoms[bottom] = True if train_data_layer is None: assert val_data_layer is None, 'cannot specify a test data layer without a train data layer' assert len(loss_layers) > 0, 'must specify a loss layer' network_outputs = [] for name in tops: if name not in bottoms: network_outputs.append(name) assert len(network_outputs), 'network must have an output' # Update num_output for any output InnerProduct layers automatically for layer in hidden_layers.layer: if layer.type == 'InnerProduct': for top in layer.top: if top in network_outputs: layer.inner_product_param.num_output = len(self.get_labels()) break ### Write train_val file train_val_network = caffe_pb2.NetParameter() # data layers if train_data_layer is not None: if train_data_layer.HasField('data_param'): assert not train_data_layer.data_param.HasField('source'), "don't set the data_param.source" assert not train_data_layer.data_param.HasField('backend'), "don't set the data_param.backend" max_crop_size = min(self.dataset.image_dims[0], self.dataset.image_dims[1]) if self.crop_size: assert self.crop_size <= max_crop_size, 'crop_size is larger than the image size' train_data_layer.transform_param.crop_size = self.crop_size elif train_data_layer.transform_param.HasField('crop_size'): cs = train_data_layer.transform_param.crop_size if cs > max_crop_size: # don't throw an error here cs = max_crop_size train_data_layer.transform_param.crop_size = cs self.crop_size = cs train_val_network.layer.add().CopyFrom(train_data_layer) train_data_layer = train_val_network.layer[-1] if val_data_layer is not None and has_val_set: if val_data_layer.HasField('data_param'): assert not val_data_layer.data_param.HasField('source'), "don't set the data_param.source" assert not val_data_layer.data_param.HasField('backend'), "don't set the data_param.backend" if self.crop_size: # use our error checking from the train layer val_data_layer.transform_param.crop_size = self.crop_size train_val_network.layer.add().CopyFrom(val_data_layer) val_data_layer = train_val_network.layer[-1] else: train_data_layer = train_val_network.layer.add(type = 'Data', name = 'data') train_data_layer.top.append('data') train_data_layer.top.append('label') train_data_layer.include.add(phase = caffe_pb2.TRAIN) train_data_layer.data_param.batch_size = constants.DEFAULT_BATCH_SIZE if self.crop_size: train_data_layer.transform_param.crop_size = self.crop_size if has_val_set: val_data_layer = train_val_network.layer.add(type = 'Data', name = 'data') val_data_layer.top.append('data') val_data_layer.top.append('label') val_data_layer.include.add(phase = caffe_pb2.TEST) val_data_layer.data_param.batch_size = constants.DEFAULT_BATCH_SIZE if self.crop_size: val_data_layer.transform_param.crop_size = self.crop_size train_data_layer.data_param.source = self.dataset.path(self.dataset.train_db_task().db_name) train_data_layer.data_param.backend = caffe_pb2.DataParameter.LMDB if val_data_layer is not None and has_val_set: val_data_layer.data_param.source = self.dataset.path(self.dataset.val_db_task().db_name) val_data_layer.data_param.backend = caffe_pb2.DataParameter.LMDB if self.use_mean: mean_pixel = 
None with open(self.dataset.path(self.dataset.train_db_task().mean_file)) as f: blob = caffe_pb2.BlobProto() blob.MergeFromString(f.read()) mean = np.reshape(blob.data, ( self.dataset.image_dims[2], self.dataset.image_dims[0], self.dataset.image_dims[1], ) ) mean_pixel = mean.mean(1).mean(1) for value in mean_pixel: train_data_layer.transform_param.mean_value.append(value) if val_data_layer is not None and has_val_set: for value in mean_pixel: val_data_layer.transform_param.mean_value.append(value) if self.batch_size: train_data_layer.data_param.batch_size = self.batch_size if val_data_layer is not None and has_val_set: val_data_layer.data_param.batch_size = self.batch_size else: if not train_data_layer.data_param.HasField('batch_size'): train_data_layer.data_param.batch_size = constants.DEFAULT_BATCH_SIZE if val_data_layer is not None and has_val_set and not val_data_layer.data_param.HasField('batch_size'): val_data_layer.data_param.batch_size = constants.DEFAULT_BATCH_SIZE # hidden layers train_val_network.MergeFrom(hidden_layers) # output layers train_val_network.layer.extend(loss_layers) train_val_network.layer.extend(accuracy_layers) with open(self.path(self.train_val_file), 'w') as outfile: text_format.PrintMessage(train_val_network, outfile) ### Write deploy file deploy_network = caffe_pb2.NetParameter() # input deploy_network.input.append('data') deploy_network.input_dim.append(1) deploy_network.input_dim.append(self.dataset.image_dims[2]) if self.crop_size: deploy_network.input_dim.append(self.crop_size) deploy_network.input_dim.append(self.crop_size) else: deploy_network.input_dim.append(self.dataset.image_dims[0]) deploy_network.input_dim.append(self.dataset.image_dims[1]) # hidden layers deploy_network.MergeFrom(hidden_layers) # output layers if loss_layers[-1].type == 'SoftmaxWithLoss': prob_layer = deploy_network.layer.add( type = 'Softmax', name = 'prob') prob_layer.bottom.append(network_outputs[-1]) prob_layer.top.append('prob') with open(self.path(self.deploy_file), 'w') as outfile: text_format.PrintMessage(deploy_network, outfile) ### Write solver file solver = caffe_pb2.SolverParameter() # get enum value for solver type solver.solver_type = getattr(solver, self.solver_type) solver.net = self.train_val_file # Set CPU/GPU mode if config_value('caffe_root')['cuda_enabled'] and \ bool(config_value('gpu_list')): solver.solver_mode = caffe_pb2.SolverParameter.GPU else: solver.solver_mode = caffe_pb2.SolverParameter.CPU solver.snapshot_prefix = self.snapshot_prefix # Epochs -> Iterations train_iter = int(math.ceil(float(self.dataset.train_db_task().entries_count) / train_data_layer.data_param.batch_size)) solver.max_iter = train_iter * self.train_epochs snapshot_interval = self.snapshot_interval * train_iter if 0 < snapshot_interval <= 1: solver.snapshot = 1 # don't round down elif 1 < snapshot_interval < solver.max_iter: solver.snapshot = int(snapshot_interval) else: solver.snapshot = 0 # only take one snapshot at the end if has_val_set and self.val_interval: solver.test_iter.append(int(math.ceil(float(self.dataset.val_db_task().entries_count) / val_data_layer.data_param.batch_size))) val_interval = self.val_interval * train_iter if 0 < val_interval <= 1: solver.test_interval = 1 # don't round down elif 1 < val_interval < solver.max_iter: solver.test_interval = int(val_interval) else: solver.test_interval = solver.max_iter # only test once at the end # Learning rate solver.base_lr = self.learning_rate solver.lr_policy = self.lr_policy['policy'] scale = 
float(solver.max_iter)/100.0 if solver.lr_policy == 'fixed': pass elif solver.lr_policy == 'step': # stepsize = stepsize * scale solver.stepsize = int(math.ceil(float(self.lr_policy['stepsize']) * scale)) solver.gamma = self.lr_policy['gamma'] elif solver.lr_policy == 'multistep': for value in self.lr_policy['stepvalue']: # stepvalue = stepvalue * scale solver.stepvalue.append(int(math.ceil(float(value) * scale))) solver.gamma = self.lr_policy['gamma'] elif solver.lr_policy == 'exp': # gamma = gamma^(1/scale) solver.gamma = math.pow(self.lr_policy['gamma'], 1.0/scale) elif solver.lr_policy == 'inv': # gamma = gamma / scale solver.gamma = self.lr_policy['gamma'] / scale solver.power = self.lr_policy['power'] elif solver.lr_policy == 'poly': solver.power = self.lr_policy['power'] elif solver.lr_policy == 'sigmoid': # gamma = -gamma / scale solver.gamma = -1.0 * self.lr_policy['gamma'] / scale # stepsize = stepsize * scale solver.stepsize = int(math.ceil(float(self.lr_policy['stepsize']) * scale)) else: raise Exception('Unknown lr_policy: "%s"' % solver.lr_policy) # go with the suggested defaults if solver.solver_type != solver.ADAGRAD: solver.momentum = 0.9 solver.weight_decay = 0.0005 # Display 8x per epoch, or once per 5000 images, whichever is more frequent solver.display = max(1, min( int(math.floor(float(solver.max_iter) / (self.train_epochs * 8))), int(math.ceil(5000.0 / train_data_layer.data_param.batch_size)) )) if self.random_seed is not None: solver.random_seed = self.random_seed with open(self.path(self.solver_file), 'w') as outfile: text_format.PrintMessage(solver, outfile) self.solver = solver # save for later return True def iteration_to_epoch(self, it): return float(it * self.train_epochs) / self.solver.max_iter @override def task_arguments(self, resources): args = [config_value('caffe_root')['executable'], 'train', '--solver=%s' % self.path(self.solver_file), ] if 'gpus' in resources: identifiers = [] for identifier, value in resources['gpus']: identifiers.append(identifier) if len(identifiers) == 1: args.append('--gpu=%s' % identifiers[0]) elif len(identifiers) > 1: args.append('--gpus=%s' % ','.join(identifiers)) if self.pretrained_model: args.append('--weights=%s' % self.path(self.pretrained_model)) return args @override def process_output(self, line): float_exp = '(NaN|[-+]?[0-9]*\.?[0-9]+(e[-+]?[0-9]+)?)' self.caffe_log.write('%s\n' % line) self.caffe_log.flush() # parse caffe output timestamp, level, message = self.preprocess_output_caffe(line) if not message: return True # iteration updates match = re.match(r'Iteration (\d+)', message) if match: i = int(match.group(1)) self.new_iteration(i) # net output match = re.match(r'(Train|Test) net output #(\d+): (\S*) = %s' % float_exp, message, flags=re.IGNORECASE) if match: phase = match.group(1) index = int(match.group(2)) name = match.group(3) value = match.group(4) assert value.lower() != 'nan', 'Network outputted NaN for "%s" (%s phase). Try decreasing your learning rate.' % (name, phase) value = float(value) # Find the layer type kind = '?' 
for layer in self.network.layer: if name in layer.top: kind = layer.type break if phase.lower() == 'train': self.save_train_output(name, kind, value) elif phase.lower() == 'test': self.save_val_output(name, kind, value) return True # learning rate updates match = re.match(r'Iteration (\d+).*lr = %s' % float_exp, message, flags=re.IGNORECASE) if match: i = int(match.group(1)) lr = float(match.group(2)) self.save_train_output('learning_rate', 'LearningRate', lr) return True # snapshot saved if self.saving_snapshot: if not message.startswith('Snapshotting solver state'): self.logger.warning('caffe output format seems to have changed. Expected "Snapshotting solver state..." after "Snapshotting to..."') else: self.logger.debug('Snapshot saved.') self.detect_snapshots() self.send_snapshot_update() self.saving_snapshot = False return True # snapshot starting match = re.match(r'Snapshotting to (.*)\s*$', message) if match: self.saving_snapshot = True return True # memory requirement match = re.match(r'Memory required for data:\s+(\d+)', message) if match: bytes_required = int(match.group(1)) #self.logger.debug('memory required: %s' % utils.sizeof_fmt(bytes_required)) return True if level in ['error', 'critical']: self.logger.error('%s: %s' % (self.name(), message)) self.exception = message return True return True def preprocess_output_caffe(self, line): """ Takes line of output and parses it according to caffe's output format Returns (timestamp, level, message) or (None, None, None) """ # NOTE: This must change when the logging format changes # LMMDD HH:MM:SS.MICROS pid file:lineno] message match = re.match(r'(\w)(\d{4} \S{8}).*]\s+(\S.*)$', line) if match: level = match.group(1) # add the year because caffe omits it timestr = '%s%s' % (time.strftime('%Y'), match.group(2)) message = match.group(3) if level == 'I': level = 'info' elif level == 'W': level = 'warning' elif level == 'E': level = 'error' elif level == 'F': #FAIL level = 'critical' timestamp = time.mktime(time.strptime(timestr, '%Y%m%d %H:%M:%S')) return (timestamp, level, message) else: #self.logger.warning('Unrecognized task output "%s"' % line) return (None, None, None) def new_iteration(self, it): """ Update current_iteration """ if self.current_iteration == it: return self.current_iteration = it self.send_progress_update(self.iteration_to_epoch(it)) def send_snapshot_update(self): """ Sends socketio message about the snapshot list """ from digits.webapp import socketio socketio.emit('task update', { 'task': self.html_id(), 'update': 'snapshots', 'data': self.snapshot_list(), }, namespace='/jobs', room=self.job_id, ) @override def after_run(self): super(CaffeTrainTask, self).after_run() self.caffe_log.close() @override def after_runtime_error(self): if os.path.exists(self.path(self.CAFFE_LOG)): output = subprocess.check_output(['tail', '-n40', self.path(self.CAFFE_LOG)]) lines = [] for line in output.split('\n'): # parse caffe header timestamp, level, message = self.preprocess_output_caffe(line) if message: lines.append(message) # return the last 20 lines self.traceback = '\n'.join(lines[len(lines)-20:]) ### TrainTask overrides @override def detect_snapshots(self): self.snapshots = [] snapshot_dir = os.path.join(self.job_dir, os.path.dirname(self.snapshot_prefix)) snapshots = [] solverstates = [] for filename in os.listdir(snapshot_dir): # find models match = re.match(r'%s_iter_(\d+)\.caffemodel' % os.path.basename(self.snapshot_prefix), filename) if match: iteration = int(match.group(1)) epoch = float(iteration) / 
(float(self.solver.max_iter)/self.train_epochs) # assert epoch.is_integer(), '%s is not an integer' % epoch epoch = round(epoch,3) # if epoch is int if epoch == math.ceil(epoch): # print epoch,math.ceil(epoch),int(epoch) epoch = int(epoch) snapshots.append( ( os.path.join(snapshot_dir, filename), epoch ) ) # find solverstates match = re.match(r'%s_iter_(\d+)\.solverstate' % os.path.basename(self.snapshot_prefix), filename) if match: solverstates.append( ( os.path.join(snapshot_dir, filename), int(match.group(1)) ) ) # delete all but the most recent solverstate for filename, iteration in sorted(solverstates, key=lambda tup: tup[1])[:-1]: #print 'Removing "%s"' % filename os.remove(filename) self.snapshots = sorted(snapshots, key=lambda tup: tup[1]) return len(self.snapshots) > 0 @override def est_next_snapshot(self): if self.status != Status.RUN or self.current_iteration == 0: return None elapsed = time.time() - self.status_updates[-1][1] next_snapshot_iteration = (1 + self.current_iteration//self.snapshot_interval) * self.snapshot_interval return (next_snapshot_iteration - self.current_iteration) * elapsed // self.current_iteration @override def can_view_weights(self): return False @override def can_infer_one(self): if isinstance(self.dataset, ImageClassificationDatasetJob): return True return False @override def infer_one(self, data, snapshot_epoch=None, layers=None): if isinstance(self.dataset, ImageClassificationDatasetJob): return self.classify_one(data, snapshot_epoch=snapshot_epoch, layers=layers, ) raise NotImplementedError() def classify_one(self, image, snapshot_epoch=None, layers=None): """ Classify an image Returns (predictions, visualizations) predictions -- an array of [ (label, confidence), ...] for each label, sorted by confidence visualizations -- a list of dicts for the specified layers Returns (None, None) if something goes wrong Arguments: image -- a np.array Keyword arguments: snapshot_epoch -- which snapshot to use layers -- which layer activation[s] and weight[s] to visualize """ labels = self.get_labels() net = self.get_net(snapshot_epoch) # process image if image.ndim == 2: image = image[:,:,np.newaxis] preprocessed = self.get_transformer().preprocess( 'data', image) # reshape net input (if necessary) test_shape = (1,) + preprocessed.shape if net.blobs['data'].data.shape != test_shape: net.blobs['data'].reshape(*test_shape) # run inference net.blobs['data'].data[...] = preprocessed output = net.forward() scores = output[net.outputs[-1]].flatten() indices = (-scores).argsort() predictions = [] for i in indices: predictions.append( (labels[i], scores[i]) ) # add visualizations visualizations = [] if layers and layers != 'none': if layers == 'all': added_activations = [] for layer in self.network.layer: print 'Computing visualizations for "%s"...' 
% layer.name if not layer.type.endswith(('Data', 'Loss', 'Accuracy')): for bottom in layer.bottom: if bottom in net.blobs and bottom not in added_activations: data = net.blobs[bottom].data[0] vis = self.get_layer_visualization(data) mean, std, hist = self.get_layer_statistics(data) visualizations.append( { 'name': str(bottom), 'type': 'Activations', 'mean': mean, 'stddev': std, 'histogram': hist, 'image_html': utils.image.embed_image_html(vis), } ) added_activations.append(bottom) if layer.name in net.params: data = net.params[layer.name][0].data if layer.type not in ['InnerProduct']: vis = self.get_layer_visualization(data) else: vis = None mean, std, hist = self.get_layer_statistics(data) visualizations.append( { 'name': str(layer.name), 'type': 'Weights (%s layer)' % layer.type, 'mean': mean, 'stddev': std, 'histogram': hist, 'image_html': utils.image.embed_image_html(vis), } ) for top in layer.top: if top in net.blobs and top not in added_activations: data = net.blobs[top].data[0] normalize = True # don't normalize softmax layers if layer.type == 'Softmax': normalize = False vis = self.get_layer_visualization(data, normalize=normalize) mean, std, hist = self.get_layer_statistics(data) visualizations.append( { 'name': str(top), 'type': 'Activation', 'mean': mean, 'stddev': std, 'histogram': hist, 'image_html': utils.image.embed_image_html(vis), } ) added_activations.append(top) else: raise NotImplementedError return (predictions, visualizations) def get_layer_visualization(self, data, normalize = True, max_width = 600, ): """ Returns a vis_square for the given layer data Arguments: data -- a np.ndarray Keyword arguments: normalize -- whether to normalize the data when visualizing max_width -- maximum width for the vis_square """ if data.ndim == 1: # interpret as 1x1 grayscale images # (N, 1, 1) data = data[:, np.newaxis, np.newaxis] elif data.ndim == 2: # interpret as 1x1 grayscale images # (N, 1, 1) data = data.reshape((data.shape[0]*data.shape[1], 1, 1)) elif data.ndim == 3: if data.shape[0] == 3: # interpret as a color image # (1, H, W,3) data = data[[2,1,0],...] # BGR to RGB (see issue #59) data = data.transpose(1,2,0) data = data[np.newaxis,...] 
else: # interpret as grayscale images # (N, H, W) pass elif data.ndim == 4: if data.shape[0] == 3: # interpret as HxW color images # (N, H, W, 3) data = data.transpose(1,2,3,0) data = data[:,:,:,[2,1,0]] # BGR to RGB (see issue #59) elif data.shape[1] == 3: # interpret as HxW color images # (N, H, W, 3) data = data.transpose(0,2,3,1) data = data[:,:,:,[2,1,0]] # BGR to RGB (see issue #59) else: # interpret as HxW grayscale images # (N, H, W) data = data.reshape((data.shape[0]*data.shape[1], data.shape[2], data.shape[3])) else: raise RuntimeError('unrecognized data shape: %s' % (data.shape,)) # chop off data so that it will fit within max_width padsize = 0 width = data.shape[2] if width > max_width: data = data[:1,:max_width,:max_width] else: if width > 1: padsize = 1 width += 1 n = max(max_width/width,1) n *= n data = data[:n] return utils.image.vis_square(data, padsize = padsize, normalize = normalize, ) def get_layer_statistics(self, data): """ Returns statistics for the given layer data: (mean, standard deviation, histogram) histogram -- [y, x, ticks] Arguments: data -- a np.ndarray """ # XXX These calculations can be super slow mean = np.mean(data) std = np.std(data) y, x = np.histogram(data, bins=20) y = list(y) ticks = x[[0,len(x)/2,-1]] x = [(x[i]+x[i+1])/2.0 for i in xrange(len(x)-1)] ticks = list(ticks) return (mean, std, [y, x, ticks]) @override def can_infer_many(self): if isinstance(self.dataset, ImageClassificationDatasetJob): return True return False @override def infer_many(self, data, snapshot_epoch=None): if isinstance(self.dataset, ImageClassificationDatasetJob): return self.classify_many(data, snapshot_epoch=snapshot_epoch) raise NotImplementedError() def classify_many(self, images, snapshot_epoch=None): """ Returns (labels, results): labels -- an array of strings results -- a 2D np array: [ [image0_label0_confidence, image0_label1_confidence, ...], [image1_label0_confidence, image1_label1_confidence, ...], ... 
] Arguments: images -- a list of np.arrays Keyword arguments: snapshot_epoch -- which snapshot to use """ labels = self.get_labels() net = self.get_net(snapshot_epoch) caffe_images = [] for image in images: if image.ndim == 2: caffe_images.append(image[:,:,np.newaxis]) else: caffe_images.append(image) caffe_images = np.array(caffe_images) if self.batch_size: data_shape = (self.batch_size, self.dataset.image_dims[2]) # TODO: grab batch_size from the TEST phase in train_val network else: data_shape = (constants.DEFAULT_BATCH_SIZE, self.dataset.image_dims[2]) if self.crop_size: data_shape += (self.crop_size, self.crop_size) else: data_shape += (self.dataset.image_dims[0], self.dataset.image_dims[1]) scores = None for chunk in [caffe_images[x:x+data_shape[0]] for x in xrange(0, len(caffe_images), data_shape[0])]: new_shape = (len(chunk),) + data_shape[1:] if net.blobs['data'].data.shape != new_shape: net.blobs['data'].reshape(*new_shape) for index, image in enumerate(chunk): net.blobs['data'].data[index] = self.get_transformer().preprocess( 'data', image) output = net.forward()[net.outputs[-1]] if scores is None: scores = output else: scores = np.vstack((scores, output)) print 'Processed %s/%s images' % (len(scores), len(caffe_images)) return (labels, scores) def has_model(self): """ Returns True if there is a model that can be used """ return len(self.snapshots) > 0 def get_net(self, epoch=None): """ Returns an instance of caffe.Net Keyword Arguments: epoch -- which snapshot to load (default is -1 to load the most recently generated snapshot) """ if not self.has_model(): return False file_to_load = None if not epoch: epoch = self.snapshots[-1][1] file_to_load = self.snapshots[-1][0] else: for snapshot_file, snapshot_epoch in self.snapshots: if snapshot_epoch == epoch: file_to_load = snapshot_file break if file_to_load is None: raise Exception('snapshot not found for epoch "%s"' % epoch) # check if already loaded if self.loaded_snapshot_file and self.loaded_snapshot_file == file_to_load \ and hasattr(self, '_caffe_net') and self._caffe_net is not None: return self._caffe_net if config_value('caffe_root')['cuda_enabled'] and\ config_value('gpu_list'): caffe.set_mode_gpu() # load a new model self._caffe_net = caffe.Net( self.path(self.deploy_file), file_to_load, caffe.TEST) self.loaded_snapshot_epoch = epoch self.loaded_snapshot_file = file_to_load return self._caffe_net def get_transformer(self): """ Returns an instance of caffe.io.Transformer """ # check if already loaded if hasattr(self, '_transformer') and self._transformer is not None: return self._transformer data_shape = (1, self.dataset.image_dims[2]) if self.crop_size: data_shape += (self.crop_size, self.crop_size) else: data_shape += (self.dataset.image_dims[0], self.dataset.image_dims[1]) t = caffe.io.Transformer( inputs = {'data': data_shape} ) t.set_transpose('data', (2,0,1)) # transpose to (channels, height, width) if self.dataset.image_dims[2] == 3 and \ self.dataset.train_db_task().image_channel_order == 'BGR': # channel swap # XXX see issue #59 t.set_channel_swap('data', (2,1,0)) if self.use_mean: # set mean with open(self.dataset.path(self.dataset.train_db_task().mean_file)) as f: blob = caffe_pb2.BlobProto() blob.MergeFromString(f.read()) pixel = np.reshape(blob.data, ( self.dataset.image_dims[2], self.dataset.image_dims[0], self.dataset.image_dims[1], ) ).mean(1).mean(1) t.set_mean('data', pixel) #t.set_raw_scale('data', 255) # [0,255] range instead of [0,1] self._transformer = t return self._transformer
bsd-3-clause
-6,825,362,181,322,277,000
38.0182
199
0.52616
false
lupyuen/RaspberryPiImage
home/pi/GrovePi/Software/Python/others/temboo/Library/Google/Spreadsheets/RetrieveCellFeed.py
4
5884
# -*- coding: utf-8 -*- ############################################################################### # # RetrieveCellFeed # Retrieves a list of cell names and values in a specified Google worksheet. # # Python versions 2.6, 2.7, 3.x # # Copyright 2014, Temboo Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, # either express or implied. See the License for the specific # language governing permissions and limitations under the License. # # ############################################################################### from temboo.core.choreography import Choreography from temboo.core.choreography import InputSet from temboo.core.choreography import ResultSet from temboo.core.choreography import ChoreographyExecution import json class RetrieveCellFeed(Choreography): def __init__(self, temboo_session): """ Create a new instance of the RetrieveCellFeed Choreo. A TembooSession object, containing a valid set of Temboo credentials, must be supplied. """ super(RetrieveCellFeed, self).__init__(temboo_session, '/Library/Google/Spreadsheets/RetrieveCellFeed') def new_input_set(self): return RetrieveCellFeedInputSet() def _make_result_set(self, result, path): return RetrieveCellFeedResultSet(result, path) def _make_execution(self, session, exec_id, path): return RetrieveCellFeedChoreographyExecution(session, exec_id, path) class RetrieveCellFeedInputSet(InputSet): """ An InputSet with methods appropriate for specifying the inputs to the RetrieveCellFeed Choreo. The InputSet object is used to specify input parameters when executing this Choreo. """ def set_AccessToken(self, value): """ Set the value of the AccessToken input for this Choreo. ((optional, string) A valid Access Token retrieved during the OAuth process. This is required unless you provide the ClientID, ClientSecret, and RefreshToken to generate a new Access Token.) """ super(RetrieveCellFeedInputSet, self)._set_input('AccessToken', value) def set_ClientID(self, value): """ Set the value of the ClientID input for this Choreo. ((conditional, string) The Client ID provided by Google. Required unless providing a valid AccessToken.) """ super(RetrieveCellFeedInputSet, self)._set_input('ClientID', value) def set_ClientSecret(self, value): """ Set the value of the ClientSecret input for this Choreo. ((conditional, string) The Client Secret provided by Google. Required unless providing a valid AccessToken.) """ super(RetrieveCellFeedInputSet, self)._set_input('ClientSecret', value) def set_Password(self, value): """ Set the value of the Password input for this Choreo. ((optional, password) Deprecated (retained for backward compatibility only).) """ super(RetrieveCellFeedInputSet, self)._set_input('Password', value) def set_RefreshToken(self, value): """ Set the value of the RefreshToken input for this Choreo. ((conditional, string) An OAuth Refresh Token used to generate a new Access Token when the original token is expired. Required unless providing a valid AccessToken.) """ super(RetrieveCellFeedInputSet, self)._set_input('RefreshToken', value) def set_ResponseFormat(self, value): """ Set the value of the ResponseFormat input for this Choreo. 
((optional, string) The format that the response should be in. Valid values are: xml (the default) and json.) """ super(RetrieveCellFeedInputSet, self)._set_input('ResponseFormat', value) def set_SpreadsheetKey(self, value): """ Set the value of the SpreadsheetKey input for this Choreo. ((required, string) The unique key of the spreadsheet associated with the cells you want to retrieve.) """ super(RetrieveCellFeedInputSet, self)._set_input('SpreadsheetKey', value) def set_Username(self, value): """ Set the value of the Username input for this Choreo. ((optional, string) Deprecated (retained for backward compatibility only).) """ super(RetrieveCellFeedInputSet, self)._set_input('Username', value) def set_WorksheetId(self, value): """ Set the value of the WorksheetId input for this Choreo. ((required, string) The unique ID of the worksheet associated with the cells you want to retrieve.) """ super(RetrieveCellFeedInputSet, self)._set_input('WorksheetId', value) class RetrieveCellFeedResultSet(ResultSet): """ A ResultSet with methods tailored to the values returned by the RetrieveCellFeed Choreo. The ResultSet object is used to retrieve the results of a Choreo execution. """ def getJSONFromString(self, str): return json.loads(str) def get_Response(self): """ Retrieve the value for the "Response" output from this Choreo execution. (The response from Google.) """ return self._output.get('Response', None) def get_NewAccessToken(self): """ Retrieve the value for the "NewAccessToken" output from this Choreo execution. ((string) Contains a new AccessToken when the RefreshToken is provided.) """ return self._output.get('NewAccessToken', None) class RetrieveCellFeedChoreographyExecution(ChoreographyExecution): def _make_result_set(self, response, path): return RetrieveCellFeedResultSet(response, path)
apache-2.0
35,968,798,605,831,190
45.330709
254
0.686268
false
CeON/CoAnSys
document-similarity/document-similarity-logic/src/main/python/analyse_unibi.py
4
3007
#! /usr/bin/python import sys,re,string sys.path.append("/home/pdendek/docsim-check/out/document-similarity-logic-1.6-SNAPSHOT-jar-with-dependencies.jar") from pl.edu.icm.coansys.commons.java import DiacriticsRemover def fillDict(inL,langs): innerD = {} for x in inL: t,l = x langs.add(l) innerD[l] = t return (innerD,langs) def replaceInList(l,old,new): try: l.remove(old) l.append(new) except ValueError: pass return l def replaceKInDict(d,old,new): try: d[new] = d.pop(old) except KeyError: pass return d def filterDiacritics(t): r = DiacriticsRemover.removeDiacritics(t).lower() #remove diacritics, .toLowerCase() r = re.sub('(\\w)[^a-zA-Z0-9\\s'+re.escape('!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~')+']+(\\w)',r'\1\2',r) # remove weired signs within a string r = re.sub(r'[^a-zA-Z0-9_\-]+',' ',r) # remove stand-alone weired signs r = r.strip() return r def getFilteredItemOrNone(d,k): r = None try: r = filterDiacritics(d[k]) except KeyError: pass return r @outputSchema("b:{ t:( key:chararray, title:chararray, abstract:chararray, ccs:{ cc:( type:chararray, code:chararray) }, lang:chararray ) }") def groupByLangAndFilter(key,tis,abstrs,ccs): langs = set() unpacked_ccs = [(cc[0],thiscc[0]) for cc in ccs for thiscc in cc[1]] ltis,langs = fillDict(tis,langs) labstrs,langs = fillDict(abstrs,langs) langs = replaceInList(list(langs),u'',u'und') ltis = replaceKInDict(ltis,u'',u'und') labstrs = replaceKInDict(labstrs,u'',u'und') out = [(key+u'-----'+unicode(lang),getFilteredItemOrNone(ltis,lang),getFilteredItemOrNone(labstrs,lang),unpacked_ccs,lang) for lang in langs] return out @outputSchema("b:{ t:( key:chararray, title:chararray, abstract:chararray, ccs:{ cc:( type:chararray, code:chararray) }, lang:chararray ) }") def groupByLangAndFilter(key,tis,abstrs,ccs,startsWith): langs = set() unpacked_ccs = [(cc[0],thiscc[0]) for cc in ccs if cc[0].startswith(startsWith) for thiscc in cc[1] ] ltis,langs = fillDict(tis,langs) labstrs,langs = fillDict(abstrs,langs) langs = replaceInList(list(langs),u'',u'und') ltis = replaceKInDict(ltis,u'',u'und') labstrs = replaceKInDict(labstrs,u'',u'und') out = [(key+u'-----'+unicode(lang),getFilteredItemOrNone(ltis,lang),getFilteredItemOrNone(labstrs,lang),unpacked_ccs,lang) for lang in langs] return out @outputSchema("t:( key:chararray, text:chararray, ccs:{ cc:( type:chararray, code:chararray) } )") def mergeAndFilter(key,tis,abstrs,ccs): unpacked_ccs = [(cc[0],thiscc[0]) for cc in ccs for thiscc in cc[1]] return (key,' '.join([filterDiacritics(o[0]) for o in tis+abstrs]), unpacked_ccs) @outputSchema("t:( key:chararray, text:chararray, ccs:{ cc:( type:chararray, code:chararray) } )") def mergeAndFilter(key,tis,abstrs,ccs,startsWith): unpacked_ccs = [(cc[0],thiscc[0]) for cc in ccs if cc[0].startswith(startsWith) for thiscc in cc[1]] t = ' '.join([o[0] for o in tis+abstrs]) t = t.lower() return (key,' '.join([filterDiacritics(o[0]) for o in tis+abstrs]), unpacked_ccs)
agpl-3.0
-5,420,090,212,876,902,000
35.670732
142
0.691054
false
openchange/openchange
mapiproxy/services/ocsmanager/ocsmanager/controllers/authenticate.py
8
3494
import logging import hashlib import os from base64 import urlsafe_b64encode as encode from base64 import urlsafe_b64decode as decode from pylons import request, response, session, tmpl_context as c, url from pylons.controllers.util import abort, redirect from pylons.decorators.rest import restrict from ocsmanager.model import AuthenticateModel from ocsmanager.lib.base import BaseController, render log = logging.getLogger(__name__) class AuthenticateController(BaseController): def _auth_abort(self, code, message): c.code = code c.message = message return render('/error.xml') @restrict('POST') def token(self): """ Return a session token, one-time hash and password hash for the user. """ # Ensure Content-type is text/xml if request.headers.get("Content-Type", "").startswith("text/xml") is False: return self._auth_abort(417, 'Invalid Parameter') # Retrieve request XML body payload = request.body if payload is None: log.error('Empty payload in auth:token()') return self._auth_abort(417, 'Invalid Parameter') # Retrieve the salt from the model authModel = AuthenticateModel.AuthenticateModel() login = authModel.getTokenLogin(payload) if login is None: return self._auth_abort(417, 'Invalid Parameter') salt = authModel.getTokenLoginSalt(login) if salt is None: log.debug('Invalid user %s', login) salt = encode(hashlib.sha1(os.urandom(4)).digest()) session['token'] = encode(hashlib.sha1(os.urandom(8)).digest()) session['token_salt'] = encode(hashlib.sha1(os.urandom(8)).digest()) session['salt'] = salt session['login'] = login session.save() c.token_salt = session['token_salt'] c.salt = salt response.set_cookie('token', session['token']) response.headers['content-type'] = 'text/xml; charset=utf-8' return render('/token.xml') @restrict('POST') def login(self): """Authenticate the user on ocsmanager. """ if not "ocsmanager" in request.cookies: return self._auth_abort(403, 'Invalid Session') if not "token" in session: return self._auth_abort(403, 'Invalid Session') if not "token" in request.cookies: return self._auth_abort(403, 'Invalid Token') if request.cookies.get('token') != session['token']: return self._auth_abort(403, 'Invalid Token') if not "login" in session: return self._auth_abort(403, 'Invalid Session') payload = request.body if payload is None: log.error('Empty payload in auth:login()') return self._auth_abort(417, 'Invalid Parameter') authModel = AuthenticateModel.AuthenticateModel() (error, msg) = authModel.verifyPassword(session['login'], session['token_salt'], session['salt'], payload) if error is True: response.delete_cookie('token') session['token'] = None return self._auth_abort(401, 'Invalid credentials') # Authentication was successful, remove auth token - no longer needed session['token'] = None response.delete_cookie('token') session['tokenLogin'] = hashlib.sha1(os.urandom(8)).hexdigest() session.save() c.tokenLogin = encode(session['tokenLogin']) c.ttl = 10 return render('/login.xml')
gpl-3.0
1,379,567,357,265,311,700
36.569892
114
0.636234
false
JGulbronson/rmc
data/aggregator.py
2
14325
import argparse import mongoengine import redis import sys import rmc.models as m import rmc.shared.constants as c import rmc.shared.facebook as facebook import rmc.shared.util as rmc_util import rmc.data.crawler as rmc_crawler import rmc.data.processor as rmc_processor # TODO(mack): remove duplication of fields throughout code # TODO(mack): deprecate overall rating r = redis.StrictRedis(host=c.REDIS_HOST, port=c.REDIS_PORT, db=c.REDIS_DB) PROFESSOR_RATING_FIELDS = [ 'easiness', 'clarity', 'passion', ] COURSE_RATING_FIELDS = [ 'easiness', 'interest', 'usefulness', 'overall', ] def increment_ratings(courses, get_rating_fn, get_fields_fn, ucs): for uc in ucs: ratings = get_rating_fn(courses, uc) if not ratings: continue for field_key, field_value in get_fields_fn(uc): if field_value is not None: ratings[field_key].add_rating(field_value) def increment_aggregate_ratings(courses, get_rating_fn, get_fields_fn, ucs): for uc in ucs: ratings = get_rating_fn(courses, uc) if not ratings: continue for field_key, field_value in get_fields_fn(uc): if field_value is not None: ratings[field_key].add_aggregate_rating(field_value) def update_mongo_course_rating(): # course => ratings def get_rating_fn(courses, uc): if uc.course_id not in courses: obj = {} for field in COURSE_RATING_FIELDS: obj[field] = m.AggregateRating() courses[uc.course_id] = obj return courses[uc.course_id] def get_fields_fn(uc): easiness = uc.course_review.easiness interest = uc.course_review.interest usefulness = uc.course_review.usefulness if easiness and interest: overall = (easiness + interest) / 2 elif easiness: overall = easiness else: overall = interest return [ ('easiness', easiness), ('interest', interest), ('overall', overall), ('usefulness', usefulness), ] def get_aggregate_fields_fn(uc): easiness = uc.easiness interest = uc.interest # TODO(mack): add usefulness metric def calculate_overall_rating(e, i): return ((e.count * e.rating + i.count * i.rating) / max(1, (e.count + i.count))) # heuristic for getting the overall rating: # 1. the count will max of the count for each attribute # 2. 
the rating will be average overall = m.AggregateRating( count=max(easiness.count, interest.count), rating=calculate_overall_rating(easiness, interest), ) return [ ('easiness', easiness), ('interest', interest), ('overall', overall), ] courses = {} args = [courses, get_rating_fn] menlo_ucs = m.MenloCourse.get_publicly_visible(rmc_util.MIN_NUM_RATINGS) flow_ucs = m.UserCourse.get_publicly_visible(rmc_util.MIN_NUM_RATINGS) increment_ratings(*(args + [get_fields_fn, menlo_ucs])) increment_ratings(*(args + [get_fields_fn, flow_ucs])) count = [0] def set_course_ratings_in_mongo(courses): for course_id, ratings in courses.items(): course = m.Course.objects.with_id(course_id) if not course: print 'could not find course %s in mongo' % course_id continue course.easiness = ratings['easiness'] course.interest = ratings['interest'] course.usefulness = ratings['usefulness'] course.overall = ratings['overall'] course.save() count[0] += 1 set_course_ratings_in_mongo(courses) print 'saved ratings for %d courses in mongodb' % count[0] def update_mongo_course_professors(): count = 0 for course in m.Course.objects.only('professor_ids'): def get_professor_ids(course, coll): course_prof_ids_only = (coll.objects(course_id=course.id) .only('professor_id')) return set( [x.professor_id for x in course_prof_ids_only if x.professor_id] ) professor_ids = get_professor_ids(course, m.UserCourse).union( get_professor_ids(course, m.MenloCourse)) # TODO(mack): Looks like add_to_set doesn't validate that each item # in the list meets the schema since it seemed to be letting me # writing lists that contained None. Investigate if this is what it # is doing. course.update(add_to_set__professor_ids=list(professor_ids)) count += 1 print 'added professors for %d courses in mongodb' % count def update_redis_course_professor_rating(): # course => professors => ratings def get_rating_fn(courses, uc): if uc.professor_id is None: return None if uc.course_id not in courses: courses[uc.course_id] = {} professors = courses[uc.course_id] if uc.professor_id not in professors: obj = {} for field in PROFESSOR_RATING_FIELDS: obj[field] = m.AggregateRating() professors[uc.professor_id] = obj return professors[uc.professor_id] def get_fields_fn(uc): return [ ('easiness', uc.course_review.easiness), ('clarity', uc.professor_review.clarity), ('passion', uc.professor_review.passion), ] def get_aggregate_fields_fn(uc): return [ ('easiness', uc.easiness), ('clarity', uc.clarity), ('passion', uc.passion), ] courses = {} args = [courses, get_rating_fn] menlo_ucs = m.MenloCourse.get_publicly_visible(rmc_util.MIN_NUM_RATINGS) flow_ucs = m.UserCourse.get_publicly_visible(rmc_util.MIN_NUM_RATINGS) increment_ratings(*(args + [get_fields_fn, menlo_ucs])) increment_ratings(*(args + [get_fields_fn, flow_ucs])) count = [0] def set_course_professor_ratings_in_redis(courses): for course_id, professors in courses.items(): for professor_id, ratings in professors.items(): if professor_id is None: continue professor = m.Professor.objects.with_id(professor_id) if not professor: continue for rating_type, aggregate_rating in ratings.items(): professor.set_course_rating_in_redis( course_id, rating_type, aggregate_rating) count[0] += 1 set_course_professor_ratings_in_redis(courses) print 'set %d course professor rating keys in redis' % count[0] def update_all_fb_friend_list(): for user in m.User.objects(): # TODO(Sandy): Batch requests for performance if user.fbid and not user.is_fb_token_expired: try: user.update_fb_friends( 
facebook.get_friend_list(user.fb_access_token)) user.save() except facebook.FacebookOAuthException as e: user.fb_access_token_invalid = True user.save() except Exception as e: print "get_friend_list failed for %s with: %s" % (user.id, e.message) # TODO(mack): test it when we get data to test with # TODO(mack): currently sort of duplicate logic in # User.cache_mutual_course_ids() def update_redis_friend_mutual_courses(): # TODO(Sandy): Use friend real time updates after it. There's a fb updates # branch for this, pending on: # https://developers.facebook.com/bugs/374296595988186?browse=search_50990ddb8a19d9316431973 # Rate limit is 600 calls / 600 seconds / token: # http://stackoverflow.com/questions/8805316/facebook-graph-api-rate-limit-and-batch-requests update_all_fb_friend_list() courses_by_user = {} for user in m.User.objects.only('friend_ids', 'course_history'): friend_ids = [str(friend_id) for friend_id in user.friend_ids] ucs = (m.UserCourse.objects(id__in=user.course_history) .only('course_id')) course_ids = [uc.course_id for uc in ucs] courses_by_user[str(user.id)] = [friend_ids, set(course_ids)] count = 0 user_pair = set() for user_id, (friend_ids, courses) in courses_by_user.iteritems(): for friend_id in friend_ids: if user_id < friend_id: first_id = user_id second_id = friend_id else: first_id = friend_id second_id = user_id if (first_id, second_id) in user_pair: continue friend_courses = courses_by_user[friend_id][1] mutual_courses = courses.intersection(friend_courses) if mutual_courses: count += 1 redis_key = m.User.cls_mutual_courses_redis_key( first_id, second_id) r.sadd(redis_key, *list(mutual_courses)) user_pair.add((first_id, second_id)) print 'set %d friend pair keys in redis' % count def update_mongo_points(): total_points = 0 num_course_comments = 0 num_course_ratings = 0 num_course_shares = 0 num_professor_comments = 0 num_professor_ratings = 0 num_professor_shares = 0 num_invites = 0 for user in m.User.objects.only( 'num_invites', 'course_history', 'num_points'): num_points = 0 if user.num_invites: num_points += m.PointSource.FIRST_INVITE num_invites += 1 for uc in m.UserCourse.objects(id__in=user.course_history): num_points += uc.num_points if uc.course_review.has_commented: num_course_comments += 1 if uc.course_review.has_been_rated: num_course_ratings += 1 if uc.course_review.has_shared: num_course_shares += 1 if uc.professor_review.has_commented: num_professor_comments += 1 if uc.professor_review.has_been_rated: num_professor_ratings += 1 if uc.professor_review.has_shared: num_professor_shares += 1 user.update(set__num_points=num_points) total_points += num_points r.set('total_points', total_points) print ' ===update_mongo_points ===' print 'num_course_comments', num_course_comments print 'num_course_ratings', num_course_ratings print 'num_course_shares', num_course_shares print 'num_professor_comments', num_professor_comments print 'num_professor_ratings', num_professor_ratings print 'num_professor_shares', num_professor_shares print 'num_invites', num_invites def update_exam_schedule(): # Crawl data and store on disk rmc_crawler.get_opendata_exam_schedule() # Process the data on disk errors = rmc_processor.import_opendata_exam_schedules() print "%d exam schedule items found" % m.Exam.objects().count() print "%d exam schedule items skipped" % len(errors) def update_sections(): # Fetch data from OpenData API and cache to files. rmc_crawler.get_opendata_sections() # Import from files to DB. 
rmc_processor.import_opendata_sections() # Send push notifications about seat openings. num_sent = m.GcmCourseAlert.send_eligible_alerts() num_expired = m.GcmCourseAlert.delete_expired() print 'Sent %s push notifications and expired %s' % (num_sent, num_expired) def update_courses(): # First get an up to date list of departments and write to a text file print "Fetching departments" rmc_crawler.get_departments() # Import any departments we don't already have into Mongo print "Loading departments into Mongo" rmc_processor.import_departments() # Hit the endpoints of the OpenData API for each department print "Fetching courses" rmc_crawler.get_opendata2_courses() # Load the data into Mongo print "Loading courses into Mongo" rmc_processor.import_courses() def update_professors_departments(): """Update the departments_taught field for each professor in Mongo""" for prof in m.Professor.objects(): prof.departments_taught = prof.get_departments_taught() prof.save() def update_scholarships(): """Update the scholarships available in Mongo""" print "Fetching scholarships" rmc_crawler.get_scholarships() print "Loading scholarships into Mongo" rmc_processor.import_scholarships() if __name__ == '__main__': mongoengine.connect(c.MONGO_DB_RMC) parser = argparse.ArgumentParser() mode_mapping = { 'redis_course_professor_rating': update_redis_course_professor_rating, 'redis_friend_mutual_courses': update_redis_friend_mutual_courses, 'mongo_course_rating': update_mongo_course_rating, 'mongo_course_professors': update_mongo_course_professors, 'mongo_points': update_mongo_points, 'exam_schedule': update_exam_schedule, 'sections': update_sections, 'courses': update_courses, 'prof_departments': update_professors_departments, 'scholarships': update_scholarships } parser.add_argument('mode', help='one of %s' % ','.join(mode_mapping.keys() + ['daily'])) args = parser.parse_args() if args.mode == 'daily': daily_functions = [ update_redis_course_professor_rating, update_redis_friend_mutual_courses, update_mongo_course_rating, update_mongo_course_professors, update_mongo_points, update_exam_schedule, update_professors_departments ] for func in daily_functions: try: func() except Exception as exp: print "aggregator.py: function %s threw an exception" % (func) print exp elif args.mode in mode_mapping: func = mode_mapping[args.mode] func() else: sys.exit('The mode %s is not supported' % args.mode)
mit
15,536,293,646,599,332
33.107143
97
0.603839
false
openhdf/enigma2-wetek
lib/python/Components/Renderer/valioOledInfo.py
13
2222
# -*- coding: utf-8 -*- # # OLED-Info Renderer for Dreambox/Enigma-2 # Version: 1.0 # Coded by Vali (c)2011 # ####################################################################### from enigma import eLabel from Renderer import Renderer from os import popen from time import localtime, strftime from Components.VariableText import VariableText from Components.Sensors import sensors from Components.config import config from Tools.HardwareInfo import HardwareInfo class valioOledInfo(Renderer, VariableText): def __init__(self): Renderer.__init__(self) VariableText.__init__(self) try: self.infozahl = int(config.valiflex.OledInfo.value) except: self.infozahl = 12 self.Zaehler = 0 self.oben = "---" self.unten = "---" GUI_WIDGET = eLabel def changed(self, what): if not self.suspended: if self.Zaehler > self.infozahl: self.Zaehler = 0 if self.Zaehler == 0: self.hide() elif self.Zaehler == 6: self.show() t = localtime(self.source.time) self.oben = _(strftime("%a", t)) + " " +strftime("%d", t) self.unten = "%02d:%02d" % (t.tm_hour, t.tm_min) elif self.Zaehler == 14: self.oben = "temp:" maxtemp = 0 try: templist = sensors.getSensorsList(sensors.TYPE_TEMPERATURE) tempcount = len(templist) for count in range(tempcount): id = templist[count] tt = sensors.getSensorValue(id) if tt > maxtemp: maxtemp = tt except: pass self.unten = str(maxtemp) + " °C" elif self.Zaehler == 21: self.oben = "loads:" loada = 0 try: out_line = open("/proc/loadavg").readline() loada = out_line[:4] except: pass self.unten = loada elif self.Zaehler == 28: self.oben = "free:" out_lines = [] out_lines = open("/proc/meminfo").readlines() for lidx in range(len(out_lines)-1): tstLine = out_lines[lidx].split() if "MemFree:" in tstLine: templ = int(out_lines[lidx].split()[1]) fmem = "%d mb" %(templ/1024) self.unten = str(fmem) self.Zaehler = self.Zaehler + 1 self.text = self.oben + "\n" + self.unten def onShow(self): self.suspended = False self.changed(None) def onHide(self): self.suspended = True
gpl-2.0
3,997,286,995,062,252,500
25.771084
71
0.611161
false
csutherl/sos
sos/plugins/acpid.py
12
1211
# This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. from sos.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin class Acpid(Plugin): """ACPI daemon information""" plugin_name = "acpid" profiles = ('hardware',) class RedHatAcpid(Acpid, RedHatPlugin): def setup(self): self.add_copy_spec([ "/var/log/acpid*", "/etc/acpi/events/power.conf"]) class DebianAcpid(Acpid, DebianPlugin, UbuntuPlugin): def setup(self): self.add_copy_spec([ "/etc/acpi/events/powerbtn*"]) # vim: set et ts=4 sw=4 :
gpl-2.0
-222,572,055,967,362,720
32.638889
72
0.706854
false
codypiersall/platformer
lib/menu.py
1
7244
""" A simple menu system for pygame. Probably a little too simple, unfortunately.""" from __future__ import division import pygame # This silly Exception is used to return from a menu. class ReturnError(Exception): pass class Exit(Exception): pass class Menu(object): """ Class for building a menu. Initialize it with a Pygame surface object, list of menu items (it's just a list of strings), a Pygame font object, and a dict of settings. The idea behind this class is that menus are for changing settings, which will be given to other Pygame objects. """ SPACE = 10 UP = pygame.K_UP DOWN = pygame.K_DOWN RETURN = pygame.K_RETURN BG_COLOR = (0,0,0) FONT_COLOR = (255,0,0) SELECTOR_COLOR = (0,255,0) def __init__(self, screen, items, font, settings=None): self.settings = settings if settings else {} self.screen = screen self.items = items self.selected = 0 self.surfaces = [] self.font = font self.actions = {} self.initial_repeat = pygame.key.get_repeat() self.repeat = (200, 70) def add_item(self, item): """Add another item to the menu. `item` should just be a string.""" self.items.append(item) def add_submenu(self, index, items): """ Create a new Menu instance, initialized with items, that can be accessed by clicking on the index of the current menu. This makes the font and the settings refer to the same object, so a submenu can change settings too. example: ``` main_menu = Menu(screen, ['Start', 'Options', 'Back'], some_font) options_menu = main_menu.add_submenu(1, ['Levels', 'Character Select']) ``` this will create a menu with "Start", "Options", and "Back" items first; then clicking "Options" will start the `options_menu` main loop. """ submenu = Menu(self.screen, items, self.font, self.settings) self.__add_action(index, submenu) return submenu def change_settings(self, index, setting, value): """ When a menu item associated with the given index is clicked, change the setting indicated to value. 
""" self.__add_action(index, ('settings', setting, value)) def draw(self): """Menu layout and whatnot.""" self.surfaces = [self.font.render(str(i), 1, self.FONT_COLOR) for i in self.items] num_items = len(self.items) ind_height = self.surfaces[0].get_height() height = self.surfaces[0].get_height() * num_items + self.SPACE * (num_items - 1) width = max(s.get_width() for s in self.surfaces) draw_surf = pygame.Surface((width, height)) draw_surf.fill(self.BG_COLOR) for i, item in enumerate(self.surfaces): draw_surf.blit(item, (0, ind_height*i + self.SPACE*i)) menu_x = (self.screen.get_width() - width) / 2 menu_y = (self.screen.get_height() - height) / 2 sy = menu_y + ind_height*self.selected + self.SPACE * self.selected sx = menu_x - 20 self.screen.fill(self.BG_COLOR) self.screen.blit(draw_surf, (menu_x, menu_y)) pygame.draw.polygon(self.screen, self.SELECTOR_COLOR, ([sx,sy], [sx, sy + ind_height], [sx + 10, (2 *sy + ind_height) / 2])) def change_select(self, direction): """Change the current menu selection.""" if direction == self.UP: if self.selected == 0: self.selected = len(self.items) - 1 else: self.selected -= 1 elif direction == self.DOWN: if self.selected == len(self.items) - 1: self.selected = 0 else: self.selected += 1 def _reset_repeat(self): """Change key repeat back to what it was before the menu was called.""" if self.initial_repeat == (0, 0): pygame.key.set_repeat() else: pygame.key.set_repeat(*self.initial_repeat) def seeya(self): """Clean up code when the menu is destroyed.""" self._reset_repeat() def on_enter(self): """Determine what to do when the enter key is pressed.""" try: action = self.actions[self.selected] except KeyError: print("You should add an action for item #{}.".format(self.selected)) return if isinstance(action, Menu): action.mainloop() elif action == 'return': # hokey way of getting back to the main loop. I'm not proud # of this. raise ReturnError elif isinstance(action, (tuple, list)): if action[0] == 'settings': self.settings[action[1]] = action[2] print(self.settings) raise ReturnError if action[0] == 'start': game = action[1]() self._reset_repeat() game.main(self.screen, self.settings) pygame.key.set_repeat(*self.repeat) def add_start_action(self, index, Game): """Resets key repeat and calls `Game.main(self.screen, self.settings)`""" self.__add_action(index, ('start', Game)) def add_back_action(self, index): """ Whenever `index` is selected, go to the previous mainloop. """ self.__add_action(-1, 'return') def __add_action(self, index, action): """ Internal method used for adding an action to a menu item. This should not be called directly. """ if index < 0: index = len(self.items) + index self.actions.update({index: action}) def mainloop(self): pygame.key.set_repeat(*self.repeat) pygame.display.update() clock = pygame.time.Clock() while True: clock.tick(30) for e in pygame.event.get(): if e.type == pygame.QUIT: raise Exit if e.type == pygame.KEYDOWN: if e.key == pygame.K_ESCAPE: self.seeya() return elif e.key == self.UP or e.key == self.DOWN: self.change_select(e.key) elif e.key == self.RETURN: try: self.on_enter() except ReturnError: return self.draw() pygame.display.update() if __name__ == '__main__': """Minimalist example for creating a menu.""" pygame.init() if not pygame.display.get_init(): pygame.display.init() if not pygame.font.get_init(): pygame.font.init() screen = pygame.display.set_mode((640, 480)) font = pygame.font.Font('../coders_crux.ttf', 48) menu = Menu(screen, 'Some Good Items Exit'.split(), font) menu.add_back_action(-1) menu.mainloop()
bsd-3-clause
573,334,550,866,849,700
31.850467
132
0.539343
false
ryanjmccall/nupic
examples/opf/experiments/classification/scalar_encoder_0/description.py
17
2421
# ---------------------------------------------------------------------- # Numenta Platform for Intelligent Computing (NuPIC) # Copyright (C) 2013, Numenta, Inc. Unless you have an agreement # with Numenta, Inc., for a separate license for this software code, the # following terms and conditions apply: # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see http://www.gnu.org/licenses. # # http://numenta.org/licenses/ # ---------------------------------------------------------------------- ## This file defines parameters for a prediction experiment. import os from nupic.frameworks.opf.expdescriptionhelpers import importBaseDescription # the sub-experiment configuration config = \ { 'dataSource': 'file://' + os.path.join(os.path.dirname(__file__), '../datasets/scalar_SP_0.csv'), 'modelParams': { 'clParams': { 'clVerbosity': 0}, 'inferenceType': 'NontemporalClassification', 'sensorParams': { 'encoders': { 'field1': { 'clipInput': True, 'fieldname': u'field1', 'maxval': 0.10000000000000001, 'minval': 0.0, 'n': 11, 'name': u'field1', 'type': 'AdaptiveScalarEncoder', 'w': 7}}, 'verbosity': 0}, 'spEnable': False, 'spParams': { 'spVerbosity': 0}, 'tpEnable': False, 'tpParams': { }}} mod = importBaseDescription('../base_scalar/description.py', config) locals().update(mod.__dict__)
gpl-3.0
4,853,052,496,851,065,000
48.408163
95
0.493598
false
willprice/arduino-sphere-project
scripts/example_direction_finder/temboo/Library/Zendesk/MonitoredTwitterHandles/GetMonitoredTwitterHandle.py
5
3934
# -*- coding: utf-8 -*- ############################################################################### # # GetMonitoredTwitterHandle # Retrieves detailed information on a specified monitored Twitter account. # # Python versions 2.6, 2.7, 3.x # # Copyright 2014, Temboo Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, # either express or implied. See the License for the specific # language governing permissions and limitations under the License. # # ############################################################################### from temboo.core.choreography import Choreography from temboo.core.choreography import InputSet from temboo.core.choreography import ResultSet from temboo.core.choreography import ChoreographyExecution import json class GetMonitoredTwitterHandle(Choreography): def __init__(self, temboo_session): """ Create a new instance of the GetMonitoredTwitterHandle Choreo. A TembooSession object, containing a valid set of Temboo credentials, must be supplied. """ super(GetMonitoredTwitterHandle, self).__init__(temboo_session, '/Library/Zendesk/MonitoredTwitterHandles/GetMonitoredTwitterHandle') def new_input_set(self): return GetMonitoredTwitterHandleInputSet() def _make_result_set(self, result, path): return GetMonitoredTwitterHandleResultSet(result, path) def _make_execution(self, session, exec_id, path): return GetMonitoredTwitterHandleChoreographyExecution(session, exec_id, path) class GetMonitoredTwitterHandleInputSet(InputSet): """ An InputSet with methods appropriate for specifying the inputs to the GetMonitoredTwitterHandle Choreo. The InputSet object is used to specify input parameters when executing this Choreo. """ def set_Email(self, value): """ Set the value of the Email input for this Choreo. ((required, string) The email address you use to login to your Zendesk account.) """ super(GetMonitoredTwitterHandleInputSet, self)._set_input('Email', value) def set_ID(self, value): """ Set the value of the ID input for this Choreo. ((required, string) ID of the monitored Twitter handle.) """ super(GetMonitoredTwitterHandleInputSet, self)._set_input('ID', value) def set_Password(self, value): """ Set the value of the Password input for this Choreo. ((required, password) Your Zendesk password.) """ super(GetMonitoredTwitterHandleInputSet, self)._set_input('Password', value) def set_Server(self, value): """ Set the value of the Server input for this Choreo. ((required, string) Your Zendesk domain and subdomain (e.g., temboocare.zendesk.com).) """ super(GetMonitoredTwitterHandleInputSet, self)._set_input('Server', value) class GetMonitoredTwitterHandleResultSet(ResultSet): """ A ResultSet with methods tailored to the values returned by the GetMonitoredTwitterHandle Choreo. The ResultSet object is used to retrieve the results of a Choreo execution. """ def getJSONFromString(self, str): return json.loads(str) def get_Response(self): """ Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Zendesk.) 
""" return self._output.get('Response', None) class GetMonitoredTwitterHandleChoreographyExecution(ChoreographyExecution): def _make_result_set(self, response, path): return GetMonitoredTwitterHandleResultSet(response, path)
gpl-2.0
-8,861,465,096,887,379,000
39.556701
145
0.68912
false
batxes/4Cin
SHH_WT_models_highres/SHH_WT_models_highres_final_output_0.1_-0.1_5000/SHH_WT_models_highres13173.py
4
88213
import _surface import chimera try: import chimera.runCommand except: pass from VolumePath import markerset as ms try: from VolumePath import Marker_Set, Link new_marker_set=Marker_Set except: from VolumePath import volume_path_dialog d= volume_path_dialog(True) new_marker_set= d.new_marker_set marker_sets={} surf_sets={} if "particle_0 geometry" not in marker_sets: s=new_marker_set('particle_0 geometry') marker_sets["particle_0 geometry"]=s s= marker_sets["particle_0 geometry"] mark=s.place_marker((3090.89, 5216.81, 3306.16), (0.7, 0.7, 0.7), 182.271) if "particle_1 geometry" not in marker_sets: s=new_marker_set('particle_1 geometry') marker_sets["particle_1 geometry"]=s s= marker_sets["particle_1 geometry"] mark=s.place_marker((3442.4, 5488.79, 3362.45), (0.7, 0.7, 0.7), 258.199) if "particle_2 geometry" not in marker_sets: s=new_marker_set('particle_2 geometry') marker_sets["particle_2 geometry"]=s s= marker_sets["particle_2 geometry"] mark=s.place_marker((3243.28, 5186.02, 3208.37), (0.7, 0.7, 0.7), 123.897) if "particle_3 geometry" not in marker_sets: s=new_marker_set('particle_3 geometry') marker_sets["particle_3 geometry"]=s s= marker_sets["particle_3 geometry"] mark=s.place_marker((3266.09, 5486.11, 2938.21), (0.7, 0.7, 0.7), 146.739) if "particle_4 geometry" not in marker_sets: s=new_marker_set('particle_4 geometry') marker_sets["particle_4 geometry"]=s s= marker_sets["particle_4 geometry"] mark=s.place_marker((3308.41, 5741.81, 2530.91), (0.7, 0.7, 0.7), 179.098) if "particle_5 geometry" not in marker_sets: s=new_marker_set('particle_5 geometry') marker_sets["particle_5 geometry"]=s s= marker_sets["particle_5 geometry"] mark=s.place_marker((3314.34, 5112.88, 2522.65), (0.7, 0.7, 0.7), 148.854) if "particle_6 geometry" not in marker_sets: s=new_marker_set('particle_6 geometry') marker_sets["particle_6 geometry"]=s s= marker_sets["particle_6 geometry"] mark=s.place_marker((3226.75, 4558.87, 2388.07), (0.7, 0.7, 0.7), 196.357) if "particle_7 geometry" not in marker_sets: s=new_marker_set('particle_7 geometry') marker_sets["particle_7 geometry"]=s s= marker_sets["particle_7 geometry"] mark=s.place_marker((2820.11, 4640.86, 1957.35), (0.7, 0.7, 0.7), 166.873) if "particle_8 geometry" not in marker_sets: s=new_marker_set('particle_8 geometry') marker_sets["particle_8 geometry"]=s s= marker_sets["particle_8 geometry"] mark=s.place_marker((2449.6, 4795.53, 1424.79), (0.7, 0.7, 0.7), 95.4711) if "particle_9 geometry" not in marker_sets: s=new_marker_set('particle_9 geometry') marker_sets["particle_9 geometry"]=s s= marker_sets["particle_9 geometry"] mark=s.place_marker((2690.15, 4499.98, 1601.81), (0.7, 0.7, 0.7), 185.401) if "particle_10 geometry" not in marker_sets: s=new_marker_set('particle_10 geometry') marker_sets["particle_10 geometry"]=s s= marker_sets["particle_10 geometry"] mark=s.place_marker((3072.82, 4363.07, 2003), (0.7, 0.7, 0.7), 151.984) if "particle_11 geometry" not in marker_sets: s=new_marker_set('particle_11 geometry') marker_sets["particle_11 geometry"]=s s= marker_sets["particle_11 geometry"] mark=s.place_marker((3454.34, 4147.67, 2500.02), (0.7, 0.7, 0.7), 185.612) if "particle_12 geometry" not in marker_sets: s=new_marker_set('particle_12 geometry') marker_sets["particle_12 geometry"]=s s= marker_sets["particle_12 geometry"] mark=s.place_marker((3845.48, 4233.73, 2737.26), (0.7, 0.7, 0.7), 210.273) if "particle_13 geometry" not in marker_sets: s=new_marker_set('particle_13 geometry') marker_sets["particle_13 geometry"]=s s= marker_sets["particle_13 geometry"] 
mark=s.place_marker((4017.91, 4541.21, 2723.57), (0.7, 0.7, 0.7), 106.892) if "particle_14 geometry" not in marker_sets: s=new_marker_set('particle_14 geometry') marker_sets["particle_14 geometry"]=s s= marker_sets["particle_14 geometry"] mark=s.place_marker((4401.24, 4603.34, 2818.23), (0.7, 0.7, 0.7), 202.025) if "particle_15 geometry" not in marker_sets: s=new_marker_set('particle_15 geometry') marker_sets["particle_15 geometry"]=s s= marker_sets["particle_15 geometry"] mark=s.place_marker((4794.35, 4726.7, 3173.3), (0.7, 0.7, 0.7), 192.169) if "particle_16 geometry" not in marker_sets: s=new_marker_set('particle_16 geometry') marker_sets["particle_16 geometry"]=s s= marker_sets["particle_16 geometry"] mark=s.place_marker((5186.86, 4777.04, 3678.1), (0.7, 0.7, 0.7), 241.11) if "particle_17 geometry" not in marker_sets: s=new_marker_set('particle_17 geometry') marker_sets["particle_17 geometry"]=s s= marker_sets["particle_17 geometry"] mark=s.place_marker((5387.31, 4579.19, 4174.33), (0.7, 0.7, 0.7), 128.465) if "particle_18 geometry" not in marker_sets: s=new_marker_set('particle_18 geometry') marker_sets["particle_18 geometry"]=s s= marker_sets["particle_18 geometry"] mark=s.place_marker((5505.5, 4442.72, 4747.76), (0.7, 0.7, 0.7), 217.38) if "particle_19 geometry" not in marker_sets: s=new_marker_set('particle_19 geometry') marker_sets["particle_19 geometry"]=s s= marker_sets["particle_19 geometry"] mark=s.place_marker((5715.78, 4619.53, 5401.87), (0.7, 0.7, 0.7), 184.555) if "particle_20 geometry" not in marker_sets: s=new_marker_set('particle_20 geometry') marker_sets["particle_20 geometry"]=s s= marker_sets["particle_20 geometry"] mark=s.place_marker((5354.49, 4154.73, 5120.84), (0.7, 0.7, 0.7), 140.055) if "particle_21 geometry" not in marker_sets: s=new_marker_set('particle_21 geometry') marker_sets["particle_21 geometry"]=s s= marker_sets["particle_21 geometry"] mark=s.place_marker((5444.65, 3767.47, 4897.28), (0.7, 0.7, 0.7), 169.708) if "particle_22 geometry" not in marker_sets: s=new_marker_set('particle_22 geometry') marker_sets["particle_22 geometry"]=s s= marker_sets["particle_22 geometry"] mark=s.place_marker((5637.46, 3350.81, 4869.79), (0.7, 0.7, 0.7), 184.639) if "particle_23 geometry" not in marker_sets: s=new_marker_set('particle_23 geometry') marker_sets["particle_23 geometry"]=s s= marker_sets["particle_23 geometry"] mark=s.place_marker((5430.83, 3055.01, 4907.7), (0.7, 0.7, 0.7), 119.286) if "particle_24 geometry" not in marker_sets: s=new_marker_set('particle_24 geometry') marker_sets["particle_24 geometry"]=s s= marker_sets["particle_24 geometry"] mark=s.place_marker((5215.06, 2952.45, 5124.7), (0.7, 0.7, 0.7), 147.754) if "particle_25 geometry" not in marker_sets: s=new_marker_set('particle_25 geometry') marker_sets["particle_25 geometry"]=s s= marker_sets["particle_25 geometry"] mark=s.place_marker((4993.18, 3158.14, 5200.35), (0.7, 0.7, 0.7), 171.4) if "particle_26 geometry" not in marker_sets: s=new_marker_set('particle_26 geometry') marker_sets["particle_26 geometry"]=s s= marker_sets["particle_26 geometry"] mark=s.place_marker((4942.44, 3245.51, 4791.83), (0.7, 0.7, 0.7), 156.341) if "particle_27 geometry" not in marker_sets: s=new_marker_set('particle_27 geometry') marker_sets["particle_27 geometry"]=s s= marker_sets["particle_27 geometry"] mark=s.place_marker((4508.02, 3364.32, 4409.99), (0.7, 0.7, 0.7), 186.501) if "particle_28 geometry" not in marker_sets: s=new_marker_set('particle_28 geometry') marker_sets["particle_28 geometry"]=s s= 
marker_sets["particle_28 geometry"] mark=s.place_marker((4102.11, 3385.19, 4041.84), (0.7, 0.7, 0.7), 308.325) if "particle_29 geometry" not in marker_sets: s=new_marker_set('particle_29 geometry') marker_sets["particle_29 geometry"]=s s= marker_sets["particle_29 geometry"] mark=s.place_marker((3999.66, 3459.17, 3639.49), (0.7, 0.7, 0.7), 138.617) if "particle_30 geometry" not in marker_sets: s=new_marker_set('particle_30 geometry') marker_sets["particle_30 geometry"]=s s= marker_sets["particle_30 geometry"] mark=s.place_marker((3990.46, 3321.12, 3375.95), (0.7, 0.7, 0.7), 130.03) if "particle_31 geometry" not in marker_sets: s=new_marker_set('particle_31 geometry') marker_sets["particle_31 geometry"]=s s= marker_sets["particle_31 geometry"] mark=s.place_marker((4184.65, 3265.84, 3644.95), (0.7, 0.7, 0.7), 156.552) if "particle_32 geometry" not in marker_sets: s=new_marker_set('particle_32 geometry') marker_sets["particle_32 geometry"]=s s= marker_sets["particle_32 geometry"] mark=s.place_marker((4026.47, 3512.91, 3767.62), (0.7, 0.7, 0.7), 183.244) if "particle_33 geometry" not in marker_sets: s=new_marker_set('particle_33 geometry') marker_sets["particle_33 geometry"]=s s= marker_sets["particle_33 geometry"] mark=s.place_marker((3897.67, 3697.53, 3916.15), (0.7, 0.7, 0.7), 181.382) if "particle_34 geometry" not in marker_sets: s=new_marker_set('particle_34 geometry') marker_sets["particle_34 geometry"]=s s= marker_sets["particle_34 geometry"] mark=s.place_marker((3807.73, 3656.25, 4086.53), (0.7, 0.7, 0.7), 101.943) if "particle_35 geometry" not in marker_sets: s=new_marker_set('particle_35 geometry') marker_sets["particle_35 geometry"]=s s= marker_sets["particle_35 geometry"] mark=s.place_marker((3783.04, 3832.54, 4398.97), (1, 0.7, 0), 138.913) if "particle_36 geometry" not in marker_sets: s=new_marker_set('particle_36 geometry') marker_sets["particle_36 geometry"]=s s= marker_sets["particle_36 geometry"] mark=s.place_marker((3228.84, 2967.93, 4776.67), (0.7, 0.7, 0.7), 221.737) if "particle_37 geometry" not in marker_sets: s=new_marker_set('particle_37 geometry') marker_sets["particle_37 geometry"]=s s= marker_sets["particle_37 geometry"] mark=s.place_marker((2616.08, 2353.63, 4600.86), (0.7, 0.7, 0.7), 256.38) if "particle_38 geometry" not in marker_sets: s=new_marker_set('particle_38 geometry') marker_sets["particle_38 geometry"]=s s= marker_sets["particle_38 geometry"] mark=s.place_marker((1952.2, 2273.33, 4248.27), (0.7, 0.7, 0.7), 221.694) if "particle_39 geometry" not in marker_sets: s=new_marker_set('particle_39 geometry') marker_sets["particle_39 geometry"]=s s= marker_sets["particle_39 geometry"] mark=s.place_marker((1721.16, 2626.59, 3618.89), (0.7, 0.7, 0.7), 259.341) if "particle_40 geometry" not in marker_sets: s=new_marker_set('particle_40 geometry') marker_sets["particle_40 geometry"]=s s= marker_sets["particle_40 geometry"] mark=s.place_marker((2382.49, 2754.68, 3233.37), (0.7, 0.7, 0.7), 117.89) if "particle_41 geometry" not in marker_sets: s=new_marker_set('particle_41 geometry') marker_sets["particle_41 geometry"]=s s= marker_sets["particle_41 geometry"] mark=s.place_marker((3187.11, 2940.63, 3218.99), (0.7, 0.7, 0.7), 116.071) if "particle_42 geometry" not in marker_sets: s=new_marker_set('particle_42 geometry') marker_sets["particle_42 geometry"]=s s= marker_sets["particle_42 geometry"] mark=s.place_marker((3423.18, 3186.91, 3539), (0.7, 0.7, 0.7), 268.224) if "particle_43 geometry" not in marker_sets: s=new_marker_set('particle_43 geometry') 
marker_sets["particle_43 geometry"]=s s= marker_sets["particle_43 geometry"] mark=s.place_marker((3119.75, 3280.74, 3537.84), (0.7, 0.7, 0.7), 386.918) if "particle_44 geometry" not in marker_sets: s=new_marker_set('particle_44 geometry') marker_sets["particle_44 geometry"]=s s= marker_sets["particle_44 geometry"] mark=s.place_marker((2659.51, 3383, 3130.67), (0.7, 0.7, 0.7), 121.316) if "particle_45 geometry" not in marker_sets: s=new_marker_set('particle_45 geometry') marker_sets["particle_45 geometry"]=s s= marker_sets["particle_45 geometry"] mark=s.place_marker((2262.82, 3557.09, 3144.59), (0.7, 0.7, 0.7), 138.363) if "particle_46 geometry" not in marker_sets: s=new_marker_set('particle_46 geometry') marker_sets["particle_46 geometry"]=s s= marker_sets["particle_46 geometry"] mark=s.place_marker((2681.82, 3645.19, 3719.46), (1, 0.7, 0), 175.207) if "particle_47 geometry" not in marker_sets: s=new_marker_set('particle_47 geometry') marker_sets["particle_47 geometry"]=s s= marker_sets["particle_47 geometry"] mark=s.place_marker((2011.97, 3812.97, 3529.26), (0.7, 0.7, 0.7), 131.468) if "particle_48 geometry" not in marker_sets: s=new_marker_set('particle_48 geometry') marker_sets["particle_48 geometry"]=s s= marker_sets["particle_48 geometry"] mark=s.place_marker((1293.89, 3958.68, 3548.36), (0.7, 0.7, 0.7), 287.894) if "particle_49 geometry" not in marker_sets: s=new_marker_set('particle_49 geometry') marker_sets["particle_49 geometry"]=s s= marker_sets["particle_49 geometry"] mark=s.place_marker((1523.24, 3465.94, 3594.87), (0.7, 0.7, 0.7), 88.1109) if "particle_50 geometry" not in marker_sets: s=new_marker_set('particle_50 geometry') marker_sets["particle_50 geometry"]=s s= marker_sets["particle_50 geometry"] mark=s.place_marker((2067.26, 3224.06, 3548.5), (0.7, 0.7, 0.7), 145.385) if "particle_51 geometry" not in marker_sets: s=new_marker_set('particle_51 geometry') marker_sets["particle_51 geometry"]=s s= marker_sets["particle_51 geometry"] mark=s.place_marker((2194.35, 3102.8, 3444.89), (0.7, 0.7, 0.7), 155.452) if "particle_52 geometry" not in marker_sets: s=new_marker_set('particle_52 geometry') marker_sets["particle_52 geometry"]=s s= marker_sets["particle_52 geometry"] mark=s.place_marker((1577.34, 3094.31, 3356.89), (0.7, 0.7, 0.7), 145.512) if "particle_53 geometry" not in marker_sets: s=new_marker_set('particle_53 geometry') marker_sets["particle_53 geometry"]=s s= marker_sets["particle_53 geometry"] mark=s.place_marker((1071.16, 3085.41, 3225.32), (0.7, 0.7, 0.7), 99.9972) if "particle_54 geometry" not in marker_sets: s=new_marker_set('particle_54 geometry') marker_sets["particle_54 geometry"]=s s= marker_sets["particle_54 geometry"] mark=s.place_marker((699.839, 3134.94, 2967.88), (0.7, 0.7, 0.7), 327.529) if "particle_55 geometry" not in marker_sets: s=new_marker_set('particle_55 geometry') marker_sets["particle_55 geometry"]=s s= marker_sets["particle_55 geometry"] mark=s.place_marker((1194.99, 2993.02, 2595.42), (0.7, 0.7, 0.7), 137.983) if "particle_56 geometry" not in marker_sets: s=new_marker_set('particle_56 geometry') marker_sets["particle_56 geometry"]=s s= marker_sets["particle_56 geometry"] mark=s.place_marker((1650.27, 2814.7, 2723.25), (0.7, 0.7, 0.7), 83.3733) if "particle_57 geometry" not in marker_sets: s=new_marker_set('particle_57 geometry') marker_sets["particle_57 geometry"]=s s= marker_sets["particle_57 geometry"] mark=s.place_marker((2150.52, 2680.43, 2960.7), (0.7, 0.7, 0.7), 101.562) if "particle_58 geometry" not in marker_sets: 
s=new_marker_set('particle_58 geometry') marker_sets["particle_58 geometry"]=s s= marker_sets["particle_58 geometry"] mark=s.place_marker((2599.89, 2684.91, 3234.81), (0.7, 0.7, 0.7), 165.689) if "particle_59 geometry" not in marker_sets: s=new_marker_set('particle_59 geometry') marker_sets["particle_59 geometry"]=s s= marker_sets["particle_59 geometry"] mark=s.place_marker((2498.8, 2743.03, 3522.98), (0.7, 0.7, 0.7), 136.925) if "particle_60 geometry" not in marker_sets: s=new_marker_set('particle_60 geometry') marker_sets["particle_60 geometry"]=s s= marker_sets["particle_60 geometry"] mark=s.place_marker((2417.74, 2809.45, 3528.8), (0.7, 0.7, 0.7), 123.389) if "particle_61 geometry" not in marker_sets: s=new_marker_set('particle_61 geometry') marker_sets["particle_61 geometry"]=s s= marker_sets["particle_61 geometry"] mark=s.place_marker((2064.84, 2726.46, 3228.19), (0.7, 0.7, 0.7), 184.47) if "particle_62 geometry" not in marker_sets: s=new_marker_set('particle_62 geometry') marker_sets["particle_62 geometry"]=s s= marker_sets["particle_62 geometry"] mark=s.place_marker((1385.03, 2507.08, 2857.65), (0.7, 0.7, 0.7), 148.473) if "particle_63 geometry" not in marker_sets: s=new_marker_set('particle_63 geometry') marker_sets["particle_63 geometry"]=s s= marker_sets["particle_63 geometry"] mark=s.place_marker((502.988, 2188.25, 2486.64), (0.7, 0.7, 0.7), 241.406) if "particle_64 geometry" not in marker_sets: s=new_marker_set('particle_64 geometry') marker_sets["particle_64 geometry"]=s s= marker_sets["particle_64 geometry"] mark=s.place_marker((1046.74, 2535.16, 2334.68), (0.7, 0.7, 0.7), 182.736) if "particle_65 geometry" not in marker_sets: s=new_marker_set('particle_65 geometry') marker_sets["particle_65 geometry"]=s s= marker_sets["particle_65 geometry"] mark=s.place_marker((1398.05, 2813.55, 2377.44), (0.7, 0.7, 0.7), 166.62) if "particle_66 geometry" not in marker_sets: s=new_marker_set('particle_66 geometry') marker_sets["particle_66 geometry"]=s s= marker_sets["particle_66 geometry"] mark=s.place_marker((1507.41, 2672.68, 2618.01), (0.7, 0.7, 0.7), 113.872) if "particle_67 geometry" not in marker_sets: s=new_marker_set('particle_67 geometry') marker_sets["particle_67 geometry"]=s s= marker_sets["particle_67 geometry"] mark=s.place_marker((1713.24, 2763.55, 2851.64), (0.7, 0.7, 0.7), 110.065) if "particle_68 geometry" not in marker_sets: s=new_marker_set('particle_68 geometry') marker_sets["particle_68 geometry"]=s s= marker_sets["particle_68 geometry"] mark=s.place_marker((1783.94, 2947.17, 3190.69), (0.7, 0.7, 0.7), 150.08) if "particle_69 geometry" not in marker_sets: s=new_marker_set('particle_69 geometry') marker_sets["particle_69 geometry"]=s s= marker_sets["particle_69 geometry"] mark=s.place_marker((1751.61, 3222.24, 3587.05), (0.7, 0.7, 0.7), 118.525) if "particle_70 geometry" not in marker_sets: s=new_marker_set('particle_70 geometry') marker_sets["particle_70 geometry"]=s s= marker_sets["particle_70 geometry"] mark=s.place_marker((1585.48, 3430.21, 4063.25), (0.7, 0.7, 0.7), 163.955) if "particle_71 geometry" not in marker_sets: s=new_marker_set('particle_71 geometry') marker_sets["particle_71 geometry"]=s s= marker_sets["particle_71 geometry"] mark=s.place_marker((1251.91, 3368.59, 4215.12), (0.7, 0.7, 0.7), 170.131) if "particle_72 geometry" not in marker_sets: s=new_marker_set('particle_72 geometry') marker_sets["particle_72 geometry"]=s s= marker_sets["particle_72 geometry"] mark=s.place_marker((920.013, 2947.14, 3682.27), (0.7, 0.7, 0.7), 78.2127) if "particle_73 
geometry" not in marker_sets: s=new_marker_set('particle_73 geometry') marker_sets["particle_73 geometry"]=s s= marker_sets["particle_73 geometry"] mark=s.place_marker((625.25, 2568.83, 3003.93), (0.7, 0.7, 0.7), 251.896) if "particle_74 geometry" not in marker_sets: s=new_marker_set('particle_74 geometry') marker_sets["particle_74 geometry"]=s s= marker_sets["particle_74 geometry"] mark=s.place_marker((524.525, 2394.79, 2352.85), (0.7, 0.7, 0.7), 167.55) if "particle_75 geometry" not in marker_sets: s=new_marker_set('particle_75 geometry') marker_sets["particle_75 geometry"]=s s= marker_sets["particle_75 geometry"] mark=s.place_marker((649.765, 2398.17, 1953.81), (0.7, 0.7, 0.7), 167.846) if "particle_76 geometry" not in marker_sets: s=new_marker_set('particle_76 geometry') marker_sets["particle_76 geometry"]=s s= marker_sets["particle_76 geometry"] mark=s.place_marker((517.955, 1972.34, 2220.43), (0.7, 0.7, 0.7), 259.68) if "particle_77 geometry" not in marker_sets: s=new_marker_set('particle_77 geometry') marker_sets["particle_77 geometry"]=s s= marker_sets["particle_77 geometry"] mark=s.place_marker((346.79, 1995.38, 2656.93), (0.7, 0.7, 0.7), 80.2854) if "particle_78 geometry" not in marker_sets: s=new_marker_set('particle_78 geometry') marker_sets["particle_78 geometry"]=s s= marker_sets["particle_78 geometry"] mark=s.place_marker((161.404, 2071.41, 2591.3), (0.7, 0.7, 0.7), 82.4427) if "particle_79 geometry" not in marker_sets: s=new_marker_set('particle_79 geometry') marker_sets["particle_79 geometry"]=s s= marker_sets["particle_79 geometry"] mark=s.place_marker((-25.3969, 1838.69, 2400.86), (0.7, 0.7, 0.7), 212.811) if "particle_80 geometry" not in marker_sets: s=new_marker_set('particle_80 geometry') marker_sets["particle_80 geometry"]=s s= marker_sets["particle_80 geometry"] mark=s.place_marker((503.845, 1356.45, 2339.49), (0.7, 0.7, 0.7), 176.391) if "particle_81 geometry" not in marker_sets: s=new_marker_set('particle_81 geometry') marker_sets["particle_81 geometry"]=s s= marker_sets["particle_81 geometry"] mark=s.place_marker((1222.84, 1485.94, 2421.17), (0.7, 0.7, 0.7), 99.3204) if "particle_82 geometry" not in marker_sets: s=new_marker_set('particle_82 geometry') marker_sets["particle_82 geometry"]=s s= marker_sets["particle_82 geometry"] mark=s.place_marker((1660.59, 1766.61, 2731.09), (0.7, 0.7, 0.7), 166.62) if "particle_83 geometry" not in marker_sets: s=new_marker_set('particle_83 geometry') marker_sets["particle_83 geometry"]=s s= marker_sets["particle_83 geometry"] mark=s.place_marker((1800.31, 1723.55, 2988.95), (0.7, 0.7, 0.7), 102.831) if "particle_84 geometry" not in marker_sets: s=new_marker_set('particle_84 geometry') marker_sets["particle_84 geometry"]=s s= marker_sets["particle_84 geometry"] mark=s.place_marker((1102.38, 1202.31, 2685.64), (0.7, 0.7, 0.7), 65.0997) if "particle_85 geometry" not in marker_sets: s=new_marker_set('particle_85 geometry') marker_sets["particle_85 geometry"]=s s= marker_sets["particle_85 geometry"] mark=s.place_marker((1022.98, 1677.39, 2505.54), (0.7, 0.7, 0.7), 92.1294) if "particle_86 geometry" not in marker_sets: s=new_marker_set('particle_86 geometry') marker_sets["particle_86 geometry"]=s s= marker_sets["particle_86 geometry"] mark=s.place_marker((1223.57, 2185.68, 2445.99), (0.7, 0.7, 0.7), 194.791) if "particle_87 geometry" not in marker_sets: s=new_marker_set('particle_87 geometry') marker_sets["particle_87 geometry"]=s s= marker_sets["particle_87 geometry"] mark=s.place_marker((1357.33, 2514.76, 2270.95), (0.7, 0.7, 0.7), 
120.766) if "particle_88 geometry" not in marker_sets: s=new_marker_set('particle_88 geometry') marker_sets["particle_88 geometry"]=s s= marker_sets["particle_88 geometry"] mark=s.place_marker((977.298, 2261.52, 1914.44), (0.7, 0.7, 0.7), 217.803) if "particle_89 geometry" not in marker_sets: s=new_marker_set('particle_89 geometry') marker_sets["particle_89 geometry"]=s s= marker_sets["particle_89 geometry"] mark=s.place_marker((825.453, 2187.1, 2286.49), (0.7, 0.7, 0.7), 115.775) if "particle_90 geometry" not in marker_sets: s=new_marker_set('particle_90 geometry') marker_sets["particle_90 geometry"]=s s= marker_sets["particle_90 geometry"] mark=s.place_marker((837.571, 2430.38, 2631.33), (0.7, 0.7, 0.7), 115.648) if "particle_91 geometry" not in marker_sets: s=new_marker_set('particle_91 geometry') marker_sets["particle_91 geometry"]=s s= marker_sets["particle_91 geometry"] mark=s.place_marker((1161.16, 2521.92, 2635.77), (0.7, 0.7, 0.7), 83.8386) if "particle_92 geometry" not in marker_sets: s=new_marker_set('particle_92 geometry') marker_sets["particle_92 geometry"]=s s= marker_sets["particle_92 geometry"] mark=s.place_marker((1306.03, 2646.15, 2301.15), (0.7, 0.7, 0.7), 124.32) if "particle_93 geometry" not in marker_sets: s=new_marker_set('particle_93 geometry') marker_sets["particle_93 geometry"]=s s= marker_sets["particle_93 geometry"] mark=s.place_marker((1434.13, 2952.53, 2005.87), (0.7, 0.7, 0.7), 185.993) if "particle_94 geometry" not in marker_sets: s=new_marker_set('particle_94 geometry') marker_sets["particle_94 geometry"]=s s= marker_sets["particle_94 geometry"] mark=s.place_marker((1157.57, 3458.33, 1790.63), (0.7, 0.7, 0.7), 238.826) if "particle_95 geometry" not in marker_sets: s=new_marker_set('particle_95 geometry') marker_sets["particle_95 geometry"]=s s= marker_sets["particle_95 geometry"] mark=s.place_marker((696.696, 3738.86, 1899.14), (0.7, 0.7, 0.7), 128.465) if "particle_96 geometry" not in marker_sets: s=new_marker_set('particle_96 geometry') marker_sets["particle_96 geometry"]=s s= marker_sets["particle_96 geometry"] mark=s.place_marker((677.478, 3416.37, 2459.03), (0.7, 0.7, 0.7), 203.209) if "particle_97 geometry" not in marker_sets: s=new_marker_set('particle_97 geometry') marker_sets["particle_97 geometry"]=s s= marker_sets["particle_97 geometry"] mark=s.place_marker((949.309, 2991.87, 2582.89), (0.7, 0.7, 0.7), 160.486) if "particle_98 geometry" not in marker_sets: s=new_marker_set('particle_98 geometry') marker_sets["particle_98 geometry"]=s s= marker_sets["particle_98 geometry"] mark=s.place_marker((1009.11, 2892.43, 2257.02), (0.7, 0.7, 0.7), 149.277) if "particle_99 geometry" not in marker_sets: s=new_marker_set('particle_99 geometry') marker_sets["particle_99 geometry"]=s s= marker_sets["particle_99 geometry"] mark=s.place_marker((494.817, 2872.42, 2113.28), (0.7, 0.7, 0.7), 35.7435) if "particle_100 geometry" not in marker_sets: s=new_marker_set('particle_100 geometry') marker_sets["particle_100 geometry"]=s s= marker_sets["particle_100 geometry"] mark=s.place_marker((1149.85, 2656.06, 2847.1), (0.7, 0.7, 0.7), 98.3898) if "particle_101 geometry" not in marker_sets: s=new_marker_set('particle_101 geometry') marker_sets["particle_101 geometry"]=s s= marker_sets["particle_101 geometry"] mark=s.place_marker((2010.02, 2417.66, 3425.01), (0.7, 0.7, 0.7), 188.404) if "particle_102 geometry" not in marker_sets: s=new_marker_set('particle_102 geometry') marker_sets["particle_102 geometry"]=s s= marker_sets["particle_102 geometry"] 
mark=s.place_marker((2488.19, 2268.67, 3360.05), (0.7, 0.7, 0.7), 110.318) if "particle_103 geometry" not in marker_sets: s=new_marker_set('particle_103 geometry') marker_sets["particle_103 geometry"]=s s= marker_sets["particle_103 geometry"] mark=s.place_marker((2204.33, 2105.35, 3157.25), (0.7, 0.7, 0.7), 127.534) if "particle_104 geometry" not in marker_sets: s=new_marker_set('particle_104 geometry') marker_sets["particle_104 geometry"]=s s= marker_sets["particle_104 geometry"] mark=s.place_marker((1863.04, 2112.83, 2995.47), (0.7, 0.7, 0.7), 91.368) if "particle_105 geometry" not in marker_sets: s=new_marker_set('particle_105 geometry') marker_sets["particle_105 geometry"]=s s= marker_sets["particle_105 geometry"] mark=s.place_marker((1530.34, 2244.74, 2839.69), (0.7, 0.7, 0.7), 131.045) if "particle_106 geometry" not in marker_sets: s=new_marker_set('particle_106 geometry') marker_sets["particle_106 geometry"]=s s= marker_sets["particle_106 geometry"] mark=s.place_marker((1270.14, 2562.14, 2745.95), (0.7, 0.7, 0.7), 143.608) if "particle_107 geometry" not in marker_sets: s=new_marker_set('particle_107 geometry') marker_sets["particle_107 geometry"]=s s= marker_sets["particle_107 geometry"] mark=s.place_marker((1249.87, 2773.06, 2408.46), (0.7, 0.7, 0.7), 135.783) if "particle_108 geometry" not in marker_sets: s=new_marker_set('particle_108 geometry') marker_sets["particle_108 geometry"]=s s= marker_sets["particle_108 geometry"] mark=s.place_marker((1260.97, 2915.05, 2103.49), (0.7, 0.7, 0.7), 92.5947) if "particle_109 geometry" not in marker_sets: s=new_marker_set('particle_109 geometry') marker_sets["particle_109 geometry"]=s s= marker_sets["particle_109 geometry"] mark=s.place_marker((1468.35, 2738.57, 2045.95), (0.7, 0.7, 0.7), 150.123) if "particle_110 geometry" not in marker_sets: s=new_marker_set('particle_110 geometry') marker_sets["particle_110 geometry"]=s s= marker_sets["particle_110 geometry"] mark=s.place_marker((1647.87, 2700.23, 1960.63), (0.7, 0.7, 0.7), 121.57) if "particle_111 geometry" not in marker_sets: s=new_marker_set('particle_111 geometry') marker_sets["particle_111 geometry"]=s s= marker_sets["particle_111 geometry"] mark=s.place_marker((1627.21, 2618.06, 1641.27), (0.7, 0.7, 0.7), 104.777) if "particle_112 geometry" not in marker_sets: s=new_marker_set('particle_112 geometry') marker_sets["particle_112 geometry"]=s s= marker_sets["particle_112 geometry"] mark=s.place_marker((1998.47, 2558.16, 1766.02), (0.7, 0.7, 0.7), 114.844) if "particle_113 geometry" not in marker_sets: s=new_marker_set('particle_113 geometry') marker_sets["particle_113 geometry"]=s s= marker_sets["particle_113 geometry"] mark=s.place_marker((2394.12, 2502.16, 1901.18), (0.7, 0.7, 0.7), 150.588) if "particle_114 geometry" not in marker_sets: s=new_marker_set('particle_114 geometry') marker_sets["particle_114 geometry"]=s s= marker_sets["particle_114 geometry"] mark=s.place_marker((2406.83, 2462.22, 2281.67), (0.7, 0.7, 0.7), 103.55) if "particle_115 geometry" not in marker_sets: s=new_marker_set('particle_115 geometry') marker_sets["particle_115 geometry"]=s s= marker_sets["particle_115 geometry"] mark=s.place_marker((2181.3, 2142.24, 2574.6), (0.7, 0.7, 0.7), 215.392) if "particle_116 geometry" not in marker_sets: s=new_marker_set('particle_116 geometry') marker_sets["particle_116 geometry"]=s s= marker_sets["particle_116 geometry"] mark=s.place_marker((2072.96, 1812.79, 2949.94), (0.7, 0.7, 0.7), 99.9126) if "particle_117 geometry" not in marker_sets: s=new_marker_set('particle_117 
geometry') marker_sets["particle_117 geometry"]=s s= marker_sets["particle_117 geometry"] mark=s.place_marker((1629.21, 1205.21, 2899.78), (0.7, 0.7, 0.7), 99.7857) if "particle_118 geometry" not in marker_sets: s=new_marker_set('particle_118 geometry') marker_sets["particle_118 geometry"]=s s= marker_sets["particle_118 geometry"] mark=s.place_marker((1161.71, 866.438, 2889.49), (0.7, 0.7, 0.7), 109.98) if "particle_119 geometry" not in marker_sets: s=new_marker_set('particle_119 geometry') marker_sets["particle_119 geometry"]=s s= marker_sets["particle_119 geometry"] mark=s.place_marker((1549.97, 1155.21, 2666.42), (0.7, 0.7, 0.7), 102.831) if "particle_120 geometry" not in marker_sets: s=new_marker_set('particle_120 geometry') marker_sets["particle_120 geometry"]=s s= marker_sets["particle_120 geometry"] mark=s.place_marker((1731.87, 1478.97, 2617.24), (0.7, 0.7, 0.7), 103.593) if "particle_121 geometry" not in marker_sets: s=new_marker_set('particle_121 geometry') marker_sets["particle_121 geometry"]=s s= marker_sets["particle_121 geometry"] mark=s.place_marker((1740.72, 1903.23, 2560.12), (0.7, 0.7, 0.7), 173.472) if "particle_122 geometry" not in marker_sets: s=new_marker_set('particle_122 geometry') marker_sets["particle_122 geometry"]=s s= marker_sets["particle_122 geometry"] mark=s.place_marker((1301.34, 2136.75, 2419.2), (0.7, 0.7, 0.7), 113.575) if "particle_123 geometry" not in marker_sets: s=new_marker_set('particle_123 geometry') marker_sets["particle_123 geometry"]=s s= marker_sets["particle_123 geometry"] mark=s.place_marker((1277.16, 2514.89, 2253.05), (0.7, 0.7, 0.7), 128.296) if "particle_124 geometry" not in marker_sets: s=new_marker_set('particle_124 geometry') marker_sets["particle_124 geometry"]=s s= marker_sets["particle_124 geometry"] mark=s.place_marker((1251.41, 2822.63, 1973.4), (0.7, 0.7, 0.7), 145.004) if "particle_125 geometry" not in marker_sets: s=new_marker_set('particle_125 geometry') marker_sets["particle_125 geometry"]=s s= marker_sets["particle_125 geometry"] mark=s.place_marker((1430, 3201.21, 1695.25), (0.7, 0.7, 0.7), 148.261) if "particle_126 geometry" not in marker_sets: s=new_marker_set('particle_126 geometry') marker_sets["particle_126 geometry"]=s s= marker_sets["particle_126 geometry"] mark=s.place_marker((1263.04, 3659.72, 1326.51), (0.7, 0.7, 0.7), 127.704) if "particle_127 geometry" not in marker_sets: s=new_marker_set('particle_127 geometry') marker_sets["particle_127 geometry"]=s s= marker_sets["particle_127 geometry"] mark=s.place_marker((937.274, 4090.42, 1124.53), (0.7, 0.7, 0.7), 129.607) if "particle_128 geometry" not in marker_sets: s=new_marker_set('particle_128 geometry') marker_sets["particle_128 geometry"]=s s= marker_sets["particle_128 geometry"] mark=s.place_marker((719.611, 3815.82, 1491.28), (0.7, 0.7, 0.7), 139.759) if "particle_129 geometry" not in marker_sets: s=new_marker_set('particle_129 geometry') marker_sets["particle_129 geometry"]=s s= marker_sets["particle_129 geometry"] mark=s.place_marker((725.563, 3427.72, 2034.38), (0.7, 0.7, 0.7), 118.567) if "particle_130 geometry" not in marker_sets: s=new_marker_set('particle_130 geometry') marker_sets["particle_130 geometry"]=s s= marker_sets["particle_130 geometry"] mark=s.place_marker((758.012, 2993.92, 2075.96), (0.7, 0.7, 0.7), 136.164) if "particle_131 geometry" not in marker_sets: s=new_marker_set('particle_131 geometry') marker_sets["particle_131 geometry"]=s s= marker_sets["particle_131 geometry"] mark=s.place_marker((999.731, 2602.15, 2158.14), (0.7, 0.7, 0.7), 
121.655) if "particle_132 geometry" not in marker_sets: s=new_marker_set('particle_132 geometry') marker_sets["particle_132 geometry"]=s s= marker_sets["particle_132 geometry"] mark=s.place_marker((1281.07, 2260.76, 2108.01), (0.7, 0.7, 0.7), 127.492) if "particle_133 geometry" not in marker_sets: s=new_marker_set('particle_133 geometry') marker_sets["particle_133 geometry"]=s s= marker_sets["particle_133 geometry"] mark=s.place_marker((1279.23, 1865.68, 1913.12), (0.7, 0.7, 0.7), 138.617) if "particle_134 geometry" not in marker_sets: s=new_marker_set('particle_134 geometry') marker_sets["particle_134 geometry"]=s s= marker_sets["particle_134 geometry"] mark=s.place_marker((1586.8, 1653.2, 1899.48), (0.7, 0.7, 0.7), 120.766) if "particle_135 geometry" not in marker_sets: s=new_marker_set('particle_135 geometry') marker_sets["particle_135 geometry"]=s s= marker_sets["particle_135 geometry"] mark=s.place_marker((1763.41, 1530.1, 2153), (0.7, 0.7, 0.7), 145.893) if "particle_136 geometry" not in marker_sets: s=new_marker_set('particle_136 geometry') marker_sets["particle_136 geometry"]=s s= marker_sets["particle_136 geometry"] mark=s.place_marker((1876.15, 1810.8, 2535.69), (0.7, 0.7, 0.7), 185.02) if "particle_137 geometry" not in marker_sets: s=new_marker_set('particle_137 geometry') marker_sets["particle_137 geometry"]=s s= marker_sets["particle_137 geometry"] mark=s.place_marker((2205.73, 2093.04, 2837.17), (0.7, 0.7, 0.7), 221.314) if "particle_138 geometry" not in marker_sets: s=new_marker_set('particle_138 geometry') marker_sets["particle_138 geometry"]=s s= marker_sets["particle_138 geometry"] mark=s.place_marker((2596.33, 2386.1, 2875.4), (0.7, 0.7, 0.7), 165.139) if "particle_139 geometry" not in marker_sets: s=new_marker_set('particle_139 geometry') marker_sets["particle_139 geometry"]=s s= marker_sets["particle_139 geometry"] mark=s.place_marker((2475.55, 2586.76, 2848.82), (0.7, 0.7, 0.7), 179.437) if "particle_140 geometry" not in marker_sets: s=new_marker_set('particle_140 geometry') marker_sets["particle_140 geometry"]=s s= marker_sets["particle_140 geometry"] mark=s.place_marker((2198.82, 2290.26, 2990.97), (0.7, 0.7, 0.7), 137.898) if "particle_141 geometry" not in marker_sets: s=new_marker_set('particle_141 geometry') marker_sets["particle_141 geometry"]=s s= marker_sets["particle_141 geometry"] mark=s.place_marker((1955.8, 2033.96, 3039.56), (0.7, 0.7, 0.7), 124.658) if "particle_142 geometry" not in marker_sets: s=new_marker_set('particle_142 geometry') marker_sets["particle_142 geometry"]=s s= marker_sets["particle_142 geometry"] mark=s.place_marker((1869.79, 1790.68, 2810.05), (0.7, 0.7, 0.7), 97.7553) if "particle_143 geometry" not in marker_sets: s=new_marker_set('particle_143 geometry') marker_sets["particle_143 geometry"]=s s= marker_sets["particle_143 geometry"] mark=s.place_marker((1761.16, 1555.08, 2643.29), (0.7, 0.7, 0.7), 92.9331) if "particle_144 geometry" not in marker_sets: s=new_marker_set('particle_144 geometry') marker_sets["particle_144 geometry"]=s s= marker_sets["particle_144 geometry"] mark=s.place_marker((1551.4, 1323.2, 2515.72), (0.7, 0.7, 0.7), 123.135) if "particle_145 geometry" not in marker_sets: s=new_marker_set('particle_145 geometry') marker_sets["particle_145 geometry"]=s s= marker_sets["particle_145 geometry"] mark=s.place_marker((1780.48, 1547.37, 2732.15), (0.7, 0.7, 0.7), 125.716) if "particle_146 geometry" not in marker_sets: s=new_marker_set('particle_146 geometry') marker_sets["particle_146 geometry"]=s s= marker_sets["particle_146 
geometry"] mark=s.place_marker((1929.9, 1833.09, 2726.9), (0.7, 0.7, 0.7), 127.534) if "particle_147 geometry" not in marker_sets: s=new_marker_set('particle_147 geometry') marker_sets["particle_147 geometry"]=s s= marker_sets["particle_147 geometry"] mark=s.place_marker((1812.18, 2089.44, 2616.9), (0.7, 0.7, 0.7), 94.9212) if "particle_148 geometry" not in marker_sets: s=new_marker_set('particle_148 geometry') marker_sets["particle_148 geometry"]=s s= marker_sets["particle_148 geometry"] mark=s.place_marker((2073.52, 2425.01, 2749.8), (0.7, 0.7, 0.7), 137.644) if "particle_149 geometry" not in marker_sets: s=new_marker_set('particle_149 geometry') marker_sets["particle_149 geometry"]=s s= marker_sets["particle_149 geometry"] mark=s.place_marker((2276.64, 2719.64, 2734.72), (0.7, 0.7, 0.7), 149.277) if "particle_150 geometry" not in marker_sets: s=new_marker_set('particle_150 geometry') marker_sets["particle_150 geometry"]=s s= marker_sets["particle_150 geometry"] mark=s.place_marker((2392.19, 2559.14, 2424.7), (0.7, 0.7, 0.7), 103.677) if "particle_151 geometry" not in marker_sets: s=new_marker_set('particle_151 geometry') marker_sets["particle_151 geometry"]=s s= marker_sets["particle_151 geometry"] mark=s.place_marker((2332.26, 2393.17, 1953.59), (0.7, 0.7, 0.7), 99.6588) if "particle_152 geometry" not in marker_sets: s=new_marker_set('particle_152 geometry') marker_sets["particle_152 geometry"]=s s= marker_sets["particle_152 geometry"] mark=s.place_marker((2289.64, 2338.93, 1574.29), (0.7, 0.7, 0.7), 134.133) if "particle_153 geometry" not in marker_sets: s=new_marker_set('particle_153 geometry') marker_sets["particle_153 geometry"]=s s= marker_sets["particle_153 geometry"] mark=s.place_marker((2313.95, 2177.08, 1864.22), (0.7, 0.7, 0.7), 173.007) if "particle_154 geometry" not in marker_sets: s=new_marker_set('particle_154 geometry') marker_sets["particle_154 geometry"]=s s= marker_sets["particle_154 geometry"] mark=s.place_marker((2517.97, 2359.26, 2368.67), (0.7, 0.7, 0.7), 141.028) if "particle_155 geometry" not in marker_sets: s=new_marker_set('particle_155 geometry') marker_sets["particle_155 geometry"]=s s= marker_sets["particle_155 geometry"] mark=s.place_marker((2600.01, 2485.32, 2816.83), (0.7, 0.7, 0.7), 161.121) if "particle_156 geometry" not in marker_sets: s=new_marker_set('particle_156 geometry') marker_sets["particle_156 geometry"]=s s= marker_sets["particle_156 geometry"] mark=s.place_marker((2330.41, 2642.93, 2968.55), (0.7, 0.7, 0.7), 119.582) if "particle_157 geometry" not in marker_sets: s=new_marker_set('particle_157 geometry') marker_sets["particle_157 geometry"]=s s= marker_sets["particle_157 geometry"] mark=s.place_marker((2013.04, 2492.05, 2781.23), (0.7, 0.7, 0.7), 137.094) if "particle_158 geometry" not in marker_sets: s=new_marker_set('particle_158 geometry') marker_sets["particle_158 geometry"]=s s= marker_sets["particle_158 geometry"] mark=s.place_marker((1654.71, 2163.4, 2679.06), (0.7, 0.7, 0.7), 149.234) if "particle_159 geometry" not in marker_sets: s=new_marker_set('particle_159 geometry') marker_sets["particle_159 geometry"]=s s= marker_sets["particle_159 geometry"] mark=s.place_marker((1746.09, 1925.79, 3022.4), (0.7, 0.7, 0.7), 151.011) if "particle_160 geometry" not in marker_sets: s=new_marker_set('particle_160 geometry') marker_sets["particle_160 geometry"]=s s= marker_sets["particle_160 geometry"] mark=s.place_marker((2131.07, 1862.7, 3338.64), (0.7, 0.7, 0.7), 184.216) if "particle_161 geometry" not in marker_sets: 
s=new_marker_set('particle_161 geometry') marker_sets["particle_161 geometry"]=s s= marker_sets["particle_161 geometry"] mark=s.place_marker((2396.61, 1700.13, 3076.17), (0.7, 0.7, 0.7), 170.596) if "particle_162 geometry" not in marker_sets: s=new_marker_set('particle_162 geometry') marker_sets["particle_162 geometry"]=s s= marker_sets["particle_162 geometry"] mark=s.place_marker((2006.97, 1316.19, 2760.43), (0.7, 0.7, 0.7), 215.603) if "particle_163 geometry" not in marker_sets: s=new_marker_set('particle_163 geometry') marker_sets["particle_163 geometry"]=s s= marker_sets["particle_163 geometry"] mark=s.place_marker((1396.3, 811.098, 2370.96), (0.7, 0.7, 0.7), 79.0164) if "particle_164 geometry" not in marker_sets: s=new_marker_set('particle_164 geometry') marker_sets["particle_164 geometry"]=s s= marker_sets["particle_164 geometry"] mark=s.place_marker((1481.94, 793.405, 2045.79), (0.7, 0.7, 0.7), 77.2821) if "particle_165 geometry" not in marker_sets: s=new_marker_set('particle_165 geometry') marker_sets["particle_165 geometry"]=s s= marker_sets["particle_165 geometry"] mark=s.place_marker((1628.33, 1100.64, 1974.76), (0.7, 0.7, 0.7), 188.658) if "particle_166 geometry" not in marker_sets: s=new_marker_set('particle_166 geometry') marker_sets["particle_166 geometry"]=s s= marker_sets["particle_166 geometry"] mark=s.place_marker((1898.14, 1041.02, 1827.19), (0.7, 0.7, 0.7), 115.437) if "particle_167 geometry" not in marker_sets: s=new_marker_set('particle_167 geometry') marker_sets["particle_167 geometry"]=s s= marker_sets["particle_167 geometry"] mark=s.place_marker((2103.96, 1465.73, 2206.61), (0.7, 0.7, 0.7), 88.4916) if "particle_168 geometry" not in marker_sets: s=new_marker_set('particle_168 geometry') marker_sets["particle_168 geometry"]=s s= marker_sets["particle_168 geometry"] mark=s.place_marker((2310.07, 1904.89, 2594.65), (0.7, 0.7, 0.7), 108.88) if "particle_169 geometry" not in marker_sets: s=new_marker_set('particle_169 geometry') marker_sets["particle_169 geometry"]=s s= marker_sets["particle_169 geometry"] mark=s.place_marker((2242.38, 2088.21, 2888.06), (0.7, 0.7, 0.7), 172.119) if "particle_170 geometry" not in marker_sets: s=new_marker_set('particle_170 geometry') marker_sets["particle_170 geometry"]=s s= marker_sets["particle_170 geometry"] mark=s.place_marker((1940.99, 1739.53, 2722.31), (0.7, 0.7, 0.7), 139.505) if "particle_171 geometry" not in marker_sets: s=new_marker_set('particle_171 geometry') marker_sets["particle_171 geometry"]=s s= marker_sets["particle_171 geometry"] mark=s.place_marker((1654.36, 1394.61, 2522.18), (0.7, 0.7, 0.7), 92.7639) if "particle_172 geometry" not in marker_sets: s=new_marker_set('particle_172 geometry') marker_sets["particle_172 geometry"]=s s= marker_sets["particle_172 geometry"] mark=s.place_marker((1571.58, 1423.73, 2718.66), (0.7, 0.7, 0.7), 89.8452) if "particle_173 geometry" not in marker_sets: s=new_marker_set('particle_173 geometry') marker_sets["particle_173 geometry"]=s s= marker_sets["particle_173 geometry"] mark=s.place_marker((1857.29, 1311.46, 2745.25), (0.7, 0.7, 0.7), 149.446) if "particle_174 geometry" not in marker_sets: s=new_marker_set('particle_174 geometry') marker_sets["particle_174 geometry"]=s s= marker_sets["particle_174 geometry"] mark=s.place_marker((1933.14, 1154.16, 2459.34), (0.7, 0.7, 0.7), 126.858) if "particle_175 geometry" not in marker_sets: s=new_marker_set('particle_175 geometry') marker_sets["particle_175 geometry"]=s s= marker_sets["particle_175 geometry"] mark=s.place_marker((1611.59, 
1131.64, 2325.12), (0.7, 0.7, 0.7), 106.046) if "particle_176 geometry" not in marker_sets: s=new_marker_set('particle_176 geometry') marker_sets["particle_176 geometry"]=s s= marker_sets["particle_176 geometry"] mark=s.place_marker((1192.94, 1390.41, 2494.63), (0.7, 0.7, 0.7), 156.298) if "particle_177 geometry" not in marker_sets: s=new_marker_set('particle_177 geometry') marker_sets["particle_177 geometry"]=s s= marker_sets["particle_177 geometry"] mark=s.place_marker((728.078, 1782.92, 2604.07), (0.7, 0.7, 0.7), 231.212) if "particle_178 geometry" not in marker_sets: s=new_marker_set('particle_178 geometry') marker_sets["particle_178 geometry"]=s s= marker_sets["particle_178 geometry"] mark=s.place_marker((742.94, 2168.54, 3016.39), (0.7, 0.7, 0.7), 88.4916) if "particle_179 geometry" not in marker_sets: s=new_marker_set('particle_179 geometry') marker_sets["particle_179 geometry"]=s s= marker_sets["particle_179 geometry"] mark=s.place_marker((1128.29, 2309.08, 3261.29), (0.7, 0.7, 0.7), 111.334) if "particle_180 geometry" not in marker_sets: s=new_marker_set('particle_180 geometry') marker_sets["particle_180 geometry"]=s s= marker_sets["particle_180 geometry"] mark=s.place_marker((1747.91, 2281.92, 3273.57), (0.7, 0.7, 0.7), 127.619) if "particle_181 geometry" not in marker_sets: s=new_marker_set('particle_181 geometry') marker_sets["particle_181 geometry"]=s s= marker_sets["particle_181 geometry"] mark=s.place_marker((2205.7, 2242.19, 3277.45), (0.7, 0.7, 0.7), 230.746) if "particle_182 geometry" not in marker_sets: s=new_marker_set('particle_182 geometry') marker_sets["particle_182 geometry"]=s s= marker_sets["particle_182 geometry"] mark=s.place_marker((1939.96, 2025.64, 3076.75), (0.7, 0.7, 0.7), 124.573) if "particle_183 geometry" not in marker_sets: s=new_marker_set('particle_183 geometry') marker_sets["particle_183 geometry"]=s s= marker_sets["particle_183 geometry"] mark=s.place_marker((1482.01, 1656.91, 2883.13), (0.7, 0.7, 0.7), 124.489) if "particle_184 geometry" not in marker_sets: s=new_marker_set('particle_184 geometry') marker_sets["particle_184 geometry"]=s s= marker_sets["particle_184 geometry"] mark=s.place_marker((1504.57, 1478.11, 2544.99), (0.7, 0.7, 0.7), 196.61) if "particle_185 geometry" not in marker_sets: s=new_marker_set('particle_185 geometry') marker_sets["particle_185 geometry"]=s s= marker_sets["particle_185 geometry"] mark=s.place_marker((1531.55, 1770.69, 2557.96), (0.7, 0.7, 0.7), 134.049) if "particle_186 geometry" not in marker_sets: s=new_marker_set('particle_186 geometry') marker_sets["particle_186 geometry"]=s s= marker_sets["particle_186 geometry"] mark=s.place_marker((1407.73, 1610.39, 2785.39), (0.7, 0.7, 0.7), 141.493) if "particle_187 geometry" not in marker_sets: s=new_marker_set('particle_187 geometry') marker_sets["particle_187 geometry"]=s s= marker_sets["particle_187 geometry"] mark=s.place_marker((1078.32, 1373.51, 2866.52), (0.7, 0.7, 0.7), 172.203) if "particle_188 geometry" not in marker_sets: s=new_marker_set('particle_188 geometry') marker_sets["particle_188 geometry"]=s s= marker_sets["particle_188 geometry"] mark=s.place_marker((1540.71, 1508.68, 2433.47), (0.7, 0.7, 0.7), 271.354) if "particle_189 geometry" not in marker_sets: s=new_marker_set('particle_189 geometry') marker_sets["particle_189 geometry"]=s s= marker_sets["particle_189 geometry"] mark=s.place_marker((1728.21, 1816.18, 2082.57), (0.7, 0.7, 0.7), 97.0785) if "particle_190 geometry" not in marker_sets: s=new_marker_set('particle_190 geometry') 
marker_sets["particle_190 geometry"]=s s= marker_sets["particle_190 geometry"] mark=s.place_marker((1629.49, 2143.27, 1857.23), (0.7, 0.7, 0.7), 151.857) if "particle_191 geometry" not in marker_sets: s=new_marker_set('particle_191 geometry') marker_sets["particle_191 geometry"]=s s= marker_sets["particle_191 geometry"] mark=s.place_marker((1806.4, 2496.19, 1454.7), (0.7, 0.7, 0.7), 199.233) if "particle_192 geometry" not in marker_sets: s=new_marker_set('particle_192 geometry') marker_sets["particle_192 geometry"]=s s= marker_sets["particle_192 geometry"] mark=s.place_marker((2253.13, 2808.98, 1557.96), (0.7, 0.7, 0.7), 118.863) if "particle_193 geometry" not in marker_sets: s=new_marker_set('particle_193 geometry') marker_sets["particle_193 geometry"]=s s= marker_sets["particle_193 geometry"] mark=s.place_marker((2331.88, 3194.75, 1445.21), (0.7, 0.7, 0.7), 172.415) if "particle_194 geometry" not in marker_sets: s=new_marker_set('particle_194 geometry') marker_sets["particle_194 geometry"]=s s= marker_sets["particle_194 geometry"] mark=s.place_marker((2126.38, 3458.65, 1110.05), (0.7, 0.7, 0.7), 134.26) if "particle_195 geometry" not in marker_sets: s=new_marker_set('particle_195 geometry') marker_sets["particle_195 geometry"]=s s= marker_sets["particle_195 geometry"] mark=s.place_marker((1657.55, 3744.45, 373.939), (0.7, 0.7, 0.7), 139.548) if "particle_196 geometry" not in marker_sets: s=new_marker_set('particle_196 geometry') marker_sets["particle_196 geometry"]=s s= marker_sets["particle_196 geometry"] mark=s.place_marker((1196.42, 3573.54, 664.478), (0.7, 0.7, 0.7), 196.526) if "particle_197 geometry" not in marker_sets: s=new_marker_set('particle_197 geometry') marker_sets["particle_197 geometry"]=s s= marker_sets["particle_197 geometry"] mark=s.place_marker((1132.34, 3166.14, 1351.41), (0.7, 0.7, 0.7), 136.206) if "particle_198 geometry" not in marker_sets: s=new_marker_set('particle_198 geometry') marker_sets["particle_198 geometry"]=s s= marker_sets["particle_198 geometry"] mark=s.place_marker((1537.38, 2985.53, 2233.12), (0.7, 0.7, 0.7), 152.322) if "particle_199 geometry" not in marker_sets: s=new_marker_set('particle_199 geometry') marker_sets["particle_199 geometry"]=s s= marker_sets["particle_199 geometry"] mark=s.place_marker((1930.26, 2764.05, 2718.16), (0.7, 0.7, 0.7), 126.054) if "particle_200 geometry" not in marker_sets: s=new_marker_set('particle_200 geometry') marker_sets["particle_200 geometry"]=s s= marker_sets["particle_200 geometry"] mark=s.place_marker((1758.92, 2388.91, 2687.17), (0.7, 0.7, 0.7), 164.378) if "particle_201 geometry" not in marker_sets: s=new_marker_set('particle_201 geometry') marker_sets["particle_201 geometry"]=s s= marker_sets["particle_201 geometry"] mark=s.place_marker((1683.29, 2102.45, 2322.3), (0.7, 0.7, 0.7), 122.205) if "particle_202 geometry" not in marker_sets: s=new_marker_set('particle_202 geometry') marker_sets["particle_202 geometry"]=s s= marker_sets["particle_202 geometry"] mark=s.place_marker((1768.67, 1943.35, 1895.07), (0.7, 0.7, 0.7), 134.979) if "particle_203 geometry" not in marker_sets: s=new_marker_set('particle_203 geometry') marker_sets["particle_203 geometry"]=s s= marker_sets["particle_203 geometry"] mark=s.place_marker((2057.64, 1911.34, 1997.76), (0.7, 0.7, 0.7), 136.375) if "particle_204 geometry" not in marker_sets: s=new_marker_set('particle_204 geometry') marker_sets["particle_204 geometry"]=s s= marker_sets["particle_204 geometry"] mark=s.place_marker((1774.9, 1830.72, 2216.04), (0.7, 0.7, 0.7), 151.688) if 
"particle_205 geometry" not in marker_sets: s=new_marker_set('particle_205 geometry') marker_sets["particle_205 geometry"]=s s= marker_sets["particle_205 geometry"] mark=s.place_marker((1842.69, 1593.05, 2230.1), (0.7, 0.7, 0.7), 116.156) if "particle_206 geometry" not in marker_sets: s=new_marker_set('particle_206 geometry') marker_sets["particle_206 geometry"]=s s= marker_sets["particle_206 geometry"] mark=s.place_marker((2246.62, 1999.09, 2632.53), (0.7, 0.7, 0.7), 122.839) if "particle_207 geometry" not in marker_sets: s=new_marker_set('particle_207 geometry') marker_sets["particle_207 geometry"]=s s= marker_sets["particle_207 geometry"] mark=s.place_marker((2299.53, 2434.33, 2910.3), (0.7, 0.7, 0.7), 164.716) if "particle_208 geometry" not in marker_sets: s=new_marker_set('particle_208 geometry') marker_sets["particle_208 geometry"]=s s= marker_sets["particle_208 geometry"] mark=s.place_marker((1535.73, 2481.03, 2500.23), (0.7, 0.7, 0.7), 303.672) if "particle_209 geometry" not in marker_sets: s=new_marker_set('particle_209 geometry') marker_sets["particle_209 geometry"]=s s= marker_sets["particle_209 geometry"] mark=s.place_marker((884.472, 2222.55, 1700.26), (0.7, 0.7, 0.7), 220.298) if "particle_210 geometry" not in marker_sets: s=new_marker_set('particle_210 geometry') marker_sets["particle_210 geometry"]=s s= marker_sets["particle_210 geometry"] mark=s.place_marker((1121.34, 1639.07, 1805.06), (0.7, 0.7, 0.7), 175.883) if "particle_211 geometry" not in marker_sets: s=new_marker_set('particle_211 geometry') marker_sets["particle_211 geometry"]=s s= marker_sets["particle_211 geometry"] mark=s.place_marker((1214.33, 1147.05, 2280.78), (0.7, 0.7, 0.7), 233.581) if "particle_212 geometry" not in marker_sets: s=new_marker_set('particle_212 geometry') marker_sets["particle_212 geometry"]=s s= marker_sets["particle_212 geometry"] mark=s.place_marker((1195.48, 1154.97, 3065.55), (0.7, 0.7, 0.7), 231.127) if "particle_213 geometry" not in marker_sets: s=new_marker_set('particle_213 geometry') marker_sets["particle_213 geometry"]=s s= marker_sets["particle_213 geometry"] mark=s.place_marker((1456.07, 790.8, 3514.03), (0.7, 0.7, 0.7), 247.413) if "particle_214 geometry" not in marker_sets: s=new_marker_set('particle_214 geometry') marker_sets["particle_214 geometry"]=s s= marker_sets["particle_214 geometry"] mark=s.place_marker((1861.61, 279.25, 3598.95), (0.7, 0.7, 0.7), 200.206) if "particle_215 geometry" not in marker_sets: s=new_marker_set('particle_215 geometry') marker_sets["particle_215 geometry"]=s s= marker_sets["particle_215 geometry"] mark=s.place_marker((2117.27, 89.8132, 3318.65), (0.7, 0.7, 0.7), 150.419) if "particle_216 geometry" not in marker_sets: s=new_marker_set('particle_216 geometry') marker_sets["particle_216 geometry"]=s s= marker_sets["particle_216 geometry"] mark=s.place_marker((2338.17, 630.252, 3498.27), (0.7, 0.7, 0.7), 140.14) if "particle_217 geometry" not in marker_sets: s=new_marker_set('particle_217 geometry') marker_sets["particle_217 geometry"]=s s= marker_sets["particle_217 geometry"] mark=s.place_marker((2384.47, 889.44, 3849.54), (0.7, 0.7, 0.7), 132.949) if "particle_218 geometry" not in marker_sets: s=new_marker_set('particle_218 geometry') marker_sets["particle_218 geometry"]=s s= marker_sets["particle_218 geometry"] mark=s.place_marker((2499.25, 1193.79, 4020.39), (0.7, 0.7, 0.7), 141.113) if "particle_219 geometry" not in marker_sets: s=new_marker_set('particle_219 geometry') marker_sets["particle_219 geometry"]=s s= marker_sets["particle_219 
geometry"] mark=s.place_marker((2190.7, 1321.92, 4072.23), (0.7, 0.7, 0.7), 171.526) if "particle_220 geometry" not in marker_sets: s=new_marker_set('particle_220 geometry') marker_sets["particle_220 geometry"]=s s= marker_sets["particle_220 geometry"] mark=s.place_marker((1678.66, 1155.81, 3843.79), (0.7, 0.7, 0.7), 326.937) if "particle_221 geometry" not in marker_sets: s=new_marker_set('particle_221 geometry') marker_sets["particle_221 geometry"]=s s= marker_sets["particle_221 geometry"] mark=s.place_marker((1414.68, 1114.86, 3331.28), (0.7, 0.7, 0.7), 92.0871) if "particle_222 geometry" not in marker_sets: s=new_marker_set('particle_222 geometry') marker_sets["particle_222 geometry"]=s s= marker_sets["particle_222 geometry"] mark=s.place_marker((1414.86, 1568.91, 3204.72), (0.7, 0.7, 0.7), 210.273) if "particle_223 geometry" not in marker_sets: s=new_marker_set('particle_223 geometry') marker_sets["particle_223 geometry"]=s s= marker_sets["particle_223 geometry"] mark=s.place_marker((1961.71, 2045.8, 3427.37), (0.7, 0.7, 0.7), 122.628) if "particle_224 geometry" not in marker_sets: s=new_marker_set('particle_224 geometry') marker_sets["particle_224 geometry"]=s s= marker_sets["particle_224 geometry"] mark=s.place_marker((2149.39, 2141.91, 3574.62), (0.7, 0.7, 0.7), 109.176) if "particle_225 geometry" not in marker_sets: s=new_marker_set('particle_225 geometry') marker_sets["particle_225 geometry"]=s s= marker_sets["particle_225 geometry"] mark=s.place_marker((2062.82, 1894.26, 3429.15), (0.7, 0.7, 0.7), 142.213) if "particle_226 geometry" not in marker_sets: s=new_marker_set('particle_226 geometry') marker_sets["particle_226 geometry"]=s s= marker_sets["particle_226 geometry"] mark=s.place_marker((1748.05, 1982.65, 3191.98), (0.7, 0.7, 0.7), 250.078) if "particle_227 geometry" not in marker_sets: s=new_marker_set('particle_227 geometry') marker_sets["particle_227 geometry"]=s s= marker_sets["particle_227 geometry"] mark=s.place_marker((2025.69, 1880.39, 2844.09), (0.7, 0.7, 0.7), 123.558) if "particle_228 geometry" not in marker_sets: s=new_marker_set('particle_228 geometry') marker_sets["particle_228 geometry"]=s s= marker_sets["particle_228 geometry"] mark=s.place_marker((2479.84, 1953.46, 2720.15), (0.7, 0.7, 0.7), 235.992) if "particle_229 geometry" not in marker_sets: s=new_marker_set('particle_229 geometry') marker_sets["particle_229 geometry"]=s s= marker_sets["particle_229 geometry"] mark=s.place_marker((2898.81, 1997.4, 2452.37), (0.7, 0.7, 0.7), 172.373) if "particle_230 geometry" not in marker_sets: s=new_marker_set('particle_230 geometry') marker_sets["particle_230 geometry"]=s s= marker_sets["particle_230 geometry"] mark=s.place_marker((2899.01, 1796.56, 2040.65), (0.7, 0.7, 0.7), 152.322) if "particle_231 geometry" not in marker_sets: s=new_marker_set('particle_231 geometry') marker_sets["particle_231 geometry"]=s s= marker_sets["particle_231 geometry"] mark=s.place_marker((2768.53, 1698.39, 1758.58), (0.7, 0.7, 0.7), 196.653) if "particle_232 geometry" not in marker_sets: s=new_marker_set('particle_232 geometry') marker_sets["particle_232 geometry"]=s s= marker_sets["particle_232 geometry"] mark=s.place_marker((3011.25, 1626.51, 1998.57), (0.7, 0.7, 0.7), 134.091) if "particle_233 geometry" not in marker_sets: s=new_marker_set('particle_233 geometry') marker_sets["particle_233 geometry"]=s s= marker_sets["particle_233 geometry"] mark=s.place_marker((3249.52, 1629.44, 2217.93), (0.7, 0.7, 0.7), 180.325) if "particle_234 geometry" not in marker_sets: 
s=new_marker_set('particle_234 geometry') marker_sets["particle_234 geometry"]=s s= marker_sets["particle_234 geometry"] mark=s.place_marker((2870.18, 1836.74, 2375.88), (0.7, 0.7, 0.7), 218.437) if "particle_235 geometry" not in marker_sets: s=new_marker_set('particle_235 geometry') marker_sets["particle_235 geometry"]=s s= marker_sets["particle_235 geometry"] mark=s.place_marker((2416.45, 1752.93, 2433.89), (0.7, 0.7, 0.7), 148.008) if "particle_236 geometry" not in marker_sets: s=new_marker_set('particle_236 geometry') marker_sets["particle_236 geometry"]=s s= marker_sets["particle_236 geometry"] mark=s.place_marker((1954.13, 1323.18, 2340.36), (0.7, 0.7, 0.7), 191.873) if "particle_237 geometry" not in marker_sets: s=new_marker_set('particle_237 geometry') marker_sets["particle_237 geometry"]=s s= marker_sets["particle_237 geometry"] mark=s.place_marker((1660.32, 887.866, 2494.78), (0.7, 0.7, 0.7), 138.575) if "particle_238 geometry" not in marker_sets: s=new_marker_set('particle_238 geometry') marker_sets["particle_238 geometry"]=s s= marker_sets["particle_238 geometry"] mark=s.place_marker((1752.55, 474.123, 2384.95), (0.7, 0.7, 0.7), 161.205) if "particle_239 geometry" not in marker_sets: s=new_marker_set('particle_239 geometry') marker_sets["particle_239 geometry"]=s s= marker_sets["particle_239 geometry"] mark=s.place_marker((1738.45, 876.879, 2179.64), (0.7, 0.7, 0.7), 288.021) if "particle_240 geometry" not in marker_sets: s=new_marker_set('particle_240 geometry') marker_sets["particle_240 geometry"]=s s= marker_sets["particle_240 geometry"] mark=s.place_marker((2414.78, 1064.76, 2185.36), (0.7, 0.7, 0.7), 227.405) if "particle_241 geometry" not in marker_sets: s=new_marker_set('particle_241 geometry') marker_sets["particle_241 geometry"]=s s= marker_sets["particle_241 geometry"] mark=s.place_marker((2753.7, 1451.8, 2241.63), (0.7, 0.7, 0.7), 126.519) if "particle_242 geometry" not in marker_sets: s=new_marker_set('particle_242 geometry') marker_sets["particle_242 geometry"]=s s= marker_sets["particle_242 geometry"] mark=s.place_marker((2622.41, 1439.97, 1966.53), (0.7, 0.7, 0.7), 117.975) if "particle_243 geometry" not in marker_sets: s=new_marker_set('particle_243 geometry') marker_sets["particle_243 geometry"]=s s= marker_sets["particle_243 geometry"] mark=s.place_marker((2551.8, 1776.82, 2191.6), (0.7, 0.7, 0.7), 200.883) if "particle_244 geometry" not in marker_sets: s=new_marker_set('particle_244 geometry') marker_sets["particle_244 geometry"]=s s= marker_sets["particle_244 geometry"] mark=s.place_marker((2783.2, 1774.3, 2449.27), (0.7, 0.7, 0.7), 158.794) if "particle_245 geometry" not in marker_sets: s=new_marker_set('particle_245 geometry') marker_sets["particle_245 geometry"]=s s= marker_sets["particle_245 geometry"] mark=s.place_marker((2881.6, 1599.58, 2696.98), (0.7, 0.7, 0.7), 115.86) if "particle_246 geometry" not in marker_sets: s=new_marker_set('particle_246 geometry') marker_sets["particle_246 geometry"]=s s= marker_sets["particle_246 geometry"] mark=s.place_marker((2967.4, 1814.45, 2794.71), (0.7, 0.7, 0.7), 133.034) if "particle_247 geometry" not in marker_sets: s=new_marker_set('particle_247 geometry') marker_sets["particle_247 geometry"]=s s= marker_sets["particle_247 geometry"] mark=s.place_marker((3124.1, 2201.65, 2562.16), (0.7, 0.7, 0.7), 314.627) if "particle_248 geometry" not in marker_sets: s=new_marker_set('particle_248 geometry') marker_sets["particle_248 geometry"]=s s= marker_sets["particle_248 geometry"] mark=s.place_marker((2904.93, 2033.48, 
2324.32), (0.7, 0.7, 0.7), 115.352) if "particle_249 geometry" not in marker_sets: s=new_marker_set('particle_249 geometry') marker_sets["particle_249 geometry"]=s s= marker_sets["particle_249 geometry"] mark=s.place_marker((2734.54, 1651.52, 2244.45), (0.7, 0.7, 0.7), 180.621) if "particle_250 geometry" not in marker_sets: s=new_marker_set('particle_250 geometry') marker_sets["particle_250 geometry"]=s s= marker_sets["particle_250 geometry"] mark=s.place_marker((2773.7, 1457.69, 2539.39), (0.7, 0.7, 0.7), 126.265) if "particle_251 geometry" not in marker_sets: s=new_marker_set('particle_251 geometry') marker_sets["particle_251 geometry"]=s s= marker_sets["particle_251 geometry"] mark=s.place_marker((2755.21, 1547.53, 2902.74), (0.7, 0.7, 0.7), 133.541) if "particle_252 geometry" not in marker_sets: s=new_marker_set('particle_252 geometry') marker_sets["particle_252 geometry"]=s s= marker_sets["particle_252 geometry"] mark=s.place_marker((2593.21, 1495.32, 3292.93), (0.7, 0.7, 0.7), 171.019) if "particle_253 geometry" not in marker_sets: s=new_marker_set('particle_253 geometry') marker_sets["particle_253 geometry"]=s s= marker_sets["particle_253 geometry"] mark=s.place_marker((2350.74, 1320.27, 3541.74), (0.7, 0.7, 0.7), 115.437) if "particle_254 geometry" not in marker_sets: s=new_marker_set('particle_254 geometry') marker_sets["particle_254 geometry"]=s s= marker_sets["particle_254 geometry"] mark=s.place_marker((2516.51, 1299.83, 3250.33), (0.7, 0.7, 0.7), 158.583) if "particle_255 geometry" not in marker_sets: s=new_marker_set('particle_255 geometry') marker_sets["particle_255 geometry"]=s s= marker_sets["particle_255 geometry"] mark=s.place_marker((2599.6, 1750.68, 3125.44), (0.7, 0.7, 0.7), 192) if "particle_256 geometry" not in marker_sets: s=new_marker_set('particle_256 geometry') marker_sets["particle_256 geometry"]=s s= marker_sets["particle_256 geometry"] mark=s.place_marker((2831.54, 2044.5, 2921.39), (0.7, 0.7, 0.7), 150.165) if "particle_257 geometry" not in marker_sets: s=new_marker_set('particle_257 geometry') marker_sets["particle_257 geometry"]=s s= marker_sets["particle_257 geometry"] mark=s.place_marker((2831.92, 2032.63, 3227.93), (0.7, 0.7, 0.7), 157.567) if "particle_258 geometry" not in marker_sets: s=new_marker_set('particle_258 geometry') marker_sets["particle_258 geometry"]=s s= marker_sets["particle_258 geometry"] mark=s.place_marker((2773.74, 2116.1, 3189.7), (0.7, 0.7, 0.7), 199.36) if "particle_259 geometry" not in marker_sets: s=new_marker_set('particle_259 geometry') marker_sets["particle_259 geometry"]=s s= marker_sets["particle_259 geometry"] mark=s.place_marker((2409.47, 1852.89, 3066.79), (0.7, 0.7, 0.7), 105.369) if "particle_260 geometry" not in marker_sets: s=new_marker_set('particle_260 geometry') marker_sets["particle_260 geometry"]=s s= marker_sets["particle_260 geometry"] mark=s.place_marker((2181.51, 1841.48, 2916.64), (0.7, 0.7, 0.7), 118.651) if "particle_261 geometry" not in marker_sets: s=new_marker_set('particle_261 geometry') marker_sets["particle_261 geometry"]=s s= marker_sets["particle_261 geometry"] mark=s.place_marker((2526.58, 2100.86, 2803.45), (0.7, 0.7, 0.7), 219.664) if "particle_262 geometry" not in marker_sets: s=new_marker_set('particle_262 geometry') marker_sets["particle_262 geometry"]=s s= marker_sets["particle_262 geometry"] mark=s.place_marker((3015.8, 2425.02, 2881.97), (0.7, 0.7, 0.7), 196.018) if "particle_263 geometry" not in marker_sets: s=new_marker_set('particle_263 geometry') marker_sets["particle_263 geometry"]=s 
s= marker_sets["particle_263 geometry"] mark=s.place_marker((3381.31, 2749.07, 2929.37), (0.7, 0.7, 0.7), 218.141) if "particle_264 geometry" not in marker_sets: s=new_marker_set('particle_264 geometry') marker_sets["particle_264 geometry"]=s s= marker_sets["particle_264 geometry"] mark=s.place_marker((3228.27, 2914.16, 3207.32), (0.7, 0.7, 0.7), 181.636) if "particle_265 geometry" not in marker_sets: s=new_marker_set('particle_265 geometry') marker_sets["particle_265 geometry"]=s s= marker_sets["particle_265 geometry"] mark=s.place_marker((2964.92, 2791.07, 3278.44), (0.7, 0.7, 0.7), 195.003) if "particle_266 geometry" not in marker_sets: s=new_marker_set('particle_266 geometry') marker_sets["particle_266 geometry"]=s s= marker_sets["particle_266 geometry"] mark=s.place_marker((3217.37, 2840.36, 3272.54), (0.7, 0.7, 0.7), 139.209) if "particle_267 geometry" not in marker_sets: s=new_marker_set('particle_267 geometry') marker_sets["particle_267 geometry"]=s s= marker_sets["particle_267 geometry"] mark=s.place_marker((3259.57, 2817.95, 3343.47), (0.7, 0.7, 0.7), 189.885) if "particle_268 geometry" not in marker_sets: s=new_marker_set('particle_268 geometry') marker_sets["particle_268 geometry"]=s s= marker_sets["particle_268 geometry"] mark=s.place_marker((3331.15, 2551.42, 3292.1), (0.7, 0.7, 0.7), 267.674) if "particle_269 geometry" not in marker_sets: s=new_marker_set('particle_269 geometry') marker_sets["particle_269 geometry"]=s s= marker_sets["particle_269 geometry"] mark=s.place_marker((3460.07, 2048.74, 3059.78), (0.7, 0.7, 0.7), 196.568) if "particle_270 geometry" not in marker_sets: s=new_marker_set('particle_270 geometry') marker_sets["particle_270 geometry"]=s s= marker_sets["particle_270 geometry"] mark=s.place_marker((3474.54, 1983.71, 3395.24), (0.7, 0.7, 0.7), 192.423) if "particle_271 geometry" not in marker_sets: s=new_marker_set('particle_271 geometry') marker_sets["particle_271 geometry"]=s s= marker_sets["particle_271 geometry"] mark=s.place_marker((3571.13, 2299.17, 3586.25), (1, 0.7, 0), 202.405) if "particle_272 geometry" not in marker_sets: s=new_marker_set('particle_272 geometry') marker_sets["particle_272 geometry"]=s s= marker_sets["particle_272 geometry"] mark=s.place_marker((3466.08, 1544.08, 3226.11), (0.7, 0.7, 0.7), 135.529) if "particle_273 geometry" not in marker_sets: s=new_marker_set('particle_273 geometry') marker_sets["particle_273 geometry"]=s s= marker_sets["particle_273 geometry"] mark=s.place_marker((3301.97, 633.701, 2995.76), (0.7, 0.7, 0.7), 114.21) if "particle_274 geometry" not in marker_sets: s=new_marker_set('particle_274 geometry') marker_sets["particle_274 geometry"]=s s= marker_sets["particle_274 geometry"] mark=s.place_marker((2987.77, 690.958, 2925.82), (0.7, 0.7, 0.7), 159.133) if "particle_275 geometry" not in marker_sets: s=new_marker_set('particle_275 geometry') marker_sets["particle_275 geometry"]=s s= marker_sets["particle_275 geometry"] mark=s.place_marker((2850.87, 1030.99, 2737.12), (0.7, 0.7, 0.7), 144.412) if "particle_276 geometry" not in marker_sets: s=new_marker_set('particle_276 geometry') marker_sets["particle_276 geometry"]=s s= marker_sets["particle_276 geometry"] mark=s.place_marker((2783.71, 1301.4, 2579.77), (0.7, 0.7, 0.7), 70.8525) if "particle_277 geometry" not in marker_sets: s=new_marker_set('particle_277 geometry') marker_sets["particle_277 geometry"]=s s= marker_sets["particle_277 geometry"] mark=s.place_marker((2882.65, 1874.31, 2788.84), (0.7, 0.7, 0.7), 141.874) if "particle_278 geometry" not in 
marker_sets: s=new_marker_set('particle_278 geometry') marker_sets["particle_278 geometry"]=s s= marker_sets["particle_278 geometry"] mark=s.place_marker((3064.55, 2387.15, 3075.98), (0.7, 0.7, 0.7), 217.337) if "particle_279 geometry" not in marker_sets: s=new_marker_set('particle_279 geometry') marker_sets["particle_279 geometry"]=s s= marker_sets["particle_279 geometry"] mark=s.place_marker((3120.17, 2418.67, 3038.07), (0.7, 0.7, 0.7), 237.641) if "particle_280 geometry" not in marker_sets: s=new_marker_set('particle_280 geometry') marker_sets["particle_280 geometry"]=s s= marker_sets["particle_280 geometry"] mark=s.place_marker((3070.11, 2233.3, 2610.53), (0.7, 0.7, 0.7), 229.393) if "particle_281 geometry" not in marker_sets: s=new_marker_set('particle_281 geometry') marker_sets["particle_281 geometry"]=s s= marker_sets["particle_281 geometry"] mark=s.place_marker((3501.14, 2630.84, 2480.91), (0.7, 0.7, 0.7), 349.906) if "particle_282 geometry" not in marker_sets: s=new_marker_set('particle_282 geometry') marker_sets["particle_282 geometry"]=s s= marker_sets["particle_282 geometry"] mark=s.place_marker((4011.46, 2802.37, 2663.01), (0.7, 0.7, 0.7), 162.347) if "particle_283 geometry" not in marker_sets: s=new_marker_set('particle_283 geometry') marker_sets["particle_283 geometry"]=s s= marker_sets["particle_283 geometry"] mark=s.place_marker((4194.71, 2816.88, 2652.96), (0.7, 0.7, 0.7), 194.072) if "particle_284 geometry" not in marker_sets: s=new_marker_set('particle_284 geometry') marker_sets["particle_284 geometry"]=s s= marker_sets["particle_284 geometry"] mark=s.place_marker((4225.48, 2790.29, 2474.77), (0.7, 0.7, 0.7), 242.21) if "particle_285 geometry" not in marker_sets: s=new_marker_set('particle_285 geometry') marker_sets["particle_285 geometry"]=s s= marker_sets["particle_285 geometry"] mark=s.place_marker((4336.5, 2363.18, 2188.66), (0.7, 0.7, 0.7), 320.93) if "particle_286 geometry" not in marker_sets: s=new_marker_set('particle_286 geometry') marker_sets["particle_286 geometry"]=s s= marker_sets["particle_286 geometry"] mark=s.place_marker((4752.36, 2179.67, 1851.79), (0.7, 0.7, 0.7), 226.432) if "particle_287 geometry" not in marker_sets: s=new_marker_set('particle_287 geometry') marker_sets["particle_287 geometry"]=s s= marker_sets["particle_287 geometry"] mark=s.place_marker((4562.94, 2488.17, 1779.18), (0.7, 0.7, 0.7), 125.208) if "particle_288 geometry" not in marker_sets: s=new_marker_set('particle_288 geometry') marker_sets["particle_288 geometry"]=s s= marker_sets["particle_288 geometry"] mark=s.place_marker((4278.31, 3012.7, 1826.79), (0.7, 0.7, 0.7), 197.837) if "particle_289 geometry" not in marker_sets: s=new_marker_set('particle_289 geometry') marker_sets["particle_289 geometry"]=s s= marker_sets["particle_289 geometry"] mark=s.place_marker((4311.86, 3390.32, 1318.4), (0.7, 0.7, 0.7), 167.804) if "particle_290 geometry" not in marker_sets: s=new_marker_set('particle_290 geometry') marker_sets["particle_290 geometry"]=s s= marker_sets["particle_290 geometry"] mark=s.place_marker((4509.41, 3633.6, 552.179), (0.7, 0.7, 0.7), 136.84) if "particle_291 geometry" not in marker_sets: s=new_marker_set('particle_291 geometry') marker_sets["particle_291 geometry"]=s s= marker_sets["particle_291 geometry"] mark=s.place_marker((4615.95, 3242.42, 412.954), (0.7, 0.7, 0.7), 85.7421) if "particle_292 geometry" not in marker_sets: s=new_marker_set('particle_292 geometry') marker_sets["particle_292 geometry"]=s s= marker_sets["particle_292 geometry"] 
mark=s.place_marker((4684.83, 2820.11, 1774), (1, 0.7, 0), 256) if "particle_293 geometry" not in marker_sets: s=new_marker_set('particle_293 geometry') marker_sets["particle_293 geometry"]=s s= marker_sets["particle_293 geometry"] mark=s.place_marker((4306.57, 3630.66, 1091), (0.7, 0.7, 0.7), 138.702) if "particle_294 geometry" not in marker_sets: s=new_marker_set('particle_294 geometry') marker_sets["particle_294 geometry"]=s s= marker_sets["particle_294 geometry"] mark=s.place_marker((4234.8, 4078.98, 950.138), (0.7, 0.7, 0.7), 140.732) if "particle_295 geometry" not in marker_sets: s=new_marker_set('particle_295 geometry') marker_sets["particle_295 geometry"]=s s= marker_sets["particle_295 geometry"] mark=s.place_marker((4500.42, 3932.57, 1058.17), (0.7, 0.7, 0.7), 81.3006) if "particle_296 geometry" not in marker_sets: s=new_marker_set('particle_296 geometry') marker_sets["particle_296 geometry"]=s s= marker_sets["particle_296 geometry"] mark=s.place_marker((4891.01, 3833.47, 855.543), (0.7, 0.7, 0.7), 133.837) if "particle_297 geometry" not in marker_sets: s=new_marker_set('particle_297 geometry') marker_sets["particle_297 geometry"]=s s= marker_sets["particle_297 geometry"] mark=s.place_marker((4804.93, 3438.47, 1322.52), (0.7, 0.7, 0.7), 98.3475) if "particle_298 geometry" not in marker_sets: s=new_marker_set('particle_298 geometry') marker_sets["particle_298 geometry"]=s s= marker_sets["particle_298 geometry"] mark=s.place_marker((4482.09, 3198.21, 2042.73), (0.7, 0.7, 0.7), 297.623) if "particle_299 geometry" not in marker_sets: s=new_marker_set('particle_299 geometry') marker_sets["particle_299 geometry"]=s s= marker_sets["particle_299 geometry"] mark=s.place_marker((4431.4, 2872.71, 2324.48), (0.7, 0.7, 0.7), 212.938) if "particle_300 geometry" not in marker_sets: s=new_marker_set('particle_300 geometry') marker_sets["particle_300 geometry"]=s s= marker_sets["particle_300 geometry"] mark=s.place_marker((4614.6, 2980.88, 2438.55), (0.7, 0.7, 0.7), 154.183) if "particle_301 geometry" not in marker_sets: s=new_marker_set('particle_301 geometry') marker_sets["particle_301 geometry"]=s s= marker_sets["particle_301 geometry"] mark=s.place_marker((4921.42, 2712.29, 2403.03), (0.7, 0.7, 0.7), 180.832) if "particle_302 geometry" not in marker_sets: s=new_marker_set('particle_302 geometry') marker_sets["particle_302 geometry"]=s s= marker_sets["particle_302 geometry"] mark=s.place_marker((4988.74, 2388.5, 2272.44), (0.7, 0.7, 0.7), 122.332) if "particle_303 geometry" not in marker_sets: s=new_marker_set('particle_303 geometry') marker_sets["particle_303 geometry"]=s s= marker_sets["particle_303 geometry"] mark=s.place_marker((4991.85, 2101.53, 2050.3), (0.7, 0.7, 0.7), 209.047) if "particle_304 geometry" not in marker_sets: s=new_marker_set('particle_304 geometry') marker_sets["particle_304 geometry"]=s s= marker_sets["particle_304 geometry"] mark=s.place_marker((5327.33, 2083.91, 2296.79), (0.7, 0.7, 0.7), 126.985) if "particle_305 geometry" not in marker_sets: s=new_marker_set('particle_305 geometry') marker_sets["particle_305 geometry"]=s s= marker_sets["particle_305 geometry"] mark=s.place_marker((5707.25, 2025.51, 2227.07), (0.7, 0.7, 0.7), 122.205) if "particle_306 geometry" not in marker_sets: s=new_marker_set('particle_306 geometry') marker_sets["particle_306 geometry"]=s s= marker_sets["particle_306 geometry"] mark=s.place_marker((5755.37, 2064.68, 2020.59), (0.7, 0.7, 0.7), 107.95) if "particle_307 geometry" not in marker_sets: s=new_marker_set('particle_307 geometry') 
marker_sets["particle_307 geometry"]=s s= marker_sets["particle_307 geometry"] mark=s.place_marker((5274.05, 2285.16, 2318.03), (0.7, 0.7, 0.7), 182.567) if "particle_308 geometry" not in marker_sets: s=new_marker_set('particle_308 geometry') marker_sets["particle_308 geometry"]=s s= marker_sets["particle_308 geometry"] mark=s.place_marker((4715.81, 2559.83, 2436.75), (0.7, 0.7, 0.7), 185.274) if "particle_309 geometry" not in marker_sets: s=new_marker_set('particle_309 geometry') marker_sets["particle_309 geometry"]=s s= marker_sets["particle_309 geometry"] mark=s.place_marker((4254.85, 2574.56, 2306.89), (0.7, 0.7, 0.7), 413.567) if "particle_310 geometry" not in marker_sets: s=new_marker_set('particle_310 geometry') marker_sets["particle_310 geometry"]=s s= marker_sets["particle_310 geometry"] mark=s.place_marker((4170.11, 2735.92, 2486.12), (0.7, 0.7, 0.7), 240.01) if "particle_311 geometry" not in marker_sets: s=new_marker_set('particle_311 geometry') marker_sets["particle_311 geometry"]=s s= marker_sets["particle_311 geometry"] mark=s.place_marker((4173.87, 2688.1, 2458.17), (0.7, 0.7, 0.7), 238.995) if "particle_312 geometry" not in marker_sets: s=new_marker_set('particle_312 geometry') marker_sets["particle_312 geometry"]=s s= marker_sets["particle_312 geometry"] mark=s.place_marker((4424.16, 2694.95, 2540.83), (0.7, 0.7, 0.7), 203.674) if "particle_313 geometry" not in marker_sets: s=new_marker_set('particle_313 geometry') marker_sets["particle_313 geometry"]=s s= marker_sets["particle_313 geometry"] mark=s.place_marker((4789.23, 2340.14, 2864.99), (0.7, 0.7, 0.7), 266.744) if "particle_314 geometry" not in marker_sets: s=new_marker_set('particle_314 geometry') marker_sets["particle_314 geometry"]=s s= marker_sets["particle_314 geometry"] mark=s.place_marker((5028.08, 2731.96, 2906.55), (0.7, 0.7, 0.7), 147.585) if "particle_315 geometry" not in marker_sets: s=new_marker_set('particle_315 geometry') marker_sets["particle_315 geometry"]=s s= marker_sets["particle_315 geometry"] mark=s.place_marker((4786.7, 2799.58, 2816.75), (0.7, 0.7, 0.7), 249.485) if "particle_316 geometry" not in marker_sets: s=new_marker_set('particle_316 geometry') marker_sets["particle_316 geometry"]=s s= marker_sets["particle_316 geometry"] mark=s.place_marker((4526.81, 2476.19, 2750.35), (0.7, 0.7, 0.7), 119.371) if "particle_317 geometry" not in marker_sets: s=new_marker_set('particle_317 geometry') marker_sets["particle_317 geometry"]=s s= marker_sets["particle_317 geometry"] mark=s.place_marker((4449.67, 1925.79, 2317.33), (0.7, 0.7, 0.7), 155.875) if "particle_318 geometry" not in marker_sets: s=new_marker_set('particle_318 geometry') marker_sets["particle_318 geometry"]=s s= marker_sets["particle_318 geometry"] mark=s.place_marker((4378.39, 1747.54, 1579.96), (0.7, 0.7, 0.7), 189.419) if "particle_319 geometry" not in marker_sets: s=new_marker_set('particle_319 geometry') marker_sets["particle_319 geometry"]=s s= marker_sets["particle_319 geometry"] mark=s.place_marker((4069.91, 2089.97, 1279.18), (0.7, 0.7, 0.7), 137.475) if "particle_320 geometry" not in marker_sets: s=new_marker_set('particle_320 geometry') marker_sets["particle_320 geometry"]=s s= marker_sets["particle_320 geometry"] mark=s.place_marker((3730.76, 2467.57, 1266.91), (0.7, 0.7, 0.7), 176.179) if "particle_321 geometry" not in marker_sets: s=new_marker_set('particle_321 geometry') marker_sets["particle_321 geometry"]=s s= marker_sets["particle_321 geometry"] mark=s.place_marker((3489.17, 2844.16, 1117.23), (0.7, 0.7, 0.7), 138.829) 
if "particle_322 geometry" not in marker_sets: s=new_marker_set('particle_322 geometry') marker_sets["particle_322 geometry"]=s s= marker_sets["particle_322 geometry"] mark=s.place_marker((3403.27, 3196.55, 896.098), (0.7, 0.7, 0.7), 148.727) if "particle_323 geometry" not in marker_sets: s=new_marker_set('particle_323 geometry') marker_sets["particle_323 geometry"]=s s= marker_sets["particle_323 geometry"] mark=s.place_marker((3435.2, 3524.05, 483.942), (0.7, 0.7, 0.7), 230.323) if "particle_324 geometry" not in marker_sets: s=new_marker_set('particle_324 geometry') marker_sets["particle_324 geometry"]=s s= marker_sets["particle_324 geometry"] mark=s.place_marker((3738.71, 3078.8, 824.448), (0.7, 0.7, 0.7), 175.376) if "particle_325 geometry" not in marker_sets: s=new_marker_set('particle_325 geometry') marker_sets["particle_325 geometry"]=s s= marker_sets["particle_325 geometry"] mark=s.place_marker((3834.35, 2767.45, 1214.9), (0.7, 0.7, 0.7), 161.163) if "particle_326 geometry" not in marker_sets: s=new_marker_set('particle_326 geometry') marker_sets["particle_326 geometry"]=s s= marker_sets["particle_326 geometry"] mark=s.place_marker((3532.1, 2447.98, 1012.46), (0.7, 0.7, 0.7), 125.885) if "particle_327 geometry" not in marker_sets: s=new_marker_set('particle_327 geometry') marker_sets["particle_327 geometry"]=s s= marker_sets["particle_327 geometry"] mark=s.place_marker((3403.8, 2097.61, 744.902), (0.7, 0.7, 0.7), 206.635) if "particle_328 geometry" not in marker_sets: s=new_marker_set('particle_328 geometry') marker_sets["particle_328 geometry"]=s s= marker_sets["particle_328 geometry"] mark=s.place_marker((3162.02, 2310.14, 1067.68), (0.7, 0.7, 0.7), 151.392) if "particle_329 geometry" not in marker_sets: s=new_marker_set('particle_329 geometry') marker_sets["particle_329 geometry"]=s s= marker_sets["particle_329 geometry"] mark=s.place_marker((3015.75, 2593.73, 1238.66), (0.7, 0.7, 0.7), 173.388) if "particle_330 geometry" not in marker_sets: s=new_marker_set('particle_330 geometry') marker_sets["particle_330 geometry"]=s s= marker_sets["particle_330 geometry"] mark=s.place_marker((3067.43, 2847.18, 1025.19), (0.7, 0.7, 0.7), 135.825) if "particle_331 geometry" not in marker_sets: s=new_marker_set('particle_331 geometry') marker_sets["particle_331 geometry"]=s s= marker_sets["particle_331 geometry"] mark=s.place_marker((3198.96, 3059.58, 665.749), (0.7, 0.7, 0.7), 186.839) if "particle_332 geometry" not in marker_sets: s=new_marker_set('particle_332 geometry') marker_sets["particle_332 geometry"]=s s= marker_sets["particle_332 geometry"] mark=s.place_marker((3361.74, 3261.97, 266.597), (0.7, 0.7, 0.7), 121.189) if "particle_333 geometry" not in marker_sets: s=new_marker_set('particle_333 geometry') marker_sets["particle_333 geometry"]=s s= marker_sets["particle_333 geometry"] mark=s.place_marker((3474.49, 2997.29, 578.486), (0.7, 0.7, 0.7), 102.916) if "particle_334 geometry" not in marker_sets: s=new_marker_set('particle_334 geometry') marker_sets["particle_334 geometry"]=s s= marker_sets["particle_334 geometry"] mark=s.place_marker((3704.33, 2722.38, 1088.12), (0.7, 0.7, 0.7), 212.769) if "particle_335 geometry" not in marker_sets: s=new_marker_set('particle_335 geometry') marker_sets["particle_335 geometry"]=s s= marker_sets["particle_335 geometry"] mark=s.place_marker((3709.04, 2497.45, 1712.79), (0.7, 0.7, 0.7), 173.092) if "particle_336 geometry" not in marker_sets: s=new_marker_set('particle_336 geometry') marker_sets["particle_336 geometry"]=s s= marker_sets["particle_336 
geometry"] mark=s.place_marker((3893.06, 2194.75, 2060.4), (0.7, 0.7, 0.7), 264.502) if "particle_337 geometry" not in marker_sets: s=new_marker_set('particle_337 geometry') marker_sets["particle_337 geometry"]=s s= marker_sets["particle_337 geometry"] mark=s.place_marker((4274.66, 1801.39, 2060.41), (0.7, 0.7, 0.7), 208.666) if "particle_338 geometry" not in marker_sets: s=new_marker_set('particle_338 geometry') marker_sets["particle_338 geometry"]=s s= marker_sets["particle_338 geometry"] mark=s.place_marker((4743.68, 1673.55, 2041.41), (0.7, 0.7, 0.7), 186.797) if "particle_339 geometry" not in marker_sets: s=new_marker_set('particle_339 geometry') marker_sets["particle_339 geometry"]=s s= marker_sets["particle_339 geometry"] mark=s.place_marker((4991.08, 1824.33, 2450.91), (0.7, 0.7, 0.7), 255.534) if "particle_340 geometry" not in marker_sets: s=new_marker_set('particle_340 geometry') marker_sets["particle_340 geometry"]=s s= marker_sets["particle_340 geometry"] mark=s.place_marker((5306.74, 2105.19, 2419.33), (0.7, 0.7, 0.7), 153.126) if "particle_341 geometry" not in marker_sets: s=new_marker_set('particle_341 geometry') marker_sets["particle_341 geometry"]=s s= marker_sets["particle_341 geometry"] mark=s.place_marker((5440.48, 1763.78, 2253.7), (0.7, 0.7, 0.7), 165.816) if "particle_342 geometry" not in marker_sets: s=new_marker_set('particle_342 geometry') marker_sets["particle_342 geometry"]=s s= marker_sets["particle_342 geometry"] mark=s.place_marker((5132.5, 1602.26, 2447.13), (0.7, 0.7, 0.7), 134.429) if "particle_343 geometry" not in marker_sets: s=new_marker_set('particle_343 geometry') marker_sets["particle_343 geometry"]=s s= marker_sets["particle_343 geometry"] mark=s.place_marker((4744.16, 1605.75, 2318.8), (0.7, 0.7, 0.7), 178.971) if "particle_344 geometry" not in marker_sets: s=new_marker_set('particle_344 geometry') marker_sets["particle_344 geometry"]=s s= marker_sets["particle_344 geometry"] mark=s.place_marker((4541.95, 1727.83, 1843.62), (0.7, 0.7, 0.7), 189.969) if "particle_345 geometry" not in marker_sets: s=new_marker_set('particle_345 geometry') marker_sets["particle_345 geometry"]=s s= marker_sets["particle_345 geometry"] mark=s.place_marker((4623.82, 1487.7, 1268.41), (0.7, 0.7, 0.7), 121.359) if "particle_346 geometry" not in marker_sets: s=new_marker_set('particle_346 geometry') marker_sets["particle_346 geometry"]=s s= marker_sets["particle_346 geometry"] mark=s.place_marker((4322.88, 1713.6, 883.234), (0.7, 0.7, 0.7), 187.262) if "particle_347 geometry" not in marker_sets: s=new_marker_set('particle_347 geometry') marker_sets["particle_347 geometry"]=s s= marker_sets["particle_347 geometry"] mark=s.place_marker((3811.99, 2113.1, 888.839), (0.7, 0.7, 0.7), 164.335) if "particle_348 geometry" not in marker_sets: s=new_marker_set('particle_348 geometry') marker_sets["particle_348 geometry"]=s s= marker_sets["particle_348 geometry"] mark=s.place_marker((3797.13, 2613.17, 881.257), (0.7, 0.7, 0.7), 138.363) if "particle_349 geometry" not in marker_sets: s=new_marker_set('particle_349 geometry') marker_sets["particle_349 geometry"]=s s= marker_sets["particle_349 geometry"] mark=s.place_marker((3710.98, 2919.37, 705.159), (0.7, 0.7, 0.7), 138.49) if "particle_350 geometry" not in marker_sets: s=new_marker_set('particle_350 geometry') marker_sets["particle_350 geometry"]=s s= marker_sets["particle_350 geometry"] mark=s.place_marker((3387.63, 2773.49, 709.053), (0.7, 0.7, 0.7), 116.325) if "particle_351 geometry" not in marker_sets: 
s=new_marker_set('particle_351 geometry') marker_sets["particle_351 geometry"]=s s= marker_sets["particle_351 geometry"] mark=s.place_marker((3458.57, 2381.73, 960.765), (0.7, 0.7, 0.7), 106.511) if "particle_352 geometry" not in marker_sets: s=new_marker_set('particle_352 geometry') marker_sets["particle_352 geometry"]=s s= marker_sets["particle_352 geometry"] mark=s.place_marker((3801.8, 2110.26, 1301.45), (0.7, 0.7, 0.7), 151.096) if "particle_353 geometry" not in marker_sets: s=new_marker_set('particle_353 geometry') marker_sets["particle_353 geometry"]=s s= marker_sets["particle_353 geometry"] mark=s.place_marker((4358.21, 1805.29, 1541.84), (0.7, 0.7, 0.7), 240.856) if "particle_354 geometry" not in marker_sets: s=new_marker_set('particle_354 geometry') marker_sets["particle_354 geometry"]=s s= marker_sets["particle_354 geometry"] mark=s.place_marker((4835.22, 1652.18, 1695.08), (0.7, 0.7, 0.7), 149.7) if "particle_355 geometry" not in marker_sets: s=new_marker_set('particle_355 geometry') marker_sets["particle_355 geometry"]=s s= marker_sets["particle_355 geometry"] mark=s.place_marker((4967.96, 1558.3, 2005.6), (0.7, 0.7, 0.7), 165.943) if "particle_356 geometry" not in marker_sets: s=new_marker_set('particle_356 geometry') marker_sets["particle_356 geometry"]=s s= marker_sets["particle_356 geometry"] mark=s.place_marker((4721.65, 1905.02, 2449.97), (0.7, 0.7, 0.7), 178.971) if "particle_357 geometry" not in marker_sets: s=new_marker_set('particle_357 geometry') marker_sets["particle_357 geometry"]=s s= marker_sets["particle_357 geometry"] mark=s.place_marker((4477.53, 2124.41, 3134.73), (0.7, 0.7, 0.7), 154.945) for k in surf_sets.keys(): chimera.openModels.add([surf_sets[k]])
gpl-3.0
-7,633,951,192,118,199,000
47.790376
75
0.702776
false
crosswalk-project/chromium-crosswalk-efl
third_party/closure_linter/closure_linter/error_fixer_test.py
121
1719
#!/usr/bin/env python
#
# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Unit tests for the error_fixer module."""

# Allow non-Google copyright
# pylint: disable=g-bad-file-header

import unittest as googletest

from closure_linter import error_fixer
from closure_linter import testutil


class ErrorFixerTest(googletest.TestCase):
  """Unit tests for error_fixer."""

  def setUp(self):
    self.error_fixer = error_fixer.ErrorFixer()

  def testDeleteToken(self):
    start_token = testutil.TokenizeSourceAndRunEcmaPass(_TEST_SCRIPT)
    second_token = start_token.next
    self.error_fixer.HandleFile('test_file', start_token)

    self.error_fixer._DeleteToken(start_token)

    self.assertEqual(second_token, self.error_fixer._file_token)

  def testDeleteTokens(self):
    start_token = testutil.TokenizeSourceAndRunEcmaPass(_TEST_SCRIPT)
    fourth_token = start_token.next.next.next
    self.error_fixer.HandleFile('test_file', start_token)

    self.error_fixer._DeleteTokens(start_token, 3)

    self.assertEqual(fourth_token, self.error_fixer._file_token)

_TEST_SCRIPT = """\
var x = 3;
"""

if __name__ == '__main__':
  googletest.main()
bsd-3-clause
8,056,026,600,216,630,000
29.157895
74
0.737638
false
SHMEDIALIMITED/nodec
node_modules/grunt-css/node_modules/gzip-js/test/unzipTest.py
182
1840
import os
from colorama import Fore
from helpers import run_cmd

outDirDefault = 'test-outs'
testDirDefault = 'test-files'

"""
Run a single test

@param tFile- required; the file to check against (uncompressed data)
@param level- optional (default: all); the compression level [1-9]

@return True if all tests passed; False if at least one test failed
"""
def runTest(tFile, level=None, outDir=outDirDefault):
	passed = True

	if level == None:
		for x in range(1, 10):
			if runTest(tFile, x) == False:
				passed = False

		return passed

	out1 = os.path.join(outDir, '%(file)s.%(level)d.gz' % {'file': os.path.basename(tFile), 'level' : level})
	out2 = os.path.join(outDir, '%(file)s.%(level)d' % {'file' : os.path.basename(tFile), 'level' : level})

	run_cmd('gzip -%(level)d -c %(file)s >> %(output)s' % {'level' : level, 'file' : tFile, 'output' : out1})
	run_cmd('../bin/gunzip.js --file %(file)s --output %(output)s' % {'level' : level, 'file' : out1, 'output' : out2})

	result = run_cmd('diff %(file1)s %(file2)s' % {'file1' : tFile, 'file2' : out2})

	if result['returncode'] == 0:
		status = Fore.GREEN + 'PASSED' + Fore.RESET
	else:
		passed = False
		status = Fore.RED + 'FAILED' + Fore.RESET

	print 'Level %(level)d: %(status)s' % {'level' : level, 'status' : status}

	return passed

"""
Runs all tests on the given level.
This iterates through the testDir directory defined above.

@param level- The level to run on [1-9] (default: None, runs on all levels)

@return True if all levels passed, False if at least one failed
"""
def runAll(level=None, testDir=testDirDefault, outDir=outDirDefault):
	passed = True

	for tFile in os.listdir(testDir):
		fullPath = os.path.join(testDir, tFile)
		print Fore.YELLOW + tFile + Fore.RESET
		if runTest(fullPath, level) == False:
			passed = False
		print ''

	return passed
mit
-5,968,886,495,668,307,000
29.666667
116
0.669022
false
hobarrera/django
tests/test_client_regress/tests.py
15
63582
# -*- coding: utf-8 -*- """ Regression tests for the Test Client, especially the customized assertions. """ from __future__ import unicode_literals import itertools import os from django.contrib.auth.models import User from django.contrib.auth.signals import user_logged_in, user_logged_out from django.http import HttpResponse from django.template import ( Context, RequestContext, TemplateSyntaxError, engines, ) from django.template.response import SimpleTemplateResponse from django.test import ( Client, SimpleTestCase, TestCase, ignore_warnings, override_settings, ) from django.test.client import RedirectCycleError, RequestFactory, encode_file from django.test.utils import ContextList, str_prefix from django.urls import NoReverseMatch, reverse from django.utils._os import upath from django.utils.deprecation import RemovedInDjango20Warning from django.utils.translation import ugettext_lazy from .models import CustomUser from .views import CustomTestException class TestDataMixin(object): @classmethod def setUpTestData(cls): cls.u1 = User.objects.create_user(username='testclient', password='password') cls.staff = User.objects.create_user(username='staff', password='password', is_staff=True) @override_settings(ROOT_URLCONF='test_client_regress.urls') class AssertContainsTests(SimpleTestCase): def test_contains(self): "Responses can be inspected for content, including counting repeated substrings" response = self.client.get('/no_template_view/') self.assertNotContains(response, 'never') self.assertContains(response, 'never', 0) self.assertContains(response, 'once') self.assertContains(response, 'once', 1) self.assertContains(response, 'twice') self.assertContains(response, 'twice', 2) try: self.assertContains(response, 'text', status_code=999) except AssertionError as e: self.assertIn("Couldn't retrieve content: Response code was 200 (expected 999)", str(e)) try: self.assertContains(response, 'text', status_code=999, msg_prefix='abc') except AssertionError as e: self.assertIn("abc: Couldn't retrieve content: Response code was 200 (expected 999)", str(e)) try: self.assertNotContains(response, 'text', status_code=999) except AssertionError as e: self.assertIn("Couldn't retrieve content: Response code was 200 (expected 999)", str(e)) try: self.assertNotContains(response, 'text', status_code=999, msg_prefix='abc') except AssertionError as e: self.assertIn("abc: Couldn't retrieve content: Response code was 200 (expected 999)", str(e)) try: self.assertNotContains(response, 'once') except AssertionError as e: self.assertIn("Response should not contain 'once'", str(e)) try: self.assertNotContains(response, 'once', msg_prefix='abc') except AssertionError as e: self.assertIn("abc: Response should not contain 'once'", str(e)) try: self.assertContains(response, 'never', 1) except AssertionError as e: self.assertIn("Found 0 instances of 'never' in response (expected 1)", str(e)) try: self.assertContains(response, 'never', 1, msg_prefix='abc') except AssertionError as e: self.assertIn("abc: Found 0 instances of 'never' in response (expected 1)", str(e)) try: self.assertContains(response, 'once', 0) except AssertionError as e: self.assertIn("Found 1 instances of 'once' in response (expected 0)", str(e)) try: self.assertContains(response, 'once', 0, msg_prefix='abc') except AssertionError as e: self.assertIn("abc: Found 1 instances of 'once' in response (expected 0)", str(e)) try: self.assertContains(response, 'once', 2) except AssertionError as e: self.assertIn("Found 1 instances of 'once' in response 
(expected 2)", str(e)) try: self.assertContains(response, 'once', 2, msg_prefix='abc') except AssertionError as e: self.assertIn("abc: Found 1 instances of 'once' in response (expected 2)", str(e)) try: self.assertContains(response, 'twice', 1) except AssertionError as e: self.assertIn("Found 2 instances of 'twice' in response (expected 1)", str(e)) try: self.assertContains(response, 'twice', 1, msg_prefix='abc') except AssertionError as e: self.assertIn("abc: Found 2 instances of 'twice' in response (expected 1)", str(e)) try: self.assertContains(response, 'thrice') except AssertionError as e: self.assertIn("Couldn't find 'thrice' in response", str(e)) try: self.assertContains(response, 'thrice', msg_prefix='abc') except AssertionError as e: self.assertIn("abc: Couldn't find 'thrice' in response", str(e)) try: self.assertContains(response, 'thrice', 3) except AssertionError as e: self.assertIn("Found 0 instances of 'thrice' in response (expected 3)", str(e)) try: self.assertContains(response, 'thrice', 3, msg_prefix='abc') except AssertionError as e: self.assertIn("abc: Found 0 instances of 'thrice' in response (expected 3)", str(e)) def test_unicode_contains(self): "Unicode characters can be found in template context" # Regression test for #10183 r = self.client.get('/check_unicode/') self.assertContains(r, 'さかき') self.assertContains(r, b'\xe5\xb3\xa0'.decode('utf-8')) def test_unicode_not_contains(self): "Unicode characters can be searched for, and not found in template context" # Regression test for #10183 r = self.client.get('/check_unicode/') self.assertNotContains(r, 'はたけ') self.assertNotContains(r, b'\xe3\x81\xaf\xe3\x81\x9f\xe3\x81\x91'.decode('utf-8')) def test_binary_contains(self): r = self.client.get('/check_binary/') self.assertContains(r, b'%PDF-1.4\r\n%\x93\x8c\x8b\x9e') with self.assertRaises(AssertionError): self.assertContains(r, b'%PDF-1.4\r\n%\x93\x8c\x8b\x9e', count=2) def test_binary_not_contains(self): r = self.client.get('/check_binary/') self.assertNotContains(r, b'%ODF-1.4\r\n%\x93\x8c\x8b\x9e') with self.assertRaises(AssertionError): self.assertNotContains(r, b'%PDF-1.4\r\n%\x93\x8c\x8b\x9e') def test_nontext_contains(self): r = self.client.get('/no_template_view/') self.assertContains(r, ugettext_lazy('once')) def test_nontext_not_contains(self): r = self.client.get('/no_template_view/') self.assertNotContains(r, ugettext_lazy('never')) def test_assert_contains_renders_template_response(self): """ Test that we can pass in an unrendered SimpleTemplateResponse without throwing an error. Refs #15826. """ template = engines['django'].from_string('Hello') response = SimpleTemplateResponse(template) self.assertContains(response, 'Hello') def test_assert_contains_using_non_template_response(self): """ Test that auto-rendering does not affect responses that aren't instances (or subclasses) of SimpleTemplateResponse. Refs #15826. """ response = HttpResponse('Hello') self.assertContains(response, 'Hello') def test_assert_not_contains_renders_template_response(self): """ Test that we can pass in an unrendered SimpleTemplateResponse without throwing an error. Refs #15826. """ template = engines['django'].from_string('Hello') response = SimpleTemplateResponse(template) self.assertNotContains(response, 'Bye') def test_assert_not_contains_using_non_template_response(self): """ Test that auto-rendering does not affect responses that aren't instances (or subclasses) of SimpleTemplateResponse. Refs #15826. 
""" response = HttpResponse('Hello') self.assertNotContains(response, 'Bye') @override_settings(ROOT_URLCONF='test_client_regress.urls') class AssertTemplateUsedTests(TestDataMixin, TestCase): def test_no_context(self): "Template usage assertions work then templates aren't in use" response = self.client.get('/no_template_view/') # Check that the no template case doesn't mess with the template assertions self.assertTemplateNotUsed(response, 'GET Template') try: self.assertTemplateUsed(response, 'GET Template') except AssertionError as e: self.assertIn("No templates used to render the response", str(e)) try: self.assertTemplateUsed(response, 'GET Template', msg_prefix='abc') except AssertionError as e: self.assertIn("abc: No templates used to render the response", str(e)) with self.assertRaises(AssertionError) as context: self.assertTemplateUsed(response, 'GET Template', count=2) self.assertIn( "No templates used to render the response", str(context.exception)) def test_single_context(self): "Template assertions work when there is a single context" response = self.client.get('/post_view/', {}) try: self.assertTemplateNotUsed(response, 'Empty GET Template') except AssertionError as e: self.assertIn("Template 'Empty GET Template' was used unexpectedly in rendering the response", str(e)) try: self.assertTemplateNotUsed(response, 'Empty GET Template', msg_prefix='abc') except AssertionError as e: self.assertIn("abc: Template 'Empty GET Template' was used unexpectedly in rendering the response", str(e)) try: self.assertTemplateUsed(response, 'Empty POST Template') except AssertionError as e: self.assertIn( "Template 'Empty POST Template' was not a template used to " "render the response. Actual template(s) used: Empty GET Template", str(e) ) try: self.assertTemplateUsed(response, 'Empty POST Template', msg_prefix='abc') except AssertionError as e: self.assertIn( "abc: Template 'Empty POST Template' was not a template used " "to render the response. Actual template(s) used: Empty GET Template", str(e) ) with self.assertRaises(AssertionError) as context: self.assertTemplateUsed(response, 'Empty GET Template', count=2) self.assertIn( "Template 'Empty GET Template' was expected to be rendered 2 " "time(s) but was actually rendered 1 time(s).", str(context.exception)) with self.assertRaises(AssertionError) as context: self.assertTemplateUsed( response, 'Empty GET Template', msg_prefix='abc', count=2) self.assertIn( "abc: Template 'Empty GET Template' was expected to be rendered 2 " "time(s) but was actually rendered 1 time(s).", str(context.exception)) def test_multiple_context(self): "Template assertions work when there are multiple contexts" post_data = { 'text': 'Hello World', 'email': '[email protected]', 'value': 37, 'single': 'b', 'multi': ('b', 'c', 'e') } response = self.client.post('/form_view_with_template/', post_data) self.assertContains(response, 'POST data OK') try: self.assertTemplateNotUsed(response, "form_view.html") except AssertionError as e: self.assertIn("Template 'form_view.html' was used unexpectedly in rendering the response", str(e)) try: self.assertTemplateNotUsed(response, 'base.html') except AssertionError as e: self.assertIn("Template 'base.html' was used unexpectedly in rendering the response", str(e)) try: self.assertTemplateUsed(response, "Valid POST Template") except AssertionError as e: self.assertIn( "Template 'Valid POST Template' was not a template used to " "render the response. 
Actual template(s) used: form_view.html, base.html", str(e) ) with self.assertRaises(AssertionError) as context: self.assertTemplateUsed(response, 'base.html', count=2) self.assertIn( "Template 'base.html' was expected to be rendered 2 " "time(s) but was actually rendered 1 time(s).", str(context.exception)) def test_template_rendered_multiple_times(self): """Template assertions work when a template is rendered multiple times.""" response = self.client.get('/render_template_multiple_times/') self.assertTemplateUsed(response, 'base.html', count=2) @override_settings(ROOT_URLCONF='test_client_regress.urls') class AssertRedirectsTests(SimpleTestCase): def test_redirect_page(self): "An assertion is raised if the original page couldn't be retrieved as expected" # This page will redirect with code 301, not 302 response = self.client.get('/permanent_redirect_view/') try: self.assertRedirects(response, '/get_view/') except AssertionError as e: self.assertIn("Response didn't redirect as expected: Response code was 301 (expected 302)", str(e)) try: self.assertRedirects(response, '/get_view/', msg_prefix='abc') except AssertionError as e: self.assertIn("abc: Response didn't redirect as expected: Response code was 301 (expected 302)", str(e)) def test_lost_query(self): "An assertion is raised if the redirect location doesn't preserve GET parameters" response = self.client.get('/redirect_view/', {'var': 'value'}) try: self.assertRedirects(response, '/get_view/') except AssertionError as e: self.assertIn("Response redirected to '/get_view/?var=value', expected '/get_view/'", str(e)) try: self.assertRedirects(response, '/get_view/', msg_prefix='abc') except AssertionError as e: self.assertIn("abc: Response redirected to '/get_view/?var=value', expected '/get_view/'", str(e)) def test_incorrect_target(self): "An assertion is raised if the response redirects to another target" response = self.client.get('/permanent_redirect_view/') try: # Should redirect to get_view self.assertRedirects(response, '/some_view/') except AssertionError as e: self.assertIn("Response didn't redirect as expected: Response code was 301 (expected 302)", str(e)) def test_target_page(self): "An assertion is raised if the response redirect target cannot be retrieved as expected" response = self.client.get('/double_redirect_view/') try: # The redirect target responds with a 301 code, not 200 self.assertRedirects(response, 'http://testserver/permanent_redirect_view/') except AssertionError as e: self.assertIn( "Couldn't retrieve redirection page '/permanent_redirect_view/': " "response code was 301 (expected 200)", str(e) ) try: # The redirect target responds with a 301 code, not 200 self.assertRedirects(response, 'http://testserver/permanent_redirect_view/', msg_prefix='abc') except AssertionError as e: self.assertIn( "abc: Couldn't retrieve redirection page '/permanent_redirect_view/': " "response code was 301 (expected 200)", str(e) ) def test_redirect_chain(self): "You can follow a redirect chain of multiple redirects" response = self.client.get('/redirects/further/more/', {}, follow=True) self.assertRedirects(response, '/no_template_view/', status_code=302, target_status_code=200) self.assertEqual(len(response.redirect_chain), 1) self.assertEqual(response.redirect_chain[0], ('/no_template_view/', 302)) def test_multiple_redirect_chain(self): "You can follow a redirect chain of multiple redirects" response = self.client.get('/redirects/', {}, follow=True) self.assertRedirects(response, '/no_template_view/', status_code=302, 
target_status_code=200) self.assertEqual(len(response.redirect_chain), 3) self.assertEqual(response.redirect_chain[0], ('/redirects/further/', 302)) self.assertEqual(response.redirect_chain[1], ('/redirects/further/more/', 302)) self.assertEqual(response.redirect_chain[2], ('/no_template_view/', 302)) def test_redirect_chain_to_non_existent(self): "You can follow a chain to a non-existent view" response = self.client.get('/redirect_to_non_existent_view2/', {}, follow=True) self.assertRedirects(response, '/non_existent_view/', status_code=302, target_status_code=404) def test_redirect_chain_to_self(self): "Redirections to self are caught and escaped" with self.assertRaises(RedirectCycleError) as context: self.client.get('/redirect_to_self/', {}, follow=True) response = context.exception.last_response # The chain of redirects stops once the cycle is detected. self.assertRedirects(response, '/redirect_to_self/', status_code=302, target_status_code=302) self.assertEqual(len(response.redirect_chain), 2) def test_redirect_to_self_with_changing_query(self): "Redirections don't loop forever even if query is changing" with self.assertRaises(RedirectCycleError): self.client.get('/redirect_to_self_with_changing_query_view/', {'counter': '0'}, follow=True) def test_circular_redirect(self): "Circular redirect chains are caught and escaped" with self.assertRaises(RedirectCycleError) as context: self.client.get('/circular_redirect_1/', {}, follow=True) response = context.exception.last_response # The chain of redirects will get back to the starting point, but stop there. self.assertRedirects(response, '/circular_redirect_2/', status_code=302, target_status_code=302) self.assertEqual(len(response.redirect_chain), 4) def test_redirect_chain_post(self): "A redirect chain will be followed from an initial POST post" response = self.client.post('/redirects/', {'nothing': 'to_send'}, follow=True) self.assertRedirects(response, '/no_template_view/', 302, 200) self.assertEqual(len(response.redirect_chain), 3) def test_redirect_chain_head(self): "A redirect chain will be followed from an initial HEAD request" response = self.client.head('/redirects/', {'nothing': 'to_send'}, follow=True) self.assertRedirects(response, '/no_template_view/', 302, 200) self.assertEqual(len(response.redirect_chain), 3) def test_redirect_chain_options(self): "A redirect chain will be followed from an initial OPTIONS request" response = self.client.options('/redirects/', follow=True) self.assertRedirects(response, '/no_template_view/', 302, 200) self.assertEqual(len(response.redirect_chain), 3) def test_redirect_chain_put(self): "A redirect chain will be followed from an initial PUT request" response = self.client.put('/redirects/', follow=True) self.assertRedirects(response, '/no_template_view/', 302, 200) self.assertEqual(len(response.redirect_chain), 3) def test_redirect_chain_delete(self): "A redirect chain will be followed from an initial DELETE request" response = self.client.delete('/redirects/', follow=True) self.assertRedirects(response, '/no_template_view/', 302, 200) self.assertEqual(len(response.redirect_chain), 3) def test_redirect_to_different_host(self): "The test client will preserve scheme, host and port changes" response = self.client.get('/redirect_other_host/', follow=True) self.assertRedirects( response, 'https://otherserver:8443/no_template_view/', status_code=302, target_status_code=200 ) # We can't use is_secure() or get_host() # because response.request is a dictionary, not an HttpRequest 
self.assertEqual(response.request.get('wsgi.url_scheme'), 'https') self.assertEqual(response.request.get('SERVER_NAME'), 'otherserver') self.assertEqual(response.request.get('SERVER_PORT'), '8443') def test_redirect_chain_on_non_redirect_page(self): "An assertion is raised if the original page couldn't be retrieved as expected" # This page will redirect with code 301, not 302 response = self.client.get('/get_view/', follow=True) try: self.assertRedirects(response, '/get_view/') except AssertionError as e: self.assertIn("Response didn't redirect as expected: Response code was 200 (expected 302)", str(e)) try: self.assertRedirects(response, '/get_view/', msg_prefix='abc') except AssertionError as e: self.assertIn("abc: Response didn't redirect as expected: Response code was 200 (expected 302)", str(e)) def test_redirect_on_non_redirect_page(self): "An assertion is raised if the original page couldn't be retrieved as expected" # This page will redirect with code 301, not 302 response = self.client.get('/get_view/') try: self.assertRedirects(response, '/get_view/') except AssertionError as e: self.assertIn("Response didn't redirect as expected: Response code was 200 (expected 302)", str(e)) try: self.assertRedirects(response, '/get_view/', msg_prefix='abc') except AssertionError as e: self.assertIn("abc: Response didn't redirect as expected: Response code was 200 (expected 302)", str(e)) def test_redirect_scheme(self): "An assertion is raised if the response doesn't have the scheme specified in expected_url" # For all possible True/False combinations of follow and secure for follow, secure in itertools.product([True, False], repeat=2): # always redirects to https response = self.client.get('/https_redirect_view/', follow=follow, secure=secure) # the goal scheme is https self.assertRedirects(response, 'https://testserver/secure_view/', status_code=302) with self.assertRaises(AssertionError): self.assertRedirects(response, 'http://testserver/secure_view/', status_code=302) @ignore_warnings(category=RemovedInDjango20Warning) def test_full_path_in_expected_urls(self): """ Test that specifying a full URL as assertRedirects expected_url still work as backwards compatible behavior until Django 2.0. 
""" response = self.client.get('/redirect_view/') self.assertRedirects(response, 'http://testserver/get_view/') @override_settings(ROOT_URLCONF='test_client_regress.urls') class AssertFormErrorTests(SimpleTestCase): def test_unknown_form(self): "An assertion is raised if the form name is unknown" post_data = { 'text': 'Hello World', 'email': 'not an email address', 'value': 37, 'single': 'b', 'multi': ('b', 'c', 'e') } response = self.client.post('/form_view/', post_data) self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, "Invalid POST Template") try: self.assertFormError(response, 'wrong_form', 'some_field', 'Some error.') except AssertionError as e: self.assertIn("The form 'wrong_form' was not used to render the response", str(e)) try: self.assertFormError(response, 'wrong_form', 'some_field', 'Some error.', msg_prefix='abc') except AssertionError as e: self.assertIn("abc: The form 'wrong_form' was not used to render the response", str(e)) def test_unknown_field(self): "An assertion is raised if the field name is unknown" post_data = { 'text': 'Hello World', 'email': 'not an email address', 'value': 37, 'single': 'b', 'multi': ('b', 'c', 'e') } response = self.client.post('/form_view/', post_data) self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, "Invalid POST Template") try: self.assertFormError(response, 'form', 'some_field', 'Some error.') except AssertionError as e: self.assertIn("The form 'form' in context 0 does not contain the field 'some_field'", str(e)) try: self.assertFormError(response, 'form', 'some_field', 'Some error.', msg_prefix='abc') except AssertionError as e: self.assertIn("abc: The form 'form' in context 0 does not contain the field 'some_field'", str(e)) def test_noerror_field(self): "An assertion is raised if the field doesn't have any errors" post_data = { 'text': 'Hello World', 'email': 'not an email address', 'value': 37, 'single': 'b', 'multi': ('b', 'c', 'e') } response = self.client.post('/form_view/', post_data) self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, "Invalid POST Template") try: self.assertFormError(response, 'form', 'value', 'Some error.') except AssertionError as e: self.assertIn("The field 'value' on form 'form' in context 0 contains no errors", str(e)) try: self.assertFormError(response, 'form', 'value', 'Some error.', msg_prefix='abc') except AssertionError as e: self.assertIn("abc: The field 'value' on form 'form' in context 0 contains no errors", str(e)) def test_unknown_error(self): "An assertion is raised if the field doesn't contain the provided error" post_data = { 'text': 'Hello World', 'email': 'not an email address', 'value': 37, 'single': 'b', 'multi': ('b', 'c', 'e') } response = self.client.post('/form_view/', post_data) self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, "Invalid POST Template") try: self.assertFormError(response, 'form', 'email', 'Some error.') except AssertionError as e: self.assertIn( str_prefix( "The field 'email' on form 'form' in context 0 does not " "contain the error 'Some error.' (actual errors: " "[%(_)s'Enter a valid email address.'])" ), str(e) ) try: self.assertFormError(response, 'form', 'email', 'Some error.', msg_prefix='abc') except AssertionError as e: self.assertIn( str_prefix( "abc: The field 'email' on form 'form' in context 0 does " "not contain the error 'Some error.' 
(actual errors: " "[%(_)s'Enter a valid email address.'])", ), str(e) ) def test_unknown_nonfield_error(self): """ Checks that an assertion is raised if the form's non field errors doesn't contain the provided error. """ post_data = { 'text': 'Hello World', 'email': 'not an email address', 'value': 37, 'single': 'b', 'multi': ('b', 'c', 'e') } response = self.client.post('/form_view/', post_data) self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, "Invalid POST Template") try: self.assertFormError(response, 'form', None, 'Some error.') except AssertionError as e: self.assertIn( "The form 'form' in context 0 does not contain the non-field " "error 'Some error.' (actual errors: )", str(e) ) try: self.assertFormError(response, 'form', None, 'Some error.', msg_prefix='abc') except AssertionError as e: self.assertIn( "abc: The form 'form' in context 0 does not contain the " "non-field error 'Some error.' (actual errors: )", str(e) ) @override_settings(ROOT_URLCONF='test_client_regress.urls') class AssertFormsetErrorTests(SimpleTestCase): msg_prefixes = [("", {}), ("abc: ", {"msg_prefix": "abc"})] def setUp(self): """Makes response object for testing field and non-field errors""" # For testing field and non-field errors self.response_form_errors = self.getResponse({ 'form-TOTAL_FORMS': '2', 'form-INITIAL_FORMS': '2', 'form-0-text': 'Raise non-field error', 'form-0-email': 'not an email address', 'form-0-value': 37, 'form-0-single': 'b', 'form-0-multi': ('b', 'c', 'e'), 'form-1-text': 'Hello World', 'form-1-email': '[email protected]', 'form-1-value': 37, 'form-1-single': 'b', 'form-1-multi': ('b', 'c', 'e'), }) # For testing non-form errors self.response_nonform_errors = self.getResponse({ 'form-TOTAL_FORMS': '2', 'form-INITIAL_FORMS': '2', 'form-0-text': 'Hello World', 'form-0-email': '[email protected]', 'form-0-value': 37, 'form-0-single': 'b', 'form-0-multi': ('b', 'c', 'e'), 'form-1-text': 'Hello World', 'form-1-email': '[email protected]', 'form-1-value': 37, 'form-1-single': 'b', 'form-1-multi': ('b', 'c', 'e'), }) def getResponse(self, post_data): response = self.client.post('/formset_view/', post_data) self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, "Invalid POST Template") return response def test_unknown_formset(self): "An assertion is raised if the formset name is unknown" for prefix, kwargs in self.msg_prefixes: msg = prefix + "The formset 'wrong_formset' was not used to render the response" with self.assertRaisesMessage(AssertionError, msg): self.assertFormsetError( self.response_form_errors, 'wrong_formset', 0, 'Some_field', 'Some error.', **kwargs ) def test_unknown_field(self): "An assertion is raised if the field name is unknown" for prefix, kwargs in self.msg_prefixes: msg = prefix + "The formset 'my_formset', form 0 in context 0 does not contain the field 'Some_field'" with self.assertRaisesMessage(AssertionError, msg): self.assertFormsetError( self.response_form_errors, 'my_formset', 0, 'Some_field', 'Some error.', **kwargs ) def test_no_error_field(self): "An assertion is raised if the field doesn't have any errors" for prefix, kwargs in self.msg_prefixes: msg = prefix + "The field 'value' on formset 'my_formset', form 1 in context 0 contains no errors" with self.assertRaisesMessage(AssertionError, msg): self.assertFormsetError(self.response_form_errors, 'my_formset', 1, 'value', 'Some error.', **kwargs) def test_unknown_error(self): "An assertion is raised if the field doesn't contain the specified error" for prefix, 
kwargs in self.msg_prefixes: msg = str_prefix( prefix + "The field 'email' on formset 'my_formset', form 0 " "in context 0 does not contain the error 'Some error.' " "(actual errors: [%(_)s'Enter a valid email address.'])" ) with self.assertRaisesMessage(AssertionError, msg): self.assertFormsetError(self.response_form_errors, 'my_formset', 0, 'email', 'Some error.', **kwargs) def test_field_error(self): "No assertion is raised if the field contains the provided error" error_msg = ['Enter a valid email address.'] for prefix, kwargs in self.msg_prefixes: self.assertFormsetError(self.response_form_errors, 'my_formset', 0, 'email', error_msg, **kwargs) def test_no_nonfield_error(self): "An assertion is raised if the formsets non-field errors doesn't contain any errors." for prefix, kwargs in self.msg_prefixes: msg = prefix + "The formset 'my_formset', form 1 in context 0 does not contain any non-field errors." with self.assertRaisesMessage(AssertionError, msg): self.assertFormsetError(self.response_form_errors, 'my_formset', 1, None, 'Some error.', **kwargs) def test_unknown_nonfield_error(self): "An assertion is raised if the formsets non-field errors doesn't contain the provided error." for prefix, kwargs in self.msg_prefixes: msg = str_prefix( prefix + "The formset 'my_formset', form 0 in context 0 does not " "contain the non-field error 'Some error.' (actual errors: " "[%(_)s'Non-field error.'])" ) with self.assertRaisesMessage(AssertionError, msg): self.assertFormsetError(self.response_form_errors, 'my_formset', 0, None, 'Some error.', **kwargs) def test_nonfield_error(self): "No assertion is raised if the formsets non-field errors contains the provided error." for prefix, kwargs in self.msg_prefixes: self.assertFormsetError(self.response_form_errors, 'my_formset', 0, None, 'Non-field error.', **kwargs) def test_no_nonform_error(self): "An assertion is raised if the formsets non-form errors doesn't contain any errors." for prefix, kwargs in self.msg_prefixes: msg = prefix + "The formset 'my_formset' in context 0 does not contain any non-form errors." with self.assertRaisesMessage(AssertionError, msg): self.assertFormsetError(self.response_form_errors, 'my_formset', None, None, 'Some error.', **kwargs) def test_unknown_nonform_error(self): "An assertion is raised if the formsets non-form errors doesn't contain the provided error." for prefix, kwargs in self.msg_prefixes: msg = str_prefix( prefix + "The formset 'my_formset' in context 0 does not contain the " "non-form error 'Some error.' (actual errors: [%(_)s'Forms " "in a set must have distinct email addresses.'])" ) with self.assertRaisesMessage(AssertionError, msg): self.assertFormsetError( self.response_nonform_errors, 'my_formset', None, None, 'Some error.', **kwargs ) def test_nonform_error(self): "No assertion is raised if the formsets non-form errors contains the provided error." msg = 'Forms in a set must have distinct email addresses.' for prefix, kwargs in self.msg_prefixes: self.assertFormsetError(self.response_nonform_errors, 'my_formset', None, None, msg, **kwargs) @override_settings(ROOT_URLCONF='test_client_regress.urls') class LoginTests(TestDataMixin, TestCase): def test_login_different_client(self): "Check that using a different test client doesn't violate authentication" # Create a second client, and log in. c = Client() login = c.login(username='testclient', password='password') self.assertTrue(login, 'Could not log in') # Get a redirection page with the second client. 
response = c.get("/login_protected_redirect_view/") # At this points, the self.client isn't logged in. # Check that assertRedirects uses the original client, not the # default client. self.assertRedirects(response, "/get_view/") @override_settings( SESSION_ENGINE='test_client_regress.session', ROOT_URLCONF='test_client_regress.urls', ) class SessionEngineTests(TestDataMixin, TestCase): def test_login(self): "A session engine that modifies the session key can be used to log in" login = self.client.login(username='testclient', password='password') self.assertTrue(login, 'Could not log in') # Try to access a login protected page. response = self.client.get("/login_protected_view/") self.assertEqual(response.status_code, 200) self.assertEqual(response.context['user'].username, 'testclient') @override_settings(ROOT_URLCONF='test_client_regress.urls',) class URLEscapingTests(SimpleTestCase): def test_simple_argument_get(self): "Get a view that has a simple string argument" response = self.client.get(reverse('arg_view', args=['Slartibartfast'])) self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b'Howdy, Slartibartfast') def test_argument_with_space_get(self): "Get a view that has a string argument that requires escaping" response = self.client.get(reverse('arg_view', args=['Arthur Dent'])) self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b'Hi, Arthur') def test_simple_argument_post(self): "Post for a view that has a simple string argument" response = self.client.post(reverse('arg_view', args=['Slartibartfast'])) self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b'Howdy, Slartibartfast') def test_argument_with_space_post(self): "Post for a view that has a string argument that requires escaping" response = self.client.post(reverse('arg_view', args=['Arthur Dent'])) self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b'Hi, Arthur') @override_settings(ROOT_URLCONF='test_client_regress.urls') class ExceptionTests(TestDataMixin, TestCase): def test_exception_cleared(self): "#5836 - A stale user exception isn't re-raised by the test client." login = self.client.login(username='testclient', password='password') self.assertTrue(login, 'Could not log in') try: self.client.get("/staff_only/") self.fail("General users should not be able to visit this page") except CustomTestException: pass # At this point, an exception has been raised, and should be cleared. # This next operation should be successful; if it isn't we have a problem. login = self.client.login(username='staff', password='password') self.assertTrue(login, 'Could not log in') try: self.client.get("/staff_only/") except CustomTestException: self.fail("Staff should be able to visit this page") @override_settings(ROOT_URLCONF='test_client_regress.urls') class TemplateExceptionTests(SimpleTestCase): @override_settings(TEMPLATES=[{ 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [os.path.join(os.path.dirname(upath(__file__)), 'bad_templates')], }]) def test_bad_404_template(self): "Errors found when rendering 404 error templates are re-raised" try: self.client.get("/no_such_view/") except TemplateSyntaxError: pass else: self.fail("Should get error about syntax error in template") # We need two different tests to check URLconf substitution - one to check # it was changed, and another one (without self.urls) to check it was reverted on # teardown. 
This pair of tests relies upon the alphabetical ordering of test execution. @override_settings(ROOT_URLCONF='test_client_regress.urls') class UrlconfSubstitutionTests(SimpleTestCase): def test_urlconf_was_changed(self): "TestCase can enforce a custom URLconf on a per-test basis" url = reverse('arg_view', args=['somename']) self.assertEqual(url, '/arg_view/somename/') # This test needs to run *after* UrlconfSubstitutionTests; the zz prefix in the # name is to ensure alphabetical ordering. class zzUrlconfSubstitutionTests(SimpleTestCase): def test_urlconf_was_reverted(self): """URLconf is reverted to original value after modification in a TestCase This will not find a match as the default ROOT_URLCONF is empty. """ with self.assertRaises(NoReverseMatch): reverse('arg_view', args=['somename']) @override_settings(ROOT_URLCONF='test_client_regress.urls') class ContextTests(TestDataMixin, TestCase): def test_single_context(self): "Context variables can be retrieved from a single context" response = self.client.get("/request_data/", data={'foo': 'whiz'}) self.assertIsInstance(response.context, RequestContext) self.assertIn('get-foo', response.context) self.assertEqual(response.context['get-foo'], 'whiz') self.assertEqual(response.context['data'], 'sausage') try: response.context['does-not-exist'] self.fail('Should not be able to retrieve non-existent key') except KeyError as e: self.assertEqual(e.args[0], 'does-not-exist') def test_inherited_context(self): "Context variables can be retrieved from a list of contexts" response = self.client.get("/request_data_extended/", data={'foo': 'whiz'}) self.assertEqual(response.context.__class__, ContextList) self.assertEqual(len(response.context), 2) self.assertIn('get-foo', response.context) self.assertEqual(response.context['get-foo'], 'whiz') self.assertEqual(response.context['data'], 'bacon') with self.assertRaises(KeyError) as cm: response.context['does-not-exist'] self.assertEqual(cm.exception.args[0], 'does-not-exist') def test_contextlist_keys(self): c1 = Context() c1.update({'hello': 'world', 'goodbye': 'john'}) c1.update({'hello': 'dolly', 'dolly': 'parton'}) c2 = Context() c2.update({'goodbye': 'world', 'python': 'rocks'}) c2.update({'goodbye': 'dolly'}) l = ContextList([c1, c2]) # None, True and False are builtins of BaseContext, and present # in every Context without needing to be added. self.assertEqual({'None', 'True', 'False', 'hello', 'goodbye', 'python', 'dolly'}, l.keys()) def test_15368(self): # Need to insert a context processor that assumes certain things about # the request instance. This triggers a bug caused by some ways of # copying RequestContext. with self.settings(TEMPLATES=[{ 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'test_client_regress.context_processors.special', ], }, }]): response = self.client.get("/request_context_view/") self.assertContains(response, 'Path: /request_context_view/') def test_nested_requests(self): """ response.context is not lost when view call another view. """ response = self.client.get("/nested_view/") self.assertIsInstance(response.context, RequestContext) self.assertEqual(response.context['nested'], 'yes') @override_settings(ROOT_URLCONF='test_client_regress.urls') class SessionTests(TestDataMixin, TestCase): def test_session(self): "The session isn't lost if a user logs in" # The session doesn't exist to start. 
response = self.client.get('/check_session/') self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b'NO') # This request sets a session variable. response = self.client.get('/set_session/') self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b'set_session') # Check that the session has been modified response = self.client.get('/check_session/') self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b'YES') # Log in login = self.client.login(username='testclient', password='password') self.assertTrue(login, 'Could not log in') # Session should still contain the modified value response = self.client.get('/check_session/') self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b'YES') def test_session_initiated(self): session = self.client.session session['session_var'] = 'foo' session.save() response = self.client.get('/check_session/') self.assertEqual(response.content, b'foo') def test_logout(self): """Logout should work whether the user is logged in or not (#9978).""" self.client.logout() login = self.client.login(username='testclient', password='password') self.assertTrue(login, 'Could not log in') self.client.logout() self.client.logout() def test_logout_with_user(self): """Logout should send user_logged_out signal if user was logged in.""" def listener(*args, **kwargs): listener.executed = True self.assertEqual(kwargs['sender'], User) listener.executed = False user_logged_out.connect(listener) self.client.login(username='testclient', password='password') self.client.logout() user_logged_out.disconnect(listener) self.assertTrue(listener.executed) @override_settings(AUTH_USER_MODEL='test_client_regress.CustomUser') def test_logout_with_custom_user(self): """Logout should send user_logged_out signal if custom user was logged in.""" def listener(*args, **kwargs): self.assertEqual(kwargs['sender'], CustomUser) listener.executed = True listener.executed = False u = CustomUser.custom_objects.create(email='[email protected]') u.set_password('password') u.save() user_logged_out.connect(listener) self.client.login(username='[email protected]', password='password') self.client.logout() user_logged_out.disconnect(listener) self.assertTrue(listener.executed) @override_settings(AUTHENTICATION_BACKENDS=( 'django.contrib.auth.backends.ModelBackend', 'test_client_regress.auth_backends.CustomUserBackend')) def test_logout_with_custom_auth_backend(self): "Request a logout after logging in with custom authentication backend" def listener(*args, **kwargs): self.assertEqual(kwargs['sender'], CustomUser) listener.executed = True listener.executed = False u = CustomUser.custom_objects.create(email='[email protected]') u.set_password('password') u.save() user_logged_out.connect(listener) self.client.login(username='[email protected]', password='password') self.client.logout() user_logged_out.disconnect(listener) self.assertTrue(listener.executed) def test_logout_without_user(self): """Logout should send signal even if user not authenticated.""" def listener(user, *args, **kwargs): listener.user = user listener.executed = True listener.executed = False user_logged_out.connect(listener) self.client.login(username='incorrect', password='password') self.client.logout() user_logged_out.disconnect(listener) self.assertTrue(listener.executed) self.assertIsNone(listener.user) def test_login_with_user(self): """Login should send user_logged_in signal on successful login.""" def listener(*args, **kwargs): 
listener.executed = True listener.executed = False user_logged_in.connect(listener) self.client.login(username='testclient', password='password') user_logged_out.disconnect(listener) self.assertTrue(listener.executed) def test_login_without_signal(self): """Login shouldn't send signal if user wasn't logged in""" def listener(*args, **kwargs): listener.executed = True listener.executed = False user_logged_in.connect(listener) self.client.login(username='incorrect', password='password') user_logged_in.disconnect(listener) self.assertFalse(listener.executed) @override_settings(ROOT_URLCONF='test_client_regress.urls') class RequestMethodTests(SimpleTestCase): def test_get(self): "Request a view via request method GET" response = self.client.get('/request_methods/') self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b'request method: GET') def test_post(self): "Request a view via request method POST" response = self.client.post('/request_methods/') self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b'request method: POST') def test_head(self): "Request a view via request method HEAD" response = self.client.head('/request_methods/') self.assertEqual(response.status_code, 200) # A HEAD request doesn't return any content. self.assertNotEqual(response.content, b'request method: HEAD') self.assertEqual(response.content, b'') def test_options(self): "Request a view via request method OPTIONS" response = self.client.options('/request_methods/') self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b'request method: OPTIONS') def test_put(self): "Request a view via request method PUT" response = self.client.put('/request_methods/') self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b'request method: PUT') def test_delete(self): "Request a view via request method DELETE" response = self.client.delete('/request_methods/') self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b'request method: DELETE') def test_patch(self): "Request a view via request method PATCH" response = self.client.patch('/request_methods/') self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b'request method: PATCH') @override_settings(ROOT_URLCONF='test_client_regress.urls') class RequestMethodStringDataTests(SimpleTestCase): def test_post(self): "Request a view with string data via request method POST" # Regression test for #11371 data = '{"test": "json"}' response = self.client.post('/request_methods/', data=data, content_type='application/json') self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b'request method: POST') def test_put(self): "Request a view with string data via request method PUT" # Regression test for #11371 data = '{"test": "json"}' response = self.client.put('/request_methods/', data=data, content_type='application/json') self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b'request method: PUT') def test_patch(self): "Request a view with string data via request method PATCH" # Regression test for #17797 data = '{"test": "json"}' response = self.client.patch('/request_methods/', data=data, content_type='application/json') self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b'request method: PATCH') def test_empty_string_data(self): "Request a view with empty string data via request method GET/POST/HEAD" # Regression test for #21740 response = self.client.get('/body/', 
data='', content_type='application/json') self.assertEqual(response.content, b'') response = self.client.post('/body/', data='', content_type='application/json') self.assertEqual(response.content, b'') response = self.client.head('/body/', data='', content_type='application/json') self.assertEqual(response.content, b'') def test_json(self): response = self.client.get('/json_response/') self.assertEqual(response.json(), {'key': 'value'}) def test_json_wrong_header(self): response = self.client.get('/body/') msg = 'Content-Type header is "text/html; charset=utf-8", not "application/json"' with self.assertRaisesMessage(ValueError, msg): self.assertEqual(response.json(), {'key': 'value'}) @override_settings(ROOT_URLCONF='test_client_regress.urls',) class QueryStringTests(SimpleTestCase): def test_get_like_requests(self): # See: https://code.djangoproject.com/ticket/10571. for method_name in ('get', 'head'): # A GET-like request can pass a query string as data method = getattr(self.client, method_name) response = method("/request_data/", data={'foo': 'whiz'}) self.assertEqual(response.context['get-foo'], 'whiz') # A GET-like request can pass a query string as part of the URL response = method("/request_data/?foo=whiz") self.assertEqual(response.context['get-foo'], 'whiz') # Data provided in the URL to a GET-like request is overridden by actual form data response = method("/request_data/?foo=whiz", data={'foo': 'bang'}) self.assertEqual(response.context['get-foo'], 'bang') response = method("/request_data/?foo=whiz", data={'bar': 'bang'}) self.assertEqual(response.context['get-foo'], None) self.assertEqual(response.context['get-bar'], 'bang') def test_post_like_requests(self): # A POST-like request can pass a query string as data response = self.client.post("/request_data/", data={'foo': 'whiz'}) self.assertEqual(response.context['get-foo'], None) self.assertEqual(response.context['post-foo'], 'whiz') # A POST-like request can pass a query string as part of the URL response = self.client.post("/request_data/?foo=whiz") self.assertEqual(response.context['get-foo'], 'whiz') self.assertEqual(response.context['post-foo'], None) # POST data provided in the URL augments actual form data response = self.client.post("/request_data/?foo=whiz", data={'foo': 'bang'}) self.assertEqual(response.context['get-foo'], 'whiz') self.assertEqual(response.context['post-foo'], 'bang') response = self.client.post("/request_data/?foo=whiz", data={'bar': 'bang'}) self.assertEqual(response.context['get-foo'], 'whiz') self.assertEqual(response.context['get-bar'], None) self.assertEqual(response.context['post-foo'], None) self.assertEqual(response.context['post-bar'], 'bang') @override_settings(ROOT_URLCONF='test_client_regress.urls') class UnicodePayloadTests(SimpleTestCase): def test_simple_unicode_payload(self): "A simple ASCII-only unicode JSON document can be POSTed" # Regression test for #10571 json = '{"english": "mountain pass"}' response = self.client.post("/parse_unicode_json/", json, content_type="application/json") self.assertEqual(response.content, json.encode()) def test_unicode_payload_utf8(self): "A non-ASCII unicode data encoded as UTF-8 can be POSTed" # Regression test for #10571 json = '{"dog": "собака"}' response = self.client.post("/parse_unicode_json/", json, content_type="application/json; charset=utf-8") self.assertEqual(response.content, json.encode('utf-8')) def test_unicode_payload_utf16(self): "A non-ASCII unicode data encoded as UTF-16 can be POSTed" # Regression test for #10571 json = '{"dog": 
"собака"}' response = self.client.post("/parse_unicode_json/", json, content_type="application/json; charset=utf-16") self.assertEqual(response.content, json.encode('utf-16')) def test_unicode_payload_non_utf(self): "A non-ASCII unicode data as a non-UTF based encoding can be POSTed" # Regression test for #10571 json = '{"dog": "собака"}' response = self.client.post("/parse_unicode_json/", json, content_type="application/json; charset=koi8-r") self.assertEqual(response.content, json.encode('koi8-r')) class DummyFile(object): def __init__(self, filename): self.name = filename def read(self): return b'TEST_FILE_CONTENT' class UploadedFileEncodingTest(SimpleTestCase): def test_file_encoding(self): encoded_file = encode_file('TEST_BOUNDARY', 'TEST_KEY', DummyFile('test_name.bin')) self.assertEqual(b'--TEST_BOUNDARY', encoded_file[0]) self.assertEqual(b'Content-Disposition: form-data; name="TEST_KEY"; filename="test_name.bin"', encoded_file[1]) self.assertEqual(b'TEST_FILE_CONTENT', encoded_file[-1]) def test_guesses_content_type_on_file_encoding(self): self.assertEqual(b'Content-Type: application/octet-stream', encode_file('IGNORE', 'IGNORE', DummyFile("file.bin"))[2]) self.assertEqual(b'Content-Type: text/plain', encode_file('IGNORE', 'IGNORE', DummyFile("file.txt"))[2]) self.assertIn(encode_file('IGNORE', 'IGNORE', DummyFile("file.zip"))[2], ( b'Content-Type: application/x-compress', b'Content-Type: application/x-zip', b'Content-Type: application/x-zip-compressed', b'Content-Type: application/zip',)) self.assertEqual(b'Content-Type: application/octet-stream', encode_file('IGNORE', 'IGNORE', DummyFile("file.unknown"))[2]) @override_settings(ROOT_URLCONF='test_client_regress.urls',) class RequestHeadersTest(SimpleTestCase): def test_client_headers(self): "A test client can receive custom headers" response = self.client.get("/check_headers/", HTTP_X_ARG_CHECK='Testing 123') self.assertEqual(response.content, b"HTTP_X_ARG_CHECK: Testing 123") self.assertEqual(response.status_code, 200) def test_client_headers_redirect(self): "Test client headers are preserved through redirects" response = self.client.get("/check_headers_redirect/", follow=True, HTTP_X_ARG_CHECK='Testing 123') self.assertEqual(response.content, b"HTTP_X_ARG_CHECK: Testing 123") self.assertRedirects(response, '/check_headers/', status_code=302, target_status_code=200) @override_settings(ROOT_URLCONF='test_client_regress.urls') class ReadLimitedStreamTest(SimpleTestCase): """ Tests that ensure that HttpRequest.body, HttpRequest.read() and HttpRequest.read(BUFFER) have proper LimitedStream behavior. 
Refs #14753, #15785 """ def test_body_from_empty_request(self): """HttpRequest.body on a test client GET request should return the empty string.""" self.assertEqual(self.client.get("/body/").content, b'') def test_read_from_empty_request(self): """HttpRequest.read() on a test client GET request should return the empty string.""" self.assertEqual(self.client.get("/read_all/").content, b'') def test_read_numbytes_from_empty_request(self): """HttpRequest.read(LARGE_BUFFER) on a test client GET request should return the empty string.""" self.assertEqual(self.client.get("/read_buffer/").content, b'') def test_read_from_nonempty_request(self): """HttpRequest.read() on a test client PUT request with some payload should return that payload.""" payload = b'foobar' self.assertEqual(self.client.put("/read_all/", data=payload, content_type='text/plain').content, payload) def test_read_numbytes_from_nonempty_request(self): """HttpRequest.read(LARGE_BUFFER) on a test client PUT request with some payload should return that payload.""" payload = b'foobar' self.assertEqual(self.client.put("/read_buffer/", data=payload, content_type='text/plain').content, payload) @override_settings(ROOT_URLCONF='test_client_regress.urls') class RequestFactoryStateTest(SimpleTestCase): """Regression tests for #15929.""" # These tests are checking that certain middleware don't change certain # global state. Alternatively, from the point of view of a test, they are # ensuring test isolation behavior. So, unusually, it doesn't make sense to # run the tests individually, and if any are failing it is confusing to run # them with any other set of tests. def common_test_that_should_always_pass(self): request = RequestFactory().get('/') request.session = {} self.assertFalse(hasattr(request, 'user')) def test_request(self): self.common_test_that_should_always_pass() def test_request_after_client(self): # apart from the next line the three tests are identical self.client.get('/') self.common_test_that_should_always_pass() def test_request_after_client_2(self): # This test is executed after the previous one self.common_test_that_should_always_pass() @override_settings(ROOT_URLCONF='test_client_regress.urls') class RequestFactoryEnvironmentTests(SimpleTestCase): """ Regression tests for #8551 and #17067: ensure that environment variables are set correctly in RequestFactory. """ def test_should_set_correct_env_variables(self): request = RequestFactory().get('/path/') self.assertEqual(request.META.get('REMOTE_ADDR'), '127.0.0.1') self.assertEqual(request.META.get('SERVER_NAME'), 'testserver') self.assertEqual(request.META.get('SERVER_PORT'), '80') self.assertEqual(request.META.get('SERVER_PROTOCOL'), 'HTTP/1.1') self.assertEqual(request.META.get('SCRIPT_NAME') + request.META.get('PATH_INFO'), '/path/')
bsd-3-clause
-2,718,380,387,212,263,000
43.535389
119
0.63463
false
kiall/designate-py3
designate/storage/impl_sqlalchemy/migrate_repo/versions/058_placeholder.py
140
1035
# Copyright 2015 Hewlett-Packard Development Company, L.P. # # Author: Kiall Mac Innes <[email protected]> # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # This is a placeholder for Kilo backports. # Do not use this number for new Liberty work. New Liberty work starts after # all the placeholders. # # See https://blueprints.launchpad.net/nova/+spec/backportable-db-migrations # http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html def upgrade(migrate_engine): pass def downgrade(migration_engine): pass
apache-2.0
-7,657,652,798,236,455,000
33.5
76
0.758454
false
chirilo/mozillians
vendor-local/lib/python/djcelery/tests/test_backends/test_cache.py
30
4992
from __future__ import absolute_import import sys from datetime import timedelta from django.core.cache.backends.base import InvalidCacheBackendError from celery import result from celery import states from celery.utils import gen_unique_id from celery.datastructures import ExceptionInfo from djcelery.backends.cache import CacheBackend from djcelery.tests.utils import unittest class SomeClass(object): def __init__(self, data): self.data = data class test_CacheBackend(unittest.TestCase): def test_mark_as_done(self): cb = CacheBackend() tid = gen_unique_id() self.assertEqual(cb.get_status(tid), states.PENDING) self.assertIsNone(cb.get_result(tid)) cb.mark_as_done(tid, 42) self.assertEqual(cb.get_status(tid), states.SUCCESS) self.assertEqual(cb.get_result(tid), 42) self.assertTrue(cb.get_result(tid), 42) def test_forget(self): b = CacheBackend() tid = gen_unique_id() b.mark_as_done(tid, {"foo": "bar"}) self.assertEqual(b.get_result(tid).get("foo"), "bar") b.forget(tid) self.assertNotIn(tid, b._cache) self.assertIsNone(b.get_result(tid)) def test_save_restore_delete_taskset(self): backend = CacheBackend() taskset_id = gen_unique_id() subtask_ids = [gen_unique_id() for i in range(10)] subtasks = map(result.AsyncResult, subtask_ids) res = result.TaskSetResult(taskset_id, subtasks) res.save(backend=backend) saved = result.TaskSetResult.restore(taskset_id, backend=backend) self.assertListEqual(saved.subtasks, subtasks) self.assertEqual(saved.taskset_id, taskset_id) saved.delete(backend=backend) self.assertIsNone(result.TaskSetResult.restore(taskset_id, backend=backend)) def test_is_pickled(self): cb = CacheBackend() tid2 = gen_unique_id() result = {"foo": "baz", "bar": SomeClass(12345)} cb.mark_as_done(tid2, result) # is serialized properly. rindb = cb.get_result(tid2) self.assertEqual(rindb.get("foo"), "baz") self.assertEqual(rindb.get("bar").data, 12345) def test_mark_as_failure(self): cb = CacheBackend() einfo = None tid3 = gen_unique_id() try: raise KeyError("foo") except KeyError, exception: einfo = ExceptionInfo(sys.exc_info()) pass cb.mark_as_failure(tid3, exception, traceback=einfo.traceback) self.assertEqual(cb.get_status(tid3), states.FAILURE) self.assertIsInstance(cb.get_result(tid3), KeyError) self.assertEqual(cb.get_traceback(tid3), einfo.traceback) def test_process_cleanup(self): cb = CacheBackend() cb.process_cleanup() def test_set_expires(self): cb1 = CacheBackend(expires=timedelta(seconds=16)) self.assertEqual(cb1.expires, 16) cb2 = CacheBackend(expires=32) self.assertEqual(cb2.expires, 32) class test_custom_CacheBackend(unittest.TestCase): def test_custom_cache_backend(self): from celery import current_app prev_backend = current_app.conf.CELERY_CACHE_BACKEND prev_module = sys.modules["djcelery.backends.cache"] current_app.conf.CELERY_CACHE_BACKEND = "dummy://" sys.modules.pop("djcelery.backends.cache") try: from djcelery.backends.cache import cache from django.core.cache import cache as django_cache self.assertEqual(cache.__class__.__module__, "django.core.cache.backends.dummy") self.assertIsNot(cache, django_cache) finally: current_app.conf.CELERY_CACHE_BACKEND = prev_backend sys.modules["djcelery.backends.cache"] = prev_module class test_MemcacheWrapper(unittest.TestCase): def test_memcache_wrapper(self): try: from django.core.cache.backends import memcached from django.core.cache.backends import locmem except InvalidCacheBackendError: sys.stderr.write( "\n* Memcache library is not installed. 
Skipping test.\n") return prev_cache_cls = memcached.CacheClass memcached.CacheClass = locmem.CacheClass prev_backend_module = sys.modules.pop("djcelery.backends.cache") try: from djcelery.backends.cache import cache key = "cu.test_memcache_wrapper" val = "The quick brown fox." default = "The lazy dog." self.assertEqual(cache.get(key, default=default), default) cache.set(key, val) self.assertEqual(cache.get(key, default=default), val) finally: memcached.CacheClass = prev_cache_cls sys.modules["djcelery.backends.cache"] = prev_backend_module
bsd-3-clause
-700,960,799,755,242,400
33.666667
74
0.626002
false
duolinwang/MusiteDeep
MusiteDeep/train_general.py
1
4521
import sys import os import pandas as pd import numpy as np import argparse def main(): parser=argparse.ArgumentParser(description='MusiteDeep custom training tool for general PTM prediction.') parser.add_argument('-input', dest='inputfile', type=str, help='training data in fasta format. Sites followed by "#" are positive sites for a specific PTM prediction.', required=True) parser.add_argument('-output-prefix', dest='outputprefix', type=str, help='prefix of the output files (model and parameter files).', required=True) parser.add_argument('-residue-types', dest='residues', type=str, help='Residue types that this model focus on. For multiple residues, seperate each with \',\'. \n\ Note: all the residues specified by this parameter will be trained in one model.', required=True) parser.add_argument('-valinput', dest='valfile', type=str, help='validation data in fasta format if any. It will randomly select 10 percent of samples from the training data as a validation data set, if no validation file is provided.', required=False,default=None) parser.add_argument('-nclass', dest='nclass', type=int, help='number of classifiers to be trained for one time. [Default:5]', required=False, default=5) parser.add_argument('-window', dest='window', type=int, help='window size: the number of amino acid of the left part or right part adjacent to a potential PTM site. 2*\'windo size\'+1 amino acid will be extracted for one protential fragment. [Default:16]', required=False, default=16) parser.add_argument('-maxneg', dest='maxneg', type=int, help='maximum iterations for each classifier which controls the maximum copy number of the negative data which has the same size with the positive data. [Default: 50]', required=False, default=50) parser.add_argument('-nb_epoch', dest='nb_epoch', type=int, help='number of epoches for one bootstrap step. It is invalidate, if earlystop is set.', required=False, default=None) parser.add_argument('-earlystop', dest='earlystop', type=int, help='after the \'earlystop\' number of epochs with no improvement the training will be stopped for one bootstrap step. 
[Default: 20]', required=False, default=20) parser.add_argument('-inputweights', dest='inputweights', type=int, help='Initial weights saved in a HDF5 file.', required=False, default=None) parser.add_argument('-backupweights', dest='backupweights', type=int, help='Set the intermediate weights for backup in a HDF5 file.', required=False, default=None) parser.add_argument('-transferlayer', dest='transferlayer', type=int, help='Set the last \'transferlayer\' number of layers to be randomly initialized.', required=False, default=1) args = parser.parse_args() inputfile=args.inputfile; valfile=args.valfile; outputprefix=args.outputprefix; nclass=args.nclass; window=args.window; maxneg=args.maxneg; np_epoch2=args.nb_epoch; earlystop=args.earlystop; inputweights=args.inputweights; backupweights=args.backupweights; transferlayer=args.transferlayer residues=args.residues.split(",") outputmodel=outputprefix+str("_HDF5model"); outputparameter=outputprefix+str("_parameters"); try: output = open(outputparameter, 'w') except IOError: print 'cannot write to ' + outputparameter+ "!\n"; exit() else: print >> output, "%d\t%d\t%s\tgeneral" % (nclass,window,args.residues) from methods.Bootstrapping_allneg_continue_val import bootStrapping_allneg_continue_val from methods.EXtractfragment_sort import extractFragforTraining trainfrag=extractFragforTraining(inputfile,window,'-',focus=residues) if(valfile is not None): valfrag=extractFragforTraining(valfile,window,'-',focus= residues) else: valfrag=None; for bt in range(nclass): models=bootStrapping_allneg_continue_val(trainfrag.as_matrix(),valfile=valfrag, srate=1,nb_epoch1=1,nb_epoch2=np_epoch2,earlystop=earlystop,maxneg=maxneg, outputweights=backupweights, inputweights=inputweights, transferlayer=transferlayer) models.save_weights(outputmodel+'_class'+str(bt),overwrite=True) if __name__ == "__main__": main()
gpl-2.0
6,868,989,048,079,880,000
65.477612
289
0.682814
false
keyurpatel076/MissionPlannerGit
packages/IronPython.StdLib.2.7.4/content/Lib/io.py
191
3624
"""The io module provides the Python interfaces to stream handling. The builtin open function is defined in this module. At the top of the I/O hierarchy is the abstract base class IOBase. It defines the basic interface to a stream. Note, however, that there is no separation between reading and writing to streams; implementations are allowed to throw an IOError if they do not support a given operation. Extending IOBase is RawIOBase which deals simply with the reading and writing of raw bytes to a stream. FileIO subclasses RawIOBase to provide an interface to OS files. BufferedIOBase deals with buffering on a raw byte stream (RawIOBase). Its subclasses, BufferedWriter, BufferedReader, and BufferedRWPair buffer streams that are readable, writable, and both respectively. BufferedRandom provides a buffered interface to random access streams. BytesIO is a simple stream of in-memory bytes. Another IOBase subclass, TextIOBase, deals with the encoding and decoding of streams into text. TextIOWrapper, which extends it, is a buffered text interface to a buffered raw stream (`BufferedIOBase`). Finally, StringIO is a in-memory stream for text. Argument names are not part of the specification, and only the arguments of open() are intended to be used as keyword arguments. data: DEFAULT_BUFFER_SIZE An int containing the default buffer size used by the module's buffered I/O classes. open() uses the file's blksize (as obtained by os.stat) if possible. """ # New I/O library conforming to PEP 3116. # XXX edge cases when switching between reading/writing # XXX need to support 1 meaning line-buffered # XXX whenever an argument is None, use the default value # XXX read/write ops should check readable/writable # XXX buffered readinto should work with arbitrary buffer objects # XXX use incremental encoder for text output, at least for UTF-16 and UTF-8-SIG # XXX check writable, readable and seekable in appropriate places __author__ = ("Guido van Rossum <[email protected]>, " "Mike Verdone <[email protected]>, " "Mark Russell <[email protected]>, " "Antoine Pitrou <[email protected]>, " "Amaury Forgeot d'Arc <[email protected]>, " "Benjamin Peterson <[email protected]>") __all__ = ["BlockingIOError", "open", "IOBase", "RawIOBase", "FileIO", "BytesIO", "StringIO", "BufferedIOBase", "BufferedReader", "BufferedWriter", "BufferedRWPair", "BufferedRandom", "TextIOBase", "TextIOWrapper", "UnsupportedOperation", "SEEK_SET", "SEEK_CUR", "SEEK_END"] import _io import abc from _io import (DEFAULT_BUFFER_SIZE, BlockingIOError, UnsupportedOperation, open, FileIO, BytesIO, StringIO, BufferedReader, BufferedWriter, BufferedRWPair, BufferedRandom, IncrementalNewlineDecoder, TextIOWrapper) OpenWrapper = _io.open # for compatibility with _pyio # for seek() SEEK_SET = 0 SEEK_CUR = 1 SEEK_END = 2 # Declaring ABCs in C is tricky so we do it here. # Method descriptions and default implementations are inherited from the C # version however. class IOBase(_io._IOBase): __metaclass__ = abc.ABCMeta class RawIOBase(_io._RawIOBase, IOBase): pass class BufferedIOBase(_io._BufferedIOBase, IOBase): pass class TextIOBase(_io._TextIOBase, IOBase): pass RawIOBase.register(FileIO) for klass in (BytesIO, BufferedReader, BufferedWriter, BufferedRandom, BufferedRWPair): BufferedIOBase.register(klass) for klass in (StringIO, TextIOWrapper): TextIOBase.register(klass) del klass
gpl-3.0
5,981,894,630,289,200,000
35.979592
80
0.732616
false
Unrepentant-Atheist/mame
3rdparty/benchmark/.ycm_extra_conf.py
84
3641
import os import ycm_core # These are the compilation flags that will be used in case there's no # compilation database set (by default, one is not set). # CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR. flags = [ '-Wall', '-Werror', '-pendantic-errors', '-std=c++0x', '-fno-strict-aliasing', '-O3', '-DNDEBUG', # ...and the same thing goes for the magic -x option which specifies the # language that the files to be compiled are written in. This is mostly # relevant for c++ headers. # For a C project, you would set this to 'c' instead of 'c++'. '-x', 'c++', '-I', 'include', '-isystem', '/usr/include', '-isystem', '/usr/local/include', ] # Set this to the absolute path to the folder (NOT the file!) containing the # compile_commands.json file to use that instead of 'flags'. See here for # more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html # # Most projects will NOT need to set this to anything; you can just change the # 'flags' list of compilation flags. Notice that YCM itself uses that approach. compilation_database_folder = '' if os.path.exists( compilation_database_folder ): database = ycm_core.CompilationDatabase( compilation_database_folder ) else: database = None SOURCE_EXTENSIONS = [ '.cc' ] def DirectoryOfThisScript(): return os.path.dirname( os.path.abspath( __file__ ) ) def MakeRelativePathsInFlagsAbsolute( flags, working_directory ): if not working_directory: return list( flags ) new_flags = [] make_next_absolute = False path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ] for flag in flags: new_flag = flag if make_next_absolute: make_next_absolute = False if not flag.startswith( '/' ): new_flag = os.path.join( working_directory, flag ) for path_flag in path_flags: if flag == path_flag: make_next_absolute = True break if flag.startswith( path_flag ): path = flag[ len( path_flag ): ] new_flag = path_flag + os.path.join( working_directory, path ) break if new_flag: new_flags.append( new_flag ) return new_flags def IsHeaderFile( filename ): extension = os.path.splitext( filename )[ 1 ] return extension in [ '.h', '.hxx', '.hpp', '.hh' ] def GetCompilationInfoForFile( filename ): # The compilation_commands.json file generated by CMake does not have entries # for header files. So we do our best by asking the db for flags for a # corresponding source file, if any. If one exists, the flags for that file # should be good enough. if IsHeaderFile( filename ): basename = os.path.splitext( filename )[ 0 ] for extension in SOURCE_EXTENSIONS: replacement_file = basename + extension if os.path.exists( replacement_file ): compilation_info = database.GetCompilationInfoForFile( replacement_file ) if compilation_info.compiler_flags_: return compilation_info return None return database.GetCompilationInfoForFile( filename ) def FlagsForFile( filename, **kwargs ): if database: # Bear in mind that compilation_info.compiler_flags_ does NOT return a # python list, but a "list-like" StringVec object compilation_info = GetCompilationInfoForFile( filename ) if not compilation_info: return None final_flags = MakeRelativePathsInFlagsAbsolute( compilation_info.compiler_flags_, compilation_info.compiler_working_dir_ ) else: relative_to = DirectoryOfThisScript() final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to ) return { 'flags': final_flags, 'do_cache': True }
gpl-2.0
-5,718,661,672,368,788,000
30.66087
79
0.68635
false
tencentyun/cos-python-sdk
qcloud_cos/cos_config.py
1
1384
#!/usr/bin/env python # coding=utf-8 ################################################################################ # CosConfig - configuration settings for COS ################################################################################ class CosConfig(object): def __init__(self): self._end_point = 'http://web.file.myqcloud.com/files/v1' self._user_agent = 'cos-python-sdk-v3.3' self._timeout = 30 self._sign_expired = 300 # Set the COS endpoint (domain) address def set_end_point(self, end_point): self._end_point = end_point # Get the endpoint address def get_end_point(self): return self._end_point # Get the user_agent sent in the HTTP headers def get_user_agent(self): return self._user_agent # Set the connection timeout, in seconds def set_timeout(self, time_out): assert isinstance(time_out, int) self._timeout = time_out # Get the connection timeout, in seconds def get_timeout(self): return self._timeout # Set the signature expiration time, in seconds def set_sign_expired(self, expired): assert isinstance(expired, int) self._sign_expired = expired # Get the signature expiration time, in seconds def get_sign_expired(self): return self._sign_expired # Enable https def enable_https(self): self._end_point = 'https://web.file.myqcloud.com/files/v1'
mit
2,621,996,498,364,763,000
26.217391
80
0.522364
false
riveridea/gnuradio
gr-trellis/examples/python/test_cpm.py
11
5508
#!/usr/bin/env python ################################################## # Gnuradio Python Flow Graph # Title: CPM test # Author: Achilleas Anastasopoulos # Description: gnuradio flow graph # Generated: Thu Feb 19 23:16:23 2009 ################################################## from gnuradio import gr from gnuradio import trellis, digital, filter, blocks from grc_gnuradio import blks2 as grc_blks2 import math import numpy import sys import fsm_utils from gnuradio import trellis try: from gnuradio import analog except ImportError: sys.stderr.write("Error: Program requires gr-analog.\n") sys.exit(1) try: import scipy.stats except ImportError: print "Error: Program requires scipy (see: www.scipy.org)." sys.exit(1) def run_test(seed,blocksize): tb = gr.top_block() ################################################## # Variables ################################################## M = 2 K = 1 P = 2 h = (1.0*K)/P L = 3 Q = 4 frac = 0.99 f = trellis.fsm(P,M,L) # CPFSK signals #p = numpy.ones(Q)/(2.0) #q = numpy.cumsum(p)/(1.0*Q) # GMSK signals BT=0.3; tt=numpy.arange(0,L*Q)/(1.0*Q)-L/2.0; #print tt p=(0.5*scipy.special.erfc(2*math.pi*BT*(tt-0.5)/math.sqrt(math.log(2.0))/math.sqrt(2.0))-0.5*scipy.special.erfc(2*math.pi*BT*(tt+0.5)/math.sqrt(math.log(2.0))/math.sqrt(2.0)))/2.0; p=p/sum(p)*Q/2.0; #print p q=numpy.cumsum(p)/Q; q=q/q[-1]/2.0; #print q (f0T,SS,S,F,Sf,Ff,N) = fsm_utils.make_cpm_signals(K,P,M,L,q,frac) #print N #print Ff Ffa = numpy.insert(Ff,Q,numpy.zeros(N),axis=0) #print Ffa MF = numpy.fliplr(numpy.transpose(Ffa)) #print MF E = numpy.sum(numpy.abs(Sf)**2,axis=0) Es = numpy.sum(E)/f.O() #print Es constellation = numpy.reshape(numpy.transpose(Sf),N*f.O()) #print Ff #print Sf #print constellation #print numpy.max(numpy.abs(SS - numpy.dot(Ff , Sf))) EsN0_db = 10.0 N0 = Es * 10.0**(-(1.0*EsN0_db)/10.0) #N0 = 0.0 #print N0 head = 4 tail = 4 numpy.random.seed(seed*666) data = numpy.random.randint(0, M, head+blocksize+tail+1) #data = numpy.zeros(blocksize+1+head+tail,'int') for i in range(head): data[i]=0 for i in range(tail+1): data[-i]=0 ################################################## # Blocks ################################################## random_source_x_0 = blocks.vector_source_b(data.tolist(), False) digital_chunks_to_symbols_xx_0 = digital.chunks_to_symbols_bf((-1, 1), 1) filter_interp_fir_filter_xxx_0 = filter.interp_fir_filter_fff(Q, p) analog_frequency_modulator_fc_0 = analog.frequency_modulator_fc(2*math.pi*h*(1.0/Q)) blocks_add_vxx_0 = blocks.add_vcc(1) analog_noise_source_x_0 = analog.noise_source_c(analog.GR_GAUSSIAN, (N0/2.0)**0.5, -long(seed)) blocks_multiply_vxx_0 = blocks.multiply_vcc(1) analog_sig_source_x_0 = analog.sig_source_c(Q, analog.GR_COS_WAVE, -f0T, 1, 0) # only works for N=2, do it manually for N>2...
filter_fir_filter_xxx_0_0 = filter.fir_filter_ccc(Q, MF[0].conjugate()) filter_fir_filter_xxx_0_0_0 = filter.fir_filter_ccc(Q, MF[1].conjugate()) blocks_streams_to_stream_0 = blocks.streams_to_stream(gr.sizeof_gr_complex*1, int(N)) blocks_skiphead_0 = blocks.skiphead(gr.sizeof_gr_complex*1, int(N*(1+0))) viterbi = trellis.viterbi_combined_cb(f, head+blocksize+tail, 0, -1, int(N), constellation, digital.TRELLIS_EUCLIDEAN) blocks_vector_sink_x_0 = blocks.vector_sink_b() ################################################## # Connections ################################################## tb.connect((random_source_x_0, 0), (digital_chunks_to_symbols_xx_0, 0)) tb.connect((digital_chunks_to_symbols_xx_0, 0), (filter_interp_fir_filter_xxx_0, 0)) tb.connect((filter_interp_fir_filter_xxx_0, 0), (analog_frequency_modulator_fc_0, 0)) tb.connect((analog_frequency_modulator_fc_0, 0), (blocks_add_vxx_0, 0)) tb.connect((analog_noise_source_x_0, 0), (blocks_add_vxx_0, 1)) tb.connect((blocks_add_vxx_0, 0), (blocks_multiply_vxx_0, 0)) tb.connect((analog_sig_source_x_0, 0), (blocks_multiply_vxx_0, 1)) tb.connect((blocks_multiply_vxx_0, 0), (filter_fir_filter_xxx_0_0, 0)) tb.connect((blocks_multiply_vxx_0, 0), (filter_fir_filter_xxx_0_0_0, 0)) tb.connect((filter_fir_filter_xxx_0_0, 0), (blocks_streams_to_stream_0, 0)) tb.connect((filter_fir_filter_xxx_0_0_0, 0), (blocks_streams_to_stream_0, 1)) tb.connect((blocks_streams_to_stream_0, 0), (blocks_skiphead_0, 0)) tb.connect((blocks_skiphead_0, 0), (viterbi, 0)) tb.connect((viterbi, 0), (blocks_vector_sink_x_0, 0)) tb.run() dataest = blocks_vector_sink_x_0.data() #print data #print numpy.array(dataest) perr = 0 err = 0 for i in range(blocksize): if data[head+i] != dataest[head+i]: #print i err += 1 if err != 0 : perr = 1 return (err,perr) if __name__ == '__main__': blocksize = 1000 ss=0 ee=0 for i in range(10000): (s,e) = run_test(i,blocksize) ss += s ee += e if (i+1) % 100 == 0: print i+1,ss,ee,(1.0*ss)/(i+1)/(1.0*blocksize),(1.0*ee)/(i+1) print i+1,ss,ee,(1.0*ss)/(i+1)/(1.0*blocksize),(1.0*ee)/(i+1)
gpl-3.0
8,884,651,964,150,266,000
33.641509
188
0.553922
false
onshape-public/onshape-clients
python/onshape_client/oas/models/bt_graphics_appearance1152.py
1
5506
# coding: utf-8 """ Onshape REST API The Onshape REST API consumed by all clients. # noqa: E501 The version of the OpenAPI document: 1.113 Contact: [email protected] Generated by: https://openapi-generator.tech """ from __future__ import absolute_import import re # noqa: F401 import sys # noqa: F401 import six # noqa: F401 import nulltype # noqa: F401 from onshape_client.oas.model_utils import ( # noqa: F401 ModelComposed, ModelNormal, ModelSimple, date, datetime, file_type, int, none_type, str, validate_get_composed_info, ) class BTGraphicsAppearance1152(ModelNormal): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. Attributes: allowed_values (dict): The key is the tuple path to the attribute and the for var_name this is (var_name,). The value is a dict with a capitalized key describing the allowed value and an allowed value. These dicts store the allowed enum values. attribute_map (dict): The key is attribute name and the value is json key in definition. discriminator_value_class_map (dict): A dict to go from the discriminator variable value to the discriminator class name. validations (dict): The key is the tuple path to the attribute and the for var_name this is (var_name,). The value is a dict that stores validations for max_length, min_length, max_items, min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum, inclusive_minimum, and regex. additional_properties_type (tuple): A tuple of classes accepted as additional properties values. """ allowed_values = {} validations = {} additional_properties_type = None @staticmethod def openapi_types(): """ This must be a class method so a model may have properties that are of type self, this ensures that we don't create a cyclic import Returns openapi_types (dict): The key is attribute name and the value is attribute type. """ return { "bt_type": (str,), # noqa: E501 "color": ([str],), # noqa: E501 "non_trivial": (bool,), # noqa: E501 "opacity": (int,), # noqa: E501 "reset": (bool,), # noqa: E501 "rgba_color": ([str],), # noqa: E501 "usable_appearance": (bool,), # noqa: E501 } @staticmethod def discriminator(): return None attribute_map = { "bt_type": "btType", # noqa: E501 "color": "color", # noqa: E501 "non_trivial": "nonTrivial", # noqa: E501 "opacity": "opacity", # noqa: E501 "reset": "reset", # noqa: E501 "rgba_color": "rgbaColor", # noqa: E501 "usable_appearance": "usableAppearance", # noqa: E501 } @staticmethod def _composed_schemas(): return None required_properties = set( [ "_data_store", "_check_type", "_from_server", "_path_to_item", "_configuration", ] ) def __init__( self, _check_type=True, _from_server=False, _path_to_item=(), _configuration=None, **kwargs ): # noqa: E501 """bt_graphics_appearance1152.BTGraphicsAppearance1152 - a model defined in OpenAPI Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. Defaults to True _path_to_item (tuple/list): This is a list of keys or values to drill down to the model in received_data when deserializing a response _from_server (bool): True if the data is from the server False if the data is from the client (default) _configuration (Configuration): the instance to use when deserializing a file_type parameter. If passed, type conversion is attempted If omitted no type conversion is done. 
bt_type (str): [optional] # noqa: E501 color ([str]): [optional] # noqa: E501 non_trivial (bool): [optional] # noqa: E501 opacity (int): [optional] # noqa: E501 reset (bool): [optional] # noqa: E501 rgba_color ([str]): [optional] # noqa: E501 usable_appearance (bool): [optional] # noqa: E501 """ self._data_store = {} self._check_type = _check_type self._from_server = _from_server self._path_to_item = _path_to_item self._configuration = _configuration for var_name, var_value in six.iteritems(kwargs): if ( var_name not in self.attribute_map and self._configuration is not None and self._configuration.discard_unknown_keys and self.additional_properties_type is None ): # discard variable. continue setattr(self, var_name, var_value)
mit
-5,571,862,601,072,330,000
33.198758
91
0.562114
false
lyft/incubator-airflow
tests/providers/google/cloud/sensors/test_pubsub.py
4
4436
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import unittest import mock from google.cloud.pubsub_v1.types import ReceivedMessage from google.protobuf.json_format import MessageToDict, ParseDict from airflow.exceptions import AirflowSensorTimeout from airflow.providers.google.cloud.sensors.pubsub import PubSubPullSensor TASK_ID = 'test-task-id' TEST_PROJECT = 'test-project' TEST_SUBSCRIPTION = 'test-subscription' class TestPubSubPullSensor(unittest.TestCase): def _generate_messages(self, count): return [ ParseDict( { "ack_id": "%s" % i, "message": { "data": 'Message {}'.format(i).encode('utf8'), "attributes": {"type": "generated message"}, }, }, ReceivedMessage(), ) for i in range(1, count + 1) ] def _generate_dicts(self, count): return [MessageToDict(m) for m in self._generate_messages(count)] @mock.patch('airflow.providers.google.cloud.sensors.pubsub.PubSubHook') def test_poke_no_messages(self, mock_hook): operator = PubSubPullSensor(task_id=TASK_ID, project_id=TEST_PROJECT, subscription=TEST_SUBSCRIPTION) mock_hook.return_value.pull.return_value = [] self.assertEqual([], operator.poke(None)) @mock.patch('airflow.providers.google.cloud.sensors.pubsub.PubSubHook') def test_poke_with_ack_messages(self, mock_hook): operator = PubSubPullSensor(task_id=TASK_ID, project_id=TEST_PROJECT, subscription=TEST_SUBSCRIPTION, ack_messages=True) generated_messages = self._generate_messages(5) generated_dicts = self._generate_dicts(5) mock_hook.return_value.pull.return_value = generated_messages self.assertEqual(generated_dicts, operator.poke(None)) mock_hook.return_value.acknowledge.assert_called_once_with( project_id=TEST_PROJECT, subscription=TEST_SUBSCRIPTION, ack_ids=['1', '2', '3', '4', '5'] ) @mock.patch('airflow.providers.google.cloud.sensors.pubsub.PubSubHook') def test_execute(self, mock_hook): operator = PubSubPullSensor( task_id=TASK_ID, project_id=TEST_PROJECT, subscription=TEST_SUBSCRIPTION, poke_interval=0 ) generated_messages = self._generate_messages(5) generated_dicts = self._generate_dicts(5) mock_hook.return_value.pull.return_value = generated_messages response = operator.execute(None) mock_hook.return_value.pull.assert_called_once_with( project_id=TEST_PROJECT, subscription=TEST_SUBSCRIPTION, max_messages=5, return_immediately=False ) self.assertEqual(generated_dicts, response) @mock.patch('airflow.providers.google.cloud.sensors.pubsub.PubSubHook') def test_execute_timeout(self, mock_hook): operator = PubSubPullSensor(task_id=TASK_ID, project_id=TEST_PROJECT, subscription=TEST_SUBSCRIPTION, poke_interval=0, timeout=1) mock_hook.return_value.pull.return_value = [] with self.assertRaises(AirflowSensorTimeout): operator.execute(None) mock_hook.return_value.pull.assert_called_once_with( project_id=TEST_PROJECT, subscription=TEST_SUBSCRIPTION, max_messages=5, 
return_immediately=False )
apache-2.0
3,097,246,323,247,045,000
40.457944
77
0.631876
false
allenai/allennlp
allennlp/modules/seq2seq_encoders/pytorch_transformer_wrapper.py
1
4656
from typing import Optional from overrides import overrides import torch from torch import nn from allennlp.modules.seq2seq_encoders.seq2seq_encoder import Seq2SeqEncoder from allennlp.nn.util import add_positional_features @Seq2SeqEncoder.register("pytorch_transformer") class PytorchTransformer(Seq2SeqEncoder): """ Implements a stacked self-attention encoder similar to the Transformer architecture in [Attention is all you Need] (https://www.semanticscholar.org/paper/Attention-Is-All-You-Need-Vaswani-Shazeer/0737da0767d77606169cbf4187b83e1ab62f6077). This class adapts the Transformer from torch.nn for use in AllenNLP. Optionally, it adds positional encodings. Registered as a `Seq2SeqEncoder` with name "pytorch_transformer". # Parameters input_dim : `int`, required. The input dimension of the encoder. feedforward_hidden_dim : `int`, required. The middle dimension of the FeedForward network. The input and output dimensions are fixed to ensure sizes match up for the self attention layers. num_layers : `int`, required. The number of stacked self attention -> feedforward -> layer normalisation blocks. num_attention_heads : `int`, required. The number of attention heads to use per layer. use_positional_encoding : `bool`, optional, (default = `True`) Whether to add sinusoidal frequencies to the input tensor. This is strongly recommended, as without this feature, the self attention layers have no idea of absolute or relative position (as they are just computing pairwise similarity between vectors of elements), which can be important features for many tasks. dropout_prob : `float`, optional, (default = `0.1`) The dropout probability for the feedforward network. """ # noqa def __init__( self, input_dim: int, num_layers: int, feedforward_hidden_dim: int = 2048, num_attention_heads: int = 8, positional_encoding: Optional[str] = None, positional_embedding_size: int = 512, dropout_prob: float = 0.1, activation: str = "relu", ) -> None: super().__init__() layer = nn.TransformerEncoderLayer( d_model=input_dim, nhead=num_attention_heads, dim_feedforward=feedforward_hidden_dim, dropout=dropout_prob, activation=activation, ) self._transformer = nn.TransformerEncoder(layer, num_layers) self._input_dim = input_dim # initialize parameters # We do this before the embeddings are initialized so we get the default initialization for the embeddings. 
for p in self.parameters(): if p.dim() > 1: nn.init.xavier_uniform_(p) if positional_encoding is None: self._sinusoidal_positional_encoding = False self._positional_embedding = None elif positional_encoding == "sinusoidal": self._sinusoidal_positional_encoding = True self._positional_embedding = None elif positional_encoding == "embedding": self._sinusoidal_positional_encoding = False self._positional_embedding = nn.Embedding(positional_embedding_size, input_dim) else: raise ValueError( "positional_encoding must be one of None, 'sinusoidal', or 'embedding'" ) @overrides def get_input_dim(self) -> int: return self._input_dim @overrides def get_output_dim(self) -> int: return self._input_dim @overrides def is_bidirectional(self): return False @overrides def forward(self, inputs: torch.Tensor, mask: torch.BoolTensor): output = inputs if self._sinusoidal_positional_encoding: output = add_positional_features(output) if self._positional_embedding is not None: position_ids = torch.arange(inputs.size(1), dtype=torch.long, device=output.device) position_ids = position_ids.unsqueeze(0).expand(inputs.shape[:-1]) output = output + self._positional_embedding(position_ids) # For some reason the torch transformer expects the shape (sequence, batch, features), not the more # familiar (batch, sequence, features), so we have to fix it. output = output.permute(1, 0, 2) # For some other reason, the torch transformer takes the mask backwards. mask = ~mask output = self._transformer(output, src_key_padding_mask=mask) output = output.permute(1, 0, 2) return output
apache-2.0
4,032,469,471,103,327,000
39.486957
127
0.659364
false
davidcusatis/horizon
openstack_dashboard/test/api_tests/cinder_rest_tests.py
8
10320
# Copyright 2015 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import mock from django.conf import settings from openstack_dashboard import api from openstack_dashboard.api.rest import cinder from openstack_dashboard.test import helpers as test class CinderRestTestCase(test.TestCase): # # Volumes # def test_volumes_get(self): self._test_volumes_get(False, {}) def test_volumes_get_all(self): self._test_volumes_get(True, {}) def test_volumes_get_with_filters(self): filters = {'status': 'available'} self._test_volumes_get(False, filters) @mock.patch.object(cinder.api, 'cinder') def _test_volumes_get(self, all, filters, cc): if all: request = self.mock_rest_request(GET={'all_projects': 'true'}) else: request = self.mock_rest_request(**{'GET': filters}) cc.volume_list_paged.return_value = [ mock.Mock(**{'to_dict.return_value': {'id': 'one'}}), mock.Mock(**{'to_dict.return_value': {'id': 'two'}}), ], False, False response = cinder.Volumes().get(request) self.assertStatusCode(response, 200) self.assertEqual(response.json, {"items": [{"id": "one"}, {"id": "two"}], "has_more_data": False, "has_prev_data": False}) if all: cc.volume_list_paged.assert_called_once_with(request, {'all_tenants': 1}) else: cc.volume_list_paged.assert_called_once_with(request, search_opts=filters) @mock.patch.object(cinder.api, 'cinder') def test_volume_get(self, cc): request = self.mock_rest_request(**{'GET': {}}) cc.volume_get.return_value = mock.Mock( **{'to_dict.return_value': {'id': 'one'}}) response = cinder.Volume().get(request, '1') self.assertStatusCode(response, 200) self.assertEqual(response.json, {"id": "one"}) cc.volume_get.assert_called_once_with(request, '1') @mock.patch.object(cinder.api, 'cinder') def test_volume_create(self, cc): mock_body = '''{ "size": "", "name": "", "description": "", "volume_type": "", "snapshot_id": "", "metadata": "", "image_id": "", "availability_zone": "", "source_volid": "" }''' mock_volume_create_response = { "size": "" } mock_post_response = '{"size": ""}' request = self.mock_rest_request(POST={}, body=mock_body) cc.volume_create.return_value = \ mock.Mock(**{'to_dict.return_value': mock_volume_create_response}) response = cinder.Volumes().post(request) self.assertStatusCode(response, 201) self.assertEqual(response.content.decode("utf-8"), mock_post_response) # # Volume Types # @mock.patch.object(cinder.api, 'cinder') def test_volume_types_get(self, cc): request = self.mock_rest_request(**{'GET': {}}) cc.VolumeType.return_value = mock.Mock( **{'to_dict.return_value': {'id': 'one'}}) cc.volume_type_list.return_value = [{'id': 'one'}] response = cinder.VolumeTypes().get(request) self.assertStatusCode(response, 200) self.assertEqual(response.json, {"items": [{"id": "one"}]}) cc.volume_type_list.assert_called_once_with(request) cc.VolumeType.assert_called_once_with({'id': 'one'}) @mock.patch.object(cinder.api, 'cinder') def test_volume_type_get(self, cc): request = self.mock_rest_request(**{'GET': {}}) cc.volume_type_get.return_value = {'name': 'one'} 
cc.VolumeType.return_value = mock.Mock( **{'to_dict.return_value': {'id': 'one'}}) response = cinder.VolumeType().get(request, '1') self.assertStatusCode(response, 200) self.assertEqual(response.json, {"id": "one"}) cc.volume_type_get.assert_called_once_with(request, '1') cc.VolumeType.assert_called_once_with({'name': 'one'}) @mock.patch.object(cinder.api, 'cinder') def test_volume_type_get_default(self, cc): request = self.mock_rest_request(**{'GET': {}}) cc.volume_type_default.return_value = {'name': 'one'} cc.VolumeType.return_value = mock.Mock( **{'to_dict.return_value': {'id': 'one'}}) response = cinder.VolumeType().get(request, 'default') self.assertStatusCode(response, 200) self.assertEqual(response.json, {"id": "one"}) cc.volume_type_default.assert_called_once_with(request) cc.VolumeType.assert_called_once_with({'name': 'one'}) # # Volume Snapshots # @mock.patch.object(cinder.api, 'cinder') def test_volume_snaps_get(self, cc): request = self.mock_rest_request(**{'GET': {}}) cc.volume_snapshot_list.return_value = [ mock.Mock(**{'to_dict.return_value': {'id': 'one'}}), mock.Mock(**{'to_dict.return_value': {'id': 'two'}}), ] response = cinder.VolumeSnapshots().get(request) self.assertStatusCode(response, 200) self.assertEqual(response.json, {"items": [{"id": "one"}, {"id": "two"}]}) cc.volume_snapshot_list.assert_called_once_with(request, search_opts={}) @mock.patch.object(cinder.api, 'cinder') def test_volume_snaps_get_with_filters(self, cc): filters = {'status': 'available'} request = self.mock_rest_request(**{'GET': dict(filters)}) cc.volume_snapshot_list.return_value = [ mock.Mock(**{'to_dict.return_value': {'id': 'one'}}), mock.Mock(**{'to_dict.return_value': {'id': 'two'}}), ] response = cinder.VolumeSnapshots().get(request) self.assertStatusCode(response, 200) self.assertEqual(response.json, {"items": [{"id": "one"}, {"id": "two"}]}) cc.volume_snapshot_list.assert_called_once_with(request, search_opts=filters) # # Extensions # @mock.patch.object(cinder.api, 'cinder') @mock.patch.object(settings, 'OPENSTACK_CINDER_EXTENSIONS_BLACKLIST', ['baz']) def _test_extension_list(self, cc): request = self.mock_rest_request() cc.list_extensions.return_value = [ mock.Mock(**{'to_dict.return_value': {'name': 'foo'}}), mock.Mock(**{'to_dict.return_value': {'name': 'bar'}}), mock.Mock(**{'to_dict.return_value': {'name': 'baz'}}), ] response = cinder.Extensions().get(request) self.assertStatusCode(response, 200) self.assertEqual(response.content, '{"items": [{"name": "foo"}, {"name": "bar"}]}') cc.list_extensions.assert_called_once_with(request) @mock.patch.object(cinder.api, 'cinder') def test_qos_specs_get(self, cc): request = self.mock_rest_request(GET={}) cc.qos_specs_list.return_value = [ mock.Mock(**{'to_dict.return_value': {'id': 'one'}}), mock.Mock(**{'to_dict.return_value': {'id': 'two'}}), ] response = cinder.QoSSpecs().get(request) self.assertStatusCode(response, 200) self.assertEqual(response.content.decode("utf-8"), '{"items": [{"id": "one"}, {"id": "two"}]}') cc.qos_specs_list.assert_called_once_with(request) @mock.patch.object(cinder.api, 'cinder') def test_tenant_absolute_limits_get(self, cc): request = self.mock_rest_request(GET={}) cc.tenant_absolute_limits.return_value = \ {'id': 'one'} response = cinder.TenantAbsoluteLimits().get(request) self.assertStatusCode(response, 200) self.assertEqual(response.content.decode("utf-8"), '{"id": "one"}') cc.tenant_absolute_limits.assert_called_once_with(request) # # Services # @test.create_stubs({api.base: ('is_service_enabled',)}) 
@mock.patch.object(cinder.api, 'cinder') def test_services_get(self, cc): request = self.mock_rest_request(GET={}) cc.service_list.return_value = [mock.Mock( binary='binary_1', host='host_1', zone='zone_1', updated_at='updated_at_1', status='status_1', state='state_1' ), mock.Mock( binary='binary_2', host='host_2', zone='zone_2', updated_at='updated_at_2', status='status_2', state='state_2' )] api.base.is_service_enabled(request, 'volume').AndReturn(True) self.mox.ReplayAll() response = cinder.Services().get(request) self.assertStatusCode(response, 200) response_as_json = json.loads(response.content.decode('utf-8')) self.assertEqual(response_as_json['items'][0]['id'], 1) self.assertEqual(response_as_json['items'][0]['binary'], 'binary_1') self.assertEqual(response_as_json['items'][1]['id'], 2) self.assertEqual(response_as_json['items'][1]['binary'], 'binary_2') cc.service_list.assert_called_once_with(request) @test.create_stubs({api.base: ('is_service_enabled',)}) def test_services_get_disabled(self): request = self.mock_rest_request(GET={}) api.base.is_service_enabled(request, 'volume').AndReturn(False) self.mox.ReplayAll() response = cinder.Services().get(request) self.assertStatusCode(response, 501)
apache-2.0
-8,789,508,030,976,958,000
38.84556
78
0.567636
false
jbremer/androguard
androguard/misc.py
12
6313
from androguard.core import *
from androguard.core.androgen import *
from androguard.core.bytecode import *
from androguard.core.bytecodes.dvm import *
from androguard.core.bytecodes.apk import *
from androguard.core.analysis.analysis import *
from androguard.core.analysis.ganalysis import *
from androguard.decompiler.decompiler import *

from cPickle import dumps, loads

from androguard.core import androconf


def save_session(l, filename):
    """
        Save your session!

        :param l: a list of objects
        :type l: a list of objects

        :param filename: output filename to save the session
        :type filename: string

        :Example:
            save_session([a, vm, vmx], "msession.json")
    """
    with open(filename, "w") as fd:
        fd.write(dumps(l, -1))


def load_session(filename):
    """
        Load your session!

        :param filename: the filename where the session has been saved
        :type filename: string

        :rtype: the elements of your session :)

        :Example:
            a, vm, vmx = load_session("mysession.json")
    """
    return loads(read(filename, binary=False))


def AnalyzeAPK(filename, raw=False, decompiler="dad"):
    """
        Analyze an android application and set everything up for a quicker analysis!

        :param filename: the filename of the android application or a buffer which represents the application
        :type filename: string
        :param raw: True if you would like to use a buffer (optional)
        :type raw: boolean
        :param decompiler: ded, dex2jad, dad (optional)
        :type decompiler: string

        :rtype: return the :class:`APK`, :class:`DalvikVMFormat`, and :class:`VMAnalysis` objects
    """
    androconf.debug("APK ...")
    a = APK(filename, raw)

    d, dx = AnalyzeDex(a.get_dex(), raw=True, decompiler=decompiler)

    return a, d, dx


def AnalyzeDex(filename, raw=False, decompiler="dad"):
    """
        Analyze an android dex file and set everything up for a quicker analysis!

        :param filename: the filename of the android dex file or a buffer which represents the dex file
        :type filename: string
        :param raw: True if you would like to use a buffer (optional)
        :type raw: boolean

        :rtype: return the :class:`DalvikVMFormat`, and :class:`VMAnalysis` objects
    """
    androconf.debug("DalvikVMFormat ...")

    d = None
    if raw == False:
        d = DalvikVMFormat(read(filename))
    else:
        d = DalvikVMFormat(filename)

    androconf.debug("Export VM to python namespace")
    d.create_python_export()

    androconf.debug("VMAnalysis ...")
    dx = uVMAnalysis(d)

    androconf.debug("GVMAnalysis ...")
    gx = GVMAnalysis(dx, None)

    d.set_vmanalysis(dx)
    d.set_gvmanalysis(gx)

    RunDecompiler(d, dx, decompiler)

    androconf.debug("XREF ...")
    d.create_xref()
    androconf.debug("DREF ...")
    d.create_dref()

    return d, dx


def AnalyzeODex(filename, raw=False, decompiler="dad"):
    """
        Analyze an android odex file and set everything up for a quicker analysis!

        :param filename: the filename of the android dex file or a buffer which represents the dex file
        :type filename: string
        :param raw: True if you would like to use a buffer (optional)
        :type raw: boolean

        :rtype: return the :class:`DalvikOdexVMFormat`, and :class:`VMAnalysis` objects
    """
    androconf.debug("DalvikOdexVMFormat ...")

    d = None
    if raw == False:
        d = DalvikOdexVMFormat(read(filename))
    else:
        d = DalvikOdexVMFormat(filename)

    androconf.debug("Export VM to python namespace")
    d.create_python_export()

    androconf.debug("VMAnalysis ...")
    dx = uVMAnalysis(d)

    androconf.debug("GVMAnalysis ...")
    gx = GVMAnalysis(dx, None)

    d.set_vmanalysis(dx)
    d.set_gvmanalysis(gx)

    RunDecompiler(d, dx, decompiler)

    androconf.debug("XREF ...")
    d.create_xref()
    androconf.debug("DREF ...")
    d.create_dref()

    return d, dx


def RunDecompiler(d, dx, decompiler):
    """
        Run the decompiler on a specific analysis

        :param d: the DalvikVMFormat object
        :type d: :class:`DalvikVMFormat` object
        :param dx: the analysis of the format
        :type dx: :class:`VMAnalysis` object
        :param decompiler: the type of decompiler to use ("dad", "dex2jad", "ded")
        :type decompiler: string
    """
    if decompiler != None:
        androconf.debug("Decompiler ...")
        decompiler = decompiler.lower()
        if decompiler == "dex2jad":
            d.set_decompiler(DecompilerDex2Jad(d,
                                               androconf.CONF["PATH_DEX2JAR"],
                                               androconf.CONF["BIN_DEX2JAR"],
                                               androconf.CONF["PATH_JAD"],
                                               androconf.CONF["BIN_JAD"],
                                               androconf.CONF["TMP_DIRECTORY"]))
        elif decompiler == "dex2fernflower":
            d.set_decompiler(DecompilerDex2Fernflower(d,
                                                      androconf.CONF["PATH_DEX2JAR"],
                                                      androconf.CONF["BIN_DEX2JAR"],
                                                      androconf.CONF["PATH_FERNFLOWER"],
                                                      androconf.CONF["BIN_FERNFLOWER"],
                                                      androconf.CONF["OPTIONS_FERNFLOWER"],
                                                      androconf.CONF["TMP_DIRECTORY"]))
        elif decompiler == "ded":
            d.set_decompiler(DecompilerDed(d,
                                           androconf.CONF["PATH_DED"],
                                           androconf.CONF["BIN_DED"],
                                           androconf.CONF["TMP_DIRECTORY"]))
        else:
            d.set_decompiler(DecompilerDAD(d, dx))


def AnalyzeElf(filename, raw=False):
    # avoid requiring smiasm for everybody
    from androguard.core.binaries.elf import ELF

    e = None
    if raw == False:
        e = ELF(read(filename))
    else:
        e = ELF(filename)

    ExportElfToPython(e)

    return e


def ExportElfToPython(e):
    for function in e.get_functions():
        name = "FUNCTION_" + function.name
        setattr(e, name, function)
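# Illustrative usage sketch: a minimal way to combine the helpers above,
# assuming an APK exists at the made-up path below; the session filename is
# also just an example.
#
#   from androguard.misc import AnalyzeAPK, save_session, load_session
#
#   a, d, dx = AnalyzeAPK("/tmp/app.apk", decompiler="dad")
#   save_session([a, d, dx], "app.session")    # persist the analysis
#   a, d, dx = load_session("app.session")     # restore it later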
apache-2.0
-5,320,044,743,731,781,000
31.045685
109
0.589102
false
mogoweb/chromium-crosswalk
native_client_sdk/src/tools/genhttpfs.py
79
2524
#!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# This script generates a manifest file for the MountHttp file system.
# Files and directory paths are specified on the command-line. The names
# with glob and directories are recursed to form a list of files.
#
# For each file, the mode bits, size and path relative to the CWD are written
# to the output file which is stdout by default.
#

import glob
import optparse
import os
import sys


def main(argv):
  parser = optparse.OptionParser(
      usage='Usage: %prog [options] filename ...')
  parser.add_option('-C', '--srcdir', help='Change directory.', dest='srcdir',
                    default=None)
  parser.add_option('-o', '--output', help='Output file name.', dest='output',
                    default=None)
  parser.add_option('-v', '--verbose', help='Verbose output.', dest='verbose',
                    action='store_true')
  parser.add_option('-r', '--recursive', help='Recursive search.',
                    action='store_true')
  options, args = parser.parse_args(argv)

  if options.output:
    outfile = open(options.output, 'w')
  else:
    outfile = sys.stdout

  if options.srcdir:
    os.chdir(options.srcdir)

  # Generate a set of unique file names based on the input globs
  fileset = set()
  for fileglob in args:
    filelist = glob.glob(fileglob)
    if not filelist:
      raise RuntimeError('Could not find match for "%s".\n' % fileglob)
    for filename in filelist:
      if os.path.isfile(filename):
        fileset |= set([filename])
        continue
      if os.path.isdir(filename) and options.recursive:
        for root, _, files in os.walk(filename):
          fileset |= set([os.path.join(root, name) for name in files])
        continue
      raise RuntimeError('Can not handle path "%s".\n' % filename)

  cwd = os.path.abspath(os.getcwd())
  cwdlen = len(cwd)
  for filename in sorted(fileset):
    relname = os.path.abspath(filename)
    if cwd not in relname:
      raise RuntimeError('%s is not relative to CWD %s.\n' % (filename, cwd))
    relname = relname[cwdlen:]
    stat = os.stat(filename)
    mode = '-r--'
    outfile.write('%s %d %s\n' % (mode, stat.st_size, relname))
  return 0


if __name__ == '__main__':
  try:
    sys.exit(main(sys.argv[1:]))
  except OSError, e:
    sys.stderr.write('%s: %s\n' % (os.path.basename(__file__), e))
    sys.exit(1)
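# Illustrative output sketch: with the mode fixed to '-r--' above, each
# manifest line has the form "<mode> <size> <path-relative-to-CWD>".
# The file names and sizes below are made up purely to show the shape:
#
#   -r-- 1024 /index.html
#   -r-- 52137 /images/logo.png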
bsd-3-clause
-1,025,329,589,992,626,700
31.779221
77
0.636688
false
jammerful/buildbot
master/buildbot/steps/slave.py
11
1812
# This file is part of Buildbot. Buildbot is free software: you can # redistribute it and/or modify it under the terms of the GNU General Public # License as published by the Free Software Foundation, version 2. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more # details. # # You should have received a copy of the GNU General Public License along with # this program; if not, write to the Free Software Foundation, Inc., 51 # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # # Copyright Buildbot Team Members # This module is left for backward compatibility of old-named worker API. # It should never be imported by Buildbot. from __future__ import absolute_import from __future__ import print_function from buildbot.steps.worker import CompositeStepMixin from buildbot.steps.worker import CopyDirectory from buildbot.steps.worker import FileExists from buildbot.steps.worker import MakeDirectory from buildbot.steps.worker import RemoveDirectory from buildbot.steps.worker import SetPropertiesFromEnv from buildbot.steps.worker import WorkerBuildStep # pylint: disable=unused-import from buildbot.worker_transition import deprecatedWorkerModuleAttribute from buildbot.worker_transition import reportDeprecatedWorkerModuleUsage __all__ = [ 'CompositeStepMixin', 'CopyDirectory', 'FileExists', 'MakeDirectory', 'RemoveDirectory', 'SetPropertiesFromEnv', ] reportDeprecatedWorkerModuleUsage( "'{old}' module is deprecated, use " "'buildbot.steps.worker' module instead".format(old=__name__)) deprecatedWorkerModuleAttribute(locals(), WorkerBuildStep) del WorkerBuildStep # noqa
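# Illustrative note: because of the re-exports above, both imports below
# resolve to the same class, but the first one goes through this deprecated
# module and therefore triggers the warning emitted by
# reportDeprecatedWorkerModuleUsage. Only names already imported above are
# used here:
#
#   from buildbot.steps.slave import RemoveDirectory    # deprecated path
#   from buildbot.steps.worker import RemoveDirectory   # preferred path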
gpl-2.0
-521,931,601,021,272,450
36.75
79
0.788079
false
brianr/uuss
scripts/process_userstates_buckets.py
1
10860
from __future__ import with_statement import base64 import optparse import os import pymongo import simplejson import sys import time import zlib from paste.deploy import converters from lolapps.util import processor from lolapps.util import db from lolapps.util.adapters import mongo, chunking from lolapps.common import userstate_multi from uuss.server import model from lolapps import helpers # logging log = processor.configure_logging() game = None max_batch_size = None mongo_db = None tmp_dir = None bucket = None shard = None cycle_time = None rebalancing = False userstate = None def _app_id_from_user_id(user_id): return helpers.app_id_from_user_id(user_id) def do_processing(onerun=False): for collection_name in mongo_db.collection_names(): proc_time = 0 if collection_name.startswith('user_state-modified-%s' % bucket): try: load_name = 'user_state-modified-%s-%s' % (bucket, time.time()) try: mongo_db[collection_name].rename(load_name) except pymongo.errors.OperationFailure: # effectively this makes sure the renamed collection exists if mongo_db[load_name].count() <= 0: raise log.error("Error encountered renaming collection %s to %s, but able to continue" % (collection_name, load_name)) modified = mongo_db[load_name] start_time = time.time() log.info("%s userstates to save for %s ..." % (modified.count(), load_name)) # prepare for userstate size monitoring userstates_processed = 0 max_userstate_size = -1 total_userstate_size = 0 mod_count = 0 mod_rows = list(modified.find()) batch_count = 1 while mod_rows: proc_rows = mod_rows[:max_batch_size] mod_rows = mod_rows[max_batch_size:] base_file_name = "%s.%s" % (load_name, batch_count) with open(os.path.join(tmp_dir, base_file_name+".usrtmp"), 'w') as user_file: with open(os.path.join(tmp_dir, base_file_name+".plrtmp"), 'w') as player_file: for row in proc_rows: mod_count += row['count'] try: if row['_id'] == 'null': log.error("null user_id encountered") else: if rebalancing: (state, chunked) = userstate.get(row['_id'], raw=True) else: (state, chunked) = userstate.backup(row['_id'], raw=True) if state is not None: if not chunked: # NOTE(jpatrin): Shouldn't happen, but just in case... 
if isinstance(state, str): state = simplejson.loads(zlib.decompress(state)) # NOTE(jpatrin): This will be just a single chunk for now, # but will serve until the state gets saved by the game raw_state = chunking.blow_chunks(state) else: raw_state = state state = chunking.reconstitute_chunks(raw_state, True) #state_zlib_json = zlib.compress(simplejson.dumps(state)) #user_line = gen_userstate_line(row['_id'], state_zlib_json) user_line = gen_userstate_line(row['_id'], raw_state) player_line = gen_player_line(row['_id'], state) user_file.write(user_line+'\n') if player_line: player_file.write(player_line+'\n') # keep userstate size for average and max size tracking userstates_processed += 1 userstate_size = len(raw_state) / 1024 total_userstate_size += userstate_size max_userstate_size = max(userstate_size, max_userstate_size) except userstate_multi.UserstateException, e: log.exception(e) except Exception, e: # (jay) errors are bad here, but we don't want to keep the # rest of the userstates from being saved, so log it and go on log.exception(e) # don't want the file reader to get to these before we're done, so keep # as temporary name until finished writing os.rename(os.path.join(tmp_dir, base_file_name+".usrtmp"), os.path.join(tmp_dir, base_file_name+".user")) os.rename(os.path.join(tmp_dir, base_file_name+".plrtmp"), os.path.join(tmp_dir, base_file_name+".player")) log.info("processed batch, %s remaining" % len(mod_rows)) batch_count += 1 mongo_db.drop_collection(load_name) log.info("Saved. Total updates since last backup: %s" % mod_count) proc_time = time.time() - start_time log.info("%s seconds to process userstates" % proc_time) # if we processed userstates, log their size characteristics if userstates_processed: avg_userstate_size = total_userstate_size / userstates_processed log.info("processed userstate sizes(k): avg %r max %r", avg_userstate_size, max_userstate_size) except Exception, e: # problems here are bad, but shouldn't stop processing of # other collections log.exception(e) if onerun and proc_time > 0: log.debug("one run finished, returning") return def gen_userstate_line(user_id, state): state = base64.b64encode(state) return "%s\t%s\t%s\t%s\t%s" % ( db.str_nullsafe_quote(user_id), db.str_nullsafe_quote(state), db.str_nullsafe_quote(int(time.time())), # date_created db.str_nullsafe_quote(int(time.time())), # date_modified db.str_nullsafe_quote(1) # revision_id ) def gen_player_line(user_id, state): player = state.get('player') if not player: return '' #player = {} try: now = int(time.time()) last_login = player.get('lastLogin', 0) line = "('%s', %s, %s, %s, %s, %s, %s)" % (user_id, _app_id_from_user_id(user_id), player['tutorialStep'], player['level'], player['xp'], int(last_login / 86400.0), # last_visit now # timestamp ) except KeyError, e: line = None # No player data return line def configure(options): """ Find the first app we got command-line options for and setup for it. We are only processing one app with this script, so if more than one is specified on the command-line, only one of them will get processed. No guarantees on which one that is. 
""" global mongo_db, tmp_dir, max_batch_size, bucket, shard, cycle_time, rebalancing, game, userstate game = options.game if game is None: print "game option is required" sys.exit(1) ini_file = options.ini if ini_file is None: print "ini option is required" sys.exit(1) log.info('Configuring...') bucket = int(options.bucket) shard = int(options.shard) max_batch_size = int(options.batch) cycle_time = int(options.time) from uuss.server import configuration (full_config, config) = configuration.init_from_ini(ini_file, [game], {'uuss.use_gevent': False}) helpers.config = config userstate = getattr(model, game).userstate tmp_dir = config["%s_user_state.tmp_dir%s" % (game, shard)] if not os.path.exists(tmp_dir): os.mkdir(tmp_dir) rebalancing = converters.asbool(config['%s_user_state.rebalancing' % game]) and options.rebal if rebalancing: hosts = config['%s_user_state.mongo_hosts_rebal' % game].split(";") else: hosts = config['%s_user_state.mongo_hosts' % game].split(";") mongo_db = mongo.connect(hosts[shard])[config['%s_user_state.mongo_dbname' % game]] def main(): import gc log.info("GC collected %r objects", gc.collect()) processor.register_signal_handlers() processor.start_log_processor(do_processing, cycle_time) if __name__ == '__main__': parser = optparse.OptionParser() # default to staging.ini so never accidentaly run against production parser.add_option("-i", "--ini", dest="ini", help="uuss ini file") parser.add_option("-b", "--batch", dest="batch", default="1000", help="maximum batch size") parser.add_option("-u", "--bucket", dest="bucket", default="0", help="bucket to process") parser.add_option("-s", "--shard", dest="shard", default="0", help="shard to process") parser.add_option("-t", "--time", dest="time", default="120", help="minimum cycle time in seconds") parser.add_option("-r", "--rebal", dest="rebal", action="store_true", default=False, help="rebalancing node") parser.add_option("-g", "--game", dest="game", help="game to process (dane, india)") options, args = parser.parse_args() configure(options) main() # run forever #do_processing() # run once
mit
-2,571,789,772,694,419,000
43.691358
106
0.494107
false
mohammed-alfatih/servo
tests/wpt/css-tests/tools/html5lib/doc/conf.py
436
9028
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # html5lib documentation build configuration file, created by # sphinx-quickstart on Wed May 8 00:04:49 2013. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys, os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.viewcode'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = 'html5lib' copyright = '2006 - 2013, James Graham, Geoffrey Sneddon, and contributors' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '1.0' # The full version, including alpha/beta/rc tags. sys.path.append(os.path.abspath('..')) from html5lib import __version__ release = __version__ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = 'en' # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build', 'theme'] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. 
#html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'html5libdoc' # -- Options for LaTeX output -------------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'html5lib.tex', 'html5lib Documentation', 'James Graham, Geoffrey Sneddon, and contributors', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. 
#latex_domain_indices = True # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'html5lib', 'html5lib Documentation', ['James Graham, Geoffrey Sneddon, and contributors'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------------ # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'html5lib', 'html5lib Documentation', 'James Graham, Geoffrey Sneddon, and contributors', 'html5lib', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. #texinfo_no_detailmenu = False class CExtMock(object): """Required for autodoc on readthedocs.org where you cannot build C extensions.""" def __init__(self, *args, **kwargs): pass def __call__(self, *args, **kwargs): return CExtMock() @classmethod def __getattr__(cls, name): if name in ('__file__', '__path__'): return '/dev/null' else: return CExtMock() try: import lxml # flake8: noqa except ImportError: sys.modules['lxml'] = CExtMock() sys.modules['lxml.etree'] = CExtMock() print("warning: lxml modules mocked.") try: import genshi # flake8: noqa except ImportError: sys.modules['genshi'] = CExtMock() sys.modules['genshi.core'] = CExtMock() print("warning: genshi modules mocked.")
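# Illustrative sketch: any other optional C-backed dependency could be stubbed
# out for the readthedocs build the same way as lxml and genshi above. The
# module name "datrie" is used here only as a hypothetical example:
#
#   try:
#       import datrie  # flake8: noqa
#   except ImportError:
#       sys.modules['datrie'] = CExtMock()
#       print("warning: datrie module mocked.")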
mpl-2.0
3,787,293,849,168,783,000
31.242857
102
0.697054
false
elimence/edx-platform
cms/djangoapps/contentstore/management/commands/check_course.py
5
2830
from django.core.management.base import BaseCommand, CommandError
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.xml_importer import check_module_metadata_editability
from xmodule.course_module import CourseDescriptor
from request_cache.middleware import RequestCache


class Command(BaseCommand):
    help = '''Enumerates through the course and finds common errors'''

    def handle(self, *args, **options):
        if len(args) != 1:
            raise CommandError("check_course requires one argument: <location>")

        loc_str = args[0]

        loc = CourseDescriptor.id_to_location(loc_str)
        store = modulestore()

        # setup a request cache so we don't throttle the DB with all the metadata inheritance requests
        store.request_cache = RequestCache.get_request_cache()

        course = store.get_item(loc, depth=3)

        err_cnt = 0

        def _xlint_metadata(module):
            err_cnt = check_module_metadata_editability(module)
            for child in module.get_children():
                err_cnt = err_cnt + _xlint_metadata(child)
            return err_cnt

        err_cnt = err_cnt + _xlint_metadata(course)

        # we've had a bug where the xml_attributes field can be rewritten as a string rather than a dict
        def _check_xml_attributes_field(module):
            err_cnt = 0
            if hasattr(module, 'xml_attributes') and isinstance(module.xml_attributes, basestring):
                print 'module = {0} has xml_attributes as a string. It should be a dict'.format(module.location.url())
                err_cnt = err_cnt + 1
            for child in module.get_children():
                err_cnt = err_cnt + _check_xml_attributes_field(child)
            return err_cnt

        err_cnt = err_cnt + _check_xml_attributes_field(course)

        # check for dangling discussion items, which can cause errors in the forums
        def _get_discussion_items(module):
            discussion_items = []
            if module.location.category == 'discussion':
                discussion_items = discussion_items + [module.location.url()]

            for child in module.get_children():
                discussion_items = discussion_items + _get_discussion_items(child)

            return discussion_items

        discussion_items = _get_discussion_items(course)

        # now query all discussion items via get_items() and compare with the tree-traversal
        queried_discussion_items = store.get_items(['i4x', course.location.org, course.location.course, 'discussion', None, None])

        for item in queried_discussion_items:
            if item.location.url() not in discussion_items:
                print 'Found dangling discussion module = {0}'.format(item.location.url())
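# Illustrative invocation sketch (assumptions: the usual edx-platform
# manage.py wrapper is used, a "dev" settings module exists, and the course id
# below is entirely made up to match the org/course/run format that
# CourseDescriptor.id_to_location() expects):
#
#   python manage.py cms --settings=dev check_course "MITx/6.002x/2013_Spring"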
agpl-3.0
4,448,689,419,986,173,400
40.617647
118
0.636749
false
suninsky/ReceiptOCR
Python/server/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/jisfreq.py
3131
47315
######################## BEGIN LICENSE BLOCK ######################## # The Original Code is Mozilla Communicator client code. # # The Initial Developer of the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 1998 # the Initial Developer. All Rights Reserved. # # Contributor(s): # Mark Pilgrim - port to Python # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### # Sampling from about 20M text materials include literature and computer technology # # Japanese frequency table, applied to both S-JIS and EUC-JP # They are sorted in order. # 128 --> 0.77094 # 256 --> 0.85710 # 512 --> 0.92635 # 1024 --> 0.97130 # 2048 --> 0.99431 # # Ideal Distribution Ratio = 0.92635 / (1-0.92635) = 12.58 # Random Distribution Ration = 512 / (2965+62+83+86-512) = 0.191 # # Typical Distribution Ratio, 25% of IDR JIS_TYPICAL_DISTRIBUTION_RATIO = 3.0 # Char to FreqOrder table , JIS_TABLE_SIZE = 4368 JISCharToFreqOrder = ( 40, 1, 6, 182, 152, 180, 295,2127, 285, 381,3295,4304,3068,4606,3165,3510, # 16 3511,1822,2785,4607,1193,2226,5070,4608, 171,2996,1247, 18, 179,5071, 856,1661, # 32 1262,5072, 619, 127,3431,3512,3230,1899,1700, 232, 228,1294,1298, 284, 283,2041, # 48 2042,1061,1062, 48, 49, 44, 45, 433, 434,1040,1041, 996, 787,2997,1255,4305, # 64 2108,4609,1684,1648,5073,5074,5075,5076,5077,5078,3687,5079,4610,5080,3927,3928, # 80 5081,3296,3432, 290,2285,1471,2187,5082,2580,2825,1303,2140,1739,1445,2691,3375, # 96 1691,3297,4306,4307,4611, 452,3376,1182,2713,3688,3069,4308,5083,5084,5085,5086, # 112 5087,5088,5089,5090,5091,5092,5093,5094,5095,5096,5097,5098,5099,5100,5101,5102, # 128 5103,5104,5105,5106,5107,5108,5109,5110,5111,5112,4097,5113,5114,5115,5116,5117, # 144 5118,5119,5120,5121,5122,5123,5124,5125,5126,5127,5128,5129,5130,5131,5132,5133, # 160 5134,5135,5136,5137,5138,5139,5140,5141,5142,5143,5144,5145,5146,5147,5148,5149, # 176 5150,5151,5152,4612,5153,5154,5155,5156,5157,5158,5159,5160,5161,5162,5163,5164, # 192 5165,5166,5167,5168,5169,5170,5171,5172,5173,5174,5175,1472, 598, 618, 820,1205, # 208 1309,1412,1858,1307,1692,5176,5177,5178,5179,5180,5181,5182,1142,1452,1234,1172, # 224 1875,2043,2149,1793,1382,2973, 925,2404,1067,1241, 960,1377,2935,1491, 919,1217, # 240 1865,2030,1406,1499,2749,4098,5183,5184,5185,5186,5187,5188,2561,4099,3117,1804, # 256 2049,3689,4309,3513,1663,5189,3166,3118,3298,1587,1561,3433,5190,3119,1625,2998, # 272 3299,4613,1766,3690,2786,4614,5191,5192,5193,5194,2161, 26,3377, 2,3929, 20, # 288 3691, 47,4100, 50, 17, 16, 35, 268, 27, 243, 42, 155, 24, 154, 29, 184, # 304 4, 91, 14, 92, 53, 396, 33, 289, 9, 37, 64, 620, 21, 39, 321, 5, # 320 12, 11, 52, 13, 3, 208, 138, 0, 7, 60, 526, 141, 151,1069, 181, 275, # 336 1591, 83, 132,1475, 126, 331, 829, 15, 69, 160, 59, 22, 157, 
55,1079, 312, # 352 109, 38, 23, 25, 10, 19, 79,5195, 61, 382,1124, 8, 30,5196,5197,5198, # 368 5199,5200,5201,5202,5203,5204,5205,5206, 89, 62, 74, 34,2416, 112, 139, 196, # 384 271, 149, 84, 607, 131, 765, 46, 88, 153, 683, 76, 874, 101, 258, 57, 80, # 400 32, 364, 121,1508, 169,1547, 68, 235, 145,2999, 41, 360,3027, 70, 63, 31, # 416 43, 259, 262,1383, 99, 533, 194, 66, 93, 846, 217, 192, 56, 106, 58, 565, # 432 280, 272, 311, 256, 146, 82, 308, 71, 100, 128, 214, 655, 110, 261, 104,1140, # 448 54, 51, 36, 87, 67,3070, 185,2618,2936,2020, 28,1066,2390,2059,5207,5208, # 464 5209,5210,5211,5212,5213,5214,5215,5216,4615,5217,5218,5219,5220,5221,5222,5223, # 480 5224,5225,5226,5227,5228,5229,5230,5231,5232,5233,5234,5235,5236,3514,5237,5238, # 496 5239,5240,5241,5242,5243,5244,2297,2031,4616,4310,3692,5245,3071,5246,3598,5247, # 512 4617,3231,3515,5248,4101,4311,4618,3808,4312,4102,5249,4103,4104,3599,5250,5251, # 528 5252,5253,5254,5255,5256,5257,5258,5259,5260,5261,5262,5263,5264,5265,5266,5267, # 544 5268,5269,5270,5271,5272,5273,5274,5275,5276,5277,5278,5279,5280,5281,5282,5283, # 560 5284,5285,5286,5287,5288,5289,5290,5291,5292,5293,5294,5295,5296,5297,5298,5299, # 576 5300,5301,5302,5303,5304,5305,5306,5307,5308,5309,5310,5311,5312,5313,5314,5315, # 592 5316,5317,5318,5319,5320,5321,5322,5323,5324,5325,5326,5327,5328,5329,5330,5331, # 608 5332,5333,5334,5335,5336,5337,5338,5339,5340,5341,5342,5343,5344,5345,5346,5347, # 624 5348,5349,5350,5351,5352,5353,5354,5355,5356,5357,5358,5359,5360,5361,5362,5363, # 640 5364,5365,5366,5367,5368,5369,5370,5371,5372,5373,5374,5375,5376,5377,5378,5379, # 656 5380,5381, 363, 642,2787,2878,2788,2789,2316,3232,2317,3434,2011, 165,1942,3930, # 672 3931,3932,3933,5382,4619,5383,4620,5384,5385,5386,5387,5388,5389,5390,5391,5392, # 688 5393,5394,5395,5396,5397,5398,5399,5400,5401,5402,5403,5404,5405,5406,5407,5408, # 704 5409,5410,5411,5412,5413,5414,5415,5416,5417,5418,5419,5420,5421,5422,5423,5424, # 720 5425,5426,5427,5428,5429,5430,5431,5432,5433,5434,5435,5436,5437,5438,5439,5440, # 736 5441,5442,5443,5444,5445,5446,5447,5448,5449,5450,5451,5452,5453,5454,5455,5456, # 752 5457,5458,5459,5460,5461,5462,5463,5464,5465,5466,5467,5468,5469,5470,5471,5472, # 768 5473,5474,5475,5476,5477,5478,5479,5480,5481,5482,5483,5484,5485,5486,5487,5488, # 784 5489,5490,5491,5492,5493,5494,5495,5496,5497,5498,5499,5500,5501,5502,5503,5504, # 800 5505,5506,5507,5508,5509,5510,5511,5512,5513,5514,5515,5516,5517,5518,5519,5520, # 816 5521,5522,5523,5524,5525,5526,5527,5528,5529,5530,5531,5532,5533,5534,5535,5536, # 832 5537,5538,5539,5540,5541,5542,5543,5544,5545,5546,5547,5548,5549,5550,5551,5552, # 848 5553,5554,5555,5556,5557,5558,5559,5560,5561,5562,5563,5564,5565,5566,5567,5568, # 864 5569,5570,5571,5572,5573,5574,5575,5576,5577,5578,5579,5580,5581,5582,5583,5584, # 880 5585,5586,5587,5588,5589,5590,5591,5592,5593,5594,5595,5596,5597,5598,5599,5600, # 896 5601,5602,5603,5604,5605,5606,5607,5608,5609,5610,5611,5612,5613,5614,5615,5616, # 912 5617,5618,5619,5620,5621,5622,5623,5624,5625,5626,5627,5628,5629,5630,5631,5632, # 928 5633,5634,5635,5636,5637,5638,5639,5640,5641,5642,5643,5644,5645,5646,5647,5648, # 944 5649,5650,5651,5652,5653,5654,5655,5656,5657,5658,5659,5660,5661,5662,5663,5664, # 960 5665,5666,5667,5668,5669,5670,5671,5672,5673,5674,5675,5676,5677,5678,5679,5680, # 976 5681,5682,5683,5684,5685,5686,5687,5688,5689,5690,5691,5692,5693,5694,5695,5696, # 992 5697,5698,5699,5700,5701,5702,5703,5704,5705,5706,5707,5708,5709,5710,5711,5712, # 1008 
5713,5714,5715,5716,5717,5718,5719,5720,5721,5722,5723,5724,5725,5726,5727,5728, # 1024 5729,5730,5731,5732,5733,5734,5735,5736,5737,5738,5739,5740,5741,5742,5743,5744, # 1040 5745,5746,5747,5748,5749,5750,5751,5752,5753,5754,5755,5756,5757,5758,5759,5760, # 1056 5761,5762,5763,5764,5765,5766,5767,5768,5769,5770,5771,5772,5773,5774,5775,5776, # 1072 5777,5778,5779,5780,5781,5782,5783,5784,5785,5786,5787,5788,5789,5790,5791,5792, # 1088 5793,5794,5795,5796,5797,5798,5799,5800,5801,5802,5803,5804,5805,5806,5807,5808, # 1104 5809,5810,5811,5812,5813,5814,5815,5816,5817,5818,5819,5820,5821,5822,5823,5824, # 1120 5825,5826,5827,5828,5829,5830,5831,5832,5833,5834,5835,5836,5837,5838,5839,5840, # 1136 5841,5842,5843,5844,5845,5846,5847,5848,5849,5850,5851,5852,5853,5854,5855,5856, # 1152 5857,5858,5859,5860,5861,5862,5863,5864,5865,5866,5867,5868,5869,5870,5871,5872, # 1168 5873,5874,5875,5876,5877,5878,5879,5880,5881,5882,5883,5884,5885,5886,5887,5888, # 1184 5889,5890,5891,5892,5893,5894,5895,5896,5897,5898,5899,5900,5901,5902,5903,5904, # 1200 5905,5906,5907,5908,5909,5910,5911,5912,5913,5914,5915,5916,5917,5918,5919,5920, # 1216 5921,5922,5923,5924,5925,5926,5927,5928,5929,5930,5931,5932,5933,5934,5935,5936, # 1232 5937,5938,5939,5940,5941,5942,5943,5944,5945,5946,5947,5948,5949,5950,5951,5952, # 1248 5953,5954,5955,5956,5957,5958,5959,5960,5961,5962,5963,5964,5965,5966,5967,5968, # 1264 5969,5970,5971,5972,5973,5974,5975,5976,5977,5978,5979,5980,5981,5982,5983,5984, # 1280 5985,5986,5987,5988,5989,5990,5991,5992,5993,5994,5995,5996,5997,5998,5999,6000, # 1296 6001,6002,6003,6004,6005,6006,6007,6008,6009,6010,6011,6012,6013,6014,6015,6016, # 1312 6017,6018,6019,6020,6021,6022,6023,6024,6025,6026,6027,6028,6029,6030,6031,6032, # 1328 6033,6034,6035,6036,6037,6038,6039,6040,6041,6042,6043,6044,6045,6046,6047,6048, # 1344 6049,6050,6051,6052,6053,6054,6055,6056,6057,6058,6059,6060,6061,6062,6063,6064, # 1360 6065,6066,6067,6068,6069,6070,6071,6072,6073,6074,6075,6076,6077,6078,6079,6080, # 1376 6081,6082,6083,6084,6085,6086,6087,6088,6089,6090,6091,6092,6093,6094,6095,6096, # 1392 6097,6098,6099,6100,6101,6102,6103,6104,6105,6106,6107,6108,6109,6110,6111,6112, # 1408 6113,6114,2044,2060,4621, 997,1235, 473,1186,4622, 920,3378,6115,6116, 379,1108, # 1424 4313,2657,2735,3934,6117,3809, 636,3233, 573,1026,3693,3435,2974,3300,2298,4105, # 1440 854,2937,2463, 393,2581,2417, 539, 752,1280,2750,2480, 140,1161, 440, 708,1569, # 1456 665,2497,1746,1291,1523,3000, 164,1603, 847,1331, 537,1997, 486, 508,1693,2418, # 1472 1970,2227, 878,1220, 299,1030, 969, 652,2751, 624,1137,3301,2619, 65,3302,2045, # 1488 1761,1859,3120,1930,3694,3516, 663,1767, 852, 835,3695, 269, 767,2826,2339,1305, # 1504 896,1150, 770,1616,6118, 506,1502,2075,1012,2519, 775,2520,2975,2340,2938,4314, # 1520 3028,2086,1224,1943,2286,6119,3072,4315,2240,1273,1987,3935,1557, 175, 597, 985, # 1536 3517,2419,2521,1416,3029, 585, 938,1931,1007,1052,1932,1685,6120,3379,4316,4623, # 1552 804, 599,3121,1333,2128,2539,1159,1554,2032,3810, 687,2033,2904, 952, 675,1467, # 1568 3436,6121,2241,1096,1786,2440,1543,1924, 980,1813,2228, 781,2692,1879, 728,1918, # 1584 3696,4624, 548,1950,4625,1809,1088,1356,3303,2522,1944, 502, 972, 373, 513,2827, # 1600 586,2377,2391,1003,1976,1631,6122,2464,1084, 648,1776,4626,2141, 324, 962,2012, # 1616 2177,2076,1384, 742,2178,1448,1173,1810, 222, 102, 301, 445, 125,2420, 662,2498, # 1632 277, 200,1476,1165,1068, 224,2562,1378,1446, 450,1880, 659, 791, 582,4627,2939, # 1648 3936,1516,1274, 
555,2099,3697,1020,1389,1526,3380,1762,1723,1787,2229, 412,2114, # 1664 1900,2392,3518, 512,2597, 427,1925,2341,3122,1653,1686,2465,2499, 697, 330, 273, # 1680 380,2162, 951, 832, 780, 991,1301,3073, 965,2270,3519, 668,2523,2636,1286, 535, # 1696 1407, 518, 671, 957,2658,2378, 267, 611,2197,3030,6123, 248,2299, 967,1799,2356, # 1712 850,1418,3437,1876,1256,1480,2828,1718,6124,6125,1755,1664,2405,6126,4628,2879, # 1728 2829, 499,2179, 676,4629, 557,2329,2214,2090, 325,3234, 464, 811,3001, 992,2342, # 1744 2481,1232,1469, 303,2242, 466,1070,2163, 603,1777,2091,4630,2752,4631,2714, 322, # 1760 2659,1964,1768, 481,2188,1463,2330,2857,3600,2092,3031,2421,4632,2318,2070,1849, # 1776 2598,4633,1302,2254,1668,1701,2422,3811,2905,3032,3123,2046,4106,1763,1694,4634, # 1792 1604, 943,1724,1454, 917, 868,2215,1169,2940, 552,1145,1800,1228,1823,1955, 316, # 1808 1080,2510, 361,1807,2830,4107,2660,3381,1346,1423,1134,4108,6127, 541,1263,1229, # 1824 1148,2540, 545, 465,1833,2880,3438,1901,3074,2482, 816,3937, 713,1788,2500, 122, # 1840 1575, 195,1451,2501,1111,6128, 859, 374,1225,2243,2483,4317, 390,1033,3439,3075, # 1856 2524,1687, 266, 793,1440,2599, 946, 779, 802, 507, 897,1081, 528,2189,1292, 711, # 1872 1866,1725,1167,1640, 753, 398,2661,1053, 246, 348,4318, 137,1024,3440,1600,2077, # 1888 2129, 825,4319, 698, 238, 521, 187,2300,1157,2423,1641,1605,1464,1610,1097,2541, # 1904 1260,1436, 759,2255,1814,2150, 705,3235, 409,2563,3304, 561,3033,2005,2564, 726, # 1920 1956,2343,3698,4109, 949,3812,3813,3520,1669, 653,1379,2525, 881,2198, 632,2256, # 1936 1027, 778,1074, 733,1957, 514,1481,2466, 554,2180, 702,3938,1606,1017,1398,6129, # 1952 1380,3521, 921, 993,1313, 594, 449,1489,1617,1166, 768,1426,1360, 495,1794,3601, # 1968 1177,3602,1170,4320,2344, 476, 425,3167,4635,3168,1424, 401,2662,1171,3382,1998, # 1984 1089,4110, 477,3169, 474,6130,1909, 596,2831,1842, 494, 693,1051,1028,1207,3076, # 2000 606,2115, 727,2790,1473,1115, 743,3522, 630, 805,1532,4321,2021, 366,1057, 838, # 2016 684,1114,2142,4322,2050,1492,1892,1808,2271,3814,2424,1971,1447,1373,3305,1090, # 2032 1536,3939,3523,3306,1455,2199, 336, 369,2331,1035, 584,2393, 902, 718,2600,6131, # 2048 2753, 463,2151,1149,1611,2467, 715,1308,3124,1268, 343,1413,3236,1517,1347,2663, # 2064 2093,3940,2022,1131,1553,2100,2941,1427,3441,2942,1323,2484,6132,1980, 872,2368, # 2080 2441,2943, 320,2369,2116,1082, 679,1933,3941,2791,3815, 625,1143,2023, 422,2200, # 2096 3816,6133, 730,1695, 356,2257,1626,2301,2858,2637,1627,1778, 937, 883,2906,2693, # 2112 3002,1769,1086, 400,1063,1325,3307,2792,4111,3077, 456,2345,1046, 747,6134,1524, # 2128 884,1094,3383,1474,2164,1059, 974,1688,2181,2258,1047, 345,1665,1187, 358, 875, # 2144 3170, 305, 660,3524,2190,1334,1135,3171,1540,1649,2542,1527, 927, 968,2793, 885, # 2160 1972,1850, 482, 500,2638,1218,1109,1085,2543,1654,2034, 876, 78,2287,1482,1277, # 2176 861,1675,1083,1779, 724,2754, 454, 397,1132,1612,2332, 893, 672,1237, 257,2259, # 2192 2370, 135,3384, 337,2244, 547, 352, 340, 709,2485,1400, 788,1138,2511, 540, 772, # 2208 1682,2260,2272,2544,2013,1843,1902,4636,1999,1562,2288,4637,2201,1403,1533, 407, # 2224 576,3308,1254,2071, 978,3385, 170, 136,1201,3125,2664,3172,2394, 213, 912, 873, # 2240 3603,1713,2202, 699,3604,3699, 813,3442, 493, 531,1054, 468,2907,1483, 304, 281, # 2256 4112,1726,1252,2094, 339,2319,2130,2639, 756,1563,2944, 748, 571,2976,1588,2425, # 2272 2715,1851,1460,2426,1528,1392,1973,3237, 288,3309, 685,3386, 296, 892,2716,2216, # 2288 1570,2245, 722,1747,2217, 
905,3238,1103,6135,1893,1441,1965, 251,1805,2371,3700, # 2304 2601,1919,1078, 75,2182,1509,1592,1270,2640,4638,2152,6136,3310,3817, 524, 706, # 2320 1075, 292,3818,1756,2602, 317, 98,3173,3605,3525,1844,2218,3819,2502, 814, 567, # 2336 385,2908,1534,6137, 534,1642,3239, 797,6138,1670,1529, 953,4323, 188,1071, 538, # 2352 178, 729,3240,2109,1226,1374,2000,2357,2977, 731,2468,1116,2014,2051,6139,1261, # 2368 1593, 803,2859,2736,3443, 556, 682, 823,1541,6140,1369,2289,1706,2794, 845, 462, # 2384 2603,2665,1361, 387, 162,2358,1740, 739,1770,1720,1304,1401,3241,1049, 627,1571, # 2400 2427,3526,1877,3942,1852,1500, 431,1910,1503, 677, 297,2795, 286,1433,1038,1198, # 2416 2290,1133,1596,4113,4639,2469,1510,1484,3943,6141,2442, 108, 712,4640,2372, 866, # 2432 3701,2755,3242,1348, 834,1945,1408,3527,2395,3243,1811, 824, 994,1179,2110,1548, # 2448 1453, 790,3003, 690,4324,4325,2832,2909,3820,1860,3821, 225,1748, 310, 346,1780, # 2464 2470, 821,1993,2717,2796, 828, 877,3528,2860,2471,1702,2165,2910,2486,1789, 453, # 2480 359,2291,1676, 73,1164,1461,1127,3311, 421, 604, 314,1037, 589, 116,2487, 737, # 2496 837,1180, 111, 244, 735,6142,2261,1861,1362, 986, 523, 418, 581,2666,3822, 103, # 2512 855, 503,1414,1867,2488,1091, 657,1597, 979, 605,1316,4641,1021,2443,2078,2001, # 2528 1209, 96, 587,2166,1032, 260,1072,2153, 173, 94, 226,3244, 819,2006,4642,4114, # 2544 2203, 231,1744, 782, 97,2667, 786,3387, 887, 391, 442,2219,4326,1425,6143,2694, # 2560 633,1544,1202, 483,2015, 592,2052,1958,2472,1655, 419, 129,4327,3444,3312,1714, # 2576 1257,3078,4328,1518,1098, 865,1310,1019,1885,1512,1734, 469,2444, 148, 773, 436, # 2592 1815,1868,1128,1055,4329,1245,2756,3445,2154,1934,1039,4643, 579,1238, 932,2320, # 2608 353, 205, 801, 115,2428, 944,2321,1881, 399,2565,1211, 678, 766,3944, 335,2101, # 2624 1459,1781,1402,3945,2737,2131,1010, 844, 981,1326,1013, 550,1816,1545,2620,1335, # 2640 1008, 371,2881, 936,1419,1613,3529,1456,1395,2273,1834,2604,1317,2738,2503, 416, # 2656 1643,4330, 806,1126, 229, 591,3946,1314,1981,1576,1837,1666, 347,1790, 977,3313, # 2672 764,2861,1853, 688,2429,1920,1462, 77, 595, 415,2002,3034, 798,1192,4115,6144, # 2688 2978,4331,3035,2695,2582,2072,2566, 430,2430,1727, 842,1396,3947,3702, 613, 377, # 2704 278, 236,1417,3388,3314,3174, 757,1869, 107,3530,6145,1194, 623,2262, 207,1253, # 2720 2167,3446,3948, 492,1117,1935, 536,1838,2757,1246,4332, 696,2095,2406,1393,1572, # 2736 3175,1782, 583, 190, 253,1390,2230, 830,3126,3389, 934,3245,1703,1749,2979,1870, # 2752 2545,1656,2204, 869,2346,4116,3176,1817, 496,1764,4644, 942,1504, 404,1903,1122, # 2768 1580,3606,2945,1022, 515, 372,1735, 955,2431,3036,6146,2797,1110,2302,2798, 617, # 2784 6147, 441, 762,1771,3447,3607,3608,1904, 840,3037, 86, 939,1385, 572,1370,2445, # 2800 1336, 114,3703, 898, 294, 203,3315, 703,1583,2274, 429, 961,4333,1854,1951,3390, # 2816 2373,3704,4334,1318,1381, 966,1911,2322,1006,1155, 309, 989, 458,2718,1795,1372, # 2832 1203, 252,1689,1363,3177, 517,1936, 168,1490, 562, 193,3823,1042,4117,1835, 551, # 2848 470,4645, 395, 489,3448,1871,1465,2583,2641, 417,1493, 279,1295, 511,1236,1119, # 2864 72,1231,1982,1812,3004, 871,1564, 984,3449,1667,2696,2096,4646,2347,2833,1673, # 2880 3609, 695,3246,2668, 807,1183,4647, 890, 388,2333,1801,1457,2911,1765,1477,1031, # 2896 3316,3317,1278,3391,2799,2292,2526, 163,3450,4335,2669,1404,1802,6148,2323,2407, # 2912 1584,1728,1494,1824,1269, 298, 909,3318,1034,1632, 375, 776,1683,2061, 291, 210, # 2928 1123, 809,1249,1002,2642,3038, 206,1011,2132, 144, 975, 882,1565, 342, 667, 754, 
# 2944 1442,2143,1299,2303,2062, 447, 626,2205,1221,2739,2912,1144,1214,2206,2584, 760, # 2960 1715, 614, 950,1281,2670,2621, 810, 577,1287,2546,4648, 242,2168, 250,2643, 691, # 2976 123,2644, 647, 313,1029, 689,1357,2946,1650, 216, 771,1339,1306, 808,2063, 549, # 2992 913,1371,2913,2914,6149,1466,1092,1174,1196,1311,2605,2396,1783,1796,3079, 406, # 3008 2671,2117,3949,4649, 487,1825,2220,6150,2915, 448,2348,1073,6151,2397,1707, 130, # 3024 900,1598, 329, 176,1959,2527,1620,6152,2275,4336,3319,1983,2191,3705,3610,2155, # 3040 3706,1912,1513,1614,6153,1988, 646, 392,2304,1589,3320,3039,1826,1239,1352,1340, # 3056 2916, 505,2567,1709,1437,2408,2547, 906,6154,2672, 384,1458,1594,1100,1329, 710, # 3072 423,3531,2064,2231,2622,1989,2673,1087,1882, 333, 841,3005,1296,2882,2379, 580, # 3088 1937,1827,1293,2585, 601, 574, 249,1772,4118,2079,1120, 645, 901,1176,1690, 795, # 3104 2207, 478,1434, 516,1190,1530, 761,2080, 930,1264, 355, 435,1552, 644,1791, 987, # 3120 220,1364,1163,1121,1538, 306,2169,1327,1222, 546,2645, 218, 241, 610,1704,3321, # 3136 1984,1839,1966,2528, 451,6155,2586,3707,2568, 907,3178, 254,2947, 186,1845,4650, # 3152 745, 432,1757, 428,1633, 888,2246,2221,2489,3611,2118,1258,1265, 956,3127,1784, # 3168 4337,2490, 319, 510, 119, 457,3612, 274,2035,2007,4651,1409,3128, 970,2758, 590, # 3184 2800, 661,2247,4652,2008,3950,1420,1549,3080,3322,3951,1651,1375,2111, 485,2491, # 3200 1429,1156,6156,2548,2183,1495, 831,1840,2529,2446, 501,1657, 307,1894,3247,1341, # 3216 666, 899,2156,1539,2549,1559, 886, 349,2208,3081,2305,1736,3824,2170,2759,1014, # 3232 1913,1386, 542,1397,2948, 490, 368, 716, 362, 159, 282,2569,1129,1658,1288,1750, # 3248 2674, 276, 649,2016, 751,1496, 658,1818,1284,1862,2209,2087,2512,3451, 622,2834, # 3264 376, 117,1060,2053,1208,1721,1101,1443, 247,1250,3179,1792,3952,2760,2398,3953, # 3280 6157,2144,3708, 446,2432,1151,2570,3452,2447,2761,2835,1210,2448,3082, 424,2222, # 3296 1251,2449,2119,2836, 504,1581,4338, 602, 817, 857,3825,2349,2306, 357,3826,1470, # 3312 1883,2883, 255, 958, 929,2917,3248, 302,4653,1050,1271,1751,2307,1952,1430,2697, # 3328 2719,2359, 354,3180, 777, 158,2036,4339,1659,4340,4654,2308,2949,2248,1146,2232, # 3344 3532,2720,1696,2623,3827,6158,3129,1550,2698,1485,1297,1428, 637, 931,2721,2145, # 3360 914,2550,2587, 81,2450, 612, 827,2646,1242,4655,1118,2884, 472,1855,3181,3533, # 3376 3534, 569,1353,2699,1244,1758,2588,4119,2009,2762,2171,3709,1312,1531,6159,1152, # 3392 1938, 134,1830, 471,3710,2276,1112,1535,3323,3453,3535, 982,1337,2950, 488, 826, # 3408 674,1058,1628,4120,2017, 522,2399, 211, 568,1367,3454, 350, 293,1872,1139,3249, # 3424 1399,1946,3006,1300,2360,3324, 588, 736,6160,2606, 744, 669,3536,3828,6161,1358, # 3440 199, 723, 848, 933, 851,1939,1505,1514,1338,1618,1831,4656,1634,3613, 443,2740, # 3456 3829, 717,1947, 491,1914,6162,2551,1542,4121,1025,6163,1099,1223, 198,3040,2722, # 3472 370, 410,1905,2589, 998,1248,3182,2380, 519,1449,4122,1710, 947, 928,1153,4341, # 3488 2277, 344,2624,1511, 615, 105, 161,1212,1076,1960,3130,2054,1926,1175,1906,2473, # 3504 414,1873,2801,6164,2309, 315,1319,3325, 318,2018,2146,2157, 963, 631, 223,4342, # 3520 4343,2675, 479,3711,1197,2625,3712,2676,2361,6165,4344,4123,6166,2451,3183,1886, # 3536 2184,1674,1330,1711,1635,1506, 799, 219,3250,3083,3954,1677,3713,3326,2081,3614, # 3552 1652,2073,4657,1147,3041,1752, 643,1961, 147,1974,3955,6167,1716,2037, 918,3007, # 3568 1994, 120,1537, 118, 609,3184,4345, 740,3455,1219, 332,1615,3830,6168,1621,2980, # 3584 1582, 783, 212, 
553,2350,3714,1349,2433,2082,4124, 889,6169,2310,1275,1410, 973, # 3600 166,1320,3456,1797,1215,3185,2885,1846,2590,2763,4658, 629, 822,3008, 763, 940, # 3616 1990,2862, 439,2409,1566,1240,1622, 926,1282,1907,2764, 654,2210,1607, 327,1130, # 3632 3956,1678,1623,6170,2434,2192, 686, 608,3831,3715, 903,3957,3042,6171,2741,1522, # 3648 1915,1105,1555,2552,1359, 323,3251,4346,3457, 738,1354,2553,2311,2334,1828,2003, # 3664 3832,1753,2351,1227,6172,1887,4125,1478,6173,2410,1874,1712,1847, 520,1204,2607, # 3680 264,4659, 836,2677,2102, 600,4660,3833,2278,3084,6174,4347,3615,1342, 640, 532, # 3696 543,2608,1888,2400,2591,1009,4348,1497, 341,1737,3616,2723,1394, 529,3252,1321, # 3712 983,4661,1515,2120, 971,2592, 924, 287,1662,3186,4349,2700,4350,1519, 908,1948, # 3728 2452, 156, 796,1629,1486,2223,2055, 694,4126,1259,1036,3392,1213,2249,2742,1889, # 3744 1230,3958,1015, 910, 408, 559,3617,4662, 746, 725, 935,4663,3959,3009,1289, 563, # 3760 867,4664,3960,1567,2981,2038,2626, 988,2263,2381,4351, 143,2374, 704,1895,6175, # 3776 1188,3716,2088, 673,3085,2362,4352, 484,1608,1921,2765,2918, 215, 904,3618,3537, # 3792 894, 509, 976,3043,2701,3961,4353,2837,2982, 498,6176,6177,1102,3538,1332,3393, # 3808 1487,1636,1637, 233, 245,3962, 383, 650, 995,3044, 460,1520,1206,2352, 749,3327, # 3824 530, 700, 389,1438,1560,1773,3963,2264, 719,2951,2724,3834, 870,1832,1644,1000, # 3840 839,2474,3717, 197,1630,3394, 365,2886,3964,1285,2133, 734, 922, 818,1106, 732, # 3856 480,2083,1774,3458, 923,2279,1350, 221,3086, 85,2233,2234,3835,1585,3010,2147, # 3872 1387,1705,2382,1619,2475, 133, 239,2802,1991,1016,2084,2383, 411,2838,1113, 651, # 3888 1985,1160,3328, 990,1863,3087,1048,1276,2647, 265,2627,1599,3253,2056, 150, 638, # 3904 2019, 656, 853, 326,1479, 680,1439,4354,1001,1759, 413,3459,3395,2492,1431, 459, # 3920 4355,1125,3329,2265,1953,1450,2065,2863, 849, 351,2678,3131,3254,3255,1104,1577, # 3936 227,1351,1645,2453,2193,1421,2887, 812,2121, 634, 95,2435, 201,2312,4665,1646, # 3952 1671,2743,1601,2554,2702,2648,2280,1315,1366,2089,3132,1573,3718,3965,1729,1189, # 3968 328,2679,1077,1940,1136, 558,1283, 964,1195, 621,2074,1199,1743,3460,3619,1896, # 3984 1916,1890,3836,2952,1154,2112,1064, 862, 378,3011,2066,2113,2803,1568,2839,6178, # 4000 3088,2919,1941,1660,2004,1992,2194, 142, 707,1590,1708,1624,1922,1023,1836,1233, # 4016 1004,2313, 789, 741,3620,6179,1609,2411,1200,4127,3719,3720,4666,2057,3721, 593, # 4032 2840, 367,2920,1878,6180,3461,1521, 628,1168, 692,2211,2649, 300, 720,2067,2571, # 4048 2953,3396, 959,2504,3966,3539,3462,1977, 701,6181, 954,1043, 800, 681, 183,3722, # 4064 1803,1730,3540,4128,2103, 815,2314, 174, 467, 230,2454,1093,2134, 755,3541,3397, # 4080 1141,1162,6182,1738,2039, 270,3256,2513,1005,1647,2185,3837, 858,1679,1897,1719, # 4096 2954,2324,1806, 402, 670, 167,4129,1498,2158,2104, 750,6183, 915, 189,1680,1551, # 4112 455,4356,1501,2455, 405,1095,2955, 338,1586,1266,1819, 570, 641,1324, 237,1556, # 4128 2650,1388,3723,6184,1368,2384,1343,1978,3089,2436, 879,3724, 792,1191, 758,3012, # 4144 1411,2135,1322,4357, 240,4667,1848,3725,1574,6185, 420,3045,1546,1391, 714,4358, # 4160 1967, 941,1864, 863, 664, 426, 560,1731,2680,1785,2864,1949,2363, 403,3330,1415, # 4176 1279,2136,1697,2335, 204, 721,2097,3838, 90,6186,2085,2505, 191,3967, 124,2148, # 4192 1376,1798,1178,1107,1898,1405, 860,4359,1243,1272,2375,2983,1558,2456,1638, 113, # 4208 3621, 578,1923,2609, 880, 386,4130, 784,2186,2266,1422,2956,2172,1722, 497, 263, # 4224 2514,1267,2412,2610, 177,2703,3542, 774,1927,1344, 
616,1432,1595,1018, 172,4360, # 4240 2325, 911,4361, 438,1468,3622, 794,3968,2024,2173,1681,1829,2957, 945, 895,3090, # 4256 575,2212,2476, 475,2401,2681, 785,2744,1745,2293,2555,1975,3133,2865, 394,4668, # 4272 3839, 635,4131, 639, 202,1507,2195,2766,1345,1435,2572,3726,1908,1184,1181,2457, # 4288 3727,3134,4362, 843,2611, 437, 916,4669, 234, 769,1884,3046,3047,3623, 833,6187, # 4304 1639,2250,2402,1355,1185,2010,2047, 999, 525,1732,1290,1488,2612, 948,1578,3728, # 4320 2413,2477,1216,2725,2159, 334,3840,1328,3624,2921,1525,4132, 564,1056, 891,4363, # 4336 1444,1698,2385,2251,3729,1365,2281,2235,1717,6188, 864,3841,2515, 444, 527,2767, # 4352 2922,3625, 544, 461,6189, 566, 209,2437,3398,2098,1065,2068,3331,3626,3257,2137, # 4368 #last 512 #Everything below is of no interest for detection purpose 2138,2122,3730,2888,1995,1820,1044,6190,6191,6192,6193,6194,6195,6196,6197,6198, # 4384 6199,6200,6201,6202,6203,6204,6205,4670,6206,6207,6208,6209,6210,6211,6212,6213, # 4400 6214,6215,6216,6217,6218,6219,6220,6221,6222,6223,6224,6225,6226,6227,6228,6229, # 4416 6230,6231,6232,6233,6234,6235,6236,6237,3187,6238,6239,3969,6240,6241,6242,6243, # 4432 6244,4671,6245,6246,4672,6247,6248,4133,6249,6250,4364,6251,2923,2556,2613,4673, # 4448 4365,3970,6252,6253,6254,6255,4674,6256,6257,6258,2768,2353,4366,4675,4676,3188, # 4464 4367,3463,6259,4134,4677,4678,6260,2267,6261,3842,3332,4368,3543,6262,6263,6264, # 4480 3013,1954,1928,4135,4679,6265,6266,2478,3091,6267,4680,4369,6268,6269,1699,6270, # 4496 3544,4136,4681,6271,4137,6272,4370,2804,6273,6274,2593,3971,3972,4682,6275,2236, # 4512 4683,6276,6277,4684,6278,6279,4138,3973,4685,6280,6281,3258,6282,6283,6284,6285, # 4528 3974,4686,2841,3975,6286,6287,3545,6288,6289,4139,4687,4140,6290,4141,6291,4142, # 4544 6292,6293,3333,6294,6295,6296,4371,6297,3399,6298,6299,4372,3976,6300,6301,6302, # 4560 4373,6303,6304,3843,3731,6305,4688,4374,6306,6307,3259,2294,6308,3732,2530,4143, # 4576 6309,4689,6310,6311,6312,3048,6313,6314,4690,3733,2237,6315,6316,2282,3334,6317, # 4592 6318,3844,6319,6320,4691,6321,3400,4692,6322,4693,6323,3049,6324,4375,6325,3977, # 4608 6326,6327,6328,3546,6329,4694,3335,6330,4695,4696,6331,6332,6333,6334,4376,3978, # 4624 6335,4697,3979,4144,6336,3980,4698,6337,6338,6339,6340,6341,4699,4700,4701,6342, # 4640 6343,4702,6344,6345,4703,6346,6347,4704,6348,4705,4706,3135,6349,4707,6350,4708, # 4656 6351,4377,6352,4709,3734,4145,6353,2506,4710,3189,6354,3050,4711,3981,6355,3547, # 4672 3014,4146,4378,3735,2651,3845,3260,3136,2224,1986,6356,3401,6357,4712,2594,3627, # 4688 3137,2573,3736,3982,4713,3628,4714,4715,2682,3629,4716,6358,3630,4379,3631,6359, # 4704 6360,6361,3983,6362,6363,6364,6365,4147,3846,4717,6366,6367,3737,2842,6368,4718, # 4720 2628,6369,3261,6370,2386,6371,6372,3738,3984,4719,3464,4720,3402,6373,2924,3336, # 4736 4148,2866,6374,2805,3262,4380,2704,2069,2531,3138,2806,2984,6375,2769,6376,4721, # 4752 4722,3403,6377,6378,3548,6379,6380,2705,3092,1979,4149,2629,3337,2889,6381,3338, # 4768 4150,2557,3339,4381,6382,3190,3263,3739,6383,4151,4723,4152,2558,2574,3404,3191, # 4784 6384,6385,4153,6386,4724,4382,6387,6388,4383,6389,6390,4154,6391,4725,3985,6392, # 4800 3847,4155,6393,6394,6395,6396,6397,3465,6398,4384,6399,6400,6401,6402,6403,6404, # 4816 4156,6405,6406,6407,6408,2123,6409,6410,2326,3192,4726,6411,6412,6413,6414,4385, # 4832 4157,6415,6416,4158,6417,3093,3848,6418,3986,6419,6420,3849,6421,6422,6423,4159, # 4848 6424,6425,4160,6426,3740,6427,6428,6429,6430,3987,6431,4727,6432,2238,6433,6434, # 4864 
4386,3988,6435,6436,3632,6437,6438,2843,6439,6440,6441,6442,3633,6443,2958,6444, # 4880 6445,3466,6446,2364,4387,3850,6447,4388,2959,3340,6448,3851,6449,4728,6450,6451, # 4896 3264,4729,6452,3193,6453,4389,4390,2706,3341,4730,6454,3139,6455,3194,6456,3051, # 4912 2124,3852,1602,4391,4161,3853,1158,3854,4162,3989,4392,3990,4731,4732,4393,2040, # 4928 4163,4394,3265,6457,2807,3467,3855,6458,6459,6460,3991,3468,4733,4734,6461,3140, # 4944 2960,6462,4735,6463,6464,6465,6466,4736,4737,4738,4739,6467,6468,4164,2403,3856, # 4960 6469,6470,2770,2844,6471,4740,6472,6473,6474,6475,6476,6477,6478,3195,6479,4741, # 4976 4395,6480,2867,6481,4742,2808,6482,2493,4165,6483,6484,6485,6486,2295,4743,6487, # 4992 6488,6489,3634,6490,6491,6492,6493,6494,6495,6496,2985,4744,6497,6498,4745,6499, # 5008 6500,2925,3141,4166,6501,6502,4746,6503,6504,4747,6505,6506,6507,2890,6508,6509, # 5024 6510,6511,6512,6513,6514,6515,6516,6517,6518,6519,3469,4167,6520,6521,6522,4748, # 5040 4396,3741,4397,4749,4398,3342,2125,4750,6523,4751,4752,4753,3052,6524,2961,4168, # 5056 6525,4754,6526,4755,4399,2926,4169,6527,3857,6528,4400,4170,6529,4171,6530,6531, # 5072 2595,6532,6533,6534,6535,3635,6536,6537,6538,6539,6540,6541,6542,4756,6543,6544, # 5088 6545,6546,6547,6548,4401,6549,6550,6551,6552,4402,3405,4757,4403,6553,6554,6555, # 5104 4172,3742,6556,6557,6558,3992,3636,6559,6560,3053,2726,6561,3549,4173,3054,4404, # 5120 6562,6563,3993,4405,3266,3550,2809,4406,6564,6565,6566,4758,4759,6567,3743,6568, # 5136 4760,3744,4761,3470,6569,6570,6571,4407,6572,3745,4174,6573,4175,2810,4176,3196, # 5152 4762,6574,4177,6575,6576,2494,2891,3551,6577,6578,3471,6579,4408,6580,3015,3197, # 5168 6581,3343,2532,3994,3858,6582,3094,3406,4409,6583,2892,4178,4763,4410,3016,4411, # 5184 6584,3995,3142,3017,2683,6585,4179,6586,6587,4764,4412,6588,6589,4413,6590,2986, # 5200 6591,2962,3552,6592,2963,3472,6593,6594,4180,4765,6595,6596,2225,3267,4414,6597, # 5216 3407,3637,4766,6598,6599,3198,6600,4415,6601,3859,3199,6602,3473,4767,2811,4416, # 5232 1856,3268,3200,2575,3996,3997,3201,4417,6603,3095,2927,6604,3143,6605,2268,6606, # 5248 3998,3860,3096,2771,6607,6608,3638,2495,4768,6609,3861,6610,3269,2745,4769,4181, # 5264 3553,6611,2845,3270,6612,6613,6614,3862,6615,6616,4770,4771,6617,3474,3999,4418, # 5280 4419,6618,3639,3344,6619,4772,4182,6620,2126,6621,6622,6623,4420,4773,6624,3018, # 5296 6625,4774,3554,6626,4183,2025,3746,6627,4184,2707,6628,4421,4422,3097,1775,4185, # 5312 3555,6629,6630,2868,6631,6632,4423,6633,6634,4424,2414,2533,2928,6635,4186,2387, # 5328 6636,4775,6637,4187,6638,1891,4425,3202,3203,6639,6640,4776,6641,3345,6642,6643, # 5344 3640,6644,3475,3346,3641,4000,6645,3144,6646,3098,2812,4188,3642,3204,6647,3863, # 5360 3476,6648,3864,6649,4426,4001,6650,6651,6652,2576,6653,4189,4777,6654,6655,6656, # 5376 2846,6657,3477,3205,4002,6658,4003,6659,3347,2252,6660,6661,6662,4778,6663,6664, # 5392 6665,6666,6667,6668,6669,4779,4780,2048,6670,3478,3099,6671,3556,3747,4004,6672, # 5408 6673,6674,3145,4005,3748,6675,6676,6677,6678,6679,3408,6680,6681,6682,6683,3206, # 5424 3207,6684,6685,4781,4427,6686,4782,4783,4784,6687,6688,6689,4190,6690,6691,3479, # 5440 6692,2746,6693,4428,6694,6695,6696,6697,6698,6699,4785,6700,6701,3208,2727,6702, # 5456 3146,6703,6704,3409,2196,6705,4429,6706,6707,6708,2534,1996,6709,6710,6711,2747, # 5472 6712,6713,6714,4786,3643,6715,4430,4431,6716,3557,6717,4432,4433,6718,6719,6720, # 5488 6721,3749,6722,4006,4787,6723,6724,3644,4788,4434,6725,6726,4789,2772,6727,6728, # 5504 
6729,6730,6731,2708,3865,2813,4435,6732,6733,4790,4791,3480,6734,6735,6736,6737, # 5520 4436,3348,6738,3410,4007,6739,6740,4008,6741,6742,4792,3411,4191,6743,6744,6745, # 5536 6746,6747,3866,6748,3750,6749,6750,6751,6752,6753,6754,6755,3867,6756,4009,6757, # 5552 4793,4794,6758,2814,2987,6759,6760,6761,4437,6762,6763,6764,6765,3645,6766,6767, # 5568 3481,4192,6768,3751,6769,6770,2174,6771,3868,3752,6772,6773,6774,4193,4795,4438, # 5584 3558,4796,4439,6775,4797,6776,6777,4798,6778,4799,3559,4800,6779,6780,6781,3482, # 5600 6782,2893,6783,6784,4194,4801,4010,6785,6786,4440,6787,4011,6788,6789,6790,6791, # 5616 6792,6793,4802,6794,6795,6796,4012,6797,6798,6799,6800,3349,4803,3483,6801,4804, # 5632 4195,6802,4013,6803,6804,4196,6805,4014,4015,6806,2847,3271,2848,6807,3484,6808, # 5648 6809,6810,4441,6811,4442,4197,4443,3272,4805,6812,3412,4016,1579,6813,6814,4017, # 5664 6815,3869,6816,2964,6817,4806,6818,6819,4018,3646,6820,6821,4807,4019,4020,6822, # 5680 6823,3560,6824,6825,4021,4444,6826,4198,6827,6828,4445,6829,6830,4199,4808,6831, # 5696 6832,6833,3870,3019,2458,6834,3753,3413,3350,6835,4809,3871,4810,3561,4446,6836, # 5712 6837,4447,4811,4812,6838,2459,4448,6839,4449,6840,6841,4022,3872,6842,4813,4814, # 5728 6843,6844,4815,4200,4201,4202,6845,4023,6846,6847,4450,3562,3873,6848,6849,4816, # 5744 4817,6850,4451,4818,2139,6851,3563,6852,6853,3351,6854,6855,3352,4024,2709,3414, # 5760 4203,4452,6856,4204,6857,6858,3874,3875,6859,6860,4819,6861,6862,6863,6864,4453, # 5776 3647,6865,6866,4820,6867,6868,6869,6870,4454,6871,2869,6872,6873,4821,6874,3754, # 5792 6875,4822,4205,6876,6877,6878,3648,4206,4455,6879,4823,6880,4824,3876,6881,3055, # 5808 4207,6882,3415,6883,6884,6885,4208,4209,6886,4210,3353,6887,3354,3564,3209,3485, # 5824 2652,6888,2728,6889,3210,3755,6890,4025,4456,6891,4825,6892,6893,6894,6895,4211, # 5840 6896,6897,6898,4826,6899,6900,4212,6901,4827,6902,2773,3565,6903,4828,6904,6905, # 5856 6906,6907,3649,3650,6908,2849,3566,6909,3567,3100,6910,6911,6912,6913,6914,6915, # 5872 4026,6916,3355,4829,3056,4457,3756,6917,3651,6918,4213,3652,2870,6919,4458,6920, # 5888 2438,6921,6922,3757,2774,4830,6923,3356,4831,4832,6924,4833,4459,3653,2507,6925, # 5904 4834,2535,6926,6927,3273,4027,3147,6928,3568,6929,6930,6931,4460,6932,3877,4461, # 5920 2729,3654,6933,6934,6935,6936,2175,4835,2630,4214,4028,4462,4836,4215,6937,3148, # 5936 4216,4463,4837,4838,4217,6938,6939,2850,4839,6940,4464,6941,6942,6943,4840,6944, # 5952 4218,3274,4465,6945,6946,2710,6947,4841,4466,6948,6949,2894,6950,6951,4842,6952, # 5968 4219,3057,2871,6953,6954,6955,6956,4467,6957,2711,6958,6959,6960,3275,3101,4843, # 5984 6961,3357,3569,6962,4844,6963,6964,4468,4845,3570,6965,3102,4846,3758,6966,4847, # 6000 3878,4848,4849,4029,6967,2929,3879,4850,4851,6968,6969,1733,6970,4220,6971,6972, # 6016 6973,6974,6975,6976,4852,6977,6978,6979,6980,6981,6982,3759,6983,6984,6985,3486, # 6032 3487,6986,3488,3416,6987,6988,6989,6990,6991,6992,6993,6994,6995,6996,6997,4853, # 6048 6998,6999,4030,7000,7001,3211,7002,7003,4221,7004,7005,3571,4031,7006,3572,7007, # 6064 2614,4854,2577,7008,7009,2965,3655,3656,4855,2775,3489,3880,4222,4856,3881,4032, # 6080 3882,3657,2730,3490,4857,7010,3149,7011,4469,4858,2496,3491,4859,2283,7012,7013, # 6096 7014,2365,4860,4470,7015,7016,3760,7017,7018,4223,1917,7019,7020,7021,4471,7022, # 6112 2776,4472,7023,7024,7025,7026,4033,7027,3573,4224,4861,4034,4862,7028,7029,1929, # 6128 3883,4035,7030,4473,3058,7031,2536,3761,3884,7032,4036,7033,2966,2895,1968,4474, # 6144 
3276,4225,3417,3492,4226,2105,7034,7035,1754,2596,3762,4227,4863,4475,3763,4864, # 6160 3764,2615,2777,3103,3765,3658,3418,4865,2296,3766,2815,7036,7037,7038,3574,2872, # 6176 3277,4476,7039,4037,4477,7040,7041,4038,7042,7043,7044,7045,7046,7047,2537,7048, # 6192 7049,7050,7051,7052,7053,7054,4478,7055,7056,3767,3659,4228,3575,7057,7058,4229, # 6208 7059,7060,7061,3660,7062,3212,7063,3885,4039,2460,7064,7065,7066,7067,7068,7069, # 6224 7070,7071,7072,7073,7074,4866,3768,4867,7075,7076,7077,7078,4868,3358,3278,2653, # 6240 7079,7080,4479,3886,7081,7082,4869,7083,7084,7085,7086,7087,7088,2538,7089,7090, # 6256 7091,4040,3150,3769,4870,4041,2896,3359,4230,2930,7092,3279,7093,2967,4480,3213, # 6272 4481,3661,7094,7095,7096,7097,7098,7099,7100,7101,7102,2461,3770,7103,7104,4231, # 6288 3151,7105,7106,7107,4042,3662,7108,7109,4871,3663,4872,4043,3059,7110,7111,7112, # 6304 3493,2988,7113,4873,7114,7115,7116,3771,4874,7117,7118,4232,4875,7119,3576,2336, # 6320 4876,7120,4233,3419,4044,4877,4878,4482,4483,4879,4484,4234,7121,3772,4880,1045, # 6336 3280,3664,4881,4882,7122,7123,7124,7125,4883,7126,2778,7127,4485,4486,7128,4884, # 6352 3214,3887,7129,7130,3215,7131,4885,4045,7132,7133,4046,7134,7135,7136,7137,7138, # 6368 7139,7140,7141,7142,7143,4235,7144,4886,7145,7146,7147,4887,7148,7149,7150,4487, # 6384 4047,4488,7151,7152,4888,4048,2989,3888,7153,3665,7154,4049,7155,7156,7157,7158, # 6400 7159,7160,2931,4889,4890,4489,7161,2631,3889,4236,2779,7162,7163,4891,7164,3060, # 6416 7165,1672,4892,7166,4893,4237,3281,4894,7167,7168,3666,7169,3494,7170,7171,4050, # 6432 7172,7173,3104,3360,3420,4490,4051,2684,4052,7174,4053,7175,7176,7177,2253,4054, # 6448 7178,7179,4895,7180,3152,3890,3153,4491,3216,7181,7182,7183,2968,4238,4492,4055, # 6464 7184,2990,7185,2479,7186,7187,4493,7188,7189,7190,7191,7192,4896,7193,4897,2969, # 6480 4494,4898,7194,3495,7195,7196,4899,4495,7197,3105,2731,7198,4900,7199,7200,7201, # 6496 4056,7202,3361,7203,7204,4496,4901,4902,7205,4497,7206,7207,2315,4903,7208,4904, # 6512 7209,4905,2851,7210,7211,3577,7212,3578,4906,7213,4057,3667,4907,7214,4058,2354, # 6528 3891,2376,3217,3773,7215,7216,7217,7218,7219,4498,7220,4908,3282,2685,7221,3496, # 6544 4909,2632,3154,4910,7222,2337,7223,4911,7224,7225,7226,4912,4913,3283,4239,4499, # 6560 7227,2816,7228,7229,7230,7231,7232,7233,7234,4914,4500,4501,7235,7236,7237,2686, # 6576 7238,4915,7239,2897,4502,7240,4503,7241,2516,7242,4504,3362,3218,7243,7244,7245, # 6592 4916,7246,7247,4505,3363,7248,7249,7250,7251,3774,4506,7252,7253,4917,7254,7255, # 6608 3284,2991,4918,4919,3219,3892,4920,3106,3497,4921,7256,7257,7258,4922,7259,4923, # 6624 3364,4507,4508,4059,7260,4240,3498,7261,7262,4924,7263,2992,3893,4060,3220,7264, # 6640 7265,7266,7267,7268,7269,4509,3775,7270,2817,7271,4061,4925,4510,3776,7272,4241, # 6656 4511,3285,7273,7274,3499,7275,7276,7277,4062,4512,4926,7278,3107,3894,7279,7280, # 6672 4927,7281,4513,7282,7283,3668,7284,7285,4242,4514,4243,7286,2058,4515,4928,4929, # 6688 4516,7287,3286,4244,7288,4517,7289,7290,7291,3669,7292,7293,4930,4931,4932,2355, # 6704 4933,7294,2633,4518,7295,4245,7296,7297,4519,7298,7299,4520,4521,4934,7300,4246, # 6720 4522,7301,7302,7303,3579,7304,4247,4935,7305,4936,7306,7307,7308,7309,3777,7310, # 6736 4523,7311,7312,7313,4248,3580,7314,4524,3778,4249,7315,3581,7316,3287,7317,3221, # 6752 7318,4937,7319,7320,7321,7322,7323,7324,4938,4939,7325,4525,7326,7327,7328,4063, # 6768 7329,7330,4940,7331,7332,4941,7333,4526,7334,3500,2780,1741,4942,2026,1742,7335, # 6784 
7336,3582,4527,2388,7337,7338,7339,4528,7340,4250,4943,7341,7342,7343,4944,7344, # 6800 7345,7346,3020,7347,4945,7348,7349,7350,7351,3895,7352,3896,4064,3897,7353,7354, # 6816 7355,4251,7356,7357,3898,7358,3779,7359,3780,3288,7360,7361,4529,7362,4946,4530, # 6832 2027,7363,3899,4531,4947,3222,3583,7364,4948,7365,7366,7367,7368,4949,3501,4950, # 6848 3781,4951,4532,7369,2517,4952,4252,4953,3155,7370,4954,4955,4253,2518,4533,7371, # 6864 7372,2712,4254,7373,7374,7375,3670,4956,3671,7376,2389,3502,4065,7377,2338,7378, # 6880 7379,7380,7381,3061,7382,4957,7383,7384,7385,7386,4958,4534,7387,7388,2993,7389, # 6896 3062,7390,4959,7391,7392,7393,4960,3108,4961,7394,4535,7395,4962,3421,4536,7396, # 6912 4963,7397,4964,1857,7398,4965,7399,7400,2176,3584,4966,7401,7402,3422,4537,3900, # 6928 3585,7403,3782,7404,2852,7405,7406,7407,4538,3783,2654,3423,4967,4539,7408,3784, # 6944 3586,2853,4540,4541,7409,3901,7410,3902,7411,7412,3785,3109,2327,3903,7413,7414, # 6960 2970,4066,2932,7415,7416,7417,3904,3672,3424,7418,4542,4543,4544,7419,4968,7420, # 6976 7421,4255,7422,7423,7424,7425,7426,4067,7427,3673,3365,4545,7428,3110,2559,3674, # 6992 7429,7430,3156,7431,7432,3503,7433,3425,4546,7434,3063,2873,7435,3223,4969,4547, # 7008 4548,2898,4256,4068,7436,4069,3587,3786,2933,3787,4257,4970,4971,3788,7437,4972, # 7024 3064,7438,4549,7439,7440,7441,7442,7443,4973,3905,7444,2874,7445,7446,7447,7448, # 7040 3021,7449,4550,3906,3588,4974,7450,7451,3789,3675,7452,2578,7453,4070,7454,7455, # 7056 7456,4258,3676,7457,4975,7458,4976,4259,3790,3504,2634,4977,3677,4551,4260,7459, # 7072 7460,7461,7462,3907,4261,4978,7463,7464,7465,7466,4979,4980,7467,7468,2213,4262, # 7088 7469,7470,7471,3678,4981,7472,2439,7473,4263,3224,3289,7474,3908,2415,4982,7475, # 7104 4264,7476,4983,2655,7477,7478,2732,4552,2854,2875,7479,7480,4265,7481,4553,4984, # 7120 7482,7483,4266,7484,3679,3366,3680,2818,2781,2782,3367,3589,4554,3065,7485,4071, # 7136 2899,7486,7487,3157,2462,4072,4555,4073,4985,4986,3111,4267,2687,3368,4556,4074, # 7152 3791,4268,7488,3909,2783,7489,2656,1962,3158,4557,4987,1963,3159,3160,7490,3112, # 7168 4988,4989,3022,4990,4991,3792,2855,7491,7492,2971,4558,7493,7494,4992,7495,7496, # 7184 7497,7498,4993,7499,3426,4559,4994,7500,3681,4560,4269,4270,3910,7501,4075,4995, # 7200 4271,7502,7503,4076,7504,4996,7505,3225,4997,4272,4077,2819,3023,7506,7507,2733, # 7216 4561,7508,4562,7509,3369,3793,7510,3590,2508,7511,7512,4273,3113,2994,2616,7513, # 7232 7514,7515,7516,7517,7518,2820,3911,4078,2748,7519,7520,4563,4998,7521,7522,7523, # 7248 7524,4999,4274,7525,4564,3682,2239,4079,4565,7526,7527,7528,7529,5000,7530,7531, # 7264 5001,4275,3794,7532,7533,7534,3066,5002,4566,3161,7535,7536,4080,7537,3162,7538, # 7280 7539,4567,7540,7541,7542,7543,7544,7545,5003,7546,4568,7547,7548,7549,7550,7551, # 7296 7552,7553,7554,7555,7556,5004,7557,7558,7559,5005,7560,3795,7561,4569,7562,7563, # 7312 7564,2821,3796,4276,4277,4081,7565,2876,7566,5006,7567,7568,2900,7569,3797,3912, # 7328 7570,7571,7572,4278,7573,7574,7575,5007,7576,7577,5008,7578,7579,4279,2934,7580, # 7344 7581,5009,7582,4570,7583,4280,7584,7585,7586,4571,4572,3913,7587,4573,3505,7588, # 7360 5010,7589,7590,7591,7592,3798,4574,7593,7594,5011,7595,4281,7596,7597,7598,4282, # 7376 5012,7599,7600,5013,3163,7601,5014,7602,3914,7603,7604,2734,4575,4576,4577,7605, # 7392 7606,7607,7608,7609,3506,5015,4578,7610,4082,7611,2822,2901,2579,3683,3024,4579, # 7408 3507,7612,4580,7613,3226,3799,5016,7614,7615,7616,7617,7618,7619,7620,2995,3290, # 7424 
7621,4083,7622,5017,7623,7624,7625,7626,7627,4581,3915,7628,3291,7629,5018,7630, # 7440 7631,7632,7633,4084,7634,7635,3427,3800,7636,7637,4582,7638,5019,4583,5020,7639, # 7456 3916,7640,3801,5021,4584,4283,7641,7642,3428,3591,2269,7643,2617,7644,4585,3592, # 7472 7645,4586,2902,7646,7647,3227,5022,7648,4587,7649,4284,7650,7651,7652,4588,2284, # 7488 7653,5023,7654,7655,7656,4589,5024,3802,7657,7658,5025,3508,4590,7659,7660,7661, # 7504 1969,5026,7662,7663,3684,1821,2688,7664,2028,2509,4285,7665,2823,1841,7666,2689, # 7520 3114,7667,3917,4085,2160,5027,5028,2972,7668,5029,7669,7670,7671,3593,4086,7672, # 7536 4591,4087,5030,3803,7673,7674,7675,7676,7677,7678,7679,4286,2366,4592,4593,3067, # 7552 2328,7680,7681,4594,3594,3918,2029,4287,7682,5031,3919,3370,4288,4595,2856,7683, # 7568 3509,7684,7685,5032,5033,7686,7687,3804,2784,7688,7689,7690,7691,3371,7692,7693, # 7584 2877,5034,7694,7695,3920,4289,4088,7696,7697,7698,5035,7699,5036,4290,5037,5038, # 7600 5039,7700,7701,7702,5040,5041,3228,7703,1760,7704,5042,3229,4596,2106,4089,7705, # 7616 4597,2824,5043,2107,3372,7706,4291,4090,5044,7707,4091,7708,5045,3025,3805,4598, # 7632 4292,4293,4294,3373,7709,4599,7710,5046,7711,7712,5047,5048,3806,7713,7714,7715, # 7648 5049,7716,7717,7718,7719,4600,5050,7720,7721,7722,5051,7723,4295,3429,7724,7725, # 7664 7726,7727,3921,7728,3292,5052,4092,7729,7730,7731,7732,7733,7734,7735,5053,5054, # 7680 7736,7737,7738,7739,3922,3685,7740,7741,7742,7743,2635,5055,7744,5056,4601,7745, # 7696 7746,2560,7747,7748,7749,7750,3923,7751,7752,7753,7754,7755,4296,2903,7756,7757, # 7712 7758,7759,7760,3924,7761,5057,4297,7762,7763,5058,4298,7764,4093,7765,7766,5059, # 7728 3925,7767,7768,7769,7770,7771,7772,7773,7774,7775,7776,3595,7777,4299,5060,4094, # 7744 7778,3293,5061,7779,7780,4300,7781,7782,4602,7783,3596,7784,7785,3430,2367,7786, # 7760 3164,5062,5063,4301,7787,7788,4095,5064,5065,7789,3374,3115,7790,7791,7792,7793, # 7776 7794,7795,7796,3597,4603,7797,7798,3686,3116,3807,5066,7799,7800,5067,7801,7802, # 7792 4604,4302,5068,4303,4096,7803,7804,3294,7805,7806,5069,4605,2690,7807,3026,7808, # 7808 7809,7810,7811,7812,7813,7814,7815,7816,7817,7818,7819,7820,7821,7822,7823,7824, # 7824 7825,7826,7827,7828,7829,7830,7831,7832,7833,7834,7835,7836,7837,7838,7839,7840, # 7840 7841,7842,7843,7844,7845,7846,7847,7848,7849,7850,7851,7852,7853,7854,7855,7856, # 7856 7857,7858,7859,7860,7861,7862,7863,7864,7865,7866,7867,7868,7869,7870,7871,7872, # 7872 7873,7874,7875,7876,7877,7878,7879,7880,7881,7882,7883,7884,7885,7886,7887,7888, # 7888 7889,7890,7891,7892,7893,7894,7895,7896,7897,7898,7899,7900,7901,7902,7903,7904, # 7904 7905,7906,7907,7908,7909,7910,7911,7912,7913,7914,7915,7916,7917,7918,7919,7920, # 7920 7921,7922,7923,7924,3926,7925,7926,7927,7928,7929,7930,7931,7932,7933,7934,7935, # 7936 7936,7937,7938,7939,7940,7941,7942,7943,7944,7945,7946,7947,7948,7949,7950,7951, # 7952 7952,7953,7954,7955,7956,7957,7958,7959,7960,7961,7962,7963,7964,7965,7966,7967, # 7968 7968,7969,7970,7971,7972,7973,7974,7975,7976,7977,7978,7979,7980,7981,7982,7983, # 7984 7984,7985,7986,7987,7988,7989,7990,7991,7992,7993,7994,7995,7996,7997,7998,7999, # 8000 8000,8001,8002,8003,8004,8005,8006,8007,8008,8009,8010,8011,8012,8013,8014,8015, # 8016 8016,8017,8018,8019,8020,8021,8022,8023,8024,8025,8026,8027,8028,8029,8030,8031, # 8032 8032,8033,8034,8035,8036,8037,8038,8039,8040,8041,8042,8043,8044,8045,8046,8047, # 8048 8048,8049,8050,8051,8052,8053,8054,8055,8056,8057,8058,8059,8060,8061,8062,8063, # 8064 
8064,8065,8066,8067,8068,8069,8070,8071,8072,8073,8074,8075,8076,8077,8078,8079, # 8080 8080,8081,8082,8083,8084,8085,8086,8087,8088,8089,8090,8091,8092,8093,8094,8095, # 8096 8096,8097,8098,8099,8100,8101,8102,8103,8104,8105,8106,8107,8108,8109,8110,8111, # 8112 8112,8113,8114,8115,8116,8117,8118,8119,8120,8121,8122,8123,8124,8125,8126,8127, # 8128 8128,8129,8130,8131,8132,8133,8134,8135,8136,8137,8138,8139,8140,8141,8142,8143, # 8144 8144,8145,8146,8147,8148,8149,8150,8151,8152,8153,8154,8155,8156,8157,8158,8159, # 8160 8160,8161,8162,8163,8164,8165,8166,8167,8168,8169,8170,8171,8172,8173,8174,8175, # 8176 8176,8177,8178,8179,8180,8181,8182,8183,8184,8185,8186,8187,8188,8189,8190,8191, # 8192 8192,8193,8194,8195,8196,8197,8198,8199,8200,8201,8202,8203,8204,8205,8206,8207, # 8208 8208,8209,8210,8211,8212,8213,8214,8215,8216,8217,8218,8219,8220,8221,8222,8223, # 8224 8224,8225,8226,8227,8228,8229,8230,8231,8232,8233,8234,8235,8236,8237,8238,8239, # 8240 8240,8241,8242,8243,8244,8245,8246,8247,8248,8249,8250,8251,8252,8253,8254,8255, # 8256 8256,8257,8258,8259,8260,8261,8262,8263,8264,8265,8266,8267,8268,8269,8270,8271) # 8272 # flake8: noqa
mit
-2,939,653,643,004,805,000
82.154657
98
0.745134
false
jeasoft/odoo
marcos_addons/marcos_ncf/account_voucher/number_to_letter.py
3
4850
# -*- coding: utf-8 -*-
##############################################################################
#
#    Copyright (C) 2013-2015 Marcos Organizador de Negocios SRL http://marcos.do
#    Written by Eneldo Serrata ([email protected])
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from itertools import ifilter

UNIDADES = (
    '', 'UN ', 'DOS ', 'TRES ', 'CUATRO ', 'CINCO ', 'SEIS ', 'SIETE ', 'OCHO ', 'NUEVE ',
    'DIEZ ', 'ONCE ', 'DOCE ', 'TRECE ', 'CATORCE ', 'QUINCE ', 'DIECISEIS ', 'DIECISIETE ',
    'DIECIOCHO ', 'DIECINUEVE ', 'VEINTE '
)

DECENAS = (
    'VENTI', 'TREINTA ', 'CUARENTA ', 'CINCUENTA ', 'SESENTA ', 'SETENTA ', 'OCHENTA ',
    'NOVENTA ', 'CIEN '
)

CENTENAS = (
    'CIENTO ', 'DOSCIENTOS ', 'TRESCIENTOS ', 'CUATROCIENTOS ', 'QUINIENTOS ', 'SEISCIENTOS ',
    'SETECIENTOS ', 'OCHOCIENTOS ', 'NOVECIENTOS '
)

MONEDAS = (
    {'country': u'Colombia', 'currency': 'COP', 'singular': u'PESO COLOMBIANO', 'plural': u'PESOS COLOMBIANOS', 'symbol': u'$'},
    {'country': u'Estados Unidos', 'currency': 'USD', 'singular': u'DÓLAR', 'plural': u'DÓLARES', 'symbol': u'US$'},
    {'country': u'Europa', 'currency': 'EUR', 'singular': u'EURO', 'plural': u'EUROS', 'symbol': u'€'},
    {'country': u'México', 'currency': 'MXN', 'singular': u'PESO MEXICANO', 'plural': u'PESOS MEXICANOS', 'symbol': u'$'},
    {'country': u'Perú', 'currency': 'PEN', 'singular': u'NUEVO SOL', 'plural': u'NUEVOS SOLES', 'symbol': u'S/.'},
    {'country': u'Reino Unido', 'currency': 'GBP', 'singular': u'LIBRA', 'plural': u'LIBRAS', 'symbol': u'£'},
    {'country': u'Republica Dominicana', 'currency': 'pesos', 'singular': u'peso', 'plural': u'pesos', 'symbol': u'RD'}
)
# To define the currency I am relying on the codes established by ISO 4217
# I decided to name the variables in English because it is simpler to locate them no matter the country
# Yes, I know Europe is not a country, but I could not think of a better name for the key.


def to_word(number, mi_moneda=None):
    if mi_moneda != None:
        try:
            moneda = ifilter(lambda x: x['currency'] == mi_moneda, MONEDAS).next()
            if number < 2:
                moneda = moneda['singular']
            else:
                moneda = moneda['plural']
        except:
            return u"Tipo de moneda inválida"
    else:
        moneda = ""
    """Converts a number into string representation"""
    converted = ''

    if not (0 < number < 999999999):
        return 'No es posible convertir el numero a letras'

    if "." in str(number):
        number, decimal = str(number).split(".")
    else:
        number = str(number)
        decimal = "00"

    number_str = str(number).zfill(9)
    millones = number_str[:3]
    miles = number_str[3:6]
    cientos = number_str[6:]

    if(millones):
        if(millones == '001'):
            converted += 'UN MILLON '
        elif(int(millones) > 0):
            converted += '%sMILLONES ' % __convert_group(millones)

    if(miles):
        if(miles == '001'):
            converted += 'MIL '
        elif(int(miles) > 0):
            converted += '%sMIL ' % __convert_group(miles)

    if(cientos):
        if(cientos == '001'):
            converted += 'UN '
        elif(int(cientos) > 0):
            converted += '%s' % __convert_group(cientos)

    #converted += moneda
    if decimal:
        decimal = "con %s%s" % (decimal, "/100")
        converted += decimal
    converted = "{}".format(converted)
    return converted.upper()


def __convert_group(n):
    """Turn each group of numbers into letters"""
    output = ''

    if(n == '100'):
        output = "CIEN "
    elif(n[0] != '0'):
        output = CENTENAS[int(n[0]) - 1]

    k = int(n[1:])
    if(k <= 20):
        output += UNIDADES[k]
    else:
        if((k > 30) & (n[2] != '0')):
            output += '%sY %s' % (DECENAS[int(n[1]) - 2], UNIDADES[int(n[2])])
        else:
            output += '%s%s' % (DECENAS[int(n[1]) - 2], UNIDADES[int(n[2])])

    return output
agpl-3.0
-4,518,428,102,402,945,500
29.21875
128
0.54427
false
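A minimal usage sketch for the to_word helper in the marcos_ncf entry above. It assumes Python 2 (the module relies on itertools.ifilter and .next()) and that the file is importable as number_to_letter; the module name and the printed strings are illustrative, not taken from the repository.

# -*- coding: utf-8 -*-
# Hypothetical driver for to_word(); the import path is an assumption.
from number_to_letter import to_word

# Amounts are zero-padded to nine digits and split into millions / thousands / hundreds groups.
print(to_word(21))            # e.g. VENTIUN CON 00/100
print(to_word(1000100))       # e.g. UN MILLON CIEN CON 00/100
print(to_word(345.5, 'USD'))  # decimals become "CON 5/100"; an unknown currency code returns an error string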
jnhdny/parts-unlimited-to-bigcommerce
python_scripts/catalog_content_export_unzipper.py
3
1551
#!/usr/bin/python
# this script unzips the brand catalog content export zip files
# one at a time, and numbers each exported xml file

import zipfile
import shutil
import os
import glob


def bool_str(s):
    s = s.lower()
    T = ('1', 'true', 'y', 'ye', 'yes', 'yea', 'ya', 'yah', 'yup')
    F = ('0', 'false', 'n', 'no', 'not', 'nope', 'na', 'nah')
    if s in T:
        return True
    elif s in F:
        return False
    else:
        raise ValueError('"%s" is not a recognized yes or no string' % s)

working_directory = "../catalog_content_export_zips"
output_directory = "../catalog_content_xml_files"

if os.path.isdir(working_directory):
    os.chdir(working_directory)
    print("Changing working directory to " + working_directory + " ...")
    cont = True
else:
    cont = bool_str(raw_input("Could not change working directory. Continue? (y/n):"))

if cont:
    if os.path.isdir(output_directory):
        print("Setting output directory to " + output_directory + " ...")
        doproceed = True
    else:
        doproceed = bool_str(raw_input("Could not set output directory to " + output_directory + " ... Double check to see if it exists. Do you wish to proceed using current working directory for output? (y/n):"))
    if doproceed:
        for i in glob.glob('*.zip'):
            #filename = "Brand_Catalog_Content_Export(%s).zip" % i
            zf = zipfile.ZipFile(i)
            print("Extracting " + i + " ...")
            zf.extractall()
            shutil.move("Brand_Catalog_Content_Export.xml", "../catalog_content_xml_files/%s.xml" % i)
            print("Moving " + i + " to ~/parts-unlimited-to-bigcommerce/catalog_content_xml_files/" + i + ".xml")
gpl-2.0
-8,436,640,711,712,632,000
34.25
206
0.662153
false
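The unzipper above boils down to an extract-then-rename loop over every brand zip. A condensed sketch of that core loop, using the file names hard-coded in the script and assuming the destination directory already exists:

import glob
import shutil
import zipfile

for archive in glob.glob('*.zip'):
    # each export zip contains a single Brand_Catalog_Content_Export.xml
    with zipfile.ZipFile(archive) as zf:
        zf.extractall()
    # number the extracted file after its source archive
    shutil.move("Brand_Catalog_Content_Export.xml",
                "../catalog_content_xml_files/%s.xml" % archive)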
bastimeyer/streamlink
src/streamlink/plugins/livestream.py
5
4599
import re from streamlink.compat import urljoin from streamlink.plugin import Plugin from streamlink.plugin.api import validate from streamlink.plugin.api.utils import parse_json from streamlink.stream import AkamaiHDStream, HLSStream _url_re = re.compile(r"http(s)?://(www\.)?livestream.com/") _stream_config_schema = validate.Schema({ "event": { "stream_info": validate.any({ "is_live": bool, "qualities": [{ "bitrate": int, "height": int }], validate.optional("play_url"): validate.url(scheme="http"), validate.optional("m3u8_url"): validate.url( scheme="http", path=validate.endswith(".m3u8") ), }, None) }, validate.optional("playerUri"): validate.text, validate.optional("viewerPlusSwfUrl"): validate.url(scheme="http"), validate.optional("lsPlayerSwfUrl"): validate.text, validate.optional("hdPlayerSwfUrl"): validate.text }) _smil_schema = validate.Schema(validate.union({ "http_base": validate.all( validate.xml_find("{http://www.w3.org/2001/SMIL20/Language}head/" "{http://www.w3.org/2001/SMIL20/Language}meta" "[@name='httpBase']"), validate.xml_element(attrib={ "content": validate.text }), validate.get("content") ), "videos": validate.all( validate.xml_findall("{http://www.w3.org/2001/SMIL20/Language}body/" "{http://www.w3.org/2001/SMIL20/Language}switch/" "{http://www.w3.org/2001/SMIL20/Language}video"), [ validate.all( validate.xml_element(attrib={ "src": validate.text, "system-bitrate": validate.all( validate.text, validate.transform(int) ) }), validate.transform( lambda e: (e.attrib["src"], e.attrib["system-bitrate"]) ) ) ], ) })) class Livestream(Plugin): @classmethod def default_stream_types(cls, streams): return ["akamaihd", "hls"] @classmethod def can_handle_url(self, url): return _url_re.match(url) def _get_stream_info(self): res = self.session.http.get(self.url) match = re.search("window.config = ({.+})", res.text) if match: config = match.group(1) return parse_json(config, "config JSON", schema=_stream_config_schema) def _parse_smil(self, url, swf_url): res = self.session.http.get(url) smil = self.session.http.xml(res, "SMIL config", schema=_smil_schema) for src, bitrate in smil["videos"]: url = urljoin(smil["http_base"], src) yield bitrate, AkamaiHDStream(self.session, url, swf=swf_url) def _get_streams(self): info = self._get_stream_info() if not info: return stream_info = info["event"]["stream_info"] if not (stream_info and stream_info["is_live"]): # Stream is not live return play_url = stream_info.get("play_url") if play_url: swf_url = info.get("playerUri") or info.get("hdPlayerSwfUrl") or info.get("lsPlayerSwfUrl") or info.get("viewerPlusSwfUrl") if swf_url: if not swf_url.startswith("http"): if swf_url.startswith("//"): swf_url = "http:" + swf_url else: swf_url = "http://" + swf_url # Work around broken SSL. swf_url = swf_url.replace("https://", "http://") qualities = stream_info["qualities"] for bitrate, stream in self._parse_smil(play_url, swf_url): name = "{0:d}k".format(int(bitrate / 1000)) for quality in qualities: if quality["bitrate"] == bitrate: name = "{0}p".format(quality["height"]) yield name, stream m3u8_url = stream_info.get("m3u8_url") if m3u8_url: streams = HLSStream.parse_variant_playlist(self.session, m3u8_url, namekey="pixels") # TODO: Replace with "yield from" when dropping Python 2. for stream in streams.items(): yield stream __plugin__ = Livestream
bsd-2-clause
-936,878,992,519,265,400
34.929688
135
0.52294
false
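In the livestream plugin's _get_streams above, each Akamai bitrate read from the SMIL document is renamed to a "<height>p" label when a matching entry exists in stream_info["qualities"], and otherwise falls back to "<kbps>k". A small illustration of that mapping with made-up numbers:

# Made-up data shaped like stream_info["qualities"]; real values come from Livestream's config JSON.
qualities = [{"bitrate": 1500000, "height": 720}, {"bitrate": 700000, "height": 360}]

def stream_name(bitrate):
    name = "{0:d}k".format(int(bitrate / 1000))   # fallback label, e.g. "1500k"
    for quality in qualities:
        if quality["bitrate"] == bitrate:
            name = "{0}p".format(quality["height"])
    return name

print(stream_name(1500000))  # -> 720p
print(stream_name(999000))   # -> 999k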
teichopsia-/take_brake
lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.py
252
106466
""" Package resource API -------------------- A resource is a logical file contained within a package, or a logical subdirectory thereof. The package resource API expects resource names to have their path parts separated with ``/``, *not* whatever the local path separator is. Do not use os.path operations to manipulate resource names being passed into the API. The package resource API is designed to work with normal filesystem packages, .egg files, and unpacked .egg files. It can also work in a limited way with .zip files and with custom PEP 302 loaders that support the ``get_data()`` method. """ from __future__ import absolute_import import sys import os import io import time import re import types import zipfile import zipimport import warnings import stat import functools import pkgutil import token import symbol import operator import platform import collections import plistlib import email.parser import tempfile import textwrap from pkgutil import get_importer try: import _imp except ImportError: # Python 3.2 compatibility import imp as _imp PY3 = sys.version_info > (3,) PY2 = not PY3 if PY3: from urllib.parse import urlparse, urlunparse if PY2: from urlparse import urlparse, urlunparse if PY3: string_types = str, else: string_types = str, eval('unicode') iteritems = (lambda i: i.items()) if PY3 else lambda i: i.iteritems() # capture these to bypass sandboxing from os import utime try: from os import mkdir, rename, unlink WRITE_SUPPORT = True except ImportError: # no write support, probably under GAE WRITE_SUPPORT = False from os import open as os_open from os.path import isdir, split # Avoid try/except due to potential problems with delayed import mechanisms. if sys.version_info >= (3, 3) and sys.implementation.name == "cpython": import importlib.machinery as importlib_machinery else: importlib_machinery = None try: import parser except ImportError: pass import pip._vendor.packaging.version import pip._vendor.packaging.specifiers packaging = pip._vendor.packaging # declare some globals that will be defined later to # satisfy the linters. require = None working_set = None class PEP440Warning(RuntimeWarning): """ Used when there is an issue with a version or specifier not complying with PEP 440. 
""" class _SetuptoolsVersionMixin(object): def __hash__(self): return super(_SetuptoolsVersionMixin, self).__hash__() def __lt__(self, other): if isinstance(other, tuple): return tuple(self) < other else: return super(_SetuptoolsVersionMixin, self).__lt__(other) def __le__(self, other): if isinstance(other, tuple): return tuple(self) <= other else: return super(_SetuptoolsVersionMixin, self).__le__(other) def __eq__(self, other): if isinstance(other, tuple): return tuple(self) == other else: return super(_SetuptoolsVersionMixin, self).__eq__(other) def __ge__(self, other): if isinstance(other, tuple): return tuple(self) >= other else: return super(_SetuptoolsVersionMixin, self).__ge__(other) def __gt__(self, other): if isinstance(other, tuple): return tuple(self) > other else: return super(_SetuptoolsVersionMixin, self).__gt__(other) def __ne__(self, other): if isinstance(other, tuple): return tuple(self) != other else: return super(_SetuptoolsVersionMixin, self).__ne__(other) def __getitem__(self, key): return tuple(self)[key] def __iter__(self): component_re = re.compile(r'(\d+ | [a-z]+ | \.| -)', re.VERBOSE) replace = { 'pre': 'c', 'preview': 'c', '-': 'final-', 'rc': 'c', 'dev': '@', }.get def _parse_version_parts(s): for part in component_re.split(s): part = replace(part, part) if not part or part == '.': continue if part[:1] in '0123456789': # pad for numeric comparison yield part.zfill(8) else: yield '*'+part # ensure that alpha/beta/candidate are before final yield '*final' def old_parse_version(s): parts = [] for part in _parse_version_parts(s.lower()): if part.startswith('*'): # remove '-' before a prerelease tag if part < '*final': while parts and parts[-1] == '*final-': parts.pop() # remove trailing zeros from each series of numeric parts while parts and parts[-1] == '00000000': parts.pop() parts.append(part) return tuple(parts) # Warn for use of this function warnings.warn( "You have iterated over the result of " "pkg_resources.parse_version. This is a legacy behavior which is " "inconsistent with the new version class introduced in setuptools " "8.0. In most cases, conversion to a tuple is unnecessary. For " "comparison of versions, sort the Version instances directly. If " "you have another use case requiring the tuple, please file a " "bug with the setuptools project describing that need.", RuntimeWarning, stacklevel=1, ) for part in old_parse_version(str(self)): yield part class SetuptoolsVersion(_SetuptoolsVersionMixin, packaging.version.Version): pass class SetuptoolsLegacyVersion(_SetuptoolsVersionMixin, packaging.version.LegacyVersion): pass def parse_version(v): try: return SetuptoolsVersion(v) except packaging.version.InvalidVersion: return SetuptoolsLegacyVersion(v) _state_vars = {} def _declare_state(vartype, **kw): globals().update(kw) _state_vars.update(dict.fromkeys(kw, vartype)) def __getstate__(): state = {} g = globals() for k, v in _state_vars.items(): state[k] = g['_sget_'+v](g[k]) return state def __setstate__(state): g = globals() for k, v in state.items(): g['_sset_'+_state_vars[k]](k, g[k], v) return state def _sget_dict(val): return val.copy() def _sset_dict(key, ob, state): ob.clear() ob.update(state) def _sget_object(val): return val.__getstate__() def _sset_object(key, ob, state): ob.__setstate__(state) _sget_none = _sset_none = lambda *args: None def get_supported_platform(): """Return this platform's maximum compatible version. 
distutils.util.get_platform() normally reports the minimum version of Mac OS X that would be required to *use* extensions produced by distutils. But what we want when checking compatibility is to know the version of Mac OS X that we are *running*. To allow usage of packages that explicitly require a newer version of Mac OS X, we must also know the current version of the OS. If this condition occurs for any other platform with a version in its platform strings, this function should be extended accordingly. """ plat = get_build_platform() m = macosVersionString.match(plat) if m is not None and sys.platform == "darwin": try: plat = 'macosx-%s-%s' % ('.'.join(_macosx_vers()[:2]), m.group(3)) except ValueError: # not Mac OS X pass return plat __all__ = [ # Basic resource access and distribution/entry point discovery 'require', 'run_script', 'get_provider', 'get_distribution', 'load_entry_point', 'get_entry_map', 'get_entry_info', 'iter_entry_points', 'resource_string', 'resource_stream', 'resource_filename', 'resource_listdir', 'resource_exists', 'resource_isdir', # Environmental control 'declare_namespace', 'working_set', 'add_activation_listener', 'find_distributions', 'set_extraction_path', 'cleanup_resources', 'get_default_cache', # Primary implementation classes 'Environment', 'WorkingSet', 'ResourceManager', 'Distribution', 'Requirement', 'EntryPoint', # Exceptions 'ResolutionError', 'VersionConflict', 'DistributionNotFound', 'UnknownExtra', 'ExtractionError', # Warnings 'PEP440Warning', # Parsing functions and string utilities 'parse_requirements', 'parse_version', 'safe_name', 'safe_version', 'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections', 'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker', # filesystem utilities 'ensure_directory', 'normalize_path', # Distribution "precedence" constants 'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST', # "Provider" interfaces, implementations, and registration/lookup APIs 'IMetadataProvider', 'IResourceProvider', 'FileMetadata', 'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider', 'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider', 'register_finder', 'register_namespace_handler', 'register_loader_type', 'fixup_namespace_packages', 'get_importer', # Deprecated/backward compatibility only 'run_main', 'AvailableDistributions', ] class ResolutionError(Exception): """Abstract base for dependency resolution errors""" def __repr__(self): return self.__class__.__name__+repr(self.args) class VersionConflict(ResolutionError): """ An already-installed version conflicts with the requested version. Should be initialized with the installed Distribution and the requested Requirement. """ _template = "{self.dist} is installed but {self.req} is required" @property def dist(self): return self.args[0] @property def req(self): return self.args[1] def report(self): return self._template.format(**locals()) def with_context(self, required_by): """ If required_by is non-empty, return a version of self that is a ContextualVersionConflict. """ if not required_by: return self args = self.args + (required_by,) return ContextualVersionConflict(*args) class ContextualVersionConflict(VersionConflict): """ A VersionConflict that accepts a third parameter, the set of the requirements that required the installed Distribution. 
""" _template = VersionConflict._template + ' by {self.required_by}' @property def required_by(self): return self.args[2] class DistributionNotFound(ResolutionError): """A requested distribution was not found""" _template = ("The '{self.req}' distribution was not found " "and is required by {self.requirers_str}") @property def req(self): return self.args[0] @property def requirers(self): return self.args[1] @property def requirers_str(self): if not self.requirers: return 'the application' return ', '.join(self.requirers) def report(self): return self._template.format(**locals()) def __str__(self): return self.report() class UnknownExtra(ResolutionError): """Distribution doesn't have an "extra feature" of the given name""" _provider_factories = {} PY_MAJOR = sys.version[:3] EGG_DIST = 3 BINARY_DIST = 2 SOURCE_DIST = 1 CHECKOUT_DIST = 0 DEVELOP_DIST = -1 def register_loader_type(loader_type, provider_factory): """Register `provider_factory` to make providers for `loader_type` `loader_type` is the type or class of a PEP 302 ``module.__loader__``, and `provider_factory` is a function that, passed a *module* object, returns an ``IResourceProvider`` for that module. """ _provider_factories[loader_type] = provider_factory def get_provider(moduleOrReq): """Return an IResourceProvider for the named module or requirement""" if isinstance(moduleOrReq, Requirement): return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0] try: module = sys.modules[moduleOrReq] except KeyError: __import__(moduleOrReq) module = sys.modules[moduleOrReq] loader = getattr(module, '__loader__', None) return _find_adapter(_provider_factories, loader)(module) def _macosx_vers(_cache=[]): if not _cache: version = platform.mac_ver()[0] # fallback for MacPorts if version == '': plist = '/System/Library/CoreServices/SystemVersion.plist' if os.path.exists(plist): if hasattr(plistlib, 'readPlist'): plist_content = plistlib.readPlist(plist) if 'ProductVersion' in plist_content: version = plist_content['ProductVersion'] _cache.append(version.split('.')) return _cache[0] def _macosx_arch(machine): return {'PowerPC': 'ppc', 'Power_Macintosh': 'ppc'}.get(machine, machine) def get_build_platform(): """Return this platform's string for platform-specific distributions XXX Currently this is the same as ``distutils.util.get_platform()``, but it needs some hacks for Linux and Mac OS X. """ try: # Python 2.7 or >=3.2 from sysconfig import get_platform except ImportError: from distutils.util import get_platform plat = get_platform() if sys.platform == "darwin" and not plat.startswith('macosx-'): try: version = _macosx_vers() machine = os.uname()[4].replace(" ", "_") return "macosx-%d.%d-%s" % (int(version[0]), int(version[1]), _macosx_arch(machine)) except ValueError: # if someone is running a non-Mac darwin system, this will fall # through to the default implementation pass return plat macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)") darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)") # XXX backward compat get_platform = get_build_platform def compatible_platforms(provided, required): """Can code for the `provided` platform run on the `required` platform? Returns true if either platform is ``None``, or the platforms are equal. XXX Needs compatibility checks for Linux and other unixy OSes. 
""" if provided is None or required is None or provided==required: # easy case return True # Mac OS X special cases reqMac = macosVersionString.match(required) if reqMac: provMac = macosVersionString.match(provided) # is this a Mac package? if not provMac: # this is backwards compatibility for packages built before # setuptools 0.6. All packages built after this point will # use the new macosx designation. provDarwin = darwinVersionString.match(provided) if provDarwin: dversion = int(provDarwin.group(1)) macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2)) if dversion == 7 and macosversion >= "10.3" or \ dversion == 8 and macosversion >= "10.4": return True # egg isn't macosx or legacy darwin return False # are they the same major version and machine type? if provMac.group(1) != reqMac.group(1) or \ provMac.group(3) != reqMac.group(3): return False # is the required OS major update >= the provided one? if int(provMac.group(2)) > int(reqMac.group(2)): return False return True # XXX Linux and other platforms' special cases should go here return False def run_script(dist_spec, script_name): """Locate distribution `dist_spec` and run its `script_name` script""" ns = sys._getframe(1).f_globals name = ns['__name__'] ns.clear() ns['__name__'] = name require(dist_spec)[0].run_script(script_name, ns) # backward compatibility run_main = run_script def get_distribution(dist): """Return a current distribution object for a Requirement or string""" if isinstance(dist, string_types): dist = Requirement.parse(dist) if isinstance(dist, Requirement): dist = get_provider(dist) if not isinstance(dist, Distribution): raise TypeError("Expected string, Requirement, or Distribution", dist) return dist def load_entry_point(dist, group, name): """Return `name` entry point of `group` for `dist` or raise ImportError""" return get_distribution(dist).load_entry_point(group, name) def get_entry_map(dist, group=None): """Return the entry point map for `group`, or the full entry map""" return get_distribution(dist).get_entry_map(group) def get_entry_info(dist, group, name): """Return the EntryPoint object for `group`+`name`, or ``None``""" return get_distribution(dist).get_entry_info(group, name) class IMetadataProvider: def has_metadata(name): """Does the package's distribution contain the named metadata?""" def get_metadata(name): """The named metadata resource as a string""" def get_metadata_lines(name): """Yield named metadata resource as list of non-blank non-comment lines Leading and trailing whitespace is stripped from each line, and lines with ``#`` as the first non-blank character are omitted.""" def metadata_isdir(name): """Is the named metadata a directory? 
(like ``os.path.isdir()``)""" def metadata_listdir(name): """List of metadata names in the directory (like ``os.listdir()``)""" def run_script(script_name, namespace): """Execute the named script in the supplied namespace dictionary""" class IResourceProvider(IMetadataProvider): """An object that provides access to package resources""" def get_resource_filename(manager, resource_name): """Return a true filesystem path for `resource_name` `manager` must be an ``IResourceManager``""" def get_resource_stream(manager, resource_name): """Return a readable file-like object for `resource_name` `manager` must be an ``IResourceManager``""" def get_resource_string(manager, resource_name): """Return a string containing the contents of `resource_name` `manager` must be an ``IResourceManager``""" def has_resource(resource_name): """Does the package contain the named resource?""" def resource_isdir(resource_name): """Is the named resource a directory? (like ``os.path.isdir()``)""" def resource_listdir(resource_name): """List of resource names in the directory (like ``os.listdir()``)""" class WorkingSet(object): """A collection of active distributions on sys.path (or a similar list)""" def __init__(self, entries=None): """Create working set from list of path entries (default=sys.path)""" self.entries = [] self.entry_keys = {} self.by_key = {} self.callbacks = [] if entries is None: entries = sys.path for entry in entries: self.add_entry(entry) @classmethod def _build_master(cls): """ Prepare the master working set. """ ws = cls() try: from __main__ import __requires__ except ImportError: # The main program does not list any requirements return ws # ensure the requirements are met try: ws.require(__requires__) except VersionConflict: return cls._build_from_requirements(__requires__) return ws @classmethod def _build_from_requirements(cls, req_spec): """ Build a working set from a requirement spec. Rewrites sys.path. """ # try it without defaults already on sys.path # by starting with an empty path ws = cls([]) reqs = parse_requirements(req_spec) dists = ws.resolve(reqs, Environment()) for dist in dists: ws.add(dist) # add any missing entries from sys.path for entry in sys.path: if entry not in ws.entries: ws.add_entry(entry) # then copy back to sys.path sys.path[:] = ws.entries return ws def add_entry(self, entry): """Add a path item to ``.entries``, finding any distributions on it ``find_distributions(entry, True)`` is used to find distributions corresponding to the path entry, and they are added. `entry` is always appended to ``.entries``, even if it is already present. (This is because ``sys.path`` can contain the same value more than once, and the ``.entries`` of the ``sys.path`` WorkingSet should always equal ``sys.path``.) """ self.entry_keys.setdefault(entry, []) self.entries.append(entry) for dist in find_distributions(entry, True): self.add(dist, entry, False) def __contains__(self, dist): """True if `dist` is the active distribution for its project""" return self.by_key.get(dist.key) == dist def find(self, req): """Find a distribution matching requirement `req` If there is an active distribution for the requested project, this returns it as long as it meets the version requirement specified by `req`. But, if there is an active distribution for the project and it does *not* meet the `req` requirement, ``VersionConflict`` is raised. If there is no active distribution for the requested project, ``None`` is returned. 
""" dist = self.by_key.get(req.key) if dist is not None and dist not in req: # XXX add more info raise VersionConflict(dist, req) return dist def iter_entry_points(self, group, name=None): """Yield entry point objects from `group` matching `name` If `name` is None, yields all entry points in `group` from all distributions in the working set, otherwise only ones matching both `group` and `name` are yielded (in distribution order). """ for dist in self: entries = dist.get_entry_map(group) if name is None: for ep in entries.values(): yield ep elif name in entries: yield entries[name] def run_script(self, requires, script_name): """Locate distribution for `requires` and run `script_name` script""" ns = sys._getframe(1).f_globals name = ns['__name__'] ns.clear() ns['__name__'] = name self.require(requires)[0].run_script(script_name, ns) def __iter__(self): """Yield distributions for non-duplicate projects in the working set The yield order is the order in which the items' path entries were added to the working set. """ seen = {} for item in self.entries: if item not in self.entry_keys: # workaround a cache issue continue for key in self.entry_keys[item]: if key not in seen: seen[key]=1 yield self.by_key[key] def add(self, dist, entry=None, insert=True, replace=False): """Add `dist` to working set, associated with `entry` If `entry` is unspecified, it defaults to the ``.location`` of `dist`. On exit from this routine, `entry` is added to the end of the working set's ``.entries`` (if it wasn't already present). `dist` is only added to the working set if it's for a project that doesn't already have a distribution in the set, unless `replace=True`. If it's added, any callbacks registered with the ``subscribe()`` method will be called. """ if insert: dist.insert_on(self.entries, entry) if entry is None: entry = dist.location keys = self.entry_keys.setdefault(entry,[]) keys2 = self.entry_keys.setdefault(dist.location,[]) if not replace and dist.key in self.by_key: # ignore hidden distros return self.by_key[dist.key] = dist if dist.key not in keys: keys.append(dist.key) if dist.key not in keys2: keys2.append(dist.key) self._added_new(dist) def resolve(self, requirements, env=None, installer=None, replace_conflicting=False): """List all distributions needed to (recursively) meet `requirements` `requirements` must be a sequence of ``Requirement`` objects. `env`, if supplied, should be an ``Environment`` instance. If not supplied, it defaults to all distributions available within any entry or distribution in the working set. `installer`, if supplied, will be invoked with each requirement that cannot be met by an already-installed distribution; it should return a ``Distribution`` or ``None``. Unless `replace_conflicting=True`, raises a VersionConflict exception if any requirements are found on the path that have the correct name but the wrong version. Otherwise, if an `installer` is supplied it will be invoked to obtain the correct version of the requirement and activate it. """ # set up the stack requirements = list(requirements)[::-1] # set of processed requirements processed = {} # key -> dist best = {} to_activate = [] # Mapping of requirement to set of distributions that required it; # useful for reporting info about conflicts. 
required_by = collections.defaultdict(set) while requirements: # process dependencies breadth-first req = requirements.pop(0) if req in processed: # Ignore cyclic or redundant dependencies continue dist = best.get(req.key) if dist is None: # Find the best distribution and add it to the map dist = self.by_key.get(req.key) if dist is None or (dist not in req and replace_conflicting): ws = self if env is None: if dist is None: env = Environment(self.entries) else: # Use an empty environment and workingset to avoid # any further conflicts with the conflicting # distribution env = Environment([]) ws = WorkingSet([]) dist = best[req.key] = env.best_match(req, ws, installer) if dist is None: requirers = required_by.get(req, None) raise DistributionNotFound(req, requirers) to_activate.append(dist) if dist not in req: # Oops, the "best" so far conflicts with a dependency dependent_req = required_by[req] raise VersionConflict(dist, req).with_context(dependent_req) # push the new requirements onto the stack new_requirements = dist.requires(req.extras)[::-1] requirements.extend(new_requirements) # Register the new requirements needed by req for new_requirement in new_requirements: required_by[new_requirement].add(req.project_name) processed[req] = True # return list of distros to activate return to_activate def find_plugins(self, plugin_env, full_env=None, installer=None, fallback=True): """Find all activatable distributions in `plugin_env` Example usage:: distributions, errors = working_set.find_plugins( Environment(plugin_dirlist) ) # add plugins+libs to sys.path map(working_set.add, distributions) # display errors print('Could not load', errors) The `plugin_env` should be an ``Environment`` instance that contains only distributions that are in the project's "plugin directory" or directories. The `full_env`, if supplied, should be an ``Environment`` contains all currently-available distributions. If `full_env` is not supplied, one is created automatically from the ``WorkingSet`` this method is called on, which will typically mean that every directory on ``sys.path`` will be scanned for distributions. `installer` is a standard installer callback as used by the ``resolve()`` method. The `fallback` flag indicates whether we should attempt to resolve older versions of a plugin if the newest version cannot be resolved. This method returns a 2-tuple: (`distributions`, `error_info`), where `distributions` is a list of the distributions found in `plugin_env` that were loadable, along with any other distributions that are needed to resolve their dependencies. `error_info` is a dictionary mapping unloadable plugin distributions to an exception instance describing the error that occurred. Usually this will be a ``DistributionNotFound`` or ``VersionConflict`` instance. 
""" plugin_projects = list(plugin_env) # scan project names in alphabetic order plugin_projects.sort() error_info = {} distributions = {} if full_env is None: env = Environment(self.entries) env += plugin_env else: env = full_env + plugin_env shadow_set = self.__class__([]) # put all our entries in shadow_set list(map(shadow_set.add, self)) for project_name in plugin_projects: for dist in plugin_env[project_name]: req = [dist.as_requirement()] try: resolvees = shadow_set.resolve(req, env, installer) except ResolutionError as v: # save error info error_info[dist] = v if fallback: # try the next older version of project continue else: # give up on this project, keep going break else: list(map(shadow_set.add, resolvees)) distributions.update(dict.fromkeys(resolvees)) # success, no need to try any more versions of this project break distributions = list(distributions) distributions.sort() return distributions, error_info def require(self, *requirements): """Ensure that distributions matching `requirements` are activated `requirements` must be a string or a (possibly-nested) sequence thereof, specifying the distributions and versions required. The return value is a sequence of the distributions that needed to be activated to fulfill the requirements; all relevant distributions are included, even if they were already activated in this working set. """ needed = self.resolve(parse_requirements(requirements)) for dist in needed: self.add(dist) return needed def subscribe(self, callback): """Invoke `callback` for all distributions (including existing ones)""" if callback in self.callbacks: return self.callbacks.append(callback) for dist in self: callback(dist) def _added_new(self, dist): for callback in self.callbacks: callback(dist) def __getstate__(self): return ( self.entries[:], self.entry_keys.copy(), self.by_key.copy(), self.callbacks[:] ) def __setstate__(self, e_k_b_c): entries, keys, by_key, callbacks = e_k_b_c self.entries = entries[:] self.entry_keys = keys.copy() self.by_key = by_key.copy() self.callbacks = callbacks[:] class Environment(object): """Searchable snapshot of distributions on a search path""" def __init__(self, search_path=None, platform=get_supported_platform(), python=PY_MAJOR): """Snapshot distributions available on a search path Any distributions found on `search_path` are added to the environment. `search_path` should be a sequence of ``sys.path`` items. If not supplied, ``sys.path`` is used. `platform` is an optional string specifying the name of the platform that platform-specific distributions must be compatible with. If unspecified, it defaults to the current platform. `python` is an optional string naming the desired version of Python (e.g. ``'3.3'``); it defaults to the current version. You may explicitly set `platform` (and/or `python`) to ``None`` if you wish to map *all* distributions, not just those compatible with the running platform or Python version. """ self._distmap = {} self.platform = platform self.python = python self.scan(search_path) def can_add(self, dist): """Is distribution `dist` acceptable for this environment? The distribution must match the platform and python version requirements specified when this environment was created, or False is returned. 
""" return (self.python is None or dist.py_version is None or dist.py_version==self.python) \ and compatible_platforms(dist.platform, self.platform) def remove(self, dist): """Remove `dist` from the environment""" self._distmap[dist.key].remove(dist) def scan(self, search_path=None): """Scan `search_path` for distributions usable in this environment Any distributions found are added to the environment. `search_path` should be a sequence of ``sys.path`` items. If not supplied, ``sys.path`` is used. Only distributions conforming to the platform/python version defined at initialization are added. """ if search_path is None: search_path = sys.path for item in search_path: for dist in find_distributions(item): self.add(dist) def __getitem__(self, project_name): """Return a newest-to-oldest list of distributions for `project_name` Uses case-insensitive `project_name` comparison, assuming all the project's distributions use their project's name converted to all lowercase as their key. """ distribution_key = project_name.lower() return self._distmap.get(distribution_key, []) def add(self, dist): """Add `dist` if we ``can_add()`` it and it has not already been added """ if self.can_add(dist) and dist.has_version(): dists = self._distmap.setdefault(dist.key, []) if dist not in dists: dists.append(dist) dists.sort(key=operator.attrgetter('hashcmp'), reverse=True) def best_match(self, req, working_set, installer=None): """Find distribution best matching `req` and usable on `working_set` This calls the ``find(req)`` method of the `working_set` to see if a suitable distribution is already active. (This may raise ``VersionConflict`` if an unsuitable version of the project is already active in the specified `working_set`.) If a suitable distribution isn't active, this method returns the newest distribution in the environment that meets the ``Requirement`` in `req`. If no suitable distribution is found, and `installer` is supplied, then the result of calling the environment's ``obtain(req, installer)`` method will be returned. """ dist = working_set.find(req) if dist is not None: return dist for dist in self[req.key]: if dist in req: return dist # try to download/install return self.obtain(req, installer) def obtain(self, requirement, installer=None): """Obtain a distribution matching `requirement` (e.g. via download) Obtain a distro that matches requirement (e.g. via download). In the base ``Environment`` class, this routine just returns ``installer(requirement)``, unless `installer` is None, in which case None is returned instead. 
This method is a hook that allows subclasses to attempt other ways of obtaining a distribution before falling back to the `installer` argument.""" if installer is not None: return installer(requirement) def __iter__(self): """Yield the unique project names of the available distributions""" for key in self._distmap.keys(): if self[key]: yield key def __iadd__(self, other): """In-place addition of a distribution or environment""" if isinstance(other, Distribution): self.add(other) elif isinstance(other, Environment): for project in other: for dist in other[project]: self.add(dist) else: raise TypeError("Can't add %r to environment" % (other,)) return self def __add__(self, other): """Add an environment or distribution to an environment""" new = self.__class__([], platform=None, python=None) for env in self, other: new += env return new # XXX backward compatibility AvailableDistributions = Environment class ExtractionError(RuntimeError): """An error occurred extracting a resource The following attributes are available from instances of this exception: manager The resource manager that raised this exception cache_path The base directory for resource extraction original_error The exception instance that caused extraction to fail """ class ResourceManager: """Manage resource extraction and packages""" extraction_path = None def __init__(self): self.cached_files = {} def resource_exists(self, package_or_requirement, resource_name): """Does the named resource exist?""" return get_provider(package_or_requirement).has_resource(resource_name) def resource_isdir(self, package_or_requirement, resource_name): """Is the named resource an existing directory?""" return get_provider(package_or_requirement).resource_isdir( resource_name ) def resource_filename(self, package_or_requirement, resource_name): """Return a true filesystem path for specified resource""" return get_provider(package_or_requirement).get_resource_filename( self, resource_name ) def resource_stream(self, package_or_requirement, resource_name): """Return a readable file-like object for specified resource""" return get_provider(package_or_requirement).get_resource_stream( self, resource_name ) def resource_string(self, package_or_requirement, resource_name): """Return specified resource as a string""" return get_provider(package_or_requirement).get_resource_string( self, resource_name ) def resource_listdir(self, package_or_requirement, resource_name): """List the contents of the named resource directory""" return get_provider(package_or_requirement).resource_listdir( resource_name ) def extraction_error(self): """Give an error message for problems extracting file(s)""" old_exc = sys.exc_info()[1] cache_path = self.extraction_path or get_default_cache() err = ExtractionError("""Can't extract file(s) to egg cache The following error occurred while trying to extract file(s) to the Python egg cache: %s The Python egg cache directory is currently set to: %s Perhaps your account does not have write access to this directory? You can change the cache directory by setting the PYTHON_EGG_CACHE environment variable to point to an accessible directory. """ % (old_exc, cache_path) ) err.manager = self err.cache_path = cache_path err.original_error = old_exc raise err def get_cache_path(self, archive_name, names=()): """Return absolute location in cache for `archive_name` and `names` The parent directory of the resulting path will be created if it does not already exist. 
`archive_name` should be the base filename of the enclosing egg (which may not be the name of the enclosing zipfile!), including its ".egg" extension. `names`, if provided, should be a sequence of path name parts "under" the egg's extraction location. This method should only be called by resource providers that need to obtain an extraction location, and only for names they intend to extract, as it tracks the generated names for possible cleanup later. """ extract_path = self.extraction_path or get_default_cache() target_path = os.path.join(extract_path, archive_name+'-tmp', *names) try: _bypass_ensure_directory(target_path) except: self.extraction_error() self._warn_unsafe_extraction_path(extract_path) self.cached_files[target_path] = 1 return target_path @staticmethod def _warn_unsafe_extraction_path(path): """ If the default extraction path is overridden and set to an insecure location, such as /tmp, it opens up an opportunity for an attacker to replace an extracted file with an unauthorized payload. Warn the user if a known insecure location is used. See Distribute #375 for more details. """ if os.name == 'nt' and not path.startswith(os.environ['windir']): # On Windows, permissions are generally restrictive by default # and temp directories are not writable by other users, so # bypass the warning. return mode = os.stat(path).st_mode if mode & stat.S_IWOTH or mode & stat.S_IWGRP: msg = ("%s is writable by group/others and vulnerable to attack " "when " "used with get_resource_filename. Consider a more secure " "location (set with .set_extraction_path or the " "PYTHON_EGG_CACHE environment variable)." % path) warnings.warn(msg, UserWarning) def postprocess(self, tempname, filename): """Perform any platform-specific postprocessing of `tempname` This is where Mac header rewrites should be done; other platforms don't have anything special they should do. Resource providers should call this method ONLY after successfully extracting a compressed resource. They must NOT call it on resources that are already in the filesystem. `tempname` is the current (temporary) name of the file, and `filename` is the name it will be renamed to by the caller after this routine returns. """ if os.name == 'posix': # Make the resource executable mode = ((os.stat(tempname).st_mode) | 0o555) & 0o7777 os.chmod(tempname, mode) def set_extraction_path(self, path): """Set the base path where resources will be extracted to, if needed. If you do not call this routine before any extractions take place, the path defaults to the return value of ``get_default_cache()``. (Which is based on the ``PYTHON_EGG_CACHE`` environment variable, with various platform-specific fallbacks. See that routine's documentation for more details.) Resources are extracted to subdirectories of this path based upon information given by the ``IResourceProvider``. You may set this to a temporary directory, but then you must call ``cleanup_resources()`` to delete the extracted files when done. There is no guarantee that ``cleanup_resources()`` will be able to remove all extracted files. (Note: you may not change the extraction path for a given resource manager once resources have been extracted, unless you first call ``cleanup_resources()``.) 
""" if self.cached_files: raise ValueError( "Can't change extraction path, files already extracted" ) self.extraction_path = path def cleanup_resources(self, force=False): """ Delete all extracted resource files and directories, returning a list of the file and directory names that could not be successfully removed. This function does not have any concurrency protection, so it should generally only be called when the extraction path is a temporary directory exclusive to a single process. This method is not automatically called; you must call it explicitly or register it as an ``atexit`` function if you wish to ensure cleanup of a temporary directory used for extractions. """ # XXX def get_default_cache(): """Determine the default cache location This returns the ``PYTHON_EGG_CACHE`` environment variable, if set. Otherwise, on Windows, it returns a "Python-Eggs" subdirectory of the "Application Data" directory. On all other systems, it's "~/.python-eggs". """ try: return os.environ['PYTHON_EGG_CACHE'] except KeyError: pass if os.name!='nt': return os.path.expanduser('~/.python-eggs') # XXX this may be locale-specific! app_data = 'Application Data' app_homes = [ # best option, should be locale-safe (('APPDATA',), None), (('USERPROFILE',), app_data), (('HOMEDRIVE','HOMEPATH'), app_data), (('HOMEPATH',), app_data), (('HOME',), None), # 95/98/ME (('WINDIR',), app_data), ] for keys, subdir in app_homes: dirname = '' for key in keys: if key in os.environ: dirname = os.path.join(dirname, os.environ[key]) else: break else: if subdir: dirname = os.path.join(dirname, subdir) return os.path.join(dirname, 'Python-Eggs') else: raise RuntimeError( "Please set the PYTHON_EGG_CACHE enviroment variable" ) def safe_name(name): """Convert an arbitrary string to a standard distribution name Any runs of non-alphanumeric/. characters are replaced with a single '-'. """ return re.sub('[^A-Za-z0-9.]+', '-', name) def safe_version(version): """ Convert an arbitrary string to a standard version string """ try: # normalize the version return str(packaging.version.Version(version)) except packaging.version.InvalidVersion: version = version.replace(' ','.') return re.sub('[^A-Za-z0-9.]+', '-', version) def safe_extra(extra): """Convert an arbitrary string to a standard 'extra' name Any runs of non-alphanumeric characters are replaced with a single '_', and the result is always lowercased. """ return re.sub('[^A-Za-z0-9.]+', '_', extra).lower() def to_filename(name): """Convert a project or version name to its filename-escaped form Any '-' characters are currently replaced with '_'. """ return name.replace('-','_') class MarkerEvaluation(object): values = { 'os_name': lambda: os.name, 'sys_platform': lambda: sys.platform, 'python_full_version': platform.python_version, 'python_version': lambda: platform.python_version()[:3], 'platform_version': platform.version, 'platform_machine': platform.machine, 'python_implementation': platform.python_implementation, } @classmethod def is_invalid_marker(cls, text): """ Validate text as a PEP 426 environment marker; return an exception if invalid or False otherwise. """ try: cls.evaluate_marker(text) except SyntaxError as e: return cls.normalize_exception(e) return False @staticmethod def normalize_exception(exc): """ Given a SyntaxError from a marker evaluation, normalize the error message: - Remove indications of filename and line number. - Replace platform-specific error messages with standard error messages. 
""" subs = { 'unexpected EOF while parsing': 'invalid syntax', 'parenthesis is never closed': 'invalid syntax', } exc.filename = None exc.lineno = None exc.msg = subs.get(exc.msg, exc.msg) return exc @classmethod def and_test(cls, nodelist): # MUST NOT short-circuit evaluation, or invalid syntax can be skipped! items = [ cls.interpret(nodelist[i]) for i in range(1, len(nodelist), 2) ] return functools.reduce(operator.and_, items) @classmethod def test(cls, nodelist): # MUST NOT short-circuit evaluation, or invalid syntax can be skipped! items = [ cls.interpret(nodelist[i]) for i in range(1, len(nodelist), 2) ] return functools.reduce(operator.or_, items) @classmethod def atom(cls, nodelist): t = nodelist[1][0] if t == token.LPAR: if nodelist[2][0] == token.RPAR: raise SyntaxError("Empty parentheses") return cls.interpret(nodelist[2]) msg = "Language feature not supported in environment markers" raise SyntaxError(msg) @classmethod def comparison(cls, nodelist): if len(nodelist) > 4: msg = "Chained comparison not allowed in environment markers" raise SyntaxError(msg) comp = nodelist[2][1] cop = comp[1] if comp[0] == token.NAME: if len(nodelist[2]) == 3: if cop == 'not': cop = 'not in' else: cop = 'is not' try: cop = cls.get_op(cop) except KeyError: msg = repr(cop) + " operator not allowed in environment markers" raise SyntaxError(msg) return cop(cls.evaluate(nodelist[1]), cls.evaluate(nodelist[3])) @classmethod def get_op(cls, op): ops = { symbol.test: cls.test, symbol.and_test: cls.and_test, symbol.atom: cls.atom, symbol.comparison: cls.comparison, 'not in': lambda x, y: x not in y, 'in': lambda x, y: x in y, '==': operator.eq, '!=': operator.ne, '<': operator.lt, '>': operator.gt, '<=': operator.le, '>=': operator.ge, } if hasattr(symbol, 'or_test'): ops[symbol.or_test] = cls.test return ops[op] @classmethod def evaluate_marker(cls, text, extra=None): """ Evaluate a PEP 426 environment marker on CPython 2.4+. Return a boolean indicating the marker result in this environment. Raise SyntaxError if marker is invalid. This implementation uses the 'parser' module, which is not implemented on Jython and has been superseded by the 'ast' module in Python 2.6 and later. """ return cls.interpret(parser.expr(text).totuple(1)[1]) @classmethod def _markerlib_evaluate(cls, text): """ Evaluate a PEP 426 environment marker using markerlib. Return a boolean indicating the marker result in this environment. Raise SyntaxError if marker is invalid. """ from pip._vendor import _markerlib # markerlib implements Metadata 1.2 (PEP 345) environment markers. # Translate the variables to Metadata 2.0 (PEP 426). env = _markerlib.default_environment() for key in env.keys(): new_key = key.replace('.', '_') env[new_key] = env.pop(key) try: result = _markerlib.interpret(text, env) except NameError as e: raise SyntaxError(e.args[0]) return result if 'parser' not in globals(): # Fall back to less-complete _markerlib implementation if 'parser' module # is not available. 
evaluate_marker = _markerlib_evaluate @classmethod def interpret(cls, nodelist): while len(nodelist)==2: nodelist = nodelist[1] try: op = cls.get_op(nodelist[0]) except KeyError: raise SyntaxError("Comparison or logical expression expected") return op(nodelist) @classmethod def evaluate(cls, nodelist): while len(nodelist)==2: nodelist = nodelist[1] kind = nodelist[0] name = nodelist[1] if kind==token.NAME: try: op = cls.values[name] except KeyError: raise SyntaxError("Unknown name %r" % name) return op() if kind==token.STRING: s = nodelist[1] if not cls._safe_string(s): raise SyntaxError( "Only plain strings allowed in environment markers") return s[1:-1] msg = "Language feature not supported in environment markers" raise SyntaxError(msg) @staticmethod def _safe_string(cand): return ( cand[:1] in "'\"" and not cand.startswith('"""') and not cand.startswith("'''") and '\\' not in cand ) invalid_marker = MarkerEvaluation.is_invalid_marker evaluate_marker = MarkerEvaluation.evaluate_marker class NullProvider: """Try to implement resources and metadata for arbitrary PEP 302 loaders""" egg_name = None egg_info = None loader = None def __init__(self, module): self.loader = getattr(module, '__loader__', None) self.module_path = os.path.dirname(getattr(module, '__file__', '')) def get_resource_filename(self, manager, resource_name): return self._fn(self.module_path, resource_name) def get_resource_stream(self, manager, resource_name): return io.BytesIO(self.get_resource_string(manager, resource_name)) def get_resource_string(self, manager, resource_name): return self._get(self._fn(self.module_path, resource_name)) def has_resource(self, resource_name): return self._has(self._fn(self.module_path, resource_name)) def has_metadata(self, name): return self.egg_info and self._has(self._fn(self.egg_info, name)) if sys.version_info <= (3,): def get_metadata(self, name): if not self.egg_info: return "" return self._get(self._fn(self.egg_info, name)) else: def get_metadata(self, name): if not self.egg_info: return "" return self._get(self._fn(self.egg_info, name)).decode("utf-8") def get_metadata_lines(self, name): return yield_lines(self.get_metadata(name)) def resource_isdir(self, resource_name): return self._isdir(self._fn(self.module_path, resource_name)) def metadata_isdir(self, name): return self.egg_info and self._isdir(self._fn(self.egg_info, name)) def resource_listdir(self, resource_name): return self._listdir(self._fn(self.module_path, resource_name)) def metadata_listdir(self, name): if self.egg_info: return self._listdir(self._fn(self.egg_info, name)) return [] def run_script(self, script_name, namespace): script = 'scripts/'+script_name if not self.has_metadata(script): raise ResolutionError("No script named %r" % script_name) script_text = self.get_metadata(script).replace('\r\n', '\n') script_text = script_text.replace('\r', '\n') script_filename = self._fn(self.egg_info, script) namespace['__file__'] = script_filename if os.path.exists(script_filename): source = open(script_filename).read() code = compile(source, script_filename, 'exec') exec(code, namespace, namespace) else: from linecache import cache cache[script_filename] = ( len(script_text), 0, script_text.split('\n'), script_filename ) script_code = compile(script_text, script_filename,'exec') exec(script_code, namespace, namespace) def _has(self, path): raise NotImplementedError( "Can't perform this operation for unregistered loader type" ) def _isdir(self, path): raise NotImplementedError( "Can't perform this operation for 
unregistered loader type" ) def _listdir(self, path): raise NotImplementedError( "Can't perform this operation for unregistered loader type" ) def _fn(self, base, resource_name): if resource_name: return os.path.join(base, *resource_name.split('/')) return base def _get(self, path): if hasattr(self.loader, 'get_data'): return self.loader.get_data(path) raise NotImplementedError( "Can't perform this operation for loaders without 'get_data()'" ) register_loader_type(object, NullProvider) class EggProvider(NullProvider): """Provider based on a virtual filesystem""" def __init__(self, module): NullProvider.__init__(self, module) self._setup_prefix() def _setup_prefix(self): # we assume here that our metadata may be nested inside a "basket" # of multiple eggs; that's why we use module_path instead of .archive path = self.module_path old = None while path!=old: if path.lower().endswith('.egg'): self.egg_name = os.path.basename(path) self.egg_info = os.path.join(path, 'EGG-INFO') self.egg_root = path break old = path path, base = os.path.split(path) class DefaultProvider(EggProvider): """Provides access to package resources in the filesystem""" def _has(self, path): return os.path.exists(path) def _isdir(self, path): return os.path.isdir(path) def _listdir(self, path): return os.listdir(path) def get_resource_stream(self, manager, resource_name): return open(self._fn(self.module_path, resource_name), 'rb') def _get(self, path): with open(path, 'rb') as stream: return stream.read() register_loader_type(type(None), DefaultProvider) if importlib_machinery is not None: register_loader_type(importlib_machinery.SourceFileLoader, DefaultProvider) class EmptyProvider(NullProvider): """Provider that returns nothing for all requests""" _isdir = _has = lambda self, path: False _get = lambda self, path: '' _listdir = lambda self, path: [] module_path = None def __init__(self): pass empty_provider = EmptyProvider() class ZipManifests(dict): """ zip manifest builder """ @classmethod def build(cls, path): """ Build a dictionary similar to the zipimport directory caches, except instead of tuples, store ZipInfo objects. Use a platform-specific path separator (os.sep) for the path keys for compatibility with pypy on Windows. """ with ContextualZipFile(path) as zfile: items = ( ( name.replace('/', os.sep), zfile.getinfo(name), ) for name in zfile.namelist() ) return dict(items) load = build class MemoizedZipManifests(ZipManifests): """ Memoized zipfile manifests. """ manifest_mod = collections.namedtuple('manifest_mod', 'manifest mtime') def load(self, path): """ Load a manifest at path or return a suitable manifest already loaded. 
""" path = os.path.normpath(path) mtime = os.stat(path).st_mtime if path not in self or self[path].mtime != mtime: manifest = self.build(path) self[path] = self.manifest_mod(manifest, mtime) return self[path].manifest class ContextualZipFile(zipfile.ZipFile): """ Supplement ZipFile class to support context manager for Python 2.6 """ def __enter__(self): return self def __exit__(self, type, value, traceback): self.close() def __new__(cls, *args, **kwargs): """ Construct a ZipFile or ContextualZipFile as appropriate """ if hasattr(zipfile.ZipFile, '__exit__'): return zipfile.ZipFile(*args, **kwargs) return super(ContextualZipFile, cls).__new__(cls) class ZipProvider(EggProvider): """Resource support for zips and eggs""" eagers = None _zip_manifests = MemoizedZipManifests() def __init__(self, module): EggProvider.__init__(self, module) self.zip_pre = self.loader.archive+os.sep def _zipinfo_name(self, fspath): # Convert a virtual filename (full path to file) into a zipfile subpath # usable with the zipimport directory cache for our target archive if fspath.startswith(self.zip_pre): return fspath[len(self.zip_pre):] raise AssertionError( "%s is not a subpath of %s" % (fspath, self.zip_pre) ) def _parts(self, zip_path): # Convert a zipfile subpath into an egg-relative path part list. # pseudo-fs path fspath = self.zip_pre+zip_path if fspath.startswith(self.egg_root+os.sep): return fspath[len(self.egg_root)+1:].split(os.sep) raise AssertionError( "%s is not a subpath of %s" % (fspath, self.egg_root) ) @property def zipinfo(self): return self._zip_manifests.load(self.loader.archive) def get_resource_filename(self, manager, resource_name): if not self.egg_name: raise NotImplementedError( "resource_filename() only supported for .egg, not .zip" ) # no need to lock for extraction, since we use temp names zip_path = self._resource_to_zip(resource_name) eagers = self._get_eager_resources() if '/'.join(self._parts(zip_path)) in eagers: for name in eagers: self._extract_resource(manager, self._eager_to_zip(name)) return self._extract_resource(manager, zip_path) @staticmethod def _get_date_and_size(zip_stat): size = zip_stat.file_size # ymdhms+wday, yday, dst date_time = zip_stat.date_time + (0, 0, -1) # 1980 offset already done timestamp = time.mktime(date_time) return timestamp, size def _extract_resource(self, manager, zip_path): if zip_path in self._index(): for name in self._index()[zip_path]: last = self._extract_resource( manager, os.path.join(zip_path, name) ) # return the extracted directory name return os.path.dirname(last) timestamp, size = self._get_date_and_size(self.zipinfo[zip_path]) if not WRITE_SUPPORT: raise IOError('"os.rename" and "os.unlink" are not supported ' 'on this platform') try: real_path = manager.get_cache_path( self.egg_name, self._parts(zip_path) ) if self._is_current(real_path, zip_path): return real_path outf, tmpnam = _mkstemp(".$extract", dir=os.path.dirname(real_path)) os.write(outf, self.loader.get_data(zip_path)) os.close(outf) utime(tmpnam, (timestamp, timestamp)) manager.postprocess(tmpnam, real_path) try: rename(tmpnam, real_path) except os.error: if os.path.isfile(real_path): if self._is_current(real_path, zip_path): # the file became current since it was checked above, # so proceed. 
return real_path # Windows, del old file and retry elif os.name=='nt': unlink(real_path) rename(tmpnam, real_path) return real_path raise except os.error: # report a user-friendly error manager.extraction_error() return real_path def _is_current(self, file_path, zip_path): """ Return True if the file_path is current for this zip_path """ timestamp, size = self._get_date_and_size(self.zipinfo[zip_path]) if not os.path.isfile(file_path): return False stat = os.stat(file_path) if stat.st_size!=size or stat.st_mtime!=timestamp: return False # check that the contents match zip_contents = self.loader.get_data(zip_path) with open(file_path, 'rb') as f: file_contents = f.read() return zip_contents == file_contents def _get_eager_resources(self): if self.eagers is None: eagers = [] for name in ('native_libs.txt', 'eager_resources.txt'): if self.has_metadata(name): eagers.extend(self.get_metadata_lines(name)) self.eagers = eagers return self.eagers def _index(self): try: return self._dirindex except AttributeError: ind = {} for path in self.zipinfo: parts = path.split(os.sep) while parts: parent = os.sep.join(parts[:-1]) if parent in ind: ind[parent].append(parts[-1]) break else: ind[parent] = [parts.pop()] self._dirindex = ind return ind def _has(self, fspath): zip_path = self._zipinfo_name(fspath) return zip_path in self.zipinfo or zip_path in self._index() def _isdir(self, fspath): return self._zipinfo_name(fspath) in self._index() def _listdir(self, fspath): return list(self._index().get(self._zipinfo_name(fspath), ())) def _eager_to_zip(self, resource_name): return self._zipinfo_name(self._fn(self.egg_root, resource_name)) def _resource_to_zip(self, resource_name): return self._zipinfo_name(self._fn(self.module_path, resource_name)) register_loader_type(zipimport.zipimporter, ZipProvider) class FileMetadata(EmptyProvider): """Metadata handler for standalone PKG-INFO files Usage:: metadata = FileMetadata("/path/to/PKG-INFO") This provider rejects all data and metadata requests except for PKG-INFO, which is treated as existing, and will be the contents of the file at the provided location. 
""" def __init__(self, path): self.path = path def has_metadata(self, name): return name=='PKG-INFO' def get_metadata(self, name): if name=='PKG-INFO': with open(self.path,'rU') as f: metadata = f.read() return metadata raise KeyError("No metadata except PKG-INFO is available") def get_metadata_lines(self, name): return yield_lines(self.get_metadata(name)) class PathMetadata(DefaultProvider): """Metadata provider for egg directories Usage:: # Development eggs: egg_info = "/path/to/PackageName.egg-info" base_dir = os.path.dirname(egg_info) metadata = PathMetadata(base_dir, egg_info) dist_name = os.path.splitext(os.path.basename(egg_info))[0] dist = Distribution(basedir, project_name=dist_name, metadata=metadata) # Unpacked egg directories: egg_path = "/path/to/PackageName-ver-pyver-etc.egg" metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO')) dist = Distribution.from_filename(egg_path, metadata=metadata) """ def __init__(self, path, egg_info): self.module_path = path self.egg_info = egg_info class EggMetadata(ZipProvider): """Metadata provider for .egg files""" def __init__(self, importer): """Create a metadata provider from a zipimporter""" self.zip_pre = importer.archive+os.sep self.loader = importer if importer.prefix: self.module_path = os.path.join(importer.archive, importer.prefix) else: self.module_path = importer.archive self._setup_prefix() _declare_state('dict', _distribution_finders = {}) def register_finder(importer_type, distribution_finder): """Register `distribution_finder` to find distributions in sys.path items `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item handler), and `distribution_finder` is a callable that, passed a path item and the importer instance, yields ``Distribution`` instances found on that path item. See ``pkg_resources.find_on_path`` for an example.""" _distribution_finders[importer_type] = distribution_finder def find_distributions(path_item, only=False): """Yield distributions accessible via `path_item`""" importer = get_importer(path_item) finder = _find_adapter(_distribution_finders, importer) return finder(importer, path_item, only) def find_eggs_in_zip(importer, path_item, only=False): """ Find eggs in zip files; possibly multiple nested eggs. 
""" if importer.archive.endswith('.whl'): # wheels are not supported with this finder # they don't have PKG-INFO metadata, and won't ever contain eggs return metadata = EggMetadata(importer) if metadata.has_metadata('PKG-INFO'): yield Distribution.from_filename(path_item, metadata=metadata) if only: # don't yield nested distros return for subitem in metadata.resource_listdir('/'): if subitem.endswith('.egg'): subpath = os.path.join(path_item, subitem) for dist in find_eggs_in_zip(zipimport.zipimporter(subpath), subpath): yield dist register_finder(zipimport.zipimporter, find_eggs_in_zip) def find_nothing(importer, path_item, only=False): return () register_finder(object, find_nothing) def find_on_path(importer, path_item, only=False): """Yield distributions accessible on a sys.path directory""" path_item = _normalize_cached(path_item) if os.path.isdir(path_item) and os.access(path_item, os.R_OK): if path_item.lower().endswith('.egg'): # unpacked egg yield Distribution.from_filename( path_item, metadata=PathMetadata( path_item, os.path.join(path_item,'EGG-INFO') ) ) else: # scan for .egg and .egg-info in directory for entry in os.listdir(path_item): lower = entry.lower() if lower.endswith('.egg-info') or lower.endswith('.dist-info'): fullpath = os.path.join(path_item, entry) if os.path.isdir(fullpath): # egg-info directory, allow getting metadata metadata = PathMetadata(path_item, fullpath) else: metadata = FileMetadata(fullpath) yield Distribution.from_location( path_item, entry, metadata, precedence=DEVELOP_DIST ) elif not only and lower.endswith('.egg'): dists = find_distributions(os.path.join(path_item, entry)) for dist in dists: yield dist elif not only and lower.endswith('.egg-link'): with open(os.path.join(path_item, entry)) as entry_file: entry_lines = entry_file.readlines() for line in entry_lines: if not line.strip(): continue path = os.path.join(path_item, line.rstrip()) dists = find_distributions(path) for item in dists: yield item break register_finder(pkgutil.ImpImporter, find_on_path) if importlib_machinery is not None: register_finder(importlib_machinery.FileFinder, find_on_path) _declare_state('dict', _namespace_handlers={}) _declare_state('dict', _namespace_packages={}) def register_namespace_handler(importer_type, namespace_handler): """Register `namespace_handler` to declare namespace packages `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item handler), and `namespace_handler` is a callable like this:: def namespace_handler(importer, path_entry, moduleName, module): # return a path_entry to use for child packages Namespace handlers are only called if the importer object has already agreed that it can handle the relevant path item, and they should only return a subpath if the module __path__ does not already contain an equivalent subpath. For an example namespace handler, see ``pkg_resources.file_ns_handler``. 
""" _namespace_handlers[importer_type] = namespace_handler def _handle_ns(packageName, path_item): """Ensure that named package includes a subpath of path_item (if needed)""" importer = get_importer(path_item) if importer is None: return None loader = importer.find_module(packageName) if loader is None: return None module = sys.modules.get(packageName) if module is None: module = sys.modules[packageName] = types.ModuleType(packageName) module.__path__ = [] _set_parent_ns(packageName) elif not hasattr(module,'__path__'): raise TypeError("Not a package:", packageName) handler = _find_adapter(_namespace_handlers, importer) subpath = handler(importer, path_item, packageName, module) if subpath is not None: path = module.__path__ path.append(subpath) loader.load_module(packageName) for path_item in path: if path_item not in module.__path__: module.__path__.append(path_item) return subpath def declare_namespace(packageName): """Declare that package 'packageName' is a namespace package""" _imp.acquire_lock() try: if packageName in _namespace_packages: return path, parent = sys.path, None if '.' in packageName: parent = '.'.join(packageName.split('.')[:-1]) declare_namespace(parent) if parent not in _namespace_packages: __import__(parent) try: path = sys.modules[parent].__path__ except AttributeError: raise TypeError("Not a package:", parent) # Track what packages are namespaces, so when new path items are added, # they can be updated _namespace_packages.setdefault(parent,[]).append(packageName) _namespace_packages.setdefault(packageName,[]) for path_item in path: # Ensure all the parent's path items are reflected in the child, # if they apply _handle_ns(packageName, path_item) finally: _imp.release_lock() def fixup_namespace_packages(path_item, parent=None): """Ensure that previously-declared namespace packages include path_item""" _imp.acquire_lock() try: for package in _namespace_packages.get(parent,()): subpath = _handle_ns(package, path_item) if subpath: fixup_namespace_packages(subpath, package) finally: _imp.release_lock() def file_ns_handler(importer, path_item, packageName, module): """Compute an ns-package subpath for a filesystem or zipfile importer""" subpath = os.path.join(path_item, packageName.split('.')[-1]) normalized = _normalize_cached(subpath) for item in module.__path__: if _normalize_cached(item)==normalized: break else: # Only return the path if it's not already there return subpath register_namespace_handler(pkgutil.ImpImporter, file_ns_handler) register_namespace_handler(zipimport.zipimporter, file_ns_handler) if importlib_machinery is not None: register_namespace_handler(importlib_machinery.FileFinder, file_ns_handler) def null_ns_handler(importer, path_item, packageName, module): return None register_namespace_handler(object, null_ns_handler) def normalize_path(filename): """Normalize a file/dir name for comparison purposes""" return os.path.normcase(os.path.realpath(filename)) def _normalize_cached(filename, _cache={}): try: return _cache[filename] except KeyError: _cache[filename] = result = normalize_path(filename) return result def _set_parent_ns(packageName): parts = packageName.split('.') name = parts.pop() if parts: parent = '.'.join(parts) setattr(sys.modules[parent], name, sys.modules[packageName]) def yield_lines(strs): """Yield non-empty/non-comment lines of a string or sequence""" if isinstance(strs, string_types): for s in strs.splitlines(): s = s.strip() # skip blank lines/comments if s and not s.startswith('#'): yield s else: for ss in strs: for s in 
yield_lines(ss): yield s # whitespace and comment LINE_END = re.compile(r"\s*(#.*)?$").match # line continuation CONTINUE = re.compile(r"\s*\\\s*(#.*)?$").match # Distribution or extra DISTRO = re.compile(r"\s*((\w|[-.])+)").match # ver. info VERSION = re.compile(r"\s*(<=?|>=?|===?|!=|~=)\s*((\w|[-.*_!+])+)").match # comma between items COMMA = re.compile(r"\s*,").match OBRACKET = re.compile(r"\s*\[").match CBRACKET = re.compile(r"\s*\]").match MODULE = re.compile(r"\w+(\.\w+)*$").match EGG_NAME = re.compile( r""" (?P<name>[^-]+) ( -(?P<ver>[^-]+) ( -py(?P<pyver>[^-]+) ( -(?P<plat>.+) )? )? )? """, re.VERBOSE | re.IGNORECASE, ).match class EntryPoint(object): """Object representing an advertised importable object""" def __init__(self, name, module_name, attrs=(), extras=(), dist=None): if not MODULE(module_name): raise ValueError("Invalid module name", module_name) self.name = name self.module_name = module_name self.attrs = tuple(attrs) self.extras = Requirement.parse(("x[%s]" % ','.join(extras))).extras self.dist = dist def __str__(self): s = "%s = %s" % (self.name, self.module_name) if self.attrs: s += ':' + '.'.join(self.attrs) if self.extras: s += ' [%s]' % ','.join(self.extras) return s def __repr__(self): return "EntryPoint.parse(%r)" % str(self) def load(self, require=True, *args, **kwargs): """ Require packages for this EntryPoint, then resolve it. """ if not require or args or kwargs: warnings.warn( "Parameters to load are deprecated. Call .resolve and " ".require separately.", DeprecationWarning, stacklevel=2, ) if require: self.require(*args, **kwargs) return self.resolve() def resolve(self): """ Resolve the entry point from its module and attrs. """ module = __import__(self.module_name, fromlist=['__name__'], level=0) try: return functools.reduce(getattr, self.attrs, module) except AttributeError as exc: raise ImportError(str(exc)) def require(self, env=None, installer=None): if self.extras and not self.dist: raise UnknownExtra("Can't require() without a distribution", self) reqs = self.dist.requires(self.extras) items = working_set.resolve(reqs, env, installer) list(map(working_set.add, items)) pattern = re.compile( r'\s*' r'(?P<name>.+?)\s*' r'=\s*' r'(?P<module>[\w.]+)\s*' r'(:\s*(?P<attr>[\w.]+))?\s*' r'(?P<extras>\[.*\])?\s*$' ) @classmethod def parse(cls, src, dist=None): """Parse a single entry point from string `src` Entry point syntax follows the form:: name = some.module:some.attr [extra1, extra2] The entry name and module name are required, but the ``:attrs`` and ``[extras]`` parts are optional """ m = cls.pattern.match(src) if not m: msg = "EntryPoint must be in 'name=module:attrs [extras]' format" raise ValueError(msg, src) res = m.groupdict() extras = cls._parse_extras(res['extras']) attrs = res['attr'].split('.') if res['attr'] else () return cls(res['name'], res['module'], attrs, extras, dist) @classmethod def _parse_extras(cls, extras_spec): if not extras_spec: return () req = Requirement.parse('x' + extras_spec) if req.specs: raise ValueError() return req.extras @classmethod def parse_group(cls, group, lines, dist=None): """Parse an entry point group""" if not MODULE(group): raise ValueError("Invalid group name", group) this = {} for line in yield_lines(lines): ep = cls.parse(line, dist) if ep.name in this: raise ValueError("Duplicate entry point", group, ep.name) this[ep.name]=ep return this @classmethod def parse_map(cls, data, dist=None): """Parse a map of entry point groups""" if isinstance(data, dict): data = data.items() else: data = 
split_sections(data) maps = {} for group, lines in data: if group is None: if not lines: continue raise ValueError("Entry points must be listed in groups") group = group.strip() if group in maps: raise ValueError("Duplicate group name", group) maps[group] = cls.parse_group(group, lines, dist) return maps def _remove_md5_fragment(location): if not location: return '' parsed = urlparse(location) if parsed[-1].startswith('md5='): return urlunparse(parsed[:-1] + ('',)) return location class Distribution(object): """Wrap an actual or potential sys.path entry w/metadata""" PKG_INFO = 'PKG-INFO' def __init__(self, location=None, metadata=None, project_name=None, version=None, py_version=PY_MAJOR, platform=None, precedence=EGG_DIST): self.project_name = safe_name(project_name or 'Unknown') if version is not None: self._version = safe_version(version) self.py_version = py_version self.platform = platform self.location = location self.precedence = precedence self._provider = metadata or empty_provider @classmethod def from_location(cls, location, basename, metadata=None,**kw): project_name, version, py_version, platform = [None]*4 basename, ext = os.path.splitext(basename) if ext.lower() in _distributionImpl: # .dist-info gets much metadata differently match = EGG_NAME(basename) if match: project_name, version, py_version, platform = match.group( 'name','ver','pyver','plat' ) cls = _distributionImpl[ext.lower()] return cls( location, metadata, project_name=project_name, version=version, py_version=py_version, platform=platform, **kw ) @property def hashcmp(self): return ( self.parsed_version, self.precedence, self.key, _remove_md5_fragment(self.location), self.py_version or '', self.platform or '', ) def __hash__(self): return hash(self.hashcmp) def __lt__(self, other): return self.hashcmp < other.hashcmp def __le__(self, other): return self.hashcmp <= other.hashcmp def __gt__(self, other): return self.hashcmp > other.hashcmp def __ge__(self, other): return self.hashcmp >= other.hashcmp def __eq__(self, other): if not isinstance(other, self.__class__): # It's not a Distribution, so they are not equal return False return self.hashcmp == other.hashcmp def __ne__(self, other): return not self == other # These properties have to be lazy so that we don't have to load any # metadata until/unless it's actually needed. (i.e., some distributions # may not know their name or version without loading PKG-INFO) @property def key(self): try: return self._key except AttributeError: self._key = key = self.project_name.lower() return key @property def parsed_version(self): if not hasattr(self, "_parsed_version"): self._parsed_version = parse_version(self.version) return self._parsed_version def _warn_legacy_version(self): LV = packaging.version.LegacyVersion is_legacy = isinstance(self._parsed_version, LV) if not is_legacy: return # While an empty version is technically a legacy version and # is not a valid PEP 440 version, it's also unlikely to # actually come from someone and instead it is more likely that # it comes from setuptools attempting to parse a filename and # including it in the list. So for that we'll gate this warning # on if the version is anything at all or not. if not self.version: return tmpl = textwrap.dedent(""" '{project_name} ({version})' is being parsed as a legacy, non PEP 440, version. You may find odd behavior and sort order. In particular it will be sorted as less than 0.0. It is recommended to migrate to PEP 440 compatible versions. 
""").strip().replace('\n', ' ') warnings.warn(tmpl.format(**vars(self)), PEP440Warning) @property def version(self): try: return self._version except AttributeError: for line in self._get_metadata(self.PKG_INFO): if line.lower().startswith('version:'): self._version = safe_version(line.split(':',1)[1].strip()) return self._version else: tmpl = "Missing 'Version:' header and/or %s file" raise ValueError(tmpl % self.PKG_INFO, self) @property def _dep_map(self): try: return self.__dep_map except AttributeError: dm = self.__dep_map = {None: []} for name in 'requires.txt', 'depends.txt': for extra, reqs in split_sections(self._get_metadata(name)): if extra: if ':' in extra: extra, marker = extra.split(':', 1) if invalid_marker(marker): # XXX warn reqs=[] elif not evaluate_marker(marker): reqs=[] extra = safe_extra(extra) or None dm.setdefault(extra,[]).extend(parse_requirements(reqs)) return dm def requires(self, extras=()): """List of Requirements needed for this distro if `extras` are used""" dm = self._dep_map deps = [] deps.extend(dm.get(None, ())) for ext in extras: try: deps.extend(dm[safe_extra(ext)]) except KeyError: raise UnknownExtra( "%s has no such extra feature %r" % (self, ext) ) return deps def _get_metadata(self, name): if self.has_metadata(name): for line in self.get_metadata_lines(name): yield line def activate(self, path=None): """Ensure distribution is importable on `path` (default=sys.path)""" if path is None: path = sys.path self.insert_on(path) if path is sys.path: fixup_namespace_packages(self.location) for pkg in self._get_metadata('namespace_packages.txt'): if pkg in sys.modules: declare_namespace(pkg) def egg_name(self): """Return what this distribution's standard .egg filename should be""" filename = "%s-%s-py%s" % ( to_filename(self.project_name), to_filename(self.version), self.py_version or PY_MAJOR ) if self.platform: filename += '-' + self.platform return filename def __repr__(self): if self.location: return "%s (%s)" % (self, self.location) else: return str(self) def __str__(self): try: version = getattr(self, 'version', None) except ValueError: version = None version = version or "[unknown version]" return "%s %s" % (self.project_name, version) def __getattr__(self, attr): """Delegate all unrecognized public attributes to .metadata provider""" if attr.startswith('_'): raise AttributeError(attr) return getattr(self._provider, attr) @classmethod def from_filename(cls, filename, metadata=None, **kw): return cls.from_location( _normalize_cached(filename), os.path.basename(filename), metadata, **kw ) def as_requirement(self): """Return a ``Requirement`` that matches this distribution exactly""" if isinstance(self.parsed_version, packaging.version.Version): spec = "%s==%s" % (self.project_name, self.parsed_version) else: spec = "%s===%s" % (self.project_name, self.parsed_version) return Requirement.parse(spec) def load_entry_point(self, group, name): """Return the `name` entry point of `group` or raise ImportError""" ep = self.get_entry_info(group, name) if ep is None: raise ImportError("Entry point %r not found" % ((group, name),)) return ep.load() def get_entry_map(self, group=None): """Return the entry point map for `group`, or the full entry map""" try: ep_map = self._ep_map except AttributeError: ep_map = self._ep_map = EntryPoint.parse_map( self._get_metadata('entry_points.txt'), self ) if group is not None: return ep_map.get(group,{}) return ep_map def get_entry_info(self, group, name): """Return the EntryPoint object for `group`+`name`, or ``None``""" return 
self.get_entry_map(group).get(name) def insert_on(self, path, loc = None): """Insert self.location in path before its nearest parent directory""" loc = loc or self.location if not loc: return nloc = _normalize_cached(loc) bdir = os.path.dirname(nloc) npath= [(p and _normalize_cached(p) or p) for p in path] for p, item in enumerate(npath): if item == nloc: break elif item == bdir and self.precedence == EGG_DIST: # if it's an .egg, give it precedence over its directory if path is sys.path: self.check_version_conflict() path.insert(p, loc) npath.insert(p, nloc) break else: if path is sys.path: self.check_version_conflict() path.append(loc) return # p is the spot where we found or inserted loc; now remove duplicates while True: try: np = npath.index(nloc, p+1) except ValueError: break else: del npath[np], path[np] # ha! p = np return def check_version_conflict(self): if self.key == 'setuptools': # ignore the inevitable setuptools self-conflicts :( return nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt')) loc = normalize_path(self.location) for modname in self._get_metadata('top_level.txt'): if (modname not in sys.modules or modname in nsp or modname in _namespace_packages): continue if modname in ('pkg_resources', 'setuptools', 'site'): continue fn = getattr(sys.modules[modname], '__file__', None) if fn and (normalize_path(fn).startswith(loc) or fn.startswith(self.location)): continue issue_warning( "Module %s was already imported from %s, but %s is being added" " to sys.path" % (modname, fn, self.location), ) def has_version(self): try: self.version except ValueError: issue_warning("Unbuilt egg for " + repr(self)) return False return True def clone(self,**kw): """Copy this distribution, substituting in any changed keyword args""" names = 'project_name version py_version platform location precedence' for attr in names.split(): kw.setdefault(attr, getattr(self, attr, None)) kw.setdefault('metadata', self._provider) return self.__class__(**kw) @property def extras(self): return [dep for dep in self._dep_map if dep] class DistInfoDistribution(Distribution): """Wrap an actual or potential sys.path entry w/metadata, .dist-info style""" PKG_INFO = 'METADATA' EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])") @property def _parsed_pkg_info(self): """Parse and cache metadata""" try: return self._pkg_info except AttributeError: metadata = self.get_metadata(self.PKG_INFO) self._pkg_info = email.parser.Parser().parsestr(metadata) return self._pkg_info @property def _dep_map(self): try: return self.__dep_map except AttributeError: self.__dep_map = self._compute_dependencies() return self.__dep_map def _preparse_requirement(self, requires_dist): """Convert 'Foobar (1); baz' to ('Foobar ==1', 'baz') Split environment marker, add == prefix to version specifiers as necessary, and remove parenthesis. 
""" parts = requires_dist.split(';', 1) + [''] distvers = parts[0].strip() mark = parts[1].strip() distvers = re.sub(self.EQEQ, r"\1==\2\3", distvers) distvers = distvers.replace('(', '').replace(')', '') return (distvers, mark) def _compute_dependencies(self): """Recompute this distribution's dependencies.""" from pip._vendor._markerlib import compile as compile_marker dm = self.__dep_map = {None: []} reqs = [] # Including any condition expressions for req in self._parsed_pkg_info.get_all('Requires-Dist') or []: distvers, mark = self._preparse_requirement(req) parsed = next(parse_requirements(distvers)) parsed.marker_fn = compile_marker(mark) reqs.append(parsed) def reqs_for_extra(extra): for req in reqs: if req.marker_fn(override={'extra':extra}): yield req common = frozenset(reqs_for_extra(None)) dm[None].extend(common) for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []: extra = safe_extra(extra.strip()) dm[extra] = list(frozenset(reqs_for_extra(extra)) - common) return dm _distributionImpl = { '.egg': Distribution, '.egg-info': Distribution, '.dist-info': DistInfoDistribution, } def issue_warning(*args,**kw): level = 1 g = globals() try: # find the first stack frame that is *not* code in # the pkg_resources module, to use for the warning while sys._getframe(level).f_globals is g: level += 1 except ValueError: pass warnings.warn(stacklevel=level + 1, *args, **kw) class RequirementParseError(ValueError): def __str__(self): return ' '.join(self.args) def parse_requirements(strs): """Yield ``Requirement`` objects for each specification in `strs` `strs` must be a string, or a (possibly-nested) iterable thereof. """ # create a steppable iterator, so we can handle \-continuations lines = iter(yield_lines(strs)) def scan_list(ITEM, TERMINATOR, line, p, groups, item_name): items = [] while not TERMINATOR(line, p): if CONTINUE(line, p): try: line = next(lines) p = 0 except StopIteration: msg = "\\ must not appear on the last nonblank line" raise RequirementParseError(msg) match = ITEM(line, p) if not match: msg = "Expected " + item_name + " in" raise RequirementParseError(msg, line, "at", line[p:]) items.append(match.group(*groups)) p = match.end() match = COMMA(line, p) if match: # skip the comma p = match.end() elif not TERMINATOR(line, p): msg = "Expected ',' or end-of-list in" raise RequirementParseError(msg, line, "at", line[p:]) match = TERMINATOR(line, p) # skip the terminator, if any if match: p = match.end() return line, p, items for line in lines: match = DISTRO(line) if not match: raise RequirementParseError("Missing distribution spec", line) project_name = match.group(1) p = match.end() extras = [] match = OBRACKET(line, p) if match: p = match.end() line, p, extras = scan_list( DISTRO, CBRACKET, line, p, (1,), "'extra' name" ) line, p, specs = scan_list(VERSION, LINE_END, line, p, (1, 2), "version spec") specs = [(op, val) for op, val in specs] yield Requirement(project_name, specs, extras) class Requirement: def __init__(self, project_name, specs, extras): """DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!""" self.unsafe_name, project_name = project_name, safe_name(project_name) self.project_name, self.key = project_name, project_name.lower() self.specifier = packaging.specifiers.SpecifierSet( ",".join(["".join([x, y]) for x, y in specs]) ) self.specs = specs self.extras = tuple(map(safe_extra, extras)) self.hashCmp = ( self.key, self.specifier, frozenset(self.extras), ) self.__hash = hash(self.hashCmp) def __str__(self): extras = ','.join(self.extras) 
if extras: extras = '[%s]' % extras return '%s%s%s' % (self.project_name, extras, self.specifier) def __eq__(self, other): return ( isinstance(other, Requirement) and self.hashCmp == other.hashCmp ) def __ne__(self, other): return not self == other def __contains__(self, item): if isinstance(item, Distribution): if item.key != self.key: return False item = item.version # Allow prereleases always in order to match the previous behavior of # this method. In the future this should be smarter and follow PEP 440 # more accurately. return self.specifier.contains(item, prereleases=True) def __hash__(self): return self.__hash def __repr__(self): return "Requirement.parse(%r)" % str(self) @staticmethod def parse(s): reqs = list(parse_requirements(s)) if reqs: if len(reqs) == 1: return reqs[0] raise ValueError("Expected only one requirement", s) raise ValueError("No requirements found", s) def _get_mro(cls): """Get an mro for a type or classic class""" if not isinstance(cls, type): class cls(cls, object): pass return cls.__mro__[1:] return cls.__mro__ def _find_adapter(registry, ob): """Return an adapter factory for `ob` from `registry`""" for t in _get_mro(getattr(ob, '__class__', type(ob))): if t in registry: return registry[t] def ensure_directory(path): """Ensure that the parent directory of `path` exists""" dirname = os.path.dirname(path) if not os.path.isdir(dirname): os.makedirs(dirname) def _bypass_ensure_directory(path): """Sandbox-bypassing version of ensure_directory()""" if not WRITE_SUPPORT: raise IOError('"os.mkdir" not supported on this platform.') dirname, filename = split(path) if dirname and filename and not isdir(dirname): _bypass_ensure_directory(dirname) mkdir(dirname, 0o755) def split_sections(s): """Split a string or iterable thereof into (section, content) pairs Each ``section`` is a stripped version of the section header ("[section]") and each ``content`` is a list of stripped lines excluding blank lines and comment-only lines. If there are any such lines before the first section header, they're returned in a first ``section`` of ``None``. """ section = None content = [] for line in yield_lines(s): if line.startswith("["): if line.endswith("]"): if section or content: yield section, content section = line[1:-1].strip() content = [] else: raise ValueError("Invalid section heading", line) else: content.append(line) # wrap up last segment yield section, content def _mkstemp(*args,**kw): old_open = os.open try: # temporarily bypass sandboxing os.open = os_open return tempfile.mkstemp(*args,**kw) finally: # and then put it back os.open = old_open # Silence the PEP440Warning by default, so that end users don't get hit by it # randomly just because they use pkg_resources. We want to append the rule # because we want earlier uses of filterwarnings to take precedence over this # one. warnings.filterwarnings("ignore", category=PEP440Warning, append=True) # from jaraco.functools 1.3 def _call_aside(f, *args, **kwargs): f(*args, **kwargs) return f @_call_aside def _initialize(g=globals()): "Set up global resource manager (deliberately not state-saved)" manager = ResourceManager() g['_manager'] = manager for name in dir(manager): if not name.startswith('_'): g[name] = getattr(manager, name) @_call_aside def _initialize_master_working_set(): """ Prepare the master working set and make the ``require()`` API available. This function has explicit effects on the global state of pkg_resources. It is intended to be invoked once at the initialization of this module. 
Invocation by other packages is unsupported and done at their own risk. """ working_set = WorkingSet._build_master() _declare_state('object', working_set=working_set) require = working_set.require iter_entry_points = working_set.iter_entry_points add_activation_listener = working_set.subscribe run_script = working_set.run_script # backward compatibility run_main = run_script # Activate all distributions already on sys.path, and ensure that # all distributions added to the working set in the future (e.g. by # calling ``require()``) will get activated as well. add_activation_listener(lambda dist: dist.activate()) working_set.entries=[] # match order list(map(working_set.add_entry, sys.path)) globals().update(locals())
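# ---------------------------------------------------------------------------
# Editor's note: a minimal, hedged usage sketch of the requirement/working-set
# API defined above; it is not part of the vendored module. The project name
# "example-pkg" and the extra "tests" are hypothetical.
def _requirement_demo():
    # Parse one requirement string into a Requirement object.
    req = Requirement.parse('example-pkg[tests]>=1.0,<2.0')
    # .key is the lower-cased project name; .specifier holds the version clauses.
    assert req.key == 'example-pkg' and req.extras == ('tests',)
    # A WorkingSet built with no arguments snapshots the current sys.path.
    ws = WorkingSet()
    try:
        # resolve() returns the distributions needed to satisfy the requirement,
        # or raises a ResolutionError subclass if they cannot be found.
        return ws.resolve([req])
    except ResolutionError:
        return []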
mpl-2.0
7,820,439,430,982,581,000
33.266495
82
0.594518
false
weiyirong/crosswalk-1
app/tools/android/parse_xpk.py
39
3550
#!/usr/bin/env python # Copyright (c) 2013 Intel Corporation. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # pylint: disable=C0301 """The script is used to parse an XPK file. It will do: 1. Check the magic file header; 2. Verify the signature of the XPK file; 3. Extract the content of the XPK file to some folder. The format of XPK file can be found at https://github.com/crosswalk-project/crosswalk-website/wiki/Crosswalk-package-management This file is used by make_apk.py. """ import optparse import os import struct import sys import zipfile EXIT_CODE_CRYPTO_NOT_FOUND = 1 EXIT_CODE_NO_XPK_FILE = 2 EXIT_CODE_XPK_FILE_NOT_EXIST = 3 EXIT_CODE_MAGIC_FAILED = 4 EXIT_CODE_VERIFICATION_FAILED = 5 EXIT_CODE_XPK_FILE_IO_ERROR = 6 XPK_MAGIC_HEAD = 'CrWk' errorMessageMap = { EXIT_CODE_CRYPTO_NOT_FOUND: 'Python module Crypto('\ 'https://www.dlitz.net/software/pycrypto/) is needed', EXIT_CODE_NO_XPK_FILE: 'Please specify XPK file by --file', EXIT_CODE_XPK_FILE_NOT_EXIST: 'The XPK file you specified does not exist', EXIT_CODE_MAGIC_FAILED: 'The file you specified is not in XPK format', EXIT_CODE_VERIFICATION_FAILED: 'Signature verification failed for the XPK file', EXIT_CODE_XPK_FILE_IO_ERROR: 'Error happened when reading the XPK file', } def HandleError(err_code): print('Error: %s' % errorMessageMap[err_code]) sys.exit(err_code) try: from Crypto.PublicKey import RSA from Crypto.Signature import PKCS1_v1_5 from Crypto.Hash import SHA except ImportError: HandleError(EXIT_CODE_CRYPTO_NOT_FOUND) def CheckMagic(input_file): magic = input_file.read(4) if magic != XPK_MAGIC_HEAD: HandleError(EXIT_CODE_MAGIC_FAILED) def GetPubkeySignature(input_file): """Return (pubkey, signature) pair""" pubkey_size, signature_size = struct.unpack('II', input_file.read(8)) return (input_file.read(pubkey_size), input_file.read(signature_size)) def ExtractXPKContent(input_file, zip_path): zip_file = open(zip_path, 'wb') zip_file.write(input_file.read()) zip_file.close() def VerifySignature(pubkey, signature, zip_path): zip_file = open(zip_path, 'rb') key = RSA.importKey(pubkey) content = SHA.new(zip_file.read()) zip_file.close() verifier = PKCS1_v1_5.new(key) if not verifier.verify(content, signature): HandleError(EXIT_CODE_VERIFICATION_FAILED) def main(): option_parser = optparse.OptionParser() option_parser.add_option('--file', '-f', help='Path to the xpk file') option_parser.add_option('--out', '-o', help='Path to extract the xpk to') opts, _ = option_parser.parse_args() if opts.file is None: HandleError(EXIT_CODE_NO_XPK_FILE) app_name = os.path.splitext(os.path.basename(opts.file))[0] if opts.out is None: opts.out = app_name if os.path.isfile(opts.file): zip_path = None try: xpk_file = open(opts.file, 'rb') CheckMagic(xpk_file) pubkey, signature = GetPubkeySignature(xpk_file) zip_path = '%s.zip' % app_name ExtractXPKContent(xpk_file, zip_path) VerifySignature(pubkey, signature, zip_path) zipfile.ZipFile(zip_path).extractall(opts.out) except SystemExit as ec: return ec.code except IOError: HandleError(EXIT_CODE_XPK_FILE_IO_ERROR) finally: xpk_file.close() if zip_path and os.path.isfile(zip_path): os.remove(zip_path) else: HandleError(EXIT_CODE_XPK_FILE_NOT_EXIST) if __name__ == '__main__': sys.exit(main())
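# Editor's note: hedged usage sketch, not part of the original script. Assuming
# a package file named MyApp.xpk (hypothetical), a typical invocation is:
#
#   python parse_xpk.py --file MyApp.xpk --out MyApp
#
# The script checks the 'CrWk' magic bytes, verifies the PKCS#1 v1.5 / SHA-1
# signature of the embedded zip, extracts its contents into the MyApp
# directory, and removes the temporary MyApp.zip afterwards.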
bsd-3-clause
-4,209,571,939,648,742,400
27.174603
88
0.69831
false
stonegithubs/odoo
addons/membership/wizard/membership_invoice.py
380
3229
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import fields, osv import openerp.addons.decimal_precision as dp class membership_invoice(osv.osv_memory): """Membership Invoice""" _name = "membership.invoice" _description = "Membership Invoice" _columns = { 'product_id': fields.many2one('product.product','Membership', required=True), 'member_price': fields.float('Member Price', digits_compute= dp.get_precision('Product Price'), required=True), } def onchange_product(self, cr, uid, ids, product_id=False): """This function returns value of product's member price based on product id. """ if not product_id: return {'value': {'member_price': False}} return {'value': {'member_price': self.pool.get('product.product').price_get(cr, uid, [product_id])[product_id]}} def membership_invoice(self, cr, uid, ids, context=None): mod_obj = self.pool.get('ir.model.data') partner_obj = self.pool.get('res.partner') datas = {} if context is None: context = {} data = self.browse(cr, uid, ids, context=context) if data: data = data[0] datas = { 'membership_product_id': data.product_id.id, 'amount': data.member_price } invoice_list = partner_obj.create_membership_invoice(cr, uid, context.get('active_ids', []), datas=datas, context=context) try: search_view_id = mod_obj.get_object_reference(cr, uid, 'account', 'view_account_invoice_filter')[1] except ValueError: search_view_id = False try: form_view_id = mod_obj.get_object_reference(cr, uid, 'account', 'invoice_form')[1] except ValueError: form_view_id = False return { 'domain': [('id', 'in', invoice_list)], 'name': 'Membership Invoices', 'view_type': 'form', 'view_mode': 'tree,form', 'res_model': 'account.invoice', 'type': 'ir.actions.act_window', 'views': [(False, 'tree'), (form_view_id, 'form')], 'search_view_id': search_view_id, } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
-6,050,591,409,046,504,000
40.935065
130
0.584391
false
DMRookie/RoomAI
models/crm/crm_kuhn/play.py
1
2054
#!/bin/python import roomai.kuhn import roomai.common import random class HumanInputPlayer(roomai.common.AbstractPlayer): """ """ def receive_info(self, info): """ Args: info: """ available_actions = info def take_action(self): """ Returns: """ action = raw_input("choosed_acton:") #action = "" return roomai.kuhn.KuhnPokerAction.lookup(action) def reset(self): """ """ pass def show_person(person_state): print ("%s"%(person_state.id) + "'s card:%d"%(person_state.card)) print ("%s"%(person_state.id) + "'s available_actions:\t" + " ".join(person_state.available_actions.keys())) def show_public(public_state): print ("turn:%d"%public_state.turn) if __name__ == "__main__": import crm_kuhn crm_player = crm_kuhn.KuhnPokerCRMPlayer() import algorithms algo = algorithms.CRMAlgorithm() env = roomai.kuhn.KuhnPokerEnv() for i in range(10000): algo.dfs(env = env, player=crm_player, p0 = 1, p1 = 1, deep = 0) print crm_player.regrets print crm_player.strategies crm_player.is_train = False players = [HumanInputPlayer(), crm_player] num_players = len(players) infos, public_state, person_states, private_state = env.init({"num_players":2,"record_history":False}) for i in range(num_players): players[i].receive_info(infos[i]) show_person(infos[i].person_state) show_public(public_state) print ("\n") while public_state.is_terminal == False: turn = public_state.turn action = players[turn].take_action() print "%d player take an action (%s)"%(turn,action.key) infos, public_state, person_states, private_state = env.forward(action) for i in range(num_players): players[i].receive_info(infos[i]) show_person(infos[i].person_state) show_public(public_state) print ("\n") print (public_state.scores)
mit
-4,503,502,269,456,290,000
26.386667
112
0.592989
false
tavendo/AutobahnPython
examples/asyncio/wamp/pubsub/unsubscribe/frontend.py
3
2752
############################################################################### # # The MIT License (MIT) # # Copyright (c) Crossbar.io Technologies GmbH # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # ############################################################################### from os import environ import asyncio from autobahn.asyncio.wamp import ApplicationSession, ApplicationRunner class Component(ApplicationSession): """ An application component that subscribes and receives events. After receiving 5 events, it unsubscribes, sleeps and then resubscribes for another run. Then it stops. """ async def test(self): self.received = 0 async def on_event(i): print("Got event: {}".format(i)) self.received += 1 if self.received > 5: self.runs += 1 if self.runs > 1: self.leave() else: await self.subscription.unsubscribe() print("Unsubscribed .. continue in 5s ..") # can't use loop.call_later() with a coroutine for some reason await asyncio.sleep(5) await self.test() self.subscription = await self.subscribe(on_event, 'com.myapp.topic1') print("Subscribed with subscription ID {}".format(self.subscription.id)) async def onJoin(self, details): self.runs = 0 await self.test() def onDisconnect(self): asyncio.get_event_loop().stop() if __name__ == '__main__': url = environ.get("AUTOBAHN_DEMO_ROUTER", "ws://127.0.0.1:8080/ws") realm = "crossbardemo" runner = ApplicationRunner(url, realm) runner.run(Component)
mit
-6,250,392,274,712,829,000
36.69863
82
0.628634
false
sachinkum/Bal-Aveksha
WebServer/BalAvekshaEnv/lib/python3.5/site-packages/django/utils/safestring.py
41
4575
""" Functions for working with "safe strings": strings that can be displayed safely without further escaping in HTML. Marking something as a "safe string" means that the producer of the string has already turned characters that should not be interpreted by the HTML engine (e.g. '<') into the appropriate entities. """ import warnings from django.utils import six from django.utils.deprecation import RemovedInDjango20Warning from django.utils.functional import Promise, curry class EscapeData(object): pass class EscapeBytes(bytes, EscapeData): """ A byte string that should be HTML-escaped when output. """ pass class EscapeText(six.text_type, EscapeData): """ A unicode string object that should be HTML-escaped when output. """ pass if six.PY3: EscapeString = EscapeText else: EscapeString = EscapeBytes # backwards compatibility for Python 2 EscapeUnicode = EscapeText class SafeData(object): def __html__(self): """ Returns the html representation of a string for interoperability. This allows other template engines to understand Django's SafeData. """ return self class SafeBytes(bytes, SafeData): """ A bytes subclass that has been specifically marked as "safe" (requires no further escaping) for HTML output purposes. """ def __add__(self, rhs): """ Concatenating a safe byte string with another safe byte string or safe unicode string is safe. Otherwise, the result is no longer safe. """ t = super(SafeBytes, self).__add__(rhs) if isinstance(rhs, SafeText): return SafeText(t) elif isinstance(rhs, SafeBytes): return SafeBytes(t) return t def _proxy_method(self, *args, **kwargs): """ Wrap a call to a normal unicode method up so that we return safe results. The method that is being wrapped is passed in the 'method' argument. """ method = kwargs.pop('method') data = method(self, *args, **kwargs) if isinstance(data, bytes): return SafeBytes(data) else: return SafeText(data) decode = curry(_proxy_method, method=bytes.decode) class SafeText(six.text_type, SafeData): """ A unicode (Python 2) / str (Python 3) subclass that has been specifically marked as "safe" for HTML output purposes. """ def __add__(self, rhs): """ Concatenating a safe unicode string with another safe byte string or safe unicode string is safe. Otherwise, the result is no longer safe. """ t = super(SafeText, self).__add__(rhs) if isinstance(rhs, SafeData): return SafeText(t) return t def _proxy_method(self, *args, **kwargs): """ Wrap a call to a normal unicode method up so that we return safe results. The method that is being wrapped is passed in the 'method' argument. """ method = kwargs.pop('method') data = method(self, *args, **kwargs) if isinstance(data, bytes): return SafeBytes(data) else: return SafeText(data) encode = curry(_proxy_method, method=six.text_type.encode) if six.PY3: SafeString = SafeText else: SafeString = SafeBytes # backwards compatibility for Python 2 SafeUnicode = SafeText def mark_safe(s): """ Explicitly mark a string as safe for (HTML) output purposes. The returned object can be used everywhere a string or unicode object is appropriate. Can be called multiple times on a single string. """ if hasattr(s, '__html__'): return s if isinstance(s, bytes) or (isinstance(s, Promise) and s._delegate_bytes): return SafeBytes(s) if isinstance(s, (six.text_type, Promise)): return SafeText(s) return SafeString(str(s)) def mark_for_escaping(s): """ Explicitly mark a string as requiring HTML escaping upon output. Has no effect on SafeData subclasses. 
Can be called multiple times on a single string (the resulting escaping is only applied once). """ warnings.warn('mark_for_escaping() is deprecated.', RemovedInDjango20Warning) if hasattr(s, '__html__') or isinstance(s, EscapeData): return s if isinstance(s, bytes) or (isinstance(s, Promise) and s._delegate_bytes): return EscapeBytes(s) if isinstance(s, (six.text_type, Promise)): return EscapeText(s) return EscapeString(str(s))
gpl-3.0
2,207,144,827,092,949,000
29.298013
81
0.64612
false
gangadharkadam/shfr
frappe/widgets/query_builder.py
35
6998
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors # MIT License. See license.txt from __future__ import unicode_literals import frappe out = frappe.response from frappe.utils import cint import frappe.defaults def get_sql_tables(q): if q.find('WHERE') != -1: tl = q.split('FROM')[1].split('WHERE')[0].split(',') elif q.find('GROUP BY') != -1: tl = q.split('FROM')[1].split('GROUP BY')[0].split(',') else: tl = q.split('FROM')[1].split('ORDER BY')[0].split(',') return [t.strip().strip('`')[3:] for t in tl] def get_parent_dt(dt): pdt = '' if frappe.db.sql('select name from `tabDocType` where istable=1 and name=%s', dt): import frappe.model.meta return frappe.model.meta.get_parent_dt(dt) return pdt def get_sql_meta(tl): std_columns = { 'owner':('Owner', '', '', '100'), 'creation':('Created on', 'Date', '', '100'), 'modified':('Last modified on', 'Date', '', '100'), 'modified_by':('Modified By', '', '', '100') } meta = {} for dt in tl: meta[dt] = std_columns.copy() # for table doctype, the ID is the parent id pdt = get_parent_dt(dt) if pdt: meta[dt]['parent'] = ('ID', 'Link', pdt, '200') # get the field properties from DocField res = frappe.db.sql("select fieldname, label, fieldtype, options, width \ from tabDocField where parent=%s", dt) for r in res: if r[0]: meta[dt][r[0]] = (r[1], r[2], r[3], r[4]); # name meta[dt]['name'] = ('ID', 'Link', dt, '200') return meta def add_match_conditions(q, tl): from frappe.widgets.reportview import build_match_conditions sl = [] for dt in tl: s = build_match_conditions(dt) if s: sl.append(s) # insert the conditions if sl: condition_st = q.find('WHERE')!=-1 and ' AND ' or ' WHERE ' condition_end = q.find('ORDER BY')!=-1 and 'ORDER BY' or 'LIMIT' condition_end = q.find('GROUP BY')!=-1 and 'GROUP BY' or condition_end if q.find('ORDER BY')!=-1 or q.find('LIMIT')!=-1 or q.find('GROUP BY')!=-1: # if query continues beyond conditions q = q.split(condition_end) q = q[0] + condition_st + '(' + ' OR '.join(sl) + ') ' + condition_end + q[1] else: q = q + condition_st + '(' + ' OR '.join(sl) + ')' return q def guess_type(m): """ Returns fieldtype depending on the MySQLdb Description """ import MySQLdb if m in MySQLdb.NUMBER: return 'Currency' elif m in MySQLdb.DATE: return 'Date' else: return 'Data' def build_description_simple(): colnames, coltypes, coloptions, colwidths = [], [], [], [] for m in frappe.db.get_description(): colnames.append(m[0]) coltypes.append(guess_type[m[0]]) coloptions.append('') colwidths.append('100') return colnames, coltypes, coloptions, colwidths def build_description_standard(meta, tl): desc = frappe.db.get_description() colnames, coltypes, coloptions, colwidths = [], [], [], [] # merged metadata - used if we are unable to # get both the table name and field name from # the description - in case of joins merged_meta = {} for d in meta: merged_meta.update(meta[d]) for f in desc: fn, dt = f[0], '' if '.' 
in fn: dt, fn = fn.split('.') if (not dt) and merged_meta.get(fn): # no "AS" given, find type from merged description desc = merged_meta[fn] colnames.append(desc[0] or fn) coltypes.append(desc[1] or '') coloptions.append(desc[2] or '') colwidths.append(desc[3] or '100') elif meta.get(dt,{}).has_key(fn): # type specified for a multi-table join # usually from Report Builder desc = meta[dt][fn] colnames.append(desc[0] or fn) coltypes.append(desc[1] or '') coloptions.append(desc[2] or '') colwidths.append(desc[3] or '100') else: # nothing found # guess colnames.append(fn) coltypes.append(guess_type(f[1])) coloptions.append('') colwidths.append('100') return colnames, coltypes, coloptions, colwidths @frappe.whitelist() def runquery(q='', ret=0, from_export=0): import frappe.utils formatted = cint(frappe.form_dict.get('formatted')) # CASE A: Simple Query # -------------------- if frappe.form_dict.get('simple_query') or frappe.form_dict.get('is_simple'): if not q: q = frappe.form_dict.get('simple_query') or frappe.form_dict.get('query') if q.split()[0].lower() != 'select': raise Exception, 'Query must be a SELECT' as_dict = cint(frappe.form_dict.get('as_dict')) res = frappe.db.sql(q, as_dict = as_dict, as_list = not as_dict, formatted=formatted) # build colnames etc from metadata colnames, coltypes, coloptions, colwidths = [], [], [], [] # CASE B: Standard Query # ----------------------- else: if not q: q = frappe.form_dict.get('query') tl = get_sql_tables(q) meta = get_sql_meta(tl) q = add_match_conditions(q, tl) # replace special variables q = q.replace('__user', frappe.session.user) q = q.replace('__today', frappe.utils.nowdate()) res = frappe.db.sql(q, as_list=1, formatted=formatted) colnames, coltypes, coloptions, colwidths = build_description_standard(meta, tl) # run server script # ----------------- style, header_html, footer_html, page_template = '', '', '', '' out['colnames'] = colnames out['coltypes'] = coltypes out['coloptions'] = coloptions out['colwidths'] = colwidths out['header_html'] = header_html out['footer_html'] = footer_html out['page_template'] = page_template if style: out['style'] = style # just the data - return if ret==1: return res out['values'] = res # return num of entries qm = frappe.form_dict.get('query_max') or '' if qm and qm.strip(): if qm.split()[0].lower() != 'select': raise Exception, 'Query (Max) must be a SELECT' if not frappe.form_dict.get('simple_query'): qm = add_match_conditions(qm, tl) out['n_values'] = frappe.utils.cint(frappe.db.sql(qm)[0][0]) @frappe.whitelist() def runquery_csv(): global out # run query res = runquery(from_export = 1) q = frappe.form_dict.get('query') rep_name = frappe.form_dict.get('report_name') if not frappe.form_dict.get('simple_query'): # Report Name if not rep_name: rep_name = get_sql_tables(q)[0] if not rep_name: rep_name = 'DataExport' # Headings heads = [] rows = [[rep_name], out['colnames']] + out['values'] from cStringIO import StringIO import csv f = StringIO() writer = csv.writer(f) for r in rows: # encode only unicode type strings and not int, floats etc. 
writer.writerow(map(lambda v: isinstance(v, unicode) and v.encode('utf-8') or v, r)) f.seek(0) out['result'] = unicode(f.read(), 'utf-8') out['type'] = 'csv' out['doctype'] = rep_name def add_limit_to_query(query, args): """ Add a limit condition to a query. Can be used by methods called in listings to add a limit condition. """ if args.get('limit_page_length'): query += """ limit %(limit_start)s, %(limit_page_length)s""" import frappe.utils args['limit_start'] = frappe.utils.cint(args.get('limit_start')) args['limit_page_length'] = frappe.utils.cint(args.get('limit_page_length')) return query, args
mit
-5,694,405,449,246,642,000
25.01487
116
0.637325
false
aleida/django
tests/regressiontests/file_uploads/views.py
6
4373
from __future__ import absolute_import, unicode_literals import hashlib import json import os from django.core.files.uploadedfile import UploadedFile from django.http import HttpResponse, HttpResponseServerError from .models import FileModel, UPLOAD_TO from .tests import UNICODE_FILENAME from .uploadhandler import QuotaUploadHandler, ErroringUploadHandler def file_upload_view(request): """ Check that a file upload can be updated into the POST dictionary without going pear-shaped. """ form_data = request.POST.copy() form_data.update(request.FILES) if isinstance(form_data.get('file_field'), UploadedFile) and isinstance(form_data['name'], unicode): # If a file is posted, the dummy client should only post the file name, # not the full path. if os.path.dirname(form_data['file_field'].name) != '': return HttpResponseServerError() return HttpResponse('') else: return HttpResponseServerError() def file_upload_view_verify(request): """ Use the sha digest hash to verify the uploaded contents. """ form_data = request.POST.copy() form_data.update(request.FILES) for key, value in form_data.items(): if key.endswith('_hash'): continue if key + '_hash' not in form_data: continue submitted_hash = form_data[key + '_hash'] if isinstance(value, UploadedFile): new_hash = hashlib.sha1(value.read()).hexdigest() else: new_hash = hashlib.sha1(value).hexdigest() if new_hash != submitted_hash: return HttpResponseServerError() # Adding large file to the database should succeed largefile = request.FILES['file_field2'] obj = FileModel() obj.testfile.save(largefile.name, largefile) return HttpResponse('') def file_upload_unicode_name(request): # Check to see if unicode name came through properly. if not request.FILES['file_unicode'].name.endswith(UNICODE_FILENAME): return HttpResponseServerError() response = None # Check to make sure the exotic characters are preserved even # through file save. uni_named_file = request.FILES['file_unicode'] obj = FileModel.objects.create(testfile=uni_named_file) full_name = '%s/%s' % (UPLOAD_TO, uni_named_file.name) if not os.path.exists(full_name): response = HttpResponseServerError() # Cleanup the object with its exotic file name immediately. # (shutil.rmtree used elsewhere in the tests to clean up the # upload directory has been seen to choke on unicode # filenames on Windows.) obj.delete() os.unlink(full_name) if response: return response else: return HttpResponse('') def file_upload_echo(request): """ Simple view to echo back info about uploaded files for tests. """ r = dict([(k, f.name) for k, f in request.FILES.items()]) return HttpResponse(json.dumps(r)) def file_upload_echo_content(request): """ Simple view to echo back the content of uploaded files for tests. """ r = dict([(k, f.read()) for k, f in request.FILES.items()]) return HttpResponse(json.dumps(r)) def file_upload_quota(request): """ Dynamically add in an upload handler. """ request.upload_handlers.insert(0, QuotaUploadHandler()) return file_upload_echo(request) def file_upload_quota_broken(request): """ You can't change handlers after reading FILES; this view shouldn't work. """ response = file_upload_echo(request) request.upload_handlers.insert(0, QuotaUploadHandler()) return response def file_upload_getlist_count(request): """ Check the .getlist() function to ensure we receive the correct number of files. 
""" file_counts = {} for key in request.FILES.keys(): file_counts[key] = len(request.FILES.getlist(key)) return HttpResponse(json.dumps(file_counts)) def file_upload_errors(request): request.upload_handlers.insert(0, ErroringUploadHandler()) return file_upload_echo(request) def file_upload_filename_case_view(request): """ Check adding the file to the database will preserve the filename case. """ file = request.FILES['file_field'] obj = FileModel() obj.testfile.save(file.name, file) return HttpResponse('%d' % obj.pk)
bsd-3-clause
1,517,972,951,403,133,400
31.154412
104
0.673908
false
BT-ojossen/odoo
addons/l10n_do/__openerp__.py
309
2992
# -*- coding: utf-8 -*- # ############################################################################# # # First author: Jose Ernesto Mendez <[email protected]> (Open Business Solutions SRL.) # Copyright (c) 2012 -TODAY Open Business Solutions, SRL. (http://obsdr.com). All rights reserved. # # This is a fork to upgrade to odoo 8.0 # by Marcos Organizador de Negocios - Eneldo Serrata - www.marcos.org.do # # # WARNING: This program as such is intended to be used by professional # programmers who take the whole responsability of assessing all potential # consequences resulting from its eventual inadequacies and bugs. # End users who are looking for a ready-to-use solution with commercial # garantees and support are strongly adviced to contract a Free Software # Service Company like Marcos Organizador de Negocios. # # This program is Free Software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # ############################################################################## { 'name': 'Dominican Republic - Accounting', 'version': '1.0', 'category': 'Localization/Account Charts', 'description': """ This is the base module to manage the accounting chart for Dominican Republic. ============================================================================== * Chart of Accounts. * The Tax Code Chart for Domincan Republic * The main taxes used in Domincan Republic * Fiscal position for local """, 'author': 'Eneldo Serrata - Marcos Organizador de Negocios, SRL.', 'website': 'http://marcos.do', 'depends': ['account', 'base_iban'], 'data': [ # basic accounting data 'data/ir_sequence_type.xml', 'data/ir_sequence.xml', 'data/account_journal.xml', 'data/account.account.type.csv', 'data/account.account.template.csv', 'data/account.tax.code.template.csv', 'data/account_chart_template.xml', 'data/account.tax.template.csv', 'data/l10n_do_base_data.xml', # Adds fiscal position 'data/account.fiscal.position.template.csv', 'data/account.fiscal.position.tax.template.csv', # configuration wizard, views, reports... 'data/l10n_do_wizard.xml' ], 'test': [], 'demo': [], 'installable': True, 'auto_install': False, } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
1,847,647,346,990,503,400
40.555556
98
0.646056
false
algorhythms/LintCode
Maximal Square.py
4
2163
""" Given a 2D binary matrix filled with 0's and 1's, find the largest square containing all 1's and return its area. Example For example, given the following matrix: 1 0 1 0 0 1 0 1 1 1 1 1 1 1 1 1 0 0 1 0 Return 4. """ __author__ = 'Daniel' class Solution: def maxSquare(self, matrix): """ Algorithm: O(n^2) let F_{i, j} represents the max square's length ended at matrix_{i, j} (lower right corner). F_{i, j} = min{F_{i-1, j}, F_{i, j-1}, F_{i-1, j-1}}+1 // if matrix{i, j} == 1 F_{i, j} = 0 // otherwise O(n^3) sandwich approach :param matrix: a matrix of 0 and 1 :return: an integer """ M = len(matrix) N = len(matrix[0]) F = [[0 for _ in xrange(N+1)] for _ in xrange(M+1)] gmax = 0 for i in xrange(1, M+1): for j in xrange(1, N+1): if matrix[i-1][j-1] == 1: F[i][j] = min(F[i-1][j], F[i][j-1], F[i-1][j-1])+1 gmax = max(gmax, F[i][j]) return gmax*gmax def maxSquare_error(self, matrix): """ stack :param matrix: a matrix of 0 and 1 :return: an integer """ M = len(matrix) N = len(matrix[0]) h = [[0 for _ in xrange(N+1)] for _ in xrange(M+1)] for i in xrange(1, M+1): for j in xrange(1, N+1): if matrix[i-1][j-1] == 1: h[i][j] = h[i-1][j]+1 else: h[i][j] = 0 ret = 0 for i in xrange(M): stk = [] # col index, inc_stk for j in xrange(N): while stk and h[i+1][stk[-1]+1] >= h[i+1][j+1]: stk.pop() idx = -1 if stk: idx = stk[-1] cur_square = min(j-idx, h[i+1][j+1]) cur_square *= cur_square ret = max(ret, cur_square) stk.append(j) return ret if __name__ == "__main__": assert Solution().maxSquare([[0,1,0,1,1,0],[1,0,1,0,1,1],[1,1,1,1,1,0],[1,1,1,1,1,1],[0,0,1,1,1,0],[1,1,1,0,1,1]] ) == 9
apache-2.0
4,844,183,719,194,545,000
26.74359
117
0.437818
false
leomorsy/medical8.0
oemedical_emr/models/oemedical_prescription_order.py
3
3191
# -*- coding: utf-8 -*- #/############################################################################# # # Tech-Receptives Solutions Pvt. Ltd. # Copyright (C) 2004-TODAY Tech-Receptives(<http://www.techreceptives.com>) # Special Credit and Thanks to Thymbra Latinoamericana S.A. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # #/############################################################################# from openerp.osv import fields, orm from openerp.tools.translate import _ from openerp import netsvc from datetime import datetime, timedelta from dateutil.relativedelta import relativedelta import time class OeMedicalPrescriptionOrder(orm.Model): _name='oemedical.prescription.order' _columns={ 'patient_id': fields.many2one('oemedical.patient', string='Patient', required=True), 'pregnancy_warning': fields.boolean(string='Pregancy Warning', readonly=True), 'notes': fields.text(string='Prescription Notes'), 'prescription_line': fields.one2many('oemedical.prescription.line', 'name', string='Prescription line',), 'pharmacy': fields.many2one('res.partner', string='Pharmacy',), 'prescription_date': fields.datetime(string='Prescription Date'), 'prescription_warning_ack': fields.boolean( string='Prescription verified'), 'physician_id': fields.many2one('oemedical.physician', string='Prescribing Doctor', required=True), 'name': fields.char(size=256, string='Prescription ID', required=True, help='Type in the ID of this prescription'), } _defaults={ 'name': lambda obj, cr, uid, context: obj.pool.get('ir.sequence').get(cr, uid, 'oemedical.prescription.order'), 'prescription_date':lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'), } def print_prescription(self, cr, uid, ids, context=None): ''' ''' # assert len(ids) == 1, 'This option should only be used for a single id at a time' # wf_service = netsvc.LocalService("workflow") # wf_service.trg_validate(uid, 'oemedical.prescription.order', ids[0], 'prescription_sent', cr) datas = { 'model': 'oemedical.prescription.order', 'ids': ids, 'form': self.read(cr, uid, ids[0], context=context), } return {'type': 'ir.actions.report.xml', 'report_name': 'prescription.order', 'datas': datas, 'nodestroy': True} # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
-2,119,046,690,247,324,200
44.585714
123
0.62927
false
kosz85/django
django/db/migrations/executor.py
18
17479
from django.apps.registry import apps as global_apps from django.db import migrations, router from .exceptions import InvalidMigrationPlan from .loader import MigrationLoader from .recorder import MigrationRecorder from .state import ProjectState class MigrationExecutor: """ End-to-end migration execution - load migrations and run them up or down to a specified set of targets. """ def __init__(self, connection, progress_callback=None): self.connection = connection self.loader = MigrationLoader(self.connection) self.recorder = MigrationRecorder(self.connection) self.progress_callback = progress_callback def migration_plan(self, targets, clean_start=False): """ Given a set of targets, return a list of (Migration instance, backwards?). """ plan = [] if clean_start: applied = set() else: applied = set(self.loader.applied_migrations) for target in targets: # If the target is (app_label, None), that means unmigrate everything if target[1] is None: for root in self.loader.graph.root_nodes(): if root[0] == target[0]: for migration in self.loader.graph.backwards_plan(root): if migration in applied: plan.append((self.loader.graph.nodes[migration], True)) applied.remove(migration) # If the migration is already applied, do backwards mode, # otherwise do forwards mode. elif target in applied: # Don't migrate backwards all the way to the target node (that # may roll back dependencies in other apps that don't need to # be rolled back); instead roll back through target's immediate # child(ren) in the same app, and no further. next_in_app = sorted( n for n in self.loader.graph.node_map[target].children if n[0] == target[0] ) for node in next_in_app: for migration in self.loader.graph.backwards_plan(node): if migration in applied: plan.append((self.loader.graph.nodes[migration], True)) applied.remove(migration) else: for migration in self.loader.graph.forwards_plan(target): if migration not in applied: plan.append((self.loader.graph.nodes[migration], False)) applied.add(migration) return plan def _create_project_state(self, with_applied_migrations=False): """ Create a project state including all the applications without migrations and applied migrations if with_applied_migrations=True. """ state = ProjectState(real_apps=list(self.loader.unmigrated_apps)) if with_applied_migrations: # Create the forwards plan Django would follow on an empty database full_plan = self.migration_plan(self.loader.graph.leaf_nodes(), clean_start=True) applied_migrations = { self.loader.graph.nodes[key] for key in self.loader.applied_migrations if key in self.loader.graph.nodes } for migration, _ in full_plan: if migration in applied_migrations: migration.mutate_state(state, preserve=False) return state def migrate(self, targets, plan=None, state=None, fake=False, fake_initial=False): """ Migrate the database up to the given targets. Django first needs to create all project states before a migration is (un)applied and in a second step run all the database operations. """ # The django_migrations table must be present to record applied # migrations. self.recorder.ensure_schema() if plan is None: plan = self.migration_plan(targets) # Create the forwards plan Django would follow on an empty database full_plan = self.migration_plan(self.loader.graph.leaf_nodes(), clean_start=True) all_forwards = all(not backwards for mig, backwards in plan) all_backwards = all(backwards for mig, backwards in plan) if not plan: if state is None: # The resulting state should include applied migrations. 
state = self._create_project_state(with_applied_migrations=True) elif all_forwards == all_backwards: # This should only happen if there's a mixed plan raise InvalidMigrationPlan( "Migration plans with both forwards and backwards migrations " "are not supported. Please split your migration process into " "separate plans of only forwards OR backwards migrations.", plan ) elif all_forwards: if state is None: # The resulting state should still include applied migrations. state = self._create_project_state(with_applied_migrations=True) state = self._migrate_all_forwards(state, plan, full_plan, fake=fake, fake_initial=fake_initial) else: # No need to check for `elif all_backwards` here, as that condition # would always evaluate to true. state = self._migrate_all_backwards(plan, full_plan, fake=fake) self.check_replacements() return state def _migrate_all_forwards(self, state, plan, full_plan, fake, fake_initial): """ Take a list of 2-tuples of the form (migration instance, False) and apply them in the order they occur in the full_plan. """ migrations_to_run = {m[0] for m in plan} for migration, _ in full_plan: if not migrations_to_run: # We remove every migration that we applied from these sets so # that we can bail out once the last migration has been applied # and don't always run until the very end of the migration # process. break if migration in migrations_to_run: if 'apps' not in state.__dict__: if self.progress_callback: self.progress_callback("render_start") state.apps # Render all -- performance critical if self.progress_callback: self.progress_callback("render_success") state = self.apply_migration(state, migration, fake=fake, fake_initial=fake_initial) migrations_to_run.remove(migration) return state def _migrate_all_backwards(self, plan, full_plan, fake): """ Take a list of 2-tuples of the form (migration instance, True) and unapply them in reverse order they occur in the full_plan. Since unapplying a migration requires the project state prior to that migration, Django will compute the migration states before each of them in a first run over the plan and then unapply them in a second run over the plan. """ migrations_to_run = {m[0] for m in plan} # Holds all migration states prior to the migrations being unapplied states = {} state = self._create_project_state() applied_migrations = { self.loader.graph.nodes[key] for key in self.loader.applied_migrations if key in self.loader.graph.nodes } if self.progress_callback: self.progress_callback("render_start") for migration, _ in full_plan: if not migrations_to_run: # We remove every migration that we applied from this set so # that we can bail out once the last migration has been applied # and don't always run until the very end of the migration # process. break if migration in migrations_to_run: if 'apps' not in state.__dict__: state.apps # Render all -- performance critical # The state before this migration states[migration] = state # The old state keeps as-is, we continue with the new state state = migration.mutate_state(state, preserve=True) migrations_to_run.remove(migration) elif migration in applied_migrations: # Only mutate the state if the migration is actually applied # to make sure the resulting state doesn't include changes # from unrelated migrations. 
migration.mutate_state(state, preserve=False) if self.progress_callback: self.progress_callback("render_success") for migration, _ in plan: self.unapply_migration(states[migration], migration, fake=fake) applied_migrations.remove(migration) # Generate the post migration state by starting from the state before # the last migration is unapplied and mutating it to include all the # remaining applied migrations. last_unapplied_migration = plan[-1][0] state = states[last_unapplied_migration] for index, (migration, _) in enumerate(full_plan): if migration == last_unapplied_migration: for migration, _ in full_plan[index:]: if migration in applied_migrations: migration.mutate_state(state, preserve=False) break return state def collect_sql(self, plan): """ Take a migration plan and return a list of collected SQL statements that represent the best-efforts version of that plan. """ statements = [] state = None for migration, backwards in plan: with self.connection.schema_editor(collect_sql=True, atomic=migration.atomic) as schema_editor: if state is None: state = self.loader.project_state((migration.app_label, migration.name), at_end=False) if not backwards: state = migration.apply(state, schema_editor, collect_sql=True) else: state = migration.unapply(state, schema_editor, collect_sql=True) statements.extend(schema_editor.collected_sql) return statements def apply_migration(self, state, migration, fake=False, fake_initial=False): """Run a migration forwards.""" if self.progress_callback: self.progress_callback("apply_start", migration, fake) if not fake: if fake_initial: # Test to see if this is an already-applied initial migration applied, state = self.detect_soft_applied(state, migration) if applied: fake = True if not fake: # Alright, do it normally with self.connection.schema_editor(atomic=migration.atomic) as schema_editor: state = migration.apply(state, schema_editor) # For replacement migrations, record individual statuses if migration.replaces: for app_label, name in migration.replaces: self.recorder.record_applied(app_label, name) else: self.recorder.record_applied(migration.app_label, migration.name) # Report progress if self.progress_callback: self.progress_callback("apply_success", migration, fake) return state def unapply_migration(self, state, migration, fake=False): """Run a migration backwards.""" if self.progress_callback: self.progress_callback("unapply_start", migration, fake) if not fake: with self.connection.schema_editor(atomic=migration.atomic) as schema_editor: state = migration.unapply(state, schema_editor) # For replacement migrations, record individual statuses if migration.replaces: for app_label, name in migration.replaces: self.recorder.record_unapplied(app_label, name) else: self.recorder.record_unapplied(migration.app_label, migration.name) # Report progress if self.progress_callback: self.progress_callback("unapply_success", migration, fake) return state def check_replacements(self): """ Mark replacement migrations applied if their replaced set all are. Do this unconditionally on every migrate, rather than just when migrations are applied or unapplied, to correctly handle the case when a new squash migration is pushed to a deployment that already had all its replaced migrations applied. In this case no new migration will be applied, but the applied state of the squashed migration must be maintained. 
""" applied = self.recorder.applied_migrations() for key, migration in self.loader.replacements.items(): all_applied = all(m in applied for m in migration.replaces) if all_applied and key not in applied: self.recorder.record_applied(*key) def detect_soft_applied(self, project_state, migration): """ Test whether a migration has been implicitly applied - that the tables or columns it would create exist. This is intended only for use on initial migrations (as it only looks for CreateModel and AddField). """ def should_skip_detecting_model(migration, model): """ No need to detect tables for proxy models, unmanaged models, or models that can't be migrated on the current database. """ return ( model._meta.proxy or not model._meta.managed or not router.allow_migrate( self.connection.alias, migration.app_label, model_name=model._meta.model_name, ) ) if migration.initial is None: # Bail if the migration isn't the first one in its app if any(app == migration.app_label for app, name in migration.dependencies): return False, project_state elif migration.initial is False: # Bail if it's NOT an initial migration return False, project_state if project_state is None: after_state = self.loader.project_state((migration.app_label, migration.name), at_end=True) else: after_state = migration.mutate_state(project_state) apps = after_state.apps found_create_model_migration = False found_add_field_migration = False existing_table_names = self.connection.introspection.table_names(self.connection.cursor()) # Make sure all create model and add field operations are done for operation in migration.operations: if isinstance(operation, migrations.CreateModel): model = apps.get_model(migration.app_label, operation.name) if model._meta.swapped: # We have to fetch the model to test with from the # main app cache, as it's not a direct dependency. model = global_apps.get_model(model._meta.swapped) if should_skip_detecting_model(migration, model): continue if model._meta.db_table not in existing_table_names: return False, project_state found_create_model_migration = True elif isinstance(operation, migrations.AddField): model = apps.get_model(migration.app_label, operation.model_name) if model._meta.swapped: # We have to fetch the model to test with from the # main app cache, as it's not a direct dependency. model = global_apps.get_model(model._meta.swapped) if should_skip_detecting_model(migration, model): continue table = model._meta.db_table field = model._meta.get_field(operation.name) # Handle implicit many-to-many tables created by AddField. if field.many_to_many: if field.remote_field.through._meta.db_table not in existing_table_names: return False, project_state else: found_add_field_migration = True continue column_names = [ column.name for column in self.connection.introspection.get_table_description(self.connection.cursor(), table) ] if field.column not in column_names: return False, project_state found_add_field_migration = True # If we get this far and we found at least one CreateModel or AddField migration, # the migration is considered implicitly applied. return (found_create_model_migration or found_add_field_migration), after_state
bsd-3-clause
1,059,180,555,687,638,800
46.497283
108
0.596602
false
vnsofthe/odoo
openerp/addons/base/ir/ir_qweb.py
38
64677
# -*- coding: utf-8 -*- import collections import cStringIO import datetime import hashlib import json import itertools import logging import math import os import re import sys import textwrap import uuid from subprocess import Popen, PIPE from urlparse import urlparse import babel import babel.dates import werkzeug from lxml import etree, html from PIL import Image import psycopg2 import openerp.http import openerp.tools from openerp.tools.func import lazy_property import openerp.tools.lru from openerp.http import request from openerp.tools.safe_eval import safe_eval as eval from openerp.osv import osv, orm, fields from openerp.tools import html_escape as escape from openerp.tools.translate import _ _logger = logging.getLogger(__name__) MAX_CSS_RULES = 4095 #-------------------------------------------------------------------- # QWeb template engine #-------------------------------------------------------------------- class QWebException(Exception): def __init__(self, message, **kw): Exception.__init__(self, message) self.qweb = dict(kw) def pretty_xml(self): if 'node' not in self.qweb: return '' return etree.tostring(self.qweb['node'], pretty_print=True) class QWebTemplateNotFound(QWebException): pass def raise_qweb_exception(etype=None, **kw): if etype is None: etype = QWebException orig_type, original, tb = sys.exc_info() try: raise etype, original, tb except etype, e: for k, v in kw.items(): e.qweb[k] = v # Will use `raise foo from bar` in python 3 and rename cause to __cause__ e.qweb['cause'] = original raise def _build_attribute(name, value): value = escape(value) if isinstance(name, unicode): name = name.encode('utf-8') if isinstance(value, unicode): value = value.encode('utf-8') return ' %s="%s"' % (name, value) class QWebContext(dict): def __init__(self, cr, uid, data, loader=None, templates=None, context=None): self.cr = cr self.uid = uid self.loader = loader self.templates = templates or {} self.context = context dic = dict(data) super(QWebContext, self).__init__(dic) self['defined'] = lambda key: key in self def safe_eval(self, expr): locals_dict = collections.defaultdict(lambda: None) locals_dict.update(self) locals_dict.pop('cr', None) locals_dict.pop('loader', None) return eval(expr, None, locals_dict, nocopy=True, locals_builtins=True) def copy(self): """ Clones the current context, conserving all data and metadata (loader, template cache, ...) """ return QWebContext(self.cr, self.uid, dict.copy(self), loader=self.loader, templates=self.templates, context=self.context) def __copy__(self): return self.copy() class QWeb(orm.AbstractModel): """ Base QWeb rendering engine * to customize ``t-field`` rendering, subclass ``ir.qweb.field`` and create new models called :samp:`ir.qweb.field.{widget}` * alternatively, override :meth:`~.get_converter_for` and return an arbitrary model to use as field converter Beware that if you need extensions or alterations which could be incompatible with other subsystems, you should create a local object inheriting from ``ir.qweb`` and customize that. 
""" _name = 'ir.qweb' _void_elements = frozenset([ u'area', u'base', u'br', u'col', u'embed', u'hr', u'img', u'input', u'keygen', u'link', u'menuitem', u'meta', u'param', u'source', u'track', u'wbr']) _format_regex = re.compile( '(?:' # ruby-style pattern '#\{(.+?)\}' ')|(?:' # jinja-style pattern '\{\{(.+?)\}\}' ')') def __init__(self, pool, cr): super(QWeb, self).__init__(pool, cr) self._render_tag = self.prefixed_methods('render_tag_') self._render_att = self.prefixed_methods('render_att_') def prefixed_methods(self, prefix): """ Extracts all methods prefixed by ``prefix``, and returns a mapping of (t-name, method) where the t-name is the method name with prefix removed and underscore converted to dashes :param str prefix: :return: dict """ n_prefix = len(prefix) return dict( (name[n_prefix:].replace('_', '-'), getattr(type(self), name)) for name in dir(self) if name.startswith(prefix) ) def register_tag(self, tag, func): self._render_tag[tag] = func def add_template(self, qwebcontext, name, node): """Add a parsed template in the context. Used to preprocess templates.""" qwebcontext.templates[name] = node def load_document(self, document, res_id, qwebcontext): """ Loads an XML document and installs any contained template in the engine :type document: a parsed lxml.etree element, an unparsed XML document (as a string) or the path of an XML file to load """ if not isinstance(document, basestring): # assume lxml.etree.Element dom = document elif document.startswith("<?xml"): dom = etree.fromstring(document) else: dom = etree.parse(document).getroot() for node in dom: if node.get('t-name'): name = str(node.get("t-name")) self.add_template(qwebcontext, name, node) if res_id and node.tag == "t": self.add_template(qwebcontext, res_id, node) res_id = None def get_template(self, name, qwebcontext): """ Tries to fetch the template ``name``, either gets it from the context's template cache or loads one with the context's loader (if any). 
:raises QWebTemplateNotFound: if the template can not be found or loaded """ origin_template = qwebcontext.get('__caller__') or qwebcontext['__stack__'][0] if qwebcontext.loader and name not in qwebcontext.templates: try: xml_doc = qwebcontext.loader(name) except ValueError: raise_qweb_exception(QWebTemplateNotFound, message="Loader could not find template %r" % name, template=origin_template) self.load_document(xml_doc, isinstance(name, (int, long)) and name or None, qwebcontext=qwebcontext) if name in qwebcontext.templates: return qwebcontext.templates[name] raise QWebTemplateNotFound("Template %r not found" % name, template=origin_template) def eval(self, expr, qwebcontext): try: return qwebcontext.safe_eval(expr) except Exception: template = qwebcontext.get('__template__') raise_qweb_exception(message="Could not evaluate expression %r" % expr, expression=expr, template=template) def eval_object(self, expr, qwebcontext): return self.eval(expr, qwebcontext) def eval_str(self, expr, qwebcontext): if expr == "0": return qwebcontext.get(0, '') val = self.eval(expr, qwebcontext) if isinstance(val, unicode): return val.encode("utf8") if val is False or val is None: return '' return str(val) def eval_format(self, expr, qwebcontext): expr, replacements = self._format_regex.subn( lambda m: self.eval_str(m.group(1) or m.group(2), qwebcontext), expr ) if replacements: return expr try: return str(expr % qwebcontext) except Exception: template = qwebcontext.get('__template__') raise_qweb_exception(message="Format error for expression %r" % expr, expression=expr, template=template) def eval_bool(self, expr, qwebcontext): return int(bool(self.eval(expr, qwebcontext))) def render(self, cr, uid, id_or_xml_id, qwebcontext=None, loader=None, context=None): """ render(cr, uid, id_or_xml_id, qwebcontext=None, loader=None, context=None) Renders the template specified by the provided template name :param qwebcontext: context for rendering the template :type qwebcontext: dict or :class:`QWebContext` instance :param loader: if ``qwebcontext`` is a dict, loader set into the context instantiated for rendering """ if qwebcontext is None: qwebcontext = {} if not isinstance(qwebcontext, QWebContext): qwebcontext = QWebContext(cr, uid, qwebcontext, loader=loader, context=context) qwebcontext['__template__'] = id_or_xml_id stack = qwebcontext.get('__stack__', []) if stack: qwebcontext['__caller__'] = stack[-1] stack.append(id_or_xml_id) qwebcontext['__stack__'] = stack qwebcontext['xmlid'] = str(stack[0]) # Temporary fix return self.render_node(self.get_template(id_or_xml_id, qwebcontext), qwebcontext) def render_node(self, element, qwebcontext): generated_attributes = "" t_render = None template_attributes = {} for (attribute_name, attribute_value) in element.attrib.iteritems(): attribute_name = unicode(attribute_name) if attribute_name == "groups": cr = qwebcontext.get('request') and qwebcontext['request'].cr or None uid = qwebcontext.get('request') and qwebcontext['request'].uid or None can_see = self.user_has_groups(cr, uid, groups=attribute_value) if cr and uid else False if not can_see: return '' attribute_value = attribute_value.encode("utf8") if attribute_name.startswith("t-"): for attribute in self._render_att: if attribute_name[2:].startswith(attribute): attrs = self._render_att[attribute]( self, element, attribute_name, attribute_value, qwebcontext) for att, val in attrs: if not val: continue generated_attributes += self.render_attribute(element, att, val, qwebcontext) break else: if attribute_name[2:] in 
self._render_tag: t_render = attribute_name[2:] template_attributes[attribute_name[2:]] = attribute_value else: generated_attributes += self.render_attribute(element, attribute_name, attribute_value, qwebcontext) if 'debug' in template_attributes: debugger = template_attributes.get('debug', 'pdb') __import__(debugger).set_trace() # pdb, ipdb, pudb, ... if t_render: result = self._render_tag[t_render](self, element, template_attributes, generated_attributes, qwebcontext) else: result = self.render_element(element, template_attributes, generated_attributes, qwebcontext) if element.tail: result += element.tail.encode('utf-8') if isinstance(result, unicode): return result.encode('utf-8') return result def render_element(self, element, template_attributes, generated_attributes, qwebcontext, inner=None): # element: element # template_attributes: t-* attributes # generated_attributes: generated attributes # qwebcontext: values # inner: optional innerXml if inner: g_inner = inner.encode('utf-8') if isinstance(inner, unicode) else inner else: g_inner = [] if element.text is None else [element.text.encode('utf-8')] for current_node in element.iterchildren(tag=etree.Element): try: g_inner.append(self.render_node(current_node, qwebcontext)) except QWebException: raise except Exception: template = qwebcontext.get('__template__') raise_qweb_exception(message="Could not render element %r" % element.tag, node=element, template=template) name = unicode(element.tag) inner = "".join(g_inner) trim = template_attributes.get("trim", 0) if trim == 0: pass elif trim == 'left': inner = inner.lstrip() elif trim == 'right': inner = inner.rstrip() elif trim == 'both': inner = inner.strip() if name == "t": return inner elif len(inner) or name not in self._void_elements: return "<%s%s>%s</%s>" % tuple( qwebcontext if isinstance(qwebcontext, str) else qwebcontext.encode('utf-8') for qwebcontext in (name, generated_attributes, inner, name) ) else: return "<%s%s/>" % (name.encode("utf-8"), generated_attributes) def render_attribute(self, element, name, value, qwebcontext): return _build_attribute(name, value) # Attributes def render_att_att(self, element, attribute_name, attribute_value, qwebcontext): if attribute_name.startswith("t-attf-"): return [(attribute_name[7:], self.eval_format(attribute_value, qwebcontext))] if attribute_name.startswith("t-att-"): return [(attribute_name[6:], self.eval(attribute_value, qwebcontext))] result = self.eval_object(attribute_value, qwebcontext) if isinstance(result, collections.Mapping): return result.iteritems() # assume tuple return [result] # Tags def render_tag_raw(self, element, template_attributes, generated_attributes, qwebcontext): inner = self.eval_str(template_attributes["raw"], qwebcontext) return self.render_element(element, template_attributes, generated_attributes, qwebcontext, inner) def render_tag_esc(self, element, template_attributes, generated_attributes, qwebcontext): options = json.loads(template_attributes.get('esc-options') or '{}') widget = self.get_widget_for(options.get('widget')) inner = widget.format(template_attributes['esc'], options, qwebcontext) return self.render_element(element, template_attributes, generated_attributes, qwebcontext, inner) def _iterate(self, iterable): if isinstance (iterable, collections.Mapping): return iterable.iteritems() return itertools.izip(*itertools.tee(iterable)) def render_tag_foreach(self, element, template_attributes, generated_attributes, qwebcontext): expr = template_attributes["foreach"] enum = self.eval_object(expr, 
qwebcontext) if enum is None: template = qwebcontext.get('__template__') raise QWebException("foreach enumerator %r is not defined while rendering template %r" % (expr, template), template=template) if isinstance(enum, int): enum = range(enum) varname = template_attributes['as'].replace('.', '_') copy_qwebcontext = qwebcontext.copy() size = None if isinstance(enum, collections.Sized): size = len(enum) copy_qwebcontext["%s_size" % varname] = size copy_qwebcontext["%s_all" % varname] = enum ru = [] for index, (item, value) in enumerate(self._iterate(enum)): copy_qwebcontext.update({ varname: item, '%s_value' % varname: value, '%s_index' % varname: index, '%s_first' % varname: index == 0, }) if size is not None: copy_qwebcontext['%s_last' % varname] = index + 1 == size if index % 2: copy_qwebcontext.update({ '%s_parity' % varname: 'odd', '%s_even' % varname: False, '%s_odd' % varname: True, }) else: copy_qwebcontext.update({ '%s_parity' % varname: 'even', '%s_even' % varname: True, '%s_odd' % varname: False, }) ru.append(self.render_element(element, template_attributes, generated_attributes, copy_qwebcontext)) for k in qwebcontext.keys(): qwebcontext[k] = copy_qwebcontext[k] return "".join(ru) def render_tag_if(self, element, template_attributes, generated_attributes, qwebcontext): if self.eval_bool(template_attributes["if"], qwebcontext): return self.render_element(element, template_attributes, generated_attributes, qwebcontext) return "" def render_tag_call(self, element, template_attributes, generated_attributes, qwebcontext): d = qwebcontext.copy() d[0] = self.render_element(element, template_attributes, generated_attributes, d) cr = d.get('request') and d['request'].cr or None uid = d.get('request') and d['request'].uid or None template = self.eval_format(template_attributes["call"], d) try: template = int(template) except ValueError: pass return self.render(cr, uid, template, d) def render_tag_call_assets(self, element, template_attributes, generated_attributes, qwebcontext): """ This special 't-call' tag can be used in order to aggregate/minify javascript and css assets""" if len(element): # An asset bundle is rendered in two differents contexts (when genereting html and # when generating the bundle itself) so they must be qwebcontext free # even '0' variable is forbidden template = qwebcontext.get('__template__') raise QWebException("t-call-assets cannot contain children nodes", template=template) xmlid = template_attributes['call-assets'] cr, uid, context = [getattr(qwebcontext, attr) for attr in ('cr', 'uid', 'context')] bundle = AssetsBundle(xmlid, cr=cr, uid=uid, context=context, registry=self.pool) css = self.get_attr_bool(template_attributes.get('css'), default=True) js = self.get_attr_bool(template_attributes.get('js'), default=True) return bundle.to_html(css=css, js=js, debug=bool(qwebcontext.get('debug'))) def render_tag_set(self, element, template_attributes, generated_attributes, qwebcontext): if "value" in template_attributes: qwebcontext[template_attributes["set"]] = self.eval_object(template_attributes["value"], qwebcontext) elif "valuef" in template_attributes: qwebcontext[template_attributes["set"]] = self.eval_format(template_attributes["valuef"], qwebcontext) else: qwebcontext[template_attributes["set"]] = self.render_element(element, template_attributes, generated_attributes, qwebcontext) return "" def render_tag_field(self, element, template_attributes, generated_attributes, qwebcontext): """ eg: <span t-record="browse_record(res.partner, 1)" t-field="phone">+1 555 
555 8069</span>""" node_name = element.tag assert node_name not in ("table", "tbody", "thead", "tfoot", "tr", "td", "li", "ul", "ol", "dl", "dt", "dd"),\ "RTE widgets do not work correctly on %r elements" % node_name assert node_name != 't',\ "t-field can not be used on a t element, provide an actual HTML node" record, field_name = template_attributes["field"].rsplit('.', 1) record = self.eval_object(record, qwebcontext) field = record._fields[field_name] options = json.loads(template_attributes.get('field-options') or '{}') field_type = get_field_type(field, options) converter = self.get_converter_for(field_type) return converter.to_html(qwebcontext.cr, qwebcontext.uid, field_name, record, options, element, template_attributes, generated_attributes, qwebcontext, context=qwebcontext.context) def get_converter_for(self, field_type): """ returns a :class:`~openerp.models.Model` used to render a ``t-field``. By default, tries to get the model named :samp:`ir.qweb.field.{field_type}`, falling back on ``ir.qweb.field``. :param str field_type: type or widget of field to render """ return self.pool.get('ir.qweb.field.' + field_type, self.pool['ir.qweb.field']) def get_widget_for(self, widget): """ returns a :class:`~openerp.models.Model` used to render a ``t-esc`` :param str widget: name of the widget to use, or ``None`` """ widget_model = ('ir.qweb.widget.' + widget) if widget else 'ir.qweb.widget' return self.pool.get(widget_model) or self.pool['ir.qweb.widget'] def get_attr_bool(self, attr, default=False): if attr: attr = attr.lower() if attr in ('false', '0'): return False elif attr in ('true', '1'): return True return default #-------------------------------------------------------------------- # QWeb Fields converters #-------------------------------------------------------------------- class FieldConverter(osv.AbstractModel): """ Used to convert a t-field specification into an output HTML field. :meth:`~.to_html` is the entry point of this conversion from QWeb, it: * converts the record value to html using :meth:`~.record_to_html` * generates the metadata attributes (``data-oe-``) to set on the root result node * generates the root result node itself through :meth:`~.render_element` """ _name = 'ir.qweb.field' def attributes(self, cr, uid, field_name, record, options, source_element, g_att, t_att, qweb_context, context=None): """ attributes(cr, uid, field_name, record, options, source_element, g_att, t_att, qweb_context, context=None) Generates the metadata attributes (prefixed by ``data-oe-`` for the root node of the field conversion. Attribute values are escaped by the parent. The default attributes are: * ``model``, the name of the record's model * ``id`` the id of the record to which the field belongs * ``field`` the name of the converted field * ``type`` the logical field type (widget, may not match the field's ``type``, may not be any Field subclass name) * ``translate``, a boolean flag (``0`` or ``1``) denoting whether the field is translatable * ``expression``, the original expression :returns: iterable of (attribute name, attribute value) pairs. 
""" field = record._fields[field_name] field_type = get_field_type(field, options) return [ ('data-oe-model', record._name), ('data-oe-id', record.id), ('data-oe-field', field_name), ('data-oe-type', field_type), ('data-oe-expression', t_att['field']), ] def value_to_html(self, cr, uid, value, field, options=None, context=None): """ value_to_html(cr, uid, value, field, options=None, context=None) Converts a single value to its HTML version/output """ if not value: return '' return value def record_to_html(self, cr, uid, field_name, record, options=None, context=None): """ record_to_html(cr, uid, field_name, record, options=None, context=None) Converts the specified field of the browse_record ``record`` to HTML """ field = record._fields[field_name] return self.value_to_html( cr, uid, record[field_name], field, options=options, context=context) def to_html(self, cr, uid, field_name, record, options, source_element, t_att, g_att, qweb_context, context=None): """ to_html(cr, uid, field_name, record, options, source_element, t_att, g_att, qweb_context, context=None) Converts a ``t-field`` to its HTML output. A ``t-field`` may be extended by a ``t-field-options``, which is a JSON-serialized mapping of configuration values. A default configuration key is ``widget`` which can override the field's own ``_type``. """ try: content = self.record_to_html(cr, uid, field_name, record, options, context=context) if options.get('html-escape', True): content = escape(content) elif hasattr(content, '__html__'): content = content.__html__() except Exception: _logger.warning("Could not get field %s for model %s", field_name, record._name, exc_info=True) content = None inherit_branding = context and context.get('inherit_branding') if not inherit_branding and context and context.get('inherit_branding_auto'): inherit_branding = self.pool['ir.model.access'].check(cr, uid, record._name, 'write', False, context=context) if inherit_branding: # add branding attributes g_att += ''.join( _build_attribute(name, value) for name, value in self.attributes( cr, uid, field_name, record, options, source_element, g_att, t_att, qweb_context, context=context) ) return self.render_element(cr, uid, source_element, t_att, g_att, qweb_context, content) def qweb_object(self): return self.pool['ir.qweb'] def render_element(self, cr, uid, source_element, t_att, g_att, qweb_context, content): """ render_element(cr, uid, source_element, t_att, g_att, qweb_context, content) Final rendering hook, by default just calls ir.qweb's ``render_element`` """ return self.qweb_object().render_element( source_element, t_att, g_att, qweb_context, content or '') def user_lang(self, cr, uid, context): """ user_lang(cr, uid, context) Fetches the res.lang object corresponding to the language code stored in the user's context. Fallbacks to en_US if no lang is present in the context *or the language code is not valid*. 
:returns: res.lang browse_record """ if context is None: context = {} lang_code = context.get('lang') or 'en_US' Lang = self.pool['res.lang'] lang_ids = Lang.search(cr, uid, [('code', '=', lang_code)], context=context) \ or Lang.search(cr, uid, [('code', '=', 'en_US')], context=context) return Lang.browse(cr, uid, lang_ids[0], context=context) class FloatConverter(osv.AbstractModel): _name = 'ir.qweb.field.float' _inherit = 'ir.qweb.field' def precision(self, cr, uid, field, options=None, context=None): _, precision = field.digits or (None, None) return precision def value_to_html(self, cr, uid, value, field, options=None, context=None): if context is None: context = {} precision = self.precision(cr, uid, field, options=options, context=context) fmt = '%f' if precision is None else '%.{precision}f' lang_code = context.get('lang') or 'en_US' lang = self.pool['res.lang'] formatted = lang.format(cr, uid, [lang_code], fmt.format(precision=precision), value, grouping=True) # %f does not strip trailing zeroes. %g does but its precision causes # it to switch to scientific notation starting at a million *and* to # strip decimals. So use %f and if no precision was specified manually # strip trailing 0. if precision is None: formatted = re.sub(r'(?:(0|\d+?)0+)$', r'\1', formatted) return formatted class DateConverter(osv.AbstractModel): _name = 'ir.qweb.field.date' _inherit = 'ir.qweb.field' def value_to_html(self, cr, uid, value, field, options=None, context=None): if not value or len(value)<10: return '' lang = self.user_lang(cr, uid, context=context) locale = babel.Locale.parse(lang.code) if isinstance(value, basestring): value = datetime.datetime.strptime( value[:10], openerp.tools.DEFAULT_SERVER_DATE_FORMAT) if options and 'format' in options: pattern = options['format'] else: strftime_pattern = lang.date_format pattern = openerp.tools.posix_to_ldml(strftime_pattern, locale=locale) return babel.dates.format_date( value, format=pattern, locale=locale) class DateTimeConverter(osv.AbstractModel): _name = 'ir.qweb.field.datetime' _inherit = 'ir.qweb.field' def value_to_html(self, cr, uid, value, field, options=None, context=None): if not value: return '' lang = self.user_lang(cr, uid, context=context) locale = babel.Locale.parse(lang.code) if isinstance(value, basestring): value = datetime.datetime.strptime( value, openerp.tools.DEFAULT_SERVER_DATETIME_FORMAT) value = fields.datetime.context_timestamp( cr, uid, timestamp=value, context=context) if options and 'format' in options: pattern = options['format'] else: strftime_pattern = (u"%s %s" % (lang.date_format, lang.time_format)) pattern = openerp.tools.posix_to_ldml(strftime_pattern, locale=locale) if options and options.get('hide_seconds'): pattern = pattern.replace(":ss", "").replace(":s", "") return babel.dates.format_datetime(value, format=pattern, locale=locale) def record_to_html(self, cr, uid, field_name, record, options, context=None): field = field = record._fields[field_name] value = record[field_name] return self.value_to_html( cr, uid, value, field, options=options, context=dict(context, **record.env.context)) class TextConverter(osv.AbstractModel): _name = 'ir.qweb.field.text' _inherit = 'ir.qweb.field' def value_to_html(self, cr, uid, value, field, options=None, context=None): """ Escapes the value and converts newlines to br. This is bullshit. 
""" if not value: return '' return nl2br(value, options=options) class SelectionConverter(osv.AbstractModel): _name = 'ir.qweb.field.selection' _inherit = 'ir.qweb.field' def record_to_html(self, cr, uid, field_name, record, options=None, context=None): value = record[field_name] if not value: return '' field = record._fields[field_name] selection = dict(field.get_description(record.env)['selection']) return self.value_to_html( cr, uid, selection[value], field, options=options) class ManyToOneConverter(osv.AbstractModel): _name = 'ir.qweb.field.many2one' _inherit = 'ir.qweb.field' def record_to_html(self, cr, uid, field_name, record, options=None, context=None): [read] = record.read([field_name]) if not read[field_name]: return '' _, value = read[field_name] return nl2br(value, options=options) class HTMLConverter(osv.AbstractModel): _name = 'ir.qweb.field.html' _inherit = 'ir.qweb.field' def value_to_html(self, cr, uid, value, field, options=None, context=None): return HTMLSafe(value or '') class ImageConverter(osv.AbstractModel): """ ``image`` widget rendering, inserts a data:uri-using image tag in the document. May be overridden by e.g. the website module to generate links instead. .. todo:: what happens if different output need different converters? e.g. reports may need embedded images or FS links whereas website needs website-aware """ _name = 'ir.qweb.field.image' _inherit = 'ir.qweb.field' def value_to_html(self, cr, uid, value, field, options=None, context=None): try: image = Image.open(cStringIO.StringIO(value.decode('base64'))) image.verify() except IOError: raise ValueError("Non-image binary fields can not be converted to HTML") except: # image.verify() throws "suitable exceptions", I have no idea what they are raise ValueError("Invalid image content") return HTMLSafe('<img src="data:%s;base64,%s">' % (Image.MIME[image.format], value)) class MonetaryConverter(osv.AbstractModel): """ ``monetary`` converter, has a mandatory option ``display_currency``. The currency is used for formatting *and rounding* of the float value. It is assumed that the linked res_currency has a non-empty rounding value and res.currency's ``round`` method is used to perform rounding. .. note:: the monetary converter internally adds the qweb context to its options mapping, so that the context is available to callees. It's set under the ``_qweb_context`` key. """ _name = 'ir.qweb.field.monetary' _inherit = 'ir.qweb.field' def to_html(self, cr, uid, field_name, record, options, source_element, t_att, g_att, qweb_context, context=None): options['_qweb_context'] = qweb_context return super(MonetaryConverter, self).to_html( cr, uid, field_name, record, options, source_element, t_att, g_att, qweb_context, context=context) def record_to_html(self, cr, uid, field_name, record, options, context=None): if context is None: context = {} Currency = self.pool['res.currency'] display_currency = self.display_currency(cr, uid, options['display_currency'], options) # lang.format mandates a sprintf-style format. These formats are non- # minimal (they have a default fixed precision instead), and # lang.format will not set one by default. currency.round will not # provide one either. So we need to generate a precision value # (integer > 0) from the currency's rounding (a float generally < 1.0). # # The log10 of the rounding should be the number of digits involved if # negative, if positive clamp to 0 digits and call it a day. 
# nb: int() ~ floor(), we want nearest rounding instead precision = int(math.floor(math.log10(display_currency.rounding))) fmt = "%.{0}f".format(-precision if precision < 0 else 0) from_amount = record[field_name] if options.get('from_currency'): from_currency = self.display_currency(cr, uid, options['from_currency'], options) from_amount = Currency.compute(cr, uid, from_currency.id, display_currency.id, from_amount) lang_code = context.get('lang') or 'en_US' lang = self.pool['res.lang'] formatted_amount = lang.format(cr, uid, [lang_code], fmt, Currency.round(cr, uid, display_currency, from_amount), grouping=True, monetary=True) pre = post = u'' if display_currency.position == 'before': pre = u'{symbol}\N{NO-BREAK SPACE}' else: post = u'\N{NO-BREAK SPACE}{symbol}' return HTMLSafe(u'{pre}<span class="oe_currency_value">{0}</span>{post}'.format( formatted_amount, pre=pre, post=post, ).format( symbol=display_currency.symbol, )) def display_currency(self, cr, uid, currency, options): return self.qweb_object().eval_object( currency, options['_qweb_context']) TIMEDELTA_UNITS = ( ('year', 3600 * 24 * 365), ('month', 3600 * 24 * 30), ('week', 3600 * 24 * 7), ('day', 3600 * 24), ('hour', 3600), ('minute', 60), ('second', 1) ) class DurationConverter(osv.AbstractModel): """ ``duration`` converter, to display integral or fractional values as human-readable time spans (e.g. 1.5 as "1 hour 30 minutes"). Can be used on any numerical field. Has a mandatory option ``unit`` which can be one of ``second``, ``minute``, ``hour``, ``day``, ``week`` or ``year``, used to interpret the numerical field value before converting it. Sub-second values will be ignored. """ _name = 'ir.qweb.field.duration' _inherit = 'ir.qweb.field' def value_to_html(self, cr, uid, value, field, options=None, context=None): units = dict(TIMEDELTA_UNITS) if value < 0: raise ValueError(_("Durations can't be negative")) if not options or options.get('unit') not in units: raise ValueError(_("A unit must be provided to duration widgets")) locale = babel.Locale.parse( self.user_lang(cr, uid, context=context).code) factor = units[options['unit']] sections = [] r = value * factor for unit, secs_per_unit in TIMEDELTA_UNITS: v, r = divmod(r, secs_per_unit) if not v: continue section = babel.dates.format_timedelta( v*secs_per_unit, threshold=1, locale=locale) if section: sections.append(section) return ' '.join(sections) class RelativeDatetimeConverter(osv.AbstractModel): _name = 'ir.qweb.field.relative' _inherit = 'ir.qweb.field' def value_to_html(self, cr, uid, value, field, options=None, context=None): parse_format = openerp.tools.DEFAULT_SERVER_DATETIME_FORMAT locale = babel.Locale.parse( self.user_lang(cr, uid, context=context).code) if isinstance(value, basestring): value = datetime.datetime.strptime(value, parse_format) # value should be a naive datetime in UTC. 
So is fields.Datetime.now() reference = datetime.datetime.strptime(field.now(), parse_format) return babel.dates.format_timedelta( value - reference, add_direction=True, locale=locale) class Contact(orm.AbstractModel): _name = 'ir.qweb.field.contact' _inherit = 'ir.qweb.field.many2one' def record_to_html(self, cr, uid, field_name, record, options=None, context=None): if context is None: context = {} if options is None: options = {} opf = options.get('fields') or ["name", "address", "phone", "mobile", "fax", "email"] value_rec = record[field_name] if not value_rec: return None value_rec = value_rec.sudo().with_context(show_address=True) value = value_rec.name_get()[0][1] val = { 'name': value.split("\n")[0], 'address': escape("\n".join(value.split("\n")[1:])), 'phone': value_rec.phone, 'mobile': value_rec.mobile, 'fax': value_rec.fax, 'city': value_rec.city, 'country_id': value_rec.country_id.display_name, 'website': value_rec.website, 'email': value_rec.email, 'fields': opf, 'object': value_rec, 'options': options } html = self.pool["ir.ui.view"].render(cr, uid, "base.contact", val, engine='ir.qweb', context=context).decode('utf8') return HTMLSafe(html) class QwebView(orm.AbstractModel): _name = 'ir.qweb.field.qweb' _inherit = 'ir.qweb.field.many2one' def record_to_html(self, cr, uid, field_name, record, options=None, context=None): if not getattr(record, field_name): return None view = getattr(record, field_name) if view._model._name != "ir.ui.view": _logger.warning("%s.%s must be a 'ir.ui.view' model." % (record, field_name)) return None ctx = (context or {}).copy() ctx['object'] = record html = view.render(ctx, engine='ir.qweb', context=ctx).decode('utf8') return HTMLSafe(html) class QwebWidget(osv.AbstractModel): _name = 'ir.qweb.widget' def _format(self, inner, options, qwebcontext): return self.pool['ir.qweb'].eval_str(inner, qwebcontext) def format(self, inner, options, qwebcontext): return escape(self._format(inner, options, qwebcontext)) class QwebWidgetMonetary(osv.AbstractModel): _name = 'ir.qweb.widget.monetary' _inherit = 'ir.qweb.widget' def _format(self, inner, options, qwebcontext): inner = self.pool['ir.qweb'].eval(inner, qwebcontext) display = self.pool['ir.qweb'].eval_object(options['display_currency'], qwebcontext) precision = int(round(math.log10(display.rounding))) fmt = "%.{0}f".format(-precision if precision < 0 else 0) lang_code = qwebcontext.context.get('lang') or 'en_US' formatted_amount = self.pool['res.lang'].format( qwebcontext.cr, qwebcontext.uid, [lang_code], fmt, inner, grouping=True, monetary=True ) pre = post = u'' if display.position == 'before': pre = u'{symbol}\N{NO-BREAK SPACE}' else: post = u'\N{NO-BREAK SPACE}{symbol}' return u'{pre}{0}{post}'.format( formatted_amount, pre=pre, post=post ).format(symbol=display.symbol,) class HTMLSafe(object): """ HTMLSafe string wrapper, Werkzeug's escape() has special handling for objects with a ``__html__`` methods but AFAIK does not provide any such object. Wrapping a string in HTML will prevent its escaping """ __slots__ = ['string'] def __init__(self, string): self.string = string def __html__(self): return self.string def __str__(self): s = self.string if isinstance(s, unicode): return s.encode('utf-8') return s def __unicode__(self): s = self.string if isinstance(s, str): return s.decode('utf-8') return s def nl2br(string, options=None): """ Converts newlines to HTML linebreaks in ``string``. 
Automatically escapes content unless options['html-escape'] is set to False, and returns the result wrapped in an HTMLSafe object. :param str string: :param dict options: :rtype: HTMLSafe """ if options is None: options = {} if options.get('html-escape', True): string = escape(string) return HTMLSafe(string.replace('\n', '<br>\n')) def get_field_type(field, options): """ Gets a t-field's effective type from the field definition and its options """ return options.get('widget', field.type) class AssetError(Exception): pass class AssetNotFound(AssetError): pass class AssetsBundle(object): # Sass installation: # # sudo gem install sass compass bootstrap-sass # # If the following error is encountered: # 'ERROR: Cannot load compass.' # Use this: # sudo gem install compass --pre cmd_sass = ['sass', '--stdin', '-t', 'compressed', '--unix-newlines', '--compass', '-r', 'bootstrap-sass'] rx_css_import = re.compile("(@import[^;{]+;?)", re.M) rx_sass_import = re.compile("""(@import\s?['"]([^'"]+)['"])""") rx_css_split = re.compile("\/\*\! ([a-f0-9-]+) \*\/") def __init__(self, xmlid, debug=False, cr=None, uid=None, context=None, registry=None): self.xmlid = xmlid self.cr = request.cr if cr is None else cr self.uid = request.uid if uid is None else uid self.context = request.context if context is None else context self.registry = request.registry if registry is None else registry self.javascripts = [] self.stylesheets = [] self.css_errors = [] self.remains = [] self._checksum = None context = self.context.copy() context['inherit_branding'] = False context['inherit_branding_auto'] = False context['rendering_bundle'] = True self.html = self.registry['ir.ui.view'].render(self.cr, self.uid, xmlid, context=context) self.parse() def parse(self): fragments = html.fragments_fromstring(self.html) for el in fragments: if isinstance(el, basestring): self.remains.append(el) elif isinstance(el, html.HtmlElement): src = el.get('src', '') href = el.get('href', '') atype = el.get('type') media = el.get('media') if el.tag == 'style': if atype == 'text/sass' or src.endswith('.sass'): self.stylesheets.append(SassAsset(self, inline=el.text, media=media)) else: self.stylesheets.append(StylesheetAsset(self, inline=el.text, media=media)) elif el.tag == 'link' and el.get('rel') == 'stylesheet' and self.can_aggregate(href): if href.endswith('.sass') or atype == 'text/sass': self.stylesheets.append(SassAsset(self, url=href, media=media)) else: self.stylesheets.append(StylesheetAsset(self, url=href, media=media)) elif el.tag == 'script' and not src: self.javascripts.append(JavascriptAsset(self, inline=el.text)) elif el.tag == 'script' and self.can_aggregate(src): self.javascripts.append(JavascriptAsset(self, url=src)) else: self.remains.append(html.tostring(el)) else: try: self.remains.append(html.tostring(el)) except Exception: # notYETimplementederror raise NotImplementedError def can_aggregate(self, url): return not urlparse(url).netloc and not url.startswith(('/web/css', '/web/js')) def to_html(self, sep=None, css=True, js=True, debug=False): if sep is None: sep = '\n ' response = [] if debug: if css and self.stylesheets: self.compile_sass() for style in self.stylesheets: response.append(style.to_html()) if js: for jscript in self.javascripts: response.append(jscript.to_html()) else: url_for = self.context.get('url_for', lambda url: url) if css and self.stylesheets: suffix = '' if request: ua = request.httprequest.user_agent if ua.browser == "msie" and int((ua.version or '0').split('.')[0]) < 10: suffix = '.0' href = 
'/web/css%s/%s/%s' % (suffix, self.xmlid, self.version) response.append('<link href="%s" rel="stylesheet"/>' % url_for(href)) if js: src = '/web/js/%s/%s' % (self.xmlid, self.version) response.append('<script type="text/javascript" src="%s"></script>' % url_for(src)) response.extend(self.remains) return sep + sep.join(response) @lazy_property def last_modified(self): """Returns last modified date of linked files""" return max(itertools.chain( (asset.last_modified for asset in self.javascripts), (asset.last_modified for asset in self.stylesheets), )) @lazy_property def version(self): return self.checksum[0:7] @lazy_property def checksum(self): """ Not really a full checksum. We compute a SHA1 on the rendered bundle + max linked files last_modified date """ check = self.html + str(self.last_modified) return hashlib.sha1(check).hexdigest() def js(self): content = self.get_cache('js') if content is None: content = ';\n'.join(asset.minify() for asset in self.javascripts) self.set_cache('js', content) return content def css(self, page_number=None): if page_number is not None: return self.css_page(page_number) content = self.get_cache('css') if content is None: self.compile_sass() content = '\n'.join(asset.minify() for asset in self.stylesheets) if self.css_errors: msg = '\n'.join(self.css_errors) content += self.css_message(msg.replace('\n', '\\A ')) # move up all @import rules to the top matches = [] def push(matchobj): matches.append(matchobj.group(0)) return '' content = re.sub(self.rx_css_import, push, content) matches.append(content) content = u'\n'.join(matches) if not self.css_errors: self.set_cache('css', content) content = content.encode('utf-8') return content def css_page(self, page_number): content = self.get_cache('css.%d' % (page_number,)) if page_number: return content if content is None: css = self.css().decode('utf-8') re_rules = '([^{]+\{(?:[^{}]|\{[^{}]*\})*\})' re_selectors = '()(?:\s*@media\s*[^{]*\{)?(?:\s*(?:[^,{]*(?:,|\{(?:[^}]*\}))))' css_url = '@import url(\'/web/css.%%d/%s/%s\');' % (self.xmlid, self.version) pages = [[]] page = pages[0] page_selectors = 0 for rule in re.findall(re_rules, css): selectors = len(re.findall(re_selectors, rule)) if page_selectors + selectors < MAX_CSS_RULES: page_selectors += selectors page.append(rule) else: pages.append([rule]) page = pages[-1] page_selectors = selectors if len(pages) == 1: pages = [] for idx, page in enumerate(pages): self.set_cache("css.%d" % (idx+1), ''.join(page)) content = '\n'.join(css_url % i for i in range(1,len(pages)+1)) self.set_cache("css.0", content) if not content: return self.css() return content def get_cache(self, type): content = None domain = [('url', '=', '/web/%s/%s/%s' % (type, self.xmlid, self.version))] bundle = self.registry['ir.attachment'].search_read(self.cr, openerp.SUPERUSER_ID, domain, ['datas'], context=self.context) if bundle and bundle[0]['datas']: content = bundle[0]['datas'].decode('base64') return content def set_cache(self, type, content): ira = self.registry['ir.attachment'] url_prefix = '/web/%s/%s/' % (type, self.xmlid) # Invalidate previous caches try: with self.cr.savepoint(): domain = [('url', '=like', url_prefix + '%')] oids = ira.search(self.cr, openerp.SUPERUSER_ID, domain, context=self.context) if oids: ira.unlink(self.cr, openerp.SUPERUSER_ID, oids, context=self.context) url = url_prefix + self.version ira.create(self.cr, openerp.SUPERUSER_ID, dict( datas=content.encode('utf8').encode('base64'), type='binary', name=url, url=url, ), context=self.context) except 
psycopg2.Error: pass def css_message(self, message): return """ body:before { background: #ffc; width: 100%%; font-size: 14px; font-family: monospace; white-space: pre; content: "%s"; } """ % message.replace('"', '\\"') def compile_sass(self): """ Checks if the bundle contains any sass content, then compiles it to css. Css compilation is done at the bundle level and not in the assets because they are potentially interdependant. """ sass = [asset for asset in self.stylesheets if isinstance(asset, SassAsset)] if not sass: return source = '\n'.join([asset.get_source() for asset in sass]) # move up all @import rules to the top and exclude file imports imports = [] def push(matchobj): ref = matchobj.group(2) line = '@import "%s"' % ref if '.' not in ref and line not in imports and not ref.startswith(('.', '/', '~')): imports.append(line) return '' source = re.sub(self.rx_sass_import, push, source) imports.append(source) source = u'\n'.join(imports) try: compiler = Popen(self.cmd_sass, stdin=PIPE, stdout=PIPE, stderr=PIPE) except Exception: msg = "Could not find 'sass' program needed to compile sass/scss files" _logger.error(msg) self.css_errors.append(msg) return result = compiler.communicate(input=source.encode('utf-8')) if compiler.returncode: error = self.get_sass_error(''.join(result), source=source) _logger.warning(error) self.css_errors.append(error) return compiled = result[0].strip().decode('utf8') fragments = self.rx_css_split.split(compiled)[1:] while fragments: asset_id = fragments.pop(0) asset = next(asset for asset in sass if asset.id == asset_id) asset._content = fragments.pop(0) def get_sass_error(self, stderr, source=None): # TODO: try to find out which asset the error belongs to error = stderr.split('Load paths')[0].replace(' Use --trace for backtrace.', '') error += "This error occured while compiling the bundle '%s' containing:" % self.xmlid for asset in self.stylesheets: if isinstance(asset, SassAsset): error += '\n - %s' % (asset.url if asset.url else '<inline sass>') return error class WebAsset(object): html_url = '%s' def __init__(self, bundle, inline=None, url=None): self.id = str(uuid.uuid4()) self.bundle = bundle self.inline = inline self.url = url self.cr = bundle.cr self.uid = bundle.uid self.registry = bundle.registry self.context = bundle.context self._content = None self._filename = None self._ir_attach = None name = '<inline asset>' if inline else url self.name = "%s defined in bundle '%s'" % (name, bundle.xmlid) if not inline and not url: raise Exception("An asset should either be inlined or url linked") def stat(self): if not (self.inline or self._filename or self._ir_attach): addon = filter(None, self.url.split('/'))[0] try: # Test url against modules static assets mpath = openerp.http.addons_manifest[addon]['addons_path'] self._filename = mpath + self.url.replace('/', os.path.sep) except Exception: try: # Test url against ir.attachments fields = ['__last_update', 'datas', 'mimetype'] domain = [('type', '=', 'binary'), ('url', '=', self.url)] ira = self.registry['ir.attachment'] attach = ira.search_read(self.cr, openerp.SUPERUSER_ID, domain, fields, context=self.context) self._ir_attach = attach[0] except Exception: raise AssetNotFound("Could not find %s" % self.name) def to_html(self): raise NotImplementedError() @lazy_property def last_modified(self): try: self.stat() if self._filename: return datetime.datetime.fromtimestamp(os.path.getmtime(self._filename)) elif self._ir_attach: server_format = openerp.tools.misc.DEFAULT_SERVER_DATETIME_FORMAT last_update = 
self._ir_attach['__last_update'] try: return datetime.datetime.strptime(last_update, server_format + '.%f') except ValueError: return datetime.datetime.strptime(last_update, server_format) except Exception: pass return datetime.datetime(1970, 1, 1) @property def content(self): if not self._content: self._content = self.inline or self._fetch_content() return self._content def _fetch_content(self): """ Fetch content from file or database""" try: self.stat() if self._filename: with open(self._filename, 'rb') as fp: return fp.read().decode('utf-8') else: return self._ir_attach['datas'].decode('base64') except UnicodeDecodeError: raise AssetError('%s is not utf-8 encoded.' % self.name) except IOError: raise AssetNotFound('File %s does not exist.' % self.name) except: raise AssetError('Could not get content for %s.' % self.name) def minify(self): return self.content def with_header(self, content=None): if content is None: content = self.content return '\n/* %s */\n%s' % (self.name, content) class JavascriptAsset(WebAsset): def minify(self): return self.with_header(rjsmin(self.content)) def _fetch_content(self): try: return super(JavascriptAsset, self)._fetch_content() except AssetError, e: return "console.error(%s);" % json.dumps(e.message) def to_html(self): if self.url: return '<script type="text/javascript" src="%s"></script>' % (self.html_url % self.url) else: return '<script type="text/javascript" charset="utf-8">%s</script>' % self.with_header() class StylesheetAsset(WebAsset): rx_import = re.compile(r"""@import\s+('|")(?!'|"|/|https?://)""", re.U) rx_url = re.compile(r"""url\s*\(\s*('|"|)(?!'|"|/|https?://|data:)""", re.U) rx_sourceMap = re.compile(r'(/\*# sourceMappingURL=.*)', re.U) rx_charset = re.compile(r'(@charset "[^"]+";)', re.U) def __init__(self, *args, **kw): self.media = kw.pop('media', None) super(StylesheetAsset, self).__init__(*args, **kw) @property def content(self): content = super(StylesheetAsset, self).content if self.media: content = '@media %s { %s }' % (self.media, content) return content def _fetch_content(self): try: content = super(StylesheetAsset, self)._fetch_content() web_dir = os.path.dirname(self.url) content = self.rx_import.sub( r"""@import \1%s/""" % (web_dir,), content, ) content = self.rx_url.sub( r"url(\1%s/" % (web_dir,), content, ) # remove charset declarations, we only support utf-8 content = self.rx_charset.sub('', content) except AssetError, e: self.bundle.css_errors.append(e.message) return '' return content def minify(self): # remove existing sourcemaps, make no sense after re-mini content = self.rx_sourceMap.sub('', self.content) # comments content = re.sub(r'/\*.*?\*/', '', content, flags=re.S) # space content = re.sub(r'\s+', ' ', content) content = re.sub(r' *([{}]) *', r'\1', content) return self.with_header(content) def to_html(self): media = (' media="%s"' % werkzeug.utils.escape(self.media)) if self.media else '' if self.url: href = self.html_url % self.url return '<link rel="stylesheet" href="%s" type="text/css"%s/>' % (href, media) else: return '<style type="text/css"%s>%s</style>' % (media, self.with_header()) class SassAsset(StylesheetAsset): html_url = '%s.css' rx_indent = re.compile(r'^( +|\t+)', re.M) indent = None reindent = ' ' def minify(self): return self.with_header() def to_html(self): if self.url: try: ira = self.registry['ir.attachment'] url = self.html_url % self.url domain = [('type', '=', 'binary'), ('url', '=', self.url)] with self.cr.savepoint(): ira_id = ira.search(self.cr, openerp.SUPERUSER_ID, domain, 
context=self.context) if ira_id: # TODO: update only if needed ira.write(self.cr, openerp.SUPERUSER_ID, [ira_id], {'datas': self.content}, context=self.context) else: ira.create(self.cr, openerp.SUPERUSER_ID, dict( datas=self.content.encode('utf8').encode('base64'), mimetype='text/css', type='binary', name=url, url=url, ), context=self.context) except psycopg2.Error: pass return super(SassAsset, self).to_html() def get_source(self): content = textwrap.dedent(self.inline or self._fetch_content()) def fix_indent(m): ind = m.group() if self.indent is None: self.indent = ind if self.indent == self.reindent: # Don't reindent the file if identation is the final one (reindent) raise StopIteration() return ind.replace(self.indent, self.reindent) try: content = self.rx_indent.sub(fix_indent, content) except StopIteration: pass return "/*! %s */\n%s" % (self.id, content) def rjsmin(script): """ Minify js with a clever regex. Taken from http://opensource.perlig.de/rjsmin Apache License, Version 2.0 """ def subber(match): """ Substitution callback """ groups = match.groups() return ( groups[0] or groups[1] or groups[2] or groups[3] or (groups[4] and '\n') or (groups[5] and ' ') or (groups[6] and ' ') or (groups[7] and ' ') or '' ) result = re.sub( r'([^\047"/\000-\040]+)|((?:(?:\047[^\047\\\r\n]*(?:\\(?:[^\r\n]|\r?' r'\n|\r)[^\047\\\r\n]*)*\047)|(?:"[^"\\\r\n]*(?:\\(?:[^\r\n]|\r?\n|' r'\r)[^"\\\r\n]*)*"))[^\047"/\000-\040]*)|(?:(?<=[(,=:\[!&|?{};\r\n]' r')(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/' r'))*((?:/(?![\r\n/*])[^/\\\[\r\n]*(?:(?:\\[^\r\n]|(?:\[[^\\\]\r\n]*' r'(?:\\[^\r\n][^\\\]\r\n]*)*\]))[^/\\\[\r\n]*)*/)[^\047"/\000-\040]*' r'))|(?:(?<=[\000-#%-,./:-@\[-^`{-~-]return)(?:[\000-\011\013\014\01' r'6-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*((?:/(?![\r\n/*])[^/' r'\\\[\r\n]*(?:(?:\\[^\r\n]|(?:\[[^\\\]\r\n]*(?:\\[^\r\n][^\\\]\r\n]' r'*)*\]))[^/\\\[\r\n]*)*/)[^\047"/\000-\040]*))|(?<=[^\000-!#%&(*,./' r':-@\[\\^`{|~])(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/' r'*][^*]*\*+)*/))*(?:((?:(?://[^\r\n]*)?[\r\n]))(?:[\000-\011\013\01' r'4\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*)+(?=[^\000-\040"#' r'%-\047)*,./:-@\\-^`|-~])|(?<=[^\000-#%-,./:-@\[-^`{-~-])((?:[\000-' r'\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)))+(?=[^' r'\000-#%-,./:-@\[-^`{-~-])|(?<=\+)((?:[\000-\011\013\014\016-\040]|' r'(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)))+(?=\+)|(?<=-)((?:[\000-\011\0' r'13\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)))+(?=-)|(?:[\0' r'00-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))+|(?:' r'(?:(?://[^\r\n]*)?[\r\n])(?:[\000-\011\013\014\016-\040]|(?:/\*[^*' r']*\*+(?:[^/*][^*]*\*+)*/))*)+', subber, '\n%s\n' % script ).strip() return result # vim:et:
agpl-3.0
6,998,854,237,731,619,000
39.297196
138
0.565873
false
espadrine/opera
chromium/src/third_party/python_26/Lib/site-packages/win32/Demos/eventLogDemo.py
17
3273
import win32evtlog, traceback import win32api, win32con import win32security # To translate NT Sids to account names. from win32evtlogutil import * def ReadLog(computer, logType="Application", dumpEachRecord = 0): # read the entire log back. h=win32evtlog.OpenEventLog(computer, logType) numRecords = win32evtlog.GetNumberOfEventLogRecords(h) # print "There are %d records" % numRecords num=0 while 1: objects = win32evtlog.ReadEventLog(h, win32evtlog.EVENTLOG_BACKWARDS_READ|win32evtlog.EVENTLOG_SEQUENTIAL_READ, 0) if not objects: break for object in objects: # get it for testing purposes, but dont print it. msg = SafeFormatMessage(object, logType).encode("mbcs") if object.Sid is not None: try: domain, user, typ = win32security.LookupAccountSid(computer, object.Sid) sidDesc = "%s/%s" % (domain, user) except win32security.error: sidDesc = str(object.Sid) user_desc = "Event associated with user %s" % (sidDesc,) else: user_desc = None if dumpEachRecord: if user_desc: print user_desc print msg num = num + len(objects) if numRecords == num: print "Successfully read all", numRecords, "records" else: print "Couldn't get all records - reported %d, but found %d" % (numRecords, num) print "(Note that some other app may have written records while we were running!)" win32evtlog.CloseEventLog(h) def Usage(): print "Writes an event to the event log." print "-w : Dont write any test records." print "-r : Dont read the event log" print "-c : computerName : Process the log on the specified computer" print "-v : Verbose" print "-t : LogType - Use the specified log - default = 'Application'" def test(): # check if running on Windows NT, if not, display notice and terminate if win32api.GetVersion() & 0x80000000: print "This sample only runs on NT" return import sys, getopt opts, args = getopt.getopt(sys.argv[1:], "rwh?c:t:v") computer = None do_read = do_write = 1 logType = "Application" verbose = 0 if len(args)>0: print "Invalid args" usage() return 1 for opt, val in opts: if opt == '-t': logType = val if opt == '-c': computer = val if opt in ['-h', '-?']: Usage() return if opt=='-r': do_read = 0 if opt=='-w': do_write = 0 if opt=='-v': verbose = verbose + 1 if do_write: ReportEvent(logType, 2, strings=["The message text for event 2"], data = "Raw\0Data") ReportEvent(logType, 1, eventType=win32evtlog.EVENTLOG_WARNING_TYPE, strings=["A warning"], data = "Raw\0Data") ReportEvent(logType, 1, eventType=win32evtlog.EVENTLOG_INFORMATION_TYPE, strings=["An info"], data = "Raw\0Data") print "Successfully wrote 3 records to the log" if do_read: ReadLog(computer, logType, verbose > 0) if __name__=='__main__': test()
bsd-3-clause
-169,061,908,038,165,400
33.819149
122
0.586618
false
hjfreyer/marry-fuck-kill
backend/core.py
1
11449
#!/usr/bin/env python # # Copyright 2011 Hunter Freyer and Michael Kelly # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import collections import datetime import hmac import json import logging import urllib2 from google.appengine.api import urlfetch from google.appengine.api import users from google.appengine.ext import db import models import config_NOCOMMIT as config # Whether to display the new vote counts (cached in Triples). USE_CACHED_VOTE_COUNTS = True Image = collections.namedtuple('Image', ['original', 'thumbnail']) class JSONStructureError(Exception): """Raised when JSON structure doesn't match our expectations.""" class EntityValidationError(Exception): pass def GetStatsUrlsForTriple(triple, w=160, h=85): """Returns a list of stats URLs for the given triple. Args: triple: (Triple) triple to examine w: (int) Optional. Width of each chart image. h: (int) Optional. Height of each chart image. Returns: [str, str, str]: URLs for the Triple's three Entities. """ counts = GetTripleVoteCounts(triple) urls = [] overall_max = max([max(c) for c in counts]) for count in counts: urls.append('http://chart.apis.google.com/chart' '?chxr=0,0,%(max)d' '&chxt=y' '&chbh=a' '&chs=%(w)dx%(h)d' '&cht=bvg' '&chco=9911BB,C76FDD,63067A' '&chds=0,%(max)d,0,%(max)d,0,%(max)d' '&chd=t:%(m)d|%(f)d|%(k)d' '&chdl=Marry|Fuck|Kill' '&chdlp=r' % (dict(m=count[0], f=count[1], k=count[2], max=overall_max, w=w, h=h))) return urls def GetTripleVoteCounts(triple): """Calculates vote count for the given triple. Returns: ([[int]]) Vote counts. This is a nested list (first level is votes for entity one, two, three; second level is votes for m, f, k). """ def _CalculateEntityVoteCounts(entity): m = entity.assignment_reference_marry_set.count() f = entity.assignment_reference_fuck_set.count() k = entity.assignment_reference_kill_set.count() return [m, f, k] # For backwards compatibility with Triples that don't have embedded vote # counts. if not USE_CACHED_VOTE_COUNTS or not triple.has_cached_votes: logging.info('Updating legacy Triple without vote counts: %s', triple.key()) votes = [_CalculateEntityVoteCounts(triple.one), _CalculateEntityVoteCounts(triple.two), _CalculateEntityVoteCounts(triple.three)] # Race condition here: We're done calculating the votes, and we're about to # update the Entity. We might be off by one if someone else votes while # we're here. We have MapReduces to fix this up, so we don't care too much. db.run_in_transaction(_UpdateTripleVoteCounts, triple.key(), votes) return votes else: logging.info('Got cached votes for Triple %s', triple.key()) return [[triple.votes_one_m, triple.votes_one_f, triple.votes_one_k], [triple.votes_two_m, triple.votes_two_f, triple.votes_two_k], [triple.votes_three_m, triple.votes_three_f, triple.votes_three_k]] def _AddTripleVoteCounts(triple_key, votes): """Adds votes to a triple's vote count. This should be run in a transaction. 
Args: triple_key: (db.Key) the triple to update votes: ([str]) a 3-list of 'm', 'f', and 'k', corresponding to the votes for the 3 items in the triple, in order. """ triple = models.Triple.get(triple_key) if triple.has_cached_votes: triple.votes_one_m += 1 if votes[0] == 'm' else 0 triple.votes_one_f += 1 if votes[0] == 'f' else 0 triple.votes_one_k += 1 if votes[0] == 'k' else 0 triple.votes_two_m += 1 if votes[1] == 'm' else 0 triple.votes_two_f += 1 if votes[1] == 'f' else 0 triple.votes_two_k += 1 if votes[1] == 'k' else 0 triple.votes_three_m += 1 if votes[2] == 'm' else 0 triple.votes_three_f += 1 if votes[2] == 'f' else 0 triple.votes_three_k += 1 if votes[2] == 'k' else 0 triple.put() else: logging.warning('_AddTripleVoteCounts: Legacy Triple without vote counts:' '%s', triple_key) def _UpdateTripleVoteCounts(triple_key, new_counts): """Updates vote counts on the given triple. Args: triple: (Triple) triple to update new_counts: ([[int]]) These values are the new values for votes_one_m, ..., votes_three_k. See core.GetTripleVoteCounts. """ triple = models.Triple.get(triple_key) assert(len(new_counts) == 3) votes_one, votes_two, votes_three = new_counts assert(len(votes_one) == 3) assert(len(votes_two) == 3) assert(len(votes_three) == 3) triple.votes_one_m, triple.votes_one_f, triple.votes_one_k = votes_one triple.votes_two_m, triple.votes_two_f, triple.votes_two_k = votes_two triple.votes_three_m, triple.votes_three_f, triple.votes_three_k = ( votes_three) triple.has_cached_votes = True triple.put() def MakeEntity(name, query, user_ip, thumb_url, original_url): """Makes an Entity with the given attributes.""" # Get the thumbnail URL for the entity. This could throw # URLError. We'll let it bubble up. result = urlfetch.fetch(thumb_url) logging.info('Downloading %s' % thumb_url) entity = models.Entity(name=name, data=result.content, query=query, original_url=original_url) entity.put() return entity def MakeTriple(entities, creator, creator_ip): """Create the named triple. Args: entities: a data structure built in MakeSubmitHandler. creator: the user who created the Triple. creator_ip: IP address of the request to make this triple. """ for i in range(len(entities)): # TODO(mjkelly): Check for a signature element. for k in ['n', 'u', 'q', 'ou']: if not entities[i][k]: raise ValueError("Entity %s missing attribute '%s'" % (i, k)) # This may raise a URLError or EntityValidatationError. one = MakeEntity(name=entities[0]['n'], query=entities[0]['q'], user_ip=creator_ip, thumb_url=entities[0]['u'], original_url=entities[0]['ou']) two = MakeEntity(name=entities[1]['n'], query=entities[1]['q'], user_ip=creator_ip, thumb_url=entities[1]['u'], original_url=entities[1]['ou']) three = MakeEntity(name=entities[2]['n'], query=entities[2]['q'], user_ip=creator_ip, thumb_url=entities[2]['u'], original_url=entities[2]['ou']) triple = models.Triple(one=one, two=two, three=three, creator=creator, creatorip=creator_ip, has_cached_votes=True, votes_one_m=0, votes_one_f=0, votes_one_k=0, votes_two_m=0, votes_two_f=0, votes_two_k=0, votes_three_m=0, votes_three_f=0, votes_three_k=0) triple.enable() triple.put() return triple def MakeAssignment(triple_id, v1, v2, v3, user, user_ip): """Create a new assignment. 
Args: request: the POST request from the client user: the user who made the assignment request """ values = [v1, v2, v3] if set(values) != set(['m', 'f', 'k']): return None try: triple_id = long(triple_id) except ValueError: logging.error("make_assignment: bad triple key '%s'", triple_id) triple = models.Triple.get_by_id(triple_id) logging.debug('triple = %s', triple) if triple is None: logging.error('make_assignment: No triple with key %s', triple_id) return None db.run_in_transaction(_AddTripleVoteCounts, triple.key(), values) # We get an entity->action map from the client, but we need to reverse # it to action->entity to update the DB. triple_entities = [triple.one, triple.two, triple.three] entities = {} for i in range(len(values)): # Items in values are guaranteed to be 'm', 'f', 'k' (check above) entities[values[i]] = triple_entities[i] if (entities['m'] is None or entities['f'] is None or entities['k'] is None): logging.error('Not all non-None: marry = %s, fuck = %s, kill = %s', entities['m'], entities['f'], entities['k']) return None assign = models.Assignment(triple=triple, marry=entities['m'], fuck=entities['f'], kill=entities['k'], user=user, userip=str(user_ip)) assign.put() logging.info("Assigned m=%s, f=%s, k=%s to %s", entities['m'], entities['f'], entities['k'], triple) return assign def ImageSearch(query, user_ip): """Performs an image search. It should return 10 results. Args: query: (str) The search query user_ip: (str) IP address of user making the query, for accounting. Returns: [Image]: A list of Image objects representing search results Raises: JSONStructureError: If the structure of the search results is unexpected. """ images = [] query = query.encode('utf-8') url = ('https://www.googleapis.com/customsearch/v1' '?key={key}' '&cx={cx}' '&q={q}' '&userIp={userip}' '&searchType=image').format( key=config.CSE_API_KEY, cx=config.CSE_ID, q=urllib2.quote(query), userip=user_ip) logging.info('ImageSearch: query url=%s', url) download_start = datetime.datetime.now() # This may raise a DownloadError result = urlfetch.fetch(url) download_finish = datetime.datetime.now() data = json.loads(result.content) parse_finish = datetime.datetime.now() logging.info('ImageSearch: downloaded %s bytes; %s to download, %s to parse', len(result.content), download_finish - download_start, parse_finish - download_finish) try: if data['searchInformation']['totalResults'] == '0': return [] for item in data['items']: link = item['link'] thumb = item['image']['thumbnailLink'] images.append(Image(original=link, thumbnail=thumb)) except KeyError as e: error = 'Missing key %s in JSON. JSON = %r' % (e, result.content) logging.error(error) raise JSONStructureError(error) return images def Sign(*items): """Signs a sequence of items using our internal HMAC key. Args: *items: Any sequence of items. Returns: (str) Hex digest of *items """ h = hmac.new(config.HMAC_KEY) for item in items: h.update(item) return h.hexdigest()
apache-2.0
4,690,555,505,954,704,000
32.379009
79
0.607389
false
rmhyman/DataScience
Lesson1/IntroToPandas.py
1
1976
import pandas as pd ''' The following code is to help you play with the concept of Series in Pandas. You can think of Series as an one-dimensional object that is similar to an array, list, or column in a database. By default, it will assign an index label to each item in the Series ranging from 0 to N, where N is the number of items in the Series minus one. Please feel free to play around with the concept of Series and see what it does *This playground is inspired by Greg Reda's post on Intro to Pandas Data Structures: http://www.gregreda.com/2013/10/26/intro-to-pandas-data-structures/ ''' # Change False to True to create a Series object if True: series = pd.Series(['Dave', 'Cheng-Han', 'Udacity', 42, -1789710578]) print series ''' You can also manually assign indices to the items in the Series when creating the series ''' # Change False to True to see custom index in action if False: series = pd.Series(['Dave', 'Cheng-Han', 359, 9001], index=['Instructor', 'Curriculum Manager', 'Course Number', 'Power Level']) print series ''' You can use index to select specific items from the Series ''' # Change False to True to see Series indexing in action if False: series = pd.Series(['Dave', 'Cheng-Han', 359, 9001], index=['Instructor', 'Curriculum Manager', 'Course Number', 'Power Level']) print series['Instructor'] print "" print series[['Instructor', 'Curriculum Manager', 'Course Number']] ''' You can also use boolean operators to select specific items from the Series ''' # Change False to True to see boolean indexing in action if True: cuteness = pd.Series([1, 2, 3, 4, 5], index=['Cockroach', 'Fish', 'Mini Pig', 'Puppy', 'Kitten']) print cuteness > 3 print "" print cuteness[cuteness > 3]
mit
5,303,408,304,625,054,000
34.592593
84
0.635121
false
tedelhourani/ansible
lib/ansible/module_utils/known_hosts.py
7
6918
# This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible # still belong to the author of the module, and may assign their own license # to the complete work. # # Copyright (c), Michael DeHaan <[email protected]>, 2012-2013 # All rights reserved. # # Redistribution and use in source and binary forms, with or without modification, # are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import os import hmac import re from ansible.module_utils.six.moves.urllib.parse import urlparse try: from hashlib import sha1 except ImportError: import sha as sha1 HASHED_KEY_MAGIC = "|1|" def is_ssh_url(url): """ check if url is ssh """ if "@" in url and "://" not in url: return True for scheme in "ssh://", "git+ssh://", "ssh+git://": if url.startswith(scheme): return True return False def get_fqdn_and_port(repo_url): """ chop the hostname and port out of a url """ fqdn = None port = None ipv6_re = re.compile('(\[[^]]*\])(?::([0-9]+))?') if "@" in repo_url and "://" not in repo_url: # most likely an user@host:path or user@host/path type URL repo_url = repo_url.split("@", 1)[1] match = ipv6_re.match(repo_url) # For this type of URL, colon specifies the path, not the port if match: fqdn, path = match.groups() elif ":" in repo_url: fqdn = repo_url.split(":")[0] elif "/" in repo_url: fqdn = repo_url.split("/")[0] elif "://" in repo_url: # this should be something we can parse with urlparse parts = urlparse(repo_url) # parts[1] will be empty on python2.4 on ssh:// or git:// urls, so # ensure we actually have a parts[1] before continuing. if parts[1] != '': fqdn = parts[1] if "@" in fqdn: fqdn = fqdn.split("@", 1)[1] match = ipv6_re.match(fqdn) if match: fqdn, port = match.groups() elif ":" in fqdn: fqdn, port = fqdn.split(":")[0:2] return fqdn, port def check_hostkey(module, fqdn): return not not_in_host_file(module, fqdn) # this is a variant of code found in connection_plugins/paramiko.py and we should modify # the paramiko code to import and use this. 
def not_in_host_file(self, host): if 'USER' in os.environ: user_host_file = os.path.expandvars("~${USER}/.ssh/known_hosts") else: user_host_file = "~/.ssh/known_hosts" user_host_file = os.path.expanduser(user_host_file) host_file_list = [] host_file_list.append(user_host_file) host_file_list.append("/etc/ssh/ssh_known_hosts") host_file_list.append("/etc/ssh/ssh_known_hosts2") host_file_list.append("/etc/openssh/ssh_known_hosts") hfiles_not_found = 0 for hf in host_file_list: if not os.path.exists(hf): hfiles_not_found += 1 continue try: host_fh = open(hf) except IOError: hfiles_not_found += 1 continue else: data = host_fh.read() host_fh.close() for line in data.split("\n"): if line is None or " " not in line: continue tokens = line.split() if tokens[0].find(HASHED_KEY_MAGIC) == 0: # this is a hashed known host entry try: (kn_salt, kn_host) = tokens[0][len(HASHED_KEY_MAGIC):].split("|", 2) hash = hmac.new(kn_salt.decode('base64'), digestmod=sha1) hash.update(host) if hash.digest() == kn_host.decode('base64'): return False except: # invalid hashed host key, skip it continue else: # standard host file entry if host in tokens[0]: return False return True def add_host_key(module, fqdn, port=22, key_type="rsa", create_dir=False): """ use ssh-keyscan to add the hostkey """ keyscan_cmd = module.get_bin_path('ssh-keyscan', True) if 'USER' in os.environ: user_ssh_dir = os.path.expandvars("~${USER}/.ssh/") user_host_file = os.path.expandvars("~${USER}/.ssh/known_hosts") else: user_ssh_dir = "~/.ssh/" user_host_file = "~/.ssh/known_hosts" user_ssh_dir = os.path.expanduser(user_ssh_dir) if not os.path.exists(user_ssh_dir): if create_dir: try: os.makedirs(user_ssh_dir, int('700', 8)) except: module.fail_json(msg="failed to create host key directory: %s" % user_ssh_dir) else: module.fail_json(msg="%s does not exist" % user_ssh_dir) elif not os.path.isdir(user_ssh_dir): module.fail_json(msg="%s is not a directory" % user_ssh_dir) if port: this_cmd = "%s -t %s -p %s %s" % (keyscan_cmd, key_type, port, fqdn) else: this_cmd = "%s -t %s %s" % (keyscan_cmd, key_type, fqdn) rc, out, err = module.run_command(this_cmd) # ssh-keyscan gives a 0 exit code and prints nothing on timeout if rc != 0 or not out: msg = 'failed to retrieve hostkey' if not out: msg += '. "%s" returned no matches.' % this_cmd else: msg += ' using command "%s". [stdout]: %s' % (this_cmd, out) if err: msg += ' [stderr]: %s' % err module.fail_json(msg=msg) module.append_to_file(user_host_file, out) return rc, out, err
gpl-3.0
5,912,683,934,801,089,000
34.476923
94
0.596849
false
google/dnae
services/service-example/service_example_run.py
1
9308
# Copyright 2018 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """DNA - Service example - Main service module. Main methods to handle the service tasks. This example service uses DCM APIs (and the corresponding DNA connector) to collect DCM report data, elaborate it, and push it to a BigQuery dataset. """ import base64 import json import logging import sys from dcm_connector import DCMConnector from dcm_connector import DCMReport from dna_general_settings import CREDENTIAL_FILE from dna_general_settings import DCM_API_VER from dna_general_settings import GDS_KIND_LOG_SERVICE from dna_logging import configure_logging from dna_project_settings import DCM_PROFILE_ID from dna_project_settings import PROJECT_ID from gcp_connector import GCPConnector from gcp_connector import GCPTable from service_example_settings import DATA_SCHEMA_STANDARD from service_example_settings import DCM_REPORT_DATE_RANGE from service_example_settings import DCM_REPORT_NAME from service_example_settings import DCM_REPORT_TEMPLATE from service_example_settings import FIELD_MAP_STANDARD from service_example_settings import GBQ_TABLE from service_example_settings import SERVICE_NAME from utils import TextUtils # Configure logging configure_logging() logger = logging.getLogger('DNA-Service-example') def get_field(field, row, field_map, default_value=None): """Access fields in a row according to the specified field map. Args: field: field to extract. row: row to extract field from. field_map: field map. default_value: value to be returned in case the field is not mapped. Returns: specified field value if the field is mapped, the default value otherwise. """ field_info = field_map.get(field) if not field_info: return default_value else: return row[field_info['idx']] def extract(dcm, params): """Create a DCM report and extract the resulting data. Args: dcm: initiated instance of the DCM connector. params: parameters to use for the DCM report. Returns: the data object resulting from the DCM report. """ report = DCMReport(params['report_template']) report.setname(params['report_name']) report.setdates(params['date_range']) advertiser_ids = params['advertiser_id'].split(' ') # Add filters for item in advertiser_ids: report.addfilter('dfa:advertiser', item) # Insert and run a new report rid = dcm.createreport(report) fid = dcm.runreport(rid) # Get raw report data for the specified report and file data = dcm.getreportdata(rid, fid) # Delete the report from DCM dcm.deletereport(rid) return data def transform(dest_table, raw_data): """Transform the report data and add it to the destination table. Args: dest_table: GCPConnector object. raw_data: GCPTable object containing the transformed data. """ # Enable smart completion for the GCPTable object. assert isinstance(dest_table, GCPTable) # Loop over raw data rows (excluding header row). 
for row in raw_data[1:]: try: # Add all fields to the destination table dest_table.addrow([ get_field('Advertiser', row, FIELD_MAP_STANDARD), get_field('AdvertiserID', row, FIELD_MAP_STANDARD), get_field('Campaign', row, FIELD_MAP_STANDARD), get_field('CampaignID', row, FIELD_MAP_STANDARD), get_field('PlacementSize', row, FIELD_MAP_STANDARD), get_field('CreativeType', row, FIELD_MAP_STANDARD), get_field('CreativeSize', row, FIELD_MAP_STANDARD), get_field('PlatformType', row, FIELD_MAP_STANDARD), get_field('Site', row, FIELD_MAP_STANDARD), get_field('Month', row, FIELD_MAP_STANDARD), get_field('Week', row, FIELD_MAP_STANDARD), get_field('Date', row, FIELD_MAP_STANDARD), int(get_field('Clicks', row, FIELD_MAP_STANDARD)), int(get_field('Impressions', row, FIELD_MAP_STANDARD)), float(get_field('ViewableTimeSeconds', row, FIELD_MAP_STANDARD)), int(get_field('EligibleImpressions', row, FIELD_MAP_STANDARD)), int(get_field('MeasurableImpressions', row, FIELD_MAP_STANDARD)), int(get_field('ViewableImpressions', row, FIELD_MAP_STANDARD)), ]) # pylint: disable=broad-except except Exception as e: logger.debug('[%s] - Error "%s" occurs while adding the following row', SERVICE_NAME, str(e)) logger.debug(str(row)) # pylint: enable=broad-except def load(gcp, source_table, params): """Load transformed data onto Google Cloud Platform. Args: gcp: GCPConnector object. source_table: GCPTable object containing the transformed data. params: dictionary containing all relevant GCP parameters. Returns: The BigQuery job id. """ assert isinstance(gcp, GCPConnector) assert isinstance(source_table, GCPTable) bucket = params['bucket'] filename = params['filename'] dataset = params['dataset'] table = params['table'] # Upload data onto a specified Google Cloud Storage bucket/filename gcsuri = gcp.gcs_uploadtable(source_table, bucket, filename) # Create a BigQuery job to transfer uploaded data from GCS to a BigQuery table if params['append']: # If append is "True" append transformed data to the table job_id = gcp.bq_importfromgcs( gcsuri=gcsuri, dataset=dataset, table=table, schema=source_table.schema, encoding=source_table.encoding, writemode='WRITE_APPEND') else: # Otherwise overwrite (or create a new) table job_id = gcp.bq_importfromgcs( gcsuri=gcsuri, dataset=dataset, table=table, schema=source_table.schema, encoding=source_table.encoding) return job_id def service_task(dcm, gcp, params): """Main ETL job, putting together all ETL functions to implement the service. Args: dcm: the DCMConnector instance. gcp: the GCPConnector instance. params: dictionary containing all parameters relevant for the ETL task. Returns: The BigQuery job id. """ # Initiate a GCPTable object (to be used for data ingestion) using the # appropriate data schema. dest_table = GCPTable(params['schema']) # Extract data via a DCM report. raw_data = extract(dcm, params) # Transform data if necessary (and upload it to the destination table) transform(dest_table, raw_data) # ...or alternatively ingest extracted data as it is # dest_table.ingest(raw_data, True) # Load data into Google Big Query. job_id = load(gcp, dest_table, params) return job_id def main(argv): """Main function reading the task and launching the corresponding ETL job. Args: argv: array of parameters: (1) queue name, (2) task id. """ # Get input arguments passed by the service-example-run.sh script queue_name = str(argv[1]) task_name = str(argv[2]) logger.info('Starting service-example processing task. Queue name: [%s]. 
' 'Task name: [%s]', queue_name, task_name) # Initiate connectors for Google Cloud Platform and DCM. gcp = GCPConnector(PROJECT_ID) dcm = DCMConnector( credential_file=CREDENTIAL_FILE, user_email=None, profile_id=DCM_PROFILE_ID, api_version=DCM_API_VER) # Get the first available task from the queue. task = gcp.gct_gettask(task_name) payload = task['pullMessage']['payload'] params = json.loads(base64.urlsafe_b64decode(str(payload))) # Add service-specific params. params['report_template'] = DCM_REPORT_TEMPLATE params['report_name'] = DCM_REPORT_NAME params['date_range'] = DCM_REPORT_DATE_RANGE params['schema'] = DATA_SCHEMA_STANDARD params['filename'] = TextUtils.timestamp() + '_' + str( params['account_id']) + '.csv' params['table'] = GBQ_TABLE params['append'] = False # Log run info as Datastore entity. run_entity = gcp.gds_insert( kind=GDS_KIND_LOG_SERVICE, attributes={ 'created': TextUtils.timestamp().decode(), 'service': params['service'].decode(), 'status': u'RUNNING', 'error': None, 'bqjob': None, 'bqstatus': None, }) try: # Run the ETL task with the given params and update the Datastore entity. job_id = service_task(dcm, gcp, params) run_entity['bqjob'] = job_id.decode() run_entity['bqstatus'] = u'RUNNING' run_entity['status'] = u'DONE' # pylint: disable=broad-except except Exception as e: run_entity['status'] = u'FAILED' run_entity['error'] = str(e).decode() logger.error( '[%s] - The following error occurs while executing task <%s> : <%s>', SERVICE_NAME, task_name, str(e)) finally: gcp.gds_update(run_entity) # pylint: enable=broad-except if __name__ == '__main__': main(sys.argv)
apache-2.0
-1,889,882,232,288,534,300
31.432056
80
0.691233
false
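In the service_example_run.py record above, transform() resolves report columns through FIELD_MAP_STANDARD, which is imported from service_example_settings and not included in this record. A purely hypothetical map and row, only to illustrate how get_field turns column names into row positions (the real column order and names may differ):

FIELD_MAP_EXAMPLE = {            # hypothetical column name -> row position
    'Advertiser': {'idx': 0},
    'Campaign': {'idx': 1},
    'Clicks': {'idx': 2},
}
row = ['ACME Corp', 'Spring Launch', '42']

get_field('Advertiser', row, FIELD_MAP_EXAMPLE)       # -> 'ACME Corp'
int(get_field('Clicks', row, FIELD_MAP_EXAMPLE))      # -> 42
get_field('Site', row, FIELD_MAP_EXAMPLE, 'unknown')  # -> 'unknown' (unmapped field)

This is why transform() can cast Clicks/Impressions to int and ViewableTimeSeconds to float: get_field only does the positional lookup and leaves type conversion to the caller.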
pypa/setuptools
setuptools/_distutils/command/bdist_msi.py
26
35579
# Copyright (C) 2005, 2006 Martin von Löwis # Licensed to PSF under a Contributor Agreement. # The bdist_wininst command proper # based on bdist_wininst """ Implements the bdist_msi command. """ import os import sys import warnings from distutils.core import Command from distutils.dir_util import remove_tree from distutils.sysconfig import get_python_version from distutils.version import StrictVersion from distutils.errors import DistutilsOptionError from distutils.util import get_platform from distutils import log import msilib from msilib import schema, sequence, text from msilib import Directory, Feature, Dialog, add_data class PyDialog(Dialog): """Dialog class with a fixed layout: controls at the top, then a ruler, then a list of buttons: back, next, cancel. Optionally a bitmap at the left.""" def __init__(self, *args, **kw): """Dialog(database, name, x, y, w, h, attributes, title, first, default, cancel, bitmap=true)""" Dialog.__init__(self, *args) ruler = self.h - 36 bmwidth = 152*ruler/328 #if kw.get("bitmap", True): # self.bitmap("Bitmap", 0, 0, bmwidth, ruler, "PythonWin") self.line("BottomLine", 0, ruler, self.w, 0) def title(self, title): "Set the title text of the dialog at the top." # name, x, y, w, h, flags=Visible|Enabled|Transparent|NoPrefix, # text, in VerdanaBold10 self.text("Title", 15, 10, 320, 60, 0x30003, r"{\VerdanaBold10}%s" % title) def back(self, title, next, name = "Back", active = 1): """Add a back button with a given title, the tab-next button, its name in the Control table, possibly initially disabled. Return the button, so that events can be associated""" if active: flags = 3 # Visible|Enabled else: flags = 1 # Visible return self.pushbutton(name, 180, self.h-27 , 56, 17, flags, title, next) def cancel(self, title, next, name = "Cancel", active = 1): """Add a cancel button with a given title, the tab-next button, its name in the Control table, possibly initially disabled. Return the button, so that events can be associated""" if active: flags = 3 # Visible|Enabled else: flags = 1 # Visible return self.pushbutton(name, 304, self.h-27, 56, 17, flags, title, next) def next(self, title, next, name = "Next", active = 1): """Add a Next button with a given title, the tab-next button, its name in the Control table, possibly initially disabled. Return the button, so that events can be associated""" if active: flags = 3 # Visible|Enabled else: flags = 1 # Visible return self.pushbutton(name, 236, self.h-27, 56, 17, flags, title, next) def xbutton(self, name, title, next, xpos): """Add a button with a given title, the tab-next button, its name in the Control table, giving its x position; the y-position is aligned with the other buttons.
Return the button, so that events can be associated""" return self.pushbutton(name, int(self.w*xpos - 28), self.h-27, 56, 17, 3, title, next) class bdist_msi(Command): description = "create a Microsoft Installer (.msi) binary distribution" user_options = [('bdist-dir=', None, "temporary directory for creating the distribution"), ('plat-name=', 'p', "platform name to embed in generated filenames " "(default: %s)" % get_platform()), ('keep-temp', 'k', "keep the pseudo-installation tree around after " + "creating the distribution archive"), ('target-version=', None, "require a specific python version" + " on the target system"), ('no-target-compile', 'c', "do not compile .py to .pyc on the target system"), ('no-target-optimize', 'o', "do not compile .py to .pyo (optimized) " "on the target system"), ('dist-dir=', 'd', "directory to put final built distributions in"), ('skip-build', None, "skip rebuilding everything (for testing/debugging)"), ('install-script=', None, "basename of installation script to be run after " "installation or before deinstallation"), ('pre-install-script=', None, "Fully qualified filename of a script to be run before " "any files are installed. This script need not be in the " "distribution"), ] boolean_options = ['keep-temp', 'no-target-compile', 'no-target-optimize', 'skip-build'] all_versions = ['2.0', '2.1', '2.2', '2.3', '2.4', '2.5', '2.6', '2.7', '2.8', '2.9', '3.0', '3.1', '3.2', '3.3', '3.4', '3.5', '3.6', '3.7', '3.8', '3.9'] other_version = 'X' def __init__(self, *args, **kw): super().__init__(*args, **kw) warnings.warn("bdist_msi command is deprecated since Python 3.9, " "use bdist_wheel (wheel packages) instead", DeprecationWarning, 2) def initialize_options(self): self.bdist_dir = None self.plat_name = None self.keep_temp = 0 self.no_target_compile = 0 self.no_target_optimize = 0 self.target_version = None self.dist_dir = None self.skip_build = None self.install_script = None self.pre_install_script = None self.versions = None def finalize_options(self): self.set_undefined_options('bdist', ('skip_build', 'skip_build')) if self.bdist_dir is None: bdist_base = self.get_finalized_command('bdist').bdist_base self.bdist_dir = os.path.join(bdist_base, 'msi') short_version = get_python_version() if (not self.target_version) and self.distribution.has_ext_modules(): self.target_version = short_version if self.target_version: self.versions = [self.target_version] if not self.skip_build and self.distribution.has_ext_modules()\ and self.target_version != short_version: raise DistutilsOptionError( "target version can only be %s, or the '--skip-build'" " option must be specified" % (short_version,)) else: self.versions = list(self.all_versions) self.set_undefined_options('bdist', ('dist_dir', 'dist_dir'), ('plat_name', 'plat_name'), ) if self.pre_install_script: raise DistutilsOptionError( "the pre-install-script feature is not yet implemented") if self.install_script: for script in self.distribution.scripts: if self.install_script == os.path.basename(script): break else: raise DistutilsOptionError( "install_script '%s' not found in scripts" % self.install_script) self.install_script_key = None def run(self): if not self.skip_build: self.run_command('build') install = self.reinitialize_command('install', reinit_subcommands=1) install.prefix = self.bdist_dir install.skip_build = self.skip_build install.warn_dir = 0 install_lib = self.reinitialize_command('install_lib') # we do not want to include pyc or pyo files install_lib.compile = 0 install_lib.optimize = 0 if 
self.distribution.has_ext_modules(): # If we are building an installer for a Python version other # than the one we are currently running, then we need to ensure # our build_lib reflects the other Python version rather than ours. # Note that for target_version!=sys.version, we must have skipped the # build step, so there is no issue with enforcing the build of this # version. target_version = self.target_version if not target_version: assert self.skip_build, "Should have already checked this" target_version = '%d.%d' % sys.version_info[:2] plat_specifier = ".%s-%s" % (self.plat_name, target_version) build = self.get_finalized_command('build') build.build_lib = os.path.join(build.build_base, 'lib' + plat_specifier) log.info("installing to %s", self.bdist_dir) install.ensure_finalized() # avoid warning of 'install_lib' about installing # into a directory not in sys.path sys.path.insert(0, os.path.join(self.bdist_dir, 'PURELIB')) install.run() del sys.path[0] self.mkpath(self.dist_dir) fullname = self.distribution.get_fullname() installer_name = self.get_installer_filename(fullname) installer_name = os.path.abspath(installer_name) if os.path.exists(installer_name): os.unlink(installer_name) metadata = self.distribution.metadata author = metadata.author if not author: author = metadata.maintainer if not author: author = "UNKNOWN" version = metadata.get_version() # ProductVersion must be strictly numeric # XXX need to deal with prerelease versions sversion = "%d.%d.%d" % StrictVersion(version).version # Prefix ProductName with Python x.y, so that # it sorts together with the other Python packages # in Add-Remove-Programs (APR) fullname = self.distribution.get_fullname() if self.target_version: product_name = "Python %s %s" % (self.target_version, fullname) else: product_name = "Python %s" % (fullname) self.db = msilib.init_database(installer_name, schema, product_name, msilib.gen_uuid(), sversion, author) msilib.add_tables(self.db, sequence) props = [('DistVersion', version)] email = metadata.author_email or metadata.maintainer_email if email: props.append(("ARPCONTACT", email)) if metadata.url: props.append(("ARPURLINFOABOUT", metadata.url)) if props: add_data(self.db, 'Property', props) self.add_find_python() self.add_files() self.add_scripts() self.add_ui() self.db.Commit() if hasattr(self.distribution, 'dist_files'): tup = 'bdist_msi', self.target_version or 'any', fullname self.distribution.dist_files.append(tup) if not self.keep_temp: remove_tree(self.bdist_dir, dry_run=self.dry_run) def add_files(self): db = self.db cab = msilib.CAB("distfiles") rootdir = os.path.abspath(self.bdist_dir) root = Directory(db, cab, None, rootdir, "TARGETDIR", "SourceDir") f = Feature(db, "Python", "Python", "Everything", 0, 1, directory="TARGETDIR") items = [(f, root, '')] for version in self.versions + [self.other_version]: target = "TARGETDIR" + version name = default = "Python" + version desc = "Everything" if version is self.other_version: title = "Python from another location" level = 2 else: title = "Python %s from registry" % version level = 1 f = Feature(db, name, title, desc, 1, level, directory=target) dir = Directory(db, cab, root, rootdir, target, default) items.append((f, dir, version)) db.Commit() seen = {} for feature, dir, version in items: todo = [dir] while todo: dir = todo.pop() for file in os.listdir(dir.absolute): afile = os.path.join(dir.absolute, file) if os.path.isdir(afile): short = "%s|%s" % (dir.make_short(file), file) default = file + version newdir = Directory(db, cab, dir, 
file, default, short) todo.append(newdir) else: if not dir.component: dir.start_component(dir.logical, feature, 0) if afile not in seen: key = seen[afile] = dir.add_file(file) if file==self.install_script: if self.install_script_key: raise DistutilsOptionError( "Multiple files with name %s" % file) self.install_script_key = '[#%s]' % key else: key = seen[afile] add_data(self.db, "DuplicateFile", [(key + version, dir.component, key, None, dir.logical)]) db.Commit() cab.commit(db) def add_find_python(self): """Adds code to the installer to compute the location of Python. Properties PYTHON.MACHINE.X.Y and PYTHON.USER.X.Y will be set from the registry for each version of Python. Properties TARGETDIRX.Y will be set from PYTHON.USER.X.Y if defined, else from PYTHON.MACHINE.X.Y. Properties PYTHONX.Y will be set to TARGETDIRX.Y\\python.exe""" start = 402 for ver in self.versions: install_path = r"SOFTWARE\Python\PythonCore\%s\InstallPath" % ver machine_reg = "python.machine." + ver user_reg = "python.user." + ver machine_prop = "PYTHON.MACHINE." + ver user_prop = "PYTHON.USER." + ver machine_action = "PythonFromMachine" + ver user_action = "PythonFromUser" + ver exe_action = "PythonExe" + ver target_dir_prop = "TARGETDIR" + ver exe_prop = "PYTHON" + ver if msilib.Win64: # type: msidbLocatorTypeRawValue + msidbLocatorType64bit Type = 2+16 else: Type = 2 add_data(self.db, "RegLocator", [(machine_reg, 2, install_path, None, Type), (user_reg, 1, install_path, None, Type)]) add_data(self.db, "AppSearch", [(machine_prop, machine_reg), (user_prop, user_reg)]) add_data(self.db, "CustomAction", [(machine_action, 51+256, target_dir_prop, "[" + machine_prop + "]"), (user_action, 51+256, target_dir_prop, "[" + user_prop + "]"), (exe_action, 51+256, exe_prop, "[" + target_dir_prop + "]\\python.exe"), ]) add_data(self.db, "InstallExecuteSequence", [(machine_action, machine_prop, start), (user_action, user_prop, start + 1), (exe_action, None, start + 2), ]) add_data(self.db, "InstallUISequence", [(machine_action, machine_prop, start), (user_action, user_prop, start + 1), (exe_action, None, start + 2), ]) add_data(self.db, "Condition", [("Python" + ver, 0, "NOT TARGETDIR" + ver)]) start += 4 assert start < 500 def add_scripts(self): if self.install_script: start = 6800 for ver in self.versions + [self.other_version]: install_action = "install_script." + ver exe_prop = "PYTHON" + ver add_data(self.db, "CustomAction", [(install_action, 50, exe_prop, self.install_script_key)]) add_data(self.db, "InstallExecuteSequence", [(install_action, "&Python%s=3" % ver, start)]) start += 1 # XXX pre-install scripts are currently refused in finalize_options() # but if this feature is completed, it will also need to add # entries for each version as the above code does if self.pre_install_script: scriptfn = os.path.join(self.bdist_dir, "preinstall.bat") with open(scriptfn, "w") as f: # The batch file will be executed with [PYTHON], so that %1 # is the path to the Python interpreter; %0 will be the path # of the batch file. 
# rem =""" # %1 %0 # exit # """ # <actual script> f.write('rem ="""\n%1 %0\nexit\n"""\n') with open(self.pre_install_script) as fin: f.write(fin.read()) add_data(self.db, "Binary", [("PreInstall", msilib.Binary(scriptfn)) ]) add_data(self.db, "CustomAction", [("PreInstall", 2, "PreInstall", None) ]) add_data(self.db, "InstallExecuteSequence", [("PreInstall", "NOT Installed", 450)]) def add_ui(self): db = self.db x = y = 50 w = 370 h = 300 title = "[ProductName] Setup" # see "Dialog Style Bits" modal = 3 # visible | modal modeless = 1 # visible track_disk_space = 32 # UI customization properties add_data(db, "Property", # See "DefaultUIFont Property" [("DefaultUIFont", "DlgFont8"), # See "ErrorDialog Style Bit" ("ErrorDialog", "ErrorDlg"), ("Progress1", "Install"), # modified in maintenance type dlg ("Progress2", "installs"), ("MaintenanceForm_Action", "Repair"), # possible values: ALL, JUSTME ("WhichUsers", "ALL") ]) # Fonts, see "TextStyle Table" add_data(db, "TextStyle", [("DlgFont8", "Tahoma", 9, None, 0), ("DlgFontBold8", "Tahoma", 8, None, 1), #bold ("VerdanaBold10", "Verdana", 10, None, 1), ("VerdanaRed9", "Verdana", 9, 255, 0), ]) # UI Sequences, see "InstallUISequence Table", "Using a Sequence Table" # Numbers indicate sequence; see sequence.py for how these action integrate add_data(db, "InstallUISequence", [("PrepareDlg", "Not Privileged or Windows9x or Installed", 140), ("WhichUsersDlg", "Privileged and not Windows9x and not Installed", 141), # In the user interface, assume all-users installation if privileged. ("SelectFeaturesDlg", "Not Installed", 1230), # XXX no support for resume installations yet #("ResumeDlg", "Installed AND (RESUME OR Preselected)", 1240), ("MaintenanceTypeDlg", "Installed AND NOT RESUME AND NOT Preselected", 1250), ("ProgressDlg", None, 1280)]) add_data(db, 'ActionText', text.ActionText) add_data(db, 'UIText', text.UIText) ##################################################################### # Standard dialogs: FatalError, UserExit, ExitDialog fatal=PyDialog(db, "FatalError", x, y, w, h, modal, title, "Finish", "Finish", "Finish") fatal.title("[ProductName] Installer ended prematurely") fatal.back("< Back", "Finish", active = 0) fatal.cancel("Cancel", "Back", active = 0) fatal.text("Description1", 15, 70, 320, 80, 0x30003, "[ProductName] setup ended prematurely because of an error. Your system has not been modified. To install this program at a later time, please run the installation again.") fatal.text("Description2", 15, 155, 320, 20, 0x30003, "Click the Finish button to exit the Installer.") c=fatal.next("Finish", "Cancel", name="Finish") c.event("EndDialog", "Exit") user_exit=PyDialog(db, "UserExit", x, y, w, h, modal, title, "Finish", "Finish", "Finish") user_exit.title("[ProductName] Installer was interrupted") user_exit.back("< Back", "Finish", active = 0) user_exit.cancel("Cancel", "Back", active = 0) user_exit.text("Description1", 15, 70, 320, 80, 0x30003, "[ProductName] setup was interrupted. Your system has not been modified. 
" "To install this program at a later time, please run the installation again.") user_exit.text("Description2", 15, 155, 320, 20, 0x30003, "Click the Finish button to exit the Installer.") c = user_exit.next("Finish", "Cancel", name="Finish") c.event("EndDialog", "Exit") exit_dialog = PyDialog(db, "ExitDialog", x, y, w, h, modal, title, "Finish", "Finish", "Finish") exit_dialog.title("Completing the [ProductName] Installer") exit_dialog.back("< Back", "Finish", active = 0) exit_dialog.cancel("Cancel", "Back", active = 0) exit_dialog.text("Description", 15, 235, 320, 20, 0x30003, "Click the Finish button to exit the Installer.") c = exit_dialog.next("Finish", "Cancel", name="Finish") c.event("EndDialog", "Return") ##################################################################### # Required dialog: FilesInUse, ErrorDlg inuse = PyDialog(db, "FilesInUse", x, y, w, h, 19, # KeepModeless|Modal|Visible title, "Retry", "Retry", "Retry", bitmap=False) inuse.text("Title", 15, 6, 200, 15, 0x30003, r"{\DlgFontBold8}Files in Use") inuse.text("Description", 20, 23, 280, 20, 0x30003, "Some files that need to be updated are currently in use.") inuse.text("Text", 20, 55, 330, 50, 3, "The following applications are using files that need to be updated by this setup. Close these applications and then click Retry to continue the installation or Cancel to exit it.") inuse.control("List", "ListBox", 20, 107, 330, 130, 7, "FileInUseProcess", None, None, None) c=inuse.back("Exit", "Ignore", name="Exit") c.event("EndDialog", "Exit") c=inuse.next("Ignore", "Retry", name="Ignore") c.event("EndDialog", "Ignore") c=inuse.cancel("Retry", "Exit", name="Retry") c.event("EndDialog","Retry") # See "Error Dialog". See "ICE20" for the required names of the controls. error = Dialog(db, "ErrorDlg", 50, 10, 330, 101, 65543, # Error|Minimize|Modal|Visible title, "ErrorText", None, None) error.text("ErrorText", 50,9,280,48,3, "") #error.control("ErrorIcon", "Icon", 15, 9, 24, 24, 5242881, None, "py.ico", None, None) error.pushbutton("N",120,72,81,21,3,"No",None).event("EndDialog","ErrorNo") error.pushbutton("Y",240,72,81,21,3,"Yes",None).event("EndDialog","ErrorYes") error.pushbutton("A",0,72,81,21,3,"Abort",None).event("EndDialog","ErrorAbort") error.pushbutton("C",42,72,81,21,3,"Cancel",None).event("EndDialog","ErrorCancel") error.pushbutton("I",81,72,81,21,3,"Ignore",None).event("EndDialog","ErrorIgnore") error.pushbutton("O",159,72,81,21,3,"Ok",None).event("EndDialog","ErrorOk") error.pushbutton("R",198,72,81,21,3,"Retry",None).event("EndDialog","ErrorRetry") ##################################################################### # Global "Query Cancel" dialog cancel = Dialog(db, "CancelDlg", 50, 10, 260, 85, 3, title, "No", "No", "No") cancel.text("Text", 48, 15, 194, 30, 3, "Are you sure you want to cancel [ProductName] installation?") #cancel.control("Icon", "Icon", 15, 15, 24, 24, 5242881, None, # "py.ico", None, None) c=cancel.pushbutton("Yes", 72, 57, 56, 17, 3, "Yes", "No") c.event("EndDialog", "Exit") c=cancel.pushbutton("No", 132, 57, 56, 17, 3, "No", "Yes") c.event("EndDialog", "Return") ##################################################################### # Global "Wait for costing" dialog costing = Dialog(db, "WaitForCostingDlg", 50, 10, 260, 85, modal, title, "Return", "Return", "Return") costing.text("Text", 48, 15, 194, 30, 3, "Please wait while the installer finishes determining your disk space requirements.") c = costing.pushbutton("Return", 102, 57, 56, 17, 3, "Return", None) c.event("EndDialog", "Exit") 
##################################################################### # Preparation dialog: no user input except cancellation prep = PyDialog(db, "PrepareDlg", x, y, w, h, modeless, title, "Cancel", "Cancel", "Cancel") prep.text("Description", 15, 70, 320, 40, 0x30003, "Please wait while the Installer prepares to guide you through the installation.") prep.title("Welcome to the [ProductName] Installer") c=prep.text("ActionText", 15, 110, 320, 20, 0x30003, "Pondering...") c.mapping("ActionText", "Text") c=prep.text("ActionData", 15, 135, 320, 30, 0x30003, None) c.mapping("ActionData", "Text") prep.back("Back", None, active=0) prep.next("Next", None, active=0) c=prep.cancel("Cancel", None) c.event("SpawnDialog", "CancelDlg") ##################################################################### # Feature (Python directory) selection seldlg = PyDialog(db, "SelectFeaturesDlg", x, y, w, h, modal, title, "Next", "Next", "Cancel") seldlg.title("Select Python Installations") seldlg.text("Hint", 15, 30, 300, 20, 3, "Select the Python locations where %s should be installed." % self.distribution.get_fullname()) seldlg.back("< Back", None, active=0) c = seldlg.next("Next >", "Cancel") order = 1 c.event("[TARGETDIR]", "[SourceDir]", ordering=order) for version in self.versions + [self.other_version]: order += 1 c.event("[TARGETDIR]", "[TARGETDIR%s]" % version, "FEATURE_SELECTED AND &Python%s=3" % version, ordering=order) c.event("SpawnWaitDialog", "WaitForCostingDlg", ordering=order + 1) c.event("EndDialog", "Return", ordering=order + 2) c = seldlg.cancel("Cancel", "Features") c.event("SpawnDialog", "CancelDlg") c = seldlg.control("Features", "SelectionTree", 15, 60, 300, 120, 3, "FEATURE", None, "PathEdit", None) c.event("[FEATURE_SELECTED]", "1") ver = self.other_version install_other_cond = "FEATURE_SELECTED AND &Python%s=3" % ver dont_install_other_cond = "FEATURE_SELECTED AND &Python%s<>3" % ver c = seldlg.text("Other", 15, 200, 300, 15, 3, "Provide an alternate Python location") c.condition("Enable", install_other_cond) c.condition("Show", install_other_cond) c.condition("Disable", dont_install_other_cond) c.condition("Hide", dont_install_other_cond) c = seldlg.control("PathEdit", "PathEdit", 15, 215, 300, 16, 1, "TARGETDIR" + ver, None, "Next", None) c.condition("Enable", install_other_cond) c.condition("Show", install_other_cond) c.condition("Disable", dont_install_other_cond) c.condition("Hide", dont_install_other_cond) ##################################################################### # Disk cost cost = PyDialog(db, "DiskCostDlg", x, y, w, h, modal, title, "OK", "OK", "OK", bitmap=False) cost.text("Title", 15, 6, 200, 15, 0x30003, r"{\DlgFontBold8}Disk Space Requirements") cost.text("Description", 20, 20, 280, 20, 0x30003, "The disk space required for the installation of the selected features.") cost.text("Text", 20, 53, 330, 60, 3, "The highlighted volumes (if any) do not have enough disk space " "available for the currently selected features. You can either " "remove some files from the highlighted volumes, or choose to " "install less features onto local drive(s), or select different " "destination drive(s).") cost.control("VolumeList", "VolumeCostList", 20, 100, 330, 150, 393223, None, "{120}{70}{70}{70}{70}", None, None) cost.xbutton("OK", "Ok", None, 0.5).event("EndDialog", "Return") ##################################################################### # WhichUsers Dialog. Only available on NT, and for privileged users. 
# This must be run before FindRelatedProducts, because that will # take into account whether the previous installation was per-user # or per-machine. We currently don't support going back to this # dialog after "Next" was selected; to support this, we would need to # find how to reset the ALLUSERS property, and how to re-run # FindRelatedProducts. # On Windows9x, the ALLUSERS property is ignored on the command line # and in the Property table, but installer fails according to the documentation # if a dialog attempts to set ALLUSERS. whichusers = PyDialog(db, "WhichUsersDlg", x, y, w, h, modal, title, "AdminInstall", "Next", "Cancel") whichusers.title("Select whether to install [ProductName] for all users of this computer.") # A radio group with two options: allusers, justme g = whichusers.radiogroup("AdminInstall", 15, 60, 260, 50, 3, "WhichUsers", "", "Next") g.add("ALL", 0, 5, 150, 20, "Install for all users") g.add("JUSTME", 0, 25, 150, 20, "Install just for me") whichusers.back("Back", None, active=0) c = whichusers.next("Next >", "Cancel") c.event("[ALLUSERS]", "1", 'WhichUsers="ALL"', 1) c.event("EndDialog", "Return", ordering = 2) c = whichusers.cancel("Cancel", "AdminInstall") c.event("SpawnDialog", "CancelDlg") ##################################################################### # Installation Progress dialog (modeless) progress = PyDialog(db, "ProgressDlg", x, y, w, h, modeless, title, "Cancel", "Cancel", "Cancel", bitmap=False) progress.text("Title", 20, 15, 200, 15, 0x30003, r"{\DlgFontBold8}[Progress1] [ProductName]") progress.text("Text", 35, 65, 300, 30, 3, "Please wait while the Installer [Progress2] [ProductName]. " "This may take several minutes.") progress.text("StatusLabel", 35, 100, 35, 20, 3, "Status:") c=progress.text("ActionText", 70, 100, w-70, 20, 3, "Pondering...") c.mapping("ActionText", "Text") #c=progress.text("ActionData", 35, 140, 300, 20, 3, None) #c.mapping("ActionData", "Text") c=progress.control("ProgressBar", "ProgressBar", 35, 120, 300, 10, 65537, None, "Progress done", None, None) c.mapping("SetProgress", "Progress") progress.back("< Back", "Next", active=False) progress.next("Next >", "Cancel", active=False) progress.cancel("Cancel", "Back").event("SpawnDialog", "CancelDlg") ################################################################### # Maintenance type: repair/uninstall maint = PyDialog(db, "MaintenanceTypeDlg", x, y, w, h, modal, title, "Next", "Next", "Cancel") maint.title("Welcome to the [ProductName] Setup Wizard") maint.text("BodyText", 15, 63, 330, 42, 3, "Select whether you want to repair or remove [ProductName].") g=maint.radiogroup("RepairRadioGroup", 15, 108, 330, 60, 3, "MaintenanceForm_Action", "", "Next") #g.add("Change", 0, 0, 200, 17, "&Change [ProductName]") g.add("Repair", 0, 18, 200, 17, "&Repair [ProductName]") g.add("Remove", 0, 36, 200, 17, "Re&move [ProductName]") maint.back("< Back", None, active=False) c=maint.next("Finish", "Cancel") # Change installation: Change progress dialog to "Change", then ask # for feature selection #c.event("[Progress1]", "Change", 'MaintenanceForm_Action="Change"', 1) #c.event("[Progress2]", "changes", 'MaintenanceForm_Action="Change"', 2) # Reinstall: Change progress dialog to "Repair", then invoke reinstall # Also set list of reinstalled features to "ALL" c.event("[REINSTALL]", "ALL", 'MaintenanceForm_Action="Repair"', 5) c.event("[Progress1]", "Repairing", 'MaintenanceForm_Action="Repair"', 6) c.event("[Progress2]", "repairs", 'MaintenanceForm_Action="Repair"', 7) c.event("Reinstall", 
"ALL", 'MaintenanceForm_Action="Repair"', 8) # Uninstall: Change progress to "Remove", then invoke uninstall # Also set list of removed features to "ALL" c.event("[REMOVE]", "ALL", 'MaintenanceForm_Action="Remove"', 11) c.event("[Progress1]", "Removing", 'MaintenanceForm_Action="Remove"', 12) c.event("[Progress2]", "removes", 'MaintenanceForm_Action="Remove"', 13) c.event("Remove", "ALL", 'MaintenanceForm_Action="Remove"', 14) # Close dialog when maintenance action scheduled c.event("EndDialog", "Return", 'MaintenanceForm_Action<>"Change"', 20) #c.event("NewDialog", "SelectFeaturesDlg", 'MaintenanceForm_Action="Change"', 21) maint.cancel("Cancel", "RepairRadioGroup").event("SpawnDialog", "CancelDlg") def get_installer_filename(self, fullname): # Factored out to allow overriding in subclasses if self.target_version: base_name = "%s.%s-py%s.msi" % (fullname, self.plat_name, self.target_version) else: base_name = "%s.%s.msi" % (fullname, self.plat_name) installer_name = os.path.join(self.dist_dir, base_name) return installer_name
mit
-1,320,610,647,503,928,800
46.500668
200
0.538001
false
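One small extension point worth noting in the bdist_msi record above: get_installer_filename is factored out precisely so subclasses can rename the generated .msi (the source comment says as much). A sketch, assuming a setup.py on an interpreter that still ships this deprecated command:

import os
from distutils.command.bdist_msi import bdist_msi

class bdist_msi_flat_name(bdist_msi):
    def get_installer_filename(self, fullname):
        # drop the platform/Python-version tags, e.g. "mypkg-1.0.msi"
        return os.path.join(self.dist_dir, "%s.msi" % fullname)

# in setup.py (hypothetical): setup(..., cmdclass={'bdist_msi': bdist_msi_flat_name})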
mikemccllstr/seadir
seadir/commands/responses.py
1
1359
'''Command line handlers that address the responses in the Google Sheet''' import logging import os from cliff.lister import Lister import seadir.config as config import seadir.model.responses as responses class Dump(Lister): '''Dump the records from the Google Sheet to the screen''' log = logging.getLogger(__name__) def take_action(self, parsed_args): '''Method invoked by the Cliff framework''' # Obtain the data from the Google Sheet data = responses.Data(email=config.email, password=config.password, sheetname=config.spreadsheet, tabname=config.worksheet) return (data.headers, data.raw_contents()) class Clean(Lister): '''Process all the records and perform any possible cleanups''' log = logging.getLogger(__name__) def take_action(self, parsed_args): return (('Name', 'Size'), ((n, os.stat(n).st_size) for n in os.listdir('.')) ) class Validate(Lister): '''Process all the records and flags any situations that seem erroneous''' log = logging.getLogger(__name__) def take_action(self, parsed_args): return (('Name', 'Size'), ((n, os.stat(n).st_size) for n in os.listdir('.')) )
mit
675,204,467,066,143,500
26.734694
78
0.590876
false
windyuuy/opera
chromium/src/third_party/python_26/Lib/ctypes/test/test_pickling.py
48
2062
import unittest import pickle from ctypes import * import _ctypes_test dll = CDLL(_ctypes_test.__file__) class X(Structure): _fields_ = [("a", c_int), ("b", c_double)] init_called = 0 def __init__(self, *args, **kw): X.init_called += 1 self.x = 42 class Y(X): _fields_ = [("str", c_char_p)] class PickleTest(unittest.TestCase): def dumps(self, item): return pickle.dumps(item) def loads(self, item): return pickle.loads(item) def test_simple(self): for src in [ c_int(42), c_double(3.14), ]: dst = self.loads(self.dumps(src)) self.failUnlessEqual(src.__dict__, dst.__dict__) self.failUnlessEqual(buffer(src)[:], buffer(dst)[:]) def test_struct(self): X.init_called = 0 x = X() x.a = 42 self.failUnlessEqual(X.init_called, 1) y = self.loads(self.dumps(x)) # loads must NOT call __init__ self.failUnlessEqual(X.init_called, 1) # ctypes instances are identical when the instance __dict__ # and the memory buffer are identical self.failUnlessEqual(y.__dict__, x.__dict__) self.failUnlessEqual(buffer(y)[:], buffer(x)[:]) def test_unpickable(self): # ctypes objects that are pointers or contain pointers are # unpickable. self.assertRaises(ValueError, lambda: self.dumps(Y())) prototype = CFUNCTYPE(c_int) for item in [ c_char_p(), c_wchar_p(), c_void_p(), pointer(c_int(42)), dll._testfunc_p_p, prototype(lambda: 42), ]: self.assertRaises(ValueError, lambda: self.dumps(item)) class PickleTest_1(PickleTest): def dumps(self, item): return pickle.dumps(item, 1) class PickleTest_2(PickleTest): def dumps(self, item): return pickle.dumps(item, 2) if __name__ == "__main__": unittest.main()
bsd-3-clause
685,400,053,234,706,400
25.435897
67
0.536372
false
jacraven/lsiapp
main/auth/dropbox.py
2
1701
# coding: utf-8 # pylint: disable=missing-docstring, invalid-name import flask import auth import config import model from main import app dropbox_config = dict( access_token_method='POST', access_token_url='https://api.dropbox.com/1/oauth2/token', authorize_url='https://www.dropbox.com/1/oauth2/authorize', base_url='https://www.dropbox.com/1/', consumer_key=config.CONFIG_DB.auth_dropbox_id, consumer_secret=config.CONFIG_DB.auth_dropbox_secret, ) dropbox = auth.create_oauth_app(dropbox_config, 'dropbox') @app.route('/_s/callback/dropbox/oauth-authorized/') def dropbox_authorized(): response = dropbox.authorized_response() if response is None: flask.flash('You denied the request to sign in.') return flask.redirect(flask.url_for('index')) flask.session['oauth_token'] = (response['access_token'], '') me = dropbox.get('account/info') user_db = retrieve_user_from_dropbox(me.data) return auth.signin_via_social(user_db) @dropbox.tokengetter def get_dropbox_oauth_token(): return flask.session.get('oauth_token') @app.route('/signin/dropbox/') def signin_dropbox(): scheme = 'https' if config.PRODUCTION else 'http' return auth.signin_oauth(dropbox, scheme) def retrieve_user_from_dropbox(response): auth_id = 'dropbox_%s' % response['uid'] user_db = model.User.get_by('auth_ids', auth_id) if user_db: return user_db return auth.create_or_get_user_db( auth_id=auth_id, email=response['email'], name=response['display_name'], username=response['display_name'], verified=True )
mit
-5,999,421,789,438,646,000
26.35
65
0.654909
false
davidzchen/tensorflow
tensorflow/python/keras/distribute/model_collection_base.py
11
1687
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """A base class to provide a model and corresponding input data for testing.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function class ModelAndInput(object): """Base class to provide model and its corresponding inputs.""" def get_model(self): """Returns a compiled keras model object, together with output name. Returns: model: a keras model object output_name: a string for the name of the output layer """ raise NotImplementedError("must be implemented in descendants") def get_data(self): """Returns data for training and predicting. Returns: x_train: data used for training y_train: label used for training x_predict: data used for predicting """ raise NotImplementedError("must be implemented in descendants") def get_batch_size(self): """Returns the batch_size used by the model.""" raise NotImplementedError("must be implemented in descendants")
apache-2.0
-6,113,257,812,003,060,000
36.488889
80
0.694724
false
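The model_collection_base.py record above only defines the abstract contract; a minimal sketch of a concrete subclass, assuming standard tf.keras and numpy APIs (layer names, shapes and sizes below are illustrative, not taken from the file):

import numpy as np
import tensorflow as tf

class TinyModelAndInput(ModelAndInput):

  def get_model(self):
    model = tf.keras.Sequential(
        [tf.keras.layers.Dense(1, input_shape=(4,), name='output')])
    model.compile(optimizer='sgd', loss='mse')
    return model, 'output'  # compiled model plus its output layer name

  def get_data(self):
    x_train = np.random.random((8, 4)).astype(np.float32)
    y_train = np.random.random((8, 1)).astype(np.float32)
    x_predict = np.random.random((2, 4)).astype(np.float32)
    return x_train, y_train, x_predict

  def get_batch_size(self):
    return 4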
cyrixhero/Django-facebook
docs/docs_env/Lib/encodings/zlib_codec.py
533
3015
""" Python 'zlib_codec' Codec - zlib compression encoding Unlike most of the other codecs which target Unicode, this codec will return Python string objects for both encode and decode. Written by Marc-Andre Lemburg ([email protected]). """ import codecs import zlib # this codec needs the optional zlib module ! ### Codec APIs def zlib_encode(input,errors='strict'): """ Encodes the object input and returns a tuple (output object, length consumed). errors defines the error handling to apply. It defaults to 'strict' handling which is the only currently supported error handling for this codec. """ assert errors == 'strict' output = zlib.compress(input) return (output, len(input)) def zlib_decode(input,errors='strict'): """ Decodes the object input and returns a tuple (output object, length consumed). input must be an object which provides the bf_getreadbuf buffer slot. Python strings, buffer objects and memory mapped files are examples of objects providing this slot. errors defines the error handling to apply. It defaults to 'strict' handling which is the only currently supported error handling for this codec. """ assert errors == 'strict' output = zlib.decompress(input) return (output, len(input)) class Codec(codecs.Codec): def encode(self, input, errors='strict'): return zlib_encode(input, errors) def decode(self, input, errors='strict'): return zlib_decode(input, errors) class IncrementalEncoder(codecs.IncrementalEncoder): def __init__(self, errors='strict'): assert errors == 'strict' self.errors = errors self.compressobj = zlib.compressobj() def encode(self, input, final=False): if final: c = self.compressobj.compress(input) return c + self.compressobj.flush() else: return self.compressobj.compress(input) def reset(self): self.compressobj = zlib.compressobj() class IncrementalDecoder(codecs.IncrementalDecoder): def __init__(self, errors='strict'): assert errors == 'strict' self.errors = errors self.decompressobj = zlib.decompressobj() def decode(self, input, final=False): if final: c = self.decompressobj.decompress(input) return c + self.decompressobj.flush() else: return self.decompressobj.decompress(input) def reset(self): self.decompressobj = zlib.decompressobj() class StreamWriter(Codec,codecs.StreamWriter): pass class StreamReader(Codec,codecs.StreamReader): pass ### encodings module API def getregentry(): return codecs.CodecInfo( name='zlib', encode=zlib_encode, decode=zlib_decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, )
bsd-3-clause
4,798,231,745,378,867,000
28.558824
68
0.664345
false
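A short usage sketch for the zlib_codec record above. The file targets Python 2 (where str is a byte string); on Python 3 the inputs would have to be bytes:

data = "hello world " * 50
compressed, consumed = zlib_encode(data)
assert consumed == len(data)
assert zlib_decode(compressed)[0] == data

# the incremental classes compress a stream chunk by chunk;
# intermediate encode() calls may return "" while zlib buffers
enc = IncrementalEncoder()
parts = [enc.encode("hello "), enc.encode("world", final=True)]
assert zlib_decode("".join(parts))[0] == "hello world"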
neale/CS-program
434-MachineLearning/final_project/linearClassifier/sklearn/__init__.py
27
3086
""" Machine learning module for Python ================================== sklearn is a Python module integrating classical machine learning algorithms in the tightly-knit world of scientific Python packages (numpy, scipy, matplotlib). It aims to provide simple and efficient solutions to learning problems that are accessible to everybody and reusable in various contexts: machine-learning as a versatile tool for science and engineering. See http://scikit-learn.org for complete documentation. """ import sys import re import warnings # Make sure that DeprecationWarning within this package always gets printed warnings.filterwarnings('always', category=DeprecationWarning, module='^{0}\.'.format(re.escape(__name__))) # PEP0440 compatible formatted version, see: # https://www.python.org/dev/peps/pep-0440/ # # Generic release markers: # X.Y # X.Y.Z # For bugfix releases # # Admissible pre-release markers: # X.YaN # Alpha release # X.YbN # Beta release # X.YrcN # Release Candidate # X.Y # Final release # # Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer. # 'X.Y.dev0' is the canonical version of 'X.Y.dev' # __version__ = '0.18.dev0' try: # This variable is injected in the __builtins__ by the build # process. It used to enable importing subpackages of sklearn when # the binaries are not built __SKLEARN_SETUP__ except NameError: __SKLEARN_SETUP__ = False if __SKLEARN_SETUP__: sys.stderr.write('Partial import of sklearn during the build process.\n') # We are not importing the rest of the scikit during the build # process, as it may not be compiled yet else: from . import __check_build from .base import clone __check_build # avoid flakes unused variable error __all__ = ['calibration', 'cluster', 'covariance', 'cross_decomposition', 'cross_validation', 'datasets', 'decomposition', 'dummy', 'ensemble', 'exceptions', 'externals', 'feature_extraction', 'feature_selection', 'gaussian_process', 'grid_search', 'isotonic', 'kernel_approximation', 'kernel_ridge', 'lda', 'learning_curve', 'linear_model', 'manifold', 'metrics', 'mixture', 'model_selection', 'multiclass', 'multioutput', 'naive_bayes', 'neighbors', 'neural_network', 'pipeline', 'preprocessing', 'qda', 'random_projection', 'semi_supervised', 'svm', 'tree', 'discriminant_analysis', # Non-modules: 'clone'] def setup_module(module): """Fixture for the tests to assure globally controllable seeding of RNGs""" import os import numpy as np import random # It could have been provided in the environment _random_seed = os.environ.get('SKLEARN_SEED', None) if _random_seed is None: _random_seed = np.random.uniform() * (2 ** 31 - 1) _random_seed = int(_random_seed) print("I: Seeding RNGs with %r" % _random_seed) np.random.seed(_random_seed) random.seed(_random_seed)
unlicense
4,141,599,433,581,902,000
34.883721
79
0.652301
false
WarrenWeckesser/scipy
scipy/interpolate/fitpack.py
16
26807
__all__ = ['splrep', 'splprep', 'splev', 'splint', 'sproot', 'spalde', 'bisplrep', 'bisplev', 'insert', 'splder', 'splantider'] import warnings import numpy as np # These are in the API for fitpack even if not used in fitpack.py itself. from ._fitpack_impl import bisplrep, bisplev, dblint from . import _fitpack_impl as _impl from ._bsplines import BSpline def splprep(x, w=None, u=None, ub=None, ue=None, k=3, task=0, s=None, t=None, full_output=0, nest=None, per=0, quiet=1): """ Find the B-spline representation of an N-D curve. Given a list of N rank-1 arrays, `x`, which represent a curve in N-D space parametrized by `u`, find a smooth approximating spline curve g(`u`). Uses the FORTRAN routine parcur from FITPACK. Parameters ---------- x : array_like A list of sample vector arrays representing the curve. w : array_like, optional Strictly positive rank-1 array of weights the same length as `x[0]`. The weights are used in computing the weighted least-squares spline fit. If the errors in the `x` values have standard-deviation given by the vector d, then `w` should be 1/d. Default is ``ones(len(x[0]))``. u : array_like, optional An array of parameter values. If not given, these values are calculated automatically as ``M = len(x[0])``, where v[0] = 0 v[i] = v[i-1] + distance(`x[i]`, `x[i-1]`) u[i] = v[i] / v[M-1] ub, ue : int, optional The end-points of the parameters interval. Defaults to u[0] and u[-1]. k : int, optional Degree of the spline. Cubic splines are recommended. Even values of `k` should be avoided especially with a small s-value. ``1 <= k <= 5``, default is 3. task : int, optional If task==0 (default), find t and c for a given smoothing factor, s. If task==1, find t and c for another value of the smoothing factor, s. There must have been a previous call with task=0 or task=1 for the same set of data. If task=-1 find the weighted least square spline for a given set of knots, t. s : float, optional A smoothing condition. The amount of smoothness is determined by satisfying the conditions: ``sum((w * (y - g))**2,axis=0) <= s``, where g(x) is the smoothed interpolation of (x,y). The user can use `s` to control the trade-off between closeness and smoothness of fit. Larger `s` means more smoothing while smaller values of `s` indicate less smoothing. Recommended values of `s` depend on the weights, w. If the weights represent the inverse of the standard-deviation of y, then a good `s` value should be found in the range ``(m-sqrt(2*m),m+sqrt(2*m))``, where m is the number of data points in x, y, and w. t : int, optional The knots needed for task=-1. full_output : int, optional If non-zero, then return optional outputs. nest : int, optional An over-estimate of the total number of knots of the spline to help in determining the storage space. By default nest=m/2. Always large enough is nest=m+k+1. per : int, optional If non-zero, data points are considered periodic with period ``x[m-1] - x[0]`` and a smooth periodic spline approximation is returned. Values of ``y[m-1]`` and ``w[m-1]`` are not used. quiet : int, optional Non-zero to suppress messages. This parameter is deprecated; use standard Python warning filters instead. Returns ------- tck : tuple (t,c,k) a tuple containing the vector of knots, the B-spline coefficients, and the degree of the spline. u : array An array of the values of the parameter. fp : float The weighted sum of squared residuals of the spline approximation. ier : int An integer flag about splrep success. Success is indicated if ier<=0. 
If ier in [1,2,3] an error occurred but was not raised. Otherwise an error is raised. msg : str A message corresponding to the integer flag, ier. See Also -------- splrep, splev, sproot, spalde, splint, bisplrep, bisplev UnivariateSpline, BivariateSpline BSpline make_interp_spline Notes ----- See `splev` for evaluation of the spline and its derivatives. The number of dimensions N must be smaller than 11. The number of coefficients in the `c` array is ``k+1`` less then the number of knots, ``len(t)``. This is in contrast with `splrep`, which zero-pads the array of coefficients to have the same length as the array of knots. These additional coefficients are ignored by evaluation routines, `splev` and `BSpline`. References ---------- .. [1] P. Dierckx, "Algorithms for smoothing data with periodic and parametric splines, Computer Graphics and Image Processing", 20 (1982) 171-184. .. [2] P. Dierckx, "Algorithms for smoothing data with periodic and parametric splines", report tw55, Dept. Computer Science, K.U.Leuven, 1981. .. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs on Numerical Analysis, Oxford University Press, 1993. Examples -------- Generate a discretization of a limacon curve in the polar coordinates: >>> phi = np.linspace(0, 2.*np.pi, 40) >>> r = 0.5 + np.cos(phi) # polar coords >>> x, y = r * np.cos(phi), r * np.sin(phi) # convert to cartesian And interpolate: >>> from scipy.interpolate import splprep, splev >>> tck, u = splprep([x, y], s=0) >>> new_points = splev(u, tck) Notice that (i) we force interpolation by using `s=0`, (ii) the parameterization, ``u``, is generated automatically. Now plot the result: >>> import matplotlib.pyplot as plt >>> fig, ax = plt.subplots() >>> ax.plot(x, y, 'ro') >>> ax.plot(new_points[0], new_points[1], 'r-') >>> plt.show() """ res = _impl.splprep(x, w, u, ub, ue, k, task, s, t, full_output, nest, per, quiet) return res def splrep(x, y, w=None, xb=None, xe=None, k=3, task=0, s=None, t=None, full_output=0, per=0, quiet=1): """ Find the B-spline representation of a 1-D curve. Given the set of data points ``(x[i], y[i])`` determine a smooth spline approximation of degree k on the interval ``xb <= x <= xe``. Parameters ---------- x, y : array_like The data points defining a curve y = f(x). w : array_like, optional Strictly positive rank-1 array of weights the same length as x and y. The weights are used in computing the weighted least-squares spline fit. If the errors in the y values have standard-deviation given by the vector d, then w should be 1/d. Default is ones(len(x)). xb, xe : float, optional The interval to fit. If None, these default to x[0] and x[-1] respectively. k : int, optional The degree of the spline fit. It is recommended to use cubic splines. Even values of k should be avoided especially with small s values. 1 <= k <= 5 task : {1, 0, -1}, optional If task==0 find t and c for a given smoothing factor, s. If task==1 find t and c for another value of the smoothing factor, s. There must have been a previous call with task=0 or task=1 for the same set of data (t will be stored an used internally) If task=-1 find the weighted least square spline for a given set of knots, t. These should be interior knots as knots on the ends will be added automatically. s : float, optional A smoothing condition. The amount of smoothness is determined by satisfying the conditions: sum((w * (y - g))**2,axis=0) <= s where g(x) is the smoothed interpolation of (x,y). 
The user can use s to control the tradeoff between closeness and smoothness of fit. Larger s means more smoothing while smaller values of s indicate less smoothing. Recommended values of s depend on the weights, w. If the weights represent the inverse of the standard-deviation of y, then a good s value should be found in the range (m-sqrt(2*m),m+sqrt(2*m)) where m is the number of datapoints in x, y, and w. default : s=m-sqrt(2*m) if weights are supplied. s = 0.0 (interpolating) if no weights are supplied. t : array_like, optional The knots needed for task=-1. If given then task is automatically set to -1. full_output : bool, optional If non-zero, then return optional outputs. per : bool, optional If non-zero, data points are considered periodic with period x[m-1] - x[0] and a smooth periodic spline approximation is returned. Values of y[m-1] and w[m-1] are not used. quiet : bool, optional Non-zero to suppress messages. This parameter is deprecated; use standard Python warning filters instead. Returns ------- tck : tuple A tuple (t,c,k) containing the vector of knots, the B-spline coefficients, and the degree of the spline. fp : array, optional The weighted sum of squared residuals of the spline approximation. ier : int, optional An integer flag about splrep success. Success is indicated if ier<=0. If ier in [1,2,3] an error occurred but was not raised. Otherwise an error is raised. msg : str, optional A message corresponding to the integer flag, ier. See Also -------- UnivariateSpline, BivariateSpline splprep, splev, sproot, spalde, splint bisplrep, bisplev BSpline make_interp_spline Notes ----- See `splev` for evaluation of the spline and its derivatives. Uses the FORTRAN routine ``curfit`` from FITPACK. The user is responsible for assuring that the values of `x` are unique. Otherwise, `splrep` will not return sensible results. If provided, knots `t` must satisfy the Schoenberg-Whitney conditions, i.e., there must be a subset of data points ``x[j]`` such that ``t[j] < x[j] < t[j+k+1]``, for ``j=0, 1,...,n-k-2``. This routine zero-pads the coefficients array ``c`` to have the same length as the array of knots ``t`` (the trailing ``k + 1`` coefficients are ignored by the evaluation routines, `splev` and `BSpline`.) This is in contrast with `splprep`, which does not zero-pad the coefficients. References ---------- Based on algorithms described in [1]_, [2]_, [3]_, and [4]_: .. [1] P. Dierckx, "An algorithm for smoothing, differentiation and integration of experimental data using spline functions", J.Comp.Appl.Maths 1 (1975) 165-184. .. [2] P. Dierckx, "A fast algorithm for smoothing data on a rectangular grid while using spline functions", SIAM J.Numer.Anal. 19 (1982) 1286-1304. .. [3] P. Dierckx, "An improved algorithm for curve fitting with spline functions", report tw54, Dept. Computer Science,K.U. Leuven, 1981. .. [4] P. Dierckx, "Curve and surface fitting with splines", Monographs on Numerical Analysis, Oxford University Press, 1993. Examples -------- You can interpolate 1-D points with a B-spline curve. Further examples are given in :ref:`in the tutorial <tutorial-interpolate_splXXX>`. 
>>> import matplotlib.pyplot as plt >>> from scipy.interpolate import splev, splrep >>> x = np.linspace(0, 10, 10) >>> y = np.sin(x) >>> spl = splrep(x, y) >>> x2 = np.linspace(0, 10, 200) >>> y2 = splev(x2, spl) >>> plt.plot(x, y, 'o', x2, y2) >>> plt.show() """ res = _impl.splrep(x, y, w, xb, xe, k, task, s, t, full_output, per, quiet) return res def splev(x, tck, der=0, ext=0): """ Evaluate a B-spline or its derivatives. Given the knots and coefficients of a B-spline representation, evaluate the value of the smoothing polynomial and its derivatives. This is a wrapper around the FORTRAN routines splev and splder of FITPACK. Parameters ---------- x : array_like An array of points at which to return the value of the smoothed spline or its derivatives. If `tck` was returned from `splprep`, then the parameter values, u should be given. tck : 3-tuple or a BSpline object If a tuple, then it should be a sequence of length 3 returned by `splrep` or `splprep` containing the knots, coefficients, and degree of the spline. (Also see Notes.) der : int, optional The order of derivative of the spline to compute (must be less than or equal to k, the degree of the spline). ext : int, optional Controls the value returned for elements of ``x`` not in the interval defined by the knot sequence. * if ext=0, return the extrapolated value. * if ext=1, return 0 * if ext=2, raise a ValueError * if ext=3, return the boundary value. The default value is 0. Returns ------- y : ndarray or list of ndarrays An array of values representing the spline function evaluated at the points in `x`. If `tck` was returned from `splprep`, then this is a list of arrays representing the curve in an N-D space. Notes ----- Manipulating the tck-tuples directly is not recommended. In new code, prefer using `BSpline` objects. See Also -------- splprep, splrep, sproot, spalde, splint bisplrep, bisplev BSpline References ---------- .. [1] C. de Boor, "On calculating with b-splines", J. Approximation Theory, 6, p.50-62, 1972. .. [2] M. G. Cox, "The numerical evaluation of b-splines", J. Inst. Maths Applics, 10, p.134-149, 1972. .. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs on Numerical Analysis, Oxford University Press, 1993. Examples -------- Examples are given :ref:`in the tutorial <tutorial-interpolate_splXXX>`. """ if isinstance(tck, BSpline): if tck.c.ndim > 1: mesg = ("Calling splev() with BSpline objects with c.ndim > 1 is " "not recommended. Use BSpline.__call__(x) instead.") warnings.warn(mesg, DeprecationWarning) # remap the out-of-bounds behavior try: extrapolate = {0: True, }[ext] except KeyError as e: raise ValueError("Extrapolation mode %s is not supported " "by BSpline." % ext) from e return tck(x, der, extrapolate=extrapolate) else: return _impl.splev(x, tck, der, ext) def splint(a, b, tck, full_output=0): """ Evaluate the definite integral of a B-spline between two given points. Parameters ---------- a, b : float The end-points of the integration interval. tck : tuple or a BSpline instance If a tuple, then it should be a sequence of length 3, containing the vector of knots, the B-spline coefficients, and the degree of the spline (see `splev`). full_output : int, optional Non-zero to return optional output. Returns ------- integral : float The resulting integral. wrk : ndarray An array containing the integrals of the normalized B-splines defined on the set of knots. 
        (Only returned if `full_output` is non-zero)

    Notes
    -----
    `splint` silently assumes that the spline function is zero outside the data
    interval (`a`, `b`).

    Manipulating the tck-tuples directly is not recommended. In new code,
    prefer using the `BSpline` objects.

    See Also
    --------
    splprep, splrep, sproot, spalde, splev
    bisplrep, bisplev
    BSpline

    References
    ----------
    .. [1] P.W. Gaffney, The calculation of indefinite integrals of b-splines",
       J. Inst. Maths Applics, 17, p.37-41, 1976.
    .. [2] P. Dierckx, "Curve and surface fitting with splines", Monographs
       on Numerical Analysis, Oxford University Press, 1993.

    Examples
    --------
    Examples are given :ref:`in the tutorial <tutorial-interpolate_splXXX>`.

    """
    if isinstance(tck, BSpline):
        if tck.c.ndim > 1:
            mesg = ("Calling splint() with BSpline objects with c.ndim > 1 is "
                    "not recommended. Use BSpline.integrate() instead.")
            warnings.warn(mesg, DeprecationWarning)

        if full_output != 0:
            mesg = ("full_output = %s is not supported. Proceeding as if "
                    "full_output = 0" % full_output)

        return tck.integrate(a, b, extrapolate=False)
    else:
        return _impl.splint(a, b, tck, full_output)


def sproot(tck, mest=10):
    """
    Find the roots of a cubic B-spline.

    Given the knots (>=8) and coefficients of a cubic B-spline return the
    roots of the spline.

    Parameters
    ----------
    tck : tuple or a BSpline object
        If a tuple, then it should be a sequence of length 3, containing the
        vector of knots, the B-spline coefficients, and the degree of the
        spline.
        The number of knots must be >= 8, and the degree must be 3.
        The knots must be a monotonically increasing sequence.
    mest : int, optional
        An estimate of the number of zeros (Default is 10).

    Returns
    -------
    zeros : ndarray
        An array giving the roots of the spline.

    Notes
    -----
    Manipulating the tck-tuples directly is not recommended. In new code,
    prefer using the `BSpline` objects.

    See Also
    --------
    splprep, splrep, splint, spalde, splev
    bisplrep, bisplev
    BSpline

    References
    ----------
    .. [1] C. de Boor, "On calculating with b-splines", J. Approximation
        Theory, 6, p.50-62, 1972.
    .. [2] M. G. Cox, "The numerical evaluation of b-splines", J. Inst.
        Maths Applics, 10, p.134-149, 1972.
    .. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs
        on Numerical Analysis, Oxford University Press, 1993.

    Examples
    --------
    Examples are given :ref:`in the tutorial <tutorial-interpolate_splXXX>`.

    """
    if isinstance(tck, BSpline):
        if tck.c.ndim > 1:
            mesg = ("Calling sproot() with BSpline objects with c.ndim > 1 is "
                    "not recommended.")
            warnings.warn(mesg, DeprecationWarning)

        t, c, k = tck.tck

        # _impl.sproot expects the interpolation axis to be last, so roll it.
        # NB: This transpose is a no-op if c is 1D.
        sh = tuple(range(c.ndim))
        c = c.transpose(sh[1:] + (0,))
        return _impl.sproot((t, c, k), mest)
    else:
        return _impl.sproot(tck, mest)


def spalde(x, tck):
    """
    Evaluate all derivatives of a B-spline.

    Given the knots and coefficients of a cubic B-spline compute all
    derivatives up to order k at a point (or set of points).

    Parameters
    ----------
    x : array_like
        A point or a set of points at which to evaluate the derivatives.
        Note that ``t(k) <= x <= t(n-k+1)`` must hold for each `x`.
    tck : tuple
        A tuple ``(t, c, k)``, containing the vector of knots, the B-spline
        coefficients, and the degree of the spline (see `splev`).

    Returns
    -------
    results : {ndarray, list of ndarrays}
        An array (or a list of arrays) containing all derivatives
        up to order k inclusive for each point `x`.
See Also -------- splprep, splrep, splint, sproot, splev, bisplrep, bisplev, BSpline References ---------- .. [1] C. de Boor: On calculating with b-splines, J. Approximation Theory 6 (1972) 50-62. .. [2] M. G. Cox : The numerical evaluation of b-splines, J. Inst. Maths applics 10 (1972) 134-149. .. [3] P. Dierckx : Curve and surface fitting with splines, Monographs on Numerical Analysis, Oxford University Press, 1993. Examples -------- Examples are given :ref:`in the tutorial <tutorial-interpolate_splXXX>`. """ if isinstance(tck, BSpline): raise TypeError("spalde does not accept BSpline instances.") else: return _impl.spalde(x, tck) def insert(x, tck, m=1, per=0): """ Insert knots into a B-spline. Given the knots and coefficients of a B-spline representation, create a new B-spline with a knot inserted `m` times at point `x`. This is a wrapper around the FORTRAN routine insert of FITPACK. Parameters ---------- x (u) : array_like A 1-D point at which to insert a new knot(s). If `tck` was returned from ``splprep``, then the parameter values, u should be given. tck : a `BSpline` instance or a tuple If tuple, then it is expected to be a tuple (t,c,k) containing the vector of knots, the B-spline coefficients, and the degree of the spline. m : int, optional The number of times to insert the given knot (its multiplicity). Default is 1. per : int, optional If non-zero, the input spline is considered periodic. Returns ------- BSpline instance or a tuple A new B-spline with knots t, coefficients c, and degree k. ``t(k+1) <= x <= t(n-k)``, where k is the degree of the spline. In case of a periodic spline (``per != 0``) there must be either at least k interior knots t(j) satisfying ``t(k+1)<t(j)<=x`` or at least k interior knots t(j) satisfying ``x<=t(j)<t(n-k)``. A tuple is returned iff the input argument `tck` is a tuple, otherwise a BSpline object is constructed and returned. Notes ----- Based on algorithms from [1]_ and [2]_. Manipulating the tck-tuples directly is not recommended. In new code, prefer using the `BSpline` objects. References ---------- .. [1] W. Boehm, "Inserting new knots into b-spline curves.", Computer Aided Design, 12, p.199-201, 1980. .. [2] P. Dierckx, "Curve and surface fitting with splines, Monographs on Numerical Analysis", Oxford University Press, 1993. Examples -------- You can insert knots into a B-spline. >>> from scipy.interpolate import splrep, insert >>> x = np.linspace(0, 10, 5) >>> y = np.sin(x) >>> tck = splrep(x, y) >>> tck[0] array([ 0., 0., 0., 0., 5., 10., 10., 10., 10.]) A knot is inserted: >>> tck_inserted = insert(3, tck) >>> tck_inserted[0] array([ 0., 0., 0., 0., 3., 5., 10., 10., 10., 10.]) Some knots are inserted: >>> tck_inserted2 = insert(8, tck, m=3) >>> tck_inserted2[0] array([ 0., 0., 0., 0., 5., 8., 8., 8., 10., 10., 10., 10.]) """ if isinstance(tck, BSpline): t, c, k = tck.tck # FITPACK expects the interpolation axis to be last, so roll it over # NB: if c array is 1D, transposes are no-ops sh = tuple(range(c.ndim)) c = c.transpose(sh[1:] + (0,)) t_, c_, k_ = _impl.insert(x, (t, c, k), m, per) # and roll the last axis back c_ = np.asarray(c_) c_ = c_.transpose((sh[-1],) + sh[:-1]) return BSpline(t_, c_, k_) else: return _impl.insert(x, tck, m, per) def splder(tck, n=1): """ Compute the spline representation of the derivative of a given spline Parameters ---------- tck : BSpline instance or a tuple of (t, c, k) Spline whose derivative to compute n : int, optional Order of derivative to evaluate. 
Default: 1 Returns ------- `BSpline` instance or tuple Spline of order k2=k-n representing the derivative of the input spline. A tuple is returned iff the input argument `tck` is a tuple, otherwise a BSpline object is constructed and returned. Notes ----- .. versionadded:: 0.13.0 See Also -------- splantider, splev, spalde BSpline Examples -------- This can be used for finding maxima of a curve: >>> from scipy.interpolate import splrep, splder, sproot >>> x = np.linspace(0, 10, 70) >>> y = np.sin(x) >>> spl = splrep(x, y, k=4) Now, differentiate the spline and find the zeros of the derivative. (NB: `sproot` only works for order 3 splines, so we fit an order 4 spline): >>> dspl = splder(spl) >>> sproot(dspl) / np.pi array([ 0.50000001, 1.5 , 2.49999998]) This agrees well with roots :math:`\\pi/2 + n\\pi` of :math:`\\cos(x) = \\sin'(x)`. """ if isinstance(tck, BSpline): return tck.derivative(n) else: return _impl.splder(tck, n) def splantider(tck, n=1): """ Compute the spline for the antiderivative (integral) of a given spline. Parameters ---------- tck : BSpline instance or a tuple of (t, c, k) Spline whose antiderivative to compute n : int, optional Order of antiderivative to evaluate. Default: 1 Returns ------- BSpline instance or a tuple of (t2, c2, k2) Spline of order k2=k+n representing the antiderivative of the input spline. A tuple is returned iff the input argument `tck` is a tuple, otherwise a BSpline object is constructed and returned. See Also -------- splder, splev, spalde BSpline Notes ----- The `splder` function is the inverse operation of this function. Namely, ``splder(splantider(tck))`` is identical to `tck`, modulo rounding error. .. versionadded:: 0.13.0 Examples -------- >>> from scipy.interpolate import splrep, splder, splantider, splev >>> x = np.linspace(0, np.pi/2, 70) >>> y = 1 / np.sqrt(1 - 0.8*np.sin(x)**2) >>> spl = splrep(x, y) The derivative is the inverse operation of the antiderivative, although some floating point error accumulates: >>> splev(1.7, spl), splev(1.7, splder(splantider(spl))) (array(2.1565429877197317), array(2.1565429877201865)) Antiderivative can be used to evaluate definite integrals: >>> ispl = splantider(spl) >>> splev(np.pi/2, ispl) - splev(0, ispl) 2.2572053588768486 This is indeed an approximation to the complete elliptic integral :math:`K(m) = \\int_0^{\\pi/2} [1 - m\\sin^2 x]^{-1/2} dx`: >>> from scipy.special import ellipk >>> ellipk(0.8) 2.2572053268208538 """ if isinstance(tck, BSpline): return tck.antiderivative(n) else: return _impl.splantider(tck, n)
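

# --- Illustrative usage sketch (editor's addition, not part of the original
# module). It exercises only the public wrappers defined above plus NumPy;
# the function name and sample data are hypothetical and nothing in the
# library calls this.
def _example_spline_usage():
    import numpy as np
    x = np.linspace(0, 2 * np.pi, 50)
    y = np.sin(x)
    tck = splrep(x, y)                    # interpolating cubic spline (s=0)
    area = splint(0, np.pi, tck)          # ~2.0, the integral of sin on [0, pi]
    dspl = splder(tck)                    # degree-2 spline approximating cos(x)
    slope = splev(0.0, dspl)              # ~1.0 == cos(0)
    ispl = splantider(tck)                # degree-4 antiderivative spline
    area2 = splev(np.pi, ispl) - splev(0.0, ispl)   # same integral, ~2.0
    return area, slope, area2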
bsd-3-clause
-8,576,654,773,767,334,000
34.133683
80
0.614877
false
blackbliss/callme
flask/lib/python2.7/site-packages/pip/_vendor/distlib/scripts.py
163
11979
# -*- coding: utf-8 -*- # # Copyright (C) 2013 Vinay Sajip. # Licensed to the Python Software Foundation under a contributor agreement. # See LICENSE.txt and CONTRIBUTORS.txt. # from io import BytesIO import logging import os import re import struct import sys from .compat import sysconfig, fsencode, detect_encoding, ZipFile from .resources import finder from .util import (FileOperator, get_export_entry, convert_path, get_executable, in_venv) logger = logging.getLogger(__name__) _DEFAULT_MANIFEST = ''' <?xml version="1.0" encoding="UTF-8" standalone="yes"?> <assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0"> <assemblyIdentity version="1.0.0.0" processorArchitecture="X86" name="%s" type="win32"/> <!-- Identify the application security requirements. --> <trustInfo xmlns="urn:schemas-microsoft-com:asm.v3"> <security> <requestedPrivileges> <requestedExecutionLevel level="asInvoker" uiAccess="false"/> </requestedPrivileges> </security> </trustInfo> </assembly>'''.strip() # check if Python is called on the first line with this expression FIRST_LINE_RE = re.compile(b'^#!.*pythonw?[0-9.]*([ \t].*)?$') SCRIPT_TEMPLATE = '''# -*- coding: utf-8 -*- if __name__ == '__main__': import sys, re def _resolve(module, func): __import__(module) mod = sys.modules[module] parts = func.split('.') result = getattr(mod, parts.pop(0)) for p in parts: result = getattr(result, p) return result try: sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) func = _resolve('%(module)s', '%(func)s') rc = func() # None interpreted as 0 except Exception as e: # only supporting Python >= 2.6 sys.stderr.write('%%s\\n' %% e) rc = 1 sys.exit(rc) ''' class ScriptMaker(object): """ A class to copy or create scripts from source scripts or callable specifications. """ script_template = SCRIPT_TEMPLATE executable = None # for shebangs def __init__(self, source_dir, target_dir, add_launchers=True, dry_run=False, fileop=None): self.source_dir = source_dir self.target_dir = target_dir self.add_launchers = add_launchers self.force = False self.clobber = False # It only makes sense to set mode bits on POSIX. self.set_mode = (os.name == 'posix') self.variants = set(('', 'X.Y')) self._fileop = fileop or FileOperator(dry_run) def _get_alternate_executable(self, executable, options): if options.get('gui', False) and os.name == 'nt': dn, fn = os.path.split(executable) fn = fn.replace('python', 'pythonw') executable = os.path.join(dn, fn) return executable def _get_shebang(self, encoding, post_interp=b'', options=None): if self.executable: executable = self.executable elif not sysconfig.is_python_build(): executable = get_executable() elif in_venv(): executable = os.path.join(sysconfig.get_path('scripts'), 'python%s' % sysconfig.get_config_var('EXE')) else: executable = os.path.join( sysconfig.get_config_var('BINDIR'), 'python%s%s' % (sysconfig.get_config_var('VERSION'), sysconfig.get_config_var('EXE'))) if options: executable = self._get_alternate_executable(executable, options) executable = fsencode(executable) shebang = b'#!' + executable + post_interp + b'\n' # Python parser starts to read a script using UTF-8 until # it gets a #coding:xxx cookie. The shebang has to be the # first line of a file, the #coding:xxx cookie cannot be # written before. So the shebang has to be decodable from # UTF-8. 
try: shebang.decode('utf-8') except UnicodeDecodeError: raise ValueError( 'The shebang (%r) is not decodable from utf-8' % shebang) # If the script is encoded to a custom encoding (use a # #coding:xxx cookie), the shebang has to be decodable from # the script encoding too. if encoding != 'utf-8': try: shebang.decode(encoding) except UnicodeDecodeError: raise ValueError( 'The shebang (%r) is not decodable ' 'from the script encoding (%r)' % (shebang, encoding)) return shebang def _get_script_text(self, entry): return self.script_template % dict(module=entry.prefix, func=entry.suffix) manifest = _DEFAULT_MANIFEST def get_manifest(self, exename): base = os.path.basename(exename) return self.manifest % base def _write_script(self, names, shebang, script_bytes, filenames, ext): use_launcher = self.add_launchers and os.name == 'nt' linesep = os.linesep.encode('utf-8') if not use_launcher: script_bytes = shebang + linesep + script_bytes else: if ext == 'py': launcher = self._get_launcher('t') else: launcher = self._get_launcher('w') stream = BytesIO() with ZipFile(stream, 'w') as zf: zf.writestr('__main__.py', script_bytes) zip_data = stream.getvalue() script_bytes = launcher + shebang + linesep + zip_data for name in names: outname = os.path.join(self.target_dir, name) if use_launcher: n, e = os.path.splitext(outname) if e.startswith('.py'): outname = n outname = '%s.exe' % outname try: self._fileop.write_binary_file(outname, script_bytes) except Exception: # Failed writing an executable - it might be in use. logger.warning('Failed to write executable - trying to ' 'use .deleteme logic') dfname = '%s.deleteme' % outname if os.path.exists(dfname): os.remove(dfname) # Not allowed to fail here os.rename(outname, dfname) # nor here self._fileop.write_binary_file(outname, script_bytes) logger.debug('Able to replace executable using ' '.deleteme logic') try: os.remove(dfname) except Exception: pass # still in use - ignore error else: if os.name == 'nt' and not outname.endswith('.' + ext): outname = '%s.%s' % (outname, ext) if os.path.exists(outname) and not self.clobber: logger.warning('Skipping existing file %s', outname) continue self._fileop.write_binary_file(outname, script_bytes) if self.set_mode: self._fileop.set_executable_mode([outname]) filenames.append(outname) def _make_script(self, entry, filenames, options=None): shebang = self._get_shebang('utf-8', options=options) script = self._get_script_text(entry).encode('utf-8') name = entry.name scriptnames = set() if '' in self.variants: scriptnames.add(name) if 'X' in self.variants: scriptnames.add('%s%s' % (name, sys.version[0])) if 'X.Y' in self.variants: scriptnames.add('%s-%s' % (name, sys.version[:3])) if options and options.get('gui', False): ext = 'pyw' else: ext = 'py' self._write_script(scriptnames, shebang, script, filenames, ext) def _copy_script(self, script, filenames): adjust = False script = os.path.join(self.source_dir, convert_path(script)) outname = os.path.join(self.target_dir, os.path.basename(script)) if not self.force and not self._fileop.newer(script, outname): logger.debug('not copying %s (up-to-date)', script) return # Always open the file, but ignore failures in dry-run mode -- # that way, we'll get accurate feedback if we can read the # script. 
try: f = open(script, 'rb') except IOError: if not self.dry_run: raise f = None else: encoding, lines = detect_encoding(f.readline) f.seek(0) first_line = f.readline() if not first_line: logger.warning('%s: %s is an empty file (skipping)', self.get_command_name(), script) return match = FIRST_LINE_RE.match(first_line.replace(b'\r\n', b'\n')) if match: adjust = True post_interp = match.group(1) or b'' if not adjust: if f: f.close() self._fileop.copy_file(script, outname) if self.set_mode: self._fileop.set_executable_mode([outname]) filenames.append(outname) else: logger.info('copying and adjusting %s -> %s', script, self.target_dir) if not self._fileop.dry_run: shebang = self._get_shebang(encoding, post_interp) if b'pythonw' in first_line: ext = 'pyw' else: ext = 'py' n = os.path.basename(outname) self._write_script([n], shebang, f.read(), filenames, ext) if f: f.close() @property def dry_run(self): return self._fileop.dry_run @dry_run.setter def dry_run(self, value): self._fileop.dry_run = value if os.name == 'nt': # Executable launcher support. # Launchers are from https://bitbucket.org/vinay.sajip/simple_launcher/ def _get_launcher(self, kind): if struct.calcsize('P') == 8: # 64-bit bits = '64' else: bits = '32' name = '%s%s.exe' % (kind, bits) # Issue 31: don't hardcode an absolute package name, but # determine it relative to the current package distlib_package = __name__.rsplit('.', 1)[0] result = finder(distlib_package).find(name).bytes return result # Public API follows def make(self, specification, options=None): """ Make a script. :param specification: The specification, which is either a valid export entry specification (to make a script from a callable) or a filename (to make a script by copying from a source location). :param options: A dictionary of options controlling script generation. :return: A list of all absolute pathnames written to. """ filenames = [] entry = get_export_entry(specification) if entry is None: self._copy_script(specification, filenames) else: self._make_script(entry, filenames, options=options) return filenames def make_multiple(self, specifications, options=None): """ Take a list of specifications and make scripts from them, :param specifications: A list of specifications. :return: A list of all absolute pathnames written to, """ filenames = [] for specification in specifications: filenames.extend(self.make(specification, options)) return filenames
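

# --- Illustrative usage sketch (editor's addition, not part of distlib).
# It only exercises the ScriptMaker API defined above; the entry-point
# specification and the directory arguments are hypothetical placeholders.
def _example_make_scripts(source_dir, target_dir):
    maker = ScriptMaker(source_dir, target_dir, add_launchers=False,
                        dry_run=True)
    # Generate only the unsuffixed script name (skip the "name-X.Y" variant).
    maker.variants = set([''])
    # A callable specification such as 'name = module:func' produces a wrapper
    # script around that function; a plain filename would instead be copied
    # (with its shebang adjusted) by _copy_script().
    return maker.make_multiple(['hello = demo.cli:main'])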
mit
4,861,348,553,560,394,000
36.788644
79
0.547792
false
mfussenegger/jedi
test/static_analysis/arguments.py
20
1150
# ----------------- # normal arguments (no keywords) # ----------------- def simple(a): return a simple(1) #! 6 type-error-too-few-arguments simple() #! 10 type-error-too-many-arguments simple(1, 2) #! 10 type-error-too-many-arguments simple(1, 2, 3) # ----------------- # keyword arguments # ----------------- simple(a=1) #! 7 type-error-keyword-argument simple(b=1) #! 10 type-error-too-many-arguments simple(1, a=1) def two_params(x, y): return y two_params(y=2, x=1) two_params(1, y=2) #! 11 type-error-multiple-values two_params(1, x=2) #! 17 type-error-too-many-arguments two_params(1, 2, y=3) # ----------------- # default arguments # ----------------- def default(x, y=1, z=2): return x #! 7 type-error-too-few-arguments default() default(1) default(1, 2) default(1, 2, 3) #! 17 type-error-too-many-arguments default(1, 2, 3, 4) default(x=1) # ----------------- # class arguments # ----------------- class Instance(): def __init__(self, foo): self.foo = foo Instance(1).foo Instance(foo=1).foo #! 12 type-error-too-many-arguments Instance(1, 2).foo #! 8 type-error-too-few-arguments Instance().foo
mit
5,969,800,030,835,117,000
14.753425
35
0.578261
false
junhuac/MQUIC
src/tools/gyp/pylib/gyp/xcode_emulation.py
7
67036
# Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ This module contains classes that help to emulate xcodebuild behavior on top of other build systems, such as make and ninja. """ import copy import gyp.common import os import os.path import re import shlex import subprocess import sys import tempfile from gyp.common import GypError # Populated lazily by XcodeVersion, for efficiency, and to fix an issue when # "xcodebuild" is called too quickly (it has been found to return incorrect # version number). XCODE_VERSION_CACHE = None # Populated lazily by GetXcodeArchsDefault, to an |XcodeArchsDefault| instance # corresponding to the installed version of Xcode. XCODE_ARCHS_DEFAULT_CACHE = None def XcodeArchsVariableMapping(archs, archs_including_64_bit=None): """Constructs a dictionary with expansion for $(ARCHS_STANDARD) variable, and optionally for $(ARCHS_STANDARD_INCLUDING_64_BIT).""" mapping = {'$(ARCHS_STANDARD)': archs} if archs_including_64_bit: mapping['$(ARCHS_STANDARD_INCLUDING_64_BIT)'] = archs_including_64_bit return mapping class XcodeArchsDefault(object): """A class to resolve ARCHS variable from xcode_settings, resolving Xcode macros and implementing filtering by VALID_ARCHS. The expansion of macros depends on the SDKROOT used ("macosx", "iphoneos", "iphonesimulator") and on the version of Xcode. """ # Match variable like $(ARCHS_STANDARD). variable_pattern = re.compile(r'\$\([a-zA-Z_][a-zA-Z0-9_]*\)$') def __init__(self, default, mac, iphonesimulator, iphoneos): self._default = (default,) self._archs = {'mac': mac, 'ios': iphoneos, 'iossim': iphonesimulator} def _VariableMapping(self, sdkroot): """Returns the dictionary of variable mapping depending on the SDKROOT.""" sdkroot = sdkroot.lower() if 'iphoneos' in sdkroot: return self._archs['ios'] elif 'iphonesimulator' in sdkroot: return self._archs['iossim'] else: return self._archs['mac'] def _ExpandArchs(self, archs, sdkroot): """Expands variables references in ARCHS, and remove duplicates.""" variable_mapping = self._VariableMapping(sdkroot) expanded_archs = [] for arch in archs: if self.variable_pattern.match(arch): variable = arch try: variable_expansion = variable_mapping[variable] for arch in variable_expansion: if arch not in expanded_archs: expanded_archs.append(arch) except KeyError as e: print 'Warning: Ignoring unsupported variable "%s".' % variable elif arch not in expanded_archs: expanded_archs.append(arch) return expanded_archs def ActiveArchs(self, archs, valid_archs, sdkroot): """Expands variables references in ARCHS, and filter by VALID_ARCHS if it is defined (if not set, Xcode accept any value in ARCHS, otherwise, only values present in VALID_ARCHS are kept).""" expanded_archs = self._ExpandArchs(archs or self._default, sdkroot or '') if valid_archs: filtered_archs = [] for arch in expanded_archs: if arch in valid_archs: filtered_archs.append(arch) expanded_archs = filtered_archs return expanded_archs def GetXcodeArchsDefault(): """Returns the |XcodeArchsDefault| object to use to expand ARCHS for the installed version of Xcode. The default values used by Xcode for ARCHS and the expansion of the variables depends on the version of Xcode used. For all version anterior to Xcode 5.0 or posterior to Xcode 5.1 included uses $(ARCHS_STANDARD) if ARCHS is unset, while Xcode 5.0 to 5.0.2 uses $(ARCHS_STANDARD_INCLUDING_64_BIT). This variable was added to Xcode 5.0 and deprecated with Xcode 5.1. 
  For "macosx" SDKROOT, all versions starting with Xcode 5.0 include the 64-bit
  architecture as part of $(ARCHS_STANDARD) and default to only building it.

  For "iphoneos" and "iphonesimulator" SDKROOT, 64-bit architectures are part
  of $(ARCHS_STANDARD_INCLUDING_64_BIT) from Xcode 5.0. From Xcode 5.1, they
  are also part of $(ARCHS_STANDARD).

  All those rules are encoded in the construction of the |XcodeArchsDefault|
  object to use depending on the version of Xcode detected. The object is
  cached for performance reasons."""
  global XCODE_ARCHS_DEFAULT_CACHE
  if XCODE_ARCHS_DEFAULT_CACHE:
    return XCODE_ARCHS_DEFAULT_CACHE
  xcode_version, _ = XcodeVersion()
  if xcode_version < '0500':
    XCODE_ARCHS_DEFAULT_CACHE = XcodeArchsDefault(
        '$(ARCHS_STANDARD)',
        XcodeArchsVariableMapping(['i386']),
        XcodeArchsVariableMapping(['i386']),
        XcodeArchsVariableMapping(['armv7']))
  elif xcode_version < '0510':
    XCODE_ARCHS_DEFAULT_CACHE = XcodeArchsDefault(
        '$(ARCHS_STANDARD_INCLUDING_64_BIT)',
        XcodeArchsVariableMapping(['x86_64'], ['x86_64']),
        XcodeArchsVariableMapping(['i386'], ['i386', 'x86_64']),
        XcodeArchsVariableMapping(
            ['armv7', 'armv7s'],
            ['armv7', 'armv7s', 'arm64']))
  else:
    XCODE_ARCHS_DEFAULT_CACHE = XcodeArchsDefault(
        '$(ARCHS_STANDARD)',
        XcodeArchsVariableMapping(['x86_64'], ['x86_64']),
        XcodeArchsVariableMapping(['i386', 'x86_64'], ['i386', 'x86_64']),
        XcodeArchsVariableMapping(
            ['armv7', 'armv7s', 'arm64'],
            ['armv7', 'armv7s', 'arm64']))
  return XCODE_ARCHS_DEFAULT_CACHE


class XcodeSettings(object):
  """A class that understands the gyp 'xcode_settings' object."""

  # Populated lazily by _SdkPath(). Shared by all XcodeSettings, so cached
  # at class-level for efficiency.
  _sdk_path_cache = {}
  _platform_path_cache = {}
  _sdk_root_cache = {}

  # Populated lazily by GetExtraPlistItems(). Shared by all XcodeSettings, so
  # cached at class-level for efficiency.
  _plist_cache = {}

  # Populated lazily by GetIOSPostbuilds. Shared by all XcodeSettings, so
  # cached at class-level for efficiency.
  _codesigning_key_cache = {}

  def __init__(self, spec):
    self.spec = spec

    self.isIOS = False
    self.mac_toolchain_dir = None
    self.header_map_path = None

    # Per-target 'xcode_settings' are pushed down into configs earlier by gyp.
    # This means self.xcode_settings[config] always contains all settings
    # for that config -- the per-target settings as well. Settings that are
    # the same for all configs are implicitly per-target settings.
    self.xcode_settings = {}
    configs = spec['configurations']
    for configname, config in configs.iteritems():
      self.xcode_settings[configname] = config.get('xcode_settings', {})
      self._ConvertConditionalKeys(configname)
      if self.xcode_settings[configname].get('IPHONEOS_DEPLOYMENT_TARGET',
                                             None):
        self.isIOS = True

    # This is only non-None temporarily during the execution of some methods.
    self.configname = None

    # Used by _AdjustLibrary to match .a and .dylib entries in libraries.
    self.library_re = re.compile(r'^lib([^/]+)\.(a|dylib)$')

  def _ConvertConditionalKeys(self, configname):
    """Converts or warns on conditional keys.  Xcode supports conditional keys,
    such as CODE_SIGN_IDENTITY[sdk=iphoneos*].
This is a partial implementation with some keys converted while the rest force a warning.""" settings = self.xcode_settings[configname] conditional_keys = [key for key in settings if key.endswith(']')] for key in conditional_keys: # If you need more, speak up at http://crbug.com/122592 if key.endswith("[sdk=iphoneos*]"): if configname.endswith("iphoneos"): new_key = key.split("[")[0] settings[new_key] = settings[key] else: print 'Warning: Conditional keys not implemented, ignoring:', \ ' '.join(conditional_keys) del settings[key] def _Settings(self): assert self.configname return self.xcode_settings[self.configname] def _Test(self, test_key, cond_key, default): return self._Settings().get(test_key, default) == cond_key def _Appendf(self, lst, test_key, format_str, default=None): if test_key in self._Settings(): lst.append(format_str % str(self._Settings()[test_key])) elif default: lst.append(format_str % str(default)) def _WarnUnimplemented(self, test_key): if test_key in self._Settings(): print 'Warning: Ignoring not yet implemented key "%s".' % test_key def IsBinaryOutputFormat(self, configname): default = "binary" if self.isIOS else "xml" format = self.xcode_settings[configname].get('INFOPLIST_OUTPUT_FORMAT', default) return format == "binary" def IsIosFramework(self): return self.spec['type'] == 'shared_library' and self._IsBundle() and \ self.isIOS def _IsBundle(self): return int(self.spec.get('mac_bundle', 0)) != 0 or self._IsXCTest() def _IsXCTest(self): return int(self.spec.get('mac_xctest_bundle', 0)) != 0 def _IsIosAppExtension(self): return int(self.spec.get('ios_app_extension', 0)) != 0 def _IsIosWatchKitExtension(self): return int(self.spec.get('ios_watchkit_extension', 0)) != 0 def _IsIosWatchApp(self): return int(self.spec.get('ios_watch_app', 0)) != 0 def GetFrameworkVersion(self): """Returns the framework version of the current target. Only valid for bundles.""" assert self._IsBundle() return self.GetPerTargetSetting('FRAMEWORK_VERSION', default='A') def GetWrapperExtension(self): """Returns the bundle extension (.app, .framework, .plugin, etc). Only valid for bundles.""" assert self._IsBundle() if self.spec['type'] in ('loadable_module', 'shared_library'): default_wrapper_extension = { 'loadable_module': 'bundle', 'shared_library': 'framework', }[self.spec['type']] wrapper_extension = self.GetPerTargetSetting( 'WRAPPER_EXTENSION', default=default_wrapper_extension) return '.' + self.spec.get('product_extension', wrapper_extension) elif self.spec['type'] == 'executable': if self._IsIosAppExtension() or self._IsIosWatchKitExtension(): return '.' + self.spec.get('product_extension', 'appex') else: return '.' + self.spec.get('product_extension', 'app') else: assert False, "Don't know extension for '%s', target '%s'" % ( self.spec['type'], self.spec['target_name']) def GetProductName(self): """Returns PRODUCT_NAME.""" return self.spec.get('product_name', self.spec['target_name']) def GetFullProductName(self): """Returns FULL_PRODUCT_NAME.""" if self._IsBundle(): return self.GetWrapperName() else: return self._GetStandaloneBinaryPath() def GetWrapperName(self): """Returns the directory name of the bundle represented by this target. Only valid for bundles.""" assert self._IsBundle() return self.GetProductName() + self.GetWrapperExtension() def GetBundleContentsFolderPath(self): """Returns the qualified path to the bundle's contents folder. E.g. Chromium.app/Contents or Foo.bundle/Versions/A. 
Only valid for bundles.""" if self.isIOS: return self.GetWrapperName() assert self._IsBundle() if self.spec['type'] == 'shared_library': return os.path.join( self.GetWrapperName(), 'Versions', self.GetFrameworkVersion()) else: # loadable_modules have a 'Contents' folder like executables. return os.path.join(self.GetWrapperName(), 'Contents') def GetBundleResourceFolder(self): """Returns the qualified path to the bundle's resource folder. E.g. Chromium.app/Contents/Resources. Only valid for bundles.""" assert self._IsBundle() if self.isIOS: return self.GetBundleContentsFolderPath() return os.path.join(self.GetBundleContentsFolderPath(), 'Resources') def GetBundlePlistPath(self): """Returns the qualified path to the bundle's plist file. E.g. Chromium.app/Contents/Info.plist. Only valid for bundles.""" assert self._IsBundle() if self.spec['type'] in ('executable', 'loadable_module'): return os.path.join(self.GetBundleContentsFolderPath(), 'Info.plist') else: return os.path.join(self.GetBundleContentsFolderPath(), 'Resources', 'Info.plist') def GetProductType(self): """Returns the PRODUCT_TYPE of this target.""" if self._IsIosAppExtension(): assert self._IsBundle(), ('ios_app_extension flag requires mac_bundle ' '(target %s)' % self.spec['target_name']) return 'com.apple.product-type.app-extension' if self._IsIosWatchKitExtension(): assert self._IsBundle(), ('ios_watchkit_extension flag requires ' 'mac_bundle (target %s)' % self.spec['target_name']) return 'com.apple.product-type.watchkit-extension' if self._IsIosWatchApp(): assert self._IsBundle(), ('ios_watch_app flag requires mac_bundle ' '(target %s)' % self.spec['target_name']) return 'com.apple.product-type.application.watchapp' if self._IsBundle(): return { 'executable': 'com.apple.product-type.application', 'loadable_module': 'com.apple.product-type.bundle', 'shared_library': 'com.apple.product-type.framework', }[self.spec['type']] else: return { 'executable': 'com.apple.product-type.tool', 'loadable_module': 'com.apple.product-type.library.dynamic', 'shared_library': 'com.apple.product-type.library.dynamic', 'static_library': 'com.apple.product-type.library.static', }[self.spec['type']] def GetMachOType(self): """Returns the MACH_O_TYPE of this target.""" # Weird, but matches Xcode. if not self._IsBundle() and self.spec['type'] == 'executable': return '' return { 'executable': 'mh_execute', 'static_library': 'staticlib', 'shared_library': 'mh_dylib', 'loadable_module': 'mh_bundle', }[self.spec['type']] def _GetBundleBinaryPath(self): """Returns the name of the bundle binary of by this target. E.g. Chromium.app/Contents/MacOS/Chromium. Only valid for bundles.""" assert self._IsBundle() if self.spec['type'] in ('shared_library') or self.isIOS: path = self.GetBundleContentsFolderPath() elif self.spec['type'] in ('executable', 'loadable_module'): path = os.path.join(self.GetBundleContentsFolderPath(), 'MacOS') return os.path.join(path, self.GetExecutableName()) def _GetStandaloneExecutableSuffix(self): if 'product_extension' in self.spec: return '.' + self.spec['product_extension'] return { 'executable': '', 'static_library': '.a', 'shared_library': '.dylib', 'loadable_module': '.so', }[self.spec['type']] def _GetStandaloneExecutablePrefix(self): return self.spec.get('product_prefix', { 'executable': '', 'static_library': 'lib', 'shared_library': 'lib', # Non-bundled loadable_modules are called foo.so for some reason # (that is, .so and no prefix) with the xcode build -- match that. 
'loadable_module': '', }[self.spec['type']]) def _GetStandaloneBinaryPath(self): """Returns the name of the non-bundle binary represented by this target. E.g. hello_world. Only valid for non-bundles.""" assert not self._IsBundle() assert self.spec['type'] in ( 'executable', 'shared_library', 'static_library', 'loadable_module'), ( 'Unexpected type %s' % self.spec['type']) target = self.spec['target_name'] if self.spec['type'] == 'static_library': if target[:3] == 'lib': target = target[3:] elif self.spec['type'] in ('loadable_module', 'shared_library'): if target[:3] == 'lib': target = target[3:] target_prefix = self._GetStandaloneExecutablePrefix() target = self.spec.get('product_name', target) target_ext = self._GetStandaloneExecutableSuffix() return target_prefix + target + target_ext def GetExecutableName(self): """Returns the executable name of the bundle represented by this target. E.g. Chromium.""" if self._IsBundle(): return self.spec.get('product_name', self.spec['target_name']) else: return self._GetStandaloneBinaryPath() def GetExecutablePath(self): """Returns the directory name of the bundle represented by this target. E.g. Chromium.app/Contents/MacOS/Chromium.""" if self._IsBundle(): return self._GetBundleBinaryPath() else: return self._GetStandaloneBinaryPath() def GetActiveArchs(self, configname): """Returns the architectures this target should be built for.""" config_settings = self.xcode_settings[configname] xcode_archs_default = GetXcodeArchsDefault() return xcode_archs_default.ActiveArchs( config_settings.get('ARCHS'), config_settings.get('VALID_ARCHS'), config_settings.get('SDKROOT')) def _GetSdkVersionInfoItem(self, sdk, infoitem): # xcodebuild requires Xcode and can't run on Command Line Tools-only # systems from 10.7 onward. # Since the CLT has no SDK paths anyway, returning None is the # most sensible route and should still do the right thing. try: return GetStdout(['xcrun', '--sdk', sdk, infoitem]) except: pass def _SdkRoot(self, configname): if configname is None: configname = self.configname return self.GetPerConfigSetting('SDKROOT', configname, default='') def _XcodePlatformPath(self, configname=None): sdk_root = self._SdkRoot(configname) if sdk_root not in XcodeSettings._platform_path_cache: platform_path = self._GetSdkVersionInfoItem(sdk_root, '--show-sdk-platform-path') XcodeSettings._platform_path_cache[sdk_root] = platform_path return XcodeSettings._platform_path_cache[sdk_root] def _SdkPath(self, configname=None): sdk_root = self._SdkRoot(configname) if sdk_root.startswith('/'): return sdk_root return self._XcodeSdkPath(sdk_root) def _XcodeSdkPath(self, sdk_root): if sdk_root not in XcodeSettings._sdk_path_cache: sdk_path = self._GetSdkVersionInfoItem(sdk_root, '--show-sdk-path') XcodeSettings._sdk_path_cache[sdk_root] = sdk_path if sdk_root: XcodeSettings._sdk_root_cache[sdk_path] = sdk_root return XcodeSettings._sdk_path_cache[sdk_root] def _AppendPlatformVersionMinFlags(self, lst): self._Appendf(lst, 'MACOSX_DEPLOYMENT_TARGET', '-mmacosx-version-min=%s') if 'IPHONEOS_DEPLOYMENT_TARGET' in self._Settings(): # TODO: Implement this better? 
      sdk_path_basename = os.path.basename(self._SdkPath())
      if sdk_path_basename.lower().startswith('iphonesimulator'):
        self._Appendf(lst, 'IPHONEOS_DEPLOYMENT_TARGET',
                      '-mios-simulator-version-min=%s')
      else:
        self._Appendf(lst, 'IPHONEOS_DEPLOYMENT_TARGET',
                      '-miphoneos-version-min=%s')

  def GetCflags(self, configname, arch=None):
    """Returns flags that need to be added to .c, .cc, .m, and .mm
    compilations."""
    # These functions (and the similar ones below) do not offer complete
    # emulation of all xcode_settings keys. They're implemented on demand.

    self.configname = configname
    cflags = []

    sdk_root = self._SdkPath()
    if 'SDKROOT' in self._Settings() and sdk_root:
      cflags.append('-isysroot %s' % sdk_root)

    if self.header_map_path:
      cflags.append('-I%s' % self.header_map_path)

    if self._Test('CLANG_WARN_CONSTANT_CONVERSION', 'YES', default='NO'):
      cflags.append('-Wconstant-conversion')

    if self._Test('GCC_CHAR_IS_UNSIGNED_CHAR', 'YES', default='NO'):
      cflags.append('-funsigned-char')

    if self._Test('GCC_CW_ASM_SYNTAX', 'YES', default='YES'):
      cflags.append('-fasm-blocks')

    if 'GCC_DYNAMIC_NO_PIC' in self._Settings():
      if self._Settings()['GCC_DYNAMIC_NO_PIC'] == 'YES':
        cflags.append('-mdynamic-no-pic')
    else:
      pass
      # TODO: In this case, it depends on the target. xcode passes
      # mdynamic-no-pic by default for executable and possibly static lib
      # according to mento

    if self._Test('GCC_ENABLE_PASCAL_STRINGS', 'YES', default='YES'):
      cflags.append('-mpascal-strings')

    self._Appendf(cflags, 'GCC_OPTIMIZATION_LEVEL', '-O%s', default='s')

    if self._Test('GCC_GENERATE_DEBUGGING_SYMBOLS', 'YES', default='YES'):
      dbg_format = self._Settings().get('DEBUG_INFORMATION_FORMAT', 'dwarf')
      if dbg_format == 'dwarf':
        cflags.append('-gdwarf-2')
      elif dbg_format == 'stabs':
        raise NotImplementedError('stabs debug format is not supported yet.')
      elif dbg_format == 'dwarf-with-dsym':
        cflags.append('-gdwarf-2')
      else:
        raise NotImplementedError('Unknown debug format %s' % dbg_format)

    if self._Settings().get('GCC_STRICT_ALIASING') == 'YES':
      cflags.append('-fstrict-aliasing')
    elif self._Settings().get('GCC_STRICT_ALIASING') == 'NO':
      cflags.append('-fno-strict-aliasing')

    if self._Test('GCC_SYMBOLS_PRIVATE_EXTERN', 'YES', default='NO'):
      cflags.append('-fvisibility=hidden')

    if self._Test('GCC_TREAT_WARNINGS_AS_ERRORS', 'YES', default='NO'):
      cflags.append('-Werror')

    if self._Test('GCC_WARN_ABOUT_MISSING_NEWLINE', 'YES', default='NO'):
      cflags.append('-Wnewline-eof')

    # In Xcode, this is only activated when GCC_COMPILER_VERSION is clang or
    # llvm-gcc. It also requires a fairly recent libtool, and
    # if the system clang isn't used, DYLD_LIBRARY_PATH needs to contain the
    # path to the libLTO.dylib that matches the used clang.
    if self._Test('LLVM_LTO', 'YES', default='NO'):
      cflags.append('-flto')

    self._AppendPlatformVersionMinFlags(cflags)

    # TODO:
    if self._Test('COPY_PHASE_STRIP', 'YES', default='NO'):
      self._WarnUnimplemented('COPY_PHASE_STRIP')
    self._WarnUnimplemented('GCC_DEBUGGING_SYMBOLS')
    self._WarnUnimplemented('GCC_ENABLE_OBJC_EXCEPTIONS')

    # TODO: This is exported correctly, but assigning to it is not supported.
    self._WarnUnimplemented('MACH_O_TYPE')
    self._WarnUnimplemented('PRODUCT_TYPE')

    if arch is not None:
      archs = [arch]
    else:
      assert self.configname
      archs = self.GetActiveArchs(self.configname)
    if len(archs) != 1:
      # TODO: Supporting fat binaries will be annoying.
self._WarnUnimplemented('ARCHS') archs = ['i386'] cflags.append('-arch ' + archs[0]) if archs[0] in ('i386', 'x86_64'): if self._Test('GCC_ENABLE_SSE3_EXTENSIONS', 'YES', default='NO'): cflags.append('-msse3') if self._Test('GCC_ENABLE_SUPPLEMENTAL_SSE3_INSTRUCTIONS', 'YES', default='NO'): cflags.append('-mssse3') # Note 3rd 's'. if self._Test('GCC_ENABLE_SSE41_EXTENSIONS', 'YES', default='NO'): cflags.append('-msse4.1') if self._Test('GCC_ENABLE_SSE42_EXTENSIONS', 'YES', default='NO'): cflags.append('-msse4.2') cflags += self._Settings().get('WARNING_CFLAGS', []) platform_root = self._XcodePlatformPath(configname) if platform_root and self._IsXCTest(): cflags.append('-F' + platform_root + '/Developer/Library/Frameworks/') if sdk_root: framework_root = sdk_root else: framework_root = '' config = self.spec['configurations'][self.configname] framework_dirs = config.get('mac_framework_dirs', []) for directory in framework_dirs: cflags.append('-F' + directory.replace('$(SDKROOT)', framework_root)) self.configname = None return cflags def GetCflagsC(self, configname): """Returns flags that need to be added to .c, and .m compilations.""" self.configname = configname cflags_c = [] if self._Settings().get('GCC_C_LANGUAGE_STANDARD', '') == 'ansi': cflags_c.append('-ansi') else: self._Appendf(cflags_c, 'GCC_C_LANGUAGE_STANDARD', '-std=%s') cflags_c += self._Settings().get('OTHER_CFLAGS', []) self.configname = None return cflags_c def GetCflagsCC(self, configname): """Returns flags that need to be added to .cc, and .mm compilations.""" self.configname = configname cflags_cc = [] clang_cxx_language_standard = self._Settings().get( 'CLANG_CXX_LANGUAGE_STANDARD') # Note: Don't make c++0x to c++11 so that c++0x can be used with older # clangs that don't understand c++11 yet (like Xcode 4.2's). if clang_cxx_language_standard: cflags_cc.append('-std=%s' % clang_cxx_language_standard) self._Appendf(cflags_cc, 'CLANG_CXX_LIBRARY', '-stdlib=%s') if self._Test('GCC_ENABLE_CPP_RTTI', 'NO', default='YES'): cflags_cc.append('-fno-rtti') if self._Test('GCC_ENABLE_CPP_EXCEPTIONS', 'NO', default='YES'): cflags_cc.append('-fno-exceptions') if self._Test('GCC_INLINES_ARE_PRIVATE_EXTERN', 'YES', default='NO'): cflags_cc.append('-fvisibility-inlines-hidden') if self._Test('GCC_THREADSAFE_STATICS', 'NO', default='YES'): cflags_cc.append('-fno-threadsafe-statics') # Note: This flag is a no-op for clang, it only has an effect for gcc. if self._Test('GCC_WARN_ABOUT_INVALID_OFFSETOF_MACRO', 'NO', default='YES'): cflags_cc.append('-Wno-invalid-offsetof') other_ccflags = [] for flag in self._Settings().get('OTHER_CPLUSPLUSFLAGS', ['$(inherited)']): # TODO: More general variable expansion. Missing in many other places too. 
if flag in ('$inherited', '$(inherited)', '${inherited}'): flag = '$OTHER_CFLAGS' if flag in ('$OTHER_CFLAGS', '$(OTHER_CFLAGS)', '${OTHER_CFLAGS}'): other_ccflags += self._Settings().get('OTHER_CFLAGS', []) else: other_ccflags.append(flag) cflags_cc += other_ccflags self.configname = None return cflags_cc def _AddObjectiveCGarbageCollectionFlags(self, flags): gc_policy = self._Settings().get('GCC_ENABLE_OBJC_GC', 'unsupported') if gc_policy == 'supported': flags.append('-fobjc-gc') elif gc_policy == 'required': flags.append('-fobjc-gc-only') def _AddObjectiveCARCFlags(self, flags): if self._Test('CLANG_ENABLE_OBJC_ARC', 'YES', default='NO'): flags.append('-fobjc-arc') def _AddObjectiveCMissingPropertySynthesisFlags(self, flags): if self._Test('CLANG_WARN_OBJC_MISSING_PROPERTY_SYNTHESIS', 'YES', default='NO'): flags.append('-Wobjc-missing-property-synthesis') def GetCflagsObjC(self, configname): """Returns flags that need to be added to .m compilations.""" self.configname = configname cflags_objc = [] self._AddObjectiveCGarbageCollectionFlags(cflags_objc) self._AddObjectiveCARCFlags(cflags_objc) self._AddObjectiveCMissingPropertySynthesisFlags(cflags_objc) self.configname = None return cflags_objc def GetCflagsObjCC(self, configname): """Returns flags that need to be added to .mm compilations.""" self.configname = configname cflags_objcc = [] self._AddObjectiveCGarbageCollectionFlags(cflags_objcc) self._AddObjectiveCARCFlags(cflags_objcc) self._AddObjectiveCMissingPropertySynthesisFlags(cflags_objcc) if self._Test('GCC_OBJC_CALL_CXX_CDTORS', 'YES', default='NO'): cflags_objcc.append('-fobjc-call-cxx-cdtors') self.configname = None return cflags_objcc def GetInstallNameBase(self): """Return DYLIB_INSTALL_NAME_BASE for this target.""" # Xcode sets this for shared_libraries, and for nonbundled loadable_modules. if (self.spec['type'] != 'shared_library' and (self.spec['type'] != 'loadable_module' or self._IsBundle())): return None install_base = self.GetPerTargetSetting( 'DYLIB_INSTALL_NAME_BASE', default='/Library/Frameworks' if self._IsBundle() else '/usr/local/lib') return install_base def _StandardizePath(self, path): """Do :standardizepath processing for path.""" # I'm not quite sure what :standardizepath does. Just call normpath(), # but don't let @executable_path/../foo collapse to foo. if '/' in path: prefix, rest = '', path if path.startswith('@'): prefix, rest = path.split('/', 1) rest = os.path.normpath(rest) # :standardizepath path = os.path.join(prefix, rest) return path def GetInstallName(self): """Return LD_DYLIB_INSTALL_NAME for this target.""" # Xcode sets this for shared_libraries, and for nonbundled loadable_modules. if (self.spec['type'] != 'shared_library' and (self.spec['type'] != 'loadable_module' or self._IsBundle())): return None default_install_name = \ '$(DYLIB_INSTALL_NAME_BASE:standardizepath)/$(EXECUTABLE_PATH)' install_name = self.GetPerTargetSetting( 'LD_DYLIB_INSTALL_NAME', default=default_install_name) # Hardcode support for the variables used in chromium for now, to # unblock people using the make build. 
    if '$' in install_name:
      assert install_name in ('$(DYLIB_INSTALL_NAME_BASE:standardizepath)/'
          '$(WRAPPER_NAME)/$(PRODUCT_NAME)', default_install_name), (
          'Variables in LD_DYLIB_INSTALL_NAME are not generally supported '
          'yet in target \'%s\' (got \'%s\')' %
              (self.spec['target_name'], install_name))

      install_name = install_name.replace(
          '$(DYLIB_INSTALL_NAME_BASE:standardizepath)',
          self._StandardizePath(self.GetInstallNameBase()))
      if self._IsBundle():
        # These are only valid for bundles, hence the |if|.
        install_name = install_name.replace(
            '$(WRAPPER_NAME)', self.GetWrapperName())
        install_name = install_name.replace(
            '$(PRODUCT_NAME)', self.GetProductName())
      else:
        assert '$(WRAPPER_NAME)' not in install_name
        assert '$(PRODUCT_NAME)' not in install_name

      install_name = install_name.replace(
          '$(EXECUTABLE_PATH)', self.GetExecutablePath())
    return install_name

  def _MapLinkerFlagFilename(self, ldflag, gyp_to_build_path):
    """Checks if ldflag contains a filename and if so remaps it from
    gyp-directory-relative to build-directory-relative."""
    # This list is expanded on demand.
    # They get matched as:
    #   -exported_symbols_list file
    #   -Wl,exported_symbols_list file
    #   -Wl,exported_symbols_list,file
    LINKER_FILE = r'(\S+)'
    WORD = r'\S+'
    linker_flags = [
      ['-exported_symbols_list', LINKER_FILE],    # Needed for NaCl.
      ['-unexported_symbols_list', LINKER_FILE],
      ['-reexported_symbols_list', LINKER_FILE],
      ['-sectcreate', WORD, WORD, LINKER_FILE],   # Needed for remoting.
    ]
    for flag_pattern in linker_flags:
      regex = re.compile('(?:-Wl,)?' + '[ ,]'.join(flag_pattern))
      m = regex.match(ldflag)
      if m:
        ldflag = ldflag[:m.start(1)] + gyp_to_build_path(m.group(1)) + \
                 ldflag[m.end(1):]
    # Required for ffmpeg (no idea why they don't use LIBRARY_SEARCH_PATHS,
    # TODO(thakis): Update ffmpeg.gyp):
    if ldflag.startswith('-L'):
      ldflag = '-L' + gyp_to_build_path(ldflag[len('-L'):])
    return ldflag

  def GetLdflags(self, configname, product_dir, gyp_to_build_path, arch=None):
    """Returns flags that need to be passed to the linker.

    Args:
        configname: The name of the configuration to get ld flags for.
        product_dir: The directory where products such as static and dynamic
            libraries are placed. This is added to the library search path.
        gyp_to_build_path: A function that converts paths relative to the
            current gyp file to paths relative to the build directory.
    """
    self.configname = configname
    ldflags = []

    # The xcode build is relative to a gyp file's directory, and OTHER_LDFLAGS
    # can contain entries that depend on this. Explicitly absolutify these.
for ldflag in self._Settings().get('OTHER_LDFLAGS', []): ldflags.append(self._MapLinkerFlagFilename(ldflag, gyp_to_build_path)) if self._Test('DEAD_CODE_STRIPPING', 'YES', default='NO'): ldflags.append('-Wl,-dead_strip') if self._Test('PREBINDING', 'YES', default='NO'): ldflags.append('-Wl,-prebind') self._Appendf( ldflags, 'DYLIB_COMPATIBILITY_VERSION', '-compatibility_version %s') self._Appendf( ldflags, 'DYLIB_CURRENT_VERSION', '-current_version %s') self._AppendPlatformVersionMinFlags(ldflags) if 'SDKROOT' in self._Settings() and self._SdkPath(): ldflags.append('-isysroot ' + self._SdkPath()) for library_path in self._Settings().get('LIBRARY_SEARCH_PATHS', []): ldflags.append('-L' + gyp_to_build_path(library_path)) if 'ORDER_FILE' in self._Settings(): ldflags.append('-Wl,-order_file ' + '-Wl,' + gyp_to_build_path( self._Settings()['ORDER_FILE'])) if arch is not None: archs = [arch] else: assert self.configname archs = self.GetActiveArchs(self.configname) if len(archs) != 1: # TODO: Supporting fat binaries will be annoying. self._WarnUnimplemented('ARCHS') archs = ['i386'] ldflags.append('-arch ' + archs[0]) # Xcode adds the product directory by default. ldflags.append('-L' + product_dir) install_name = self.GetInstallName() if install_name and self.spec['type'] != 'loadable_module': ldflags.append('-install_name ' + install_name.replace(' ', r'\ ')) for rpath in self._Settings().get('LD_RUNPATH_SEARCH_PATHS', []): ldflags.append('-Wl,-rpath,' + rpath) sdk_root = self._SdkPath() if not sdk_root: sdk_root = '' config = self.spec['configurations'][self.configname] framework_dirs = config.get('mac_framework_dirs', []) for directory in framework_dirs: ldflags.append('-F' + directory.replace('$(SDKROOT)', sdk_root)) platform_root = self._XcodePlatformPath(configname) if sdk_root and platform_root and self._IsXCTest(): ldflags.append('-F' + platform_root + '/Developer/Library/Frameworks/') ldflags.append('-framework XCTest') is_extension = self._IsIosAppExtension() or self._IsIosWatchKitExtension() if sdk_root and is_extension: # Adds the link flags for extensions. These flags are common for all # extensions and provide loader and main function. # These flags reflect the compilation options used by xcode to compile # extensions. if XcodeVersion() < '0900': ldflags.append('-lpkstart') ldflags.append(sdk_root + '/System/Library/PrivateFrameworks/PlugInKit.framework/PlugInKit') else: ldflags.append('-e _NSExtensionMain') ldflags.append('-fapplication-extension') self._Appendf(ldflags, 'CLANG_CXX_LIBRARY', '-stdlib=%s') self.configname = None return ldflags def GetLibtoolflags(self, configname): """Returns flags that need to be passed to the static linker. Args: configname: The name of the configuration to get ld flags for. """ self.configname = configname libtoolflags = [] for libtoolflag in self._Settings().get('OTHER_LDFLAGS', []): libtoolflags.append(libtoolflag) # TODO(thakis): ARCHS? self.configname = None return libtoolflags def GetPerTargetSettings(self): """Gets a list of all the per-target settings. 
    This will only fetch keys whose values are the same across all
    configurations."""
    first_pass = True
    result = {}
    for configname in sorted(self.xcode_settings.keys()):
      if first_pass:
        result = dict(self.xcode_settings[configname])
        first_pass = False
      else:
        for key, value in self.xcode_settings[configname].iteritems():
          if key not in result:
            continue
          elif result[key] != value:
            del result[key]
    return result

  def GetPerConfigSetting(self, setting, configname, default=None):
    if configname in self.xcode_settings:
      return self.xcode_settings[configname].get(setting, default)
    else:
      return self.GetPerTargetSetting(setting, default)

  def GetPerTargetSetting(self, setting, default=None):
    """Tries to get xcode_settings.setting from spec. Assumes that the setting
       has the same value in all configurations and throws otherwise."""
    is_first_pass = True
    result = None
    for configname in sorted(self.xcode_settings.keys()):
      if is_first_pass:
        result = self.xcode_settings[configname].get(setting, None)
        is_first_pass = False
      else:
        assert result == self.xcode_settings[configname].get(setting, None), (
            "Expected per-target setting for '%s', got per-config setting "
            "(target %s)" % (setting, self.spec['target_name']))
    if result is None:
      return default
    return result

  def _GetStripPostbuilds(self, configname, output_binary, quiet):
    """Returns a list of shell commands that contain the shell commands
    necessary to strip this target's binary. These should be run as postbuilds
    before the actual postbuilds run."""
    self.configname = configname

    result = []
    if (self._Test('DEPLOYMENT_POSTPROCESSING', 'YES', default='NO') and
        self._Test('STRIP_INSTALLED_PRODUCT', 'YES', default='NO')):

      default_strip_style = 'debugging'
      if ((self.spec['type'] == 'loadable_module' or self._IsIosAppExtension())
          and self._IsBundle()):
        default_strip_style = 'non-global'
      elif self.spec['type'] == 'executable':
        default_strip_style = 'all'

      strip_style = self._Settings().get('STRIP_STYLE', default_strip_style)
      strip_flags = {
        'all': '',
        'non-global': '-x',
        'debugging': '-S',
      }[strip_style]

      explicit_strip_flags = self._Settings().get('STRIPFLAGS', '')
      if explicit_strip_flags:
        strip_flags += ' ' + _NormalizeEnvVarReferences(explicit_strip_flags)

      if not quiet:
        result.append('echo STRIP\\(%s\\)' % self.spec['target_name'])
      result.append('strip %s %s' % (strip_flags, output_binary))

    self.configname = None
    return result

  def _GetDebugInfoPostbuilds(self, configname, output, output_binary, quiet):
    """Returns a list of shell commands that contain the shell commands
    necessary to massage this target's debug information. These should be run
    as postbuilds before the actual postbuilds run."""
    self.configname = configname

    # For static libraries, no dSYMs are created.
    result = []
    if (self._Test('GCC_GENERATE_DEBUGGING_SYMBOLS', 'YES', default='YES') and
        self._Test(
            'DEBUG_INFORMATION_FORMAT', 'dwarf-with-dsym', default='dwarf') and
        self.spec['type'] != 'static_library'):
      if not quiet:
        result.append('echo DSYMUTIL\\(%s\\)' % self.spec['target_name'])
      result.append('dsymutil %s -o %s' % (output_binary, output + '.dSYM'))

    self.configname = None
    return result

  def _GetTargetPostbuilds(self, configname, output, output_binary,
                           quiet=False):
    """Returns a list of shell commands that contain the shell commands
    to run as postbuilds for this target, before the actual postbuilds."""
    # dSYMs need to build before stripping happens.
    return (
        self._GetDebugInfoPostbuilds(configname, output, output_binary, quiet) +
        self._GetStripPostbuilds(configname, output_binary, quiet))

  def _GetIOSPostbuilds(self, configname, output_binary):
    """Return a shell command to codesign the iOS output binary so it can
    be deployed to a device.  This should be run as the very last step of the
    build."""
    if not (self.isIOS and
        (self.spec['type'] == 'executable' or self._IsXCTest()) or
        self.IsIosFramework()):
      return []

    settings = self.xcode_settings[configname]
    key = self._GetIOSCodeSignIdentityKey(settings)
    if not key:
      return []

    # Warn for any unimplemented signing xcode keys.
    unimpl = ['OTHER_CODE_SIGN_FLAGS']
    unimpl = set(unimpl) & set(self.xcode_settings[configname].keys())
    if unimpl:
      print 'Warning: Some codesign keys not implemented, ignoring: %s' % (
          ', '.join(sorted(unimpl)))

    return ['%s code-sign-bundle "%s" "%s" "%s"' % (
        os.path.join('${TARGET_BUILD_DIR}', 'gyp-mac-tool'), key,
        settings.get('CODE_SIGN_ENTITLEMENTS', ''),
        settings.get('PROVISIONING_PROFILE', ''))
    ]

  def _GetIOSCodeSignIdentityKey(self, settings):
    identity = settings.get('CODE_SIGN_IDENTITY')
    if not identity:
      return None
    if identity not in XcodeSettings._codesigning_key_cache:
      output = subprocess.check_output(
          ['security', 'find-identity', '-p', 'codesigning', '-v'])
      for line in output.splitlines():
        if identity in line:
          fingerprint = line.split()[1]
          cache = XcodeSettings._codesigning_key_cache
          assert identity not in cache or fingerprint == cache[identity], (
              "Multiple codesigning fingerprints for identity: %s" % identity)
          XcodeSettings._codesigning_key_cache[identity] = fingerprint
    return XcodeSettings._codesigning_key_cache.get(identity, '')

  def AddImplicitPostbuilds(self, configname, output, output_binary,
                            postbuilds=[], quiet=False):
    """Returns a list of shell commands that should run before and after
    |postbuilds|."""
    assert output_binary is not None
    pre = self._GetTargetPostbuilds(configname, output, output_binary, quiet)
    post = self._GetIOSPostbuilds(configname, output_binary)
    return pre + postbuilds + post

  def _AdjustLibrary(self, library, config_name=None):
    if library.endswith('.framework'):
      l = '-framework ' + os.path.splitext(os.path.basename(library))[0]
    else:
      m = self.library_re.match(library)
      if m:
        l = '-l' + m.group(1)
      else:
        l = library

    sdk_root = self._SdkPath(config_name)
    if not sdk_root:
      sdk_root = ''

    # Xcode 7 started shipping with ".tbd" (text based stubs) files instead of
    # ".dylib" without providing a real support for them. What it does, for
    # "/usr/lib" libraries, is do "-L/usr/lib -lname" which is dependent on the
    # library order and causes collisions when building Chrome.
    #
    # Instead substitute ".tbd" for ".dylib" in the generated project when the
    # following conditions are both true:
    # - library is referenced in the gyp file as "$(SDKROOT)/**/*.dylib",
    # - the ".dylib" file does not exist but a ".tbd" file does.
    library = l.replace('$(SDKROOT)', sdk_root)
    if l.startswith('$(SDKROOT)'):
      basename, ext = os.path.splitext(library)
      if ext == '.dylib' and not os.path.exists(library):
        tbd_library = basename + '.tbd'
        if os.path.exists(tbd_library):
          library = tbd_library
    return library

  def AdjustLibraries(self, libraries, config_name=None):
    """Transforms entries like 'Cocoa.framework' in libraries into entries like
    '-framework Cocoa', 'libcrypto.dylib' into '-lcrypto', etc.
""" libraries = [self._AdjustLibrary(library, config_name) for library in libraries] return libraries def _BuildMachineOSBuild(self): return GetStdout(['sw_vers', '-buildVersion']) def _XcodeIOSDeviceFamily(self, configname): family = self.xcode_settings[configname].get('TARGETED_DEVICE_FAMILY', '1') return [int(x) for x in family.split(',')] def GetExtraPlistItems(self, configname=None): """Returns a dictionary with extra items to insert into Info.plist.""" if configname not in XcodeSettings._plist_cache: cache = {} cache['BuildMachineOSBuild'] = self._BuildMachineOSBuild() xcode, xcode_build = XcodeVersion() cache['DTXcode'] = xcode cache['DTXcodeBuild'] = xcode_build compiler = self.xcode_settings[configname].get('GCC_VERSION') if compiler is not None: cache['DTCompiler'] = compiler sdk_root = self._SdkRoot(configname) if not sdk_root: sdk_root = self._DefaultSdkRoot() sdk_version = self._GetSdkVersionInfoItem(sdk_root, '--show-sdk-version') cache['DTSDKName'] = sdk_root + (sdk_version or '') if xcode >= '0720': cache['DTSDKBuild'] = self._GetSdkVersionInfoItem( sdk_root, '--show-sdk-build-version') elif xcode >= '0430': cache['DTSDKBuild'] = sdk_version else: cache['DTSDKBuild'] = cache['BuildMachineOSBuild'] if self.isIOS: cache['MinimumOSVersion'] = self.xcode_settings[configname].get( 'IPHONEOS_DEPLOYMENT_TARGET') cache['DTPlatformName'] = sdk_root cache['DTPlatformVersion'] = sdk_version if configname.endswith("iphoneos"): cache['CFBundleSupportedPlatforms'] = ['iPhoneOS'] cache['DTPlatformBuild'] = cache['DTSDKBuild'] else: cache['CFBundleSupportedPlatforms'] = ['iPhoneSimulator'] # This is weird, but Xcode sets DTPlatformBuild to an empty field # for simulator builds. cache['DTPlatformBuild'] = "" XcodeSettings._plist_cache[configname] = cache # Include extra plist items that are per-target, not per global # XcodeSettings. items = dict(XcodeSettings._plist_cache[configname]) if self.isIOS: items['UIDeviceFamily'] = self._XcodeIOSDeviceFamily(configname) return items def _DefaultSdkRoot(self): """Returns the default SDKROOT to use. Prior to version 5.0.0, if SDKROOT was not explicitly set in the Xcode project, then the environment variable was empty. Starting with this version, Xcode uses the name of the newest SDK installed. """ xcode_version, xcode_build = XcodeVersion() if xcode_version < '0500': return '' default_sdk_path = self._XcodeSdkPath('') default_sdk_root = XcodeSettings._sdk_root_cache.get(default_sdk_path) if default_sdk_root: return default_sdk_root try: all_sdks = GetStdout(['xcodebuild', '-showsdks']) except: # If xcodebuild fails, there will be no valid SDKs return '' for line in all_sdks.splitlines(): items = line.split() if len(items) >= 3 and items[-2] == '-sdk': sdk_root = items[-1] sdk_path = self._XcodeSdkPath(sdk_root) if sdk_path == default_sdk_path: return sdk_root return '' class MacPrefixHeader(object): """A class that helps with emulating Xcode's GCC_PREFIX_HEADER feature. This feature consists of several pieces: * If GCC_PREFIX_HEADER is present, all compilations in that project get an additional |-include path_to_prefix_header| cflag. * If GCC_PRECOMPILE_PREFIX_HEADER is present too, then the prefix header is instead compiled, and all other compilations in the project get an additional |-include path_to_compiled_header| instead. + Compiled prefix headers have the extension gch. There is one gch file for every language used in the project (c, cc, m, mm), since gch files for different languages aren't compatible. 
+ gch files themselves are built with the target's normal cflags, but they obviously don't get the |-include| flag. Instead, they need a -x flag that describes their language. + All o files in the target need to depend on the gch file, to make sure it's built before any o file is built. This class helps with some of these tasks, but it needs help from the build system for writing dependencies to the gch files, for writing build commands for the gch files, and for figuring out the location of the gch files. """ def __init__(self, xcode_settings, gyp_path_to_build_path, gyp_path_to_build_output): """If xcode_settings is None, all methods on this class are no-ops. Args: gyp_path_to_build_path: A function that takes a gyp-relative path, and returns a path relative to the build directory. gyp_path_to_build_output: A function that takes a gyp-relative path and a language code ('c', 'cc', 'm', or 'mm'), and that returns a path to where the output of precompiling that path for that language should be placed (without the trailing '.gch'). """ # This doesn't support per-configuration prefix headers. Good enough # for now. self.header = None self.compile_headers = False if xcode_settings: self.header = xcode_settings.GetPerTargetSetting('GCC_PREFIX_HEADER') self.compile_headers = xcode_settings.GetPerTargetSetting( 'GCC_PRECOMPILE_PREFIX_HEADER', default='NO') != 'NO' self.compiled_headers = {} if self.header: if self.compile_headers: for lang in ['c', 'cc', 'm', 'mm']: self.compiled_headers[lang] = gyp_path_to_build_output( self.header, lang) self.header = gyp_path_to_build_path(self.header) def _CompiledHeader(self, lang, arch): assert self.compile_headers h = self.compiled_headers[lang] if arch: h += '.' + arch return h def GetInclude(self, lang, arch=None): """Gets the cflags to include the prefix header for language |lang|.""" if self.compile_headers and lang in self.compiled_headers: return '-include %s' % self._CompiledHeader(lang, arch) elif self.header: return '-include %s' % self.header else: return '' def _Gch(self, lang, arch): """Returns the actual file name of the prefix header for language |lang|.""" assert self.compile_headers return self._CompiledHeader(lang, arch) + '.gch' def GetObjDependencies(self, sources, objs, arch=None): """Given a list of source files and the corresponding object files, returns a list of (source, object, gch) tuples, where |gch| is the build-directory relative path to the gch file each object file depends on. |compilable[i]| has to be the source file belonging to |objs[i]|.""" if not self.header or not self.compile_headers: return [] result = [] for source, obj in zip(sources, objs): ext = os.path.splitext(source)[1] lang = { '.c': 'c', '.cpp': 'cc', '.cc': 'cc', '.cxx': 'cc', '.m': 'm', '.mm': 'mm', }.get(ext, None) if lang: result.append((source, obj, self._Gch(lang, arch))) return result def GetPchBuildCommands(self, arch=None): """Returns [(path_to_gch, language_flag, language, header)]. |path_to_gch| and |header| are relative to the build directory. 
""" if not self.header or not self.compile_headers: return [] return [ (self._Gch('c', arch), '-x c-header', 'c', self.header), (self._Gch('cc', arch), '-x c++-header', 'cc', self.header), (self._Gch('m', arch), '-x objective-c-header', 'm', self.header), (self._Gch('mm', arch), '-x objective-c++-header', 'mm', self.header), ] def XcodeVersion(): """Returns a tuple of version and build version of installed Xcode.""" # `xcodebuild -version` output looks like # Xcode 4.6.3 # Build version 4H1503 # or like # Xcode 3.2.6 # Component versions: DevToolsCore-1809.0; DevToolsSupport-1806.0 # BuildVersion: 10M2518 # Convert that to '0463', '4H1503'. global XCODE_VERSION_CACHE if XCODE_VERSION_CACHE: return XCODE_VERSION_CACHE try: version_list = GetStdout(['xcodebuild', '-version']).splitlines() # In some circumstances xcodebuild exits 0 but doesn't return # the right results; for example, a user on 10.7 or 10.8 with # a bogus path set via xcode-select # In that case this may be a CLT-only install so fall back to # checking that version. if len(version_list) < 2: raise GypError("xcodebuild returned unexpected results") except: version = CLTVersion() if version: version = re.match(r'(\d\.\d\.?\d*)', version).groups()[0] else: raise GypError("No Xcode or CLT version detected!") # The CLT has no build information, so we return an empty string. version_list = [version, ''] version = version_list[0] build = version_list[-1] # Be careful to convert "4.2" to "0420": version = version.split()[-1].replace('.', '') version = (version + '0' * (3 - len(version))).zfill(4) if build: build = build.split()[-1] XCODE_VERSION_CACHE = (version, build) return XCODE_VERSION_CACHE # This function ported from the logic in Homebrew's CLT version check def CLTVersion(): """Returns the version of command-line tools from pkgutil.""" # pkgutil output looks like # package-id: com.apple.pkg.CLTools_Executables # version: 5.0.1.0.1.1382131676 # volume: / # location: / # install-time: 1382544035 # groups: com.apple.FindSystemFiles.pkg-group com.apple.DevToolsBoth.pkg-group com.apple.DevToolsNonRelocatableShared.pkg-group STANDALONE_PKG_ID = "com.apple.pkg.DeveloperToolsCLILeo" FROM_XCODE_PKG_ID = "com.apple.pkg.DeveloperToolsCLI" MAVERICKS_PKG_ID = "com.apple.pkg.CLTools_Executables" regex = re.compile('version: (?P<version>.+)') for key in [MAVERICKS_PKG_ID, STANDALONE_PKG_ID, FROM_XCODE_PKG_ID]: try: output = GetStdout(['/usr/sbin/pkgutil', '--pkg-info', key]) return re.search(regex, output).groupdict()['version'] except: continue def GetStdout(cmdlist): """Returns the content of standard output returned by invoking |cmdlist|. Raises |GypError| if the command return with a non-zero return code.""" job = subprocess.Popen(cmdlist, stdout=subprocess.PIPE) out = job.communicate()[0] if job.returncode != 0: sys.stderr.write(out + '\n') raise GypError('Error %d running %s' % (job.returncode, cmdlist[0])) return out.rstrip('\n') def MergeGlobalXcodeSettingsToSpec(global_dict, spec): """Merges the global xcode_settings dictionary into each configuration of the target represented by spec. For keys that are both in the global and the local xcode_settings dict, the local key gets precendence. """ # The xcode generator special-cases global xcode_settings and does something # that amounts to merging in the global xcode_settings into each local # xcode_settings dict. 
global_xcode_settings = global_dict.get('xcode_settings', {}) for config in spec['configurations'].values(): if 'xcode_settings' in config: new_settings = global_xcode_settings.copy() new_settings.update(config['xcode_settings']) config['xcode_settings'] = new_settings def IsMacBundle(flavor, spec): """Returns if |spec| should be treated as a bundle. Bundles are directories with a certain subdirectory structure, instead of just a single file. Bundle rules do not produce a binary but also package resources into that directory.""" is_mac_bundle = int(spec.get('mac_xctest_bundle', 0)) != 0 or \ (int(spec.get('mac_bundle', 0)) != 0 and flavor == 'mac') if is_mac_bundle: assert spec['type'] != 'none', ( 'mac_bundle targets cannot have type none (target "%s")' % spec['target_name']) return is_mac_bundle def GetMacBundleResources(product_dir, xcode_settings, resources): """Yields (output, resource) pairs for every resource in |resources|. Only call this for mac bundle targets. Args: product_dir: Path to the directory containing the output bundle, relative to the build directory. xcode_settings: The XcodeSettings of the current target. resources: A list of bundle resources, relative to the build directory. """ dest = os.path.join(product_dir, xcode_settings.GetBundleResourceFolder()) for res in resources: output = dest # The make generator doesn't support it, so forbid it everywhere # to keep the generators more interchangable. assert ' ' not in res, ( "Spaces in resource filenames not supported (%s)" % res) # Split into (path,file). res_parts = os.path.split(res) # Now split the path into (prefix,maybe.lproj). lproj_parts = os.path.split(res_parts[0]) # If the resource lives in a .lproj bundle, add that to the destination. if lproj_parts[1].endswith('.lproj'): output = os.path.join(output, lproj_parts[1]) output = os.path.join(output, res_parts[1]) # Compiled XIB files are referred to by .nib. if output.endswith('.xib'): output = os.path.splitext(output)[0] + '.nib' # Compiled storyboard files are referred to by .storyboardc. if output.endswith('.storyboard'): output = os.path.splitext(output)[0] + '.storyboardc' yield output, res def GetMacInfoPlist(product_dir, xcode_settings, gyp_path_to_build_path): """Returns (info_plist, dest_plist, defines, extra_env), where: * |info_plist| is the source plist path, relative to the build directory, * |dest_plist| is the destination plist path, relative to the build directory, * |defines| is a list of preprocessor defines (empty if the plist shouldn't be preprocessed, * |extra_env| is a dict of env variables that should be exported when invoking |mac_tool copy-info-plist|. Only call this for mac bundle targets. Args: product_dir: Path to the directory containing the output bundle, relative to the build directory. xcode_settings: The XcodeSettings of the current target. gyp_to_build_path: A function that converts paths relative to the current gyp file to paths relative to the build direcotry. """ info_plist = xcode_settings.GetPerTargetSetting('INFOPLIST_FILE') if not info_plist: return None, None, [], {} # The make generator doesn't support it, so forbid it everywhere # to keep the generators more interchangable. assert ' ' not in info_plist, ( "Spaces in Info.plist filenames not supported (%s)" % info_plist) info_plist = gyp_path_to_build_path(info_plist) # If explicitly set to preprocess the plist, invoke the C preprocessor and # specify any defines as -D flags. 
if xcode_settings.GetPerTargetSetting( 'INFOPLIST_PREPROCESS', default='NO') == 'YES': # Create an intermediate file based on the path. defines = shlex.split(xcode_settings.GetPerTargetSetting( 'INFOPLIST_PREPROCESSOR_DEFINITIONS', default='')) else: defines = [] dest_plist = os.path.join(product_dir, xcode_settings.GetBundlePlistPath()) extra_env = xcode_settings.GetPerTargetSettings() return info_plist, dest_plist, defines, extra_env def _GetXcodeEnv(xcode_settings, built_products_dir, srcroot, configuration, additional_settings=None): """Return the environment variables that Xcode would set. See http://developer.apple.com/library/mac/#documentation/DeveloperTools/Reference/XcodeBuildSettingRef/1-Build_Setting_Reference/build_setting_ref.html#//apple_ref/doc/uid/TP40003931-CH3-SW153 for a full list. Args: xcode_settings: An XcodeSettings object. If this is None, this function returns an empty dict. built_products_dir: Absolute path to the built products dir. srcroot: Absolute path to the source root. configuration: The build configuration name. additional_settings: An optional dict with more values to add to the result. """ if not xcode_settings: return {} # This function is considered a friend of XcodeSettings, so let it reach into # its implementation details. spec = xcode_settings.spec # These are filled in on a as-needed basis. env = { 'BUILT_FRAMEWORKS_DIR' : built_products_dir, 'BUILT_PRODUCTS_DIR' : built_products_dir, 'CONFIGURATION' : configuration, 'PRODUCT_NAME' : xcode_settings.GetProductName(), # See /Developer/Platforms/MacOSX.platform/Developer/Library/Xcode/Specifications/MacOSX\ Product\ Types.xcspec for FULL_PRODUCT_NAME 'SRCROOT' : srcroot, 'SOURCE_ROOT': '${SRCROOT}', # This is not true for static libraries, but currently the env is only # written for bundles: 'TARGET_BUILD_DIR' : built_products_dir, 'TEMP_DIR' : '${TMPDIR}', 'XCODE_VERSION_ACTUAL' : XcodeVersion()[0], } if xcode_settings.GetPerConfigSetting('SDKROOT', configuration): env['SDKROOT'] = xcode_settings._SdkPath(configuration) else: env['SDKROOT'] = '' if xcode_settings.mac_toolchain_dir: env['DEVELOPER_DIR'] = xcode_settings.mac_toolchain_dir if spec['type'] in ( 'executable', 'static_library', 'shared_library', 'loadable_module'): env['EXECUTABLE_NAME'] = xcode_settings.GetExecutableName() env['EXECUTABLE_PATH'] = xcode_settings.GetExecutablePath() env['FULL_PRODUCT_NAME'] = xcode_settings.GetFullProductName() mach_o_type = xcode_settings.GetMachOType() if mach_o_type: env['MACH_O_TYPE'] = mach_o_type env['PRODUCT_TYPE'] = xcode_settings.GetProductType() if xcode_settings._IsBundle(): env['CONTENTS_FOLDER_PATH'] = \ xcode_settings.GetBundleContentsFolderPath() env['UNLOCALIZED_RESOURCES_FOLDER_PATH'] = \ xcode_settings.GetBundleResourceFolder() env['INFOPLIST_PATH'] = xcode_settings.GetBundlePlistPath() env['WRAPPER_NAME'] = xcode_settings.GetWrapperName() install_name = xcode_settings.GetInstallName() if install_name: env['LD_DYLIB_INSTALL_NAME'] = install_name install_name_base = xcode_settings.GetInstallNameBase() if install_name_base: env['DYLIB_INSTALL_NAME_BASE'] = install_name_base if XcodeVersion() >= '0500' and not env.get('SDKROOT'): sdk_root = xcode_settings._SdkRoot(configuration) if not sdk_root: sdk_root = xcode_settings._XcodeSdkPath('') env['SDKROOT'] = sdk_root if not additional_settings: additional_settings = {} else: # Flatten lists to strings. 
for k in additional_settings: if not isinstance(additional_settings[k], str): additional_settings[k] = ' '.join(additional_settings[k]) additional_settings.update(env) for k in additional_settings: additional_settings[k] = _NormalizeEnvVarReferences(additional_settings[k]) return additional_settings def _NormalizeEnvVarReferences(str): """Takes a string containing variable references in the form ${FOO}, $(FOO), or $FOO, and returns a string with all variable references in the form ${FOO}. """ # $FOO -> ${FOO} str = re.sub(r'\$([a-zA-Z_][a-zA-Z0-9_]*)', r'${\1}', str) # $(FOO) -> ${FOO} matches = re.findall(r'(\$\(([a-zA-Z0-9\-_]+)\))', str) for match in matches: to_replace, variable = match assert '$(' not in match, '$($(FOO)) variables not supported: ' + match str = str.replace(to_replace, '${' + variable + '}') return str def ExpandEnvVars(string, expansions): """Expands ${VARIABLES}, $(VARIABLES), and $VARIABLES in string per the expansions list. If the variable expands to something that references another variable, this variable is expanded as well if it's in env -- until no variables present in env are left.""" for k, v in reversed(expansions): string = string.replace('${' + k + '}', v) string = string.replace('$(' + k + ')', v) string = string.replace('$' + k, v) return string def _TopologicallySortedEnvVarKeys(env): """Takes a dict |env| whose values are strings that can refer to other keys, for example env['foo'] = '$(bar) and $(baz)'. Returns a list L of all keys of env such that key2 is after key1 in L if env[key2] refers to env[key1]. Throws an Exception in case of dependency cycles. """ # Since environment variables can refer to other variables, the evaluation # order is important. Below is the logic to compute the dependency graph # and sort it. regex = re.compile(r'\$\{([a-zA-Z0-9\-_]+)\}') def GetEdges(node): # Use a definition of edges such that user_of_variable -> used_varible. # This happens to be easier in this case, since a variable's # definition contains all variables it references in a single string. # We can then reverse the result of the topological sort at the end. # Since: reverse(topsort(DAG)) = topsort(reverse_edges(DAG)) matches = set([v for v in regex.findall(env[node]) if v in env]) for dependee in matches: assert '${' not in dependee, 'Nested variables not supported: ' + dependee return matches try: # Topologically sort, and then reverse, because we used an edge definition # that's inverted from the expected result of this function (see comment # above). 
order = gyp.common.TopologicallySorted(env.keys(), GetEdges) order.reverse() return order except gyp.common.CycleError, e: raise GypError( 'Xcode environment variables are cyclically dependent: ' + str(e.nodes)) def GetSortedXcodeEnv(xcode_settings, built_products_dir, srcroot, configuration, additional_settings=None): env = _GetXcodeEnv(xcode_settings, built_products_dir, srcroot, configuration, additional_settings) return [(key, env[key]) for key in _TopologicallySortedEnvVarKeys(env)] def GetSpecPostbuildCommands(spec, quiet=False): """Returns the list of postbuilds explicitly defined on |spec|, in a form executable by a shell.""" postbuilds = [] for postbuild in spec.get('postbuilds', []): if not quiet: postbuilds.append('echo POSTBUILD\\(%s\\) %s' % ( spec['target_name'], postbuild['postbuild_name'])) postbuilds.append(gyp.common.EncodePOSIXShellList(postbuild['action'])) return postbuilds def _HasIOSTarget(targets): """Returns true if any target contains the iOS specific key IPHONEOS_DEPLOYMENT_TARGET.""" for target_dict in targets.values(): for config in target_dict['configurations'].values(): if config.get('xcode_settings', {}).get('IPHONEOS_DEPLOYMENT_TARGET'): return True return False def _AddIOSDeviceConfigurations(targets): """Clone all targets and append -iphoneos to the name. Configure these targets to build for iOS devices and use correct architectures for those builds.""" for target_dict in targets.itervalues(): toolset = target_dict['toolset'] configs = target_dict['configurations'] for config_name, config_dict in dict(configs).iteritems(): iphoneos_config_dict = copy.deepcopy(config_dict) configs[config_name + '-iphoneos'] = iphoneos_config_dict configs[config_name + '-iphonesimulator'] = config_dict if toolset == 'target': iphoneos_config_dict['xcode_settings']['SDKROOT'] = 'iphoneos' return targets def CloneConfigurationForDeviceAndEmulator(target_dicts): """If |target_dicts| contains any iOS targets, automatically create -iphoneos targets for iOS device builds.""" if _HasIOSTarget(target_dicts): return _AddIOSDeviceConfigurations(target_dicts) return target_dicts
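# --- Editor's illustrative note (not part of the original file; the sample
# --- values below are invented) ---
# How the environment helpers above compose, for a tiny hypothetical env:
#   env = {'FOO': 'x', 'BAR': '${FOO}/y'}
#   _TopologicallySortedEnvVarKeys(env)                   # -> ['FOO', 'BAR']
#   ExpandEnvVars('${BAR}/z',
#                 [('FOO', 'x'), ('BAR', '${FOO}/y')])    # -> 'x/y/z'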
mit
6,018,361,578,783,747,000
38.973763
191
0.662346
false
xavieraijon/init
node_modules/gulp-sass/node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/MSVSToolFile.py
2736
1804
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Visual Studio project reader/writer."""

import gyp.common
import gyp.easy_xml as easy_xml


class Writer(object):
  """Visual Studio XML tool file writer."""

  def __init__(self, tool_file_path, name):
    """Initializes the tool file.

    Args:
      tool_file_path: Path to the tool file.
      name: Name of the tool file.
    """
    self.tool_file_path = tool_file_path
    self.name = name
    self.rules_section = ['Rules']

  def AddCustomBuildRule(self, name, cmd, description,
                         additional_dependencies, outputs, extensions):
    """Adds a rule to the tool file.

    Args:
      name: Name of the rule.
      description: Description of the rule.
      cmd: Command line of the rule.
      additional_dependencies: other files which may trigger the rule.
      outputs: outputs of the rule.
      extensions: extensions handled by the rule.
    """
    rule = ['CustomBuildRule',
            {'Name': name,
             'ExecutionDescription': description,
             'CommandLine': cmd,
             'Outputs': ';'.join(outputs),
             'FileExtensions': ';'.join(extensions),
             'AdditionalDependencies': ';'.join(additional_dependencies)
            }]
    self.rules_section.append(rule)

  def WriteIfChanged(self):
    """Writes the tool file."""
    content = ['VisualStudioToolFile',
               {'Version': '8.00',
                'Name': self.name
               },
               self.rules_section
              ]
    easy_xml.WriteXmlIfChanged(content, self.tool_file_path,
                               encoding="Windows-1252")
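# --- Editor's illustrative sketch (not part of the original file; the rule,
# --- paths and extensions below are invented) ---
# writer = Writer('build/my_rules.vsprops', 'my_rules')
# writer.AddCustomBuildRule(
#     name='midl',
#     cmd='midl.exe "$(InputPath)"',
#     description='Compiling IDL $(InputName)',
#     additional_dependencies=[],
#     outputs=['$(InputName).h'],
#     extensions=['idl'])
# writer.WriteIfChanged()  # emits the VisualStudioToolFile XML only if it changed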
mit
-5,613,351,463,418,433,000
30.103448
72
0.582594
false
chfw/Flask-Excel
examples/database_example_formatted.py
2
3569
""" database_example_formatted.py :copyright: (c) 2015 by C. W. :license: New BSD """ from flask import Flask, request, jsonify, redirect, url_for import flask_excel as excel from flask_sqlalchemy import SQLAlchemy from datetime import datetime # please uncomment the following line if you use pyexcel < 0.2.2 # import pyexcel.ext.xls app = Flask(__name__) excel.init_excel(app) app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///tmp.db" db = SQLAlchemy(app) class Post(db.Model): id = db.Column(db.Integer, primary_key=True) title = db.Column(db.String(80)) body = db.Column(db.Text) pub_date = db.Column(db.DateTime) category_id = db.Column(db.Integer, db.ForeignKey("category.id")) category = db.relationship( "Category", backref=db.backref("posts", lazy="dynamic") ) def __init__(self, title, body, category, pub_date=None): self.title = title self.body = body if pub_date is None: pub_date = datetime.utcnow() self.pub_date = pub_date self.category = category def __repr__(self): return "<Post %r>" % self.title class Category(db.Model): id = db.Column(db.Integer, primary_key=True) name = db.Column(db.String(50)) def __init__(self, name): self.name = name def __repr__(self): return "<Category %r>" % self.name db.create_all() @app.route("/upload", methods=["GET", "POST"]) def upload_file(): if request.method == "POST": return jsonify({"result": request.get_array("file")}) return """ <!doctype html> <title>Upload an excel file</title> <h1>Excel file upload (csv, tsv, csvz, tsvz only)</h1> <form action="" method=post enctype=multipart/form-data><p> <input type=file name=file><input type=submit value=Upload> </form> """ @app.route("/download", methods=["GET"]) def download_file(): return excel.make_response_from_array([[1, 2], [3, 4]], "csv") @app.route("/import", methods=["GET", "POST"]) def doimport(): if request.method == "POST": def category_init_func(row): c = Category(row["name"]) c.id = row["id"] return c def post_init_func(row): c = Category.query.filter_by(name=row["category"]).first() p = Post(row["title"], row["body"], c, row["pub_date"]) return p request.save_book_to_database( field_name="file", session=db.session, tables=[Category, Post], initializers=[category_init_func, post_init_func], ) return redirect(url_for(".handson_table"), code=302) return """ <!doctype html> <title>Upload an excel file</title> <h1>Excel file upload (xls, xlsx, ods please)</h1> <form action="" method=post enctype=multipart/form-data><p> <input type=file name=file><input type=submit value=Upload> </form> """ @app.route("/export", methods=["GET"]) def doexport(): return excel.make_response_from_tables(db.session, [Category, Post], "xls") @app.route("/custom_export", methods=["GET"]) def docustomexport(): query_sets = Category.query.filter_by(id=1).all() column_names = ["id", "name"] return excel.make_response_from_query_sets( query_sets, column_names, "xls", dest_sheet_name="custom_sheet" ) @app.route("/handson_view", methods=["GET"]) def handson_table(): return excel.make_response_from_tables( db.session, [Category, Post], "handsontable.html" ) if __name__ == "__main__": app.run()
bsd-3-clause
4,753,169,454,554,032,000
26.666667
79
0.610815
false
dmitriy0611/django
tests/m2m_through_regress/tests.py
182
9847
from __future__ import unicode_literals from django.contrib.auth.models import User from django.core import management from django.test import TestCase from django.utils.six import StringIO from .models import ( Car, CarDriver, Driver, Group, Membership, Person, UserMembership, ) class M2MThroughTestCase(TestCase): @classmethod def setUpTestData(cls): cls.bob = Person.objects.create(name="Bob") cls.jim = Person.objects.create(name="Jim") cls.rock = Group.objects.create(name="Rock") cls.roll = Group.objects.create(name="Roll") cls.frank = User.objects.create_user("frank", "[email protected]", "password") cls.jane = User.objects.create_user("jane", "[email protected]", "password") # normal intermediate model cls.bob_rock = Membership.objects.create(person=cls.bob, group=cls.rock) cls.bob_roll = Membership.objects.create(person=cls.bob, group=cls.roll, price=50) cls.jim_rock = Membership.objects.create(person=cls.jim, group=cls.rock, price=50) # intermediate model with custom id column cls.frank_rock = UserMembership.objects.create(user=cls.frank, group=cls.rock) cls.frank_roll = UserMembership.objects.create(user=cls.frank, group=cls.roll) cls.jane_rock = UserMembership.objects.create(user=cls.jane, group=cls.rock) def test_retrieve_reverse_m2m_items(self): self.assertQuerysetEqual( self.bob.group_set.all(), [ "<Group: Rock>", "<Group: Roll>", ], ordered=False ) def test_retrieve_forward_m2m_items(self): self.assertQuerysetEqual( self.roll.members.all(), [ "<Person: Bob>", ] ) def test_cannot_use_setattr_on_reverse_m2m_with_intermediary_model(self): self.assertRaises(AttributeError, setattr, self.bob, "group_set", []) def test_cannot_use_setattr_on_forward_m2m_with_intermediary_model(self): self.assertRaises(AttributeError, setattr, self.roll, "members", []) def test_cannot_use_create_on_m2m_with_intermediary_model(self): self.assertRaises(AttributeError, self.rock.members.create, name="Anne") def test_cannot_use_create_on_reverse_m2m_with_intermediary_model(self): self.assertRaises(AttributeError, self.bob.group_set.create, name="Funk") def test_retrieve_reverse_m2m_items_via_custom_id_intermediary(self): self.assertQuerysetEqual( self.frank.group_set.all(), [ "<Group: Rock>", "<Group: Roll>", ], ordered=False ) def test_retrieve_forward_m2m_items_via_custom_id_intermediary(self): self.assertQuerysetEqual( self.roll.user_members.all(), [ "<User: frank>", ] ) def test_join_trimming_forwards(self): "Check that we don't involve too many copies of the intermediate table when doing a join. Refs #8046, #8254" self.assertQuerysetEqual( self.rock.members.filter(membership__price=50), [ "<Person: Jim>", ] ) def test_join_trimming_reverse(self): self.assertQuerysetEqual( self.bob.group_set.filter(membership__price=50), [ "<Group: Roll>", ] ) class M2MThroughSerializationTestCase(TestCase): @classmethod def setUpTestData(cls): cls.bob = Person.objects.create(name="Bob") cls.roll = Group.objects.create(name="Roll") cls.bob_roll = Membership.objects.create(person=cls.bob, group=cls.roll) def test_serialization(self): "m2m-through models aren't serialized as m2m fields. 
Refs #8134" pks = {"p_pk": self.bob.pk, "g_pk": self.roll.pk, "m_pk": self.bob_roll.pk} out = StringIO() management.call_command("dumpdata", "m2m_through_regress", format="json", stdout=out) self.assertJSONEqual(out.getvalue().strip(), """[{"pk": %(m_pk)s, "model": "m2m_through_regress.membership", "fields": {"person": %(p_pk)s, "price": 100, "group": %(g_pk)s}}, {"pk": %(p_pk)s, "model": "m2m_through_regress.person", "fields": {"name": "Bob"}}, {"pk": %(g_pk)s, "model": "m2m_through_regress.group", "fields": {"name": "Roll"}}]""" % pks) out = StringIO() management.call_command("dumpdata", "m2m_through_regress", format="xml", indent=2, stdout=out) self.assertXMLEqual(out.getvalue().strip(), """ <?xml version="1.0" encoding="utf-8"?> <django-objects version="1.0"> <object pk="%(m_pk)s" model="m2m_through_regress.membership"> <field to="m2m_through_regress.person" name="person" rel="ManyToOneRel">%(p_pk)s</field> <field to="m2m_through_regress.group" name="group" rel="ManyToOneRel">%(g_pk)s</field> <field type="IntegerField" name="price">100</field> </object> <object pk="%(p_pk)s" model="m2m_through_regress.person"> <field type="CharField" name="name">Bob</field> </object> <object pk="%(g_pk)s" model="m2m_through_regress.group"> <field type="CharField" name="name">Roll</field> </object> </django-objects> """.strip() % pks) class ToFieldThroughTests(TestCase): def setUp(self): self.car = Car.objects.create(make="Toyota") self.driver = Driver.objects.create(name="Ryan Briscoe") CarDriver.objects.create(car=self.car, driver=self.driver) # We are testing if wrong objects get deleted due to using wrong # field value in m2m queries. So, it is essential that the pk # numberings do not match. # Create one intentionally unused driver to mix up the autonumbering self.unused_driver = Driver.objects.create(name="Barney Gumble") # And two intentionally unused cars. self.unused_car1 = Car.objects.create(make="Trabant") self.unused_car2 = Car.objects.create(make="Wartburg") def test_to_field(self): self.assertQuerysetEqual( self.car.drivers.all(), ["<Driver: Ryan Briscoe>"] ) def test_to_field_reverse(self): self.assertQuerysetEqual( self.driver.car_set.all(), ["<Car: Toyota>"] ) def test_to_field_clear_reverse(self): self.driver.car_set.clear() self.assertQuerysetEqual( self.driver.car_set.all(), []) def test_to_field_clear(self): self.car.drivers.clear() self.assertQuerysetEqual( self.car.drivers.all(), []) # Low level tests for _add_items and _remove_items. We test these methods # because .add/.remove aren't available for m2m fields with through, but # through is the only way to set to_field currently. We do want to make # sure these methods are ready if the ability to use .add or .remove with # to_field relations is added some day. def test_add(self): self.assertQuerysetEqual( self.car.drivers.all(), ["<Driver: Ryan Briscoe>"] ) # Yikes - barney is going to drive... 
self.car.drivers._add_items('car', 'driver', self.unused_driver) self.assertQuerysetEqual( self.car.drivers.all(), ["<Driver: Barney Gumble>", "<Driver: Ryan Briscoe>"] ) def test_add_null(self): nullcar = Car.objects.create(make=None) with self.assertRaises(ValueError): nullcar.drivers._add_items('car', 'driver', self.unused_driver) def test_add_related_null(self): nulldriver = Driver.objects.create(name=None) with self.assertRaises(ValueError): self.car.drivers._add_items('car', 'driver', nulldriver) def test_add_reverse(self): car2 = Car.objects.create(make="Honda") self.assertQuerysetEqual( self.driver.car_set.all(), ["<Car: Toyota>"] ) self.driver.car_set._add_items('driver', 'car', car2) self.assertQuerysetEqual( self.driver.car_set.all(), ["<Car: Toyota>", "<Car: Honda>"], ordered=False ) def test_add_null_reverse(self): nullcar = Car.objects.create(make=None) with self.assertRaises(ValueError): self.driver.car_set._add_items('driver', 'car', nullcar) def test_add_null_reverse_related(self): nulldriver = Driver.objects.create(name=None) with self.assertRaises(ValueError): nulldriver.car_set._add_items('driver', 'car', self.car) def test_remove(self): self.assertQuerysetEqual( self.car.drivers.all(), ["<Driver: Ryan Briscoe>"] ) self.car.drivers._remove_items('car', 'driver', self.driver) self.assertQuerysetEqual( self.car.drivers.all(), []) def test_remove_reverse(self): self.assertQuerysetEqual( self.driver.car_set.all(), ["<Car: Toyota>"] ) self.driver.car_set._remove_items('driver', 'car', self.car) self.assertQuerysetEqual( self.driver.car_set.all(), []) class ThroughLoadDataTestCase(TestCase): fixtures = ["m2m_through"] def test_sequence_creation(self): "Check that sequences on an m2m_through are created for the through model, not a phantom auto-generated m2m table. Refs #11107" out = StringIO() management.call_command("dumpdata", "m2m_through_regress", format="json", stdout=out) self.assertJSONEqual(out.getvalue().strip(), """[{"pk": 1, "model": "m2m_through_regress.usermembership", "fields": {"price": 100, "group": 1, "user": 1}}, {"pk": 1, "model": "m2m_through_regress.person", "fields": {"name": "Guido"}}, {"pk": 1, "model": "m2m_through_regress.group", "fields": {"name": "Python Core Group"}}]""")
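# --- Editor's note (not part of the original file) ---
# These are regression tests from Django's own test suite; from a Django
# checkout they would normally be run with something like:
#   ./runtests.py m2m_through_regress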
bsd-3-clause
-3,335,269,392,340,007,000
39.522634
360
0.618158
false
raj454raj/eden
tests/dbmigration/example_migration.py
33
2001
import os
import sys
import copy
import subprocess

WEB2PY_PATH = sys.argv[1]
APP = sys.argv[2]

changed_table = "org_organisation"
new_field = "type_id"
new_table = "org_organisation_type"
old_field = "type"
new_table_field = "name"

os.chdir(WEB2PY_PATH)
sys.path.append(WEB2PY_PATH)

from gluon.custom_import import custom_import_install
custom_import_install(WEB2PY_PATH)

from gluon.shell import env
from gluon import DAL, Field

old_env = env(APP, c=None, import_models=True)

old_str ='''
try:
    s3db.load_all_models()
except NameError:
    print "s3db not defined"
'''

globals().update(**old_env)
exec old_str in globals(), locals()

database_string = "sqlite://storage.db"
old_database_folder = "%s/applications/%s/databases" % (WEB2PY_PATH, APP)
temp_db = DAL(database_string, folder=old_database_folder, migrate_enabled=True, migrate=True)

# Migration Script
list_of_fields = []
list_of_fields.append(Field(new_field, "integer"))

list_of_new_table_fields = []
list_of_new_table_fields.append(Field(new_table_field, "integer"))

try:
    db[changed_table]._primarykey
except KeyError:
    db[changed_table]._primarykey = None

temp_db.define_table(changed_table,
                     db[changed_table],
                     *list_of_fields,
                     primarykey = db[changed_table]._primarykey
                     )
temp_db.define_table(new_table,
                     *list_of_new_table_fields
                     )
temp_db.commit()

# Add a new field of int type in a class
for old_row in temp_db().select(temp_db[changed_table][old_field]):
    if (len(temp_db(temp_db[new_table][new_table_field] == old_row[old_field]).select()) == 0):
        row = temp_db[new_table].insert(name = old_row[old_field])
        new_id = int(row["id"])
        temp_db(temp_db[changed_table][old_field] == old_row[old_field]).update(type_id = new_id)

temp_db.commit()

# END =========================================================================
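# --- Editor's note (not part of the original file; the value "NGO" is invented) ---
# The loop above normalises the free-text org_organisation.type column: the first
# time a value such as "NGO" is seen, one org_organisation_type row is created
# with name="NGO", and every org_organisation row holding that value has its new
# type_id field pointed at it.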
mit
-608,431,684,315,557,200
27.183099
97
0.621689
false
ahmadRagheb/goldenHR
erpnext/projects/report/project_wise_stock_tracking/project_wise_stock_tracking.py
62
3071
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt

from __future__ import unicode_literals
import frappe
from frappe import _


def execute(filters=None):
    columns = get_columns()
    proj_details = get_project_details()

    pr_item_map = get_purchased_items_cost()
    se_item_map = get_issued_items_cost()
    dn_item_map = get_delivered_items_cost()

    data = []
    for project in proj_details:
        data.append([project.name, pr_item_map.get(project.name, 0),
            se_item_map.get(project.name, 0), dn_item_map.get(project.name, 0),
            project.project_name, project.status, project.company,
            project.customer, project.estimated_costing, project.expected_start_date,
            project.expected_end_date])

    return columns, data


def get_columns():
    return [_("Project Id") + ":Link/Project:140",
        _("Cost of Purchased Items") + ":Currency:160",
        _("Cost of Issued Items") + ":Currency:160",
        _("Cost of Delivered Items") + ":Currency:160",
        _("Project Name") + "::120", _("Project Status") + "::120",
        _("Company") + ":Link/Company:100", _("Customer") + ":Link/Customer:140",
        _("Project Value") + ":Currency:120", _("Project Start Date") + ":Date:120",
        _("Completion Date") + ":Date:120"]


def get_project_details():
    return frappe.db.sql(""" select name, project_name, status, company, customer,
        estimated_costing, expected_start_date, expected_end_date
        from tabProject where docstatus < 2""", as_dict=1)


def get_purchased_items_cost():
    pr_items = frappe.db.sql("""select project, sum(base_net_amount) as amount
        from `tabPurchase Receipt Item` where ifnull(project, '') != ''
        and docstatus = 1 group by project""", as_dict=1)

    pr_item_map = {}
    for item in pr_items:
        pr_item_map.setdefault(item.project, item.amount)

    return pr_item_map


def get_issued_items_cost():
    se_items = frappe.db.sql("""select se.project, sum(se_item.amount) as amount
        from `tabStock Entry` se, `tabStock Entry Detail` se_item
        where se.name = se_item.parent and se.docstatus = 1
        and ifnull(se_item.t_warehouse, '') = '' and ifnull(se.project, '') != ''
        group by se.project""", as_dict=1)

    se_item_map = {}
    for item in se_items:
        se_item_map.setdefault(item.project, item.amount)

    return se_item_map


def get_delivered_items_cost():
    dn_items = frappe.db.sql("""select dn.project, sum(dn_item.base_net_amount) as amount
        from `tabDelivery Note` dn, `tabDelivery Note Item` dn_item
        where dn.name = dn_item.parent and dn.docstatus = 1
        and ifnull(dn.project, '') != '' group by dn.project""", as_dict=1)

    si_items = frappe.db.sql("""select si.project, sum(si_item.base_net_amount) as amount
        from `tabSales Invoice` si, `tabSales Invoice Item` si_item
        where si.name = si_item.parent and si.docstatus = 1 and si.update_stock = 1
        and si.is_pos = 1 and ifnull(si.project, '') != '' group by si.project""", as_dict=1)

    dn_item_map = {}
    for item in dn_items:
        dn_item_map.setdefault(item.project, item.amount)

    for item in si_items:
        dn_item_map.setdefault(item.project, item.amount)

    return dn_item_map
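# --- Editor's note (not part of the original file; the figures below are invented) ---
# Each row appended to `data` follows the column order defined in get_columns(), e.g.:
# ["PROJ-0001", 12000.0, 3500.0, 9800.0, "Warehouse fit-out", "Open",
#  "My Company", "ACME Ltd", 25000.0, "2017-01-01", "2017-06-30"]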
gpl-3.0
-8,644,819,031,627,431,000
37.873418
98
0.689678
false
clubcapra/Ibex
src/capra_ai/scripts/igvc_basic_north.py
1
1052
#! /usr/bin/env python

from state_ai import StateAi
import rospy
from math import pi
from std_msgs.msg import Bool


class IGVCBasicNorth(StateAi):

    def __init__(self):
        super(IGVCBasicNorth, self).__init__("igvc_basic_north")

    def on_start(self):
        #self.generate_circle(2.0, pi/4, 2 * pi - pi/4, pi/270.0, 120)
        pass

    def on_goal_changed(self, goal_msg):
        rospy.loginfo("Targeting goal with priority: {}".format(goal_msg.priority))

        if goal_msg.priority == 105: ## First GOAL
            pass

        if goal_msg.priority == 404: ## MiddlePoint
            self.clear_octomap(self.start_pos, 5, 5)

        if goal_msg.priority == 103: ## Last GOAL
            pass

        if goal_msg.priority == 402:
            self.generate_bar(8, -1.5, -1)

    def on_last_goal_reached(self, msg):
        rospy.loginfo("WE FINISHED THE BASIC COURSE !!! CONGRATS EVERYONE !!!")
        pass


if __name__ == "__main__":
    try:
        a = IGVCBasicNorth()
    except rospy.ROSInterruptException:
        pass
gpl-3.0
4,078,991,938,382,549,000
24.682927
83
0.590304
false
batxes/4c2vhic
SHH_WT_models_highres/SHH_WT_models_highres_final_output_0.1_-0.1_5000/SHH_WT_models_highres22408.py
4
88247
import _surface import chimera try: import chimera.runCommand except: pass from VolumePath import markerset as ms try: from VolumePath import Marker_Set, Link new_marker_set=Marker_Set except: from VolumePath import volume_path_dialog d= volume_path_dialog(True) new_marker_set= d.new_marker_set marker_sets={} surf_sets={} if "particle_0 geometry" not in marker_sets: s=new_marker_set('particle_0 geometry') marker_sets["particle_0 geometry"]=s s= marker_sets["particle_0 geometry"] mark=s.place_marker((2741.55, -327.326, 2795.68), (0.7, 0.7, 0.7), 182.271) if "particle_1 geometry" not in marker_sets: s=new_marker_set('particle_1 geometry') marker_sets["particle_1 geometry"]=s s= marker_sets["particle_1 geometry"] mark=s.place_marker((2635.52, -708.606, 2992.33), (0.7, 0.7, 0.7), 258.199) if "particle_2 geometry" not in marker_sets: s=new_marker_set('particle_2 geometry') marker_sets["particle_2 geometry"]=s s= marker_sets["particle_2 geometry"] mark=s.place_marker((2541.68, -317.222, 3056.47), (0.7, 0.7, 0.7), 123.897) if "particle_3 geometry" not in marker_sets: s=new_marker_set('particle_3 geometry') marker_sets["particle_3 geometry"]=s s= marker_sets["particle_3 geometry"] mark=s.place_marker((2753.53, -421.408, 3384.17), (0.7, 0.7, 0.7), 146.739) if "particle_4 geometry" not in marker_sets: s=new_marker_set('particle_4 geometry') marker_sets["particle_4 geometry"]=s s= marker_sets["particle_4 geometry"] mark=s.place_marker((3062.32, -423.876, 3762.62), (0.7, 0.7, 0.7), 179.098) if "particle_5 geometry" not in marker_sets: s=new_marker_set('particle_5 geometry') marker_sets["particle_5 geometry"]=s s= marker_sets["particle_5 geometry"] mark=s.place_marker((3009.04, 119.367, 3419.17), (0.7, 0.7, 0.7), 148.854) if "particle_6 geometry" not in marker_sets: s=new_marker_set('particle_6 geometry') marker_sets["particle_6 geometry"]=s s= marker_sets["particle_6 geometry"] mark=s.place_marker((2968.55, 695.52, 3259.31), (0.7, 0.7, 0.7), 196.357) if "particle_7 geometry" not in marker_sets: s=new_marker_set('particle_7 geometry') marker_sets["particle_7 geometry"]=s s= marker_sets["particle_7 geometry"] mark=s.place_marker((2913.65, 1050.01, 3778.63), (0.7, 0.7, 0.7), 166.873) if "particle_8 geometry" not in marker_sets: s=new_marker_set('particle_8 geometry') marker_sets["particle_8 geometry"]=s s= marker_sets["particle_8 geometry"] mark=s.place_marker((2979.14, 1364.88, 4413.29), (0.7, 0.7, 0.7), 95.4711) if "particle_9 geometry" not in marker_sets: s=new_marker_set('particle_9 geometry') marker_sets["particle_9 geometry"]=s s= marker_sets["particle_9 geometry"] mark=s.place_marker((3026.4, 1414.54, 4001.2), (0.7, 0.7, 0.7), 185.401) if "particle_10 geometry" not in marker_sets: s=new_marker_set('particle_10 geometry') marker_sets["particle_10 geometry"]=s s= marker_sets["particle_10 geometry"] mark=s.place_marker((3051.88, 1171.6, 3448.43), (0.7, 0.7, 0.7), 151.984) if "particle_11 geometry" not in marker_sets: s=new_marker_set('particle_11 geometry') marker_sets["particle_11 geometry"]=s s= marker_sets["particle_11 geometry"] mark=s.place_marker((3030.88, 913.082, 2821.97), (0.7, 0.7, 0.7), 185.612) if "particle_12 geometry" not in marker_sets: s=new_marker_set('particle_12 geometry') marker_sets["particle_12 geometry"]=s s= marker_sets["particle_12 geometry"] mark=s.place_marker((3173.84, 558.064, 2529.87), (0.7, 0.7, 0.7), 210.273) if "particle_13 geometry" not in marker_sets: s=new_marker_set('particle_13 geometry') marker_sets["particle_13 geometry"]=s s= marker_sets["particle_13 
geometry"] mark=s.place_marker((3331.13, 240.428, 2598.69), (0.7, 0.7, 0.7), 106.892) if "particle_14 geometry" not in marker_sets: s=new_marker_set('particle_14 geometry') marker_sets["particle_14 geometry"]=s s= marker_sets["particle_14 geometry"] mark=s.place_marker((3533.35, -29.3789, 2397.38), (0.7, 0.7, 0.7), 202.025) if "particle_15 geometry" not in marker_sets: s=new_marker_set('particle_15 geometry') marker_sets["particle_15 geometry"]=s s= marker_sets["particle_15 geometry"] mark=s.place_marker((3583.23, -440.423, 2058.68), (0.7, 0.7, 0.7), 192.169) if "particle_16 geometry" not in marker_sets: s=new_marker_set('particle_16 geometry') marker_sets["particle_16 geometry"]=s s= marker_sets["particle_16 geometry"] mark=s.place_marker((3547.97, -849.289, 1565.82), (0.7, 0.7, 0.7), 241.11) if "particle_17 geometry" not in marker_sets: s=new_marker_set('particle_17 geometry') marker_sets["particle_17 geometry"]=s s= marker_sets["particle_17 geometry"] mark=s.place_marker((3373.05, -981.874, 1012.02), (0.7, 0.7, 0.7), 128.465) if "particle_18 geometry" not in marker_sets: s=new_marker_set('particle_18 geometry') marker_sets["particle_18 geometry"]=s s= marker_sets["particle_18 geometry"] mark=s.place_marker((3077.47, -1143.6, 472.392), (0.7, 0.7, 0.7), 217.38) if "particle_19 geometry" not in marker_sets: s=new_marker_set('particle_19 geometry') marker_sets["particle_19 geometry"]=s s= marker_sets["particle_19 geometry"] mark=s.place_marker((2784.73, -1601.17, -16.1303), (0.7, 0.7, 0.7), 184.555) if "particle_20 geometry" not in marker_sets: s=new_marker_set('particle_20 geometry') marker_sets["particle_20 geometry"]=s s= marker_sets["particle_20 geometry"] mark=s.place_marker((2652.86, -942.844, 70.2615), (0.7, 0.7, 0.7), 140.055) if "particle_21 geometry" not in marker_sets: s=new_marker_set('particle_21 geometry') marker_sets["particle_21 geometry"]=s s= marker_sets["particle_21 geometry"] mark=s.place_marker((2771.95, -502.308, -113.207), (0.7, 0.7, 0.7), 169.708) if "particle_22 geometry" not in marker_sets: s=new_marker_set('particle_22 geometry') marker_sets["particle_22 geometry"]=s s= marker_sets["particle_22 geometry"] mark=s.place_marker((2813.09, -141.296, -445.61), (0.7, 0.7, 0.7), 184.639) if "particle_23 geometry" not in marker_sets: s=new_marker_set('particle_23 geometry') marker_sets["particle_23 geometry"]=s s= marker_sets["particle_23 geometry"] mark=s.place_marker((2610.53, 181.077, -477.077), (0.7, 0.7, 0.7), 119.286) if "particle_24 geometry" not in marker_sets: s=new_marker_set('particle_24 geometry') marker_sets["particle_24 geometry"]=s s= marker_sets["particle_24 geometry"] mark=s.place_marker((2303.23, 269.335, -508.128), (0.7, 0.7, 0.7), 147.754) if "particle_25 geometry" not in marker_sets: s=new_marker_set('particle_25 geometry') marker_sets["particle_25 geometry"]=s s= marker_sets["particle_25 geometry"] mark=s.place_marker((2226.37, 45.6785, -287.536), (0.7, 0.7, 0.7), 171.4) if "particle_26 geometry" not in marker_sets: s=new_marker_set('particle_26 geometry') marker_sets["particle_26 geometry"]=s s= marker_sets["particle_26 geometry"] mark=s.place_marker((2489.64, 139.297, 29.4646), (0.7, 0.7, 0.7), 156.341) if "particle_27 geometry" not in marker_sets: s=new_marker_set('particle_27 geometry') marker_sets["particle_27 geometry"]=s s= marker_sets["particle_27 geometry"] mark=s.place_marker((2474.67, 309.045, 596.606), (0.7, 0.7, 0.7), 186.501) if "particle_28 geometry" not in marker_sets: s=new_marker_set('particle_28 geometry') marker_sets["particle_28 
geometry"]=s s= marker_sets["particle_28 geometry"] mark=s.place_marker((2462.57, 550.748, 1094.68), (0.7, 0.7, 0.7), 308.325) if "particle_29 geometry" not in marker_sets: s=new_marker_set('particle_29 geometry') marker_sets["particle_29 geometry"]=s s= marker_sets["particle_29 geometry"] mark=s.place_marker((2669.28, 689.534, 1468.55), (0.7, 0.7, 0.7), 138.617) if "particle_30 geometry" not in marker_sets: s=new_marker_set('particle_30 geometry') marker_sets["particle_30 geometry"]=s s= marker_sets["particle_30 geometry"] mark=s.place_marker((2824.23, 919.558, 1608.01), (0.7, 0.7, 0.7), 130.03) if "particle_31 geometry" not in marker_sets: s=new_marker_set('particle_31 geometry') marker_sets["particle_31 geometry"]=s s= marker_sets["particle_31 geometry"] mark=s.place_marker((2798.14, 796.182, 1299.15), (0.7, 0.7, 0.7), 156.552) if "particle_32 geometry" not in marker_sets: s=new_marker_set('particle_32 geometry') marker_sets["particle_32 geometry"]=s s= marker_sets["particle_32 geometry"] mark=s.place_marker((2607.19, 577.036, 1395.03), (0.7, 0.7, 0.7), 183.244) if "particle_33 geometry" not in marker_sets: s=new_marker_set('particle_33 geometry') marker_sets["particle_33 geometry"]=s s= marker_sets["particle_33 geometry"] mark=s.place_marker((2422.81, 396.399, 1469.51), (0.7, 0.7, 0.7), 181.382) if "particle_34 geometry" not in marker_sets: s=new_marker_set('particle_34 geometry') marker_sets["particle_34 geometry"]=s s= marker_sets["particle_34 geometry"] mark=s.place_marker((2239.48, 389.226, 1393.29), (0.7, 0.7, 0.7), 101.943) if "particle_35 geometry" not in marker_sets: s=new_marker_set('particle_35 geometry') marker_sets["particle_35 geometry"]=s s= marker_sets["particle_35 geometry"] mark=s.place_marker((2014.88, 124.104, 1308.08), (1, 0.7, 0), 138.913) if "particle_36 geometry" not in marker_sets: s=new_marker_set('particle_36 geometry') marker_sets["particle_36 geometry"]=s s= marker_sets["particle_36 geometry"] mark=s.place_marker((2382.45, -230.454, 2251.68), (0.7, 0.7, 0.7), 221.737) if "particle_37 geometry" not in marker_sets: s=new_marker_set('particle_37 geometry') marker_sets["particle_37 geometry"]=s s= marker_sets["particle_37 geometry"] mark=s.place_marker((2369.99, -431.305, 3082.57), (0.7, 0.7, 0.7), 256.38) if "particle_38 geometry" not in marker_sets: s=new_marker_set('particle_38 geometry') marker_sets["particle_38 geometry"]=s s= marker_sets["particle_38 geometry"] mark=s.place_marker((2153.53, -82.2576, 3597.1), (0.7, 0.7, 0.7), 221.694) if "particle_39 geometry" not in marker_sets: s=new_marker_set('particle_39 geometry') marker_sets["particle_39 geometry"]=s s= marker_sets["particle_39 geometry"] mark=s.place_marker((2239.44, 594.941, 3492.15), (0.7, 0.7, 0.7), 259.341) if "particle_40 geometry" not in marker_sets: s=new_marker_set('particle_40 geometry') marker_sets["particle_40 geometry"]=s s= marker_sets["particle_40 geometry"] mark=s.place_marker((2520.19, 1079.45, 2913.27), (0.7, 0.7, 0.7), 117.89) if "particle_41 geometry" not in marker_sets: s=new_marker_set('particle_41 geometry') marker_sets["particle_41 geometry"]=s s= marker_sets["particle_41 geometry"] mark=s.place_marker((2628.76, 1216.45, 2087.75), (0.7, 0.7, 0.7), 116.071) if "particle_42 geometry" not in marker_sets: s=new_marker_set('particle_42 geometry') marker_sets["particle_42 geometry"]=s s= marker_sets["particle_42 geometry"] mark=s.place_marker((2290.02, 1129.8, 1720.83), (0.7, 0.7, 0.7), 268.224) if "particle_43 geometry" not in marker_sets: s=new_marker_set('particle_43 geometry') 
marker_sets["particle_43 geometry"]=s s= marker_sets["particle_43 geometry"] mark=s.place_marker((2060.12, 1149.05, 1954.89), (0.7, 0.7, 0.7), 386.918) if "particle_44 geometry" not in marker_sets: s=new_marker_set('particle_44 geometry') marker_sets["particle_44 geometry"]=s s= marker_sets["particle_44 geometry"] mark=s.place_marker((2026.33, 1328.4, 2562.9), (0.7, 0.7, 0.7), 121.316) if "particle_45 geometry" not in marker_sets: s=new_marker_set('particle_45 geometry') marker_sets["particle_45 geometry"]=s s= marker_sets["particle_45 geometry"] mark=s.place_marker((1727.44, 1308.31, 2862.9), (0.7, 0.7, 0.7), 138.363) if "particle_46 geometry" not in marker_sets: s=new_marker_set('particle_46 geometry') marker_sets["particle_46 geometry"]=s s= marker_sets["particle_46 geometry"] mark=s.place_marker((1627.79, 909.083, 2271), (1, 0.7, 0), 175.207) if "particle_47 geometry" not in marker_sets: s=new_marker_set('particle_47 geometry') marker_sets["particle_47 geometry"]=s s= marker_sets["particle_47 geometry"] mark=s.place_marker((1330.65, 1020.55, 2919.69), (0.7, 0.7, 0.7), 131.468) if "particle_48 geometry" not in marker_sets: s=new_marker_set('particle_48 geometry') marker_sets["particle_48 geometry"]=s s= marker_sets["particle_48 geometry"] mark=s.place_marker((876.668, 1075.86, 3491.95), (0.7, 0.7, 0.7), 287.894) if "particle_49 geometry" not in marker_sets: s=new_marker_set('particle_49 geometry') marker_sets["particle_49 geometry"]=s s= marker_sets["particle_49 geometry"] mark=s.place_marker((955.807, 1422.62, 3079.83), (0.7, 0.7, 0.7), 88.1109) if "particle_50 geometry" not in marker_sets: s=new_marker_set('particle_50 geometry') marker_sets["particle_50 geometry"]=s s= marker_sets["particle_50 geometry"] mark=s.place_marker((1310.35, 1497.02, 2620.58), (0.7, 0.7, 0.7), 145.385) if "particle_51 geometry" not in marker_sets: s=new_marker_set('particle_51 geometry') marker_sets["particle_51 geometry"]=s s= marker_sets["particle_51 geometry"] mark=s.place_marker((1516.64, 1597.33, 2582.89), (0.7, 0.7, 0.7), 155.452) if "particle_52 geometry" not in marker_sets: s=new_marker_set('particle_52 geometry') marker_sets["particle_52 geometry"]=s s= marker_sets["particle_52 geometry"] mark=s.place_marker((1212.3, 1786.79, 3096.63), (0.7, 0.7, 0.7), 145.512) if "particle_53 geometry" not in marker_sets: s=new_marker_set('particle_53 geometry') marker_sets["particle_53 geometry"]=s s= marker_sets["particle_53 geometry"] mark=s.place_marker((1027.78, 1971.44, 3534.04), (0.7, 0.7, 0.7), 99.9972) if "particle_54 geometry" not in marker_sets: s=new_marker_set('particle_54 geometry') marker_sets["particle_54 geometry"]=s s= marker_sets["particle_54 geometry"] mark=s.place_marker((889.374, 2106.02, 3927.64), (0.7, 0.7, 0.7), 327.529) if "particle_55 geometry" not in marker_sets: s=new_marker_set('particle_55 geometry') marker_sets["particle_55 geometry"]=s s= marker_sets["particle_55 geometry"] mark=s.place_marker((1475.81, 2252.35, 3711.46), (0.7, 0.7, 0.7), 137.983) if "particle_56 geometry" not in marker_sets: s=new_marker_set('particle_56 geometry') marker_sets["particle_56 geometry"]=s s= marker_sets["particle_56 geometry"] mark=s.place_marker((1666.43, 2250.77, 3237.23), (0.7, 0.7, 0.7), 83.3733) if "particle_57 geometry" not in marker_sets: s=new_marker_set('particle_57 geometry') marker_sets["particle_57 geometry"]=s s= marker_sets["particle_57 geometry"] mark=s.place_marker((1798.7, 2157.57, 2682.61), (0.7, 0.7, 0.7), 101.562) if "particle_58 geometry" not in marker_sets: 
s=new_marker_set('particle_58 geometry') marker_sets["particle_58 geometry"]=s s= marker_sets["particle_58 geometry"] mark=s.place_marker((1868.39, 1939.33, 2200.25), (0.7, 0.7, 0.7), 165.689) if "particle_59 geometry" not in marker_sets: s=new_marker_set('particle_59 geometry') marker_sets["particle_59 geometry"]=s s= marker_sets["particle_59 geometry"] mark=s.place_marker((1583.18, 1820.05, 2103.88), (0.7, 0.7, 0.7), 136.925) if "particle_60 geometry" not in marker_sets: s=new_marker_set('particle_60 geometry') marker_sets["particle_60 geometry"]=s s= marker_sets["particle_60 geometry"] mark=s.place_marker((1577.01, 1777.02, 2235.57), (0.7, 0.7, 0.7), 123.389) if "particle_61 geometry" not in marker_sets: s=new_marker_set('particle_61 geometry') marker_sets["particle_61 geometry"]=s s= marker_sets["particle_61 geometry"] mark=s.place_marker((1581.25, 2027.75, 2651.89), (0.7, 0.7, 0.7), 184.47) if "particle_62 geometry" not in marker_sets: s=new_marker_set('particle_62 geometry') marker_sets["particle_62 geometry"]=s s= marker_sets["particle_62 geometry"] mark=s.place_marker((1389.66, 2489.65, 3292.78), (0.7, 0.7, 0.7), 148.473) if "particle_63 geometry" not in marker_sets: s=new_marker_set('particle_63 geometry') marker_sets["particle_63 geometry"]=s s= marker_sets["particle_63 geometry"] mark=s.place_marker((1051.32, 3102.8, 4043.29), (0.7, 0.7, 0.7), 241.406) if "particle_64 geometry" not in marker_sets: s=new_marker_set('particle_64 geometry') marker_sets["particle_64 geometry"]=s s= marker_sets["particle_64 geometry"] mark=s.place_marker((1544.31, 2733.61, 3811.03), (0.7, 0.7, 0.7), 182.736) if "particle_65 geometry" not in marker_sets: s=new_marker_set('particle_65 geometry') marker_sets["particle_65 geometry"]=s s= marker_sets["particle_65 geometry"] mark=s.place_marker((1764.28, 2406.35, 3619.57), (0.7, 0.7, 0.7), 166.62) if "particle_66 geometry" not in marker_sets: s=new_marker_set('particle_66 geometry') marker_sets["particle_66 geometry"]=s s= marker_sets["particle_66 geometry"] mark=s.place_marker((1625.84, 2437.7, 3345.5), (0.7, 0.7, 0.7), 113.872) if "particle_67 geometry" not in marker_sets: s=new_marker_set('particle_67 geometry') marker_sets["particle_67 geometry"]=s s= marker_sets["particle_67 geometry"] mark=s.place_marker((1579.87, 2243.19, 3088.63), (0.7, 0.7, 0.7), 110.065) if "particle_68 geometry" not in marker_sets: s=new_marker_set('particle_68 geometry') marker_sets["particle_68 geometry"]=s s= marker_sets["particle_68 geometry"] mark=s.place_marker((1378.6, 1963.01, 2898.42), (0.7, 0.7, 0.7), 150.08) if "particle_69 geometry" not in marker_sets: s=new_marker_set('particle_69 geometry') marker_sets["particle_69 geometry"]=s s= marker_sets["particle_69 geometry"] mark=s.place_marker((1079.55, 1601.98, 2784.68), (0.7, 0.7, 0.7), 118.525) if "particle_70 geometry" not in marker_sets: s=new_marker_set('particle_70 geometry') marker_sets["particle_70 geometry"]=s s= marker_sets["particle_70 geometry"] mark=s.place_marker((636.978, 1292.42, 2695.27), (0.7, 0.7, 0.7), 163.955) if "particle_71 geometry" not in marker_sets: s=new_marker_set('particle_71 geometry') marker_sets["particle_71 geometry"]=s s= marker_sets["particle_71 geometry"] mark=s.place_marker((287.774, 1409.24, 2757.28), (0.7, 0.7, 0.7), 170.131) if "particle_72 geometry" not in marker_sets: s=new_marker_set('particle_72 geometry') marker_sets["particle_72 geometry"]=s s= marker_sets["particle_72 geometry"] mark=s.place_marker((396.95, 2094.53, 3080.13), (0.7, 0.7, 0.7), 78.2127) if "particle_73 
geometry" not in marker_sets: s=new_marker_set('particle_73 geometry') marker_sets["particle_73 geometry"]=s s= marker_sets["particle_73 geometry"] mark=s.place_marker((639.812, 2782.38, 3485.19), (0.7, 0.7, 0.7), 251.896) if "particle_74 geometry" not in marker_sets: s=new_marker_set('particle_74 geometry') marker_sets["particle_74 geometry"]=s s= marker_sets["particle_74 geometry"] mark=s.place_marker((1014.06, 3219.86, 3844.25), (0.7, 0.7, 0.7), 167.55) if "particle_75 geometry" not in marker_sets: s=new_marker_set('particle_75 geometry') marker_sets["particle_75 geometry"]=s s= marker_sets["particle_75 geometry"] mark=s.place_marker((1380.82, 3338.8, 3998.45), (0.7, 0.7, 0.7), 167.846) if "particle_76 geometry" not in marker_sets: s=new_marker_set('particle_76 geometry') marker_sets["particle_76 geometry"]=s s= marker_sets["particle_76 geometry"] mark=s.place_marker((1046.63, 3619.82, 3709.06), (0.7, 0.7, 0.7), 259.68) if "particle_77 geometry" not in marker_sets: s=new_marker_set('particle_77 geometry') marker_sets["particle_77 geometry"]=s s= marker_sets["particle_77 geometry"] mark=s.place_marker((614.949, 3472.15, 3569.53), (0.7, 0.7, 0.7), 80.2854) if "particle_78 geometry" not in marker_sets: s=new_marker_set('particle_78 geometry') marker_sets["particle_78 geometry"]=s s= marker_sets["particle_78 geometry"] mark=s.place_marker((485.953, 3498.96, 3739.87), (0.7, 0.7, 0.7), 82.4427) if "particle_79 geometry" not in marker_sets: s=new_marker_set('particle_79 geometry') marker_sets["particle_79 geometry"]=s s= marker_sets["particle_79 geometry"] mark=s.place_marker((469.933, 3847.22, 3836.31), (0.7, 0.7, 0.7), 212.811) if "particle_80 geometry" not in marker_sets: s=new_marker_set('particle_80 geometry') marker_sets["particle_80 geometry"]=s s= marker_sets["particle_80 geometry"] mark=s.place_marker((886.004, 4118.01, 3304.47), (0.7, 0.7, 0.7), 176.391) if "particle_81 geometry" not in marker_sets: s=new_marker_set('particle_81 geometry') marker_sets["particle_81 geometry"]=s s= marker_sets["particle_81 geometry"] mark=s.place_marker((1359.83, 3751.74, 2872.72), (0.7, 0.7, 0.7), 99.3204) if "particle_82 geometry" not in marker_sets: s=new_marker_set('particle_82 geometry') marker_sets["particle_82 geometry"]=s s= marker_sets["particle_82 geometry"] mark=s.place_marker((1481.84, 3252.26, 2574.88), (0.7, 0.7, 0.7), 166.62) if "particle_83 geometry" not in marker_sets: s=new_marker_set('particle_83 geometry') marker_sets["particle_83 geometry"]=s s= marker_sets["particle_83 geometry"] mark=s.place_marker((1470.73, 3098.34, 2459.25), (0.7, 0.7, 0.7), 102.831) if "particle_84 geometry" not in marker_sets: s=new_marker_set('particle_84 geometry') marker_sets["particle_84 geometry"]=s s= marker_sets["particle_84 geometry"] mark=s.place_marker((1168.59, 3883.83, 2880.49), (0.7, 0.7, 0.7), 65.0997) if "particle_85 geometry" not in marker_sets: s=new_marker_set('particle_85 geometry') marker_sets["particle_85 geometry"]=s s= marker_sets["particle_85 geometry"] mark=s.place_marker((1279.59, 3550.72, 3231.46), (0.7, 0.7, 0.7), 92.1294) if "particle_86 geometry" not in marker_sets: s=new_marker_set('particle_86 geometry') marker_sets["particle_86 geometry"]=s s= marker_sets["particle_86 geometry"] mark=s.place_marker((1496.95, 3073.77, 3349.07), (0.7, 0.7, 0.7), 194.791) if "particle_87 geometry" not in marker_sets: s=new_marker_set('particle_87 geometry') marker_sets["particle_87 geometry"]=s s= marker_sets["particle_87 geometry"] mark=s.place_marker((1717.54, 2804.96, 3508.05), (0.7, 0.7, 
0.7), 120.766) if "particle_88 geometry" not in marker_sets: s=new_marker_set('particle_88 geometry') marker_sets["particle_88 geometry"]=s s= marker_sets["particle_88 geometry"] mark=s.place_marker((1665.43, 3276.68, 3838.83), (0.7, 0.7, 0.7), 217.803) if "particle_89 geometry" not in marker_sets: s=new_marker_set('particle_89 geometry') marker_sets["particle_89 geometry"]=s s= marker_sets["particle_89 geometry"] mark=s.place_marker((1280.5, 3242.37, 3681.28), (0.7, 0.7, 0.7), 115.775) if "particle_90 geometry" not in marker_sets: s=new_marker_set('particle_90 geometry') marker_sets["particle_90 geometry"]=s s= marker_sets["particle_90 geometry"] mark=s.place_marker((1066.7, 2892.62, 3585.36), (0.7, 0.7, 0.7), 115.648) if "particle_91 geometry" not in marker_sets: s=new_marker_set('particle_91 geometry') marker_sets["particle_91 geometry"]=s s= marker_sets["particle_91 geometry"] mark=s.place_marker((1300.04, 2719.56, 3415.09), (0.7, 0.7, 0.7), 83.8386) if "particle_92 geometry" not in marker_sets: s=new_marker_set('particle_92 geometry') marker_sets["particle_92 geometry"]=s s= marker_sets["particle_92 geometry"] mark=s.place_marker((1656.4, 2705.44, 3570.43), (0.7, 0.7, 0.7), 124.32) if "particle_93 geometry" not in marker_sets: s=new_marker_set('particle_93 geometry') marker_sets["particle_93 geometry"]=s s= marker_sets["particle_93 geometry"] mark=s.place_marker((1987.68, 2523.02, 3800.62), (0.7, 0.7, 0.7), 185.993) if "particle_94 geometry" not in marker_sets: s=new_marker_set('particle_94 geometry') marker_sets["particle_94 geometry"]=s s= marker_sets["particle_94 geometry"] mark=s.place_marker((1993, 2261.19, 4351.74), (0.7, 0.7, 0.7), 238.826) if "particle_95 geometry" not in marker_sets: s=new_marker_set('particle_95 geometry') marker_sets["particle_95 geometry"]=s s= marker_sets["particle_95 geometry"] mark=s.place_marker((1616.64, 2122.79, 4726.33), (0.7, 0.7, 0.7), 128.465) if "particle_96 geometry" not in marker_sets: s=new_marker_set('particle_96 geometry') marker_sets["particle_96 geometry"]=s s= marker_sets["particle_96 geometry"] mark=s.place_marker((1170.24, 2175.32, 4250.69), (0.7, 0.7, 0.7), 203.209) if "particle_97 geometry" not in marker_sets: s=new_marker_set('particle_97 geometry') marker_sets["particle_97 geometry"]=s s= marker_sets["particle_97 geometry"] mark=s.place_marker((1230.7, 2411.54, 3793.71), (0.7, 0.7, 0.7), 160.486) if "particle_98 geometry" not in marker_sets: s=new_marker_set('particle_98 geometry') marker_sets["particle_98 geometry"]=s s= marker_sets["particle_98 geometry"] mark=s.place_marker((1508.99, 2593.7, 3913.91), (0.7, 0.7, 0.7), 149.277) if "particle_99 geometry" not in marker_sets: s=new_marker_set('particle_99 geometry') marker_sets["particle_99 geometry"]=s s= marker_sets["particle_99 geometry"] mark=s.place_marker((1269.36, 2821.02, 4335.36), (0.7, 0.7, 0.7), 35.7435) if "particle_100 geometry" not in marker_sets: s=new_marker_set('particle_100 geometry') marker_sets["particle_100 geometry"]=s s= marker_sets["particle_100 geometry"] mark=s.place_marker((1169.42, 2529.58, 3379.37), (0.7, 0.7, 0.7), 98.3898) if "particle_101 geometry" not in marker_sets: s=new_marker_set('particle_101 geometry') marker_sets["particle_101 geometry"]=s s= marker_sets["particle_101 geometry"] mark=s.place_marker((1322.96, 2267.73, 2363.11), (0.7, 0.7, 0.7), 188.404) if "particle_102 geometry" not in marker_sets: s=new_marker_set('particle_102 geometry') marker_sets["particle_102 geometry"]=s s= marker_sets["particle_102 geometry"] 
mark=s.place_marker((1713.69, 2290.53, 2028.12), (0.7, 0.7, 0.7), 110.318) if "particle_103 geometry" not in marker_sets: s=new_marker_set('particle_103 geometry') marker_sets["particle_103 geometry"]=s s= marker_sets["particle_103 geometry"] mark=s.place_marker((1712.1, 2572.06, 2301.56), (0.7, 0.7, 0.7), 127.534) if "particle_104 geometry" not in marker_sets: s=new_marker_set('particle_104 geometry') marker_sets["particle_104 geometry"]=s s= marker_sets["particle_104 geometry"] mark=s.place_marker((1613.57, 2715.52, 2648.03), (0.7, 0.7, 0.7), 91.368) if "particle_105 geometry" not in marker_sets: s=new_marker_set('particle_105 geometry') marker_sets["particle_105 geometry"]=s s= marker_sets["particle_105 geometry"] mark=s.place_marker((1464.95, 2755.05, 3018.66), (0.7, 0.7, 0.7), 131.045) if "particle_106 geometry" not in marker_sets: s=new_marker_set('particle_106 geometry') marker_sets["particle_106 geometry"]=s s= marker_sets["particle_106 geometry"] mark=s.place_marker((1343.45, 2575.21, 3368.75), (0.7, 0.7, 0.7), 143.608) if "particle_107 geometry" not in marker_sets: s=new_marker_set('particle_107 geometry') marker_sets["particle_107 geometry"]=s s= marker_sets["particle_107 geometry"] mark=s.place_marker((1596.45, 2511.97, 3666.9), (0.7, 0.7, 0.7), 135.783) if "particle_108 geometry" not in marker_sets: s=new_marker_set('particle_108 geometry') marker_sets["particle_108 geometry"]=s s= marker_sets["particle_108 geometry"] mark=s.place_marker((1844.16, 2497.68, 3895.53), (0.7, 0.7, 0.7), 92.5947) if "particle_109 geometry" not in marker_sets: s=new_marker_set('particle_109 geometry') marker_sets["particle_109 geometry"]=s s= marker_sets["particle_109 geometry"] mark=s.place_marker((2030.74, 2603.94, 3723.49), (0.7, 0.7, 0.7), 150.123) if "particle_110 geometry" not in marker_sets: s=new_marker_set('particle_110 geometry') marker_sets["particle_110 geometry"]=s s= marker_sets["particle_110 geometry"] mark=s.place_marker((2092.89, 2750.61, 3511.04), (0.7, 0.7, 0.7), 121.57) if "particle_111 geometry" not in marker_sets: s=new_marker_set('particle_111 geometry') marker_sets["particle_111 geometry"]=s s= marker_sets["particle_111 geometry"] mark=s.place_marker((2285.24, 2984.6, 3648.96), (0.7, 0.7, 0.7), 104.777) if "particle_112 geometry" not in marker_sets: s=new_marker_set('particle_112 geometry') marker_sets["particle_112 geometry"]=s s= marker_sets["particle_112 geometry"] mark=s.place_marker((2429.43, 2891.63, 3272.3), (0.7, 0.7, 0.7), 114.844) if "particle_113 geometry" not in marker_sets: s=new_marker_set('particle_113 geometry') marker_sets["particle_113 geometry"]=s s= marker_sets["particle_113 geometry"] mark=s.place_marker((2576.95, 2783.7, 2862.55), (0.7, 0.7, 0.7), 150.588) if "particle_114 geometry" not in marker_sets: s=new_marker_set('particle_114 geometry') marker_sets["particle_114 geometry"]=s s= marker_sets["particle_114 geometry"] mark=s.place_marker((2299.56, 2677.99, 2562.02), (0.7, 0.7, 0.7), 103.55) if "particle_115 geometry" not in marker_sets: s=new_marker_set('particle_115 geometry') marker_sets["particle_115 geometry"]=s s= marker_sets["particle_115 geometry"] mark=s.place_marker((1886.4, 2891.84, 2280.86), (0.7, 0.7, 0.7), 215.392) if "particle_116 geometry" not in marker_sets: s=new_marker_set('particle_116 geometry') marker_sets["particle_116 geometry"]=s s= marker_sets["particle_116 geometry"] mark=s.place_marker((1541.76, 3035.28, 1876.78), (0.7, 0.7, 0.7), 99.9126) if "particle_117 geometry" not in marker_sets: s=new_marker_set('particle_117 
geometry') marker_sets["particle_117 geometry"]=s s= marker_sets["particle_117 geometry"] mark=s.place_marker((1214.23, 3658.04, 1726.81), (0.7, 0.7, 0.7), 99.7857) if "particle_118 geometry" not in marker_sets: s=new_marker_set('particle_118 geometry') marker_sets["particle_118 geometry"]=s s= marker_sets["particle_118 geometry"] mark=s.place_marker((846.04, 4085.66, 1785.25), (0.7, 0.7, 0.7), 109.98) if "particle_119 geometry" not in marker_sets: s=new_marker_set('particle_119 geometry') marker_sets["particle_119 geometry"]=s s= marker_sets["particle_119 geometry"] mark=s.place_marker((1287.98, 3850.74, 1923.63), (0.7, 0.7, 0.7), 102.831) if "particle_120 geometry" not in marker_sets: s=new_marker_set('particle_120 geometry') marker_sets["particle_120 geometry"]=s s= marker_sets["particle_120 geometry"] mark=s.place_marker((1462.32, 3548.85, 2123.45), (0.7, 0.7, 0.7), 103.593) if "particle_121 geometry" not in marker_sets: s=new_marker_set('particle_121 geometry') marker_sets["particle_121 geometry"]=s s= marker_sets["particle_121 geometry"] mark=s.place_marker((1572.87, 3245.23, 2484.13), (0.7, 0.7, 0.7), 173.472) if "particle_122 geometry" not in marker_sets: s=new_marker_set('particle_122 geometry') marker_sets["particle_122 geometry"]=s s= marker_sets["particle_122 geometry"] mark=s.place_marker((1433.54, 3214.93, 3022.63), (0.7, 0.7, 0.7), 113.575) if "particle_123 geometry" not in marker_sets: s=new_marker_set('particle_123 geometry') marker_sets["particle_123 geometry"]=s s= marker_sets["particle_123 geometry"] mark=s.place_marker((1583.43, 2952.46, 3396.19), (0.7, 0.7, 0.7), 128.296) if "particle_124 geometry" not in marker_sets: s=new_marker_set('particle_124 geometry') marker_sets["particle_124 geometry"]=s s= marker_sets["particle_124 geometry"] mark=s.place_marker((1811.4, 2770.88, 3791.12), (0.7, 0.7, 0.7), 145.004) if "particle_125 geometry" not in marker_sets: s=new_marker_set('particle_125 geometry') marker_sets["particle_125 geometry"]=s s= marker_sets["particle_125 geometry"] mark=s.place_marker((2193.24, 2458.46, 4082.79), (0.7, 0.7, 0.7), 148.261) if "particle_126 geometry" not in marker_sets: s=new_marker_set('particle_126 geometry') marker_sets["particle_126 geometry"]=s s= marker_sets["particle_126 geometry"] mark=s.place_marker((2429.59, 2199.64, 4649.2), (0.7, 0.7, 0.7), 127.704) if "particle_127 geometry" not in marker_sets: s=new_marker_set('particle_127 geometry') marker_sets["particle_127 geometry"]=s s= marker_sets["particle_127 geometry"] mark=s.place_marker((2443.35, 1944.73, 5199.9), (0.7, 0.7, 0.7), 129.607) if "particle_128 geometry" not in marker_sets: s=new_marker_set('particle_128 geometry') marker_sets["particle_128 geometry"]=s s= marker_sets["particle_128 geometry"] mark=s.place_marker((2010.84, 2027.01, 5036.63), (0.7, 0.7, 0.7), 139.759) if "particle_129 geometry" not in marker_sets: s=new_marker_set('particle_129 geometry') marker_sets["particle_129 geometry"]=s s= marker_sets["particle_129 geometry"] mark=s.place_marker((1591.71, 2098.64, 4582.86), (0.7, 0.7, 0.7), 118.567) if "particle_130 geometry" not in marker_sets: s=new_marker_set('particle_130 geometry') marker_sets["particle_130 geometry"]=s s= marker_sets["particle_130 geometry"] mark=s.place_marker((1558.49, 2438.54, 4392.82), (0.7, 0.7, 0.7), 136.164) if "particle_131 geometry" not in marker_sets: s=new_marker_set('particle_131 geometry') marker_sets["particle_131 geometry"]=s s= marker_sets["particle_131 geometry"] mark=s.place_marker((1623.02, 2685.74, 4028.43), (0.7, 0.7, 0.7), 
121.655) if "particle_132 geometry" not in marker_sets: s=new_marker_set('particle_132 geometry') marker_sets["particle_132 geometry"]=s s= marker_sets["particle_132 geometry"] mark=s.place_marker((1817.16, 2934.72, 3720.99), (0.7, 0.7, 0.7), 127.492) if "particle_133 geometry" not in marker_sets: s=new_marker_set('particle_133 geometry') marker_sets["particle_133 geometry"]=s s= marker_sets["particle_133 geometry"] mark=s.place_marker((1941.11, 3349.57, 3703.59), (0.7, 0.7, 0.7), 138.617) if "particle_134 geometry" not in marker_sets: s=new_marker_set('particle_134 geometry') marker_sets["particle_134 geometry"]=s s= marker_sets["particle_134 geometry"] mark=s.place_marker((2218.1, 3437.14, 3490.06), (0.7, 0.7, 0.7), 120.766) if "particle_135 geometry" not in marker_sets: s=new_marker_set('particle_135 geometry') marker_sets["particle_135 geometry"]=s s= marker_sets["particle_135 geometry"] mark=s.place_marker((2344.88, 3361.46, 3289.98), (0.7, 0.7, 0.7), 145.893) if "particle_136 geometry" not in marker_sets: s=new_marker_set('particle_136 geometry') marker_sets["particle_136 geometry"]=s s= marker_sets["particle_136 geometry"] mark=s.place_marker((2137.04, 2938.81, 3261.78), (0.7, 0.7, 0.7), 185.02) if "particle_137 geometry" not in marker_sets: s=new_marker_set('particle_137 geometry') marker_sets["particle_137 geometry"]=s s= marker_sets["particle_137 geometry"] mark=s.place_marker((2182.42, 2445.89, 3035.18), (0.7, 0.7, 0.7), 221.314) if "particle_138 geometry" not in marker_sets: s=new_marker_set('particle_138 geometry') marker_sets["particle_138 geometry"]=s s= marker_sets["particle_138 geometry"] mark=s.place_marker((2404.72, 2055.48, 2785.44), (0.7, 0.7, 0.7), 165.139) if "particle_139 geometry" not in marker_sets: s=new_marker_set('particle_139 geometry') marker_sets["particle_139 geometry"]=s s= marker_sets["particle_139 geometry"] mark=s.place_marker((2321.07, 2027.08, 2682.99), (0.7, 0.7, 0.7), 179.437) if "particle_140 geometry" not in marker_sets: s=new_marker_set('particle_140 geometry') marker_sets["particle_140 geometry"]=s s= marker_sets["particle_140 geometry"] mark=s.place_marker((2021.37, 2331.66, 2754.97), (0.7, 0.7, 0.7), 137.898) if "particle_141 geometry" not in marker_sets: s=new_marker_set('particle_141 geometry') marker_sets["particle_141 geometry"]=s s= marker_sets["particle_141 geometry"] mark=s.place_marker((1742.15, 2597.42, 2832.32), (0.7, 0.7, 0.7), 124.658) if "particle_142 geometry" not in marker_sets: s=new_marker_set('particle_142 geometry') marker_sets["particle_142 geometry"]=s s= marker_sets["particle_142 geometry"] mark=s.place_marker((1807.26, 2928.63, 2885.26), (0.7, 0.7, 0.7), 97.7553) if "particle_143 geometry" not in marker_sets: s=new_marker_set('particle_143 geometry') marker_sets["particle_143 geometry"]=s s= marker_sets["particle_143 geometry"] mark=s.place_marker((1781.17, 3245.93, 2906.1), (0.7, 0.7, 0.7), 92.9331) if "particle_144 geometry" not in marker_sets: s=new_marker_set('particle_144 geometry') marker_sets["particle_144 geometry"]=s s= marker_sets["particle_144 geometry"] mark=s.place_marker((1636.24, 3580.87, 2953.13), (0.7, 0.7, 0.7), 123.135) if "particle_145 geometry" not in marker_sets: s=new_marker_set('particle_145 geometry') marker_sets["particle_145 geometry"]=s s= marker_sets["particle_145 geometry"] mark=s.place_marker((1610.82, 3268.24, 2699.59), (0.7, 0.7, 0.7), 125.716) if "particle_146 geometry" not in marker_sets: s=new_marker_set('particle_146 geometry') marker_sets["particle_146 geometry"]=s s= 
marker_sets["particle_146 geometry"] mark=s.place_marker((1722.97, 2994.43, 2625.4), (0.7, 0.7, 0.7), 127.534) if "particle_147 geometry" not in marker_sets: s=new_marker_set('particle_147 geometry') marker_sets["particle_147 geometry"]=s s= marker_sets["particle_147 geometry"] mark=s.place_marker((1731.62, 2879.87, 2879.8), (0.7, 0.7, 0.7), 94.9212) if "particle_148 geometry" not in marker_sets: s=new_marker_set('particle_148 geometry') marker_sets["particle_148 geometry"]=s s= marker_sets["particle_148 geometry"] mark=s.place_marker((1844.73, 2482.67, 2746.77), (0.7, 0.7, 0.7), 137.644) if "particle_149 geometry" not in marker_sets: s=new_marker_set('particle_149 geometry') marker_sets["particle_149 geometry"]=s s= marker_sets["particle_149 geometry"] mark=s.place_marker((2015.31, 2178.97, 2731.69), (0.7, 0.7, 0.7), 149.277) if "particle_150 geometry" not in marker_sets: s=new_marker_set('particle_150 geometry') marker_sets["particle_150 geometry"]=s s= marker_sets["particle_150 geometry"] mark=s.place_marker((2281.95, 2418.65, 2744.08), (0.7, 0.7, 0.7), 103.677) if "particle_151 geometry" not in marker_sets: s=new_marker_set('particle_151 geometry') marker_sets["particle_151 geometry"]=s s= marker_sets["particle_151 geometry"] mark=s.place_marker((2542.63, 2769.46, 2977.12), (0.7, 0.7, 0.7), 99.6588) if "particle_152 geometry" not in marker_sets: s=new_marker_set('particle_152 geometry') marker_sets["particle_152 geometry"]=s s= marker_sets["particle_152 geometry"] mark=s.place_marker((2762.9, 2985.04, 3198.66), (0.7, 0.7, 0.7), 134.133) if "particle_153 geometry" not in marker_sets: s=new_marker_set('particle_153 geometry') marker_sets["particle_153 geometry"]=s s= marker_sets["particle_153 geometry"] mark=s.place_marker((2617.5, 3009.45, 2906.68), (0.7, 0.7, 0.7), 173.007) if "particle_154 geometry" not in marker_sets: s=new_marker_set('particle_154 geometry') marker_sets["particle_154 geometry"]=s s= marker_sets["particle_154 geometry"] mark=s.place_marker((2443.56, 2576.89, 2585.1), (0.7, 0.7, 0.7), 141.028) if "particle_155 geometry" not in marker_sets: s=new_marker_set('particle_155 geometry') marker_sets["particle_155 geometry"]=s s= marker_sets["particle_155 geometry"] mark=s.place_marker((2194.03, 2261.14, 2350.31), (0.7, 0.7, 0.7), 161.121) if "particle_156 geometry" not in marker_sets: s=new_marker_set('particle_156 geometry') marker_sets["particle_156 geometry"]=s s= marker_sets["particle_156 geometry"] mark=s.place_marker((1876.22, 2166.23, 2497.78), (0.7, 0.7, 0.7), 119.582) if "particle_157 geometry" not in marker_sets: s=new_marker_set('particle_157 geometry') marker_sets["particle_157 geometry"]=s s= marker_sets["particle_157 geometry"] mark=s.place_marker((1720.51, 2465.28, 2722.84), (0.7, 0.7, 0.7), 137.094) if "particle_158 geometry" not in marker_sets: s=new_marker_set('particle_158 geometry') marker_sets["particle_158 geometry"]=s s= marker_sets["particle_158 geometry"] mark=s.place_marker((1472.39, 2881.28, 2862.14), (0.7, 0.7, 0.7), 149.234) if "particle_159 geometry" not in marker_sets: s=new_marker_set('particle_159 geometry') marker_sets["particle_159 geometry"]=s s= marker_sets["particle_159 geometry"] mark=s.place_marker((1338.87, 2935.84, 2474.67), (0.7, 0.7, 0.7), 151.011) if "particle_160 geometry" not in marker_sets: s=new_marker_set('particle_160 geometry') marker_sets["particle_160 geometry"]=s s= marker_sets["particle_160 geometry"] mark=s.place_marker((1408.5, 2757.04, 2018.6), (0.7, 0.7, 0.7), 184.216) if "particle_161 geometry" not in marker_sets: 
s=new_marker_set('particle_161 geometry') marker_sets["particle_161 geometry"]=s s= marker_sets["particle_161 geometry"] mark=s.place_marker((1753.23, 2926.96, 1886.01), (0.7, 0.7, 0.7), 170.596) if "particle_162 geometry" not in marker_sets: s=new_marker_set('particle_162 geometry') marker_sets["particle_162 geometry"]=s s= marker_sets["particle_162 geometry"] mark=s.place_marker((1665.04, 3485.8, 2161.74), (0.7, 0.7, 0.7), 215.603) if "particle_163 geometry" not in marker_sets: s=new_marker_set('particle_163 geometry') marker_sets["particle_163 geometry"]=s s= marker_sets["particle_163 geometry"] mark=s.place_marker((1462.34, 4252.89, 2550.88), (0.7, 0.7, 0.7), 79.0164) if "particle_164 geometry" not in marker_sets: s=new_marker_set('particle_164 geometry') marker_sets["particle_164 geometry"]=s s= marker_sets["particle_164 geometry"] mark=s.place_marker((1759.34, 4370.01, 2689.14), (0.7, 0.7, 0.7), 77.2821) if "particle_165 geometry" not in marker_sets: s=new_marker_set('particle_165 geometry') marker_sets["particle_165 geometry"]=s s= marker_sets["particle_165 geometry"] mark=s.place_marker((1988.54, 4109.78, 2769.16), (0.7, 0.7, 0.7), 188.658) if "particle_166 geometry" not in marker_sets: s=new_marker_set('particle_166 geometry') marker_sets["particle_166 geometry"]=s s= marker_sets["particle_166 geometry"] mark=s.place_marker((2303.09, 4117.51, 2710.28), (0.7, 0.7, 0.7), 115.437) if "particle_167 geometry" not in marker_sets: s=new_marker_set('particle_167 geometry') marker_sets["particle_167 geometry"]=s s= marker_sets["particle_167 geometry"] mark=s.place_marker((2209.64, 3535.33, 2573.73), (0.7, 0.7, 0.7), 88.4916) if "particle_168 geometry" not in marker_sets: s=new_marker_set('particle_168 geometry') marker_sets["particle_168 geometry"]=s s= marker_sets["particle_168 geometry"] mark=s.place_marker((2096.99, 2938.46, 2427.82), (0.7, 0.7, 0.7), 108.88) if "particle_169 geometry" not in marker_sets: s=new_marker_set('particle_169 geometry') marker_sets["particle_169 geometry"]=s s= marker_sets["particle_169 geometry"] mark=s.place_marker((1845.49, 2695.74, 2358.05), (0.7, 0.7, 0.7), 172.119) if "particle_170 geometry" not in marker_sets: s=new_marker_set('particle_170 geometry') marker_sets["particle_170 geometry"]=s s= marker_sets["particle_170 geometry"] mark=s.place_marker((1709.57, 3154.5, 2466.64), (0.7, 0.7, 0.7), 139.505) if "particle_171 geometry" not in marker_sets: s=new_marker_set('particle_171 geometry') marker_sets["particle_171 geometry"]=s s= marker_sets["particle_171 geometry"] mark=s.place_marker((1593.12, 3607.82, 2590.08), (0.7, 0.7, 0.7), 92.7639) if "particle_172 geometry" not in marker_sets: s=new_marker_set('particle_172 geometry') marker_sets["particle_172 geometry"]=s s= marker_sets["particle_172 geometry"] mark=s.place_marker((1374.97, 3522.61, 2512.68), (0.7, 0.7, 0.7), 89.8452) if "particle_173 geometry" not in marker_sets: s=new_marker_set('particle_173 geometry') marker_sets["particle_173 geometry"]=s s= marker_sets["particle_173 geometry"] mark=s.place_marker((1599.05, 3532.11, 2345.71), (0.7, 0.7, 0.7), 149.446) if "particle_174 geometry" not in marker_sets: s=new_marker_set('particle_174 geometry') marker_sets["particle_174 geometry"]=s s= marker_sets["particle_174 geometry"] mark=s.place_marker((1839.23, 3766.32, 2406.83), (0.7, 0.7, 0.7), 126.858) if "particle_175 geometry" not in marker_sets: s=new_marker_set('particle_175 geometry') marker_sets["particle_175 geometry"]=s s= marker_sets["particle_175 geometry"] mark=s.place_marker((1710.12, 
3947.55, 2659.44), (0.7, 0.7, 0.7), 106.046) if "particle_176 geometry" not in marker_sets: s=new_marker_set('particle_176 geometry') marker_sets["particle_176 geometry"]=s s= marker_sets["particle_176 geometry"] mark=s.place_marker((1318.24, 3776.79, 2926.64), (0.7, 0.7, 0.7), 156.298) if "particle_177 geometry" not in marker_sets: s=new_marker_set('particle_177 geometry') marker_sets["particle_177 geometry"]=s s= marker_sets["particle_177 geometry"] mark=s.place_marker((956.258, 3535.08, 3348.37), (0.7, 0.7, 0.7), 231.212) if "particle_178 geometry" not in marker_sets: s=new_marker_set('particle_178 geometry') marker_sets["particle_178 geometry"]=s s= marker_sets["particle_178 geometry"] mark=s.place_marker((711.147, 3028.52, 3293.96), (0.7, 0.7, 0.7), 88.4916) if "particle_179 geometry" not in marker_sets: s=new_marker_set('particle_179 geometry') marker_sets["particle_179 geometry"]=s s= marker_sets["particle_179 geometry"] mark=s.place_marker((818.465, 2687.3, 2977.72), (0.7, 0.7, 0.7), 111.334) if "particle_180 geometry" not in marker_sets: s=new_marker_set('particle_180 geometry') marker_sets["particle_180 geometry"]=s s= marker_sets["particle_180 geometry"] mark=s.place_marker((1230.42, 2524.04, 2548.39), (0.7, 0.7, 0.7), 127.619) if "particle_181 geometry" not in marker_sets: s=new_marker_set('particle_181 geometry') marker_sets["particle_181 geometry"]=s s= marker_sets["particle_181 geometry"] mark=s.place_marker((1530.68, 2434.22, 2211.53), (0.7, 0.7, 0.7), 230.746) if "particle_182 geometry" not in marker_sets: s=new_marker_set('particle_182 geometry') marker_sets["particle_182 geometry"]=s s= marker_sets["particle_182 geometry"] mark=s.place_marker((1462.02, 2765.87, 2407.25), (0.7, 0.7, 0.7), 124.573) if "particle_183 geometry" not in marker_sets: s=new_marker_set('particle_183 geometry') marker_sets["particle_183 geometry"]=s s= marker_sets["particle_183 geometry"] mark=s.place_marker((1258.93, 3278.44, 2663.65), (0.7, 0.7, 0.7), 124.489) if "particle_184 geometry" not in marker_sets: s=new_marker_set('particle_184 geometry') marker_sets["particle_184 geometry"]=s s= marker_sets["particle_184 geometry"] mark=s.place_marker((1502.98, 3550.95, 2787.43), (0.7, 0.7, 0.7), 196.61) if "particle_185 geometry" not in marker_sets: s=new_marker_set('particle_185 geometry') marker_sets["particle_185 geometry"]=s s= marker_sets["particle_185 geometry"] mark=s.place_marker((1614.74, 3380.99, 2789.56), (0.7, 0.7, 0.7), 134.049) if "particle_186 geometry" not in marker_sets: s=new_marker_set('particle_186 geometry') marker_sets["particle_186 geometry"]=s s= marker_sets["particle_186 geometry"] mark=s.place_marker((1312.53, 3421.41, 2728.61), (0.7, 0.7, 0.7), 141.493) if "particle_187 geometry" not in marker_sets: s=new_marker_set('particle_187 geometry') marker_sets["particle_187 geometry"]=s s= marker_sets["particle_187 geometry"] mark=s.place_marker((971.504, 3691.2, 2725.46), (0.7, 0.7, 0.7), 172.203) if "particle_188 geometry" not in marker_sets: s=new_marker_set('particle_188 geometry') marker_sets["particle_188 geometry"]=s s= marker_sets["particle_188 geometry"] mark=s.place_marker((1600.99, 3615.22, 2696.21), (0.7, 0.7, 0.7), 271.354) if "particle_189 geometry" not in marker_sets: s=new_marker_set('particle_189 geometry') marker_sets["particle_189 geometry"]=s s= marker_sets["particle_189 geometry"] mark=s.place_marker((1979.38, 3415, 2928.39), (0.7, 0.7, 0.7), 97.0785) if "particle_190 geometry" not in marker_sets: s=new_marker_set('particle_190 geometry') 
marker_sets["particle_190 geometry"]=s s= marker_sets["particle_190 geometry"] mark=s.place_marker((2095.69, 3285.75, 3276.21), (0.7, 0.7, 0.7), 151.857) if "particle_191 geometry" not in marker_sets: s=new_marker_set('particle_191 geometry') marker_sets["particle_191 geometry"]=s s= marker_sets["particle_191 geometry"] mark=s.place_marker((2529.16, 3118.2, 3575.06), (0.7, 0.7, 0.7), 199.233) if "particle_192 geometry" not in marker_sets: s=new_marker_set('particle_192 geometry') marker_sets["particle_192 geometry"]=s s= marker_sets["particle_192 geometry"] mark=s.place_marker((2795.05, 2677.36, 3379.26), (0.7, 0.7, 0.7), 118.863) if "particle_193 geometry" not in marker_sets: s=new_marker_set('particle_193 geometry') marker_sets["particle_193 geometry"]=s s= marker_sets["particle_193 geometry"] mark=s.place_marker((2951.39, 2370.65, 3600.66), (0.7, 0.7, 0.7), 172.415) if "particle_194 geometry" not in marker_sets: s=new_marker_set('particle_194 geometry') marker_sets["particle_194 geometry"]=s s= marker_sets["particle_194 geometry"] mark=s.place_marker((3052.67, 2342.01, 4079.13), (0.7, 0.7, 0.7), 134.26) if "particle_195 geometry" not in marker_sets: s=new_marker_set('particle_195 geometry') marker_sets["particle_195 geometry"]=s s= marker_sets["particle_195 geometry"] mark=s.place_marker((3255.29, 2539.83, 4956.41), (0.7, 0.7, 0.7), 139.548) if "particle_196 geometry" not in marker_sets: s=new_marker_set('particle_196 geometry') marker_sets["particle_196 geometry"]=s s= marker_sets["particle_196 geometry"] mark=s.place_marker((2715.26, 2716.14, 4985.63), (0.7, 0.7, 0.7), 196.526) if "particle_197 geometry" not in marker_sets: s=new_marker_set('particle_197 geometry') marker_sets["particle_197 geometry"]=s s= marker_sets["particle_197 geometry"] mark=s.place_marker((2161.25, 2801.95, 4403.76), (0.7, 0.7, 0.7), 136.206) if "particle_198 geometry" not in marker_sets: s=new_marker_set('particle_198 geometry') marker_sets["particle_198 geometry"]=s s= marker_sets["particle_198 geometry"] mark=s.place_marker((1819.1, 2465.83, 3522.74), (0.7, 0.7, 0.7), 152.322) if "particle_199 geometry" not in marker_sets: s=new_marker_set('particle_199 geometry') marker_sets["particle_199 geometry"]=s s= marker_sets["particle_199 geometry"] mark=s.place_marker((1731.93, 2334, 2874.81), (0.7, 0.7, 0.7), 126.054) if "particle_200 geometry" not in marker_sets: s=new_marker_set('particle_200 geometry') marker_sets["particle_200 geometry"]=s s= marker_sets["particle_200 geometry"] mark=s.place_marker((1578.96, 2723.19, 2742.39), (0.7, 0.7, 0.7), 164.378) if "particle_201 geometry" not in marker_sets: s=new_marker_set('particle_201 geometry') marker_sets["particle_201 geometry"]=s s= marker_sets["particle_201 geometry"] mark=s.place_marker((1646.41, 3167.97, 2712.49), (0.7, 0.7, 0.7), 122.205) if "particle_202 geometry" not in marker_sets: s=new_marker_set('particle_202 geometry') marker_sets["particle_202 geometry"]=s s= marker_sets["particle_202 geometry"] mark=s.place_marker((1874.69, 3539.8, 2664.36), (0.7, 0.7, 0.7), 134.979) if "particle_203 geometry" not in marker_sets: s=new_marker_set('particle_203 geometry') marker_sets["particle_203 geometry"]=s s= marker_sets["particle_203 geometry"] mark=s.place_marker((2170.35, 3393.79, 2572.64), (0.7, 0.7, 0.7), 136.375) if "particle_204 geometry" not in marker_sets: s=new_marker_set('particle_204 geometry') marker_sets["particle_204 geometry"]=s s= marker_sets["particle_204 geometry"] mark=s.place_marker((2007.39, 3370.7, 2815.9), (0.7, 0.7, 0.7), 151.688) if 
"particle_205 geometry" not in marker_sets: s=new_marker_set('particle_205 geometry') marker_sets["particle_205 geometry"]=s s= marker_sets["particle_205 geometry"] mark=s.place_marker((1941.99, 3566.74, 2608.47), (0.7, 0.7, 0.7), 116.156) if "particle_206 geometry" not in marker_sets: s=new_marker_set('particle_206 geometry') marker_sets["particle_206 geometry"]=s s= marker_sets["particle_206 geometry"] mark=s.place_marker((1944.32, 2904.91, 2370.57), (0.7, 0.7, 0.7), 122.839) if "particle_207 geometry" not in marker_sets: s=new_marker_set('particle_207 geometry') marker_sets["particle_207 geometry"]=s s= marker_sets["particle_207 geometry"] mark=s.place_marker((1822.94, 2394.1, 2409.65), (0.7, 0.7, 0.7), 164.716) if "particle_208 geometry" not in marker_sets: s=new_marker_set('particle_208 geometry') marker_sets["particle_208 geometry"]=s s= marker_sets["particle_208 geometry"] mark=s.place_marker((1609.62, 2760.4, 3146.6), (0.7, 0.7, 0.7), 303.672) if "particle_209 geometry" not in marker_sets: s=new_marker_set('particle_209 geometry') marker_sets["particle_209 geometry"]=s s= marker_sets["particle_209 geometry"] mark=s.place_marker((1740.52, 3531.79, 3901.15), (0.7, 0.7, 0.7), 220.298) if "particle_210 geometry" not in marker_sets: s=new_marker_set('particle_210 geometry') marker_sets["particle_210 geometry"]=s s= marker_sets["particle_210 geometry"] mark=s.place_marker((1743.69, 3905.72, 3378.27), (0.7, 0.7, 0.7), 175.883) if "particle_211 geometry" not in marker_sets: s=new_marker_set('particle_211 geometry') marker_sets["particle_211 geometry"]=s s= marker_sets["particle_211 geometry"] mark=s.place_marker((1432.79, 4099.28, 2783.14), (0.7, 0.7, 0.7), 233.581) if "particle_212 geometry" not in marker_sets: s=new_marker_set('particle_212 geometry') marker_sets["particle_212 geometry"]=s s= marker_sets["particle_212 geometry"] mark=s.place_marker((873.754, 3778.78, 2320.67), (0.7, 0.7, 0.7), 231.127) if "particle_213 geometry" not in marker_sets: s=new_marker_set('particle_213 geometry') marker_sets["particle_213 geometry"]=s s= marker_sets["particle_213 geometry"] mark=s.place_marker((721.756, 3825.45, 1697.84), (0.7, 0.7, 0.7), 247.413) if "particle_214 geometry" not in marker_sets: s=new_marker_set('particle_214 geometry') marker_sets["particle_214 geometry"]=s s= marker_sets["particle_214 geometry"] mark=s.place_marker((916.647, 4100.36, 1128.44), (0.7, 0.7, 0.7), 200.206) if "particle_215 geometry" not in marker_sets: s=new_marker_set('particle_215 geometry') marker_sets["particle_215 geometry"]=s s= marker_sets["particle_215 geometry"] mark=s.place_marker((1280.27, 4292.22, 1031.47), (0.7, 0.7, 0.7), 150.419) if "particle_216 geometry" not in marker_sets: s=new_marker_set('particle_216 geometry') marker_sets["particle_216 geometry"]=s s= marker_sets["particle_216 geometry"] mark=s.place_marker((1344.89, 3686, 1083.45), (0.7, 0.7, 0.7), 140.14) if "particle_217 geometry" not in marker_sets: s=new_marker_set('particle_217 geometry') marker_sets["particle_217 geometry"]=s s= marker_sets["particle_217 geometry"] mark=s.place_marker((1145.22, 3299.63, 1009.84), (0.7, 0.7, 0.7), 132.949) if "particle_218 geometry" not in marker_sets: s=new_marker_set('particle_218 geometry') marker_sets["particle_218 geometry"]=s s= marker_sets["particle_218 geometry"] mark=s.place_marker((1129.86, 2933.12, 1004.29), (0.7, 0.7, 0.7), 141.113) if "particle_219 geometry" not in marker_sets: s=new_marker_set('particle_219 geometry') marker_sets["particle_219 geometry"]=s s= marker_sets["particle_219 
geometry"] mark=s.place_marker((884.041, 2895.93, 1224.04), (0.7, 0.7, 0.7), 171.526) if "particle_220 geometry" not in marker_sets: s=new_marker_set('particle_220 geometry') marker_sets["particle_220 geometry"]=s s= marker_sets["particle_220 geometry"] mark=s.place_marker((686.954, 3320.25, 1559.97), (0.7, 0.7, 0.7), 326.937) if "particle_221 geometry" not in marker_sets: s=new_marker_set('particle_221 geometry') marker_sets["particle_221 geometry"]=s s= marker_sets["particle_221 geometry"] mark=s.place_marker((839.31, 3632.16, 2025.31), (0.7, 0.7, 0.7), 92.0871) if "particle_222 geometry" not in marker_sets: s=new_marker_set('particle_222 geometry') marker_sets["particle_222 geometry"]=s s= marker_sets["particle_222 geometry"] mark=s.place_marker((1016.31, 3360.94, 2294.73), (0.7, 0.7, 0.7), 210.273) if "particle_223 geometry" not in marker_sets: s=new_marker_set('particle_223 geometry') marker_sets["particle_223 geometry"]=s s= marker_sets["particle_223 geometry"] mark=s.place_marker((1279.65, 2676.56, 2055.04), (0.7, 0.7, 0.7), 122.628) if "particle_224 geometry" not in marker_sets: s=new_marker_set('particle_224 geometry') marker_sets["particle_224 geometry"]=s s= marker_sets["particle_224 geometry"] mark=s.place_marker((1325.8, 2456.22, 1919.87), (0.7, 0.7, 0.7), 109.176) if "particle_225 geometry" not in marker_sets: s=new_marker_set('particle_225 geometry') marker_sets["particle_225 geometry"]=s s= marker_sets["particle_225 geometry"] mark=s.place_marker((1336.36, 2739.85, 1984.62), (0.7, 0.7, 0.7), 142.213) if "particle_226 geometry" not in marker_sets: s=new_marker_set('particle_226 geometry') marker_sets["particle_226 geometry"]=s s= marker_sets["particle_226 geometry"] mark=s.place_marker((1250.02, 2843.78, 2369.7), (0.7, 0.7, 0.7), 250.078) if "particle_227 geometry" not in marker_sets: s=new_marker_set('particle_227 geometry') marker_sets["particle_227 geometry"]=s s= marker_sets["particle_227 geometry"] mark=s.place_marker((1669.3, 3025.48, 2266.62), (0.7, 0.7, 0.7), 123.558) if "particle_228 geometry" not in marker_sets: s=new_marker_set('particle_228 geometry') marker_sets["particle_228 geometry"]=s s= marker_sets["particle_228 geometry"] mark=s.place_marker((2089.66, 2826.92, 2176.5), (0.7, 0.7, 0.7), 235.992) if "particle_229 geometry" not in marker_sets: s=new_marker_set('particle_229 geometry') marker_sets["particle_229 geometry"]=s s= marker_sets["particle_229 geometry"] mark=s.place_marker((2577.51, 2763.59, 2085.23), (0.7, 0.7, 0.7), 172.373) if "particle_230 geometry" not in marker_sets: s=new_marker_set('particle_230 geometry') marker_sets["particle_230 geometry"]=s s= marker_sets["particle_230 geometry"] mark=s.place_marker((2848.86, 3107.9, 2220.87), (0.7, 0.7, 0.7), 152.322) if "particle_231 geometry" not in marker_sets: s=new_marker_set('particle_231 geometry') marker_sets["particle_231 geometry"]=s s= marker_sets["particle_231 geometry"] mark=s.place_marker((2944.39, 3345.32, 2431.25), (0.7, 0.7, 0.7), 196.653) if "particle_232 geometry" not in marker_sets: s=new_marker_set('particle_232 geometry') marker_sets["particle_232 geometry"]=s s= marker_sets["particle_232 geometry"] mark=s.place_marker((2948.74, 3226.99, 2104.49), (0.7, 0.7, 0.7), 134.091) if "particle_233 geometry" not in marker_sets: s=new_marker_set('particle_233 geometry') marker_sets["particle_233 geometry"]=s s= marker_sets["particle_233 geometry"] mark=s.place_marker((2990.67, 3036.92, 1849.09), (0.7, 0.7, 0.7), 180.325) if "particle_234 geometry" not in marker_sets: 
s=new_marker_set('particle_234 geometry') marker_sets["particle_234 geometry"]=s s= marker_sets["particle_234 geometry"] mark=s.place_marker((2610.89, 2922.67, 2100.81), (0.7, 0.7, 0.7), 218.437) if "particle_235 geometry" not in marker_sets: s=new_marker_set('particle_235 geometry') marker_sets["particle_235 geometry"]=s s= marker_sets["particle_235 geometry"] mark=s.place_marker((2248.7, 3109.92, 2337.43), (0.7, 0.7, 0.7), 148.008) if "particle_236 geometry" not in marker_sets: s=new_marker_set('particle_236 geometry') marker_sets["particle_236 geometry"]=s s= marker_sets["particle_236 geometry"] mark=s.place_marker((1953.18, 3650.87, 2507.22), (0.7, 0.7, 0.7), 191.873) if "particle_237 geometry" not in marker_sets: s=new_marker_set('particle_237 geometry') marker_sets["particle_237 geometry"]=s s= marker_sets["particle_237 geometry"] mark=s.place_marker((1601.93, 4058.59, 2427.21), (0.7, 0.7, 0.7), 138.575) if "particle_238 geometry" not in marker_sets: s=new_marker_set('particle_238 geometry') marker_sets["particle_238 geometry"]=s s= marker_sets["particle_238 geometry"] mark=s.place_marker((1716.74, 4444.65, 2257.34), (0.7, 0.7, 0.7), 161.205) if "particle_239 geometry" not in marker_sets: s=new_marker_set('particle_239 geometry') marker_sets["particle_239 geometry"]=s s= marker_sets["particle_239 geometry"] mark=s.place_marker((1849.13, 4155.38, 2578.67), (0.7, 0.7, 0.7), 288.021) if "particle_240 geometry" not in marker_sets: s=new_marker_set('particle_240 geometry') marker_sets["particle_240 geometry"]=s s= marker_sets["particle_240 geometry"] mark=s.place_marker((2317.53, 3793.31, 2199.7), (0.7, 0.7, 0.7), 227.405) if "particle_241 geometry" not in marker_sets: s=new_marker_set('particle_241 geometry') marker_sets["particle_241 geometry"]=s s= marker_sets["particle_241 geometry"] mark=s.place_marker((2553.21, 3348.15, 2097.32), (0.7, 0.7, 0.7), 126.519) if "particle_242 geometry" not in marker_sets: s=new_marker_set('particle_242 geometry') marker_sets["particle_242 geometry"]=s s= marker_sets["particle_242 geometry"] mark=s.place_marker((2666.12, 3509.63, 2339.16), (0.7, 0.7, 0.7), 117.975) if "particle_243 geometry" not in marker_sets: s=new_marker_set('particle_243 geometry') marker_sets["particle_243 geometry"]=s s= marker_sets["particle_243 geometry"] mark=s.place_marker((2490.91, 3148.2, 2405.43), (0.7, 0.7, 0.7), 200.883) if "particle_244 geometry" not in marker_sets: s=new_marker_set('particle_244 geometry') marker_sets["particle_244 geometry"]=s s= marker_sets["particle_244 geometry"] mark=s.place_marker((2480.94, 2977.52, 2083.48), (0.7, 0.7, 0.7), 158.794) if "particle_245 geometry" not in marker_sets: s=new_marker_set('particle_245 geometry') marker_sets["particle_245 geometry"]=s s= marker_sets["particle_245 geometry"] mark=s.place_marker((2351.64, 2995.61, 1791.84), (0.7, 0.7, 0.7), 115.86) if "particle_246 geometry" not in marker_sets: s=new_marker_set('particle_246 geometry') marker_sets["particle_246 geometry"]=s s= marker_sets["particle_246 geometry"] mark=s.place_marker((2376.02, 2758.91, 1741.39), (0.7, 0.7, 0.7), 133.034) if "particle_247 geometry" not in marker_sets: s=new_marker_set('particle_247 geometry') marker_sets["particle_247 geometry"]=s s= marker_sets["particle_247 geometry"] mark=s.place_marker((2683.07, 2474.69, 1973.49), (0.7, 0.7, 0.7), 314.627) if "particle_248 geometry" not in marker_sets: s=new_marker_set('particle_248 geometry') marker_sets["particle_248 geometry"]=s s= marker_sets["particle_248 geometry"] mark=s.place_marker((2672.05, 
2802.16, 2130.33), (0.7, 0.7, 0.7), 115.352) if "particle_249 geometry" not in marker_sets: s=new_marker_set('particle_249 geometry') marker_sets["particle_249 geometry"]=s s= marker_sets["particle_249 geometry"] mark=s.place_marker((2572.51, 3217.25, 2075.18), (0.7, 0.7, 0.7), 180.621) if "particle_250 geometry" not in marker_sets: s=new_marker_set('particle_250 geometry') marker_sets["particle_250 geometry"]=s s= marker_sets["particle_250 geometry"] mark=s.place_marker((2387.03, 3247.21, 1767.48), (0.7, 0.7, 0.7), 126.265) if "particle_251 geometry" not in marker_sets: s=new_marker_set('particle_251 geometry') marker_sets["particle_251 geometry"]=s s= marker_sets["particle_251 geometry"] mark=s.place_marker((2127.7, 3032.6, 1599.17), (0.7, 0.7, 0.7), 133.541) if "particle_252 geometry" not in marker_sets: s=new_marker_set('particle_252 geometry') marker_sets["particle_252 geometry"]=s s= marker_sets["particle_252 geometry"] mark=s.place_marker((1742.75, 2972.58, 1421.19), (0.7, 0.7, 0.7), 171.019) if "particle_253 geometry" not in marker_sets: s=new_marker_set('particle_253 geometry') marker_sets["particle_253 geometry"]=s s= marker_sets["particle_253 geometry"] mark=s.place_marker((1392.15, 3088.31, 1300.23), (0.7, 0.7, 0.7), 115.437) if "particle_254 geometry" not in marker_sets: s=new_marker_set('particle_254 geometry') marker_sets["particle_254 geometry"]=s s= marker_sets["particle_254 geometry"] mark=s.place_marker((1718.89, 3167.89, 1340.9), (0.7, 0.7, 0.7), 158.583) if "particle_255 geometry" not in marker_sets: s=new_marker_set('particle_255 geometry') marker_sets["particle_255 geometry"]=s s= marker_sets["particle_255 geometry"] mark=s.place_marker((1905.03, 2837.64, 1626.12), (0.7, 0.7, 0.7), 192) if "particle_256 geometry" not in marker_sets: s=new_marker_set('particle_256 geometry') marker_sets["particle_256 geometry"]=s s= marker_sets["particle_256 geometry"] mark=s.place_marker((2215.25, 2590.23, 1807.28), (0.7, 0.7, 0.7), 150.165) if "particle_257 geometry" not in marker_sets: s=new_marker_set('particle_257 geometry') marker_sets["particle_257 geometry"]=s s= marker_sets["particle_257 geometry"] mark=s.place_marker((2000.89, 2451.62, 1699.83), (0.7, 0.7, 0.7), 157.567) if "particle_258 geometry" not in marker_sets: s=new_marker_set('particle_258 geometry') marker_sets["particle_258 geometry"]=s s= marker_sets["particle_258 geometry"] mark=s.place_marker((1980.93, 2411.52, 1806.35), (0.7, 0.7, 0.7), 199.36) if "particle_259 geometry" not in marker_sets: s=new_marker_set('particle_259 geometry') marker_sets["particle_259 geometry"]=s s= marker_sets["particle_259 geometry"] mark=s.place_marker((1803.91, 2801.24, 1985.16), (0.7, 0.7, 0.7), 105.369) if "particle_260 geometry" not in marker_sets: s=new_marker_set('particle_260 geometry') marker_sets["particle_260 geometry"]=s s= marker_sets["particle_260 geometry"] mark=s.place_marker((1759.31, 2936.32, 2218.67), (0.7, 0.7, 0.7), 118.651) if "particle_261 geometry" not in marker_sets: s=new_marker_set('particle_261 geometry') marker_sets["particle_261 geometry"]=s s= marker_sets["particle_261 geometry"] mark=s.place_marker((2101.28, 2648.14, 2190.72), (0.7, 0.7, 0.7), 219.664) if "particle_262 geometry" not in marker_sets: s=new_marker_set('particle_262 geometry') marker_sets["particle_262 geometry"]=s s= marker_sets["particle_262 geometry"] mark=s.place_marker((2407.4, 2184.96, 1989.89), (0.7, 0.7, 0.7), 196.018) if "particle_263 geometry" not in marker_sets: s=new_marker_set('particle_263 geometry') marker_sets["particle_263 
geometry"]=s s= marker_sets["particle_263 geometry"] mark=s.place_marker((2648.1, 1776.94, 1884.71), (0.7, 0.7, 0.7), 218.141) if "particle_264 geometry" not in marker_sets: s=new_marker_set('particle_264 geometry') marker_sets["particle_264 geometry"]=s s= marker_sets["particle_264 geometry"] mark=s.place_marker((2349.39, 1569.55, 1909.22), (0.7, 0.7, 0.7), 181.636) if "particle_265 geometry" not in marker_sets: s=new_marker_set('particle_265 geometry') marker_sets["particle_265 geometry"]=s s= marker_sets["particle_265 geometry"] mark=s.place_marker((2109.11, 1727.2, 1986.03), (0.7, 0.7, 0.7), 195.003) if "particle_266 geometry" not in marker_sets: s=new_marker_set('particle_266 geometry') marker_sets["particle_266 geometry"]=s s= marker_sets["particle_266 geometry"] mark=s.place_marker((2299.59, 1607.88, 1849.35), (0.7, 0.7, 0.7), 139.209) if "particle_267 geometry" not in marker_sets: s=new_marker_set('particle_267 geometry') marker_sets["particle_267 geometry"]=s s= marker_sets["particle_267 geometry"] mark=s.place_marker((2282.74, 1584.73, 1772.01), (0.7, 0.7, 0.7), 189.885) if "particle_268 geometry" not in marker_sets: s=new_marker_set('particle_268 geometry') marker_sets["particle_268 geometry"]=s s= marker_sets["particle_268 geometry"] mark=s.place_marker((2372.79, 1811.6, 1613.76), (0.7, 0.7, 0.7), 267.674) if "particle_269 geometry" not in marker_sets: s=new_marker_set('particle_269 geometry') marker_sets["particle_269 geometry"]=s s= marker_sets["particle_269 geometry"] mark=s.place_marker((2555.75, 2293.2, 1377.48), (0.7, 0.7, 0.7), 196.568) if "particle_270 geometry" not in marker_sets: s=new_marker_set('particle_270 geometry') marker_sets["particle_270 geometry"]=s s= marker_sets["particle_270 geometry"] mark=s.place_marker((2309.24, 2203.9, 1164.5), (0.7, 0.7, 0.7), 192.423) if "particle_271 geometry" not in marker_sets: s=new_marker_set('particle_271 geometry') marker_sets["particle_271 geometry"]=s s= marker_sets["particle_271 geometry"] mark=s.place_marker((2243.52, 1815.84, 1180.39), (1, 0.7, 0), 202.405) if "particle_272 geometry" not in marker_sets: s=new_marker_set('particle_272 geometry') marker_sets["particle_272 geometry"]=s s= marker_sets["particle_272 geometry"] mark=s.place_marker((2376.27, 2652.35, 1062.06), (0.7, 0.7, 0.7), 135.529) if "particle_273 geometry" not in marker_sets: s=new_marker_set('particle_273 geometry') marker_sets["particle_273 geometry"]=s s= marker_sets["particle_273 geometry"] mark=s.place_marker((2348.71, 3597.63, 841.451), (0.7, 0.7, 0.7), 114.21) if "particle_274 geometry" not in marker_sets: s=new_marker_set('particle_274 geometry') marker_sets["particle_274 geometry"]=s s= marker_sets["particle_274 geometry"] mark=s.place_marker((2195.52, 3676.56, 1114.88), (0.7, 0.7, 0.7), 159.133) if "particle_275 geometry" not in marker_sets: s=new_marker_set('particle_275 geometry') marker_sets["particle_275 geometry"]=s s= marker_sets["particle_275 geometry"] mark=s.place_marker((2270.97, 3498.65, 1477.92), (0.7, 0.7, 0.7), 144.412) if "particle_276 geometry" not in marker_sets: s=new_marker_set('particle_276 geometry') marker_sets["particle_276 geometry"]=s s= marker_sets["particle_276 geometry"] mark=s.place_marker((2355.02, 3351.67, 1747.29), (0.7, 0.7, 0.7), 70.8525) if "particle_277 geometry" not in marker_sets: s=new_marker_set('particle_277 geometry') marker_sets["particle_277 geometry"]=s s= marker_sets["particle_277 geometry"] mark=s.place_marker((2322.97, 2742.56, 1839.41), (0.7, 0.7, 0.7), 141.874) if "particle_278 geometry" not in 
marker_sets: s=new_marker_set('particle_278 geometry') marker_sets["particle_278 geometry"]=s s= marker_sets["particle_278 geometry"] mark=s.place_marker((2286.68, 2131.17, 1819.69), (0.7, 0.7, 0.7), 217.337) if "particle_279 geometry" not in marker_sets: s=new_marker_set('particle_279 geometry') marker_sets["particle_279 geometry"]=s s= marker_sets["particle_279 geometry"] mark=s.place_marker((2365.78, 2098.15, 1827.52), (0.7, 0.7, 0.7), 237.641) if "particle_280 geometry" not in marker_sets: s=new_marker_set('particle_280 geometry') marker_sets["particle_280 geometry"]=s s= marker_sets["particle_280 geometry"] mark=s.place_marker((2618.98, 2443.32, 2002.75), (0.7, 0.7, 0.7), 229.393) if "particle_281 geometry" not in marker_sets: s=new_marker_set('particle_281 geometry') marker_sets["particle_281 geometry"]=s s= marker_sets["particle_281 geometry"] mark=s.place_marker((3048.45, 2025.92, 1996.98), (0.7, 0.7, 0.7), 349.906) if "particle_282 geometry" not in marker_sets: s=new_marker_set('particle_282 geometry') marker_sets["particle_282 geometry"]=s s= marker_sets["particle_282 geometry"] mark=s.place_marker((3290.74, 1659.48, 1647.09), (0.7, 0.7, 0.7), 162.347) if "particle_283 geometry" not in marker_sets: s=new_marker_set('particle_283 geometry') marker_sets["particle_283 geometry"]=s s= marker_sets["particle_283 geometry"] mark=s.place_marker((3421.67, 1605.96, 1556.05), (0.7, 0.7, 0.7), 194.072) if "particle_284 geometry" not in marker_sets: s=new_marker_set('particle_284 geometry') marker_sets["particle_284 geometry"]=s s= marker_sets["particle_284 geometry"] mark=s.place_marker((3567.4, 1692.5, 1622.05), (0.7, 0.7, 0.7), 242.21) if "particle_285 geometry" not in marker_sets: s=new_marker_set('particle_285 geometry') marker_sets["particle_285 geometry"]=s s= marker_sets["particle_285 geometry"] mark=s.place_marker((3789.68, 2124.09, 1537.07), (0.7, 0.7, 0.7), 320.93) if "particle_286 geometry" not in marker_sets: s=new_marker_set('particle_286 geometry') marker_sets["particle_286 geometry"]=s s= marker_sets["particle_286 geometry"] mark=s.place_marker((4284.62, 2333, 1361.14), (0.7, 0.7, 0.7), 226.432) if "particle_287 geometry" not in marker_sets: s=new_marker_set('particle_287 geometry') marker_sets["particle_287 geometry"]=s s= marker_sets["particle_287 geometry"] mark=s.place_marker((4241.61, 2133.35, 1673.72), (0.7, 0.7, 0.7), 125.208) if "particle_288 geometry" not in marker_sets: s=new_marker_set('particle_288 geometry') marker_sets["particle_288 geometry"]=s s= marker_sets["particle_288 geometry"] mark=s.place_marker((4071.99, 1752.75, 2061.96), (0.7, 0.7, 0.7), 197.837) if "particle_289 geometry" not in marker_sets: s=new_marker_set('particle_289 geometry') marker_sets["particle_289 geometry"]=s s= marker_sets["particle_289 geometry"] mark=s.place_marker((4468.28, 1644.84, 2537.11), (0.7, 0.7, 0.7), 167.804) if "particle_290 geometry" not in marker_sets: s=new_marker_set('particle_290 geometry') marker_sets["particle_290 geometry"]=s s= marker_sets["particle_290 geometry"] mark=s.place_marker((5158.43, 1707.88, 2992.52), (0.7, 0.7, 0.7), 136.84) if "particle_291 geometry" not in marker_sets: s=new_marker_set('particle_291 geometry') marker_sets["particle_291 geometry"]=s s= marker_sets["particle_291 geometry"] mark=s.place_marker((5305.49, 2070.3, 2799.74), (0.7, 0.7, 0.7), 85.7421) if "particle_292 geometry" not in marker_sets: s=new_marker_set('particle_292 geometry') marker_sets["particle_292 geometry"]=s s= marker_sets["particle_292 geometry"] 
mark=s.place_marker((4406.25, 1833.39, 1735.12), (1, 0.7, 0), 256) if "particle_293 geometry" not in marker_sets: s=new_marker_set('particle_293 geometry') marker_sets["particle_293 geometry"]=s s= marker_sets["particle_293 geometry"] mark=s.place_marker((4630.74, 1537.67, 2812.46), (0.7, 0.7, 0.7), 138.702) if "particle_294 geometry" not in marker_sets: s=new_marker_set('particle_294 geometry') marker_sets["particle_294 geometry"]=s s= marker_sets["particle_294 geometry"] mark=s.place_marker((4706.01, 1237.08, 3184.68), (0.7, 0.7, 0.7), 140.732) if "particle_295 geometry" not in marker_sets: s=new_marker_set('particle_295 geometry') marker_sets["particle_295 geometry"]=s s= marker_sets["particle_295 geometry"] mark=s.place_marker((4808.28, 1231.69, 2877.49), (0.7, 0.7, 0.7), 81.3006) if "particle_296 geometry" not in marker_sets: s=new_marker_set('particle_296 geometry') marker_sets["particle_296 geometry"]=s s= marker_sets["particle_296 geometry"] mark=s.place_marker((5227.01, 1284.68, 2702.89), (0.7, 0.7, 0.7), 133.837) if "particle_297 geometry" not in marker_sets: s=new_marker_set('particle_297 geometry') marker_sets["particle_297 geometry"]=s s= marker_sets["particle_297 geometry"] mark=s.place_marker((4808.97, 1455.23, 2280.03), (0.7, 0.7, 0.7), 98.3475) if "particle_298 geometry" not in marker_sets: s=new_marker_set('particle_298 geometry') marker_sets["particle_298 geometry"]=s s= marker_sets["particle_298 geometry"] mark=s.place_marker((4071.41, 1472.28, 1930.31), (0.7, 0.7, 0.7), 297.623) if "particle_299 geometry" not in marker_sets: s=new_marker_set('particle_299 geometry') marker_sets["particle_299 geometry"]=s s= marker_sets["particle_299 geometry"] mark=s.place_marker((3834.06, 1622.42, 1608.06), (0.7, 0.7, 0.7), 212.938) if "particle_300 geometry" not in marker_sets: s=new_marker_set('particle_300 geometry') marker_sets["particle_300 geometry"]=s s= marker_sets["particle_300 geometry"] mark=s.place_marker((3870.9, 1477.77, 1470.88), (0.7, 0.7, 0.7), 154.183) if "particle_301 geometry" not in marker_sets: s=new_marker_set('particle_301 geometry') marker_sets["particle_301 geometry"]=s s= marker_sets["particle_301 geometry"] mark=s.place_marker((4022.97, 1755.91, 1184.86), (0.7, 0.7, 0.7), 180.832) if "particle_302 geometry" not in marker_sets: s=new_marker_set('particle_302 geometry') marker_sets["particle_302 geometry"]=s s= marker_sets["particle_302 geometry"] mark=s.place_marker((4124.22, 2124.09, 1140.97), (0.7, 0.7, 0.7), 122.332) if "particle_303 geometry" not in marker_sets: s=new_marker_set('particle_303 geometry') marker_sets["particle_303 geometry"]=s s= marker_sets["particle_303 geometry"] mark=s.place_marker((4181.68, 2484.29, 1254.79), (0.7, 0.7, 0.7), 209.047) if "particle_304 geometry" not in marker_sets: s=new_marker_set('particle_304 geometry') marker_sets["particle_304 geometry"]=s s= marker_sets["particle_304 geometry"] mark=s.place_marker((4229.81, 2477.23, 845.986), (0.7, 0.7, 0.7), 126.985) if "particle_305 geometry" not in marker_sets: s=new_marker_set('particle_305 geometry') marker_sets["particle_305 geometry"]=s s= marker_sets["particle_305 geometry"] mark=s.place_marker((4580.11, 2470.31, 580.054), (0.7, 0.7, 0.7), 122.205) if "particle_306 geometry" not in marker_sets: s=new_marker_set('particle_306 geometry') marker_sets["particle_306 geometry"]=s s= marker_sets["particle_306 geometry"] mark=s.place_marker((4797.46, 2254.26, 550.085), (0.7, 0.7, 0.7), 107.95) if "particle_307 geometry" not in marker_sets: s=new_marker_set('particle_307 
geometry') marker_sets["particle_307 geometry"]=s s= marker_sets["particle_307 geometry"] mark=s.place_marker((4314.89, 1987.98, 796.109), (0.7, 0.7, 0.7), 182.567) if "particle_308 geometry" not in marker_sets: s=new_marker_set('particle_308 geometry') marker_sets["particle_308 geometry"]=s s= marker_sets["particle_308 geometry"] mark=s.place_marker((3894.36, 1803.08, 1227.49), (0.7, 0.7, 0.7), 185.274) if "particle_309 geometry" not in marker_sets: s=new_marker_set('particle_309 geometry') marker_sets["particle_309 geometry"]=s s= marker_sets["particle_309 geometry"] mark=s.place_marker((3686.7, 1923.63, 1593.65), (0.7, 0.7, 0.7), 413.567) if "particle_310 geometry" not in marker_sets: s=new_marker_set('particle_310 geometry') marker_sets["particle_310 geometry"]=s s= marker_sets["particle_310 geometry"] mark=s.place_marker((3511.6, 1739.84, 1620.74), (0.7, 0.7, 0.7), 240.01) if "particle_311 geometry" not in marker_sets: s=new_marker_set('particle_311 geometry') marker_sets["particle_311 geometry"]=s s= marker_sets["particle_311 geometry"] mark=s.place_marker((3529.83, 1794.17, 1612.06), (0.7, 0.7, 0.7), 238.995) if "particle_312 geometry" not in marker_sets: s=new_marker_set('particle_312 geometry') marker_sets["particle_312 geometry"]=s s= marker_sets["particle_312 geometry"] mark=s.place_marker((3637.04, 1716.27, 1431.83), (0.7, 0.7, 0.7), 203.674) if "particle_313 geometry" not in marker_sets: s=new_marker_set('particle_313 geometry') marker_sets["particle_313 geometry"]=s s= marker_sets["particle_313 geometry"] mark=s.place_marker((3598.33, 1835.65, 845.316), (0.7, 0.7, 0.7), 266.744) if "particle_314 geometry" not in marker_sets: s=new_marker_set('particle_314 geometry') marker_sets["particle_314 geometry"]=s s= marker_sets["particle_314 geometry"] mark=s.place_marker((3783.14, 1410.78, 813.574), (0.7, 0.7, 0.7), 147.585) if "particle_315 geometry" not in marker_sets: s=new_marker_set('particle_315 geometry') marker_sets["particle_315 geometry"]=s s= marker_sets["particle_315 geometry"] mark=s.place_marker((3705.6, 1414.86, 1080.75), (0.7, 0.7, 0.7), 249.485) if "particle_316 geometry" not in marker_sets: s=new_marker_set('particle_316 geometry') marker_sets["particle_316 geometry"]=s s= marker_sets["particle_316 geometry"] mark=s.place_marker((3531.5, 1797.64, 1173.68), (0.7, 0.7, 0.7), 119.371) if "particle_317 geometry" not in marker_sets: s=new_marker_set('particle_317 geometry') marker_sets["particle_317 geometry"]=s s= marker_sets["particle_317 geometry"] mark=s.place_marker((3714.58, 2479.07, 1241.28), (0.7, 0.7, 0.7), 155.875) if "particle_318 geometry" not in marker_sets: s=new_marker_set('particle_318 geometry') marker_sets["particle_318 geometry"]=s s= marker_sets["particle_318 geometry"] mark=s.place_marker((4175.68, 2931.47, 1618.34), (0.7, 0.7, 0.7), 189.419) if "particle_319 geometry" not in marker_sets: s=new_marker_set('particle_319 geometry') marker_sets["particle_319 geometry"]=s s= marker_sets["particle_319 geometry"] mark=s.place_marker((4221.08, 2813.66, 2132.32), (0.7, 0.7, 0.7), 137.475) if "particle_320 geometry" not in marker_sets: s=new_marker_set('particle_320 geometry') marker_sets["particle_320 geometry"]=s s= marker_sets["particle_320 geometry"] mark=s.place_marker((4026.08, 2564.39, 2516.67), (0.7, 0.7, 0.7), 176.179) if "particle_321 geometry" not in marker_sets: s=new_marker_set('particle_321 geometry') marker_sets["particle_321 geometry"]=s s= marker_sets["particle_321 geometry"] mark=s.place_marker((3990.14, 2349.02, 2927.53), (0.7, 0.7, 0.7), 
138.829) if "particle_322 geometry" not in marker_sets: s=new_marker_set('particle_322 geometry') marker_sets["particle_322 geometry"]=s s= marker_sets["particle_322 geometry"] mark=s.place_marker((4116.21, 2158.41, 3280.12), (0.7, 0.7, 0.7), 148.727) if "particle_323 geometry" not in marker_sets: s=new_marker_set('particle_323 geometry') marker_sets["particle_323 geometry"]=s s= marker_sets["particle_323 geometry"] mark=s.place_marker((4453.56, 2047.54, 3664.74), (0.7, 0.7, 0.7), 230.323) if "particle_324 geometry" not in marker_sets: s=new_marker_set('particle_324 geometry') marker_sets["particle_324 geometry"]=s s= marker_sets["particle_324 geometry"] mark=s.place_marker((4383.9, 2210.94, 3045.95), (0.7, 0.7, 0.7), 175.376) if "particle_325 geometry" not in marker_sets: s=new_marker_set('particle_325 geometry') marker_sets["particle_325 geometry"]=s s= marker_sets["particle_325 geometry"] mark=s.place_marker((4156.32, 2315.87, 2593.87), (0.7, 0.7, 0.7), 161.163) if "particle_326 geometry" not in marker_sets: s=new_marker_set('particle_326 geometry') marker_sets["particle_326 geometry"]=s s= marker_sets["particle_326 geometry"] mark=s.place_marker((4057.1, 2772.29, 2751.64), (0.7, 0.7, 0.7), 125.885) if "particle_327 geometry" not in marker_sets: s=new_marker_set('particle_327 geometry') marker_sets["particle_327 geometry"]=s s= marker_sets["particle_327 geometry"] mark=s.place_marker((4112.88, 3220, 2822.84), (0.7, 0.7, 0.7), 206.635) if "particle_328 geometry" not in marker_sets: s=new_marker_set('particle_328 geometry') marker_sets["particle_328 geometry"]=s s= marker_sets["particle_328 geometry"] mark=s.place_marker((3728.31, 2984.82, 2862.59), (0.7, 0.7, 0.7), 151.392) if "particle_329 geometry" not in marker_sets: s=new_marker_set('particle_329 geometry') marker_sets["particle_329 geometry"]=s s= marker_sets["particle_329 geometry"] mark=s.place_marker((3545.48, 2716.98, 3003.47), (0.7, 0.7, 0.7), 173.388) if "particle_330 geometry" not in marker_sets: s=new_marker_set('particle_330 geometry') marker_sets["particle_330 geometry"]=s s= marker_sets["particle_330 geometry"] mark=s.place_marker((3761.85, 2587.02, 3220.9), (0.7, 0.7, 0.7), 135.825) if "particle_331 geometry" not in marker_sets: s=new_marker_set('particle_331 geometry') marker_sets["particle_331 geometry"]=s s= marker_sets["particle_331 geometry"] mark=s.place_marker((4128.63, 2521.18, 3453.55), (0.7, 0.7, 0.7), 186.839) if "particle_332 geometry" not in marker_sets: s=new_marker_set('particle_332 geometry') marker_sets["particle_332 geometry"]=s s= marker_sets["particle_332 geometry"] mark=s.place_marker((4541.37, 2464.77, 3684.1), (0.7, 0.7, 0.7), 121.189) if "particle_333 geometry" not in marker_sets: s=new_marker_set('particle_333 geometry') marker_sets["particle_333 geometry"]=s s= marker_sets["particle_333 geometry"] mark=s.place_marker((4366.87, 2510.48, 3307.43), (0.7, 0.7, 0.7), 102.916) if "particle_334 geometry" not in marker_sets: s=new_marker_set('particle_334 geometry') marker_sets["particle_334 geometry"]=s s= marker_sets["particle_334 geometry"] mark=s.place_marker((4150.95, 2443.39, 2734.26), (0.7, 0.7, 0.7), 212.769) if "particle_335 geometry" not in marker_sets: s=new_marker_set('particle_335 geometry') marker_sets["particle_335 geometry"]=s s= marker_sets["particle_335 geometry"] mark=s.place_marker((3703.28, 2365.92, 2265.36), (0.7, 0.7, 0.7), 173.092) if "particle_336 geometry" not in marker_sets: s=new_marker_set('particle_336 geometry') marker_sets["particle_336 geometry"]=s s= 
marker_sets["particle_336 geometry"] mark=s.place_marker((3554.78, 2440.35, 1800.84), (0.7, 0.7, 0.7), 264.502) if "particle_337 geometry" not in marker_sets: s=new_marker_set('particle_337 geometry') marker_sets["particle_337 geometry"]=s s= marker_sets["particle_337 geometry"] mark=s.place_marker((3773.25, 2698.01, 1374.04), (0.7, 0.7, 0.7), 208.666) if "particle_338 geometry" not in marker_sets: s=new_marker_set('particle_338 geometry') marker_sets["particle_338 geometry"]=s s= marker_sets["particle_338 geometry"] mark=s.place_marker((4102.69, 2703.51, 1015.64), (0.7, 0.7, 0.7), 186.797) if "particle_339 geometry" not in marker_sets: s=new_marker_set('particle_339 geometry') marker_sets["particle_339 geometry"]=s s= marker_sets["particle_339 geometry"] mark=s.place_marker((4004.78, 2341.04, 681.696), (0.7, 0.7, 0.7), 255.534) if "particle_340 geometry" not in marker_sets: s=new_marker_set('particle_340 geometry') marker_sets["particle_340 geometry"]=s s= marker_sets["particle_340 geometry"] mark=s.place_marker((4275.49, 2016.37, 616.45), (0.7, 0.7, 0.7), 153.126) if "particle_341 geometry" not in marker_sets: s=new_marker_set('particle_341 geometry') marker_sets["particle_341 geometry"]=s s= marker_sets["particle_341 geometry"] mark=s.place_marker((4456.24, 2336.22, 459.961), (0.7, 0.7, 0.7), 165.816) if "particle_342 geometry" not in marker_sets: s=new_marker_set('particle_342 geometry') marker_sets["particle_342 geometry"]=s s= marker_sets["particle_342 geometry"] mark=s.place_marker((4086.01, 2486.03, 475.835), (0.7, 0.7, 0.7), 134.429) if "particle_343 geometry" not in marker_sets: s=new_marker_set('particle_343 geometry') marker_sets["particle_343 geometry"]=s s= marker_sets["particle_343 geometry"] mark=s.place_marker((3899.99, 2643.86, 808.879), (0.7, 0.7, 0.7), 178.971) if "particle_344 geometry" not in marker_sets: s=new_marker_set('particle_344 geometry') marker_sets["particle_344 geometry"]=s s= marker_sets["particle_344 geometry"] mark=s.place_marker((4096.28, 2787.7, 1280.34), (0.7, 0.7, 0.7), 189.969) if "particle_345 geometry" not in marker_sets: s=new_marker_set('particle_345 geometry') marker_sets["particle_345 geometry"]=s s= marker_sets["particle_345 geometry"] mark=s.place_marker((4530.89, 3211.78, 1448.26), (0.7, 0.7, 0.7), 121.359) if "particle_346 geometry" not in marker_sets: s=new_marker_set('particle_346 geometry') marker_sets["particle_346 geometry"]=s s= marker_sets["particle_346 geometry"] mark=s.place_marker((4603.81, 3265.01, 1973.68), (0.7, 0.7, 0.7), 187.262) if "particle_347 geometry" not in marker_sets: s=new_marker_set('particle_347 geometry') marker_sets["particle_347 geometry"]=s s= marker_sets["particle_347 geometry"] mark=s.place_marker((4276.76, 3052.95, 2484.62), (0.7, 0.7, 0.7), 164.335) if "particle_348 geometry" not in marker_sets: s=new_marker_set('particle_348 geometry') marker_sets["particle_348 geometry"]=s s= marker_sets["particle_348 geometry"] mark=s.place_marker((4332.38, 2622.66, 2732.09), (0.7, 0.7, 0.7), 138.363) if "particle_349 geometry" not in marker_sets: s=new_marker_set('particle_349 geometry') marker_sets["particle_349 geometry"]=s s= marker_sets["particle_349 geometry"] mark=s.place_marker((4432.95, 2456.81, 3041.05), (0.7, 0.7, 0.7), 138.49) if "particle_350 geometry" not in marker_sets: s=new_marker_set('particle_350 geometry') marker_sets["particle_350 geometry"]=s s= marker_sets["particle_350 geometry"] mark=s.place_marker((4188.86, 2666.5, 3183.48), (0.7, 0.7, 0.7), 116.325) if "particle_351 geometry" not in 
marker_sets: s=new_marker_set('particle_351 geometry') marker_sets["particle_351 geometry"]=s s= marker_sets["particle_351 geometry"] mark=s.place_marker((4028.61, 2877.28, 2807.05), (0.7, 0.7, 0.7), 106.511) if "particle_352 geometry" not in marker_sets: s=new_marker_set('particle_352 geometry') marker_sets["particle_352 geometry"]=s s= marker_sets["particle_352 geometry"] mark=s.place_marker((4007.66, 2866.08, 2267.34), (0.7, 0.7, 0.7), 151.096) if "particle_353 geometry" not in marker_sets: s=new_marker_set('particle_353 geometry') marker_sets["particle_353 geometry"]=s s= marker_sets["particle_353 geometry"] mark=s.place_marker((4200.86, 2884.79, 1633.83), (0.7, 0.7, 0.7), 240.856) if "particle_354 geometry" not in marker_sets: s=new_marker_set('particle_354 geometry') marker_sets["particle_354 geometry"]=s s= marker_sets["particle_354 geometry"] mark=s.place_marker((4414.91, 2843.91, 1168.97), (0.7, 0.7, 0.7), 149.7) if "particle_355 geometry" not in marker_sets: s=new_marker_set('particle_355 geometry') marker_sets["particle_355 geometry"]=s s= marker_sets["particle_355 geometry"] mark=s.place_marker((4253.57, 2805.12, 872.176), (0.7, 0.7, 0.7), 165.943) if "particle_356 geometry" not in marker_sets: s=new_marker_set('particle_356 geometry') marker_sets["particle_356 geometry"]=s s= marker_sets["particle_356 geometry"] mark=s.place_marker((3802.04, 2393.54, 950.575), (0.7, 0.7, 0.7), 178.971) if "particle_357 geometry" not in marker_sets: s=new_marker_set('particle_357 geometry') marker_sets["particle_357 geometry"]=s s= marker_sets["particle_357 geometry"] mark=s.place_marker((3163.65, 1993.15, 831.422), (0.7, 0.7, 0.7), 154.945) for k in surf_sets.keys(): chimera.openModels.add([surf_sets[k]])
gpl-3.0
3,440,770,142,866,438,000
47.809181
76
0.702641
false
green-span/green-mail
old/imapproxyserver.py
1
2623
#|##############################################################################
#|Copyright (c) 2009, The Green-Span Project. All rights reserved. This code is
#|Open Source Free Software - redistribution and use in source and binary forms,
#|with or without modification, are permitted under the Two Clause BSD License.
#|##############################################################################
#|File Created: 2009-04-01
#|Author(s): Sean Hastings,
#|##############################################################################

VERBOSE = True
from globals import ALLVERBOSE

"""
ImapProxyServer Object

Version 0.0.1
Sets up framework for loging into proxy server
spawns new CredentialsChecker
spawns new protocol factory
listens for connections

Version 0.0.0 - New File
"""

from twisted.internet import reactor, protocol
from twisted.cred import portal
from twisted.mail import imap4

from credentialschecker import CredentialsChecker
from imapproxyuserrealm import ImapProxyUserRealm


class ImapProxyServer(object):
    """Serves up protocol for incomming user imap-client connections"""

    def __init__(self,valid_logins):
        "gets valid email-addy:password combos from central object"

        #shared caching for connections to same IMAP account
        self.connected = {}

        #Start up server
        cred_check = CredentialsChecker(valid_logins)
        the_portal = portal.Portal(ImapProxyUserRealm(self))
        the_portal.registerChecker(cred_check)
        factory = ImapProxyServerProtocolFactory()
        factory.portal = the_portal
        reactor.listenTCP(143, factory) #TODO - convert to listenSSL


class ImapProxyServerProtocol(imap4.IMAP4Server):
    "Extension of Imap4Server class to expose functionality"

    def lineReceived(self, line):
        if ALLVERBOSE or VERBOSE: print "C>P S:", line
        imap4.IMAP4Server.lineReceived(self,line)

    def sendLine(self,line):
        if ALLVERBOSE or VERBOSE: print "C<P S", line
        imap4.IMAP4Server.sendLine(self,line)

    def connectionLost(self, reason):
        if ALLVERBOSE or VERBOSE: print "ImapProxyServerProtocol.connectionLost - %s", reason
        imap4.IMAP4Server.connectionLost(self,reason)


class ImapProxyServerProtocolFactory(protocol.Factory):
    protocol = ImapProxyServerProtocol
    portal = None

    def buildProtocol(self, address):
        p = self.protocol()
        p.portal = self.portal
        p.factory = self
        return p
bsd-2-clause
-8,055,283,197,536,979,000
32.090909
93
0.617995
false
altsen/diandiyun-platform
lms/djangoapps/linkedin/management/commands/linkedin_mailusers.py
9
10824
""" Send emails to users inviting them to add their course certificates to their LinkedIn profiles. """ from smtplib import SMTPServerDisconnected, SMTPDataError, SMTPConnectError, SMTPException import json import logging import urllib from boto.exception import AWSConnectionError from boto.ses.exceptions import ( SESAddressNotVerifiedError, SESIdentityNotVerifiedError, SESDomainNotConfirmedError, SESAddressBlacklistedError, SESDailyQuotaExceededError, SESMaxSendingRateExceededError, SESDomainEndsWithDotError, SESLocalAddressCharacterError, SESIllegalAddressError, ) from django.conf import settings from django.core.mail import EmailMessage from django.core.management.base import BaseCommand from django.db import transaction from django.template import Context from django.template.loader import get_template from django.core.urlresolvers import reverse from optparse import make_option from edxmako.shortcuts import render_to_string from certificates.models import GeneratedCertificate from courseware.courses import get_course_by_id, course_image_url from ...models import LinkedIn # The following is blatantly cribbed from bulk_email/tasks.py # Errors that an individual email is failing to be sent, and should just # be treated as a fail. SINGLE_EMAIL_FAILURE_ERRORS = ( SESAddressBlacklistedError, # Recipient's email address has been temporarily blacklisted. SESDomainEndsWithDotError, # Recipient's email address' domain ends with a period/dot. SESIllegalAddressError, # Raised when an illegal address is encountered. SESLocalAddressCharacterError, # An address contained a control or whitespace character. ) # Exceptions that, if caught, should cause the task to be re-tried. # These errors will be caught a limited number of times before the task fails. LIMITED_RETRY_ERRORS = ( SMTPConnectError, SMTPServerDisconnected, AWSConnectionError, ) # Errors that indicate that a mailing task should be retried without limit. # An example is if email is being sent too quickly, but may succeed if sent # more slowly. When caught by a task, it triggers an exponential backoff and retry. # Retries happen continuously until the email is sent. # Note that the SMTPDataErrors here are only those within the 4xx range. # Those not in this range (i.e. in the 5xx range) are treated as hard failures # and thus like SINGLE_EMAIL_FAILURE_ERRORS. INFINITE_RETRY_ERRORS = ( SESMaxSendingRateExceededError, # Your account's requests/second limit has been exceeded. SMTPDataError, ) # Errors that are known to indicate an inability to send any more emails, # and should therefore not be retried. For example, exceeding a quota for emails. # Also, any SMTP errors that are not explicitly enumerated above. BULK_EMAIL_FAILURE_ERRORS = ( SESAddressNotVerifiedError, # Raised when a "Reply-To" address has not been validated in SES yet. SESIdentityNotVerifiedError, # Raised when an identity has not been verified in SES yet. SESDomainNotConfirmedError, # Raised when domain ownership is not confirmed for DKIM. SESDailyQuotaExceededError, # 24-hour allotment of outbound email has been exceeded. SMTPException, ) MAX_ATTEMPTS = 10 log = logging.getLogger("linkedin") class Command(BaseCommand): """ Django command for inviting users to add their course certificates to their LinkedIn profiles. 
""" args = '' help = ('Sends emails to edX users that are on LinkedIn who have completed ' 'course certificates, inviting them to add their certificates to ' 'their LinkedIn profiles') option_list = BaseCommand.option_list + ( make_option( '--mock', action='store_true', dest='mock_run', default=False, help="Run without sending the final e-mails."),) def __init__(self): super(Command, self).__init__() @transaction.commit_manually def handle(self, *args, **options): whitelist = settings.LINKEDIN_API['EMAIL_WHITELIST'] mock_run = options.get('mock_run', False) accounts = LinkedIn.objects.filter(has_linkedin_account=True) for account in accounts: user = account.user if whitelist and user.email not in whitelist: # Whitelist only certain addresses for testing purposes continue try: emailed = json.loads(account.emailed_courses) except Exception: log.exception("LinkedIn: Could not parse emailed_courses for {}".format(user.username)) continue certificates = GeneratedCertificate.objects.filter(user=user) certificates = certificates.filter(status='downloadable') certificates = [cert for cert in certificates if cert.course_id not in emailed] # Shouldn't happen, since we're only picking users who have # certificates, but just in case... if not certificates: log.info("LinkedIn: No certificates for user {}".format(user.username)) continue # Basic sanity checks passed, now try to send the emails try: success = False success = self.send_grandfather_email(user, certificates, mock_run) log.info("LinkedIn: Sent email for user {}".format(user.username)) if not mock_run: emailed.extend([cert.course_id for cert in certificates]) if success and not mock_run: account.emailed_courses = json.dumps(emailed) account.save() transaction.commit() except BULK_EMAIL_FAILURE_ERRORS: log.exception("LinkedIn: No further email sending will work, aborting") transaction.commit() return -1 except Exception: log.exception("LinkedIn: User {} couldn't be processed".format(user.username)) transaction.commit() def certificate_url(self, certificate): """ Generates a certificate URL based on LinkedIn's documentation. The documentation is from a Word document: DAT_DOCUMENTATION_v3.12.docx """ course = get_course_by_id(certificate.course_id) tracking_code = '-'.join([ 'eml', 'prof', # the 'product'--no idea what that's supposed to mean 'edX', # Partner's name course.number, # Certificate's name 'gf']) query = [ ('pfCertificationName', course.display_name_with_default), ('pfAuthorityId', settings.LINKEDIN_API['COMPANY_ID']), ('pfCertificationUrl', certificate.download_url), ('pfLicenseNo', certificate.course_id), ('pfCertStartDate', course.start.strftime('%Y%m')), ('_mSplash', '1'), ('trk', tracking_code), ('startTask', 'CERTIFICATION_NAME'), ('force', 'true')] return 'http://www.linkedin.com/profile/guided?' + urllib.urlencode(query) def send_grandfather_email(self, user, certificates, mock_run=False): """ Send the 'grandfathered' email informing historical students that they may now post their certificates on their LinkedIn profiles. 
""" courses_list = [] for cert in certificates: course = get_course_by_id(cert.course_id) course_url = 'https://{}{}'.format( settings.SITE_NAME, reverse('course_root', kwargs={'course_id': cert.course_id}) ) course_title = course.display_name_with_default course_img_url = 'https://{}{}'.format(settings.SITE_NAME, course_image_url(course)) course_end_date = course.end.strftime('%b %Y') course_org = course.org courses_list.append({ 'course_url': course_url, 'course_org': course_org, 'course_title': course_title, 'course_image_url': course_img_url, 'course_end_date': course_end_date, 'linkedin_add_url': self.certificate_url(cert), }) context = {'courses_list': courses_list, 'num_courses': len(courses_list)} body = render_to_string('linkedin/linkedin_email.html', context) subject = u'{}, Add your Achievements to your LinkedIn Profile'.format(user.profile.name) if mock_run: return True else: return self.send_email(user, subject, body) def send_email(self, user, subject, body, num_attempts=MAX_ATTEMPTS): """ Send an email. Return True if it succeeded, False if it didn't. """ fromaddr = u'[email protected]' toaddr = u'{} <{}>'.format(user.profile.name, user.email) msg = EmailMessage(subject, body, fromaddr, (toaddr,)) msg.content_subtype = "html" i = 1 while i <= num_attempts: try: msg.send() return True # Happy path! except SINGLE_EMAIL_FAILURE_ERRORS: # Something unrecoverable is wrong about the email acct we're sending to log.exception( u"LinkedIn: Email send failed for user {}, email {}" .format(user.username, user.email) ) return False except LIMITED_RETRY_ERRORS: # Something went wrong (probably an intermittent connection error), # but maybe if we beat our heads against the wall enough times, # we can crack our way through. Thwack! Thwack! Thwack! # Give up after num_attempts though (for loop exits), let's not # get carried away. log.exception( u"LinkedIn: Email send for user {}, email {}, encountered error, attempt #{}" .format(user.username, user.email, i) ) i += 1 continue except INFINITE_RETRY_ERRORS: # Dude, it will *totally* work if I just... sleep... a little... # Things like max send rate exceeded. The smart thing would be # to do exponential backoff. The lazy thing to do would be just # sleep some arbitrary amount and trust that it'll probably work. # GUESS WHAT WE'RE DOING BOYS AND GIRLS!?! log.exception("LinkedIn: temporary error encountered, retrying") time.sleep(1) # If we hit here, we went through all our attempts without success return False
agpl-3.0
6,356,314,451,060,511,000
40.791506
103
0.631837
false
jupyter/nbgrader
nbgrader/tests/nbextensions/test_validate_assignment.py
3
7158
import pytest from selenium.webdriver.common.by import By from selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.support import expected_conditions as EC from selenium.common.exceptions import TimeoutException, NoSuchElementException, NoAlertPresentException from .conftest import _make_nbserver, _make_browser, _close_nbserver, _close_browser @pytest.fixture(scope="module") def nbserver(request, port, tempdir, jupyter_config_dir, jupyter_data_dir, exchange, cache): server = _make_nbserver("", port, tempdir, jupyter_config_dir, jupyter_data_dir, exchange, cache) def fin(): _close_nbserver(server) request.addfinalizer(fin) return server @pytest.fixture(scope="module") def browser(request, tempdir, nbserver): browser = _make_browser(tempdir) def fin(): _close_browser(browser) request.addfinalizer(fin) return browser def _wait(browser): return WebDriverWait(browser, 30) def _load_notebook(browser, port, notebook, retries=5): # go to the correct page url = "http://localhost:{}/notebooks/{}.ipynb".format(port, notebook) browser.get(url) alert = '' for _ in range(5): if alert is None: break try: alert = browser.switch_to.alert except NoAlertPresentException: alert = None else: print("Warning: dismissing unexpected alert ({})".format(alert.text)) alert.accept() def page_loaded(browser): return browser.execute_script( """ return (typeof Jupyter !== "undefined" && Jupyter.page !== undefined && Jupyter.notebook !== undefined && $("#notebook_name").text() === "{}"); """.format(notebook)) # wait for the page to load try: _wait(browser).until(page_loaded) except TimeoutException: if retries > 0: print("Retrying page load...") # page timeout, but sometimes this happens, so try refreshing? _load_notebook(browser, port, retries=retries - 1, notebook=notebook) else: print("Failed to load the page too many times") raise def _wait_for_validate_button(browser): def validate_exists(browser): try: browser.find_element_by_css_selector("button.validate") except NoSuchElementException: return False return True _wait(browser).until(validate_exists) def _wait_for_modal(browser): _wait(browser).until(EC.presence_of_all_elements_located((By.CSS_SELECTOR, ".modal-dialog"))) def _dismiss_modal(browser): button = browser.find_element_by_css_selector(".modal-footer .btn-primary") button.click() def modal_gone(browser): try: browser.find_element_by_css_selector(".modal-dialog") except NoSuchElementException: return True return False _wait(browser).until(modal_gone) @pytest.mark.nbextensions def test_validate_ok(browser, port): _load_notebook(browser, port, "submitted-changed") _wait_for_validate_button(browser) # click the "validate" button browser.find_element_by_css_selector("button.validate").click() # wait for the modal dialog to appear _wait_for_modal(browser) # check that it succeeded browser.find_element_by_css_selector(".modal-dialog .validation-success") # close the modal dialog _dismiss_modal(browser) @pytest.mark.nbextensions def test_validate_failure(browser, port): _load_notebook(browser, port, "submitted-unchanged") _wait_for_validate_button(browser) # click the "validate" button browser.find_element_by_css_selector("button.validate").click() # wait for the modal dialog to appear _wait_for_modal(browser) # check that it failed browser.find_element_by_css_selector(".modal-dialog .validation-failed") # close the modal dialog _dismiss_modal(browser) @pytest.mark.nbextensions def test_validate_grade_cell_changed(browser, port): _load_notebook(browser, port, "submitted-grade-cell-changed") 
_wait_for_validate_button(browser) # click the "validate" button browser.find_element_by_css_selector("button.validate").click() # wait for the modal dialog to appear _wait_for_modal(browser) # check that it failed browser.find_element_by_css_selector(".modal-dialog .validation-changed") # close the modal dialog _dismiss_modal(browser) @pytest.mark.nbextensions def test_validate_locked_cell_changed(browser, port): _load_notebook(browser, port, "submitted-locked-cell-changed") _wait_for_validate_button(browser) # click the "validate" button browser.find_element_by_css_selector("button.validate").click() # wait for the modal dialog to appear _wait_for_modal(browser) # check that it failed browser.find_element_by_css_selector(".modal-dialog .validation-changed") # close the modal dialog _dismiss_modal(browser) @pytest.mark.nbextensions def test_validate_open_relative_file(browser, port): _load_notebook(browser, port, "open_relative_file") _wait_for_validate_button(browser) # click the "validate" button browser.find_element_by_css_selector("button.validate").click() # wait for the modal dialog to appear _wait_for_modal(browser) # check that it succeeded browser.find_element_by_css_selector(".modal-dialog .validation-success") # close the modal dialog _dismiss_modal(browser) @pytest.mark.nbextensions def test_validate_grade_cell_type_changed(browser, port): _load_notebook(browser, port, "submitted-grade-cell-type-changed") _wait_for_validate_button(browser) # click the "validate" button browser.find_element_by_css_selector("button.validate").click() # wait for the modal dialog to appear _wait_for_modal(browser) # check that it failed browser.find_element_by_css_selector(".modal-dialog .validation-type-changed") # close the modal dialog _dismiss_modal(browser) @pytest.mark.nbextensions def test_validate_answer_cell_type_changed(browser, port): _load_notebook(browser, port, "submitted-answer-cell-type-changed") _wait_for_validate_button(browser) # click the "validate" button browser.find_element_by_css_selector("button.validate").click() # wait for the modal dialog to appear _wait_for_modal(browser) # check that it failed browser.find_element_by_css_selector(".modal-dialog .validation-type-changed") # close the modal dialog _dismiss_modal(browser) ################################################################################ ####### DO NOT ADD TESTS BELOW THIS LINE ####################################### ################################################################################ @pytest.mark.nbextensions def test_final(browser, port): """This is a final test to be run so that the browser doesn't hang, see https://github.com/mozilla/geckodriver/issues/1151 """ _load_notebook(browser, port, "blank")
bsd-3-clause
-4,502,118,144,260,063,700
28.949791
104
0.658005
false
nkvelkov/Pyganizer
source/tests/test_task_scheduler.py
1
2614
import unittest
from task_scheduler import TaskScheduler
from task import Task
import arrow


class TaskSchedulerTestCase(unittest.TestCase):
    def setUp(self):
        self.task_scheduler = TaskScheduler()
        self.tz = 'local'
        self.datetime = arrow.Arrow(2015, 5, 9, 11, 11, 11, tzinfo=self.tz)
        self.task = Task(self.datetime, "name", "message", 100, 1, 1, self.tz)


class TestTaskScheduler(TaskSchedulerTestCase):
    def test_add_task(self):
        now = arrow.now()
        task = self.task_scheduler.append_task(now, "name", "message", 100, 1, self.tz)
        self.assertTrue(task in self.task_scheduler.todos[task.datetime])

    def test_add_multiple_active_tasks(self):
        test_task = self.task
        test_task.datetime = arrow.now()
        self.task_scheduler.add_multiple_active_tasks([test_task, self.task])
        self.assertTrue(test_task in self.task_scheduler.active_tasks and
                        self.task in self.task_scheduler.active_tasks)

    def test_add_multiple_pending_tasks(self):
        test_task = self.task
        test_task.datetime = arrow.now()
        self.task_scheduler.add_multiple_pending_tasks([test_task, self.task])
        test_task_in = test_task in self.task_scheduler.todos[test_task.datetime]
        task_in = self.task in self.task_scheduler.todos[self.task.datetime]
        self.assertTrue(test_task_in and task_in)

    def test_activate_task(self):
        self.task_scheduler.insert_task(self.task)
        self.task_scheduler._activate_task(self.task)
        in_active_tasks = self.task in self.task_scheduler.active_tasks
        removed = self.task.datetime not in self.task_scheduler.todos.keys()
        self.assertTrue(in_active_tasks and removed)

    def test_add_task_progress_by_id(self):
        self.task_scheduler.insert_task(self.task)
        self.task_scheduler.add_task_progress_by_id(self.task.tid, 110)
        is_finished = self.task.datetime in self.task_scheduler.todos
        self.assertTrue(is_finished)

    def test_find_pending_task(self):
        self.task_scheduler.insert_task(self.task)
        self.assertTrue(self.task.datetime in self.task_scheduler.todos.keys())

    def test_add_task_progress_by_id_(self):
        self.task_scheduler.insert_task(self.task)
        self.task_scheduler._activate_task(self.task)
        self.task_scheduler.add_task_progress_by_id(self.task.tid, 10)
        result_task = self.task_scheduler.find_active_task(self.task.tid)
        self.assertEqual(result_task.completeness, 90)


if __name__ == '__main__':
    unittest.main()
gpl-2.0
7,058,214,314,973,799,000
35.319444
87
0.670237
false
Glasgow2015/team-10
env/lib/python2.7/site-packages/PIL/ImageFilter.py
87
6618
#
# The Python Imaging Library.
# $Id$
#
# standard filters
#
# History:
# 1995-11-27 fl Created
# 2002-06-08 fl Added rank and mode filters
# 2003-09-15 fl Fixed rank calculation in rank filter; added expand call
#
# Copyright (c) 1997-2003 by Secret Labs AB.
# Copyright (c) 1995-2002 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#

import functools


class Filter(object):
    pass


class Kernel(Filter):
    """
    Create a convolution kernel.  The current version only
    supports 3x3 and 5x5 integer and floating point kernels.

    In the current version, kernels can only be applied to
    "L" and "RGB" images.

    :param size: Kernel size, given as (width, height). In the current
                 version, this must be (3,3) or (5,5).
    :param kernel: A sequence containing kernel weights.
    :param scale: Scale factor. If given, the result for each pixel is
                  divided by this value.  the default is the sum of the
                  kernel weights.
    :param offset: Offset. If given, this value is added to the result,
                   after it has been divided by the scale factor.
    """

    def __init__(self, size, kernel, scale=None, offset=0):
        if scale is None:
            # default scale is sum of kernel
            scale = functools.reduce(lambda a, b: a+b, kernel)
        if size[0] * size[1] != len(kernel):
            raise ValueError("not enough coefficients in kernel")
        self.filterargs = size, scale, offset, kernel

    def filter(self, image):
        if image.mode == "P":
            raise ValueError("cannot filter palette images")
        return image.filter(*self.filterargs)


class BuiltinFilter(Kernel):
    def __init__(self):
        pass


class RankFilter(Filter):
    """
    Create a rank filter.  The rank filter sorts all pixels in
    a window of the given size, and returns the **rank**'th value.

    :param size: The kernel size, in pixels.
    :param rank: What pixel value to pick.  Use 0 for a min filter,
                 ``size * size / 2`` for a median filter, ``size * size - 1``
                 for a max filter, etc.
    """
    name = "Rank"

    def __init__(self, size, rank):
        self.size = size
        self.rank = rank

    def filter(self, image):
        if image.mode == "P":
            raise ValueError("cannot filter palette images")
        image = image.expand(self.size//2, self.size//2)
        return image.rankfilter(self.size, self.rank)


class MedianFilter(RankFilter):
    """
    Create a median filter. Picks the median pixel value in a window with the
    given size.

    :param size: The kernel size, in pixels.
    """
    name = "Median"

    def __init__(self, size=3):
        self.size = size
        self.rank = size*size//2


class MinFilter(RankFilter):
    """
    Create a min filter.  Picks the lowest pixel value in a window with the
    given size.

    :param size: The kernel size, in pixels.
    """
    name = "Min"

    def __init__(self, size=3):
        self.size = size
        self.rank = 0


class MaxFilter(RankFilter):
    """
    Create a max filter.  Picks the largest pixel value in a window with the
    given size.

    :param size: The kernel size, in pixels.
    """
    name = "Max"

    def __init__(self, size=3):
        self.size = size
        self.rank = size*size-1


class ModeFilter(Filter):
    """
    Create a mode filter. Picks the most frequent pixel value in a box with the
    given size.  Pixel values that occur only once or twice are ignored; if no
    pixel value occurs more than twice, the original pixel value is preserved.

    :param size: The kernel size, in pixels.
    """
    name = "Mode"

    def __init__(self, size=3):
        self.size = size

    def filter(self, image):
        return image.modefilter(self.size)


class GaussianBlur(Filter):
    """Gaussian blur filter.

    :param radius: Blur radius.
    """
    name = "GaussianBlur"

    def __init__(self, radius=2):
        self.radius = radius

    def filter(self, image):
        return image.gaussian_blur(self.radius)


class UnsharpMask(Filter):
    """Unsharp mask filter.

    See Wikipedia's entry on `digital unsharp masking`_ for an explanation of
    the parameters.

    :param radius: Blur Radius
    :param percent: Unsharp strength, in percent
    :param threshold: Threshold controls the minimum brightness change that
                      will be sharpened

    .. _digital unsharp masking: https://en.wikipedia.org/wiki/Unsharp_masking#Digital_unsharp_masking
    """
    name = "UnsharpMask"

    def __init__(self, radius=2, percent=150, threshold=3):
        self.radius = radius
        self.percent = percent
        self.threshold = threshold

    def filter(self, image):
        return image.unsharp_mask(self.radius, self.percent, self.threshold)


class BLUR(BuiltinFilter):
    name = "Blur"
    filterargs = (5, 5), 16, 0, (
        1, 1, 1, 1, 1,
        1, 0, 0, 0, 1,
        1, 0, 0, 0, 1,
        1, 0, 0, 0, 1,
        1, 1, 1, 1, 1
        )


class CONTOUR(BuiltinFilter):
    name = "Contour"
    filterargs = (3, 3), 1, 255, (
        -1, -1, -1,
        -1, 8, -1,
        -1, -1, -1
        )


class DETAIL(BuiltinFilter):
    name = "Detail"
    filterargs = (3, 3), 6, 0, (
        0, -1, 0,
        -1, 10, -1,
        0, -1, 0
        )


class EDGE_ENHANCE(BuiltinFilter):
    name = "Edge-enhance"
    filterargs = (3, 3), 2, 0, (
        -1, -1, -1,
        -1, 10, -1,
        -1, -1, -1
        )


class EDGE_ENHANCE_MORE(BuiltinFilter):
    name = "Edge-enhance More"
    filterargs = (3, 3), 1, 0, (
        -1, -1, -1,
        -1, 9, -1,
        -1, -1, -1
        )


class EMBOSS(BuiltinFilter):
    name = "Emboss"
    filterargs = (3, 3), 1, 128, (
        -1, 0, 0,
        0, 1, 0,
        0, 0, 0
        )


class FIND_EDGES(BuiltinFilter):
    name = "Find Edges"
    filterargs = (3, 3), 1, 0, (
        -1, -1, -1,
        -1, 8, -1,
        -1, -1, -1
        )


class SMOOTH(BuiltinFilter):
    name = "Smooth"
    filterargs = (3, 3), 13, 0, (
        1, 1, 1,
        1, 5, 1,
        1, 1, 1
        )


class SMOOTH_MORE(BuiltinFilter):
    name = "Smooth More"
    filterargs = (5, 5), 100, 0, (
        1, 1, 1, 1, 1,
        1, 5, 5, 5, 1,
        1, 5, 44, 5, 1,
        1, 5, 5, 5, 1,
        1, 1, 1, 1, 1
        )


class SHARPEN(BuiltinFilter):
    name = "Sharpen"
    filterargs = (3, 3), 16, 0, (
        -2, -2, -2,
        -2, 32, -2,
        -2, -2, -2
        )
apache-2.0
-1,584,388,950,149,352,200
23.065455
102
0.56165
false