repo_name (string, 5-92 chars) | path (string, 4-232 chars) | copies (string, 19 distinct values) | size (string, 4-7 chars) | content (string, 721-1.04M chars) | license (string, 15 distinct values) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
flgiordano/netcash | +/google-cloud-sdk/lib/surface/compute/instance_groups/managed/list.py | 1 | 4512 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for listing managed instance groups."""
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute import managed_instance_groups_utils
from googlecloudsdk.calliope import base
from googlecloudsdk.core import log
def _AutoscalerWithErrorInList(resources):
"""Checks, if there exists autoscaler, which reports errors."""
for resource in resources:
if resource['autoscaled'] == 'yes (*)':
return True
return False
class ListDynamicPropertisMixin(object):
"""Untilies for computling Autoscaler related data for 'list' commands."""
def ComputeDynamicProperties(self, args, items):
"""Add Autoscaler information if Autoscaler is defined for the item."""
_ = args
# Items are expected to be IGMs.
items = list(items)
for mig in managed_instance_groups_utils.AddAutoscalersToMigs(
migs_iterator=self.ComputeInstanceGroupSize(items=items),
project=self.project,
compute=self.compute,
http=self.http,
batch_url=self.batch_url,
fail_when_api_not_supported=False):
if 'autoscaler' in mig and mig['autoscaler'] is not None:
if (hasattr(mig['autoscaler'], 'status') and mig['autoscaler'].status ==
self.messages.Autoscaler.StatusValueValuesEnum.ERROR):
mig['autoscaled'] = 'yes (*)'
else:
mig['autoscaled'] = 'yes'
else:
mig['autoscaled'] = 'no'
yield mig
# TODO(user): This acts like
# instance-groups list --only-managed
# so they should share code.
@base.ReleaseTracks(base.ReleaseTrack.GA, base.ReleaseTrack.BETA)
class List(ListDynamicPropertisMixin,
base_classes.InstanceGroupManagerDynamicProperiesMixin,
base_classes.ZonalLister):
"""List Google Compute Engine managed instance groups."""
@property
def service(self):
return self.compute.instanceGroupManagers
@property
def resource_type(self):
return 'instanceGroupManagers'
def GetResources(self, args, errors):
resources = super(List, self).GetResources(args, errors)
return (resource for resource in resources if resource.zone)
def Display(self, args, resources):
"""Prints the given resources."""
resources = list(resources)
super(List, self).Display(args, resources)
if _AutoscalerWithErrorInList(resources):
log.err.Print('(*) - there are errors in your autoscaling setup, please '
'describe the resource to see details')
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class ListAlpha(ListDynamicPropertisMixin,
base_classes.InstanceGroupManagerDynamicProperiesMixin,
base_classes.MultiScopeLister):
"""List Google Compute Engine managed instance groups."""
SCOPES = [base_classes.ScopeType.regional_scope,
base_classes.ScopeType.zonal_scope]
@staticmethod
def Args(parser):
base_classes.MultiScopeLister.AddScopeArgs(parser, ListAlpha.SCOPES)
@property
def global_service(self):
return None
@property
def regional_service(self):
return self.compute.regionInstanceGroupManagers
@property
def zonal_service(self):
return self.compute.instanceGroupManagers
@property
def aggregation_service(self):
return self.compute.instanceGroupManagers
@property
def resource_type(self):
return 'instanceGroupManagers'
def Display(self, args, resources):
"""Prints the given resources."""
resources = list(resources)
super(ListAlpha, self).Display(args, resources)
if _AutoscalerWithErrorInList(resources):
log.err.Print('(*) - there are errors in your autoscaling setup, please '
'describe the resource to see details')
List.detailed_help = base_classes.GetZonalListerHelp(
'managed instance groups')
ListAlpha.detailed_help = base_classes.GetMultiScopeListerHelp(
'managed instance groups', ListAlpha.SCOPES)
| bsd-3-clause | -6,334,655,391,686,217,000 | 33.707692 | 80 | 0.717199 | false |
Meallia/robotframework-appiumlibrary | AppiumLibrary/keywords/_android_utils.py | 1 | 5329 | # -*- coding: utf-8 -*-
import base64
from .keywordgroup import KeywordGroup
from selenium.common.exceptions import TimeoutException
from kitchen.text.converters import to_bytes
class _AndroidUtilsKeywords(KeywordGroup):
# Public
def get_network_connection_status(self):
"""Returns an integer bitmask specifying the network connection type.
Android only.
See `set network connection status` for more details.
"""
driver = self._current_application()
return driver.network_connection
def set_network_connection_status(self, connectionStatus):
"""Sets the network connection Status.
Android only.
Possible values:
| =Value= | =Alias= | =Data= | =Wifi= | =Airplane Mode= |
| 0 | (None) | 0 | 0 | 0 |
| 1 | (Airplane Mode) | 0 | 0 | 1 |
| 2 | (Wifi only) | 0 | 1 | 0 |
| 4 | (Data only) | 1 | 0 | 0 |
| 6 | (All network on) | 1 | 1 | 0 |
"""
driver = self._current_application()
return driver.set_network_connection(int(connectionStatus))
def pull_file(self, path, decode=False):
"""Retrieves the file at `path` and return it's content.
Android only.
- _path_ - the path to the file on the device
- _decode_ - True/False decode the data (base64) before returning it (default=False)
"""
driver = self._current_application()
theFile = driver.pull_file(path)
if decode:
theFile = base64.b64decode(theFile)
return str(theFile)
def pull_folder(self, path, decode=False):
"""Retrieves a folder at `path`. Returns the folder's contents zipped.
Android only.
- _path_ - the path to the folder on the device
- _decode_ - True/False decode the data (base64) before returning it (default=False)
"""
driver = self._current_application()
theFolder = driver.pull_folder(path)
if decode:
theFolder = base64.b64decode(theFolder)
return theFolder
def push_file(self, path, data, encode=False):
"""Puts the data in the file specified as `path`.
Android only.
- _path_ - the path on the device
- _data_ - data to be written to the file
- _encode_ - True/False encode the data as base64 before writing it to the file (default=False)
"""
driver = self._current_application()
data = to_bytes(data)
if encode:
data = base64.b64encode(data)
driver.push_file(path, data)
def get_activity(self):
"""Retrieves the current activity on the device.
Android only.
"""
driver = self._current_application()
return driver.current_activity
def start_activity(self, appPackage, appActivity, **opts):
"""Opens an arbitrary activity during a test. If the activity belongs to
another application, that application is started and the activity is opened.
Android only.
- _appPackage_ - The package containing the activity to start.
- _appActivity_ - The activity to start.
- _appWaitPackage_ - Begin automation after this package starts (optional).
- _appWaitActivity_ - Begin automation after this activity starts (optional).
        - _intentAction_ - Intent to start (optional).
- _intentCategory_ - Intent category to start (optional).
- _intentFlags_ - Flags to send to the intent (optional).
- _optionalIntentArguments_ - Optional arguments to the intent (optional).
- _stopAppOnReset_ - Should the app be stopped on reset (optional)?
"""
# Almost the same code as in appium's start activity,
# just to keep the same keyword names as in open application
arguments = {
'app_wait_package': 'appWaitPackage',
'app_wait_activity': 'appWaitActivity',
'intent_action': 'intentAction',
'intent_category': 'intentCategory',
'intent_flags': 'intentFlags',
'optional_intent_arguments': 'optionalIntentArguments',
'stop_app_on_reset': 'stopAppOnReset'
}
data = {}
for key, value in arguments.items():
if value in opts:
data[key] = opts[value]
driver = self._current_application()
driver.start_activity(app_package=appPackage, app_activity=appActivity, **data)
def wait_activity(self, activity, timeout, interval=1):
"""Wait for an activity: block until target activity presents
or time out.
Android only.
- _activity_ - target activity
- _timeout_ - max wait time, in seconds
- _interval_ - sleep interval between retries, in seconds
"""
driver = self._current_application()
if not driver.wait_activity(activity=activity, timeout=float(timeout), interval=float(interval)):
raise TimeoutException(msg="Activity %s never presented, current activity: %s" % (activity, self.get_activity()))
| apache-2.0 | -2,644,014,547,221,142,000 | 36.265734 | 125 | 0.586789 | false |
odoo-brazil/odoo-brazil-commission | l10n_br_sale_commission/models/account_invoice.py | 1 | 1097 | # -*- coding: utf-8 -*-
# © 2016 KMEE INFORMATICA LTDA (https://www.kmee.com.br)
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from openerp import models, api
from openerp.tools.safe_eval import safe_eval
class AccountInvoiceLineAgent(models.Model):
_inherit = 'account.invoice.line.agent'
@api.depends('commission.commission_type', 'invoice_line.price_subtotal',
'commission.amount_base_type')
def _compute_amount(self):
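        # For 'notax'-based commissions on non-commission-free products: 'fixed' commissions
        # apply fix_qty as a percentage of the gross subtotal, other types use the commission's
        # section rules; anything else falls back to the parent computation.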
for line in self:
line.amount = 0.0
if (line.commission.amount_base_type == 'notax' and
not line.invoice_line.product_id.commission_free and
line.commission):
subtotal = line.invoice_line.price_gross
if line.commission.commission_type == 'fixed':
line.amount = subtotal * (line.commission.fix_qty / 100.0)
else:
line.amount = line.commission.calculate_section(subtotal)
else:
return super(AccountInvoiceLineAgent, line)._compute_amount()
| agpl-3.0 | 897,987,496,867,799,300 | 41.153846 | 78 | 0.610401 | false |
Lothiraldan/pytf | pytf/reporters.py | 1 | 3761 | from __future__ import print_function
import sys
import time
import traceback
class BaseReporter(object):
def begin_tests(self):
pass
def end_tests(self):
pass
class TextTestReporter(BaseReporter):
head_template = '{double_dash}\n{status}: {id}\n{single_dash}\n'
message_template = '{title}\n{message}\n{single_dash}\n'
message_foot_template = '{double_dash}\n'
foot_template = '{status}: Ran {total} tests in {duration:.3f}s, ' \
'{failing} failing tests and {errors} ' \
'tests in errors\n'
start_template = 'Starting tests\n\n'
def begin_tests(self):
self.failed = []
self.errors = []
self.runs = 0
self.start = time.time()
self._print_tpl(self.start_template)
def show_result(self, result):
self.runs += 1
if result.success:
self._print('.')
else:
if result.exception.phase == 'test':
self._print('F')
self.failed.append(result)
else:
self._print('E')
self.errors.append(result)
def end_tests(self):
# Print a newline
print('')
self._print_results('ERROR', self.errors)
self._print_results('FAILED', self.failed)
print('')
self._print_footer()
def _print_results(self, status, results):
for result in results:
self._print_failing_test(status, result)
def _print_failing_test(self, status, result):
double_dash = '=' * 70
single_dash = '-' * 70
self._print_tpl(self.head_template, double_dash=double_dash,
single_dash=single_dash, status=status,
id=result.id)
traceback.print_exception(*result.exception.exc_info)
for title, message in result.messages:
self._print_tpl(self.message_template,
title='{:-^70}'.format(title),
message=message, single_dash=single_dash)
self._print_tpl(self.message_foot_template, double_dash=double_dash)
def _print_footer(self):
stop = time.time()
running_time = stop - self.start
status = 'OK'
if self.errors:
status = 'ERROR'
elif self.failed:
status = 'FAILED'
self._print_tpl(self.foot_template, total=self.runs,
duration=running_time, failing=len(self.failed),
errors=len(self.errors), status=status)
def _print_tpl(self, template, **kwargs):
self._print(template.format(**kwargs))
def _print(self, text):
print(text, end='')
sys.stdout.flush()
class EarlyTextReporter(TextTestReporter):
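    # Variant of TextTestReporter that reports each test as soon as it finishes (including
    # full failure/error details) instead of summarising only at the end of the run.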
success_template = '{status}: {id}\n'
head_template = '\t{double_dash}\n\t{status}: {id}\n\t{single_dash}\n'
message_template = '\t{title}\n\t{message}\n\t{single_dash}\n'
message_foot_template = '\t{double_dash}\n'
foot_template = '{status}: Ran {total} tests in {duration:.3f}s, ' \
'{failing} failing tests and {errors} ' \
'tests in errors\n'
start_template = 'Starting tests\n\n'
def show_result(self, result):
self.runs += 1
if result.success:
self._print_tpl(self.success_template, status="OK", id=result.id)
else:
if result.exception.phase == 'test':
self._print_failing_test("FAIL", result)
self.failed.append(result)
else:
self._print_failing_test("ERROR", result)
self.errors.append(result)
def end_tests(self):
# Print a newline
print('')
self._print_footer()
| mit | -6,397,205,347,495,334,000 | 27.709924 | 77 | 0.556767 | false |
zstackio/zstack-woodpecker | integrationtest/vm/mn_ha2/test_mnha2_1_mn_host_grace_stop_crt_vm.py | 1 | 3095 | '''
Test Steps:
1. Gracefully stop the host where the VIP is located.
2. Check that the VIP switches to another management node.
3. Create a VM to validate that everything still works.
@author: SyZhao
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.operations.node_operations as node_ops
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.zstack_test.zstack_test_vm as test_vm_header
import time
import os
vm = None
vip_s_vm_cfg_lst = None
test_stub = test_lib.lib_get_test_stub()
def test():
global vm
global vip_s_vm_cfg_lst
os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = os.environ['zstackHaVip']
vip_s_vm_cfg_lst = test_stub.get_s_vm_cfg_lst_vip_bind(test_lib.all_scenario_config, test_lib.scenario_file)
if len(vip_s_vm_cfg_lst) != 1:
test_util.test_fail('vip has been running on %d host(s)' % len(vip_s_vm_cfg_lst))
test_util.test_logger("force shutdown host [%s]" % (vip_s_vm_cfg_lst[0].ip_))
test_stub.stop_host(vip_s_vm_cfg_lst[0], test_lib.all_scenario_config)
time.sleep(20)
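    # Give the management-node VIP time to drift to the standby host before checking where it landed.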
expected_vip_s_vm_cfg_lst_ip = test_stub.get_expected_vip_s_vm_cfg_lst_after_switch(test_lib.all_scenario_config, test_lib.scenario_file, vip_s_vm_cfg_lst[0].ip_)
if not test_stub.check_if_vip_is_on_host(test_lib.all_scenario_config, test_lib.scenario_file, expected_vip_s_vm_cfg_lst_ip):
test_util.test_fail("find vip should drift on ip %s, but is not on it." %(expected_vip_s_vm_cfg_lst_ip))
vip_s_vm_cfg_lst_new = test_stub.get_s_vm_cfg_lst_vip_bind(test_lib.all_scenario_config, test_lib.scenario_file)
if len(vip_s_vm_cfg_lst_new) != 1:
test_util.test_fail('vip has been running on %d host(s)' % len(vip_s_vm_cfg_lst_new))
test_stub.wrapper_of_wait_for_management_server_start(600)
test_stub.ensure_hosts_connected(exclude_host=[vip_s_vm_cfg_lst[0]])
test_stub.ensure_bss_connected(exclude_host=[vip_s_vm_cfg_lst[0]])
#test_stub.ensure_pss_connected()
ps_type = res_ops.query_resource(res_ops.PRIMARY_STORAGE)[0].type
cluster_num = len(res_ops.query_resource(res_ops.CLUSTER))
if ps_type == 'MiniStorage' and cluster_num == 1:
test_util.test_pass('Single Cluster MINI Env Test Success')
vm = test_stub.create_basic_vm()
vm.check()
vm.destroy()
test_util.test_pass('Create VM Test Success')
#Will be called what ever test result is
def env_recover():
test_util.test_logger("recover host: %s" % (vip_s_vm_cfg_lst[0].ip_))
test_stub.recover_host(vip_s_vm_cfg_lst[0], test_lib.all_scenario_config, test_lib.deploy_config)
#test_stub.wait_for_mn_ha_ready(test_lib.all_scenario_config, test_lib.scenario_file)
test_stub.exec_zsha2_version(vip_s_vm_cfg_lst[0].ip_, "root", "password")
#Will be called only if exception happens in test().
def error_cleanup():
global vm
if vm:
try:
vm.destroy()
except:
pass
| apache-2.0 | -55,401,214,068,597,270 | 39.266667 | 166 | 0.67399 | false |
chrisnorman7/menu | menu/menu.py | 1 | 1890 | """Menu objects."""
from pyglet.window import key
from .errors import EmptyMenuError
class Menu(object):
"""A menu object. Use window.set_menu(menu) to add it to the window."""
def __init__(self, title):
self.window = None # The window this menu will be attached to.
self.title = title # The title of this menu.
self.items = [] # The items that are loaded into this menu.
self.position = 0 # The position of the cursor in the menu.
self.hotkeys = {
(key.DOWN, 0): self.move_down,
(key.UP, 0): self.move_up,
(key.ESCAPE, 0): self.detach
}
def append_item(self, item):
"""Add an item to the menu."""
self.items.append(item)
def remove_item(self, item):
"""Remove an item from the menu."""
self.items.remove(item)
def insert_item(self, pos, item):
"""Insert item at position pos."""
self.items.insert(pos, item)
def get_current_item(self):
"""Get the item at the current position."""
return self.items[self.position]
def attach(self, window):
"""Attach this menu to a window."""
if not self.items:
raise EmptyMenuError()
self.window = window
self.window.push_handlers(object()) # Push an empty event handler onto the stack.
self.activate()
def detach(self):
"""Detach this menu from the window."""
self.window.pop_handlers()
return True
def move_up(self):
"""Move up in the menu."""
self.position -= 1
return self.activate()
def move_down(self):
"""Move down in the menu."""
self.position += 1
return self.activate()
def activate(self):
"""Focus the current item in the menu."""
if self.position <= 0:
self.position = 0
elif self.position >= len(self.items):
self.position = len(self.items) - 1
i = self.get_current_item()
self.window.pop_handlers()
self.window.push_handlers(i)
i.announce()
return True
| mpl-2.0 | -1,545,767,328,364,982,000 | 25.794118 | 83 | 0.640741 | false |
tsujamin/digi-approval | src/digiapproval_project/digiapproval_project/apps/digiapproval/migrations/0004_auto__chg_field_task_uuid__add_field_workflowspec_toplevel.py | 1 | 7190 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Task.uuid'
db.alter_column(u'digiapproval_task', 'uuid', self.gf('django.db.models.fields.CharField')(max_length='36'))
# Adding field 'WorkflowSpec.toplevel'
db.add_column(u'digiapproval_workflowspec', 'toplevel',
self.gf('django.db.models.fields.BooleanField')(default=True),
keep_default=False)
def backwards(self, orm):
# Changing field 'Task.uuid'
db.alter_column(u'digiapproval_task', 'uuid', self.gf('django.db.models.fields.CharField')(max_length='32'))
# Deleting field 'WorkflowSpec.toplevel'
db.delete_column(u'digiapproval_workflowspec', 'toplevel')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'digiapproval.customeraccount': {
'Meta': {'object_name': 'CustomerAccount'},
'account_type': ('django.db.models.fields.CharField', [], {'default': "'CUSTOMER'", 'max_length': '16'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent_accounts': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'sub_accounts'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['digiapproval.CustomerAccount']"}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'})
},
u'digiapproval.task': {
'Meta': {'object_name': 'Task'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'task': ('jsonfield.fields.JSONField', [], {}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': "'36'"}),
'workflow': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['digiapproval.Workflow']"})
},
u'digiapproval.userfile': {
'Meta': {'object_name': 'UserFile'},
'_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'virus_status': ('django.db.models.fields.CharField', [], {'default': "'UNSCANNED'", 'max_length': '16'})
},
u'digiapproval.workflow': {
'Meta': {'object_name': 'Workflow'},
'approver': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'workflow_approver'", 'to': u"orm['auth.User']"}),
'completed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'customer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'workflow_customer'", 'to': u"orm['digiapproval.CustomerAccount']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'spec': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['digiapproval.WorkflowSpec']"}),
'workflow': ('digiapproval_project.apps.digiapproval.fields.WorkflowField', [], {})
},
u'digiapproval.workflowspec': {
'Meta': {'object_name': 'WorkflowSpec'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': "'64'"}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']"}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'spec': ('digiapproval_project.apps.digiapproval.fields.WorkflowSpecField', [], {}),
'toplevel': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
}
}
complete_apps = ['digiapproval'] | gpl-3.0 | -1,770,627,764,572,038,700 | 66.839623 | 226 | 0.566064 | false |
Taapat/enigma2-openpli-fulan | lib/python/Components/EpgList.py | 1 | 16648 | from HTMLComponent import HTMLComponent
from GUIComponent import GUIComponent
from enigma import eEPGCache, eListbox, eListboxPythonMultiContent, gFont, \
RT_HALIGN_LEFT, RT_HALIGN_RIGHT, RT_HALIGN_CENTER, RT_VALIGN_CENTER
from Tools.Alternatives import CompareWithAlternatives
from Tools.LoadPixmap import LoadPixmap
from time import localtime, time
from Components.config import config
from ServiceReference import ServiceReference
from Tools.Directories import resolveFilename, SCOPE_CURRENT_SKIN
from skin import parseFont
EPG_TYPE_SINGLE = 0
EPG_TYPE_MULTI = 1
EPG_TYPE_SIMILAR = 2
class Rect:
def __init__(self, x, y, width, height):
self.x = x
self.y = y
self.w = width
self.h = height
# silly, but backward compatible
def left(self):
return self.x
def top(self):
return self.y
def height(self):
return self.h
def width(self):
return self.w
class EPGList(HTMLComponent, GUIComponent):
def __init__(self, type=EPG_TYPE_SINGLE, selChangedCB=None, timer = None):
self.days = (_("Mon"), _("Tue"), _("Wed"), _("Thu"), _("Fri"), _("Sat"), _("Sun"))
self.timer = timer
self.onSelChanged = [ ]
if selChangedCB is not None:
self.onSelChanged.append(selChangedCB)
GUIComponent.__init__(self)
self.type=type
self.l = eListboxPythonMultiContent()
self.eventItemFont = gFont("Regular", 22)
self.eventTimeFont = gFont("Regular", 16)
self.iconSize = 21
self.iconDistance = 2
self.colGap = 10
self.skinColumns = False
self.tw = 90
self.dy = 0
if type is EPG_TYPE_SINGLE:
self.l.setBuildFunc(self.buildSingleEntry)
elif type is EPG_TYPE_MULTI:
self.l.setBuildFunc(self.buildMultiEntry)
else:
assert(type is EPG_TYPE_SIMILAR)
self.l.setBuildFunc(self.buildSimilarEntry)
self.epgcache = eEPGCache.getInstance()
self.clocks = [ LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/epgclock_add.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/epgclock_pre.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/epgclock.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/epgclock_prepost.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/epgclock_post.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/zapclock_add.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/zapclock_pre.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/zapclock.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/zapclock_prepost.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/zapclock_post.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/zaprecclock_add.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/zaprecclock_pre.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/zaprecclock.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/zaprecclock_prepost.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/zaprecclock_post.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/repepgclock_add.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/repepgclock_pre.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/repepgclock.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/repepgclock_prepost.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/repepgclock_post.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/repzapclock_add.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/repzapclock_pre.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/repzapclock.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/repzapclock_prepost.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/repzapclock_post.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/repzaprecclock_add.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/repzaprecclock_pre.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/repzaprecclock.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/repzaprecclock_prepost.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/repzaprecclock_post.png')) ]
def getEventFromId(self, service, eventid):
event = None
if self.epgcache is not None and eventid is not None:
event = self.epgcache.lookupEventId(service.ref, eventid)
return event
def getCurrentChangeCount(self):
if self.type is EPG_TYPE_MULTI and self.l.getCurrentSelection() is not None:
return self.l.getCurrentSelection()[0]
return 0
def getCurrent(self):
idx=0
if self.type is EPG_TYPE_MULTI:
idx += 1
tmp = self.l.getCurrentSelection()
if tmp is None:
return ( None, None )
eventid = tmp[idx+1]
service = ServiceReference(tmp[idx])
event = self.getEventFromId(service, eventid)
return ( event, service )
def moveUp(self):
self.instance.moveSelection(self.instance.moveUp)
def moveDown(self):
self.instance.moveSelection(self.instance.moveDown)
	def connectSelectionChanged(self, func):
if not self.onSelChanged.count(func):
self.onSelChanged.append(func)
	def disconnectSelectionChanged(self, func):
self.onSelChanged.remove(func)
def selectionChanged(self):
for x in self.onSelChanged:
if x is not None:
x()
# try:
# x()
# except: # FIXME!!!
# print "FIXME in EPGList.selectionChanged"
# pass
GUI_WIDGET = eListbox
def postWidgetCreate(self, instance):
instance.setWrapAround(True)
instance.selectionChanged.get().append(self.selectionChanged)
instance.setContent(self.l)
def preWidgetRemove(self, instance):
instance.selectionChanged.get().remove(self.selectionChanged)
instance.setContent(None)
def recalcEntrySize(self):
esize = self.l.getItemSize()
width = esize.width()
height = esize.height()
try:
self.iconSize = self.clocks[0].size().height()
except:
pass
self.space = self.iconSize + self.iconDistance
self.dy = int((height - self.iconSize)/2.)
if self.type is EPG_TYPE_SINGLE:
if self.skinColumns:
x = 0
self.weekday_rect = Rect(0, 0, self.gap(self.col[0]), height)
x += self.col[0]
self.datetime_rect = Rect(x, 0, self.gap(self.col[1]), height)
x += self.col[1]
self.descr_rect = Rect(x, 0, width-x, height)
else:
self.weekday_rect = Rect(0, 0, width/20*2-10, height)
self.datetime_rect = Rect(width/20*2, 0, width/20*5-15, height)
self.descr_rect = Rect(width/20*7, 0, width/20*13, height)
elif self.type is EPG_TYPE_MULTI:
if self.skinColumns:
x = 0
self.service_rect = Rect(x, 0, self.gap(self.col[0]), height)
x += self.col[0]
self.progress_rect = Rect(x, 8, self.gap(self.col[1]), height-16)
self.start_end_rect = Rect(x, 0, self.gap(self.col[1]), height)
x += self.col[1]
self.descr_rect = Rect(x, 0, width-x, height)
else:
xpos = 0
w = width/10*3
self.service_rect = Rect(xpos, 0, w-10, height)
xpos += w
w = width/10*2
self.start_end_rect = Rect(xpos, 0, w-10, height)
self.progress_rect = Rect(xpos, 4, w-10, height-8)
xpos += w
w = width/10*5
self.descr_rect = Rect(xpos, 0, width, height)
else: # EPG_TYPE_SIMILAR
if self.skinColumns:
x = 0
self.weekday_rect = Rect(0, 0, self.gap(self.col[0]), height)
x += self.col[0]
self.datetime_rect = Rect(x, 0, self.gap(self.col[1]), height)
x += self.col[1]
self.service_rect = Rect(x, 0, width-x, height)
else:
self.weekday_rect = Rect(0, 0, width/20*2-10, height)
self.datetime_rect = Rect(width/20*2, 0, width/20*5-15, height)
self.service_rect = Rect(width/20*7, 0, width/20*13, height)
def gap(self, width):
return width - self.colGap
def getClockTypesForEntry(self, service, eventId, beginTime, duration):
if not beginTime:
return None
rec = self.timer.isInTimer(eventId, beginTime, duration, service)
if rec is not None:
return rec[1]
else:
return None
def buildSingleEntry(self, service, eventId, beginTime, duration, EventName):
clock_types = self.getClockTypesForEntry(service, eventId, beginTime, duration)
r1=self.weekday_rect
r2=self.datetime_rect
r3=self.descr_rect
t = localtime(beginTime)
res = [
None, # no private data needed
(eListboxPythonMultiContent.TYPE_TEXT, r1.x, r1.y, r1.w, r1.h, 0, RT_HALIGN_RIGHT|RT_VALIGN_CENTER, self.days[t[6]]),
(eListboxPythonMultiContent.TYPE_TEXT, r2.x, r2.y, r2.w, r1.h, 0, RT_HALIGN_RIGHT|RT_VALIGN_CENTER, "%2d.%02d, %02d:%02d"%(t[2],t[1],t[3],t[4]))
]
if clock_types:
for i in range(len(clock_types)):
res.append((eListboxPythonMultiContent.TYPE_PIXMAP_ALPHATEST, r3.x + i * self.space, r3.y + self.dy, self.iconSize, self.iconSize, self.clocks[clock_types[i]]))
res.append((eListboxPythonMultiContent.TYPE_TEXT, r3.x + (i + 1) * self.space, r3.y, r3.w, r3.h, 0, RT_HALIGN_LEFT|RT_VALIGN_CENTER, EventName))
else:
res.append((eListboxPythonMultiContent.TYPE_TEXT, r3.x, r3.y, r3.w, r3.h, 0, RT_HALIGN_LEFT|RT_VALIGN_CENTER, EventName))
return res
def buildSimilarEntry(self, service, eventId, beginTime, service_name, duration):
clock_types = self.getClockTypesForEntry(service, eventId, beginTime, duration)
r1=self.weekday_rect
r2=self.datetime_rect
r3=self.service_rect
t = localtime(beginTime)
res = [
None, # no private data needed
(eListboxPythonMultiContent.TYPE_TEXT, r1.x, r1.y, r1.w, r1.h, 0, RT_HALIGN_RIGHT|RT_VALIGN_CENTER, self.days[t[6]]),
(eListboxPythonMultiContent.TYPE_TEXT, r2.x, r2.y, r2.w, r1.h, 0, RT_HALIGN_RIGHT|RT_VALIGN_CENTER, "%2d.%02d, %02d:%02d"%(t[2],t[1],t[3],t[4]))
]
if clock_types:
for i in range(len(clock_types)):
res.append((eListboxPythonMultiContent.TYPE_PIXMAP_ALPHATEST, r3.x + i * self.space, r3.y + self.dy, self.iconSize, self.iconSize, self.clocks[clock_types[i]]))
res.append((eListboxPythonMultiContent.TYPE_TEXT, r3.x + (i + 1) * self.space, r3.y, r3.w, r3.h, 0, RT_HALIGN_LEFT|RT_VALIGN_CENTER, service_name))
else:
res.append((eListboxPythonMultiContent.TYPE_TEXT, r3.x, r3.y, r3.w, r3.h, 0, RT_HALIGN_LEFT|RT_VALIGN_CENTER, service_name))
return res
def buildMultiEntry(self, changecount, service, eventId, beginTime, duration, EventName, nowTime, service_name):
clock_types = self.getClockTypesForEntry(service, eventId, beginTime, duration)
r1=self.service_rect
r2=self.progress_rect
r3=self.descr_rect
r4=self.start_end_rect
res = [ None ] # no private data needed
if clock_types:
res.append((eListboxPythonMultiContent.TYPE_TEXT, r1.x, r1.y, r1.w - self.space * len(clock_types), r1.h, 0, RT_HALIGN_LEFT|RT_VALIGN_CENTER, service_name))
for i in range(len(clock_types)):
res.append((eListboxPythonMultiContent.TYPE_PIXMAP_ALPHATEST, r1.x + r1.w - self.space * (i + 1), r1.y + self.dy, self.iconSize, self.iconSize, self.clocks[clock_types[len(clock_types) - 1 - i]]))
else:
res.append((eListboxPythonMultiContent.TYPE_TEXT, r1.x, r1.y, r1.w, r1.h, 0, RT_HALIGN_LEFT|RT_VALIGN_CENTER, service_name))
if beginTime is not None:
if nowTime < beginTime:
begin = localtime(beginTime)
end = localtime(beginTime+duration)
res.extend((
(eListboxPythonMultiContent.TYPE_TEXT, r4.x, r4.y, r4.w, r4.h, 1, RT_HALIGN_CENTER|RT_VALIGN_CENTER, "%02d.%02d - %02d.%02d"%(begin[3],begin[4],end[3],end[4])),
(eListboxPythonMultiContent.TYPE_TEXT, r3.x, r3.y, self.gap(self.tw), r3.h, 1, RT_HALIGN_RIGHT|RT_VALIGN_CENTER, _("%d min") % (duration / 60)),
(eListboxPythonMultiContent.TYPE_TEXT, r3.x + self.tw, r3.y, r3.w, r3.h, 0, RT_HALIGN_LEFT, EventName)
))
else:
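				# Event already in progress: render the elapsed percentage as a progress bar and show
				# the remaining minutes, prefixed with '+' while time is still left.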
percent = (nowTime - beginTime) * 100 / duration
prefix = "+"
remaining = ((beginTime+duration) - int(time())) / 60
if remaining <= 0:
prefix = ""
res.extend((
(eListboxPythonMultiContent.TYPE_PROGRESS, r2.x, r2.y, r2.w, r2.h, percent),
(eListboxPythonMultiContent.TYPE_TEXT, r3.x, r3.y, self.gap(self.tw), r3.h, 1, RT_HALIGN_RIGHT|RT_VALIGN_CENTER, _("%s%d min") % (prefix, remaining)),
(eListboxPythonMultiContent.TYPE_TEXT, r3.x + self.tw, r3.y, r3.w, r3.h, 0, RT_HALIGN_LEFT, EventName)
))
return res
def queryEPG(self, list, buildFunc=None):
if self.epgcache is not None:
if buildFunc is not None:
return self.epgcache.lookupEvent(list, buildFunc)
else:
return self.epgcache.lookupEvent(list)
return [ ]
def fillMultiEPG(self, services, stime=-1):
#t = time()
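		# The leading string passed to eEPGCache encodes which event fields the EPG cache
		# should return for each (service, time) query tuple, one letter per field.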
test = [ (service.ref.toString(), 0, stime) for service in services ]
test.insert(0, 'X0RIBDTCn')
self.list = self.queryEPG(test)
self.l.setList(self.list)
#print time() - t
self.selectionChanged()
def updateMultiEPG(self, direction):
#t = time()
test = [ x[3] and (x[1], direction, x[3]) or (x[1], direction, 0) for x in self.list ]
test.insert(0, 'XRIBDTCn')
tmp = self.queryEPG(test)
cnt=0
for x in tmp:
changecount = self.list[cnt][0] + direction
if changecount >= 0:
if x[2] is not None:
self.list[cnt]=(changecount, x[0], x[1], x[2], x[3], x[4], x[5], x[6])
cnt+=1
self.l.setList(self.list)
#print time() - t
self.selectionChanged()
def fillSingleEPG(self, service):
t = time()
epg_time = t - config.epg.histminutes.getValue()*60
test = [ 'RIBDT', (service.ref.toString(), 0, epg_time, -1) ]
self.list = self.queryEPG(test)
self.l.setList(self.list)
if t != epg_time:
idx = 0
for x in self.list:
idx += 1
if t < x[2]+x[3]:
break
self.instance.moveSelectionTo(idx-1)
self.selectionChanged()
def sortSingleEPG(self, type):
list = self.list
if list:
event_id = self.getSelectedEventId()
if type is 1:
list.sort(key=lambda x: (x[4] and x[4].lower(), x[2]))
else:
assert(type is 0)
list.sort(key=lambda x: x[2])
self.l.invalidate()
self.moveToEventId(event_id)
def getSelectedEventId(self):
x = self.l.getCurrentSelection()
return x and x[1]
def moveToService(self,serviceref):
if not serviceref:
return
index = 0
refstr = serviceref.toString()
for x in self.list:
if CompareWithAlternatives(x[1], refstr):
self.instance.moveSelectionTo(index)
break
index += 1
def moveToEventId(self, eventId):
if not eventId:
return
index = 0
for x in self.list:
if x[1] == eventId:
self.instance.moveSelectionTo(index)
break
index += 1
def fillSimilarList(self, refstr, event_id):
t = time()
# search similar broadcastings
if event_id is None:
return
l = self.epgcache.search(('RIBND', 1024, eEPGCache.SIMILAR_BROADCASTINGS_SEARCH, refstr, event_id))
if l and len(l):
l.sort(key=lambda x: x[2])
self.l.setList(l)
self.selectionChanged()
print time() - t
def applySkin(self, desktop, parent):
def warningWrongSkinParameter(string):
print "[EPGList] wrong '%s' skin parameters" % string
def setEventItemFont(value):
self.eventItemFont = parseFont(value, ((1,1),(1,1)))
def setEventTimeFont(value):
self.eventTimeFont = parseFont(value, ((1,1),(1,1)))
def setIconDistance(value):
self.iconDistance = int(value)
def setIconShift(value):
self.dy = int(value)
def setTimeWidth(value):
self.tw = int(value)
def setColWidths(value):
self.col = map(int, value.split(','))
if len(self.col) == 2:
self.skinColumns = True
else:
warningWrongSkinParameter(attrib)
def setColGap(value):
self.colGap = int(value)
for (attrib, value) in self.skinAttributes[:]:
try:
locals().get(attrib)(value)
self.skinAttributes.remove((attrib, value))
except:
pass
self.l.setFont(0, self.eventItemFont)
self.l.setFont(1, self.eventTimeFont)
return GUIComponent.applySkin(self, desktop, parent)
| gpl-2.0 | -8,331,686,049,846,407,000 | 38.450237 | 200 | 0.701766 | false |
eallrich/metricdock | core/core.py | 1 | 5988 | import datetime
import json
import logging
import os
import time
from flask import Flask, abort, g, request
import redis
from . import settings
from .whisperdb import Whisper
logging.basicConfig(**settings.logging)
logger = logging.getLogger(__name__)
app = Flask(__name__)
# Note that there is no teardown for the redis connection
def get_redis():
r = getattr(g, '_redis', None)
if not r:
r = g._redis = redis.StrictRedis(**settings.redis_server)
return r
@app.route('/')
def root():
return str(int(time.time()))
def find_whispers():
whispers = []
if not os.path.isdir(settings.whisper_path):
return whispers
# We're going to remove this prefix from results
prefix = "%s/" % settings.whisper_path
for root, _, files in os.walk(settings.whisper_path):
root = root.replace(prefix, '')
for name in files:
# Drop the extension
name = name.rsplit('.', 1)[0]
path = os.path.join(root, name)
whispers.append(path)
return whispers
@app.route('/fetch')
def fetch():
response = {'metrics': [w.replace('/','.') for w in find_whispers()]}
return json.dumps(response)
@app.route('/fetch/<metric>')
def fetch_metric(metric):
return fetch_metric_hour(metric)
@app.route('/fetch/<metric>/<start>/<end>')
def fetch_metric_interval(metric, start, end):
wsp = Whisper(metric)
timeinfo, values = wsp.fetch(start, end)
start, stop, step = timeinfo
response = {'start': start, 'stop': stop, 'step': step, 'values': values}
return json.dumps(response)
@app.route('/fetch/<metric>/hour')
def fetch_metric_hour(metric):
one_hour_ago = datetime.datetime.now() - datetime.timedelta(hours=1)
start_ts = one_hour_ago.strftime("%s")
end_ts = int(time.time())
return fetch_metric_interval(metric, start_ts, end_ts)
@app.route('/fetch/<metric>/day')
def fetch_metric_day(metric):
one_day_ago = datetime.datetime.now() - datetime.timedelta(days=1)
start_ts = one_day_ago.strftime("%s")
end_ts = int(time.time())
return fetch_metric_interval(metric, start_ts, end_ts)
@app.route('/fetch/<metric>/6h')
def fetch_metric_six_hours(metric):
start = datetime.datetime.now() - datetime.timedelta(hours=6)
start_ts = start.strftime("%s")
end_ts = int(time.time())
return fetch_metric_interval(metric, start_ts, end_ts)
@app.route('/fetch/<metric>/12h')
def fetch_metric_twelve_hours(metric):
start = datetime.datetime.now() - datetime.timedelta(hours=12)
start_ts = start.strftime("%s")
end_ts = int(time.time())
return fetch_metric_interval(metric, start_ts, end_ts)
@app.route('/latest')
def latest():
"""Returns a json list of all metrics received recently"""
metrics = get_redis().zrange(settings.redis_latest, 0, -1)
logger.info("Latest zset contains %d entries" % len(metrics))
json_style = []
for m in metrics:
json_style.extend(json.loads(m))
return json.dumps(json_style)
@app.route('/trim', methods=['POST'])
def trim():
"""Trims the set of latest metrics to a recent time interval"""
discard_ts = int(time.time()) - settings.redis_latest_bound
n = get_redis().zremrangebyscore(settings.redis_latest, '-inf', discard_ts)
logger.info("Trimmed %d metrics from latest zset" % n)
return "%d\n" % n
def save(metrics):
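    # Each batch is recorded twice: once in the Redis 'latest' sorted set (scored by arrival
    # time, so /latest and /trim can work on it) and once per metric in its Whisper database
    # for long-term storage.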
logger.info("Saving %d metrics" % len(metrics))
now = int(time.time())
get_redis().zadd(settings.redis_latest, now, json.dumps(metrics))
for m in metrics:
wsp = Whisper(m['metric'])
wsp.save(m['value'], m['timestamp'])
@app.route('/publish', methods=['POST'])
def publish():
return create()
@app.route('/metrics', methods=['POST'])
def create():
"""Accepts metrics from clients
Saves metrics to whisper for persistence and also stashes them in a queue
of latest metrics."""
if request.content_type != 'application/json':
abort(415) # Unsupported media type
data = request.get_json()
try:
# List or dictionary?
_ = data[0]
except KeyError:
# It's a dictionary, make it a list for consistency
data = [data,]
metrics = []
# Make sure the syntax is as expected. If not, return 400 Bad Syntax
for document in data:
clean = {}
# Make sure all the keys exist
for key in ('metric', 'value', 'timestamp',):
try:
clean[key] = document[key]
except KeyError:
return "Missing required key '%s'\n" % key, 400
# Float-able value?
try:
clean['value'] = float(clean['value'])
except (ValueError, TypeError):
return "'value' (%r) must be a float\n" % clean['value'], 400
# Int-able timestamp?
try:
clean['timestamp'] = int(clean['timestamp'])
except (ValueError, TypeError):
return "'timestamp' (%r) must be an int\n" % clean['timestamp'], 400
metrics.append(clean)
save(metrics)
# Created
return "Saved %d metrics\n" % len(metrics), 201
def delete(metric):
"""Deletes the Whisper database for the specified metric"""
path = Whisper.make_db_path(Whisper.make_db_name(metric))
if not os.path.isfile(path):
abort(404) # Not found
logger.info("Deleting '%s' at '%s'" % (metric, path))
os.remove(path)
try:
os.removedirs(os.path.dirname(path))
except OSError as exc:
logger.warning("Unable to remove leaf directory containing deleted Whisper file")
logger.debug("OSError: %s" % exc)
# No content
return "", 204
@app.route('/delete/<metric>', methods=['GET'])
def browser_delete(metric):
return delete(metric)
@app.route('/metrics/<metric>', methods=['DELETE'])
def api_delete(metric):
return delete(metric)
| mit | -6,117,779,647,084,826,000 | 26.095023 | 89 | 0.616232 | false |
geopython/geolinks | tests/run_tests.py | 1 | 2016 | # =================================================================
#
# Authors: Tom Kralidis <[email protected]>
#
# Copyright (c) 2019 Tom Kralidis
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
import unittest
import json
from geolinks import sniff_link
class GeolinksTest(unittest.TestCase):
"""Test suite for package Foo"""
def setUp(self):
"""setup test fixtures, etc."""
with open('test_data.json') as f:
self.test_data = json.load(f)
def tearDown(self):
"""return to pristine state"""
pass
def test_link_types(self):
"""simple link type tests"""
for test in self.test_data['test_data']:
self.assertEqual(sniff_link(test['link']), test['expected'],
'Expected %s and %s to be equal' %
(test['link'], test['expected']))
if __name__ == '__main__':
unittest.main()
| mit | -5,451,843,022,301,467,000 | 35 | 72 | 0.637897 | false |
williballenthin/synapse | synapse/__init__.py | 1 | 1264 | '''
The synapse distributed key-value hypergraph analysis framework.
'''
import os
import msgpack
import tornado
import logging
logger = logging.getLogger(__name__)
if msgpack.version < (0,4,2):
raise Exception('synapse requires msgpack >= 0.4.2')
if tornado.version_info < (3,2,2):
raise Exception('synapse requires tornado >= 3.2.2')
version = (0,0,11)
verstring = '.'.join([ str(x) for x in version ])
import synapse.lib.modules as s_modules
# load all the synapse builtin modules
s_modules.load('synapse.models.syn')
s_modules.load('synapse.models.dns')
s_modules.load('synapse.models.orgs')
s_modules.load('synapse.models.inet')
s_modules.load('synapse.models.mime')
s_modules.load('synapse.models.files')
s_modules.load('synapse.models.money')
s_modules.load('synapse.models.telco')
s_modules.load('synapse.models.crypto')
s_modules.load('synapse.models.geopol')
s_modules.load('synapse.models.person')
s_modules.load('synapse.models.temporal')
s_modules.load('synapse.models.geospace')
s_modules.load('synapse.models.av')
mods = os.getenv('SYN_MODULES')
if mods:
for name in mods.split(','):
try:
s_modules.load(name)
except Exception as e:
logger.warning('SYN_MODULES failed: %s (%s)' % (name,e))
| apache-2.0 | 1,146,409,635,272,664,300 | 27.727273 | 68 | 0.705696 | false |
GoteoFoundation/goteo-api | goteoapi/users/resources.py | 1 | 4320 | # -*- coding: utf-8 -*-
import time
from flask import g
from flask.ext.restful import fields
from flasgger.utils import swag_from
from ..ratelimit import ratelimit
from ..auth.decorators import requires_auth
from ..helpers import DateTime, marshal, bad_request
from ..base_resources import BaseItem, BaseList, Response
from ..location.models import UserLocation
from .models import User, UserLang
user_resource_fields = {
"id": fields.String,
"name": fields.String,
"node": fields.String,
"date_created": DateTime,
"profile_url": fields.String,
"profile_image_url": fields.String,
# privacy concerns here
# "latitude": fields.Float,
# "longitude": fields.Float
# "region": fields.String
}
user_full_resource_fields = user_resource_fields.copy()
user_full_resource_fields['about'] = fields.String
user_full_resource_fields['lang'] = fields.String
user_full_resource_fields['amount_public_invested'] = fields.Float
user_full_resource_fields['projects_public_invested'] = fields.Integer
user_full_resource_fields['projects_published'] = fields.Integer
user_full_resource_fields['projects_collaborated'] = fields.Integer
# TODO: extra field for Oauth Auth only with enough privileges
# user_full_resource_fields['amount_private_invested'] = fields.Float
# user_full_resource_fields['projects_private_invested'] = fields.Integer
class UsersListAPI(BaseList):
"""User list"""
@requires_auth()
@ratelimit()
@swag_from('swagger_specs/user_list.yml')
def get(self):
res = self._get()
return res.response()
def _get(self):
"""Get()'s method dirty work"""
time_start = time.time()
# For privacy, removing location filter ?
args = self.parse_args(remove=('location'))
items = []
for u in User.list(**args):
item = marshal(u, user_resource_fields)
if 'latitude' in user_resource_fields:
location = UserLocation.get(u.id)
if location:
item['latitude'] = location.latitude
item['longitude'] = location.longitude
if location.region:
item['region'] = location.region
else:
item['region'] = location.country
items.append(item)
res = Response(
starttime=time_start,
attributes={'items': items},
filters=args.items(),
total=User.total(**args)
)
return res
class UserOwnerAPI(BaseItem):
"""Authenticated User Details"""
@requires_auth(scope='private')
@ratelimit()
# @swag_from('swagger_specs/user_item.yml')
def get(self):
res = UserAPI()._get(g.user.id)
if res.ret['id'] is None:
return bad_request('User not found', 404)
return res.response()
class UserAPI(BaseItem):
"""User Details"""
@requires_auth()
@ratelimit()
@swag_from('swagger_specs/user_item.yml')
def get(self, user_id):
res = self._get(user_id)
if res.ret['id'] is None:
return bad_request('User not found', 404)
return res.response()
def _get(self, user_id):
"""Get()'s method dirty work"""
time_start = time.time()
u = User.get(user_id)
item = marshal(u, user_full_resource_fields)
if u is not None:
if 'latitude' in user_full_resource_fields:
location = UserLocation.get(u.id)
if location:
item['latitude'] = location.latitude
item['longitude'] = location.longitude
if location.region:
item['region'] = location.region
else:
item['region'] = location.country
translations = {}
translate_keys = {
k: v for k, v in user_full_resource_fields.items()
if k in UserLang.get_translate_keys()
}
for k in u.Translations:
translations[k.lang] = marshal(k, translate_keys)
item['translations'] = translations
res = Response(
starttime=time_start,
attributes=item
)
return res
| agpl-3.0 | 3,585,244,529,358,929,000 | 29.638298 | 73 | 0.585417 | false |
HiTechIronMan/openfda | openfda/res/scrape_historic.py | 1 | 6477 | #!/usr/bin/python
"""
Scrapes historic HTML RES reports into the JSON format used for the new reports.
Note that the following fields in this new record format are populated:
product-type
recalling-firm
distribution-pattern
classification
product-description
code-info
product-quantity
reason-for-recall
report-date
And the following fields in this new record format are *not* populated:
event-id
status
city
state
country
voluntary-mandated
initial-firm-notification
recall-initiation-date
Example new record:
{"product-type": "Biologics", "event-id": "40631", "status": "Terminated", "recalling-firm": "Belle Bonfils Memorial Blood Center", "city": "Denver", "state": "CO", "country": "US", "voluntary-mandated": "Voluntary: Firm Initiated", "initial-firm-notification": "E-Mail", "distribution-pattern": "Switzerland, CO", "classification": "Class II", "product-description": "Red Blood Cells Leukocytes Reduced", "code-info": "9049505", "product-quantity": "1 unit", "reason-for-recall": "Blood products, collected from a donor with a history of living in an HIV-O risk area, were distributed.", "recall-initiation-date": "08/22/2005", "report-date": "07/11/2012"}
Note: It appears that the historic RES HTML is hand coded into the FDA CMS. The HTML structure has changed over the years, so the parser below special-cases several known outliers rather than assuming one consistent layout.
"""
from bs4 import BeautifulSoup
import datetime
import logging
import re
import simplejson
SECTION_MAPPING = {
'PRODUCT': 'product-description',
'RECALLING FIRM/MANUFACTURER': 'recalling-firm',
'REASON': 'reason-for-recall',
'VOLUME OF PRODUCT IN COMMERCE': 'product-quantity',
'DISTRIBUTION': 'distribution-pattern',
'CODE': 'code-info',
}
PRODUCT_TYPES = [
'BIOLOGICS',
'FOODS',
'DRUGS',
'DEVICES',
'VETERINARY MEDICINE',
]
CLASSIFICATIONS = [
'I',
'II',
'III'
]
def strip_unicode(raw_str):
# http://stackoverflow.com/questions/10993612/python-removing-xa0-from-string
return raw_str.replace(u'\xa0', ' ')\
.replace(u'\u2013', '-')\
.replace(u'\xae', ' ')\
.replace(u'\u201c', '')\
.replace(u'\u201d', '')
def scrape_report(html):
soup = BeautifulSoup(html)
# If the page contains a timeout warning, then skip it
title = soup.find(text='Gateway Timeout')
if title:
return []
product_type = None
classification = None
report_date = None
reports = []
middle_column_div = soup.find('div',
attrs={'class': re.compile(r'middle-column')})
middle_column_text = strip_unicode(middle_column_div.get_text())
end_index = middle_column_text.find('END OF ENFORCEMENT REPORT')
middle_column_text = middle_column_text[:end_index]
# The FDA begins a pilot program seeking to expedite notifications of human
# drug product recalls to the public. The agency will modify the drug product
# section of the Enforcement Report17, published every Wednesday, to include
# actions that have been determined to be recalls, but that remain in the
# process of being classified as a Class I, II, or III action. Such recalls
# will be listed under the heading, "Recalls Pending Classification: DRUGS."
# They will be reposted with their classification once that determination has
# been made. Send comments or suggestions to [email protected].
# http://www.fda.gov/Safety/Recalls/EnforcementReports/ucm285341.htm
recall_group_re = (r'RECALLS AND FIELD CORRECTIONS: ([A-Z ]+) - CLASS '
r'(I+)|RECALLS PENDING CLASSIFICATION: (\w+)')
recall_groups = re.split(recall_group_re, middle_column_text)
for recall_group in recall_groups:
if recall_group in PRODUCT_TYPES:
product_type = recall_group
product_type = product_type[0] + product_type[1:].lower()
if product_type == 'Foods':
# Be consistent with the XML format, which uses 'Food' rather than 'Foods'
product_type = 'Food'
continue
if recall_group in CLASSIFICATIONS:
classification = 'Class ' + recall_group
continue
if not recall_group:
continue
raw_recalls = re.split('_________+', recall_group)
for raw_recall in raw_recalls:
m = re.findall('Enforcement Report for (.+)', raw_recall)
if m:
text = m[0]
# Outlier Cases, Handled explicitly since data set is static
text = text.replace('Aprl', 'April')\
.replace('February 23 ', 'February 23, 2005')\
.replace('(PDF - 874KB)', '')\
.replace(',', '')\
.strip()
# June 13 2012
try:
dt = datetime.datetime.strptime(text, '%B %d %Y')
report_date = dt.strftime('%m/%d/%Y')
except:
logging.info('Malformed Date String: ' + raw_recall)
logging.info(r'Expecting Date Format (%B %d %Y), i.e. June 13 2012')
if 'PRODUCT' in raw_recall:
# Newlines are not consistently used across reports
raw_recall = raw_recall.replace('\n', '')
recall = scrape_one_recall(raw_recall)
recall['product-type'] = product_type
recall['classification'] = classification
recall['report-date'] = report_date
reports.append(recall)
return reports
def scrape_one_recall(recall):
recall_remaining = recall
recall_section = {}
while recall_remaining:
last_section = None
last_section_index = -1
for section in SECTION_MAPPING.keys():
section_index = recall_remaining.rfind(section)
if section_index == -1:
continue
# Detect 'PRODUCT' within 'VOLUME OF PRODUCT IN COMMERCE'
if (section == 'PRODUCT' and
recall_remaining.rfind('VOLUME OF PRODUCT IN COMMERCE') ==
section_index - len('VOLUME OF ')):
continue
if last_section_index < section_index:
last_section = section
last_section_index = section_index
# No sections found, so we're done
if last_section is None:
return recall_section
offset_section_index = last_section_index + len(last_section)
last_section_text = recall_remaining[offset_section_index:].strip()
if ' The FDA begins a pilot program' in last_section_text:
i = last_section_text.find(' The FDA begins a pilot program')
last_section_text = last_section_text[0:i]
recall_section[SECTION_MAPPING[last_section]] = last_section_text
recall_remaining = recall_remaining[0:last_section_index]
return recall_section
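# Illustrative sketch (hypothetical input, not taken from the data set):
# scrape_one_recall() walks the raw text from the rightmost known section
# header to the leftmost, mapping each header through SECTION_MAPPING, so
#
#     scrape_one_recall('PRODUCT Red Blood Cells CODE 9049505 REASON Possible '
#                       'contamination VOLUME OF PRODUCT IN COMMERCE 1 unit')
#
# would return roughly:
#
#     {'product-quantity': '1 unit', 'reason-for-recall': 'Possible contamination',
#      'code-info': '9049505', 'product-description': 'Red Blood Cells'}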
| cc0-1.0 | 6,793,334,345,480,814,000 | 32.734375 | 657 | 0.661417 | false |
odysseywestra/mypaint | gui/backgroundwindow.py | 1 | 15970 | # This file is part of MyPaint.
# Copyright (C) 2009-2018 by the MyPaint Development Team.
# Copyright (C) 2008-2014 by Martin Renold <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Background tile chooser dialog"""
## Imports
from __future__ import division, print_function
import os
import sys
import logging
from gettext import gettext as _
from gi.repository import Gtk
from gi.repository import GdkPixbuf
from . import pixbuflist
from . import windowing
from lib import tiledsurface
from lib import helpers
import lib.pixbuf
from lib.pycompat import unicode
from lib.pycompat import xrange
logger = logging.getLogger(__name__)
## Settings and consts
N = tiledsurface.N
DEFAULT_BACKGROUND = 'default.png'
FALLBACK_BACKGROUND = '10_soft_yellow.png'
BACKGROUNDS_SUBDIR = 'backgrounds'
RESPONSE_SAVE_AS_DEFAULT = 1
BLOAT_MAX_SIZE = 1024
## Class defs
class BackgroundWindow (windowing.Dialog):
def __init__(self):
from gui import application
app = application.get_app()
assert app is not None
self._current_background_pixbuf = None # set when changed
flags = Gtk.DialogFlags.DESTROY_WITH_PARENT
buttons = [
_('Save as Default'), RESPONSE_SAVE_AS_DEFAULT,
Gtk.STOCK_OK, Gtk.ResponseType.ACCEPT,
]
windowing.Dialog.__init__(
self,
app=app,
title=_('Background'),
parent=app.drawWindow,
flags=flags,
buttons=buttons,
)
# Set up window.
self.connect('response', self._response_cb)
notebook = self.nb = Gtk.Notebook()
self.vbox.pack_start(notebook, True, True, 0)
# Set up patterns tab.
patterns_scroll = Gtk.ScrolledWindow()
patterns_scroll.set_policy(
Gtk.PolicyType.NEVER,
Gtk.PolicyType.AUTOMATIC,
)
notebook.append_page(patterns_scroll, Gtk.Label(_('Pattern')))
self.bgl = BackgroundList(self)
patterns_scroll.add_with_viewport(self.bgl)
self.connect("realize", self._realize_cb)
self.connect("show", self._show_cb)
self.connect("hide", self._hide_cb)
# Set up colors tab.
color_vbox = Gtk.VBox()
notebook.append_page(color_vbox, Gtk.Label(_('Color')))
self.cs = Gtk.ColorSelection()
self.cs.connect('color-changed', self._color_changed_cb)
color_vbox.pack_start(self.cs, True, True, 0)
b = Gtk.Button(_('Add color to Patterns'))
b.connect('clicked', self._add_color_to_patterns_cb)
color_vbox.pack_start(b, False, True, 0)
def _realize_cb(self, dialog):
if not self.bgl.initialized:
self.bgl.initialize()
def _show_cb(self, dialog):
self._current_background_pixbuf = None
self.set_response_sensitive(RESPONSE_SAVE_AS_DEFAULT, False)
def _hide_cb(self, dialog):
self._current_background_pixbuf = None
def _response_cb(self, dialog, response, *args):
if response == RESPONSE_SAVE_AS_DEFAULT:
self._save_as_default_cb()
elif response == Gtk.ResponseType.ACCEPT:
self.hide()
def _color_changed_cb(self, widget):
pixbuf = self._get_selected_color_pixbuf()
self.set_background(pixbuf)
def _get_selected_color_pixbuf(self):
rgb = self.cs.get_current_color()
rgb = (rgb.red, rgb.green, rgb.blue)
rgb = (c / 0xffff for c in rgb)
pixbuf = new_blank_pixbuf(rgb, N, N)
return pixbuf
def _save_as_default_cb(self):
pixbuf = self._current_background_pixbuf
assert pixbuf is not None, "BG pixbuf was not changed."
path = os.path.join(
self.app.user_datapath,
BACKGROUNDS_SUBDIR,
DEFAULT_BACKGROUND,
)
lib.pixbuf.save(pixbuf, path, 'png')
self.hide()
def set_background(self, pixbuf):
doc = self.app.doc.model
doc.layer_stack.set_background(pixbuf, make_default=True)
self._current_background_pixbuf = pixbuf
self.set_response_sensitive(RESPONSE_SAVE_AS_DEFAULT, True)
def _add_color_to_patterns_cb(self, widget):
pixbuf = self._get_selected_color_pixbuf()
i = 1
while True:
filename = os.path.join(self.app.user_datapath,
BACKGROUNDS_SUBDIR,
'color%02d.png' % i)
if not os.path.exists(filename):
break
i += 1
lib.pixbuf.save(pixbuf, filename, 'png')
self.bgl.backgrounds.append(pixbuf)
self.bgl.update()
self.bgl.set_selected(pixbuf)
self.nb.set_current_page(0)
class BackgroundList (pixbuflist.PixbufList):
_SUFFIXES = ('.jpg', '.jpeg', '.png')
def __init__(self, win):
pixbuflist.PixbufList.__init__(
self,
None,
N, N,
namefunc=self._get_tooltip,
pixbuffunc=self._get_preview_pixbuf,
)
self.app = win.app
self.win = win
stock_path = os.path.join(self.app.datapath, BACKGROUNDS_SUBDIR)
user_path = os.path.join(self.app.user_datapath, BACKGROUNDS_SUBDIR)
if not os.path.isdir(user_path):
os.mkdir(user_path)
self._background_files = self._list_dir(stock_path)
self._background_files.sort()
self._background_files += self._list_dir(user_path)
# Exclude DEFAULT_BACKGROUND from the list shown to the user
for filename in reversed(self._background_files):
file_basename = os.path.basename(filename)
if file_basename.lower() == DEFAULT_BACKGROUND:
self._background_files.remove(filename)
self._pixbuf_tooltip = {}
self._pixbufs_scaled = {} # lazily loaded by self.initialize()
self.backgrounds = []
self.item_selected += self._item_selected_cb
@classmethod
def _list_dir(cls, path):
"""Recursively find images by suffix"""
contents = []
for dir_path, dir_subdirs, dir_files in os.walk(path):
for file_name in dir_files:
is_matched = False
file_name_lowercase = file_name.lower()
for suffix in cls._SUFFIXES:
if not file_name_lowercase.endswith(suffix):
continue
is_matched = True
break
if is_matched:
file_path = os.path.join(dir_path, file_name)
contents.append(file_path)
contents.sort(key=os.path.getmtime)
return contents
@property
def initialized(self):
return len(self.backgrounds) != 0
def initialize(self):
self.backgrounds = self._load_pixbufs(self._background_files)
self.set_itemlist(self.backgrounds)
def _load_pixbufs(self, files, exclude_default=False):
pixbufs = []
load_errors = []
for filename in files:
is_matched = False
for suffix in self._SUFFIXES:
if not filename.lower().endswith(suffix):
continue
is_matched = True
break
if not is_matched:
logger.warning(
"Excluding %r: not in %r",
filename,
self._SUFFIXES,
)
continue
pixbuf, errors = load_background(filename)
if errors:
for err in errors:
logger.error("Error loading %r: %r", filename, err)
load_errors.append(err)
continue
if os.path.basename(filename).lower() == DEFAULT_BACKGROUND:
if exclude_default:
logger.warning("Excluding %r: is default background (%r)",
filename, DEFAULT_BACKGROUND)
continue
pixbufs.append(pixbuf)
tooltip = _filename_to_display(filename)
self._pixbuf_tooltip[pixbuf] = tooltip
if load_errors:
msg = "\n\n".join(load_errors)
self.app.message_dialog(
text=_("One or more backgrounds could not be loaded"),
title=_("Error loading backgrounds"),
secondary_text=_("Please remove the unloadable files, or "
"check your libgdkpixbuf installation."),
long_text=msg,
type=Gtk.MessageType.WARNING,
flags=Gtk.DialogFlags.MODAL,
)
logger.info("Loaded %d of %d background(s), with %d error(s)",
                    len(pixbufs), len(files), len(load_errors))
return pixbufs
def _get_preview_pixbuf(self, pixbuf):
if pixbuf in self._pixbufs_scaled:
return self._pixbufs_scaled[pixbuf]
w, h = pixbuf.get_width(), pixbuf.get_height()
if w == N and h == N:
return pixbuf
assert w >= N
assert h >= N
scale = max(0.25, N / min(w, h))
scaled = new_blank_pixbuf((0, 0, 0), N, N)
pixbuf.composite(
dest=scaled,
dest_x=0, dest_y=0,
dest_width=N, dest_height=N,
offset_x=0, offset_y=0,
scale_x=scale, scale_y=scale,
interp_type=GdkPixbuf.InterpType.BILINEAR,
overall_alpha=255,
)
self.app.pixmaps.plus.composite(
dest=scaled,
dest_x=0, dest_y=0,
dest_width=N, dest_height=N,
offset_x=0, offset_y=0,
scale_x=1.0, scale_y=1.0,
interp_type=GdkPixbuf.InterpType.BILINEAR,
overall_alpha=255,
)
self._pixbufs_scaled[pixbuf] = scaled
return scaled
def _get_tooltip(self, pixbuf):
return self._pixbuf_tooltip.get(pixbuf, None)
def _item_selected_cb(self, self_, pixbuf):
self.win.set_background(pixbuf)
## Helpers
def _filename_to_display(s):
"""Convert a str filename to Unicode without obsessing too much."""
    # That said, try to be correct about Windows/POSIX weirdness.
if not isinstance(s, unicode):
if sys.platform == "win32":
enc = "UTF-8" # always, and sys.getfilesystemencoding() breaks
else:
enc = sys.getfilesystemencoding()
s = s.decode(enc, "replace")
return s
def new_blank_pixbuf(rgb, w, h):
"""Create a blank pixbuf with all pixels set to a color
:param tuple rgb: Color to blank the pixbuf to (``R,G,B``, floats)
:param int w: Width for the new pixbuf
    :param int h: Height for the new pixbuf
The returned pixbuf has no alpha channel.
"""
pixbuf = GdkPixbuf.Pixbuf.new(
GdkPixbuf.Colorspace.RGB, False, 8,
w, h,
)
r, g, b = (helpers.clamp(int(round(0xff * x)), 0, 0xff) for x in rgb)
rgba_pixel = (r << 24) + (g << 16) + (b << 8) + 0xff
pixbuf.fill(rgba_pixel)
return pixbuf
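# Illustrative example (values assumed, not part of the original module): for
# pure red, rgb=(1.0, 0.0, 0.0) maps to r, g, b = 255, 0, 0 and the packed
# fill value (255 << 24) + 0xff == 0xFF0000FF; the trailing 0xff alpha byte is
# expected by Pixbuf.fill() even though the pixbuf has no alpha channel.
#
#     red_tile = new_blank_pixbuf((1.0, 0.0, 0.0), N, N)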
def load_background(filename, bloatmax=BLOAT_MAX_SIZE):
"""Load a pixbuf, testing it for suitability as a background
:param str filename: Full path to the filename to load.
:param int bloatmax: Repeat up to this size
:rtype: tuple
The returned tuple is a pair ``(PIXBUF, ERRORS)``,
where ``ERRORS`` is a list of localized strings
describing the errors encountered,
and ``PIXBUF`` contains the loaded background pixbuf.
If there were errors, ``PIXBUF`` is None.
The MyPaint rendering engine can only manage
background layers which fit into its tile structure.
Formerly, only background images with dimensions
which were exact multiples of the tile size were permitted.
We have a couple of workarounds now:
* "Bloating" the background by repetition (pixel-perfect)
* Scaling the image down to fit (distorts the image)
"""
filename_display = _filename_to_display(filename)
load_errors = []
try:
pixbuf = GdkPixbuf.Pixbuf.new_from_file(filename)
except Exception as ex:
logger.error("Failed to load background %r: %s", filename, ex)
msg = unicode(_(
'Gdk-Pixbuf couldn\'t load "{filename}", and reported "{error}"'
))
load_errors.append(msg.format(
filename=filename_display,
error=repr(ex),
))
return (None, load_errors)
# Validity check
w, h = pixbuf.get_width(), pixbuf.get_height()
if w == 0 or h == 0:
msg = unicode(_("{filename} has zero size (w={w}, h={h})"))
load_errors.append(msg.format(
filename=filename_display,
w=w, h=h,
))
return (None, load_errors)
# Flatten
if pixbuf.get_has_alpha():
logger.warning(
"%r has an alpha channel, which should be removed manually",
filename,
)
new_pixbuf = new_blank_pixbuf((0, 0, 0), w, h)
pixbuf.composite(
dest=new_pixbuf,
dest_x=0, dest_y=0,
dest_width=w, dest_height=h,
offset_x=0, offset_y=0,
scale_x=1.0, scale_y=1.0,
interp_type=GdkPixbuf.InterpType.NEAREST,
overall_alpha=255,
)
pixbuf = new_pixbuf
logger.debug(
"Flattened %s by compositing it onto a black backdrop",
filename,
)
# Attempt to fit the image into our grid.
exact_fit = ((w % N, h % N) == (0, 0))
if not exact_fit:
logger.warning(
"%r (%dx%d) does not fit the %dx%d tile grid exactly",
filename,
w, h,
N, N,
)
repeats_x = _best_nrepeats_for_scaling(w, bloatmax)
repeats_y = _best_nrepeats_for_scaling(h, bloatmax)
if repeats_x > 1 or repeats_y > 1:
logger.info(
"Tiling %r to %dx%d (was: %dx%d, repeats: %d vert, %d horiz)",
filename,
w * repeats_x, h * repeats_y,
w, h,
repeats_x, repeats_y,
)
pixbuf = _tile_pixbuf(pixbuf, repeats_x, repeats_y)
w, h = pixbuf.get_width(), pixbuf.get_height()
if (w % N != 0) or (h % N != 0):
orig_w, orig_h = w, h
w = max(1, w // N) * N
h = max(1, h // N) * N
logger.info(
"Scaling %r to %dx%d (was: %dx%d)",
filename,
w, h,
orig_w, orig_h,
)
pixbuf = pixbuf.scale_simple(
dest_width=w, dest_height=h,
interp_type=GdkPixbuf.InterpType.BILINEAR,
)
assert (w % N == 0) and (h % N == 0)
if load_errors:
pixbuf = None
return pixbuf, load_errors
def _tile_pixbuf(pixbuf, repeats_x, repeats_y):
"""Make a repeated tiled image of a pixbuf"""
w, h = pixbuf.get_width(), pixbuf.get_height()
result = new_blank_pixbuf((0, 0, 0), repeats_x * w, repeats_y * h)
for xi in xrange(repeats_x):
for yi in xrange(repeats_y):
pixbuf.copy_area(0, 0, w, h, result, w * xi, h * yi)
return result
def _best_nrepeats_for_scaling(src_size, max_dest_size):
min_remainder = N
min_remainder_nrepeats = 1
nrepeats = 0
dest_size = 0
while dest_size <= max_dest_size:
nrepeats += 1
dest_size += src_size
remainder = dest_size % N
if remainder < min_remainder:
min_remainder_nrepeats = nrepeats
min_remainder = remainder
if remainder == 0:
break
return min_remainder_nrepeats
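# Worked example (assuming the tile size N is 64, as in stock MyPaint): for a
# 100 px wide source and bloatmax=1024 the candidate widths are 100, 200, ...,
# 1100 px; 900 px leaves the smallest remainder modulo 64 (900 == 14 * 64 + 4),
# so _best_nrepeats_for_scaling(100, 1024) returns 9, and load_background()
# then scales the 900 px strip down to 896 px (exactly 14 tiles).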
| gpl-2.0 | -2,074,244,919,298,233,000 | 32.340292 | 78 | 0.565373 | false |
anthrotype/lzcomp | Lib/lzcomp/cli.py | 1 | 3259 | #! /usr/bin/env python
"""lzcomp %s -- compression/decompression utility using the LZCOMP algorithm."""
from __future__ import print_function
import argparse
import sys
import os
import lzcomp
import platform
def get_binary_stdio(stream):
""" Return the specified standard input, output or errors stream as a
'raw' buffer object suitable for reading/writing binary data from/to it.
"""
assert stream in ['stdin', 'stdout', 'stderr'], "invalid stream name"
stdio = getattr(sys, stream)
if sys.version_info[0] < 3:
if sys.platform == 'win32':
# set I/O stream binary flag on python2.x (Windows)
runtime = platform.python_implementation()
if runtime == "PyPy":
# the msvcrt trick doesn't work in pypy, so I use fdopen
mode = "rb" if stream == "stdin" else "wb"
stdio = os.fdopen(stdio.fileno(), mode, 0)
else:
# this works with CPython -- untested on other implementations
import msvcrt
msvcrt.setmode(stdio.fileno(), os.O_BINARY)
return stdio
else:
# get 'buffer' attribute to read/write binary data on python3.x
if hasattr(stdio, 'buffer'):
return stdio.buffer
else:
orig_stdio = getattr(sys, "__%s__" % stream)
return orig_stdio.buffer
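# Illustrative usage (not part of the original tool): obtain a binary-safe
# handle to stdout and write raw bytes without newline translation, e.g.
#
#     out = get_binary_stdio('stdout')
#     out.write(b'\x1f\x8b')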
def main(args=None):
parser = argparse.ArgumentParser(
prog='lzcomp',
description=("Compression/decompression utility using the LZCOMP"
"algorithm."))
parser.add_argument('--version', action='version', version='0.1')
parser.add_argument('-i', '--input', metavar='FILE', type=str,
dest='infile', help='Input file', default=None)
parser.add_argument('-o', '--output', metavar='FILE', type=str,
dest='outfile', help='Output file', default=None)
parser.add_argument('-f', '--force', action='store_true',
help='Overwrite existing output file', default=False)
parser.add_argument('-d', '--decompress', action='store_true',
help='Decompress input file', default=False)
options = parser.parse_args(args=args)
if options.infile:
if not os.path.isfile(options.infile):
parser.error('file "%s" not found' % options.infile)
with open(options.infile, "rb") as infile:
data = infile.read()
else:
if sys.stdin.isatty():
# interactive console, just quit
parser.error('no input')
infile = get_binary_stdio('stdin')
data = infile.read()
if options.outfile:
if os.path.isfile(options.outfile) and not options.force:
parser.error('output file exists')
outfile = open(options.outfile, "wb")
else:
outfile = get_binary_stdio('stdout')
try:
if options.decompress:
data = lzcomp.decompress(data)
else:
data = lzcomp.compress(data)
except lzcomp.error as e:
parser.exit(
1, 'lzcomp: error: %s: %s' % (e, options.infile or 'sys.stdin'))
outfile.write(data)
outfile.close()
if __name__ == '__main__':
main()
| apache-2.0 | 5,779,710,267,726,327,000 | 34.813187 | 80 | 0.583001 | false |
istio/tools | isotope/runner/wait.py | 1 | 4107 | # Copyright Istio Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions which block until certain conditions."""
import collections
import datetime
import logging
import subprocess
import time
from typing import Callable, List
from . import consts, sh
RETRY_INTERVAL = datetime.timedelta(seconds=5)
def until(predicate: Callable[[], bool],
retry_interval_seconds: int = RETRY_INTERVAL.seconds) -> None:
"""Calls predicate every RETRY_INTERVAL until it returns True."""
while not predicate():
time.sleep(retry_interval_seconds)
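# Example usage (hypothetical predicate, shown for illustration only):
#
#     until(lambda: os.path.exists('/tmp/ready'), retry_interval_seconds=1)
#
# polls once per second and returns as soon as the predicate becomes True.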
def until_output(args: List[str]) -> str:
output = None
while output is None:
stdout = sh.run(args).stdout
if stdout:
output = stdout
else:
time.sleep(RETRY_INTERVAL.seconds)
return output
def _until_rollouts_complete(resource_type: str, namespace: str) -> None:
proc = sh.run_kubectl(
[
'--namespace', namespace, 'get', resource_type, '-o',
'jsonpath={.items[*].metadata.name}'
],
check=True)
resources = collections.deque(proc.stdout.split(' '))
logging.info('waiting for %ss in %s (%s) to rollout', resource_type,
namespace, ', '.join(resources))
while len(resources) > 0:
resource = resources.popleft()
try:
# kubectl blocks until ready.
sh.run_kubectl(
[
'--namespace', namespace, 'rollout', 'status',
resource_type, resource
],
check=True)
except subprocess.CalledProcessError as e:
msg = 'failed to check rollout status of {}'.format(resource)
if 'watch closed' in e.stderr:
logging.debug('%s; retrying later', msg)
resources.append(resource)
else:
logging.error(msg)
def until_deployments_are_ready(
namespace: str = consts.DEFAULT_NAMESPACE) -> None:
"""Blocks until namespace's deployments' rollout statuses are complete."""
_until_rollouts_complete('deployment', namespace)
def until_stateful_sets_are_ready(
namespace: str = consts.DEFAULT_NAMESPACE) -> None:
"""Blocks until namespace's statefulsets' rollout statuses are complete."""
_until_rollouts_complete('statefulset', namespace)
def until_prometheus_has_scraped() -> None:
logging.info('allowing Prometheus time to scrape final metrics')
# Add 5 seconds for more confidence that responses to "/metrics" complete.
time.sleep(consts.PROMETHEUS_SCRAPE_INTERVAL.seconds + 5)
def until_namespace_is_deleted(
namespace: str = consts.DEFAULT_NAMESPACE) -> None:
"""Blocks until `kubectl get namespace` returns an error."""
until(lambda: _namespace_is_deleted(namespace))
def _namespace_is_deleted(namespace: str = consts.DEFAULT_NAMESPACE) -> bool:
proc = sh.run_kubectl(['get', 'namespace', namespace])
return proc.returncode != 0
def until_service_graph_is_ready() -> None:
"""Blocks until each node in the service graph reports readiness."""
until(_service_graph_is_ready)
def _service_graph_is_ready() -> bool:
proc = sh.run_kubectl(
[
'--namespace', consts.SERVICE_GRAPH_NAMESPACE, 'get', 'pods',
'--selector', consts.SERVICE_GRAPH_SERVICE_SELECTOR, '-o',
'jsonpath={.items[*].status.conditions[?(@.type=="Ready")].status}'
],
check=True)
out = proc.stdout
all_services_ready = out != '' and 'False' not in out
return all_services_ready
| apache-2.0 | 1,688,670,624,286,156,500 | 33.225 | 79 | 0.644753 | false |
KamilSzot/365_programs | 2017-04-02/sdl.py | 1 | 2280 | import os
import time
import ctypes
from math import sin, cos, pi
os.environ["PYSDL2_DLL_PATH"] = "C:\\Program Files\\Python35\\other"
import sdl2
import sdl2.ext
import sdl2.sdlttf as sdlttf
SCREEN_WIDTH = 1920
SCREEN_HEIGHT = 1080
sdl2.ext.init()
window = sdl2.ext.Window("Game (not really)", size=(SCREEN_WIDTH, SCREEN_HEIGHT), flags=sdl2.SDL_WINDOW_BORDERLESS)
renderer = sdl2.ext.Renderer(window, flags=sdl2.SDL_RENDERER_ACCELERATED)# | sdl2.SDL_RENDERER_PRESENTVSYNC
factory = sdl2.ext.SpriteFactory(sdl2.ext.TEXTURE, renderer=renderer)
spriterenderer = factory.create_sprite_render_system()
fontmanager = sdl2.ext.FontManager('Roboto-Regular.ttf')
sp = factory.from_image('heart-outline.png')
sp.x = (SCREEN_WIDTH - sp.size[0]) // 2
sp.y = (SCREEN_HEIGHT - sp.size[1]) // 2
window.show()
#sdl2.SDL_RaiseWindow(window.window)
renderer.clear((0, 0, 0, 255))
#sdl2.render.SDL_RenderPresent(spriterenderer.sdlrenderer)
black = True
frames = 0
start_time = time.time()-0.0001
msg = None
while True:
for event in sdl2.ext.get_events():
if event.type == sdl2.SDL_QUIT:
break
elif event.type == sdl2.SDL_KEYDOWN and event.key.repeat == 0:
print("Key was pressed")
elif event.type == sdl2.SDL_KEYUP and event.key.repeat == 0:
print("Key was released")
keystatus = sdl2.SDL_GetKeyboardState(None)
if keystatus[sdl2.SDL_SCANCODE_ESCAPE]:
print("the Esc key was pressed")
break
renderer.clear((80, 80, 80, 255))
# renderer.fill([(0,0,SCREEN_WIDTH,SCREEN_HEIGHT)],(80,80,80))
t = time.time()
black = not black
frames += 1
fps = 0
dt = (t-start_time)
if dt > 0:
fps = frames / (time.time()-start_time)
msg = factory.from_text('{}'.format(fps), fontmanager=fontmanager)
sp.x = (SCREEN_WIDTH - sp.size[0]) // 2 + int(sin(2*pi*t/3)*(SCREEN_WIDTH/4))
sp.y = (SCREEN_HEIGHT - sp.size[1]) // 2 + int(cos(2*pi*t/3)*(SCREEN_WIDTH/4))
spriterenderer.render([sp,msg])
# time.sleep(0.01)
# sdl2.render.SDL_RenderPresent(spriterenderer.sdlrenderer)
# sdl2.render.SDL_RenderPresent(spriterenderer.sdlrenderer)
sdl2.ext.quit()
| unlicense | -7,732,825,467,773,092,000 | 26.860759 | 115 | 0.639474 | false |
pigmej/solar | solar/cli/inputs.py | 1 | 3767 | # Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import click
import yaml
from solar.core import resource as sresource
from solar.dblayer.model import NONE
@click.group(help="Manages raw resource inputs")
def inputs():
pass
@inputs.command(help="Adds new input to resource")
@click.argument('resource')
@click.option("--name", '-n', help="Name of input")
@click.option("--value", '-v', help="Value input (yaml load will "
" be executed on it)", default=NONE)
@click.option("--schema", '-s', help="Schema for input, "
"will be guessed if not not given",
default=None)
def add(resource, name, value, schema):
r = sresource.load(resource)
value = yaml.safe_load(value)
r.input_add(name, value, schema)
return
@inputs.command(help="Removes input from resource")
@click.argument('resource')
@click.option("--name", '-n', help="Name of input")
def remove(resource, name):
r = sresource.load(resource)
r.input_delete(name)
pass
@inputs.command(help="Shows resource inputs metadata")
@click.argument('resource')
def show_meta(resource):
r = sresource.load(resource)
db_obj = r.db_obj
meta = db_obj.meta_inputs
click.echo(yaml.safe_dump(meta, default_flow_style=False))
@inputs.command(help="Allows change computable input properties")
@click.argument('resource')
@click.option("-n", '--name')
@click.option("-t", '--type', default=None)
@click.option("-f", '--func', default=None)
@click.option("-l", '--lang', default=None)
def change_computable(resource, name, func, type, lang):
r = sresource.load(resource)
r.input_computable_change(name, func, type, lang)
return True
@inputs.command(help="Shows real input values, with full path")
@click.option('-v', '--values', default=False, is_flag=True)
@click.option('-r', '--real_values', default=False, is_flag=True)
@click.option('-i', '--input', default=None)
@click.argument('resource')
def backtrack(resource, input, values, real_values):
r = sresource.load(resource)
db_obj = r.db_obj
def single(resource, name, get_val=False):
db_obj = sresource.load(resource).db_obj
se = db_obj.inputs._single_edge(name)
se = tuple(se)
if not se:
if get_val:
return dict(resource=resource,
name=name,
value=db_obj.inputs[name])
else:
return dict(resource=resource, name=name)
l = []
for (rname, rinput), _, meta in se:
l.append(dict(resource=resource, name=name))
val = single(rname, rinput, get_val)
if meta and isinstance(val, dict):
val['meta'] = meta
l.append(val)
return l
inps = {}
if input:
inps[input] = single(resource, input, values)
else:
for _inp in db_obj.inputs:
inps[_inp] = single(resource, _inp, values)
for name, values in inps.iteritems():
click.echo(yaml.safe_dump({name: values}, default_flow_style=False))
if real_values:
click.echo('! Real value: %r\n' % sresource.load(
resource).db_obj.inputs[name])
| apache-2.0 | 7,739,368,291,777,561,000 | 32.336283 | 78 | 0.63313 | false |
boriel/zxbasic | tests/arch/zx48k/optimizer/test_helpers.py | 1 | 3434 | # -*- coding: utf-8 -*-
import src.arch.zx48k.optimizer.helpers as helpers
def test_new_tmp_val():
""" Test new tmp val is different each time, and starts with the
UNKNOWN_PREFIX
"""
a, b = helpers.new_tmp_val(), helpers.new_tmp_val()
assert a != b, "Values must be different"
assert all(helpers.RE_UNK_PREFIX.match(x) for x in (a, b)), "Values do not conform the Reg.Exp."
def test_is_unknown():
assert helpers.is_unknown(None)
assert not helpers.is_unknown(helpers.UNKNOWN_PREFIX)
assert not helpers.is_unknown(helpers.UNKNOWN_PREFIX + 'a0')
assert helpers.is_unknown(helpers.UNKNOWN_PREFIX + '0')
assert helpers.is_unknown('{0}000|{0}001'.format(helpers.UNKNOWN_PREFIX))
def test_is_unknown16():
assert helpers.is_unknown16(None)
assert not helpers.is_unknown16(helpers.new_tmp_val())
assert helpers.is_unknown16(helpers.new_tmp_val16())
def test_is_unknown16_half():
a = '{}|3'.format(helpers.new_tmp_val())
assert helpers.is_unknown16(a)
def test_HL_unknowns():
val = helpers.new_tmp_val16()
assert helpers.is_unknown(val)
assert len(val.split('|')) == 2
assert all(helpers.is_unknown(x) for x in val.split('|'))
assert helpers.is_unknown(helpers.get_H_from_unknown_value(val))
assert helpers.is_unknown(helpers.get_L_from_unknown_value(val))
a, b = val.split('|')
assert a == helpers.get_H_from_unknown_value(val)
assert b == helpers.get_L_from_unknown_value(val)
def test_L16_val():
""" Test low value of an integer or unknown val is ok
"""
    # For an unknown 8 bit val, the low part is the value itself
assert helpers.is_unknown8(helpers.LO16_val(None))
tmp8 = helpers.new_tmp_val()
lo16 = helpers.LO16_val(tmp8)
assert lo16 == tmp8
    # For integers, it's just the low part
assert helpers.LO16_val('255') == '255'
assert helpers.LO16_val('256') == '0'
    # For normal unknowns16, the low part must be returned
tmp16 = helpers.new_tmp_val16()
assert helpers.LO16_val(tmp16) == tmp16.split(helpers.HL_SEP)[1]
assert helpers.is_unknown8(helpers.LO16_val(tmp16))
assert helpers.is_unknown8(helpers.LO16_val('_unknown')) # An unknown expression
def test_H16_val():
""" Test high value of an integer or unknown val is ok
"""
# For an unknown 8 bit val, the high part is always 0
assert helpers.is_unknown8(helpers.HI16_val(None))
tmp8 = helpers.new_tmp_val()
hi16 = helpers.HI16_val(tmp8)
assert hi16 == '0'
# For integers, it's just the high part
assert helpers.HI16_val('255') == '0'
assert helpers.HI16_val('256') == '1'
# For normal unknowns16, the high part must be returned
tmp16 = helpers.new_tmp_val16()
assert helpers.HI16_val(tmp16) == tmp16.split(helpers.HL_SEP)[0]
assert helpers.is_unknown8(helpers.HI16_val(tmp16))
assert helpers.is_unknown8(helpers.HI16_val('_unknown')) # An unknown expression
def test_dict_intersection():
""" Test dict intersection works ok
"""
assert not helpers.dict_intersection({}, {'a': 1})
assert helpers.dict_intersection({'a': 1}, {'c': 1, 1: 2, 'a': 1}) == {'a': 1}
assert not helpers.dict_intersection({'a': 1}, {'c': 1, 1: 2, 'a': 2})
def test_single_registers():
""" Flags also for f must be passed
"""
assert helpers.single_registers('af') == ['a', 'f']
assert helpers.single_registers(['f', 'sp']) == ['f', 'sp']
| gpl-3.0 | -1,971,068,628,921,271,800 | 33.686869 | 100 | 0.655504 | false |
dwalton76/ev3dev-lang-python | ev3dev2/display.py | 1 | 14796 | # -----------------------------------------------------------------------------
# Copyright (c) 2015 Ralph Hempel <[email protected]>
# Copyright (c) 2015 Anton Vanhoucke <[email protected]>
# Copyright (c) 2015 Denis Demidov <[email protected]>
# Copyright (c) 2015 Eric Pascual <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# -----------------------------------------------------------------------------
import sys
if sys.version_info < (3, 4):
raise SystemError('Must be using Python 3.4 or higher')
import os
import mmap
import ctypes
import logging
from PIL import Image, ImageDraw
from . import fonts
from . import get_current_platform, library_load_warning_message
from struct import pack
log = logging.getLogger(__name__)
try:
# This is a linux-specific module.
# It is required by the Display class, but failure to import it may be
# safely ignored if one just needs to run API tests on Windows.
import fcntl
except ImportError:
log.warning(library_load_warning_message("fcntl", "Display"))
class FbMem(object):
"""The framebuffer memory object.
Made of:
- the framebuffer file descriptor
- the fix screen info struct
- the var screen info struct
- the mapped memory
"""
# ------------------------------------------------------------------
# The code is adapted from
# https://github.com/LinkCareServices/cairotft/blob/master/cairotft/linuxfb.py
#
# The original code came with the following license:
# ------------------------------------------------------------------
# Copyright (c) 2012 Kurichan
#
# This program is free software. It comes without any warranty, to
# the extent permitted by applicable law. You can redistribute it
# and/or modify it under the terms of the Do What The Fuck You Want
# To Public License, Version 2, as published by Sam Hocevar. See
# http://sam.zoy.org/wtfpl/COPYING for more details.
# ------------------------------------------------------------------
__slots__ = ('fid', 'fix_info', 'var_info', 'mmap')
FBIOGET_VSCREENINFO = 0x4600
FBIOGET_FSCREENINFO = 0x4602
FB_VISUAL_MONO01 = 0
FB_VISUAL_MONO10 = 1
class FixScreenInfo(ctypes.Structure):
"""The fb_fix_screeninfo from fb.h."""
_fields_ = [
('id_name', ctypes.c_char * 16),
('smem_start', ctypes.c_ulong),
('smem_len', ctypes.c_uint32),
('type', ctypes.c_uint32),
('type_aux', ctypes.c_uint32),
('visual', ctypes.c_uint32),
('xpanstep', ctypes.c_uint16),
('ypanstep', ctypes.c_uint16),
('ywrapstep', ctypes.c_uint16),
('line_length', ctypes.c_uint32),
('mmio_start', ctypes.c_ulong),
('mmio_len', ctypes.c_uint32),
('accel', ctypes.c_uint32),
('reserved', ctypes.c_uint16 * 3),
]
class VarScreenInfo(ctypes.Structure):
class FbBitField(ctypes.Structure):
"""The fb_bitfield struct from fb.h."""
_fields_ = [
('offset', ctypes.c_uint32),
('length', ctypes.c_uint32),
('msb_right', ctypes.c_uint32),
]
def __str__(self):
return "%s (offset %s, length %s, msg_right %s)" %\
(self.__class__.__name__, self.offset, self.length, self.msb_right)
"""The fb_var_screeninfo struct from fb.h."""
_fields_ = [
('xres', ctypes.c_uint32),
('yres', ctypes.c_uint32),
('xres_virtual', ctypes.c_uint32),
('yres_virtual', ctypes.c_uint32),
('xoffset', ctypes.c_uint32),
('yoffset', ctypes.c_uint32),
('bits_per_pixel', ctypes.c_uint32),
('grayscale', ctypes.c_uint32),
('red', FbBitField),
('green', FbBitField),
('blue', FbBitField),
('transp', FbBitField),
]
def __str__(self):
return ("%sx%s at (%s,%s), bpp %s, grayscale %s, red %s, green %s, blue %s, transp %s" %
(self.xres, self.yres, self.xoffset, self.yoffset, self.bits_per_pixel, self.grayscale, self.red,
self.green, self.blue, self.transp))
def __init__(self, fbdev=None):
"""Create the FbMem framebuffer memory object."""
fid = FbMem._open_fbdev(fbdev)
fix_info = FbMem._get_fix_info(fid)
fbmmap = FbMem._map_fb_memory(fid, fix_info)
self.fid = fid
self.fix_info = fix_info
self.var_info = FbMem._get_var_info(fid)
self.mmap = fbmmap
@staticmethod
def _open_fbdev(fbdev=None):
"""Return the framebuffer file descriptor.
Try to use the FRAMEBUFFER environment variable if fbdev is
not given. Use '/dev/fb0' by default.
"""
dev = fbdev or os.getenv('FRAMEBUFFER', '/dev/fb0')
fbfid = os.open(dev, os.O_RDWR)
return fbfid
@staticmethod
def _get_fix_info(fbfid):
"""Return the fix screen info from the framebuffer file descriptor."""
fix_info = FbMem.FixScreenInfo()
fcntl.ioctl(fbfid, FbMem.FBIOGET_FSCREENINFO, fix_info)
return fix_info
@staticmethod
def _get_var_info(fbfid):
"""Return the var screen info from the framebuffer file descriptor."""
var_info = FbMem.VarScreenInfo()
fcntl.ioctl(fbfid, FbMem.FBIOGET_VSCREENINFO, var_info)
return var_info
@staticmethod
def _map_fb_memory(fbfid, fix_info):
"""Map the framebuffer memory."""
return mmap.mmap(fbfid, fix_info.smem_len, mmap.MAP_SHARED, mmap.PROT_READ | mmap.PROT_WRITE, offset=0)
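# Illustrative usage sketch (requires a real Linux framebuffer such as /dev/fb0
# and permission to open it; the values shown are assumptions):
#
#     fb = FbMem()
#     print(fb.var_info)    # e.g. "178x128 at (0,0), bpp 1, grayscale 0, ..."
#     fb.mmap[0] = 0xff     # poke a raw byte straight into display memory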
class Display(FbMem):
"""
A convenience wrapper for the FbMem class.
Provides drawing functions from the python imaging library (PIL).
"""
GRID_COLUMNS = 22
GRID_COLUMN_PIXELS = 8
GRID_ROWS = 12
GRID_ROW_PIXELS = 10
def __init__(self, desc='Display'):
FbMem.__init__(self)
self.platform = get_current_platform()
if self.var_info.bits_per_pixel == 1:
im_type = "1"
elif self.platform == "ev3" and self.var_info.bits_per_pixel == 32:
im_type = "L"
elif self.var_info.bits_per_pixel == 16 or self.var_info.bits_per_pixel == 32:
im_type = "RGB"
else:
raise Exception("Not supported - platform %s with bits_per_pixel %s" %
(self.platform, self.var_info.bits_per_pixel))
self._img = Image.new(im_type, (self.fix_info.line_length * 8 // self.var_info.bits_per_pixel, self.yres),
"white")
self._draw = ImageDraw.Draw(self._img)
self.desc = desc
def __str__(self):
return self.desc
@property
def xres(self):
"""
Horizontal screen resolution
"""
return self.var_info.xres
@property
def yres(self):
"""
Vertical screen resolution
"""
return self.var_info.yres
@property
def shape(self):
"""
Dimensions of the screen.
"""
return (self.xres, self.yres)
@property
def draw(self):
"""
Returns a handle to PIL.ImageDraw.Draw class associated with the screen.
Example::
screen.draw.rectangle((10,10,60,20), fill='black')
"""
return self._draw
@property
def image(self):
"""
Returns a handle to PIL.Image class that is backing the screen. This can
be accessed for blitting images to the screen.
Example::
screen.image.paste(picture, (0, 0))
"""
return self._img
def clear(self):
"""
Clears the screen
"""
self._draw.rectangle(((0, 0), self.shape), fill="white")
def _color565(self, r, g, b):
"""Convert red, green, blue components to a 16-bit 565 RGB value. Components
should be values 0 to 255.
"""
return (((r & 0xF8) << 8) | ((g & 0xFC) << 3) | (b >> 3))
def _img_to_rgb565_bytes(self):
pixels = [self._color565(r, g, b) for (r, g, b) in self._img.getdata()]
return pack('H' * len(pixels), *pixels)
def update(self):
"""
Applies pending changes to the screen.
Nothing will be drawn on the screen until this function is called.
"""
if self.var_info.bits_per_pixel == 1:
b = self._img.tobytes("raw", "1;R")
self.mmap[:len(b)] = b
elif self.var_info.bits_per_pixel == 16:
self.mmap[:] = self._img_to_rgb565_bytes()
elif self.var_info.bits_per_pixel == 32:
self.mmap[:] = self._img.convert("RGB").tobytes("raw", "XRGB")
else:
raise Exception("Not supported - platform %s with bits_per_pixel %s" %
(self.platform, self.var_info.bits_per_pixel))
def image_filename(self, filename, clear_screen=True, x1=0, y1=0, x2=None, y2=None):
if clear_screen:
self.clear()
filename_im = Image.open(filename)
if x2 is not None and y2 is not None:
return self._img.paste(filename_im, (x1, y1, x2, y2))
else:
return self._img.paste(filename_im, (x1, y1))
def line(self, clear_screen=True, x1=10, y1=10, x2=50, y2=50, line_color='black', width=1):
"""
Draw a line from (x1, y1) to (x2, y2)
"""
if clear_screen:
self.clear()
return self.draw.line((x1, y1, x2, y2), fill=line_color, width=width)
def circle(self, clear_screen=True, x=50, y=50, radius=40, fill_color='black', outline_color='black'):
"""
Draw a circle of 'radius' centered at (x, y)
"""
if clear_screen:
self.clear()
x1 = x - radius
y1 = y - radius
x2 = x + radius
y2 = y + radius
return self.draw.ellipse((x1, y1, x2, y2), fill=fill_color, outline=outline_color)
def rectangle(self, clear_screen=True, x1=10, y1=10, x2=80, y2=40, fill_color='black', outline_color='black'):
"""
Draw a rectangle where the top left corner is at (x1, y1) and the
bottom right corner is at (x2, y2)
"""
if clear_screen:
self.clear()
return self.draw.rectangle((x1, y1, x2, y2), fill=fill_color, outline=outline_color)
def point(self, clear_screen=True, x=10, y=10, point_color='black'):
"""
Draw a single pixel at (x, y)
"""
if clear_screen:
self.clear()
return self.draw.point((x, y), fill=point_color)
def text_pixels(self, text, clear_screen=True, x=0, y=0, text_color='black', font=None):
"""
Display ``text`` starting at pixel (x, y).
The EV3 display is 178x128 pixels
- (0, 0) would be the top left corner of the display
- (89, 64) would be right in the middle of the display
``text_color`` : PIL says it supports "common HTML color names". There
are 140 HTML color names listed here that are supported by all modern
browsers. This is probably a good list to start with.
https://www.w3schools.com/colors/colors_names.asp
``font`` : can be any font displayed here
http://ev3dev-lang.readthedocs.io/projects/python-ev3dev/en/ev3dev-stretch/display.html#bitmap-fonts
- If font is a string, it is the name of a font to be loaded.
- If font is a Font object, returned from :meth:`ev3dev2.fonts.load`, then it is
used directly. This is desirable for faster display times.
"""
if clear_screen:
self.clear()
if font is not None:
if isinstance(font, str):
assert font in fonts.available(), "%s is an invalid font" % font
font = fonts.load(font)
return self.draw.text((x, y), text, fill=text_color, font=font)
else:
return self.draw.text((x, y), text, fill=text_color)
def text_grid(self, text, clear_screen=True, x=0, y=0, text_color='black', font=None):
"""
Display ``text`` starting at grid (x, y)
The EV3 display can be broken down in a grid that is 22 columns wide
and 12 rows tall. Each column is 8 pixels wide and each row is 10
pixels tall.
``text_color`` : PIL says it supports "common HTML color names". There
are 140 HTML color names listed here that are supported by all modern
browsers. This is probably a good list to start with.
https://www.w3schools.com/colors/colors_names.asp
``font`` : can be any font displayed here
http://ev3dev-lang.readthedocs.io/projects/python-ev3dev/en/ev3dev-stretch/display.html#bitmap-fonts
- If font is a string, it is the name of a font to be loaded.
- If font is a Font object, returned from :meth:`ev3dev2.fonts.load`, then it is
used directly. This is desirable for faster display times.
"""
assert 0 <= x < Display.GRID_COLUMNS,\
"grid columns must be between 0 and %d, %d was requested" %\
((Display.GRID_COLUMNS - 1, x))
assert 0 <= y < Display.GRID_ROWS,\
"grid rows must be between 0 and %d, %d was requested" %\
((Display.GRID_ROWS - 1), y)
return self.text_pixels(text, clear_screen, x * Display.GRID_COLUMN_PIXELS, y * Display.GRID_ROW_PIXELS,
text_color, font)
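    # Illustrative usage sketch (assumes an attached EV3-style 178x128 display):
    #
    #     disp = Display()
    #     disp.text_grid('Hello', x=2, y=5)   # column 2, row 5 of the 22x12 grid
    #     disp.text_pixels('Hi', x=89, y=64)  # roughly the centre of the screen
    #     disp.update()                       # nothing appears until update()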
def reset_screen(self):
self.clear()
self.update()
| mit | 8,205,199,397,252,134,000 | 33.814118 | 117 | 0.574277 | false |
Breakthru/splitbills | offset/migrations/0001_initial.py | 1 | 1369 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Mortgage',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('credit_limit', models.DecimalField(max_digits=8, decimal_places=2)),
('initial_balance', models.DecimalField(max_digits=8, decimal_places=2)),
('start_date', models.DateField(verbose_name=b'start date')),
('initial_rate', models.DecimalField(max_digits=6, decimal_places=2)),
('term', models.IntegerField(verbose_name=b'duration in months')),
],
),
migrations.CreateModel(
name='Transaction',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('amount', models.DecimalField(max_digits=8, decimal_places=2)),
('type', models.CharField(max_length=200)),
('date', models.DateField(verbose_name=b'transaction date')),
],
options={
'ordering': ['date'],
},
),
]
| gpl-3.0 | 1,168,651,718,606,197,800 | 37.027778 | 114 | 0.553689 | false |
RonnyPfannschmidt/pluggy | testing/test_pluginmanager.py | 1 | 14033 | """
``PluginManager`` unit and public API testing.
"""
import pytest
import types
import sys
from pluggy import (
PluginManager,
PluginValidationError,
HookCallError,
HookimplMarker,
HookspecMarker,
)
hookspec = HookspecMarker("example")
hookimpl = HookimplMarker("example")
def test_plugin_double_register(pm):
"""Registering the same plugin more then once isn't allowed"""
pm.register(42, name="abc")
with pytest.raises(ValueError):
pm.register(42, name="abc")
with pytest.raises(ValueError):
pm.register(42, name="def")
def test_pm(pm):
"""Basic registration with objects"""
class A(object):
pass
a1, a2 = A(), A()
pm.register(a1)
assert pm.is_registered(a1)
pm.register(a2, "hello")
assert pm.is_registered(a2)
out = pm.get_plugins()
assert a1 in out
assert a2 in out
assert pm.get_plugin("hello") == a2
assert pm.unregister(a1) == a1
assert not pm.is_registered(a1)
out = pm.list_name_plugin()
assert len(out) == 1
assert out == [("hello", a2)]
def test_has_plugin(pm):
class A(object):
pass
a1 = A()
pm.register(a1, "hello")
assert pm.is_registered(a1)
assert pm.has_plugin("hello")
def test_register_dynamic_attr(he_pm):
class A(object):
def __getattr__(self, name):
if name[0] != "_":
return 42
raise AttributeError()
a = A()
he_pm.register(a)
assert not he_pm.get_hookcallers(a)
def test_pm_name(pm):
class A(object):
pass
a1 = A()
name = pm.register(a1, name="hello")
assert name == "hello"
pm.unregister(a1)
assert pm.get_plugin(a1) is None
assert not pm.is_registered(a1)
assert not pm.get_plugins()
name2 = pm.register(a1, name="hello")
assert name2 == name
pm.unregister(name="hello")
assert pm.get_plugin(a1) is None
assert not pm.is_registered(a1)
assert not pm.get_plugins()
def test_set_blocked(pm):
class A(object):
pass
a1 = A()
name = pm.register(a1)
assert pm.is_registered(a1)
assert not pm.is_blocked(name)
pm.set_blocked(name)
assert pm.is_blocked(name)
assert not pm.is_registered(a1)
pm.set_blocked("somename")
assert pm.is_blocked("somename")
assert not pm.register(A(), "somename")
pm.unregister(name="somename")
assert pm.is_blocked("somename")
def test_register_mismatch_method(he_pm):
class hello(object):
@hookimpl
def he_method_notexists(self):
pass
plugin = hello()
he_pm.register(plugin)
with pytest.raises(PluginValidationError) as excinfo:
he_pm.check_pending()
assert excinfo.value.plugin is plugin
def test_register_mismatch_arg(he_pm):
class hello(object):
@hookimpl
def he_method1(self, qlwkje):
pass
plugin = hello()
with pytest.raises(PluginValidationError) as excinfo:
he_pm.register(plugin)
assert excinfo.value.plugin is plugin
def test_register(pm):
class MyPlugin(object):
pass
my = MyPlugin()
pm.register(my)
assert my in pm.get_plugins()
my2 = MyPlugin()
pm.register(my2)
assert set([my, my2]).issubset(pm.get_plugins())
assert pm.is_registered(my)
assert pm.is_registered(my2)
pm.unregister(my)
assert not pm.is_registered(my)
assert my not in pm.get_plugins()
def test_register_unknown_hooks(pm):
class Plugin1(object):
@hookimpl
def he_method1(self, arg):
return arg + 1
pname = pm.register(Plugin1())
class Hooks(object):
@hookspec
def he_method1(self, arg):
pass
pm.add_hookspecs(Hooks)
# assert not pm._unverified_hooks
assert pm.hook.he_method1(arg=1) == [2]
assert len(pm.get_hookcallers(pm.get_plugin(pname))) == 1
def test_register_historic(pm):
class Hooks(object):
@hookspec(historic=True)
def he_method1(self, arg):
pass
pm.add_hookspecs(Hooks)
pm.hook.he_method1.call_historic(kwargs=dict(arg=1))
out = []
class Plugin(object):
@hookimpl
def he_method1(self, arg):
out.append(arg)
pm.register(Plugin())
assert out == [1]
class Plugin2(object):
@hookimpl
def he_method1(self, arg):
out.append(arg * 10)
pm.register(Plugin2())
assert out == [1, 10]
pm.hook.he_method1.call_historic(kwargs=dict(arg=12))
assert out == [1, 10, 120, 12]
@pytest.mark.parametrize("result_callback", [True, False])
def test_with_result_memorized(pm, result_callback):
"""Verify that ``_HookCaller._maybe_apply_history()`
correctly applies the ``result_callback`` function, when provided,
to the result from calling each newly registered hook.
"""
out = []
if result_callback:
def callback(res):
out.append(res)
else:
callback = None
class Hooks(object):
@hookspec(historic=True)
def he_method1(self, arg):
pass
pm.add_hookspecs(Hooks)
class Plugin1(object):
@hookimpl
def he_method1(self, arg):
return arg * 10
pm.register(Plugin1())
he_method1 = pm.hook.he_method1
he_method1.call_historic(result_callback=callback, kwargs=dict(arg=1))
class Plugin2(object):
@hookimpl
def he_method1(self, arg):
return arg * 10
pm.register(Plugin2())
if result_callback:
assert out == [10, 10]
else:
assert out == []
def test_with_callbacks_immediately_executed(pm):
class Hooks(object):
@hookspec(historic=True)
def he_method1(self, arg):
pass
pm.add_hookspecs(Hooks)
class Plugin1(object):
@hookimpl
def he_method1(self, arg):
return arg * 10
class Plugin2(object):
@hookimpl
def he_method1(self, arg):
return arg * 20
class Plugin3(object):
@hookimpl
def he_method1(self, arg):
return arg * 30
out = []
pm.register(Plugin1())
pm.register(Plugin2())
he_method1 = pm.hook.he_method1
he_method1.call_historic(lambda res: out.append(res), dict(arg=1))
assert out == [20, 10]
pm.register(Plugin3())
assert out == [20, 10, 30]
def test_register_historic_incompat_hookwrapper(pm):
class Hooks(object):
@hookspec(historic=True)
def he_method1(self, arg):
pass
pm.add_hookspecs(Hooks)
out = []
class Plugin(object):
@hookimpl(hookwrapper=True)
def he_method1(self, arg):
out.append(arg)
with pytest.raises(PluginValidationError):
pm.register(Plugin())
def test_call_extra(pm):
class Hooks(object):
@hookspec
def he_method1(self, arg):
pass
pm.add_hookspecs(Hooks)
def he_method1(arg):
return arg * 10
out = pm.hook.he_method1.call_extra([he_method1], dict(arg=1))
assert out == [10]
def test_call_with_too_few_args(pm):
class Hooks(object):
@hookspec
def he_method1(self, arg):
pass
pm.add_hookspecs(Hooks)
class Plugin1(object):
@hookimpl
def he_method1(self, arg):
0 / 0
pm.register(Plugin1())
with pytest.raises(HookCallError):
with pytest.warns(UserWarning):
pm.hook.he_method1()
def test_subset_hook_caller(pm):
class Hooks(object):
@hookspec
def he_method1(self, arg):
pass
pm.add_hookspecs(Hooks)
out = []
class Plugin1(object):
@hookimpl
def he_method1(self, arg):
out.append(arg)
class Plugin2(object):
@hookimpl
def he_method1(self, arg):
out.append(arg * 10)
class PluginNo(object):
pass
plugin1, plugin2, plugin3 = Plugin1(), Plugin2(), PluginNo()
pm.register(plugin1)
pm.register(plugin2)
pm.register(plugin3)
pm.hook.he_method1(arg=1)
assert out == [10, 1]
out[:] = []
hc = pm.subset_hook_caller("he_method1", [plugin1])
hc(arg=2)
assert out == [20]
out[:] = []
hc = pm.subset_hook_caller("he_method1", [plugin2])
hc(arg=2)
assert out == [2]
out[:] = []
pm.unregister(plugin1)
hc(arg=2)
assert out == []
out[:] = []
pm.hook.he_method1(arg=1)
assert out == [10]
def test_add_hookspecs_nohooks(pm):
with pytest.raises(ValueError):
pm.add_hookspecs(10)
def test_reject_prefixed_module(pm):
"""Verify that a module type attribute that contains the project
    prefix in its name (in this case `'example_*'`) isn't collected
when registering a module which imports it.
"""
pm._implprefix = "example"
conftest = types.ModuleType("conftest")
src = """
def example_hook():
pass
"""
exec(src, conftest.__dict__)
conftest.example_blah = types.ModuleType("example_blah")
with pytest.deprecated_call():
name = pm.register(conftest)
assert name == "conftest"
assert getattr(pm.hook, "example_blah", None) is None
assert getattr(
pm.hook, "example_hook", None
) # conftest.example_hook should be collected
with pytest.deprecated_call():
assert pm.parse_hookimpl_opts(conftest, "example_blah") is None
assert pm.parse_hookimpl_opts(conftest, "example_hook") == {}
def test_load_setuptools_instantiation(monkeypatch, pm):
pkg_resources = pytest.importorskip("pkg_resources")
def my_iter(name):
assert name == "hello"
class EntryPoint(object):
name = "myname"
dist = None
def load(self):
class PseudoPlugin(object):
x = 42
return PseudoPlugin()
return iter([EntryPoint()])
monkeypatch.setattr(pkg_resources, "iter_entry_points", my_iter)
num = pm.load_setuptools_entrypoints("hello")
assert num == 1
plugin = pm.get_plugin("myname")
assert plugin.x == 42
assert pm.list_plugin_distinfo() == [(plugin, None)]
def test_load_setuptools_version_conflict(monkeypatch, pm):
"""Check that we properly handle a VersionConflict problem when loading entry points"""
pkg_resources = pytest.importorskip("pkg_resources")
def my_iter(name):
assert name == "hello"
class EntryPoint(object):
name = "myname"
dist = None
def load(self):
raise pkg_resources.VersionConflict("Some conflict")
return iter([EntryPoint()])
monkeypatch.setattr(pkg_resources, "iter_entry_points", my_iter)
with pytest.raises(
PluginValidationError,
match="Plugin 'myname' could not be loaded: Some conflict!",
):
pm.load_setuptools_entrypoints("hello")
def test_load_setuptools_not_installed(monkeypatch, pm):
monkeypatch.setitem(sys.modules, "pkg_resources", types.ModuleType("pkg_resources"))
with pytest.raises(ImportError):
pm.load_setuptools_entrypoints("qwe")
def test_add_tracefuncs(he_pm):
out = []
class api1(object):
@hookimpl
def he_method1(self):
out.append("he_method1-api1")
class api2(object):
@hookimpl
def he_method1(self):
out.append("he_method1-api2")
he_pm.register(api1())
he_pm.register(api2())
def before(hook_name, hook_impls, kwargs):
out.append((hook_name, list(hook_impls), kwargs))
def after(outcome, hook_name, hook_impls, kwargs):
out.append((outcome, hook_name, list(hook_impls), kwargs))
undo = he_pm.add_hookcall_monitoring(before, after)
he_pm.hook.he_method1(arg=1)
assert len(out) == 4
assert out[0][0] == "he_method1"
assert len(out[0][1]) == 2
assert isinstance(out[0][2], dict)
assert out[1] == "he_method1-api2"
assert out[2] == "he_method1-api1"
assert len(out[3]) == 4
assert out[3][1] == out[0][0]
undo()
he_pm.hook.he_method1(arg=1)
assert len(out) == 4 + 2
def test_hook_tracing(he_pm):
saveindent = []
class api1(object):
@hookimpl
def he_method1(self):
saveindent.append(he_pm.trace.root.indent)
class api2(object):
@hookimpl
def he_method1(self):
saveindent.append(he_pm.trace.root.indent)
raise ValueError()
he_pm.register(api1())
out = []
he_pm.trace.root.setwriter(out.append)
undo = he_pm.enable_tracing()
try:
indent = he_pm.trace.root.indent
he_pm.hook.he_method1(arg=1)
assert indent == he_pm.trace.root.indent
assert len(out) == 2
assert "he_method1" in out[0]
assert "finish" in out[1]
out[:] = []
he_pm.register(api2())
with pytest.raises(ValueError):
he_pm.hook.he_method1(arg=1)
assert he_pm.trace.root.indent == indent
assert saveindent[0] > indent
finally:
undo()
@pytest.mark.parametrize("include_hookspec", [True, False])
def test_prefix_hookimpl(include_hookspec):
with pytest.deprecated_call():
pm = PluginManager(hookspec.project_name, "hello_")
if include_hookspec:
class HookSpec(object):
@hookspec
def hello_myhook(self, arg1):
""" add to arg1 """
pm.add_hookspecs(HookSpec)
class Plugin(object):
def hello_myhook(self, arg1):
return arg1 + 1
with pytest.deprecated_call():
pm.register(Plugin())
pm.register(Plugin())
results = pm.hook.hello_myhook(arg1=17)
assert results == [18, 18]
def test_prefix_hookimpl_dontmatch_module():
with pytest.deprecated_call():
pm = PluginManager(hookspec.project_name, "hello_")
class BadPlugin(object):
hello_module = __import__("email")
pm.register(BadPlugin())
pm.check_pending()
| mit | 157,715,618,383,124,380 | 23.070326 | 91 | 0.600086 | false |
anubhav929/eden | controllers/event.py | 1 | 15603 | # -*- coding: utf-8 -*-
"""
Event Module - Controllers
http://eden.sahanafoundation.org/wiki/BluePrintScenario
"""
module = request.controller
resourcename = request.function
if not deployment_settings.has_module(module):
raise HTTP(404, body="Module disabled: %s" % module)
# -----------------------------------------------------------------------------
def index():
""" Module's Home Page """
module_name = deployment_settings.modules[module].name_nice
response.title = module_name
return dict(module_name=module_name)
# -----------------------------------------------------------------------------
def create():
""" Redirect to event/create """
redirect(URL(f="event", args="create"))
# =============================================================================
# Events
# =============================================================================
def event():
"""
RESTful CRUD controller
An Event is an instantiation of a template
"""
# Pre-process
def prep(r):
if r.interactive:
if r.component:
if r.component.name == "req":
if r.method != "update" and r.method != "read":
# Hide fields which don't make sense in a Create form
# inc list_create (list_fields over-rides)
s3db.req_create_form_mods()
elif r.component.name == "config":
s3db.configure("gis_config",
deletable=False)
s3.crud.submit_button = T("Update")
elif r.component.name == "human_resource":
s3db.configure("event_human_resource",
list_fields=["human_resource_id"])
s3.crud.submit_button = T("Assign")
elif r.component.name == "asset":
s3db.configure("event_asset",
list_fields=["asset_id"])
s3.crud.submit_button = T("Assign")
else:
s3.crud.submit_button = T("Assign")
elif r.method != "update" and r.method != "read":
# Create or ListCreate
r.table.closed.writable = r.table.closed.readable = False
elif r.method == "update":
# Can't change details after event activation
r.table.scenario_id.writable = False
r.table.exercise.writable = False
r.table.exercise.comment = None
r.table.zero_hour.writable = False
return True
response.s3.prep = prep
# Post-process
def postp(r, output):
if r.interactive:
if r.component:
if r.component.name == "asset":
s3mgr.LABEL["DELETE"]=T("Remove")
elif r.component.name == "human_resource":
s3mgr.LABEL["DELETE"]=T("Remove")
if "msg" in deployment_settings.modules:
update_url = URL(c="hrm", f="human_resource", args=["[id]"])
s3mgr.crud.action_buttons(r, update_url=update_url)
s3mgr.crud.action_button(url = URL(f="compose",
vars = {"hrm_id": "[id]"}),
_class = "action-btn",
label = str(T("Send Notification")))
elif r.component.name == "site":
s3mgr.LABEL["DELETE"]=T("Remove")
elif r.component.name == "task":
s3mgr.LABEL["DELETE"]=T("Remove")
elif r.component.name == "activity":
s3mgr.LABEL["DELETE"]=T("Remove")
return output
response.s3.postp = postp
output = s3_rest_controller("event", resourcename,
rheader=event_rheader)
return output
# -----------------------------------------------------------------------------
def event_rheader(r):
""" Resource headers for component views """
rheader = None
if r.representation == "html":
if r.name == "event":
# Event Controller
tabs = [(T("Event Details"), None)]
if deployment_settings.has_module("project"):
tabs.append((T("Tasks"), "task"))
if deployment_settings.has_module("hrm"):
tabs.append((T("Human Resources"), "human_resource"))
if deployment_settings.has_module("asset"):
tabs.append((T("Assets"), "asset"))
tabs.append((T("Facilities"), "site"))
if deployment_settings.has_module("req"):
tabs.append((T("Requests"), "req"))
#if deployment_settings.has_module("project"):
# tabs.append((T("Activities"), "activity"))
tabs.append((T("Map Configuration"), "config"))
rheader_tabs = s3_rheader_tabs(r, tabs)
event = r.record
if event:
if event.exercise:
exercise = TH(T("EXERCISE"))
else:
exercise = TH()
if event.closed:
closed = TH(T("CLOSED"))
else:
closed = TH()
rheader = DIV(TABLE(TR(exercise),
TR(TH("%s: " % T("Name")),
event.name),
                                    TR(TH("%s: " % T("Comments")),
                                       event.comments),
TR(TH("%s: " % T("Zero Hour")),
event.zero_hour),
TR(closed),
), rheader_tabs)
return rheader
# =============================================================================
def person():
""" Person controller for AddPersonWidget """
def prep(r):
if r.representation != "s3json":
# Do not serve other representations here
return False
else:
s3mgr.show_ids = True
return True
response.s3.prep = prep
return s3_rest_controller("pr", "person")
# =============================================================================
# Messaging
# =============================================================================
def compose():
""" Send message to people/teams """
vars = request.vars
if "hrm_id" in vars:
id = vars.hrm_id
fieldname = "hrm_id"
table = s3db.pr_person
htable = s3db.hrm_human_resource
pe_id_query = (htable.id == id) & \
(htable.person_id == table.id)
title = T("Send a message to this person")
else:
session.error = T("Record not found")
redirect(URL(f="index"))
pe = db(pe_id_query).select(table.pe_id,
limitby=(0, 1)).first()
if not pe:
session.error = T("Record not found")
redirect(URL(f="index"))
pe_id = pe.pe_id
# Get the individual's communications options & preference
table = s3db.pr_contact
contact = db(table.pe_id == pe_id).select(table.contact_method,
orderby="priority",
limitby=(0, 1)).first()
if contact:
s3db.msg_outbox.pr_message_method.default = contact.contact_method
else:
session.error = T("No contact method found")
redirect(URL(f="index"))
# URL to redirect to after message sent
url = URL(c=module,
f="compose",
vars={fieldname: id})
# Create the form
output = msg.compose(recipient = pe_id,
url = url)
output["title"] = title
response.view = "msg/compose.html"
return output
# =============================================================================
# Components - no longer needed with new link-table support?
# =============================================================================
def asset():
""" RESTful CRUD controller """
# Load the Models
s3db.table("event_event")
# Parse the Request
r = s3mgr.parse_request()
link = request.vars.get("link", None)
# Pre-Process
if r.id and link:
# Go back to the asset list of the scenario/event after removing the asset
s3db.configure(r.tablename,
delete_next=URL(link,
args=[r.record["%s_id" % link],
"asset"]))
edit_btn = None
if link:
if r.method in ("update", None) and r.id:
# Store the edit & delete buttons
edit_btn = A(T("Edit"),
_href=r.url(method="update",
representation="html"),
_target="_blank")
delete_btn=A(T("Remove this asset from this event"),
_href=r.url(method="delete",
representation="html"),
_class="delete-btn")
# Switch to the other request
asset_id = r.record.asset_id
r = s3base.S3Request(s3mgr,
c="asset", f="asset",
args=[str(asset_id)],
extension=auth.permission.format)
# Execute the request
output = r()
# Post-Process
s3_action_buttons(r)
# Restore the edit & delete buttons with the correct ID
if r.representation == "plain" and edit_btn:
output.update(edit_btn=edit_btn)
elif r.interactive and "delete_btn" in output:
output.update(delete_btn=delete_btn)
return output
# -----------------------------------------------------------------------------
def human_resource():
""" RESTful CRUD controller """
# Load the Models
s3db.table("event_event")
# Parse the Request
r = s3mgr.parse_request()
link = request.vars.get("link", None)
# Pre-Process
if r.id and link:
# Go back to the human_resource list of the scenario/event after removing the human_resource
s3db.configure(r.tablename,
delete_next=URL(link,
args=[r.record["%s_id" % link],
"human_resource"]))
edit_btn = None
if link:
if r.method in ("update", None) and r.id:
# Store the edit & delete buttons
edit_btn = A(T("Edit"),
_href=r.url(method="update",
representation="html"),
_target="_blank")
delete_btn=A(T("Remove this human resource from this event"),
_href=r.url(method="delete",
representation="html"),
_class="delete-btn")
# Switch to the other request
hrm_id = r.record.human_resource_id
r = s3base.S3Request(s3mgr,
c="hrm", f="human_resource",
args=[str(hrm_id)],
extension=auth.permission.format)
# Execute the request
output = r()
# Post-Process
s3_action_buttons(r)
# Restore the edit & delete buttons with the correct ID
if r.representation == "plain" and edit_btn:
output.update(edit_btn=edit_btn)
elif r.interactive and "delete_btn" in output:
output.update(delete_btn=delete_btn)
return output
# -----------------------------------------------------------------------------
def site():
""" RESTful CRUD controller """
# Load the Models
s3db.table("event_event")
# Parse the Request
r = s3mgr.parse_request()
link = request.vars.get("link", None)
# Pre-Process
if r.id and link:
# Go back to the facility list of the scenario/event after removing the facility
s3db.configure(r.tablename,
delete_next=URL(link,
args=[r.record["%s_id" % link],
"site"]))
edit_btn = None
if link:
if r.method in ("update", None) and r.id:
# Store the edit & delete buttons
edit_btn = A(T("Edit"),
_href=r.url(method="update",
representation="html"),
_target="_blank")
delete_btn=A(T("Remove this facility from this event"),
_href=r.url(method="delete",
representation="html"),
_class="delete-btn")
# Switch to the other request
site_id = r.record.site_id
r = s3base.S3Request(s3mgr,
c="org", f="site",
args=[str(site_id)],
extension=auth.permission.format)
# Execute the request
output = r()
# Post-Process
s3_action_buttons(r)
# Restore the edit & delete buttons with the correct ID
if r.representation == "plain" and edit_btn:
output.update(edit_btn=edit_btn)
elif r.interactive and "delete_btn" in output:
output.update(delete_btn=delete_btn)
return output
# -----------------------------------------------------------------------------
def task():
""" RESTful CRUD controller """
# Load the Models
s3db.table("event_event")
# Parse the Request
r = s3mgr.parse_request()
link = request.vars.get("link", None)
# Pre-Process
if r.id and link:
# Go back to the task list of the scenario/event after removing the task
s3db.configure(r.tablename,
delete_next=URL(link,
args=[r.record["%s_id" % link],
"task"]))
edit_btn = None
if link:
if r.method in ("update", None) and r.id:
# Store the edit & delete buttons
edit_btn = A(T("Edit"),
_href=r.url(method="update",
representation="html"),
_target="_blank")
delete_btn=A(T("Remove this task from this event"),
_href=r.url(method="delete",
representation="html"),
_class="delete-btn")
# Switch to the other request
task_id = r.record.task_id
r = s3base.S3Request(s3mgr,
c="project", f="task",
args=[str(task_id)],
extension=auth.permission.format)
# Execute the request
output = r()
# Post-Process
s3_action_buttons(r)
# Restore the edit & delete buttons with the correct ID
if r.representation == "plain" and edit_btn:
output.update(edit_btn=edit_btn)
elif r.interactive and "delete_btn" in output:
output.update(delete_btn=delete_btn)
return output
# END =========================================================================
| mit | 681,451,439,806,837,100 | 34.623288 | 100 | 0.448632 | false |
briend/mypaint | gui/dialogs.py | 1 | 12908 | # This file is part of MyPaint.
# -*- coding: utf-8 -*-
# Copyright (C) 2010-2018 by the MyPaint Development Team.
# Copyright (C) 2009-2013 by Martin Renold <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Common dialog functions"""
## Imports
from __future__ import division, print_function
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import GdkPixbuf
from gettext import gettext as _
from fnmatch import fnmatch
from . import widgets
from lib.color import RGBColor
from . import uicolor
## Module constants
OVERWRITE_THIS = 1
OVERWRITE_ALL = 2
DONT_OVERWRITE_THIS = 3
DONT_OVERWRITE_ANYTHING = 4
CANCEL = 5
## Function defs
def confirm(widget, question):
window = widget.get_toplevel()
d = Gtk.MessageDialog(
window,
Gtk.DialogFlags.MODAL,
Gtk.MessageType.QUESTION,
Gtk.ButtonsType.NONE,
question)
d.add_button(Gtk.STOCK_NO, Gtk.ResponseType.REJECT)
d.add_button(Gtk.STOCK_YES, Gtk.ResponseType.ACCEPT)
d.set_default_response(Gtk.ResponseType.ACCEPT)
response = d.run()
d.destroy()
return response == Gtk.ResponseType.ACCEPT
def _entry_activate_dialog_response_cb(entry, dialog,
response=Gtk.ResponseType.ACCEPT):
dialog.response(response)
def ask_for_name(widget, title, default):
window = widget.get_toplevel()
d = Gtk.Dialog(title,
window,
Gtk.DialogFlags.MODAL,
(Gtk.STOCK_CANCEL, Gtk.ResponseType.REJECT,
Gtk.STOCK_OK, Gtk.ResponseType.ACCEPT))
d.set_position(Gtk.WindowPosition.CENTER_ON_PARENT)
hbox = Gtk.HBox()
hbox.set_property("spacing", widgets.SPACING)
hbox.set_border_width(widgets.SPACING)
d.vbox.pack_start(hbox, True, True, 0)
hbox.pack_start(Gtk.Label(label=_('Name')), False, False, 0)
if default is None:
default = ""
d.e = e = Gtk.Entry()
e.set_size_request(250, -1)
e.set_text(default)
e.select_region(0, len(default))
e.set_input_hints(Gtk.InputHints.UPPERCASE_WORDS)
e.set_input_purpose(Gtk.InputPurpose.FREE_FORM)
e.connect("activate", _entry_activate_dialog_response_cb, d)
hbox.pack_start(e, True, True, 0)
d.vbox.show_all()
if d.run() == Gtk.ResponseType.ACCEPT:
result = d.e.get_text()
if isinstance(result, bytes):
result = result.decode('utf-8')
else:
result = None
d.destroy()
return result
def error(widget, message):
window = widget.get_toplevel()
d = Gtk.MessageDialog(
window,
Gtk.DialogFlags.MODAL,
Gtk.MessageType.ERROR,
Gtk.ButtonsType.OK,
message,
)
d.run()
d.destroy()
def image_new_from_png_data(data):
loader = GdkPixbuf.PixbufLoader.new_with_type("png")
loader.write(data)
loader.close()
pixbuf = loader.get_pixbuf()
image = Gtk.Image()
image.set_from_pixbuf(pixbuf)
return image
def confirm_rewrite_brush(window, brushname, existing_preview_pixbuf,
imported_preview_data):
flags = Gtk.DialogFlags.MODAL | Gtk.DialogFlags.DESTROY_WITH_PARENT
dialog = Gtk.Dialog(_("Overwrite brush?"), window, flags)
cancel = Gtk.Button(stock=Gtk.STOCK_CANCEL)
cancel.show_all()
img_yes = Gtk.Image()
img_yes.set_from_stock(Gtk.STOCK_YES, Gtk.IconSize.BUTTON)
img_no = Gtk.Image()
img_no.set_from_stock(Gtk.STOCK_NO, Gtk.IconSize.BUTTON)
overwrite_this = Gtk.Button(label=_("Replace"))
overwrite_this.set_image(img_yes)
overwrite_this.show_all()
skip_this = Gtk.Button(label=_("Rename"))
skip_this.set_image(img_no)
skip_this.show_all()
overwrite_all = Gtk.Button(label=_("Replace all"))
overwrite_all.show_all()
skip_all = Gtk.Button(label=_("Rename all"))
skip_all.show_all()
buttons = [
(cancel, CANCEL),
(skip_all, DONT_OVERWRITE_ANYTHING),
(overwrite_all, OVERWRITE_ALL),
(skip_this, DONT_OVERWRITE_THIS),
(overwrite_this, OVERWRITE_THIS),
]
for button, code in buttons:
dialog.add_action_widget(button, code)
hbox = Gtk.HBox()
vbox_l = Gtk.VBox()
vbox_r = Gtk.VBox()
try:
preview_r = Gtk.image_new_from_pixbuf(existing_preview_pixbuf)
except AttributeError:
preview_r = Gtk.Image.new_from_pixbuf(existing_preview_pixbuf)
label_l = Gtk.Label(label=_("Imported brush"))
label_r = Gtk.Label(label=_("Existing brush"))
question = Gtk.Label(label=_(
u"<b>A brush named “{brush_name}” already exists.</b>\n"
u"Do you want to replace it, "
u"or should the new brush be renamed?"
).format(
brush_name = brushname,
))
question.set_use_markup(True)
preview_l = image_new_from_png_data(imported_preview_data)
vbox_l.pack_start(preview_l, True, True, 0)
vbox_l.pack_start(label_l, False, True, 0)
vbox_r.pack_start(preview_r, True, True, 0)
vbox_r.pack_start(label_r, False, True, 0)
hbox.pack_start(vbox_l, False, True, 0)
hbox.pack_start(question, True, True, 0)
hbox.pack_start(vbox_r, False, True, 0)
hbox.show_all()
dialog.vbox.pack_start(hbox, True, True, 0)
answer = dialog.run()
dialog.destroy()
return answer
def confirm_rewrite_group(window, groupname, deleted_groupname):
flags = Gtk.DialogFlags.MODAL | Gtk.DialogFlags.DESTROY_WITH_PARENT
dialog = Gtk.Dialog(_("Overwrite brush group?"), window, flags)
cancel = Gtk.Button(stock=Gtk.STOCK_CANCEL)
cancel.show_all()
img_yes = Gtk.Image()
img_yes.set_from_stock(Gtk.STOCK_YES, Gtk.IconSize.BUTTON)
img_no = Gtk.Image()
img_no.set_from_stock(Gtk.STOCK_NO, Gtk.IconSize.BUTTON)
overwrite_this = Gtk.Button(label=_("Replace"))
overwrite_this.set_image(img_yes)
overwrite_this.show_all()
skip_this = Gtk.Button(label=_("Rename"))
skip_this.set_image(img_no)
skip_this.show_all()
buttons = [
(cancel, CANCEL),
(skip_this, DONT_OVERWRITE_THIS),
(overwrite_this, OVERWRITE_THIS),
]
for button, code in buttons:
dialog.add_action_widget(button, code)
question = Gtk.Label(label=_(
u"<b>A group named “{groupname}” already exists.</b>\n"
u"Do you want to replace it, or should the new group be renamed?\n"
u"If you replace it, the brushes may be moved to a group called"
u" “{deleted_groupname}”."
).format(
groupname=groupname,
deleted_groupname=deleted_groupname,
))
question.set_use_markup(True)
dialog.vbox.pack_start(question, True, True, 0)
dialog.vbox.show_all()
answer = dialog.run()
dialog.destroy()
return answer
def open_dialog(title, window, filters):
"""Show a file chooser dialog.
Filters should be a list of tuples: (filtertitle, globpattern).
Returns a tuple of the form (fileformat, filename). Here
"fileformat" is the index of the filter that matched filename, or
    None if there were no matches. "filename" is None if no file was
selected.
"""
dialog = Gtk.FileChooserDialog(title, window,
Gtk.FileChooserAction.OPEN,
(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
Gtk.STOCK_OPEN, Gtk.ResponseType.OK))
dialog.set_default_response(Gtk.ResponseType.OK)
for filter_title, pattern in filters:
f = Gtk.FileFilter()
f.set_name(filter_title)
f.add_pattern(pattern)
dialog.add_filter(f)
result = (None, None)
if dialog.run() == Gtk.ResponseType.OK:
filename = dialog.get_filename()
if isinstance(filename, bytes):
filename = filename.decode('utf-8')
file_format = None
for i, (_junk, pattern) in enumerate(filters):
if fnmatch(filename, pattern):
file_format = i
break
result = (file_format, filename)
dialog.hide()
return result
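# A minimal, hypothetical call sketch for open_dialog. The title and the
# filter list below are made up for illustration only; they are not defined
# anywhere in this module:
#   file_format, filename = open_dialog(_("Open brush package"), window,
#                                       [(_("Zip files"), "*.zip")])
#   # file_format is the index of the matching filter (or None),
#   # filename is None if the user cancelled.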
def save_dialog(title, window, filters, default_format=None):
"""Shows a file save dialog.
"filters" should be a list of tuples: (filter title, glob pattern).
"default_format" may be a pair (format id, suffix).
That suffix will be added to filename if it does not match any of filters.
    Returns a tuple of the form (fileformat, filename). Here
    "fileformat" is the index of the filter that matches filename, or
    None if there are no matches. "filename" is None if no file was selected.
"""
dialog = Gtk.FileChooserDialog(title, window,
Gtk.FileChooserAction.SAVE,
(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
Gtk.STOCK_SAVE, Gtk.ResponseType.OK))
dialog.set_default_response(Gtk.ResponseType.OK)
dialog.set_do_overwrite_confirmation(True)
for filter_title, pattern in filters:
f = Gtk.FileFilter()
f.set_name(filter_title)
f.add_pattern(pattern)
dialog.add_filter(f)
result = (None, None)
while dialog.run() == Gtk.ResponseType.OK:
filename = dialog.get_filename()
if isinstance(filename, bytes):
filename = filename.decode('utf-8')
file_format = None
for i, (_junk, pattern) in enumerate(filters):
if fnmatch(filename, pattern):
file_format = i
break
if file_format is None and default_format is not None:
file_format, suffix = default_format
filename += suffix
dialog.set_current_name(filename)
dialog.response(Gtk.ResponseType.OK)
else:
result = (file_format, filename)
break
dialog.hide()
return result
def confirm_brushpack_import(packname, window=None, readme=None):
dialog = Gtk.Dialog(
_("Import brush package?"),
window,
Gtk.DialogFlags.MODAL | Gtk.DialogFlags.DESTROY_WITH_PARENT,
(
Gtk.STOCK_CANCEL,
Gtk.ResponseType.REJECT,
Gtk.STOCK_OK,
Gtk.ResponseType.ACCEPT
)
)
dialog.vbox.set_spacing(12)
if readme:
tv = Gtk.TextView()
tv.set_wrap_mode(Gtk.WrapMode.WORD_CHAR)
tv.get_buffer().set_text(readme)
tv.set_editable(False)
tv.set_left_margin(12)
tv.set_right_margin(12)
try: # methods introduced in GTK 3.18
tv.set_top_margin(6)
tv.set_bottom_margin(6)
except AttributeError:
pass
scrolls = Gtk.ScrolledWindow()
scrolls.set_size_request(640, 480)
scrolls.add(tv)
dialog.vbox.pack_start(scrolls, True, True, 0)
question = Gtk.Label(label=_(
"<b>Do you really want to import package “{brushpack_name}”?</b>"
).format(
brushpack_name=packname,
))
question.set_use_markup(True)
dialog.vbox.pack_start(question, True, True, 0)
dialog.vbox.show_all()
answer = dialog.run()
dialog.destroy()
return answer
def ask_for_color(title, color=None, previous_color=None, parent=None):
"""Returns a color chosen by the user via a modal dialog.
The dialog is a standard `Gtk.ColorSelectionDialog`.
The returned value may be `None`,
which means that the user pressed Cancel in the dialog.
"""
if color is None:
color = RGBColor(0.5, 0.5, 0.5)
if previous_color is None:
previous_color = RGBColor(0.5, 0.5, 0.5)
dialog = Gtk.ColorSelectionDialog(title)
sel = dialog.get_color_selection()
sel.set_current_color(uicolor.to_gdk_color(color))
sel.set_previous_color(uicolor.to_gdk_color(previous_color))
dialog.set_position(Gtk.WindowPosition.MOUSE)
dialog.set_modal(True)
dialog.set_resizable(False)
if parent is not None:
dialog.set_transient_for(parent)
# GNOME likes to darken the main window
# when it is set as the transient-for parent window.
    # The setting is "Attached Modal Dialogs", which defaults to ON.
# See https://github.com/mypaint/mypaint/issues/325 .
# This is unhelpful for art programs,
# but advertising the dialog
# as a utility window restores sensible behaviour.
dialog.set_type_hint(Gdk.WindowTypeHint.UTILITY)
dialog.set_default_response(Gtk.ResponseType.OK)
response_id = dialog.run()
result = None
if response_id == Gtk.ResponseType.OK:
col_gdk = sel.get_current_color()
result = uicolor.from_gdk_color(col_gdk)
dialog.destroy()
return result
| gpl-2.0 | 98,405,544,774,130,530 | 30.832099 | 78 | 0.630546 | false |
Fabien-B/Web_ASA_Sourdoire | www/page_conso.py | 1 | 6251 | template = Import('template.py' ) # import du fichier template (entete, pieds de page...)
connexion = Import('gestion_session.py')
releve = Import('releve.py')
parcelle = Import('parcelle.py')
compteur = Import('compteur.py')
exploitant = Import('Exploitant.py')
def index(error=''):
if "login" in Session() and not Session()["Id_exploitant"]:
ret=template.afficherHautPage(error, titre='Consos')
else:
ret=template.afficherHautPage(error, titre='Ma Conso')
if "login" in Session():
ret += corps_page_connecte()
else:
ret += corps_page_deconnecte()
ret += template.afficherBasPage()
return ret
def corps_page_connecte2():
html = """
<div class="container">
<div class="sixteen columns main-content">"""
if Session()["Id_exploitant"]:
html +='''<h2>Ma Consommation</h2>'''
else:
html += '''<h2>Consommation</h2><form>'''
options = get_exploitant_options()
html += '''<select id="combo_ex_conso" name="exploitants" onchange="change_exploitant_conso(this,this.selectedIndex)">
{0}
</select>'''.format(options)
html += """<script>
$(function() {
$( "#datepicker" ).datepicker();
});
</script><form>
<label for="date_debut">Date de début:</label>
<input type="text" name="date_debut" id="date_debut" onchange="update_conso()">
<label for="date_fin">date de fin:</label>
<p style="display:inline-flex;"><input style="margin-right:20px;" type="text" name="Date_fin" id="date_fin" onchange="update_conso()">
<button type="button" id="update_releves" >Ok</button></p>
</form>"""
html += conso_table(id_ex=1)
html += """</div>
</div>"""
return html
def corps_page_connecte():
id_compteurs =compteur.Compteur.get_compteurs_id(Session()["Id_exploitant"])
script = '''
<script>
$(function() {
$( "#date_fin" ).datepicker({
changeMonth: true,
changeYear: true,
showButtonPanel: true,
})
});
</script>
<script>
$(function() {
$( "#date_debut" ).datepicker({
changeMonth: true,
changeYear: true,
showButtonPanel: true,
})
});
</script>
'''
html = '''
<div class="container">
<aside class="six columns left-sidebar">'''
if Session()["Id_exploitant"]:
html +='''<h2>Ma Consommation</h2>'''
else:
html += '''<h2>Consommation</h2>'''
options = get_exploitant_options()
html += '''<select id="combo_ex_conso" name="exploitants" onchange="change_exploitant_conso(this,this.selectedIndex)">
{0}
</select>'''.format(options)
html+='''<label for="date_debut">Date de début:</label>
<input type="text" name="date_debut" id="date_debut" onchange="update_conso()">
<label for="date_fin">Date de fin:</label>
<p style="display:inline-flex;"><input style="margin-right:20px;" type="text" name="Date_fin" id="date_fin" onchange="update_conso()">
<button type="button" id="update_releves" >Ok</button></p>
</aside>
<article class="ten columns main-content">'''
html += conso_table(id_ex=1)
html+='''
<head>
<script type="text/javascript" src="../js/jquery-2.1.3.js"></script>
</head>
<link rel="stylesheet" href="../stylesheets/jquery-ui.min.css">
<script src="../js/jquery-ui.min.js"></script>
{0}
</article>
</div>
'''.format(script)
return html
def corps_page_deconnecte():
html = """
<div class="container">
<div style="text-align:center;" class="sixteen columns main-content">
<div class="sixteen columns">
Bonjour! Merci de vous connecter !
</div>
</div>
</div>
"""
return html
def conso_table(date_debut=None, date_fin=None, id_ex = None):
if not id_ex:
id_ex = Session()["Id_exploitant"]
    dico_parc_rels = get_releves(id_ex, date_debut, date_fin)  # TODO: add the start date and the end date.
debut = date_debut if date_debut else 'plus ancien'
fin = date_fin if date_fin else 'plus récent'
html = """
<table id="conso_content">
<caption>Votre consommation depuis le {} jusqu'au {} :</caption>
<tr class="titre">
<th>Parcelle</th>
<th>Compteur</th>
<th>Consommation (m<sup>3</sup>)</th>
</tr>""".format(debut, fin)
for (i, champ) in enumerate(dico_parc_rels.keys()):
(line,conso) = add_line(champ, dico_parc_rels[champ], i)
html += line
html += '</table>'
return html
def add_line(parc, rels, i):
    '''add_line(<Parcelle>, [<Releve1>, <Releve2>, ...])'''
conso = 0
nom_compteur = compteur.Compteur(parc.compteur).nom
for rel in rels:
conso += 10 * (rel.index_fin - rel.index_deb)
if i%2 == 1:
parite = "impair"
else:
parite = "pair"
line = """
<tr class="{}">
<td>{}</td>
<td>{}</td>
<td>{}</td>
</tr>""".format(parite, str.capitalize(parc.nom), nom_compteur, conso)
return (line, conso)
def get_releves(Id_exploitant, date_debut=None, date_fin=None):
releves_list = releve.Releve.get_releves_id(Id_exploitant,date_debut,date_fin)
dico = {} #{<Parcelle>:[<Releve1>,<Releve2>], ... }
parcelles = {} #{id:<Parcelle> , ...}
for (id_releve,id_parcelle) in releves_list:
if id_parcelle == -1:
return dico
if id_parcelle in parcelles.keys():
dico[parcelles[id_parcelle]].append(releve.Releve(id_releve))
else:
parcelles[id_parcelle] = parcelle.Parcelle(id_parcelle)
dico[parcelles[id_parcelle]] = [releve.Releve(id_releve)]
return dico
def traiterFormulaireConnexion(choix, login='',password=''):
return connexion.Connexion(index, choix, login, password)
def get_exploitant_options():
exploits = exploitant.Exploitant.get_all_exploitants()
options = ''
for ex in exploits:
line = '<option value="{}" >'.format(ex.id) + ex.nom + '</option>\n'
options += line
return options | lgpl-3.0 | 772,124,882,227,105,900 | 31.541667 | 142 | 0.573715 | false |
ColumbiaSC-Tech/botty_mcbotface | botty_mcbotface/botty/db/__init__.py | 1 | 2644 | import os
from botty_mcbotface import log
from sqlalchemy import create_engine, insert
from sqlalchemy.orm import Query, scoped_session, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.schema import MetaData
DB_NAME = 'botty_db'
DB_URI = 'sqlite:///botty_db'
class BottyDB:
"""Application global database for plugin use"""
def __init__(self):
self.metadata = MetaData()
self.engine = create_engine(DB_URI)
self.factory = sessionmaker(bind=self.engine, query_cls=Query)
self.session = scoped_session(self.factory)
self.base = declarative_base(metadata=self.metadata, bind=self.engine)
self.base.query = self.session.query_property(query_cls=Query)
# *** DB Initialization Functions *** #
# FIXME: For testing only.
# Once baseline relationships are established use Alembic
# if os.path.exists(DB_NAME):
# os.remove(DB_NAME)
# log.info('REMOVED::%s', DB_NAME)
# Instantiate db
db = BottyDB()
Base = db.base
# session = db.session()
# Import models here to avoid circular importing
from botty_mcbotface.botty.db.models import *
# *** Common DB Functions *** #
def crud_session_table_rows(func):
"""Session based CRUD wrapper for managing table rows in db"""
def wrapper(*args, **kwargs):
print(*args)
try:
            func(*args, **kwargs)
db.session().commit()
db.session().close()
except Exception as e:
db.session().rollback()
log.error('An error occurred while adding row: %s', e)
return wrapper
@crud_session_table_rows
def db_create_row(row):
print('db_create_row')
# sess = db.session()
db.session().add(row)
# def db_create_row(row):
# try:
# # sess = db.session()
# db.session().add(row)
# db.session().commit()
# db.session().close()
# except Exception as e:
# db.session().rollback()
# log.error('An error occurred while adding row: %s', e)
def db_read_row(table, row):
return db.session().query(table).get(row)
def db_update_row(row):
try:
# sess = db.session()
db.session().merge(row)
db.session().commit()
db.session().close()
except Exception as e:
db.session().rollback()
log.error('An error occurred while merging row: %s', e)
def db_delete_row(row):
try:
# sess = db.session()
db.session().delete(row)
db.session().commit()
db.session().close()
except Exception as e:
db.session().rollback()
log.error('An error occurred while deleting row: %s', e)
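# A minimal usage sketch of the CRUD helpers above. `User` is a hypothetical
# model assumed to be defined in models.py; adjust to whatever models exist:
#   db_create_row(User(name='botty'))
#   user = db_read_row(User, 1)
#   user.name = 'botty_mcbotface'
#   db_update_row(user)
#   db_delete_row(user)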
| mit | -5,054,454,187,015,856,000 | 26.257732 | 78 | 0.624811 | false |
rahuldan/sympy | sympy/functions/special/delta_functions.py | 2 | 16753 | from __future__ import print_function, division
from sympy.core import S, sympify, diff, oo
from sympy.core.function import Function, ArgumentIndexError
from sympy.core.relational import Eq
from sympy.core.logic import fuzzy_not
from sympy.polys.polyerrors import PolynomialError
from sympy.functions.elementary.complexes import im, sign
from sympy.functions.elementary.piecewise import Piecewise
from sympy.core.decorators import deprecated
from sympy.utilities.misc import filldedent
###############################################################################
################################ DELTA FUNCTION ###############################
###############################################################################
class DiracDelta(Function):
"""
The DiracDelta function and its derivatives.
DiracDelta is not an ordinary function. It can be rigorously defined either
as a distribution or as a measure.
DiracDelta only makes sense in definite integrals, and in particular, integrals
of the form ``Integral(f(x)*DiracDelta(x - x0), (x, a, b))``, where it equals
``f(x0)`` if ``a <= x0 <= b`` and ``0`` otherwise. Formally, DiracDelta acts
in some ways like a function that is ``0`` everywhere except at ``0``,
but in many ways it also does not. It can often be useful to treat DiracDelta
in formal ways, building up and manipulating expressions with delta functions
(which may eventually be integrated), but care must be taken to not treat it
as a real function.
SymPy's ``oo`` is similar. It only truly makes sense formally in certain contexts
(such as integration limits), but SymPy allows its use everywhere, and it tries to be
consistent with operations on it (like ``1/oo``), but it is easy to get into trouble
and get wrong results if ``oo`` is treated too much like a number.
Similarly, if DiracDelta is treated too much like a function, it is easy to get wrong
or nonsensical results.
DiracDelta function has the following properties:
1) ``diff(Heaviside(x),x) = DiracDelta(x)``
2) ``integrate(DiracDelta(x-a)*f(x),(x,-oo,oo)) = f(a)`` and
``integrate(DiracDelta(x-a)*f(x),(x,a-e,a+e)) = f(a)``
3) ``DiracDelta(x) = 0`` for all ``x != 0``
4) ``DiracDelta(g(x)) = Sum_i(DiracDelta(x-x_i)/abs(g'(x_i)))``
Where ``x_i``-s are the roots of ``g``
Derivatives of ``k``-th order of DiracDelta have the following property:
5) ``DiracDelta(x,k) = 0``, for all ``x != 0``
Examples
========
>>> from sympy import DiracDelta, diff, pi, Piecewise
>>> from sympy.abc import x, y
>>> DiracDelta(x)
DiracDelta(x)
>>> DiracDelta(1)
0
>>> DiracDelta(-1)
0
>>> DiracDelta(pi)
0
>>> DiracDelta(x - 4).subs(x, 4)
DiracDelta(0)
>>> diff(DiracDelta(x))
DiracDelta(x, 1)
>>> diff(DiracDelta(x - 1),x,2)
DiracDelta(x - 1, 2)
>>> diff(DiracDelta(x**2 - 1),x,2)
2*(2*x**2*DiracDelta(x**2 - 1, 2) + DiracDelta(x**2 - 1, 1))
>>> DiracDelta(3*x).is_simple(x)
True
>>> DiracDelta(x**2).is_simple(x)
False
>>> DiracDelta((x**2 - 1)*y).expand(diracdelta=True, wrt=x)
DiracDelta(x - 1)/(2*Abs(y)) + DiracDelta(x + 1)/(2*Abs(y))
See Also
========
Heaviside
simplify, is_simple
sympy.functions.special.tensor_functions.KroneckerDelta
References
==========
.. [1] http://mathworld.wolfram.com/DeltaFunction.html
"""
is_real = True
def fdiff(self, argindex=1):
"""
Returns the first derivative of a DiracDelta Function.
The difference between ``diff()`` and ``fdiff()`` is:-
``diff()`` is the user-level function and ``fdiff()`` is an object method.
``fdiff()`` is just a convenience method available in the ``Function`` class.
It returns the derivative of the function without considering the chain rule.
``diff(function, x)`` calls ``Function._eval_derivative`` which in turn calls
``fdiff()`` internally to compute the derivative of the function.
Examples
========
>>> from sympy import DiracDelta, diff
>>> from sympy.abc import x
>>> DiracDelta(x).fdiff()
DiracDelta(x, 1)
>>> DiracDelta(x, 1).fdiff()
DiracDelta(x, 2)
>>> DiracDelta(x**2 - 1).fdiff()
DiracDelta(x**2 - 1, 1)
>>> diff(DiracDelta(x, 1)).fdiff()
DiracDelta(x, 3)
"""
if argindex == 1:
#I didn't know if there is a better way to handle default arguments
k = 0
if len(self.args) > 1:
k = self.args[1]
return self.func(self.args[0], k + 1)
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def eval(cls, arg, k=0):
"""
Returns a simplified form or a value of DiracDelta depending on the
argument passed by the DiracDelta object.
The ``eval()`` method is automatically called when the ``DiracDelta`` class
is about to be instantiated and it returns either some simplified instance
or the unevaluated instance depending on the argument passed. In other words,
``eval()`` method is not needed to be called explicitly, it is being called
and evaluated once the object is called.
Examples
========
>>> from sympy import DiracDelta, S, Subs
>>> from sympy.abc import x
>>> DiracDelta(x)
DiracDelta(x)
>>> DiracDelta(x,1)
DiracDelta(x, 1)
>>> DiracDelta(1)
0
>>> DiracDelta(5,1)
0
>>> DiracDelta(0)
DiracDelta(0)
>>> DiracDelta(-1)
0
>>> DiracDelta(S.NaN)
nan
>>> DiracDelta(x).eval(1)
0
>>> DiracDelta(x - 100).subs(x, 5)
0
>>> DiracDelta(x - 100).subs(x, 100)
DiracDelta(0)
"""
k = sympify(k)
if not k.is_Integer or k.is_negative:
raise ValueError("Error: the second argument of DiracDelta must be \
a non-negative integer, %s given instead." % (k,))
arg = sympify(arg)
if arg is S.NaN:
return S.NaN
if arg.is_positive or arg.is_negative:
return S.Zero
if fuzzy_not(im(arg).is_zero):
raise ValueError("Function defined only for Real Values. Complex part: %s found in %s ." % (repr(im(arg)), repr(arg)) )
@deprecated(useinstead="expand(diracdelta=True, wrt=x)", deprecated_since_version="1.0.1")
def simplify(self, x):
return self.expand(diracdelta=True, wrt=x)
def _eval_expand_diracdelta(self, **hints):
"""Compute a simplified representation of the function using
property number 4. Pass wrt as a hint to expand the expression
with respect to a particular variable.
wrt is:
- a variable with respect to which a DiracDelta expression will
get expanded.
Examples
========
>>> from sympy import DiracDelta
>>> from sympy.abc import x, y
>>> DiracDelta(x*y).expand(diracdelta=True, wrt=x)
DiracDelta(x)/Abs(y)
>>> DiracDelta(x*y).expand(diracdelta=True, wrt=y)
DiracDelta(y)/Abs(x)
>>> DiracDelta(x**2 + x - 2).expand(diracdelta=True, wrt=x)
DiracDelta(x - 1)/3 + DiracDelta(x + 2)/3
See Also
========
is_simple, Diracdelta
"""
from sympy.polys.polyroots import roots
wrt = hints.get('wrt', None)
if wrt is None:
free = self.free_symbols
if len(free) == 1:
wrt = free.pop()
else:
raise TypeError(filldedent('''
When there is more than 1 free symbol or variable in the expression,
the 'wrt' keyword is required as a hint to expand when using the
DiracDelta hint.'''))
if not self.args[0].has(wrt) or (len(self.args) > 1 and self.args[1] != 0 ):
return self
try:
argroots = roots(self.args[0], wrt)
result = 0
valid = True
darg = abs(diff(self.args[0], wrt))
for r, m in argroots.items():
if r.is_real is not False and m == 1:
result += self.func(wrt - r)/darg.subs(wrt, r)
else:
# don't handle non-real and if m != 1 then
# a polynomial will have a zero in the derivative (darg)
# at r
valid = False
break
if valid:
return result
except PolynomialError:
pass
return self
def is_simple(self, x):
"""is_simple(self, x)
Tells whether the argument(args[0]) of DiracDelta is a linear
expression in x.
x can be:
- a symbol
Examples
========
>>> from sympy import DiracDelta, cos
>>> from sympy.abc import x, y
>>> DiracDelta(x*y).is_simple(x)
True
>>> DiracDelta(x*y).is_simple(y)
True
>>> DiracDelta(x**2 + x - 2).is_simple(x)
False
>>> DiracDelta(cos(x)).is_simple(x)
False
See Also
========
simplify, Diracdelta
"""
p = self.args[0].as_poly(x)
if p:
return p.degree() == 1
return False
def _eval_rewrite_as_Piecewise(self, *args):
"""Represents DiracDelta in a Piecewise form
Examples
========
>>> from sympy import DiracDelta, Piecewise, Symbol
>>> x = Symbol('x')
>>> DiracDelta(x).rewrite(Piecewise)
Piecewise((DiracDelta(0), Eq(x, 0)), (0, True))
>>> DiracDelta(x - 5).rewrite(Piecewise)
Piecewise((DiracDelta(0), Eq(x - 5, 0)), (0, True))
>>> DiracDelta(x**2 - 5).rewrite(Piecewise)
Piecewise((DiracDelta(0), Eq(x**2 - 5, 0)), (0, True))
>>> DiracDelta(x - 5, 4).rewrite(Piecewise)
DiracDelta(x - 5, 4)
"""
if len(args) == 1:
return Piecewise((DiracDelta(0), Eq(args[0], 0)), (0, True))
@staticmethod
def _latex_no_arg(printer):
return r'\delta'
def _sage_(self):
import sage.all as sage
return sage.dirac_delta(self.args[0]._sage_())
###############################################################################
############################## HEAVISIDE FUNCTION #############################
###############################################################################
class Heaviside(Function):
"""Heaviside Piecewise function
Heaviside function has the following properties [*]_:
1) ``diff(Heaviside(x),x) = DiracDelta(x)``
``( 0, if x < 0``
2) ``Heaviside(x) = < ( undefined if x==0 [*]``
``( 1, if x > 0``
3) ``Max(0,x).diff(x) = Heaviside(x)``
    .. [*] Regarding the value at 0, Mathematica defines ``H(0) = 1``,
but Maple uses ``H(0) = undefined``. Different application areas
may have specific conventions. For example, in control theory, it
is common practice to assume ``H(0) == 0`` to match the Laplace
transform of a DiracDelta distribution.
To specify the value of Heaviside at x=0, a second argument can be given.
Omit this 2nd argument or pass ``None`` to recover the default behavior.
>>> from sympy import Heaviside, S
>>> from sympy.abc import x
>>> Heaviside(9)
1
>>> Heaviside(-9)
0
>>> Heaviside(0)
Heaviside(0)
>>> Heaviside(0, S.Half)
1/2
>>> (Heaviside(x) + 1).replace(Heaviside(x), Heaviside(x, 1))
Heaviside(x, 1) + 1
See Also
========
DiracDelta
References
==========
.. [1] http://mathworld.wolfram.com/HeavisideStepFunction.html
.. [2] http://dlmf.nist.gov/1.16#iv
"""
is_real = True
def fdiff(self, argindex=1):
"""
Returns the first derivative of a Heaviside Function.
Examples
========
>>> from sympy import Heaviside, diff
>>> from sympy.abc import x
>>> Heaviside(x).fdiff()
DiracDelta(x)
>>> Heaviside(x**2 - 1).fdiff()
DiracDelta(x**2 - 1)
>>> diff(Heaviside(x)).fdiff()
DiracDelta(x, 1)
"""
if argindex == 1:
# property number 1
return DiracDelta(self.args[0])
else:
raise ArgumentIndexError(self, argindex)
def __new__(cls, arg, H0=None, **options):
if H0 is None:
return super(cls, cls).__new__(cls, arg, **options)
else:
return super(cls, cls).__new__(cls, arg, H0, **options)
@classmethod
def eval(cls, arg, H0=None):
"""
Returns a simplified form or a value of Heaviside depending on the
argument passed by the Heaviside object.
The ``eval()`` method is automatically called when the ``Heaviside`` class
is about to be instantiated and it returns either some simplified instance
or the unevaluated instance depending on the argument passed. In other words,
``eval()`` method is not needed to be called explicitly, it is being called
and evaluated once the object is called.
Examples
========
>>> from sympy import Heaviside, S
>>> from sympy.abc import x
>>> Heaviside(x)
Heaviside(x)
>>> Heaviside(19)
1
>>> Heaviside(0)
Heaviside(0)
>>> Heaviside(0, 1)
1
>>> Heaviside(-5)
0
>>> Heaviside(S.NaN)
nan
>>> Heaviside(x).eval(100)
1
>>> Heaviside(x - 100).subs(x, 5)
0
>>> Heaviside(x - 100).subs(x, 105)
1
"""
H0 = sympify(H0)
arg = sympify(arg)
if arg.is_negative:
return S.Zero
elif arg.is_positive:
return S.One
elif arg.is_zero:
return H0
elif arg is S.NaN:
return S.NaN
elif fuzzy_not(im(arg).is_zero):
raise ValueError("Function defined only for Real Values. Complex part: %s found in %s ." % (repr(im(arg)), repr(arg)) )
def _eval_rewrite_as_Piecewise(self, arg, H0=None):
"""Represents Heaviside in a Piecewise form
Examples
========
>>> from sympy import Heaviside, Piecewise, Symbol, pprint
>>> x = Symbol('x')
>>> Heaviside(x).rewrite(Piecewise)
Piecewise((0, x < 0), (Heaviside(0), Eq(x, 0)), (1, x > 0))
>>> Heaviside(x - 5).rewrite(Piecewise)
Piecewise((0, x - 5 < 0), (Heaviside(0), Eq(x - 5, 0)), (1, x - 5 > 0))
>>> Heaviside(x**2 - 1).rewrite(Piecewise)
Piecewise((0, x**2 - 1 < 0), (Heaviside(0), Eq(x**2 - 1, 0)), (1, x**2 - 1 > 0))
"""
if H0 is None:
return Piecewise((0, arg < 0), (Heaviside(0), Eq(arg, 0)), (1, arg > 0))
if H0 == 0:
return Piecewise((0, arg <= 0), (1, arg > 0))
if H0 == 1:
return Piecewise((0, arg < 0), (1, arg >= 0))
return Piecewise((0, arg < 0), (H0, Eq(arg, 0)), (1, arg > 0))
def _eval_rewrite_as_sign(self, arg, H0=None):
"""Represents the Heaviside function in the form of sign function.
The value of the second argument of Heaviside must specify Heaviside(0)
        = 1/2 for rewriting as sign to be strictly equivalent. For easier
usage, we also allow this rewriting when Heaviside(0) is undefined.
Examples
========
>>> from sympy import Heaviside, Symbol, sign
>>> x = Symbol('x', real=True)
>>> Heaviside(x).rewrite(sign)
sign(x)/2 + 1/2
>>> Heaviside(x, 0).rewrite(sign)
Heaviside(x, 0)
>>> Heaviside(x - 2).rewrite(sign)
sign(x - 2)/2 + 1/2
>>> Heaviside(x**2 - 2*x + 1).rewrite(sign)
sign(x**2 - 2*x + 1)/2 + 1/2
>>> y = Symbol('y')
>>> Heaviside(y).rewrite(sign)
Heaviside(y)
>>> Heaviside(y**2 - 2*y + 1).rewrite(sign)
Heaviside(y**2 - 2*y + 1)
See Also
========
sign
"""
if arg.is_real:
if H0 is None or H0 == S.Half:
return (sign(arg)+1)/2
def _sage_(self):
import sage.all as sage
return sage.heaviside(self.args[0]._sage_())
| bsd-3-clause | 461,175,796,935,635,400 | 29.294756 | 132 | 0.529577 | false |
nomad010/typesafe_json | type_from_json.py | 1 | 2268 | import json
import os
import sys
def print_usage():
print """./type_from_json <json_file> [ARGS]
Options
--typedef <type_name>\t\t\t Displays the type as a typedef."""
def type_of_int(obj):
return "JSONNumber"
type_of_float = type_of_int
def type_of_string(obj):
return "JSONString"
type_of_unicode = type_of_string
def type_of_bool(obj):
return "JSONBool"
class EmptyListException(Exception):
def __init__(self):
super(EmptyListException, self).__init__("Unable to decompose empty list into types")
class HeterogenousListException(Exception):
def __init__(self):
super(HeterogenousListException, self).__init__("All the types of list elements must be identical.")
def type_of_list(li):
result = "JSONHomogenousArray<"
sub_types = []
for item in li:
sub_func_name = "type_of_" + str(item.__class__.__name__)
        sub_type = globals()[sub_func_name](item)
sub_types.append(sub_type)
if len(sub_types) == 0:
raise EmptyListException()
if not all(map(lambda x: x == sub_types[0], sub_types)):
raise HeterogenousListException()
result += sub_types[0]
result += ">"
return result
def type_of_dict(obj):
result = "JSONSet<"
is_first = True
for prop, val in obj.items():
sub_func_name = "type_of_" + str(val.__class__.__name__)
if is_first:
is_first = False
else:
result += ", "
        result += "NamedType<" + globals()[sub_func_name](val) + ", str_to_list_{}(\"{}\")>".format(len(prop), prop)
result += ">"
return result
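# A rough, hand-derived example of the mapping implemented above (not the
# output of an actual run; member order may vary with dict iteration order).
# For the JSON object {"id": 1, "name": "x"} the generated type string is
# roughly:
#   JSONSet<NamedType<JSONNumber, str_to_list_2("id")>, NamedType<JSONString, str_to_list_4("name")>>
# where str_to_list_N is assumed to be provided by the TypeSafe JSON headers.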
def main(filename, args):
typedef_name = None
for i in range(len(args)):
if args[i] == "--typedef":
i += 1
typedef_name = args[i]
with open(filename) as f:
loaded_json = json.loads(f.read())
if typedef_name is not None:
sys.stdout.write("typedef ")
sys.stdout.write(type_of_dict(loaded_json))
if typedef_name is not None:
sys.stdout.write(" " + typedef_name + ";")
sys.stdout.write("\n")
if len(sys.argv) == 1:
print_usage()
else:
main(sys.argv[1], sys.argv[1:]) | mit | 2,502,793,068,441,330,000 | 23.663043 | 120 | 0.567901 | false |
Teamxrtc/webrtc-streaming-node | third_party/webrtc/src/chromium/src/build/android/incremental_install/installer.py | 1 | 7393 | #!/usr/bin/env python
#
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Install *_incremental.apk targets as well as their dependent files."""
import argparse
import glob
import logging
import os
import posixpath
import shutil
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir))
from devil.android import apk_helper
from devil.android import device_utils
from devil.android import device_errors
from devil.android.sdk import version_codes
from devil.utils import reraiser_thread
from pylib import constants
from pylib.utils import run_tests_helper
from pylib.utils import time_profile
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir, 'gyp'))
from util import build_utils
def _TransformDexPaths(paths):
"""Given paths like ["/a/b/c", "/a/c/d"], returns ["b.c", "c.d"]."""
prefix_len = len(os.path.commonprefix(paths))
return [p[prefix_len:].replace(os.sep, '.') for p in paths]
def main():
parser = argparse.ArgumentParser()
parser.add_argument('apk_path',
help='The path to the APK to install.')
parser.add_argument('--split',
action='append',
dest='splits',
help='A glob matching the apk splits. '
'Can be specified multiple times.')
parser.add_argument('--lib-dir',
help='Path to native libraries directory.')
parser.add_argument('--dex-files',
help='List of dex files to push.',
action='append',
default=[])
parser.add_argument('-d', '--device', dest='device',
help='Target device for apk to install on.')
parser.add_argument('--uninstall',
action='store_true',
default=False,
help='Remove the app and all side-loaded files.')
parser.add_argument('--output-directory',
help='Path to the root build directory.')
parser.add_argument('--no-threading',
action='store_true',
default=False,
help='Do not install and push concurrently')
parser.add_argument('-v',
'--verbose',
dest='verbose_count',
default=0,
action='count',
help='Verbose level (multiple times for more)')
args = parser.parse_args()
run_tests_helper.SetLogLevel(args.verbose_count)
constants.SetBuildType('Debug')
if args.output_directory:
constants.SetOutputDirectory(args.output_directory)
main_timer = time_profile.TimeProfile()
install_timer = time_profile.TimeProfile()
push_native_timer = time_profile.TimeProfile()
push_dex_timer = time_profile.TimeProfile()
if args.device:
# Retries are annoying when commands fail for legitimate reasons. Might want
# to enable them if this is ever used on bots though.
device = device_utils.DeviceUtils(args.device, default_retries=0)
else:
devices = device_utils.DeviceUtils.HealthyDevices(default_retries=0)
if not devices:
raise device_errors.NoDevicesError()
elif len(devices) == 1:
device = devices[0]
else:
all_devices = device_utils.DeviceUtils.parallel(devices)
msg = ('More than one device available.\n'
'Use --device=SERIAL to select a device.\n'
'Available devices:\n')
descriptions = all_devices.pMap(lambda d: d.build_description).pGet(None)
for d, desc in zip(devices, descriptions):
msg += ' %s (%s)\n' % (d, desc)
raise Exception(msg)
apk_help = apk_helper.ApkHelper(args.apk_path)
apk_package = apk_help.GetPackageName()
device_incremental_dir = '/data/local/tmp/incremental-app-%s' % apk_package
if args.uninstall:
device.Uninstall(apk_package)
device.RunShellCommand(['rm', '-rf', device_incremental_dir],
check_return=True)
logging.info('Uninstall took %s seconds.', main_timer.GetDelta())
return
if device.build_version_sdk >= version_codes.MARSHMALLOW:
if apk_help.HasIsolatedProcesses():
raise Exception('Cannot use perform incremental installs on Android M+ '
'without first disabling isolated processes. Use GN arg: '
'disable_incremental_isolated_processes=true to do so.')
# Install .apk(s) if any of them have changed.
def do_install():
install_timer.Start()
if args.splits:
splits = []
for split_glob in args.splits:
splits.extend((f for f in glob.glob(split_glob)))
device.InstallSplitApk(args.apk_path, splits, reinstall=True,
allow_cached_props=True, permissions=())
else:
device.Install(args.apk_path, reinstall=True, permissions=())
install_timer.Stop(log=False)
# Push .so and .dex files to the device (if they have changed).
def do_push_files():
if args.lib_dir:
push_native_timer.Start()
device_lib_dir = posixpath.join(device_incremental_dir, 'lib')
device.PushChangedFiles([(args.lib_dir, device_lib_dir)],
delete_device_stale=True)
push_native_timer.Stop(log=False)
if args.dex_files:
push_dex_timer.Start()
# Put all .dex files to be pushed into a temporary directory so that we
# can use delete_device_stale=True.
with build_utils.TempDir() as temp_dir:
device_dex_dir = posixpath.join(device_incremental_dir, 'dex')
# Ensure no two files have the same name.
transformed_names = _TransformDexPaths(args.dex_files)
for src_path, dest_name in zip(args.dex_files, transformed_names):
shutil.copyfile(src_path, os.path.join(temp_dir, dest_name))
device.PushChangedFiles([(temp_dir, device_dex_dir)],
delete_device_stale=True)
push_dex_timer.Stop(log=False)
# Create 2 lock files:
# * install.lock tells the app to pause on start-up (until we release it).
# * firstrun.lock is used by the app to pause all secondary processes until
# the primary process finishes loading the .dex / .so files.
def create_lock_files():
# Creates or zeros out lock files.
cmd = ('D="%s";'
'mkdir -p $D &&'
'echo -n >$D/install.lock 2>$D/firstrun.lock')
device.RunShellCommand(cmd % device_incremental_dir, check_return=True)
# The firstrun.lock is released by the app itself.
def release_installer_lock():
device.RunShellCommand('echo > %s/install.lock' % device_incremental_dir,
check_return=True)
create_lock_files()
# Concurrency here speeds things up quite a bit, but DeviceUtils hasn't
# been designed for multi-threading. Enabling only because this is a
# developer-only tool.
if args.no_threading:
do_install()
do_push_files()
else:
reraiser_thread.RunAsync((do_install, do_push_files))
release_installer_lock()
logging.info('Took %s seconds (install=%s, libs=%s, dex=%s)',
main_timer.GetDelta(), install_timer.GetDelta(),
push_native_timer.GetDelta(), push_dex_timer.GetDelta())
if __name__ == '__main__':
sys.exit(main())
| mit | 3,702,598,307,499,654,700 | 38.116402 | 80 | 0.631408 | false |
sadowski/sublime-regex-align | RegexAlign.py | 1 | 3725 | import sublime
import sublime_plugin
import re
# Takes the sublime.View and a sublime.Region object
# Return the array of strings, each element represents on line of the selection
def get_lines(view, selection_region):
multiline_text = view.substr(selection_region)
lines = re.split(r'(.*\n)', multiline_text, re.DOTALL)
return lines
# Takes the delimiter string given by the user and splits the text string
# on the delimiter. It will also take into account any spaces before
# the delimiter, which is the spacing that changes to cause text to
# appear to align in columns.
# Returns an array in which each element represents the line split up
# by the delimiter.
# Eg:
# split_on_delimiter(":a => :b", r"\s+=>")
# [":a", " =>", " :b"]
def split_on_delimiter(text, delimiter):
return re.split(r'(%s)' % delimiter, text)
# Takes tokens, which is an array of arrays in which the first level of the
# array represents each line, and the second level represents the line split
# up by the delimiter, and the delimiter string itself.
# This method will join each line together, taking into account the spacing of
# the delimiter, and produce a multi-line string to replace the original
# selection.
def merge_tokens(tokens, delimiter):
result = []
# Iterate until all sub arrays have been merged into the first sub-element
# of each array.
# Eg:
# Start: [['a', ' =>', ' b'], ['cc', ' =>', ' dd, ee', ' =>', ' ff']]
# Step1: [['a => b'], ['cc => dd, ee', ' =>', ' ff']]
# Step2: [['a => b'], ['cc => dd, ee => ff']]
#
    # Note how each array always starts with an odd number of elements and
    # in the end all the arrays are reduced into the first element, so
    # that each array ends up with exactly one element.
while max(map(len, tokens)) > 1:
        # We look at the first element of each array to determine
        # which is the longest.
max_length = max(map(len, zip(*tokens)[0]))
        # Take the first 3 tokens in the array (pre_delimiter, delimiter,
        # post_delimiter) and merge them into the first element, making sure to
        # modify the 'delimiter' element to have the correct number of spaces.
for i, token in enumerate(tokens):
if len(token) == 1: continue
num_spaces = (max_length - len(token[0]))
repl = lambda match: (' ' * num_spaces + match.group(1))
sub = re.sub(r'\s*(%s)' % delimiter, repl, token[1])
token = [token[0] + sub + token[2]] + token[3:]
tokens[i] = token
return "".join(sum(tokens, []))
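# A hand-worked sketch of merge_tokens (not a doctest; the spacing shown is
# what the code above should produce for this hypothetical input):
#   merge_tokens([['a', ' =>', ' b\n'], ['ccc', ' =>', ' d\n']], r'\s+=>')
#   returns 'a   => b\nccc => d\n'   # the '=>' columns now line up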
class RegexAlignCommand(sublime_plugin.TextCommand):
# The entry method for the function. Calls the UI input panel and delegates
# to a callback when the user enters input.
def run(self, view):
window = sublime.active_window()
window.show_input_panel("Regex", '=>', self.regex_align, None, None)
# self.regex_align('=>') # debugging
# Takes in the delimiter the user entered. Figures out which view in the
# editor is active and gets the selected text for that view. Calculates the
# replacement text for that selection and replaces the text with the newly
# aligned text. Works for each selection group in the view (in the case of
# multi-select).
def regex_align(self, delimiter):
delimiter = r'\s+%s' % delimiter # include preceding spaces
view = sublime.active_window().active_view()
edit = view.begin_edit()
for selection in view.sel():
lines = get_lines(view, selection)
tokens = []
for line in lines:
token = split_on_delimiter(line, delimiter)
tokens.append(token)
replacement_text = merge_tokens(tokens, delimiter)
view.replace(edit, selection, replacement_text)
view.end_edit(edit)
| mit | 2,233,810,149,495,154,400 | 38.62766 | 82 | 0.665772 | false |
frankyrumple/smc | controllers/test.py | 1 | 3559 | # -*- coding: utf-8 -*-
# try something like
#from ednet.ad import AD
#from ednet import Util, SequentialGUID, AppSettings, W2Py, Student
#from ednet import *
from ednet.ad import AD
from ednet.faculty import Faculty
from ednet.util import Util
from ednet.canvas import Canvas
from pytube import YouTube
import os
import ldap
import sys
def test():
#db_canvas = current.db_canvas
#sql = "select * from users"
#rows = db_canvas.executesql(sql)
#test = Canvas.EnsureAdminAccessToken()
#student_test = Canvas.EnsureStudentAccessToken("s777777")
initial_run = cache.ram("startup", lambda:True, time_expire=3600)
cache_time = cache.ram("tmptime", lambda:time.ctime(), time_expire=30)
return locals()
def index():
yt = YouTube()
# Set the video URL.
ret = ""
tmpurl = "http://youtu.be/BthR2vlqrLo!!1"
tmpurl = tmpurl.replace("/embed/", "/watch?v=")
yt.url = tmpurl
ret += str(yt.videos)
yt.filename = "tempvid"
# Get the highest res mp4
ret += str(type(yt.filter('mp4')))
f = yt.filter('mp4')[-1]
try:
f.download()
except Exception, e:
ret += str(e)
#test = {}
#ret = isinstance(test, dict)
#AD.Connect()
#cn="cn=ropulsipher,OU=CSE,OU=Faculty,DC=cbcc,DC=pencollege,DC=net"
#cn_name = AD.GetNameFromLDAPPath(cn)
#ret = ""
#p1 = AD.GetParentLDAPPath(cn, 1)
#p2 = AD.GetParentLDAPPath(cn, 2)
#r = AD._ldap.search_s(p2, ldap.SCOPE_SUBTREE, "(name=" + str(p2) + ")" , ['distinguishedName'])
#AD._errors.append("Found Object : " + str(r))
#cn = "OU=CSE," + cn
#ret = AD.MakePathCN(cn)
#ret = AD.CreateUser('walshb', cn)
#errors = AD._errors
#AD.Close()
#path = sys.path
#a = Util.ParseName("bob smith")
#b = Student.GetQuota("777777")
#c = Student.QueueActiveDirectoryImports("SIMPLE SHEET")
#d = Student.ProcessADStudent()
#e = AD.GetCN("OU=Students,DC=cbcc,DC=pencollege,DC=net")
#f = AD.GetCN("CN=s777777,OU=Students,DC=cbcc,DC=pencollege,DC=net")
#createou = AD.CreateOU("OU=StudentsTest,DC=cbcc,DC=pencollege,DC=net")
#creategroup = AD.CreateGroup("CN=TestGroup,OU=StudentsTest,DC=cbcc,DC=pencollege,DC=net")
#createdn = AD.GetDN("1st", "2nd")
#createuser = AD.CreateUser("s777777", "OU=StudentsTest,DC=cbcc,DC=pencollege,DC=net")
#addtogroup = AD.AddUserToGroup("CN=s777777,OU=StudentsTest,DC=cbcc,DC=pencollege,DC=net", "CN=TestGroup,OU=StudentsTest,DC=cbcc,DC=pencollege,DC=net")
#setpassword = AD.SetPassword("CN=s777777,OU=StudentsTest,DC=cbcc,DC=pencollege,DC=net", "SID7777772")
#enableuser = AD.EnableUser("CN=s777777,OU=StudentsTest,DC=cbcc,DC=pencollege,DC=net")
#updateuser = AD.UpdateUserInfo("CN=s777777,OU=StudentsTest,DC=cbcc,DC=pencollege,DC=net", "[email protected]", "bob", "smith", "smith, bob", description="Student account", id_number="s777777", home_drive_letter="", home_directory="", login_script="", profile_path="", ts_allow_login='FALSE')
#disableuser = AD.DisableUser("CN=s777777,OU=StudentsTest,DC=cbcc,DC=pencollege,DC=net")
#setpass = AD.SetPassword("CN=s777780,OU=Students,DC=cbcc,DC=pencollege,DC=net", "123f")
#groupdn = AD.GetLDAPObject("CN=Students,OU=StudentGroups,DC=cbcc,DC=pencollege,DC=net")
#cn = AD.GetLDAPObject("OU=StudentGroups,DC=cbcc,DC=pencollege,DC=net")
#setpass = Faculty.SetPassword("walshb", "12345612ABC")
#ad_errors = AD._errors
return dict(vars=locals())
| mit | -9,005,375,558,022,422,000 | 31.953704 | 297 | 0.652149 | false |
krother/maze_run | leftovers/part2_before_refac.py | 1 | 11978 |
# TODO: fix and check all command-line arguments
from util import debug_print
from pygame import image, Rect, Surface
from pygame.locals import KEYDOWN, KEYUP, USEREVENT
import pygame
import sys
import random
import json
import os
from collections import namedtuple
from functools import partial
import argparse
import logging
#logging.basicConfig(filename='random_levels.log', level=logging.INFO)
log = logging.getLogger('moves')
log.addHandler(logging.FileHandler('moves.log', mode='w'))
log.setLevel(logging.INFO)
eventlog = logging.getLogger('events')
eventlog.addHandler(logging.StreamHandler(sys.stderr))
#fmt='%(asctime)s %(message)s'
#eventlog.addFormatter(logging.Formatter(fmt), datefmt='%m/%d/%Y %I:%M:%S %p')
eventlog.setLevel(logging.INFO)
# ------------ CONSTANTS ----------------
CONFIG_PATH = os.path.split(__file__)[0]
TILE_POSITION_FILE = os.path.join(CONFIG_PATH, 'tiles.json')
TILE_IMAGE_FILE = os.path.join(CONFIG_PATH, '..', 'images', 'tiles.xpm')
LEVEL_FILE = 'level.txt'
SIZE = 32
SPEED = 4
Position = namedtuple("Position", ["x", "y"])
LEFT = Position(-1, 0)
RIGHT = Position(1, 0)
UP = Position(0, -1)
DOWN = Position(0, 1)
DIRECTIONS = {
276: LEFT, 275: RIGHT,
273: UP, 274: DOWN
}
KEY_REPEAT_TIME = 250
KEY_REPEATED = USEREVENT + 1
DRAW_REPEAT_TIME = 100
DRAW = USEREVENT + 2
UPDATE = USEREVENT + 3
UPDATE_REPEAT_TIME = 20
MOVE_GHOST = USEREVENT + 4
MOVE_GHOST_TIME = 500
EXIT = USEREVENT + 5
# ------------- LOADING TILES -----------
def get_tile_rect(pos):
"""Converts tile indices to a pygame.Rect"""
return Rect(pos.x*SIZE, pos.y*SIZE, SIZE, SIZE)
def load_tiles(json_fn):
"""Loads tile positions from a JSON file name"""
tiles = {}
jd = json.loads(open(json_fn).read())
for tile in jd.values():
abbrev = tile["abbrev"]
pos = Position(tile["x"], tile["y"])
rect = get_tile_rect(pos)
tiles[abbrev] = rect
return tiles
# ------------- GENERATING MAZES ------------
class MazeGenerator:
"""Generates two-dimensional mazes consisting of walls and dots."""
@staticmethod
def create_grid_string(dots, xsize, ysize):
grid = ""
for y in range(ysize):
for x in range(xsize):
grid += "." if Position(x, y) in dots else "#"
grid += "\n"
return grid
@staticmethod
def get_all_dot_positions(xsize, ysize):
return [Position(x, y) for x in range(1, xsize-1) for y in range(1, ysize-1)]
@staticmethod
def get_neighbors(pos):
return [
Position(pos.x , pos.y-1), Position(pos.x , pos.y+1),
Position(pos.x-1, pos.y ), Position(pos.x+1, pos.y ),
Position(pos.x-1, pos.y-1), Position(pos.x+1, pos.y-1),
Position(pos.x-1, pos.y+1), Position(pos.x+1, pos.y+1)
]
@staticmethod
def generate_dot_positions(xsize, ysize):
positions = MazeGenerator.get_all_dot_positions(xsize, ysize)
dots = set()
while positions != []:
pos = random.choice(positions)
neighbors = MazeGenerator.get_neighbors(pos)
free = [nb in dots for nb in neighbors]
if free.count(True) < 5:
dots.add(pos)
positions.remove(pos)
return dots
@staticmethod
def create_maze(size):
"""Returns a size.x * size.y maze as a string"""
dots = MazeGenerator.generate_dot_positions(size.x, size.y)
maze = MazeGenerator.create_grid_string(dots, size.x, size.y)
return maze
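# Illustrative helper (added for clarity, not part of the original script): the
# generator only has static methods, so a maze string can be built directly.
def _example_maze_string():
    # 12 x 7 grid of '#' walls and '.' dots, one row per line.
    return MazeGenerator.create_maze(Position(12, 7))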
# ------------- DRAWING GRIDS --------------
class TileGrid:
def __init__(self, data):
self._grid = self.parse_grid(data)
def __repr__(self):
return "\n".join(["".join(row) for row in self._grid])
def parse_grid(self, data):
"""Parses the string representation into a nested list"""
return [list(row) for row in data.strip().split("\n")]
@property
def rows(self):
return self._grid
@property
def xsize(self):
return len(self.rows[0])
@property
def ysize(self):
return len(self.rows)
def __getitem__(self, pos):
return self._grid[pos.y][pos.x]
def __setitem__(self, pos, value):
self._grid[pos.y][pos.x] = value
def __iter__(self):
"""Iterate over all grid tiles"""
for y, row in enumerate(self.rows):
for x, char in enumerate(row):
pos = Position(x, y)
yield pos, char
def find_tile(self, query='*'):
"""Returns a Position tuple for the given char on the level"""
for pos, char in self:
if char == query:
return pos
def draw_grid(self, tile_img, tiles):
"""Returns an image of a tile-based grid"""
#debug_print("drawing level", data)
img = Surface((self.xsize * SIZE, self.ysize * SIZE))
for pos, char in self:
rect = get_tile_rect(pos)
img.blit(tile_img, rect, tiles[char])
return img
# ------------- SPRITES --------------
Sprite = namedtuple("Sprite", ['tile', 'pos'])
Animation = namedtuple("Animation", ['direction', 'offset', 'callback'])
sprites = {}
animations = {}
def is_moving(actor):
return actor in animations
def move(level, direction, actor="player"):
"""Handles moves on the level"""
if is_moving(actor):
return
log.info('{} moves in direction {}/{}'.format(actor, direction.x, direction.y))
old = sprites[actor].pos
# avoids problem with finding: if '*' on map it might not be there
new = Position(old.x + direction.x, old.y + direction.y)
if level[new] in [" ", ".", "x"]:
sprites[actor] = Sprite(sprites[actor].tile, new)
start_ofs = Position(-direction.x * SIZE, -direction.y * SIZE)
check_collision()
if actor == 'player':
# this if should appear only once!
# anyway, it is an indication that a class would be better.
callback = partial(player_arrives_on_new_tile, level)
else:
callback = ghost_arrives_on_new_tile
animations[actor] = Animation(direction, start_ofs, callback)
def ghost_arrives_on_new_tile():
pass
def player_arrives_on_new_tile(level):
pos = sprites['player'].pos
tile = level[pos]
if tile == '.':
level[pos] = ' ' # eats dot
elif tile == 'x':
exit_game()
def animate_sprites():
for actor in list(animations.keys()):
ani = animations[actor]
ofs_x = ani.offset.x + ani.direction.x * SPEED
ofs_y = ani.offset.y + ani.direction.y * SPEED
new_offset = Position(ofs_x, ofs_y)
if ofs_x == 0 and ofs_y == 0:
ani.callback()
del animations[actor]
else:
animations[actor] = Animation(ani.direction, new_offset, ani.callback)
def draw_sprites(img, tile_img, tiles):
"""Returns an image of a tile-based grid"""
for actor in sprites:
sprite = sprites[actor]
rect = get_tile_rect(sprite.pos)
if actor in animations:
offset = animations[actor].offset
rect = Rect((rect.x + offset.x, rect.y + offset.y, rect.w, rect.h))
img.blit(tile_img, rect, tiles[sprite.tile])
# ------------- EVENT LOOP --------------
def event_loop(handle_key, delay=10, repeat=KEY_REPEAT_TIME):
"""Processes events and updates callbacks."""
repeat_key = None
running = True
while running:
pygame.event.pump()
event = pygame.event.poll()
if event.type == KEYDOWN:
handle_key(event.key)
repeat_key = event.key
pygame.time.set_timer(KEY_REPEATED, KEY_REPEAT_TIME)
elif event.type == KEYUP:
if event.key == repeat_key:
repeat_key = None
pygame.time.set_timer(KEY_REPEATED, 0)
elif event.type == KEY_REPEATED:
handle_key(repeat_key)
elif event.type == DRAW:
draw()
elif event.type == UPDATE:
update()
elif event.type == MOVE_GHOST:
move_ghost()
elif event.type == EXIT:
running = False
eventlog.critical('exit event received: ' + str(event))
else:
eventlog.info('unhandled event: ' + str(event))
pygame.time.delay(delay)
# ------------- GAME MECHANICS --------------
def move_ghost():
direction = random.choice([LEFT, RIGHT, UP, DOWN])
move(maze, direction, "ghost")
def check_collision():
if sprites['player'].pos == sprites['ghost'].pos:
# much better than: if level[new] in ["*", "g"]:
exit_game()
def exit_game():
eve = pygame.event.Event(EXIT)
pygame.event.post(eve)
def update():
"""Manages recurring checks in the game"""
#check_collision() # redundant at the moment
animate_sprites()
# ------------- MAIN GAME --------------
def load_level(fn):
data = open(fn).read()
maze = TileGrid(data)
return maze
def draw():
img = maze.draw_grid(tile_img, tiles)
draw_sprites(img, tile_img, tiles)
rect = Rect((0, 0, maze.xsize*SIZE, maze.ysize*SIZE))
display.blit(img, rect, rect)
pygame.display.update()
def game(key):
"""Handles key events in the game"""
direction = DIRECTIONS.get(key)
if direction:
move(maze, direction, "player") # more explicit than '*'
# design flaw: uses global variables 'display', tile_img', 'tiles'
def create_random_maze(size):
maze_data = MazeGenerator.create_maze(size)
maze = TileGrid(maze_data)
maze[Position(size.x-2, size.y-2)] = 'x'
return maze
def create_sprites(size):
sprites = {
'player': Sprite('*', Position(1, 1)),
'ghost': Sprite('g', Position(size.x-2, 1))
}
return sprites
def create_display():
pygame.init()
pygame.display.set_mode((800, 600))
display = pygame.display.get_surface()
return display
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Start the MazeRun game.')
parser.add_argument('--x', type=int, default=12,
help='x size of random maze')
parser.add_argument('--y', type=int, default=7,
help='y size of random maze')
parser.add_argument('--ghost',
#dest="MOVE_GHOST_TIME", action="store_const",
type=int, default=500,
help='ghost speed (moves every .. milliseconds)')
parser.add_argument('--load', type=str, default=None,
help='load maze from text file')
parser.add_argument('--replay', type=str, default=None,
help='log file to replay from')
parser.add_argument('-v', '--verbose', action="store_true",
help='print debugging information')
#parser.add_argument('words', type=str, nargs='+',
# help='the word for which characters are counted')
#parser.add_argument("-v", "--verbosity", type=int, choices=[0, 1, 2],
# positional arguments: without dashes
# optional: with --
# g = parser.add_mutually_exclusive_group()
# g.add_argument(...)
# g.add_argument(...)
# -d delay=50 game speed
# -g ghost speed
# -x, -y size of the grid
# -r replay from logfile
# -l load level from file
# optional arguments
# --verbose
# --help info
args = parser.parse_args()
size = Position(args.x, args.y)
display = create_display()
maze = create_random_maze(size)
log.info("random level created\n" + str(maze))
log.info("----------------\n")
#maze = load_level(LEVEL_FILE)
sprites = create_sprites(size)
tile_img = image.load(TILE_IMAGE_FILE)
tiles = load_tiles(TILE_POSITION_FILE)
pygame.time.set_timer(DRAW, DRAW_REPEAT_TIME)
pygame.time.set_timer(UPDATE, UPDATE_REPEAT_TIME)
pygame.time.set_timer(MOVE_GHOST, MOVE_GHOST_TIME)
event_loop(game)
| mit | -8,104,493,672,380,548,000 | 29.478372 | 85 | 0.584822 | false |
OrganicNavigation/Python-Environments-for-Science-and-Engineering | fit_data.py | 1 | 2118 | """
Sample script for fitting sinusoidal data. This script
uses the data file: sample_AD_data.csv
This data was logged on: 2017-03-08 12:40 pm using an arduino
A/D pin and an associated test circuit.
Author: HMokhtarzadeh
Date: April 13, 2017
"""
import pandas as pd
import numpy as np
#
# Load Data
#
path_data = '/home/hamid/Documents/Work/ONav-Hive/talks/2017-04-13-Python-Environments/sample_AD_data.csv'
df = pd.DataFrame.from_csv(path_data)
df.index = df.index*1e-6  # convert from usec to sec
df.index.names = ["time(sec)"]
print("Data loaded from: {:s}".format(path_data))
#
# Calculate and report data sampling rate.
#
dt_sec = np.median(np.diff(df.index))
print("Data Sampling rate: {:d} Hz".format(int(1./dt_sec)))
#
# Fit to sinusoid.
#
# This method is quite sensitive to a good initial guess.
# Alternate approach: http://exnumerus.blogspot.com/2010/04/how-to-fit-sine-wave-example-in-python.html
tmax_sec = .5
def func(x, time, data):
bias, amp, freq_hz, phase_rad = x
predict = amp * np.cos(2*np.pi*freq_hz*time + phase_rad)
return predict - data
from scipy.optimize import leastsq # Other optimizers tried: fmin, minimize, least_squares
x0 = [0.7, 1, 60, 0] # good iniital guess
ind = df.index < tmax_sec
xopt = leastsq(func, x0, args=(df.index[ind], df['amplitude'][ind]))
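# Added note (not in the original script): leastsq returns (fitted params, status flag).
# The cost surface of a sinusoid is highly multi-modal in frequency, so instead of the
# hand-picked 60 Hz guess a rough spectral estimate could seed the fit, e.g.:
#   detrended = df['amplitude'][ind] - df['amplitude'][ind].mean()
#   freqs = np.fft.rfftfreq(ind.sum(), d=dt_sec)
#   f0_guess = freqs[np.abs(np.fft.rfft(detrended)).argmax()]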
#
# Save initial guess, fit, and print final fit.
#
bias, amp, freq_hz, phase_rad = x0
df['fit x0'] = bias + amp * np.cos(2*np.pi*freq_hz*df.index + phase_rad)
bias, amp, freq_hz, phase_rad = xopt[0]
df['fit xopt'] = bias + amp * np.cos(2*np.pi*freq_hz*df.index + phase_rad)
print("Fitted Sinusoid: {b:.3f} + {a:.3f}*cos(2 PI {f:.2f} + {p:.3f})".format(b=bias, a=amp, f=freq_hz, p=phase_rad))
#
# Visualize
#
from matplotlib import pyplot as plt
fig, [ax0, ax1] = plt.subplots(2,1)
df['amplitude'][df.index < tmax_sec].plot(ax=ax0, label='raw data', linestyle='', marker='.')
df['fit x0'][df.index < tmax_sec].plot(ax=ax0, alpha=.5, color='black', marker='', lw=2)
df['fit xopt'][df.index < tmax_sec].plot(ax=ax0, color='black', marker='', lw=2)
ax0.legend()
df['amplitude'].plot(ax=ax1)
plt.show() | bsd-3-clause | 4,361,661,130,279,193,600 | 29.710145 | 117 | 0.679887 | false |
orionzhou/robin | formats/coords.py | 1 | 16290 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
parses JCVI software NUCMER (http://mummer.sourceforge.net/manual/)
output - mostly as *.coords file.
"""
import sys
import logging
from math import exp
from itertools import groupby
from maize.formats.base import LineFile, must_open
from maize.algorithms.graph import BiGraph
from maize.apps.base import sh, need_update, get_abs_path
Overlap_types = ("none", "a ~ b", "b ~ a", "a in b", "b in a")
class CoordsLine (object):
"""
The coords line looks like (in one line):
2953 4450 | 525 2023 | 1498 1499 | 98.07 |
8046 2023 | 18.62 74.10 | AC182814.30 contig_100476
the coords file needs to be generated by `show-coords -rcl`
"""
def __init__(self, row):
row = row.replace(" | ", "")
atoms = row.split()
assert len(atoms) in (13, 17), "expecting 13 or 17 columns"
self.start1 = int(atoms[0])
self.end1 = int(atoms[1])
self.start2 = int(atoms[2])
self.end2 = int(atoms[3])
if self.start2 > self.end2:
self.start2, self.end2 = self.end2, self.start2
self.orientation = '-'
else:
self.orientation = '+'
self.len1 = int(atoms[4])
self.len2 = int(atoms[5])
self.identity = float(atoms[6])
self.reflen = int(atoms[7])
self.querylen = int(atoms[8])
self.refcov = float(atoms[9]) / 100.
self.querycov = float(atoms[10]) / 100.
self.ref = atoms[11]
self.query = atoms[12]
# this is taken from CoGeBlast:
# the coverage of the hit muliplied by percent seq identity
# range from 0-100
self.quality = self.identity * self.querycov
self.score = int(self.identity * self.len1 / 100)
def __str__(self):
slots = "ref start1 end1 reflen " +\
"query start2 end2 querylen orientation"
return "\t".join(str(x) for x in \
[getattr(self, attr) for attr in slots.split()])
def bedline(self, pctid=False):
score = self.identity if pctid else self.score
return '\t'.join(str(x) for x in (self.ref, self.start1 - 1, self.end1,
self.query, score, self.orientation))
def qbedline(self, pctid=False):
score = self.identity if pctid else self.score
return '\t'.join(str(x) for x in (self.query, self.start2 - 1,
self.end2, self.ref, score, self.orientation))
@property
def blastline(self):
hitlen = max(self.len1, self.len2)
score = self.score
mismatch = int(self.len1 * (1 - self.identity / 100))
log_prob = -score * 0.693147181
evalue = 3.0e9 * exp(log_prob)
evalue = "{0:.1g}".format(evalue)
return "\t".join(str(x) for x in (self.query, self.ref,
self.identity, hitlen, mismatch, 0, self.start2, self.end2,
self.start1, self.end1, evalue, score
))
def overlap(self, max_hang=100):
"""
Determine the type of overlap given query, ref alignment coordinates
Consider the following alignment between sequence a and b:
aLhang \ / aRhang
\------------/
/------------\
bLhang / \ bRhang
Terminal overlap: a before b, b before a
Contain overlap: a in b, b in a
"""
aL, aR = 1, self.reflen
bL, bR = 1, self.querylen
aLhang, aRhang = self.start1 - aL, aR - self.end1
bLhang, bRhang = self.start2 - bL, bR - self.end2
if self.orientation == '-':
bLhang, bRhang = bRhang, bLhang
s1 = aLhang + bRhang
s2 = aRhang + bLhang
s3 = aLhang + aRhang
s4 = bLhang + bRhang
# Dovetail (terminal) overlap
if s1 < max_hang:
type = 2 # b ~ a
elif s2 < max_hang:
type = 1 # a ~ b
# Containment overlap
elif s3 < max_hang:
type = 3 # a in b
elif s4 < max_hang:
type = 4 # b in a
else:
type = 0
return type
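# Illustrative sketch (added; the row below is invented but follows the 13-column
# layout produced by `show-coords -rclH`): parse one line and name its overlap type.
def _example_overlap_classification():
    row = ("1 500  |  1 500  |  500 500  |  99.00  |  "
           "520 510  |  96.15 98.04  |  ref_contig query_contig")
    c = CoordsLine(row)
    # Both unaligned hangs are short here, so this is classified as a dovetail overlap.
    return Overlap_types[c.overlap(max_hang=100)]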
class Coords (LineFile):
"""
when parsing the .coords file, first skip first 5 lines
[S1] [E1] | [S2] [E2] | [LEN 1] [LEN 2] | [% IDY] | [TAGS]
then each row would be composed as this
"""
def __init__(self, filename, sorted=False, header=False):
if filename.endswith(".delta"):
coordsfile = filename.rsplit(".", 1)[0] + ".coords"
if need_update(filename, coordsfile):
fromdelta([filename])
filename = coordsfile
super(Coords, self).__init__(filename)
fp = open(filename)
if header:
            self.cmd = next(fp)
for row in fp:
try:
self.append(CoordsLine(row))
except AssertionError:
pass
if sorted:
self.ref_sort()
def ref_sort(self):
# sort by reference positions
self.sort(key=lambda x: (x.ref, x.start1))
def quality_sort(self):
# sort descending with score = identity * coverage
self.sort(key=lambda x: (x.query, -x.quality))
@property
def hits(self):
"""
returns a dict with query => blastline
"""
self.quality_sort()
hits = dict((query, list(blines)) for (query, blines) in \
groupby(self, lambda x: x.query))
self.ref_sort()
return hits
@property
def best_hits(self):
"""
returns a dict with query => best mapped position
"""
self.quality_sort()
        best_hits = dict((query, next(blines)) for (query, blines) in \
groupby(self, lambda x: x.query))
self.ref_sort()
return best_hits
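# Illustrative usage (added; the file and contig names are invented):
#
#     coords = Coords("assembly.coords", sorted=True)
#     placement = coords.best_hits.get("contig_100476")  # best CoordsLine or None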
def get_stats(coordsfile):
from maize.utils.range import range_union
logging.debug("Report stats on `%s`" % coordsfile)
coords = Coords(coordsfile)
ref_ivs = []
qry_ivs = []
identicals = 0
alignlen = 0
for c in coords:
qstart, qstop = c.start2, c.end2
if qstart > qstop:
qstart, qstop = qstop, qstart
qry_ivs.append((c.query, qstart, qstop))
sstart, sstop = c.start1, c.end1
if sstart > sstop:
sstart, sstop = sstop, sstart
ref_ivs.append((c.ref, sstart, sstop))
alen = sstop - sstart
alignlen += alen
identicals += c.identity / 100. * alen
qrycovered = range_union(qry_ivs)
refcovered = range_union(ref_ivs)
id_pct = identicals * 100. / alignlen
return qrycovered, refcovered, id_pct
def merge(args):
"""
%prog merge ref.fasta query.fasta *.delta
Merge delta files into a single delta.
"""
p = OptionParser(merge.__doc__)
p.set_outfile(outfile="merged_results.delta")
opts, args = p.parse_args(args)
if len(args) < 3:
sys.exit(not p.print_help())
ref, query = args[:2]
deltafiles = args[2:]
outfile = args.outfile
ref = get_abs_path(ref)
query = get_abs_path(query)
fw = must_open(outfile, "w")
    print(" ".join((ref, query)), file=fw)
    print("NUCMER", file=fw)
fw.close()
for d in deltafiles:
cmd = "awk 'NR > 2 {{print $0}}' {0}".format(d)
sh(cmd, outfile=outfile, append=True)
def blast(args):
"""
%prog blast <deltafile|coordsfile>
Covert delta or coordsfile to BLAST tabular output.
"""
p = OptionParser(blast.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
deltafile, = args
blastfile = deltafile.rsplit(".", 1)[0] + ".blast"
if need_update(deltafile, blastfile):
coords = Coords(deltafile)
fw = open(blastfile, "w")
for c in coords:
            print(c.blastline, file=fw)
def fromdelta(args):
"""
%prog fromdelta deltafile
Convert deltafile to coordsfile.
"""
p = OptionParser(fromdelta.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
deltafile, = args
coordsfile = deltafile.rsplit(".", 1)[0] + ".coords"
cmd = "show-coords -rclH {0}".format(deltafile)
sh(cmd, outfile=coordsfile)
return coordsfile
def sort(args):
"""
%prog sort coordsfile
Sort coordsfile based on query or ref.
"""
import maize.formats.blast
return maize.formats.blast.sort(args + ["--coords"])
def coverage(args):
"""
%prog coverage coordsfile
Report the coverage per query record, useful to see which query matches
reference. The coords file MUST be filtered with supermap::
maize.algorithms.supermap --filter query
"""
p = OptionParser(coverage.__doc__)
sp1.add_argument("-c", dest="cutoff", default=0.5, type="float",
help="only report query with coverage greater than [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
coordsfile, = args
fp = open(coordsfile)
coords = []
for row in fp:
try:
c = CoordsLine(row)
except AssertionError:
continue
coords.append(c)
coords.sort(key=lambda x: x.query)
coverages = []
for query, lines in groupby(coords, key=lambda x: x.query):
cumulative_cutoff = sum(x.querycov for x in lines)
coverages.append((query, cumulative_cutoff))
coverages.sort(key=lambda x: (-x[1], x[0]))
for query, cumulative_cutoff in coverages:
if cumulative_cutoff < args.cutoff:
break
print("{0}\t{1:.2f}".format(query, cumulative_cutoff))
def annotate(args):
"""
%prog annotate coordsfile
Annotate coordsfile to append an additional column, with the following
overlaps: {0}.
"""
p = OptionParser(annotate.__doc__.format(", ".join(Overlap_types)))
sp1.add_argument("--maxhang", default=100, type="int",
help="Max hang to call dovetail overlap [default: %default]")
sp1.add_argument("--all", default=False, action="store_true",
help="Output all lines [default: terminal/containment]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
coordsfile, = args
fp = open(coordsfile)
for row in fp:
try:
c = CoordsLine(row)
except AssertionError:
continue
ov = c.overlap(args.maxhang)
if not args.all and ov == 0:
continue
print("{0}\t{1}".format(row.strip(), Overlap_types[ov]))
def print_stats(qrycovered, refcovered, id_pct):
from maize.utils.cbook import thousands
try:
refcovered = thousands(refcovered)
qrycovered = thousands(qrycovered)
except:
pass
m1 = "Reference coverage: {0} bp".format(refcovered)
m2 = "Query coverage: {0} bp".format(qrycovered)
m3 = "Identity: {0:.2f}%".format(id_pct)
    print("\n".join((m1, m2, m3)), file=sys.stderr)
def summary(args):
"""
%prog summary coordsfile
provide summary on id% and cov%, for both query and reference
"""
p = OptionParser(summary.__doc__)
sp1.add_argument("-s", dest="single", default=False, action="store_true",
help="provide stats per reference seq")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(p.print_help())
coordsfile, = args
qrycovered, refcovered, id_pct = get_stats(coordsfile)
print_stats(qrycovered, refcovered, id_pct)
def filter(args):
"""
%prog filter <deltafile|coordsfile>
Produce a new delta/coords file and filter based on id% or cov%.
Use `delta-filter` for .delta file.
"""
p = OptionParser(filter.__doc__)
p.set_align(pctid=0, hitlen=0)
sp1.add_argument("--overlap", default=False, action="store_true",
help="Print overlap status (e.g. terminal, contained)")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
pctid = args.pctid
hitlen = args.hitlen
filename, = args
if pctid == 0 and hitlen == 0:
return filename
pf, suffix = filename.rsplit(".", 1)
outfile = "".join((pf, ".P{0}L{1}.".format(int(pctid), int(hitlen)), suffix))
if not need_update(filename, outfile):
return outfile
if suffix == "delta":
cmd = "delta-filter -i {0} -l {1} {2}".format(pctid, hitlen, filename)
sh(cmd, outfile=outfile)
return outfile
fp = open(filename)
fw = must_open(outfile, "w")
for row in fp:
try:
c = CoordsLine(row)
except AssertionError:
continue
if c.identity < pctid:
continue
if c.len2 < hitlen:
continue
if args.overlap and not c.overlap:
continue
outrow = row.rstrip()
if args.overlap:
ov = Overlap_types[c.overlap]
outrow += "\t" + ov
        print(outrow, file=fw)
return outfile
def bed(args):
"""
%prog bed coordsfile
will produce a bed list of mapped position and orientation (needs to
be beyond quality cutoff, say 50) in bed format
"""
p = OptionParser(bed.__doc__)
sp1.add_argument("--query", default=False, action="store_true",
help="print out query intervals rather than ref [default: %default]")
sp1.add_argument("--pctid", default=False, action="store_true",
help="use pctid in score [default: %default]")
sp1.add_argument("--cutoff", dest="cutoff", default=0, type="float",
help="get all the alignments with quality above threshold " +\
"[default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(p.print_help())
coordsfile, = args
query = args.query
pctid = args.pctid
quality_cutoff = args.cutoff
coords = Coords(coordsfile)
for c in coords:
if c.quality < quality_cutoff:
continue
line = c.qbedline(pctid=pctid) if query else c.bedline(pctid=pctid)
print(line)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(
formatter_class = argparse.ArgumentDefaultsHelpFormatter,
description = 'coords utilities'
)
sp = parser.add_subparsers(title = 'available commands', dest = 'command')
sp1 = sp.add_parser('annotate', help='annotate overlap types in coordsfile',
formatter_class = argparse.ArgumentDefaultsHelpFormatter)
sp1.add_argument('i', help = '')
sp1.set_defaults(func = annotate)
sp1 = sp.add_parser('blast', help='convert to blast tabular output',
formatter_class = argparse.ArgumentDefaultsHelpFormatter)
sp1.add_argument('i', help = '')
sp1.set_defaults(func = blast)
sp1 = sp.add_parser('filter', help='filter based on id%% and cov%%, write a new coords file',
formatter_class = argparse.ArgumentDefaultsHelpFormatter)
sp1.add_argument('i', help = '')
sp1.set_defaults(func = filter)
sp1 = sp.add_parser('fromdelta', help='convert deltafile to coordsfile',
formatter_class = argparse.ArgumentDefaultsHelpFormatter)
sp1.add_argument('i', help = '')
sp1.set_defaults(func = fromdelta)
sp1 = sp.add_parser('merge', help='merge deltafiles',
formatter_class = argparse.ArgumentDefaultsHelpFormatter)
sp1.add_argument('i', help = '')
sp1.set_defaults(func = merge)
sp1 = sp.add_parser('sort', help='sort coords file based on query or subject',
formatter_class = argparse.ArgumentDefaultsHelpFormatter)
sp1.add_argument('i', help = '')
sp1.set_defaults(func = sort)
sp1 = sp.add_parser('summary', help='provide summary on id%% and cov%%',
formatter_class = argparse.ArgumentDefaultsHelpFormatter)
sp1.add_argument('i', help = '')
sp1.set_defaults(func = summary)
args = parser.parse_args()
if args.command:
args.func(args)
else:
print('Error: need to specify a sub command\n')
parser.print_help()
| gpl-2.0 | -849,455,419,382,932,000 | 28.089286 | 97 | 0.578085 | false |
OpenVolunteeringPlatform/django-ovp-core | ovp_core/migrations/0004_load_skills_and_causes.py | 1 | 1337 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-10-25 01:14
from __future__ import unicode_literals
from django.db import migrations
skills = ['Arts/Handcrafting', 'Communication', 'Dance/Music', 'Law', 'Education', 'Sports', 'Cooking', 'Management', 'Idioms', 'Computers/Technology', 'Health', 'Others']
causes = ['Professional Training', 'Fight Poverty', 'Conscious consumption', 'Culture, Sport and Art', 'Human Rights', 'Education', 'Youth', 'Elders', 'Environment', 'Citizen Participation', 'Animal Protection', 'Health', 'People with disabilities']
def load_data(apps, schema_editor):
Skill = apps.get_model("ovp_core", "Skill")
Cause = apps.get_model("ovp_core", "Cause")
for skill in skills:
s = Skill(name=skill)
s.save()
for cause in causes:
c = Cause(name=cause)
c.save()
def unload_data(apps, schema_editor): #pragma: no cover
Skill = apps.get_model("ovp_core", "Skill")
Cause = apps.get_model("ovp_core", "Cause")
for skill in skills:
s = Skill.objects.filter(name=skill)
s.delete()
for cause in causes:
c = Cause.objects.filter(name=cause)
c.delete()
class Migration(migrations.Migration):
dependencies = [
('ovp_core', '0003_cause_skill'),
]
operations = [
migrations.RunPython(load_data, reverse_code=unload_data)
]
| agpl-3.0 | -6,059,131,419,789,719,000 | 30.093023 | 249 | 0.662678 | false |
berndf/avg_q | python/avg_q/numpy_Script.py | 1 | 10710 | # Copyright (C) 2013-2021 Bernd Feige
# This file is part of avg_q and released under the GPL v3 (see avg_q/COPYING).
import avg_q
import numpy
# Layout helper function to get a 2D arrangement of nplots plots
def nrows_ncols_from_nplots(nplots):
ncols=numpy.sqrt(nplots)
if ncols.is_integer():
nrows=ncols
else:
ncols=numpy.ceil(ncols)
nrows=nplots/ncols
if not nrows.is_integer():
nrows=numpy.ceil(nrows)
return int(nrows),int(ncols)
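# Quick illustrative check (added, not part of the original module): a square number
# of plots keeps an exact grid, anything else is rounded up.
def _example_grid_layout():
    assert nrows_ncols_from_nplots(9) == (3, 3)
    assert nrows_ncols_from_nplots(10) == (3, 4)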
class numpy_epoch(object):
def __init__(self,data=None):
self.comment=None
self.channelnames=[]
self.channelpos=[]
self.nr_of_points=0
self.nr_of_channels=0
self.itemsize=0
self.nrofaverages=None
self.trigpoints=None
self.xdata=None
if data is None:
self.data=None
self.nr_of_points=0
self.nr_of_channels=0
self.sfreq=None
else:
if type(data) is numpy.array:
self.data=data
else:
self.data=numpy.array(data,dtype='float32')
if len(self.data.shape)==1:
# If given a vector, make it a 1-channel array
self.data=self.data.reshape((self.data.shape[0],1))
self.nr_of_points,self.nr_of_channels=self.data.shape
self.sfreq=1
def __str__(self):
return(((self.comment+': ') if self.comment else "")
+"%d x %d sfreq %g" % (self.nr_of_points,self.nr_of_channels,self.sfreq))
class numpy_Epochsource(avg_q.Epochsource):
def __init__(self, epochs=None):
self.epochs=epochs
self.branch=[]
self.infile=avg_q.avg_q_file()
# Only used to transfer trigpoints to avg_q.Epochsource
self.trigpoints=None
def set_trigpoints(self,trigpoints):
raise Exception('numpy_Epochsource: set_trigpoints not implemented, trigpoints are a property of numpy_epoch')
def send(self,avg_q_instance):
if len(self.epochs)==0: return
for epoch in self.epochs:
nr_of_points,nr_of_channels=epoch.data.shape
avg_q_instance.write('''
read_generic -c %(readx)s -s %(sfreq)g -C %(nr_of_channels)d -e 1 %(trigtransfer_arg)s stdin 0 %(aftertrig)d float32
''' % {
'readx': '-x xdata' if epoch.xdata is not None else '',
'sfreq': epoch.sfreq if epoch.sfreq else 100.0,
'aftertrig': nr_of_points,
'nr_of_channels': nr_of_channels,
'trigtransfer_arg': '-T -R stdin' if epoch.trigpoints is not None else '',
})
if epoch.channelnames:
channelnames=epoch.channelnames
if epoch.channelpos:
channelpos=epoch.channelpos
else:
# If channelpos is not available, make it up
channelpos=[(i,0,0) for i in range(len(channelnames))]
methodline='>set_channelposition -s '+' '.join(["%s %g %g %g" % (channelnames[channel],channelpos[channel][0],channelpos[channel][1],channelpos[channel][2]) for channel in range(len(epoch.channelnames))])
avg_q_instance.write(methodline+'\n')
if epoch.comment:
avg_q_instance.write('>set_comment %s\n' % epoch.comment)
if epoch.nrofaverages:
avg_q_instance.write('>set nrofaverages %d\n' % epoch.nrofaverages)
for methodline in self.branch:
avg_q_instance.write(methodline+'\n')
def send_trigpoints(self,avg_q_instance):
# Actually send the data.
if len(self.epochs)==0: return
for epoch in self.epochs:
if epoch.trigpoints is not None:
self.trigpoints=epoch.trigpoints
avg_q.Epochsource.send_trigpoints(self,avg_q_instance)
# It's a bit unfortunate that array.array does support tofile() with pipes but numpy.array doesn't...
# So we have to take the route via a string buffer just as with reading
# We have to take good care that the data type corresponds to what avg_q reads (ie, float32)
thisdata=(numpy.append(epoch.xdata.reshape((epoch.xdata.shape[0],1)),epoch.data,axis=1) if epoch.xdata is not None else epoch.data).astype('float32')
avg_q_instance.avg_q.stdin.write(thisdata.tobytes())
avg_q_instance.avg_q.stdin.flush()
class numpy_Script(avg_q.Script):
epochs=[] # List of numpy_epoch objects
def read(self):
'''Read the current epoch into numpy array self.data, channels=columns
We support both reading all epochs from the iterated queue (if no collect
method is set) and reading the single result of the post-processing queue.
'''
# Save and restore the current state, since we work by (temporally)
# adding our own transforms
self.save_state()
transform="""
echo -F stdout Epoch Dataset\\n
query channelpositions stdout
query -N comment stdout
query -N sfreq stdout
query -N nr_of_points stdout
query -N itemsize stdout
query -N nrofaverages stdout
echo -F stdout Data:\\n
write_generic -x stdout float32
"""
if self.collect=='null_sink':
self.add_transform(transform)
else:
self.add_postprocess(transform)
rdr=self.runrdr()
self.epochs=[]
while True:
try:
n=next(rdr)
except StopIteration:
break
if n!='Epoch Dataset': break
epoch=numpy_epoch()
for r in rdr:
if '=' in r: break
channelname,x,y,z=r.split('\t')
epoch.channelnames.append(channelname)
epoch.channelpos.append((float(x),float(y),float(z)))
while r:
if r=='Data:': break
name,value=r.split('=',maxsplit=1)
if name=='comment':
epoch.comment=value
elif name=='sfreq':
epoch.sfreq=float(value)
elif name=='nr_of_points':
epoch.nr_of_points=int(value)
elif name=='itemsize':
epoch.itemsize=int(value)
elif name=='nrofaverages':
epoch.nrofaverages=int(value)
r=next(rdr)
epoch.nr_of_channels=len(epoch.channelnames)
#print(epoch)
# Problem: If executed too quickly, the read() below will return only partial data...
datapoints=epoch.nr_of_points*(1+epoch.nr_of_channels*epoch.itemsize)
datalength=4*datapoints
buf=self.avg_q_instance.avg_q.stdout.read(datalength)
while len(buf)!=datalength:
buf2=self.avg_q_instance.avg_q.stdout.read(datalength-len(buf))
buf+=buf2
#print(len(buf))
# http://docs.scipy.org/doc/numpy-1.7.0/reference/generated/numpy.frombuffer.html
data=numpy.frombuffer(buf,dtype=numpy.float32,count=datapoints)
data.shape=(epoch.nr_of_points,1+epoch.nr_of_channels*epoch.itemsize)
epoch.xdata=data[:,0]
epoch.data=data[:,1:]
self.epochs.append(epoch)
self.restore_state()
def plot_maps(self, ncols=None, vmin=None, vmax=None, globalscale=False, isolines=[0]):
'''globalscale arranges for vmin,vmax to actually be -1,+1 after global max(abs) scaling.'''
import scipy.interpolate
import matplotlib.pyplot as plt
def mapplot(nrows,ncols,xpos,ypos,z,nsteps=50):
#ncontours=15
xmin,xmax=xpos.min(),xpos.max()
ymin,ymax=ypos.min(),ypos.max()
xi=numpy.linspace(xmin,xmax,nsteps)
yi=numpy.linspace(ymin,ymax,nsteps)
nplots=z.shape[0]
for thisplot in range(0,nplots):
# cf. https://scipy-cookbook.readthedocs.io/items/Matplotlib_Gridding_irregularly_spaced_data.html
zi=scipy.interpolate.griddata((xpos,ypos),z[thisplot],(xi[None,:], yi[:,None]),method='cubic')
# Don't mess with arranging plots on a page if we only have a single plot...
if nplots>1:
plt.subplot(nrows,ncols,thisplot+1)
# pcolormesh is described to be much faster than pcolor
# Note that the default for edgecolors appears to be 'None' resulting in transparent lines between faces...
gplot=plt.pcolormesh(xi,yi,zi,norm=plt.Normalize(vmin=vmin,vmax=vmax),cmap='jet',shading='nearest',edgecolors='face',antialiaseds=False)
#gplot=plt.contourf(g,ncontours)
#plt.scatter(xpos,ypos,marker='o',c='black',s=5) # Draw sample points
if isolines:
plt.contour(xi,yi,zi,isolines,colors='black',linestyles='solid')
gplot.axes.set_axis_off()
gplot.axes.set_xlim(xmin,xmax)
gplot.axes.set_ylim(ymin,ymax)
self.save_state()
self.add_transform('extract_item 0')
# Arrange for epochs to be appended for plotting maps
if self.collect=='null_sink':
self.set_collect('append')
self.read()
for epoch in self.epochs:
if globalscale:
vmin=epoch.data.min()
vmax=epoch.data.max()
# Ensure symmetric scaling around 0
scale=numpy.max([-vmin,vmax])
epoch.data=epoch.data/scale
vmin= -1
vmax= 1
nplots=epoch.data.shape[0]
if ncols is not None:
nrows=nplots/ncols
if not nrows.is_integer():
nrows=numpy.ceil(nrows)
nrows=int(nrows)
else:
nrows,ncols=nrows_ncols_from_nplots(nplots)
mapplot(nrows,ncols,numpy.array([xyz[0] for xyz in epoch.channelpos]),numpy.array([xyz[1] for xyz in epoch.channelpos]),epoch.data)
self.restore_state()
def plot_traces(self, ncols=None, vmin=None, vmax=None, xlim=None, ylim=None, x_is_latency=False):
'''This creates one 2d plot for each channel, like for time-freq data (freq=x, time=epoch).
If x_is_latency=True, each matrix is transposed so x and y swapped.'''
import matplotlib.pyplot as plt
def traceplot(nrows,ncols,z,xlim=None,ylim=None,transpose=False):
#ncontours=15
thisplot=0
for z1 in z:
z1=numpy.array(z1)
if transpose: z1=numpy.transpose(z1)
plt.subplot(nrows,ncols,thisplot+1)
x=numpy.linspace(xlim[0],xlim[1],num=z1.shape[1]) if xlim else numpy.array(range(z1.shape[1]))
y=numpy.linspace(ylim[0],ylim[1],num=z1.shape[0]) if ylim else numpy.array(range(z1.shape[0]))
# pcolormesh is described to be much faster than pcolor
# Note that the default for edgecolors appears to be 'None' resulting in transparent lines between faces...
gplot=plt.pcolormesh(x,y,z1,norm=plt.Normalize(vmin=vmin,vmax=vmax),cmap='jet',shading='nearest',edgecolors='face',antialiaseds=False)
#gplot=plt.contourf(z1,ncontours)
gplot.axes.set_axis_off()
#print z1.shape
gplot.axes.set_xlim(min(x),max(x))
gplot.axes.set_ylim(min(y),max(y))
thisplot+=1
self.save_state()
self.add_transform('extract_item 0')
self.read()
if self.epochs[0].xdata is not None:
if x_is_latency:
if xlim is None:
xlim=(min(self.epochs[0].xdata),max(self.epochs[0].xdata))
else:
if ylim is None:
ylim=(min(self.epochs[0].xdata),max(self.epochs[0].xdata))
z=[] # One array per *channel*, each array collects all time points and epochs, epochs varying fastest
# z[channel][point] is a list of values (epochs)
for epoch in self.epochs:
for point in range(epoch.nr_of_points):
channels=epoch.data[point,:]
for channel in range(0,len(channels)):
if len(z)<=channel:
z.append([[channels[channel]]])
else:
if len(z[channel])<=point:
z[channel].append([channels[channel]])
else:
z[channel][point].append(channels[channel])
point+=1
nplots=len(z)
if ncols is not None:
nrows=nplots/ncols
if not nrows.is_integer():
nrows=numpy.ceil(nrows)
nrows=int(nrows)
else:
nrows,ncols=nrows_ncols_from_nplots(nplots)
traceplot(nrows,ncols,z,xlim=xlim,ylim=ylim,transpose=x_is_latency)
self.restore_state()
| gpl-3.0 | -4,866,024,738,652,813,000 | 36.711268 | 208 | 0.696825 | false |
stephanie-wang/ray | python/ray/test_utils.py | 1 | 5068 | import json
import fnmatch
import os
import subprocess
import sys
import tempfile
import time
import psutil
import ray
class RayTestTimeoutException(Exception):
"""Exception used to identify timeouts from test utilities."""
pass
def _pid_alive(pid):
"""Check if the process with this PID is alive or not.
Args:
pid: The pid to check.
Returns:
This returns false if the process is dead. Otherwise, it returns true.
"""
try:
os.kill(pid, 0)
return True
except OSError:
return False
def wait_for_pid_to_exit(pid, timeout=20):
start_time = time.time()
while time.time() - start_time < timeout:
if not _pid_alive(pid):
return
time.sleep(0.1)
raise RayTestTimeoutException(
"Timed out while waiting for process to exit.")
def wait_for_children_of_pid(pid, num_children=1, timeout=20):
p = psutil.Process(pid)
start_time = time.time()
while time.time() - start_time < timeout:
num_alive = len(p.children(recursive=False))
if num_alive >= num_children:
return
time.sleep(0.1)
raise RayTestTimeoutException(
"Timed out while waiting for process children to start "
"({}/{} started).".format(num_alive, num_children))
def wait_for_children_of_pid_to_exit(pid, timeout=20):
children = psutil.Process(pid).children()
if len(children) == 0:
return
_, alive = psutil.wait_procs(children, timeout=timeout)
if len(alive) > 0:
raise RayTestTimeoutException(
"Timed out while waiting for process children to exit."
" Children still alive: {}.".format([p.name() for p in alive]))
def kill_process_by_name(name, SIGKILL=False):
for p in psutil.process_iter(attrs=["name"]):
if p.info["name"] == name:
if SIGKILL:
p.kill()
else:
p.terminate()
def run_string_as_driver(driver_script):
"""Run a driver as a separate process.
Args:
driver_script: A string to run as a Python script.
Returns:
The script's output.
"""
# Save the driver script as a file so we can call it using subprocess.
with tempfile.NamedTemporaryFile() as f:
f.write(driver_script.encode("ascii"))
f.flush()
out = ray.utils.decode(
subprocess.check_output(
[sys.executable, f.name], stderr=subprocess.STDOUT))
return out
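# Illustrative usage (added; the driver body is made up): run a tiny driver in its own
# Python process and inspect what it printed.
#
#     out = run_string_as_driver("import ray\nray.init()\nprint('driver ok')")
#     assert "driver ok" in out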
def run_string_as_driver_nonblocking(driver_script):
"""Start a driver as a separate process and return immediately.
Args:
driver_script: A string to run as a Python script.
Returns:
A handle to the driver process.
"""
# Save the driver script as a file so we can call it using subprocess. We
# do not delete this file because if we do then it may get removed before
# the Python process tries to run it.
with tempfile.NamedTemporaryFile(delete=False) as f:
f.write(driver_script.encode("ascii"))
f.flush()
return subprocess.Popen(
[sys.executable, f.name], stdout=subprocess.PIPE)
def flat_errors():
errors = []
for job_errors in ray.errors(all_jobs=True).values():
errors.extend(job_errors)
return errors
def relevant_errors(error_type):
return [error for error in flat_errors() if error["type"] == error_type]
def wait_for_errors(error_type, num_errors, timeout=20):
start_time = time.time()
while time.time() - start_time < timeout:
if len(relevant_errors(error_type)) >= num_errors:
return
time.sleep(0.1)
raise RayTestTimeoutException("Timed out waiting for {} {} errors.".format(
num_errors, error_type))
def wait_for_condition(condition_predictor,
timeout_ms=1000,
retry_interval_ms=100):
"""A helper function that waits until a condition is met.
Args:
condition_predictor: A function that predicts the condition.
timeout_ms: Maximum timeout in milliseconds.
retry_interval_ms: Retry interval in milliseconds.
Return:
Whether the condition is met within the timeout.
"""
time_elapsed = 0
start = time.time()
while time_elapsed <= timeout_ms:
if condition_predictor():
return True
time_elapsed = (time.time() - start) * 1000
time.sleep(retry_interval_ms / 1000.0)
return False
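# Illustrative usage (added; `actor` is a hypothetical Ray actor handle): poll a
# readiness flag for up to two seconds instead of sleeping a fixed amount.
#
#     assert wait_for_condition(
#         lambda: ray.get(actor.is_ready.remote()),
#         timeout_ms=2000, retry_interval_ms=50)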
def recursive_fnmatch(dirpath, pattern):
"""Looks at a file directory subtree for a filename pattern.
Similar to glob.glob(..., recursive=True) but also supports 2.7
"""
matches = []
for root, dirnames, filenames in os.walk(dirpath):
for filename in fnmatch.filter(filenames, pattern):
matches.append(os.path.join(root, filename))
return matches
def generate_internal_config_map(**kwargs):
internal_config = json.dumps(kwargs)
ray_kwargs = {
"_internal_config": internal_config,
}
return ray_kwargs
| apache-2.0 | -3,934,694,632,991,343,600 | 27.47191 | 79 | 0.629834 | false |
hasgeek/funnel | funnel/models/helpers.py | 1 | 14724 | from __future__ import annotations
from copy import deepcopy
from textwrap import dedent
from typing import Dict, Iterable, List, Optional, Set, Type
import os.path
import re
from sqlalchemy import DDL, event
from sqlalchemy.dialects.postgresql.base import (
RESERVED_WORDS as POSTGRESQL_RESERVED_WORDS,
)
from flask import current_app
from better_profanity import profanity
from furl import furl
from typing_extensions import TypedDict
from zxcvbn import zxcvbn
import pymdownx.superfences
from coaster.utils import (
default_markdown_extension_configs,
default_markdown_extensions,
make_name,
)
from ..typing import T
from . import UrlType, db
__all__ = [
'RESERVED_NAMES',
'PASSWORD_MIN_LENGTH',
'PASSWORD_MAX_LENGTH',
'check_password_strength',
'profanity',
'markdown_content_options',
'add_to_class',
'add_search_trigger',
'visual_field_delimiter',
'add_search_trigger',
'valid_name',
'valid_username',
'quote_like',
'ImgeeFurl',
'ImgeeType',
]
RESERVED_NAMES: Set[str] = {
'_baseframe',
'about',
'account',
'admin',
'api',
'app',
'apps',
'auth',
'blog',
'boxoffice',
'brand',
'brands',
'by',
'client',
'clients',
'comments',
'confirm',
'contact',
'contacts',
'crew',
'dashboard',
'delete',
'edit',
'email',
'emails',
'embed',
'event',
'events',
'ftp',
'funnel',
'funnels',
'hacknight',
'hacknights',
'hgtv',
'imap',
'in',
'json',
'kharcha',
'login',
'logout',
'members',
'membership',
'new',
'news',
'notification',
'notifications',
'org',
'organisation',
'organisations',
'organization',
'organizations',
'orgs',
'pop',
'pop3',
'profile',
'profiles',
'project',
'projects',
'proposal',
'proposals',
'register',
'reset',
'search',
'siteadmin',
'smtp',
'static',
'ticket',
'tickets',
'token',
'tokens',
'update',
'updates',
'venue',
'venues',
'video',
'videos',
'workshop',
'workshops',
'www',
}
class PasswordCheckType(TypedDict):
"""Typed dictionary for :func:`check_password_strength`."""
is_weak: bool
    score: int
warning: str
    suggestions: List[str]
#: Minimum length for a password
PASSWORD_MIN_LENGTH = 8
#: Maximum length for a password
PASSWORD_MAX_LENGTH = 100
#: Strong passwords require a strength of at least 3 as per the zxcvbn
#: project documentation.
PASSWORD_MIN_SCORE = 3
def check_password_strength(
password: str, user_inputs: Optional[Iterable] = None
) -> PasswordCheckType:
result = zxcvbn(password, user_inputs)
return {
'is_weak': (
len(password) < PASSWORD_MIN_LENGTH
or result['score'] < PASSWORD_MIN_SCORE
or bool(result['feedback']['warning'])
),
'score': result['score'],
'warning': result['feedback']['warning'],
'suggestions': result['feedback']['suggestions'],
}
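# Illustrative sketch (added; the password and user inputs are made up): user_inputs
# lets zxcvbn penalise passwords derived from the user's own name or email.
def _example_password_check() -> PasswordCheckType:
    report = check_password_strength(
        'wachtwoord123', user_inputs=['alice', 'alice@example.com']
    )
    # report['is_weak'] is True when the password is short, low-scoring or has a warning.
    return report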
# re.IGNORECASE needs re.ASCII because of a quirk in the characters it matches.
# https://docs.python.org/3/library/re.html#re.I
_username_valid_re = re.compile('^[a-z0-9]([a-z0-9-]*[a-z0-9])?$', re.I | re.A)
_name_valid_re = re.compile('^[a-z0-9]([a-z0-9-]*[a-z0-9])?$', re.A)
# Create a profanity word list by combining the built-in list of the better_profanity
# package (~835 terms) with an additional list (>1300 terms) from
# https://www.cs.cmu.edu/~biglou/resources/ stored here as a static file.
# This list is used to filter autogenerated ids (currently in Shortlink.new)
with open(os.path.join(os.path.dirname(__file__), 'bad-words.txt')) as badwordfile:
profanity.add_censor_words([_w.strip() for _w in badwordfile.readlines()])
visual_field_delimiter = ' ¦ '
markdown_content_options: dict = {
'extensions': deepcopy(default_markdown_extensions),
'extension_configs': deepcopy(default_markdown_extension_configs),
}
markdown_content_options['extensions'].append('toc') # Allow a table of contents
markdown_content_options['extension_configs']['toc'] = {
# Make headings link to themselves, for easier sharing
'anchorlink': True,
# Add a `h:` prefix to the heading id, to avoid conflict with template identifiers
'slugify': lambda value, separator: ('h:' + make_name(value, delim=separator)),
}
# Custom fences must use <pre><code> blocks and not <div> blocks, as linkify will mess
# with links inside <div> blocks
markdown_content_options['extension_configs'].setdefault('pymdownx.superfences', {})[
'custom_fences'
] = [
{
'name': 'mermaid',
'class': 'language-placeholder language-mermaid',
'format': pymdownx.superfences.fence_code_format,
},
{
'name': 'vega-lite',
'class': 'language-placeholder language-vega-lite',
'format': pymdownx.superfences.fence_code_format,
},
]
def add_to_class(cls: Type, name: Optional[str] = None):
"""
Add a new method to a class via a decorator. Takes an optional attribute name.
Usage::
@add_to_class(ExistingClass)
def new_method(self, *args):
pass
@add_to_class(ExistingClass, 'new_property')
@property
def existing_class_new_property(self):
pass
"""
def decorator(attr):
use_name = name or attr.__name__
if use_name in cls.__dict__:
raise AttributeError(f"{cls.__name__} already has attribute {use_name}")
setattr(cls, use_name, attr)
return attr
return decorator
def reopen(cls: Type[T]):
"""
Move the contents of the decorated class into an existing class and return it.
Usage::
@reopen(ExistingClass)
class __ExistingClass:
@property
def new_property(self):
pass
This is equivalent to::
def new_property(self):
pass
ExistingClass.new_property = property(new_property)
This decorator is syntactic sugar to make class extension visually similar to class
definition. It is not for monkey patching. It will refuse to overwrite existing
attributes, and will reject a decorated class that contains base classes or a
metaclass. If the existing class was processed by a metaclass, the new attributes
added to it may not receive the same processing.
This decorator is intended to aid legibility of bi-directional relationships in
SQLAlchemy models, specifically where a basic backref is augmented with methods or
properties that do more processing.
"""
def decorator(temp_cls: Type) -> Type[T]:
if temp_cls.__bases__ != (object,):
raise TypeError("Reopened class cannot add base classes")
if temp_cls.__class__ is not type:
raise TypeError("Reopened class cannot add a metaclass")
if {
'__slots__',
'__getattribute__',
'__getattr__',
'__setattr__',
'__delattr__',
}.intersection(set(temp_cls.__dict__.keys())):
raise TypeError("Reopened class contains unsupported __attributes__")
for attr, value in list(temp_cls.__dict__.items()):
# Skip the standard Python attributes, process the rest
if attr not in (
'__dict__',
'__doc__',
'__module__',
'__weakref__',
'__annotations__',
):
# Refuse to overwrite existing attributes
if hasattr(cls, attr):
raise AttributeError(f"{cls.__name__} already has attribute {attr}")
# All good? Copy the attribute over...
setattr(cls, attr, value)
# ...And remove it from the temporary class
delattr(temp_cls, attr)
# Merge typing annotations
elif attr == '__annotations__':
cls.__annotations__.update(value)
# Return the original class. Leave the temporary class to the garbage collector
return cls
return decorator
def valid_username(candidate: str) -> bool:
"""
Check if a username is valid.
Letters, numbers and non-terminal hyphens only.
"""
    return _username_valid_re.search(candidate) is not None
def valid_name(candidate: str) -> bool:
"""
Check if a name is valid.
Lowercase letters, numbers and non-terminal hyphens only.
"""
    return _name_valid_re.search(candidate) is not None
def pgquote(identifier: str) -> str:
"""Add double quotes to the given identifier if required (PostgreSQL only)."""
return (
('"%s"' % identifier) if identifier in POSTGRESQL_RESERVED_WORDS else identifier
)
def quote_like(query):
"""
Construct a LIKE query.
Usage::
column.like(quote_like(q))
"""
# Escape the '%' and '_' wildcards in SQL LIKE clauses.
# Some SQL dialects respond to '[' and ']', so remove them.
return (
query.replace('%', r'\%').replace('_', r'\_').replace('[', '').replace(']', '')
+ '%'
)
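# Illustrative check (added): escaping makes user-typed wildcards literal, so only the
# trailing '%' appended by the helper acts as a wildcard.
def _example_quote_like() -> str:
    assert quote_like('50%_off') == r'50\%\_off%'
    return quote_like('50%_off')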
def add_search_trigger(model: db.Model, column_name: str) -> Dict[str, str]:
"""
Add a search trigger and returns SQL for use in migrations.
Typical use::
class MyModel(db.Model):
...
search_vector = db.deferred(db.Column(
TSVectorType(
'name', 'title', *indexed_columns,
weights={'name': 'A', 'title': 'B'},
regconfig='english'
),
nullable=False,
))
__table_args__ = (
db.Index(
'ix_mymodel_search_vector',
'search_vector',
postgresql_using='gin'
),
)
add_search_trigger(MyModel, 'search_vector')
To extract the SQL required in a migration:
$ flask shell
>>> print(models.add_search_trigger(models.MyModel, 'search_vector')['trigger'])
Available keys: ``update``, ``trigger`` (for upgrades) and ``drop`` (for downgrades).
:param model: Model class
:param str column_name: Name of the tsvector column in the model
"""
column = getattr(model, column_name)
function_name = model.__tablename__ + '_' + column_name + '_update'
trigger_name = model.__tablename__ + '_' + column_name + '_trigger'
weights = column.type.options.get('weights', {})
regconfig = column.type.options.get('regconfig', 'english')
trigger_fields = []
update_fields = []
for col in column.type.columns:
texpr = "to_tsvector('{regconfig}', COALESCE(NEW.{col}, ''))".format(
regconfig=regconfig, col=pgquote(col)
)
uexpr = "to_tsvector('{regconfig}', COALESCE({col}, ''))".format(
regconfig=regconfig, col=pgquote(col)
)
if col in weights:
texpr = "setweight({expr}, '{weight}')".format(
expr=texpr, weight=weights[col]
)
uexpr = "setweight({expr}, '{weight}')".format(
expr=uexpr, weight=weights[col]
)
trigger_fields.append(texpr)
update_fields.append(uexpr)
trigger_expr = ' || '.join(trigger_fields)
update_expr = ' || '.join(update_fields)
trigger_function = dedent(
'''
CREATE FUNCTION {function_name}() RETURNS trigger AS $$
BEGIN
NEW.{column_name} := {trigger_expr};
RETURN NEW;
END
$$ LANGUAGE plpgsql;
CREATE TRIGGER {trigger_name} BEFORE INSERT OR UPDATE ON {table_name}
FOR EACH ROW EXECUTE PROCEDURE {function_name}();
'''.format( # nosec
function_name=pgquote(function_name),
column_name=pgquote(column_name),
trigger_expr=trigger_expr,
trigger_name=pgquote(trigger_name),
table_name=pgquote(model.__tablename__),
)
)
    update_statement = dedent(  # noqa: S608  # nosec
        '''
UPDATE {table_name} SET {column_name} = {update_expr};
'''.format( # nosec
table_name=pgquote(model.__tablename__),
column_name=pgquote(column_name),
update_expr=update_expr,
)
)
drop_statement = dedent(
'''
DROP TRIGGER {trigger_name} ON {table_name};
DROP FUNCTION {function_name}();
'''.format( # nosec
trigger_name=pgquote(trigger_name),
table_name=pgquote(model.__tablename__),
function_name=pgquote(function_name),
)
)
# FIXME: `DDL().execute_if` accepts a string dialect, but sqlalchemy-stubs
# incorrectly declares the type as `Optional[Dialect]`
# https://github.com/dropbox/sqlalchemy-stubs/issues/181
event.listen(
model.__table__,
'after_create',
DDL(trigger_function).execute_if(
dialect='postgresql' # type: ignore[arg-type]
),
)
event.listen(
model.__table__,
'before_drop',
DDL(drop_statement).execute_if(dialect='postgresql'), # type: ignore[arg-type]
)
return {
'trigger': trigger_function,
'update': update_statement,
'drop': drop_statement,
}
class ImgeeFurl(furl):
def resize(self, width: int, height: Optional[int] = None) -> furl:
"""
Return image url with `?size=WxH` suffixed to it.
:param width: Width to resize the image to
:param height: Height to resize the image to
"""
if self.url:
copy = self.copy()
copy.args['size'] = f'{width}' if height is None else f'{width}x{height}'
return copy
return self
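# Illustrative usage (added; the imgee hostname and file id are invented):
#
#     ImgeeFurl('https://images.example.com/embed/file/abc123').resize(120)
#     # -> https://images.example.com/embed/file/abc123?size=120
#     ImgeeFurl('https://images.example.com/embed/file/abc123').resize(640, 480)
#     # -> ...?size=640x480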
class ImgeeType(UrlType):
url_parser = ImgeeFurl
def process_bind_param(self, value, dialect):
value = super().process_bind_param(value, dialect)
if value:
allowed_domains = current_app.config.get('IMAGE_URL_DOMAINS', [])
allowed_schemes = current_app.config.get('IMAGE_URL_SCHEMES', [])
parsed = self.url_parser(value)
if allowed_domains and parsed.host not in allowed_domains:
raise ValueError(
"Image must be hosted on {hosts}".format(
hosts=' or '.join(allowed_domains)
)
)
if allowed_schemes and parsed.scheme not in allowed_schemes:
raise ValueError("Invalid scheme for the URL")
return value
| agpl-3.0 | -4,672,773,039,507,635,000 | 27.755859 | 89 | 0.583848 | false |
avanzosc/avanzosc6.1 | crm_case/__init__.py | 1 | 1125 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import crm
import crm_segmentation
#import report
#import wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -9,070,018,155,918,726,000 | 36.5 | 78 | 0.618667 | false |
googleapis/python-compute | google/cloud/compute_v1/services/subnetworks/transports/base.py | 1 | 10790 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import packaging.version
import pkg_resources
from requests import __version__ as requests_version
import google.auth # type: ignore
import google.api_core # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.cloud.compute_v1.types import compute
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-cloud-compute",).version,
grpc_version=None,
rest_version=requests_version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
try:
# google.auth.__version__ was added in 1.26.0
_GOOGLE_AUTH_VERSION = google.auth.__version__
except AttributeError:
try: # try pkg_resources if it is available
_GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version
except pkg_resources.DistributionNotFound: # pragma: NO COVER
_GOOGLE_AUTH_VERSION = None
class SubnetworksTransport(abc.ABC):
"""Abstract transport class for Subnetworks."""
AUTH_SCOPES = (
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/cloud-platform",
)
DEFAULT_HOST: str = "compute.googleapis.com"
def __init__(
self,
*,
host: str = DEFAULT_HOST,
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
scopes_kwargs = self._get_scopes_kwargs(self._host, scopes)
# Save the scopes.
self._scopes = scopes or self.AUTH_SCOPES
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = google.auth.default(
**scopes_kwargs, quota_project_id=quota_project_id
)
# Save the credentials.
self._credentials = credentials
# TODO(busunkim): This method is in the base transport
# to avoid duplicating code across the transport classes. These functions
# should be deleted once the minimum required versions of google-auth is increased.
# TODO: Remove this function once google-auth >= 1.25.0 is required
@classmethod
def _get_scopes_kwargs(
cls, host: str, scopes: Optional[Sequence[str]]
) -> Dict[str, Optional[Sequence[str]]]:
"""Returns scopes kwargs to pass to google-auth methods depending on the google-auth version"""
scopes_kwargs = {}
if _GOOGLE_AUTH_VERSION and (
packaging.version.parse(_GOOGLE_AUTH_VERSION)
>= packaging.version.parse("1.25.0")
):
scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
else:
scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
return scopes_kwargs
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.aggregated_list: gapic_v1.method.wrap_method(
self.aggregated_list, default_timeout=None, client_info=client_info,
),
self.delete: gapic_v1.method.wrap_method(
self.delete, default_timeout=None, client_info=client_info,
),
self.expand_ip_cidr_range: gapic_v1.method.wrap_method(
self.expand_ip_cidr_range,
default_timeout=None,
client_info=client_info,
),
self.get: gapic_v1.method.wrap_method(
self.get, default_timeout=None, client_info=client_info,
),
self.get_iam_policy: gapic_v1.method.wrap_method(
self.get_iam_policy, default_timeout=None, client_info=client_info,
),
self.insert: gapic_v1.method.wrap_method(
self.insert, default_timeout=None, client_info=client_info,
),
self.list: gapic_v1.method.wrap_method(
self.list, default_timeout=None, client_info=client_info,
),
self.list_usable: gapic_v1.method.wrap_method(
self.list_usable, default_timeout=None, client_info=client_info,
),
self.patch: gapic_v1.method.wrap_method(
self.patch, default_timeout=None, client_info=client_info,
),
self.set_iam_policy: gapic_v1.method.wrap_method(
self.set_iam_policy, default_timeout=None, client_info=client_info,
),
self.set_private_ip_google_access: gapic_v1.method.wrap_method(
self.set_private_ip_google_access,
default_timeout=None,
client_info=client_info,
),
self.test_iam_permissions: gapic_v1.method.wrap_method(
self.test_iam_permissions,
default_timeout=None,
client_info=client_info,
),
}
@property
def aggregated_list(
self,
) -> Callable[
[compute.AggregatedListSubnetworksRequest],
Union[
compute.SubnetworkAggregatedList,
Awaitable[compute.SubnetworkAggregatedList],
],
]:
raise NotImplementedError()
@property
def delete(
self,
) -> Callable[
[compute.DeleteSubnetworkRequest],
Union[compute.Operation, Awaitable[compute.Operation]],
]:
raise NotImplementedError()
@property
def expand_ip_cidr_range(
self,
) -> Callable[
[compute.ExpandIpCidrRangeSubnetworkRequest],
Union[compute.Operation, Awaitable[compute.Operation]],
]:
raise NotImplementedError()
@property
def get(
self,
) -> Callable[
[compute.GetSubnetworkRequest],
Union[compute.Subnetwork, Awaitable[compute.Subnetwork]],
]:
raise NotImplementedError()
@property
def get_iam_policy(
self,
) -> Callable[
[compute.GetIamPolicySubnetworkRequest],
Union[compute.Policy, Awaitable[compute.Policy]],
]:
raise NotImplementedError()
@property
def insert(
self,
) -> Callable[
[compute.InsertSubnetworkRequest],
Union[compute.Operation, Awaitable[compute.Operation]],
]:
raise NotImplementedError()
@property
def list(
self,
) -> Callable[
[compute.ListSubnetworksRequest],
Union[compute.SubnetworkList, Awaitable[compute.SubnetworkList]],
]:
raise NotImplementedError()
@property
def list_usable(
self,
) -> Callable[
[compute.ListUsableSubnetworksRequest],
Union[
compute.UsableSubnetworksAggregatedList,
Awaitable[compute.UsableSubnetworksAggregatedList],
],
]:
raise NotImplementedError()
@property
def patch(
self,
) -> Callable[
[compute.PatchSubnetworkRequest],
Union[compute.Operation, Awaitable[compute.Operation]],
]:
raise NotImplementedError()
@property
def set_iam_policy(
self,
) -> Callable[
[compute.SetIamPolicySubnetworkRequest],
Union[compute.Policy, Awaitable[compute.Policy]],
]:
raise NotImplementedError()
@property
def set_private_ip_google_access(
self,
) -> Callable[
[compute.SetPrivateIpGoogleAccessSubnetworkRequest],
Union[compute.Operation, Awaitable[compute.Operation]],
]:
raise NotImplementedError()
@property
def test_iam_permissions(
self,
) -> Callable[
[compute.TestIamPermissionsSubnetworkRequest],
Union[
compute.TestPermissionsResponse, Awaitable[compute.TestPermissionsResponse]
],
]:
raise NotImplementedError()
__all__ = ("SubnetworksTransport",)
| apache-2.0 | 1,072,942,094,841,688,000 | 33.806452 | 103 | 0.621501 | false |
lidavidm/mathics-heroku | mathics/builtin/exptrig.py | 1 | 15655 | # -*- coding: utf8 -*-
"""
Exponential, trigonometric and hyperbolic functions
Mathics basically supports all important trigonometric and hyperbolic functions.
Numerical values and derivatives can be computed; however, most special exact values and simplification
rules are not implemented yet.
"""
from __future__ import with_statement
import sympy
import sympy.mpmath as mpmath
from mathics.builtin.base import Builtin, SympyConstant
from mathics.core.expression import Real, Expression, Integer
from mathics.core.numbers import dps
from mathics.builtin.numeric import get_precision
from mathics.builtin.arithmetic import _MPMathFunction
class Pi(SympyConstant):
u"""
<dl>
<dt>'Pi'
<dd>is the constant \u03c0.
</dl>
>> N[Pi]
= 3.14159265358979324
>> N[Pi, 50]
= 3.1415926535897932384626433832795028841971693993751
>> Attributes[Pi]
= {Constant, Protected, ReadProtected}
"""
sympy_name = 'pi'
def apply_N(self, precision, evaluation):
'N[Pi, precision_]'
precision = get_precision(precision, evaluation)
if precision is not None:
return Real(sympy.pi.n(dps(precision)), p=precision)
class E(SympyConstant):
"""
<dl>
<dt>'E'
<dd>is the constant e.
</dl>
>> N[E]
= 2.71828182845904524
>> N[E, 50]
= 2.7182818284590452353602874713526624977572470937
>> Attributes[E]
= {Constant, Protected, ReadProtected}
#> 5. E
= 13.5914091422952262
"""
sympy_name = 'E'
def apply_N(self, precision, evaluation):
'N[E, precision_]'
precision = get_precision(precision, evaluation)
if precision is not None:
return Real(sympy.E.n(dps(precision)), p=precision)
class GoldenRatio(SympyConstant):
"""
<dl>
<dt>'GoldenRatio'
<dd>is the golden ratio.
</dl>
>> N[GoldenRatio]
= 1.61803398874989485
"""
sympy_name = 'GoldenRatio'
rules = {
'N[GoldenRatio, prec_]': 'N[(1+Sqrt[5])/2, prec]',
}
class Exp(_MPMathFunction):
"""
<dl>
<dt>'Exp[$z$]'
<dd>returns the exponential function of $z$.
</dl>
>> Exp[1]
= E
>> Exp[10.0]
= 22026.4657948067169
>> Exp[x] //FullForm
= Power[E, x]
>> Plot[Exp[x], {x, 0, 3}]
= -Graphics-
"""
rules = {
'Exp[x_]': 'E ^ x',
'Derivative[1][Exp]': 'Exp',
}
class Log(_MPMathFunction):
"""
<dl>
<dt>'Log[$z$]'
<dd>returns the natural logarithm of $z$.
</dl>
>> Log[{0, 1, E, E * E, E ^ 3, E ^ x}]
= {-Infinity, 0, 1, 2, 3, Log[E ^ x]}
>> Log[0.]
= Indeterminate
>> Plot[Log[x], {x, 0, 5}]
= -Graphics-
#> Log[1000] / Log[10] // Simplify
= 3
#> Log[1.4]
= 0.336472236621212931
#> Log[1.4]
= 0.336472236621212931
#> Log[-1.4]
= 0.336472236621212931 + 3.14159265358979324 I
"""
nargs = 2
mpmath_name = 'log'
sympy_name = 'log'
rules = {
'Log[0.]': 'Indeterminate',
'Log[0]': 'DirectedInfinity[-1]',
'Log[1]': '0',
'Log[E]': '1',
'Log[E^x_Integer]': 'x',
'Derivative[1][Log]': '1/#&',
'Log[x_?InexactNumberQ]': 'Log[E, x]',
}
def prepare_sympy(self, leaves):
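        """Swap the arguments so Mathics' Log[b, z] maps onto sympy's log(z, b)."""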
if len(leaves) == 2:
leaves = [leaves[1], leaves[0]]
return leaves
def eval(self, *args):
return mpmath.log(args[1], args[0])
class Log2(Builtin):
"""
<dl>
<dt>'Log2[$z$]'
<dd>returns the base-2 logarithm of $z$.
</dl>
>> Log2[4 ^ 8]
= 16
>> Log2[5.6]
= 2.48542682717024176
>> Log2[E ^ 2]
= 2 / Log[2]
"""
rules = {
'Log2[x_]': 'Log[2, x]',
}
class Log10(Builtin):
"""
<dl>
<dt>'Log10[$z$]'
<dd>returns the base-10 logarithm of $z$.
</dl>
>> Log10[1000]
= 3
>> Log10[{2., 5.}]
= {0.301029995663981195, 0.698970004336018805}
>> Log10[E ^ 3]
= 3 / Log[10]
"""
rules = {
'Log10[x_]': 'Log[10, x]',
}
class Sin(_MPMathFunction):
"""
<dl>
<dt>'Sin[$z$]'
<dd>returns the sine of $z$.
</dl>
>> Sin[0]
= 0
>> Sin[0.5]
= 0.479425538604203
>> Sin[3 Pi]
= 0
>> Sin[1.0 + I]
= 1.29845758141597729 + 0.634963914784736108 I
>> Plot[Sin[x], {x, -Pi, Pi}]
= -Graphics-
"""
mpmath_name = 'sin'
rules = {
'Sin[Pi]': '0',
'Sin[n_Integer*Pi]': '0',
'Sin[(1/2) * Pi]': '1',
'Sin[0]': '0',
'Derivative[1][Sin]': 'Cos[#]&',
}
class Cos(_MPMathFunction):
"""
<dl>
<dt>'Cos[$z$]'
<dd>returns the cosine of $z$.
</dl>
>> Cos[3 Pi]
= -1
"""
mpmath_name = 'cos'
rules = {
'Cos[Pi]': '-1',
'Cos[n_Integer * Pi]': '(-1)^n',
'Cos[(1/2) * Pi]': '0',
'Cos[0]': '1',
'Derivative[1][Cos]': '-Sin[#]&',
}
class Tan(_MPMathFunction):
"""
<dl>
<dt>'Tan[$z$]'
<dd>returns the tangent of $z$.
</dl>
>> Tan[0]
= 0
>> Tan[Pi / 2]
= ComplexInfinity
"""
mpmath_name = 'tan'
rules = {
'Tan[(1/2) * Pi]': 'ComplexInfinity',
'Tan[0]': '0',
'Derivative[1][Tan]': 'Sec[#]^2&',
}
class Sec(_MPMathFunction):
"""
<dl>
<dt>'Sec[$z$]'
<dd>returns the secant of $z$.
</dl>
>> Sec[0]
= 1
>> Sec[1] (* Sec[1] in Mathematica *)
= 1 / Cos[1]
>> Sec[1.]
= 1.85081571768092562
"""
mpmath_name = 'sec'
rules = {
'Derivative[1][Sec]': 'Sec[#] Tan[#]&',
'Sec[0]': '1',
}
def to_sympy(self, expr, **kwargs):
if len(expr.leaves) == 1:
return Expression('Power', Expression('Cos', expr.leaves[0]),
Integer(-1)).to_sympy()
class Csc(_MPMathFunction):
"""
<dl>
<dt>'Csc[$z$]'
<dd>returns the cosecant of $z$.
</dl>
>> Csc[0]
= ComplexInfinity
>> Csc[1] (* Csc[1] in Mathematica *)
= 1 / Sin[1]
>> Csc[1.]
= 1.18839510577812122
"""
mpmath_name = 'csc'
rules = {
'Derivative[1][Csc]': '-Cot[#] Csc[#]&',
'Csc[0]': 'ComplexInfinity',
}
def to_sympy(self, expr, **kwargs):
if len(expr.leaves) == 1:
return Expression('Power', Expression('Sin', expr.leaves[0]),
Integer(-1)).to_sympy()
class Cot(_MPMathFunction):
"""
<dl>
<dt>'Cot[$z$]'
<dd>returns the cotangent of $z$.
</dl>
>> Cot[0]
= ComplexInfinity
>> Cot[1.]
= 0.642092615934330703
"""
mpmath_name = 'cot'
rules = {
'Derivative[1][Cot]': '-Csc[#]^2&',
'Cot[0]': 'ComplexInfinity',
}
class ArcSin(_MPMathFunction):
"""
<dl>
<dt>'ArcSin[$z$]'
<dd>returns the inverse sine of $z$.
</dl>
>> ArcSin[0]
= 0
>> ArcSin[1]
= Pi / 2
"""
sympy_name = 'asin'
mpmath_name = 'asin'
rules = {
'Derivative[1][ArcSin]': '1/Sqrt[1-#^2]&',
'ArcSin[0]': '0',
'ArcSin[1]': 'Pi / 2',
}
class ArcCos(_MPMathFunction):
"""
<dl>
<dt>'ArcCos[$z$]'
<dd>returns the inverse cosine of $z$.
</dl>
>> ArcCos[1]
= 0
>> ArcCos[0]
= Pi / 2
>> Integrate[ArcCos[x], {x, -1, 1}]
= Pi
"""
sympy_name = 'acos'
mpmath_name = 'acos'
rules = {
'Derivative[1][ArcCos]': '-1/Sqrt[1-#^2]&',
'ArcCos[0]': 'Pi / 2',
'ArcCos[1]': '0',
}
class ArcTan(_MPMathFunction):
"""
<dl>
<dt>'ArcTan[$z$]'
<dd>returns the inverse tangent of $z$.
</dl>
>> ArcTan[1]
= Pi / 4
>> ArcTan[1.0]
= 0.78539816339744831
>> ArcTan[-1.0]
= -0.78539816339744831
>> ArcTan[1, 1]
= Pi / 4
#> ArcTan[-1, 1]
= 3 Pi / 4
#> ArcTan[1, -1]
= -Pi / 4
#> ArcTan[-1, -1]
= -3 Pi / 4
#> ArcTan[1, 0]
= 0
#> ArcTan[-1, 0]
= Pi
#> ArcTan[0, 1]
= Pi / 2
#> ArcTan[0, -1]
= -Pi / 2
"""
sympy_name = 'atan'
mpmath_name = 'atan'
rules = {
'ArcTan[1]': 'Pi/4',
'ArcTan[0]': '0',
'Derivative[1][ArcTan]': '1/(1+#^2)&',
'ArcTan[x_?RealNumberQ, y_?RealNumberQ]':
'''If[x == 0, If[y == 0, 0, If[y > 0, Pi/2, -Pi/2]], If[x > 0,
ArcTan[y/x], If[y >= 0, ArcTan[y/x] + Pi, ArcTan[y/x] - Pi]]]''',
}
class ArcSec(_MPMathFunction):
"""
<dl>
<dt>'ArcSec[$z$]'
<dd>returns the inverse secant of $z$.
</dl>
>> ArcSec[1]
= 0
>> ArcSec[-1]
= Pi
"""
sympy_name = ''
mpmath_name = 'asec'
rules = {
'Derivative[1][ArcSec]': '1 / (Sqrt[1 - 1/#^2] * #^2)&',
'ArcSec[0]': 'ComplexInfinity',
'ArcSec[1]': '0',
}
def to_sympy(self, expr, **kwargs):
if len(expr.leaves) == 1:
return Expression('ArcCos', Expression('Power', expr.leaves[0],
Integer(-1))).to_sympy()
class ArcCsc(_MPMathFunction):
"""
<dl>
<dt>'ArcCsc[$z$]'
<dd>returns the inverse cosecant of $z$.
</dl>
>> ArcCsc[1]
= Pi / 2
>> ArcCsc[-1]
= -Pi / 2
"""
sympy_name = ''
mpmath_name = 'acsc'
rules = {
'Derivative[1][ArcCsc]': '-1 / (Sqrt[1 - 1/#^2] * #^2)&',
'ArcCsc[0]': 'ComplexInfinity',
'ArcCsc[1]': 'Pi / 2',
}
def to_sympy(self, expr, **kwargs):
if len(expr.leaves) == 1:
return Expression('ArcSin', Expression('Power', expr.leaves[0],
Integer(-1))).to_sympy()
class ArcCot(_MPMathFunction):
"""
<dl>
<dt>'ArcCot[$z$]'
<dd>returns the inverse cotangent of $z$.
</dl>
>> ArcCot[0]
= Pi / 2
>> ArcCot[1]
= Pi / 4
"""
sympy_name = 'acot'
mpmath_name = 'acot'
rules = {
'Derivative[1][ArcCot]': '-1/(1+#^2)&',
'ArcCot[0]': 'Pi / 2',
'ArcCot[1]': 'Pi / 4',
}
class Sinh(_MPMathFunction):
"""
<dl>
<dt>'Sinh[$z$]'
<dd>returns the hyperbolic sine of $z$.
</dl>
>> Sinh[0]
= 0
"""
mpmath_name = 'sinh'
rules = {
'Derivative[1][Sinh]': 'Cosh[#]&',
}
class Cosh(_MPMathFunction):
"""
<dl>
<dt>'Cosh[$z$]'
<dd>returns the hyperbolic cosine of $z$.
</dl>
>> Cosh[0]
= 1
"""
mpmath_name = 'cosh'
rules = {
'Derivative[1][Cosh]': 'Sinh[#]&',
}
class Tanh(_MPMathFunction):
"""
<dl>
<dt>'Tanh[$z$]'
<dd>returns the hyperbolic tangent of $z$.
</dl>
>> Tanh[0]
= 0
"""
mpmath_name = 'tanh'
rules = {
'Derivative[1][Tanh]': 'Sech[#1]^2&',
}
class Sech(_MPMathFunction):
"""
<dl>
<dt>'Sech[$z$]'
<dd>returns the hyperbolic secant of $z$.
</dl>
>> Sech[0]
= 1
"""
sympy_name = ''
mpmath_name = 'sech'
rules = {
'Derivative[1][Sech]': '-Sech[#1] Tanh[#1]&',
}
def to_sympy(self, expr, **kwargs):
if len(expr.leaves) == 1:
return Expression('Power', Expression('Cosh', expr.leaves[0]),
Integer(-1)).to_sympy()
class Csch(_MPMathFunction):
"""
<dl>
<dt>'Csch[$z$]'
<dd>returns the hyperbolic cosecant of $z$.
</dl>
>> Csch[0]
= ComplexInfinity
"""
sympy_name = ''
mpmath_name = 'csch'
rules = {
'Csch[0]': 'ComplexInfinity',
'Csch[0.]': 'ComplexInfinity',
'Derivative[1][Csch]': '-Coth[#1] Csch[#1]&',
}
def to_sympy(self, expr, **kwargs):
if len(expr.leaves) == 1:
return Expression('Power', Expression('Sinh', expr.leaves[0]),
Integer(-1)).to_sympy()
class Coth(_MPMathFunction):
"""
<dl>
<dt>'Coth[$z$]'
<dd>returns the hyperbolic cotangent of $z$.
</dl>
>> Coth[0]
= ComplexInfinity
"""
mpmath_name = 'coth'
rules = {
'Coth[0]': 'ComplexInfinity',
'Coth[0.]': 'ComplexInfinity',
'Derivative[1][Coth]': '-Csch[#1]^2&',
}
class ArcSinh(_MPMathFunction):
"""
<dl>
<dt>'ArcSinh[$z$]'
<dd>returns the inverse hyperbolic sine of $z$.
</dl>
>> ArcSinh[0]
= 0
>> ArcSinh[0.]
= 0.
>> ArcSinh[1.0]
= 0.881373587019543025
"""
sympy_name = 'asinh'
mpmath_name = 'asinh'
rules = {
'Derivative[1][ArcSinh]': '1/Sqrt[1+#^2]&',
}
class ArcCosh(_MPMathFunction):
"""
<dl>
<dt>'ArcCosh[$z$]'
<dd>returns the inverse hyperbolic cosine of $z$.
</dl>
>> ArcCosh[0]
= I / 2 Pi
>> ArcCosh[0.]
= 0. + 1.57079632679489662 I
>> ArcCosh[0.00000000000000000000000000000000000000]
= 0. + 1.5707963267948966191479842624545426588 I
#> ArcCosh[1.4]
= 0.867014726490565104
"""
sympy_name = 'acosh'
mpmath_name = 'acosh'
rules = {
'ArcCosh[z:0.0]': 'N[I / 2 Pi, Precision[1+z]]',
'Derivative[1][ArcCosh]': '1/(Sqrt[#-1]*Sqrt[#+1])&',
}
class ArcTanh(_MPMathFunction):
"""
<dl>
<dt>'ArcTanh[$z$]'
<dd>returns the inverse hyperbolic tangent of $z$.
</dl>
>> ArcTanh[0]
= 0
>> ArcTanh[1]
= Infinity
>> ArcTanh[0]
= 0
>> ArcTanh[.5 + 2 I]
= 0.0964156202029961672 + 1.12655644083482235 I
>> ArcTanh[2 + I]
= ArcTanh[2 + I]
"""
sympy_name = 'atanh'
mpmath_name = 'atanh'
rules = {
'Derivative[1][ArcTanh]': '1/(1-#^2)&',
}
class ArcSech(_MPMathFunction):
"""
<dl>
<dt>'ArcSech[$z$]'
<dd>returns the inverse hyperbolic secant of $z$.
</dl>
>> ArcSech[0]
= Infinity
>> ArcSech[1]
= 0
>> ArcSech[0.5]
= 1.31695789692481671
"""
sympy_name = ''
mpmath_name = 'asech'
rules = {
'ArcSech[0]': 'Infinity',
'ArcSech[0.]': 'Indeterminate',
'Derivative[1][ArcSech]': '-1 / (# * Sqrt[(1-#)/(1+#)] (1+#)) &',
}
def to_sympy(self, expr, **kwargs):
if len(expr.leaves) == 1:
return Expression('ArcCosh', Expression('Power', expr.leaves[0],
Integer(-1))).to_sympy()
class ArcCsch(_MPMathFunction):
"""
<dl>
<dt>'ArcCsch[$z$]'
<dd>returns the inverse hyperbolic cosecant of $z$.
</dl>
>> ArcCsch[0]
= ComplexInfinity
>> ArcCsch[1.0]
= 0.881373587019543025
"""
sympy_name = ''
mpmath_name = 'acsch'
rules = {
'ArcCsch[0]': 'ComplexInfinity',
'ArcCsch[0.]': 'ComplexInfinity',
'Derivative[1][ArcCsch]': '-1 / (Sqrt[1+1/#^2] * #^2) &',
}
def to_sympy(self, expr, **kwargs):
if len(expr.leaves) == 1:
return Expression('ArcSinh', Expression('Power', expr.leaves[0],
Integer(-1))).to_sympy()
class ArcCoth(_MPMathFunction):
"""
<dl>
<dt>'ArcCoth[$z$]'
<dd>returns the inverse hyperbolic cotangent of $z$.
</dl>
>> ArcCoth[0]
= I / 2 Pi
>> ArcCoth[1]
= Infinity
>> ArcCoth[0.0]
= 0. + 1.57079632679489662 I
>> ArcCoth[0.5]
= 0.549306144334054846 - 1.57079632679489662 I
"""
sympy_name = 'acoth'
mpmath_name = 'acoth'
rules = {
'ArcCoth[z:0.0]': 'N[I / 2 Pi, Precision[1+z]]',
'Derivative[1][ArcCoth]': '1/(1-#^2)&',
}
| gpl-3.0 | -3,177,187,880,955,658,000 | 18.185049 | 103 | 0.477611 | false |
Mariaanisimova/pythonintask | BITs/2014/Shmireychik_S_V/task_7_17.py | 1 | 1249 | #Задача №7. Вариант 17
#Программа в которой компьютер загадывает название одного из пяти космических челноков проекта Спейс шаттл, а игрок должен его угадать.
#Если он угадывает,то получает баллы. Если не угадывает,то отномаются.
#Шмирейчик С.В.
#01.04.2016
import random
SpaceShuttles=('Колумбия','Челленджер','Дискавери','Атлантис','Индевор')
SpaceShuttle=random.randint(0,4)
rand=SpaceShuttles[SpaceShuttle]
Ball=100
print("я загадал одного из пяти космических челноков проекта Спейс шаттл")
#print (rand)
otvet=0
while(otvet)!=(rand):
otvet=input("Введате одно название космических челноков проекта Спейс шаттл")
if(otvet)!=(rand):
print("Вы не угадали. Попробуйте снова.")
Ball/=2
elif(otvet)==(rand):
print("Вы угадали.")
print("ваши баллы:"+str(Ball))
break
input("Нажмите enter для выхода") | apache-2.0 | -695,678,174,551,882,200 | 31.64 | 135 | 0.735399 | false |
cykustcc/bib2reSTcitation | bib2reSTcitation.py | 1 | 2664 | #Author Yukun Chen
#email: [email protected]
#Date: Sept 04 2015
import getopt
import argparse
import sys
import re
from collections import defaultdict
def displaymatch(match):
if match is None:
return None
return '<Match: %r, groups=%r>' % (match.group(), match.groups())
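# Convert a BibTeX file into reStructuredText citation entries of the form
# ".. [key] Author, Title, <other fields>" under a "References" section.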
def bib2rest(input_bibfile,output_txtfile):
print input_bibfile
print output_txtfile
start_pattern = re.compile(r"^(?: |\t)*\@(?:book|article|incollection|inproceedings)\{([a-z0-9]+), *$")
title_pattern = re.compile(r"^(?: |\t)*title=\{([a-zA-Z0-9 ]+)\}(?: |\t)*,(?: |\t)*$")
author_pattern = re.compile(r"^(?: |\t)*author=\{([a-zA-Z0-9 ,;\.\-]+)\}(?: |\t)*,(?: |\t)*$")
other_info_pattern = re.compile(r"^(?: |\t)*(?:journal|volume|number|year|publisher|pages|organization|booktitle)=\{([a-zA-Z0-9 ,;\.-]+)\}(?: |\t)*,(?: |\t)*$")
end_pattern = re.compile("^(?: |\t)*}(?: |\t)*$")
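    # The patterns above expect flat, one-field-per-line entries roughly like this
    # (illustrative example only, not taken from any real .bib file):
    #   @article{doe2014,
    #     title={Some Title},
    #     author={Doe, John},
    #     journal={Some Journal},
    #     year={2014},
    #   }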
with open(input_bibfile,'rb') as input_handle:
with open(output_txtfile,'wb') as output_handle:
in_a_bib_block = False;
rest_ref_block = "";
title = "";
author = "";
ref="";
output_handle.write(".. _references:\n\n==========\nReferences\n==========\n\n")
for line in input_handle:
if not in_a_bib_block:
# not in a bib block
if start_pattern.match(line):
matches = start_pattern.match(line)
in_a_bib_block = True
ref = matches.group(1)
else:
pass
else:
# in a bib block
if end_pattern.match(line):
matches = end_pattern.match(line)
in_a_bib_block = False
rest_ref_block = ".. [" + ref +"]" + " " + author +", " + title +", " + other_info
output_handle.write(rest_ref_block+"\n\n")
elif title_pattern.match(line):
matches = title_pattern.match(line)
title = matches.group(1)
elif author_pattern.match(line):
matches = author_pattern.match(line)
author = matches.group(1)
elif other_info_pattern.match(line):
matches = other_info_pattern.match(line)
other_info = matches.group(1)
rest_ref_block = rest_ref_block + ", "+ other_info
else:
pass
if __name__ == '__main__':
throot = "/".join(sys.path[0].split("/")[:])
parser = argparse.ArgumentParser(description='bib2reSTcitation is a tool to convert bib tex file to reStructuredText Markup citation format.')
parser.add_argument('-o', '--output', help='output file path')
parser.add_argument('-i', '--input', help='input file path')
args = parser.parse_args()
input_file = args.input
if input_file==None:
input_file = 'tex.bib'
output_file = args.output
if output_file==None:
output_file = "references.txt"
bib2rest(input_file,output_file)
| mit | 4,266,559,960,915,698,700 | 32.734177 | 161 | 0.61036 | false |
40323230/Pyslvs-PyQt5 | pyslvs_ui/info/info.py | 1 | 6021 | # -*- coding: utf-8 -*-
"""Information.
+ Module versions.
+ Help descriptions.
+ Check for update function.
"""
__author__ = "Yuan Chang"
__copyright__ = "Copyright (C) 2016-2021"
__license__ = "AGPL"
__email__ = "[email protected]"
from typing import Optional
from importlib.util import find_spec
from enum import auto, IntEnum
from sys import version_info as _vi
from platform import system, release, machine, python_compiler
from argparse import ArgumentParser
from dataclasses import dataclass
from pyslvs import __version__ as _kernel_ver
from pyslvs_ui import __version__
from pyslvs_ui.qt_patch import API
assert _kernel_ver == __version__, (
f"different version between kernel and interface: "
f"{_kernel_ver}, {__version__}")
def has_module(name: str) -> bool:
"""Test module import."""
return find_spec(name) is not None
HAS_SLVS = has_module('python_solvespace')
HAS_SCIPY = has_module('scipy')
SYS_INFO = [
f"Pyslvs {__version__}",
f"OS Type: {system()} {release()} [{machine()}]",
f"Python Version: {_vi.major}.{_vi.minor}.{_vi.micro} ({_vi.releaselevel})",
f"Python Compiler: {python_compiler()}",
f"Qt: {API}",
]
SYS_INFO = tuple(SYS_INFO)
del has_module
class Kernel(IntEnum):
"""Kernel list. Mapped to options."""
PYSLVS = 0
SOLVESPACE = auto() if HAS_SLVS else 0
SKETCH_SOLVE = auto()
SAME_AS_SOLVING = auto()
@property
def title(self) -> str:
"""Reformat enum's names."""
return self.name.capitalize().replace("_", " ")
@dataclass(repr=False, eq=False)
class Arguments:
"""Argument container."""
cmd: Optional[str]
c: str = ""
filepath: str = ""
kernel: str = ""
debug_mode: bool = False
fusion: bool = False
appimage_extract: bool = False
appimage_mount: bool = False
appimage_offset: bool = False
def parse_args() -> Arguments:
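    """Build the Pyslvs command-line parser and return the parsed Arguments."""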
parser = ArgumentParser(
prog='pyslvs',
description=(
f"Pyslvs {__version__} - "
f"Open Source Planar Linkage Mechanism Simulation "
f"and Mechanical Synthesis System"
),
epilog=f"{__copyright__} {__license__} {__author__} {__email__}",
add_help=False
)
main_info = parser.add_argument_group("information options")
main_info.add_argument(
'-v',
'--version',
action='version',
version=SYS_INFO[0]
)
main_info.add_argument(
'-d',
'--debug-mode',
action='store_true',
help="show debug message to stdout, and "
"change the logger from INFO into DEBUG level"
)
s = parser.add_subparsers(title="CLI command", dest='cmd')
s.add_parser(
'test',
help="just test the module import states and exit",
add_help=False
)
gui_cmd = s.add_parser('gui', help="arguments for gui only", add_help=False)
gui_startup = gui_cmd.add_argument_group("startup options")
gui_startup.add_argument(
'filepath',
default="",
nargs='?',
type=str,
help="read a specific project from the file path"
)
gui_startup.add_argument(
'-c',
metavar="start path",
default="",
nargs='?',
type=str,
help="change to specified path when startup Pyslvs"
)
gui_startup.add_argument(
'--kernel',
metavar="kernel",
default='',
nargs='?',
type=str,
choices=['pyslvs', 'python_solvespace', 'sketch_solve'],
help="startup Pyslvs with specified solver, "
"default is depending on local setting"
)
qt = gui_cmd.add_argument_group("Qt options")
qt.add_argument(
'--fusion',
default=False,
action='store_true',
help="run Pyslvs in Fusion style"
)
qt.add_argument(
'--full-screen',
default=False,
action='store_true',
help="start Pyslvs with full-screen mode"
)
qt.add_argument(
'--platform',
metavar="plugins",
default="",
nargs='?',
type=str,
help="startup Pyslvs with specified Qt platform plugins, "
"such as WebGL (webgl:[port])"
)
gui_info = gui_cmd.add_argument_group("information options")
extract_cmd = s.add_parser(
'extract',
help="extract data from a supported file",
add_help=False
)
extract_cmd.add_argument(
'filepath',
default="",
type=str,
help="input file path"
)
extract_info = extract_cmd.add_argument_group("information options")
for group in (main_info, gui_info, extract_info):
group.add_argument(
'-h',
'--help',
action='help',
help="show this help message and exit"
)
if system() == "Linux":
# AppImage options
appimage = parser.add_argument_group(
"AppImage arguments",
"these options only work in package state. "
"Pyslvs is a type 2 package, "
"for more information: https://docs.appimage.org/"
)
appimage.add_argument(
'--appimage-extract',
action='store_true',
help="extract the files of package into a 'squashfs-root' folder"
)
appimage.add_argument(
'--appimage-mount',
action='store_true',
help="temporarily mount entire package into a folder, "
"it can stop by terminating this program"
)
appimage.add_argument(
'--appimage-offset',
action='store_true',
help="obtain offset value of 'mount' command, then mount it with: "
"\"sudo mount PACKAGE MOUNT -o offset=VALUE\""
)
return Arguments(**vars(parser.parse_args()))
KERNELS = [Kernel.PYSLVS, Kernel.SKETCH_SOLVE]
if HAS_SLVS:
KERNELS.insert(1, Kernel.SOLVESPACE)
KERNELS = tuple(KERNELS)
ARGUMENTS = parse_args()
del parse_args
| agpl-3.0 | -4,527,536,843,177,410,000 | 27.808612 | 80 | 0.57914 | false |
apapillon/django-twitsocket | twitsocket/management/commands/lister.py | 1 | 2822 | import json
import urllib2
import urllib
import time
from django.conf import settings
from django.core.management.base import NoArgsCommand
import oauth2 as oauth
from twitsocket.models import Tweet
LIST_MEMBERS = 'https://api.twitter.com/1/%s/members.json' % settings.TWITTER_LIST
def oauth_request(url, method='GET', params={}, data=None):
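    """Build an OAuth1-signed urllib2.Request for the Twitter API.

    Uses the consumer/token credentials from Django settings; GET parameters
    are appended to the URL, POST parameters are folded into the OAuth signature.
    """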
qs = ''
if method == 'GET':
qs = '&'.join(['%s=%s' % (key, value) for key, value in params.items()])
if qs:
url += '?%s' % qs
consumer = oauth.Consumer(secret=settings.CONSUMER_SECRET,
key=settings.CONSUMER_KEY)
token = oauth.Token(secret=settings.TOKEN_SECRET,
key=settings.TOKEN_KEY)
oparams = {
'oauth_version': '1.0',
'oauth_nonce': oauth.generate_nonce(),
'oauth_timestamp': int(time.time()),
'oauth_token': token.key,
'oauth_consumer_key': consumer.key,
}
if method == 'POST':
oparams.update(params)
req = oauth.Request(method=method, url=url, parameters=oparams)
signature_method = oauth.SignatureMethod_HMAC_SHA1()
req.sign_request(signature_method, consumer, token)
if method == 'POST':
return urllib2.Request(url, data, headers=req.to_header())
return urllib2.Request(url, headers=req.to_header())
class Command(NoArgsCommand):
"""Adds all the twitter users to the list"""
def handle_noargs(self, **options):
members = self.get_list_members()
users = self.get_users(Tweet.objects.all())
for u in users:
if u not in members:
print("Adding %s to list" % u)
self.add_to_list(u)
time.sleep(1)
def get_list_members(self):
more_pages = True
members = []
cursor = -1
while more_pages:
request = oauth_request(LIST_MEMBERS, params={'cursor': cursor})
data = urllib2.urlopen(request).read()
payload = json.loads(data)
cursor = payload['next_cursor']
for user in payload['users']:
members.append(user['id'])
more_pages = len(payload['users']) == 20
return members
def get_users(self, queryset):
users = []
for tweet in queryset:
content = tweet.get_content()
if 'retweeted_status' in content:
continue
user_id = content['user']['id']
if user_id not in users:
users.append(user_id)
return users
def add_to_list(self, user_id):
data = urllib.urlencode({'id': user_id})
request = oauth_request(LIST_MEMBERS, method='POST',
params={'id': user_id}, data=data)
response = urllib2.urlopen(request).read()
| bsd-3-clause | -6,183,423,698,888,465,000 | 32.595238 | 82 | 0.576896 | false |
quequino/CloudStream-for-KODI | resources/lib/xfilesharing.py | 1 | 33713 | '''
xfilesharing XBMC Plugin
Copyright (C) 2013-2014 ddurdle
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import cloudservice
import os
import re
import urllib, urllib2
import cookielib
import xbmc, xbmcaddon, xbmcgui, xbmcplugin
# global variables
PLUGIN_NAME = 'plugin.video.cloudstream'
PLUGIN_URL = 'plugin://'+PLUGIN_NAME+'/'
ADDON = xbmcaddon.Addon(id=PLUGIN_NAME)
# helper methods
def log(msg, err=False):
if err:
xbmc.log(ADDON.getAddonInfo('name') + ': ' + msg, xbmc.LOGERROR)
else:
xbmc.log(ADDON.getAddonInfo('name') + ': ' + msg, xbmc.LOGDEBUG)
#
#
#
class xfilesharing(cloudservice.cloudservice):
# magic numbers
MEDIA_TYPE_VIDEO = 1
MEDIA_TYPE_FOLDER = 0
##
    # initialize settings: 1) username, 2) password, 3) authorization token, 4) user agent string
##
def __init__(self, name, domain, user, password, auth, user_agent):
return super(xfilesharing,self).__init__(name, domain, user, password, auth, user_agent)
#return cloudservice.__init__(self,domain, user, password, auth, user_agent)
##
# perform login
##
def login(self):
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cookiejar))
# default User-Agent ('Python-urllib/2.6') will *not* work
opener.addheaders = [('User-Agent', self.user_agent)]
if self.domain == 'uptostream.com':
self.domain = 'uptobox.com'
if 'http://' in self.domain:
url = self.domain
else:
url = 'http://' + self.domain + '/'
values = {
'op' : 'login',
'login' : self.user,
'redirect' : url,
'password' : self.password
}
# try login
try:
response = opener.open(url,urllib.urlencode(values))
except urllib2.URLError, e:
if e.code == 403:
#login denied
xbmcgui.Dialog().ok(ADDON.getLocalizedString(30000), ADDON.getLocalizedString(30017))
log(str(e), True)
return
response_data = response.read()
response.close()
loginResult = False
#validate successful login
for r in re.finditer('my_account',
response_data, re.DOTALL):
loginResult = True
#validate successful login
for r in re.finditer('logout',
response_data, re.DOTALL):
loginResult = True
if (loginResult == False):
xbmcgui.Dialog().ok(ADDON.getLocalizedString(30000), ADDON.getLocalizedString(30017))
log('login failed', True)
return
for cookie in self.cookiejar:
for r in re.finditer(' ([^\=]+)\=([^\s]+)\s',
str(cookie), re.DOTALL):
cookieType,cookieValue = r.groups()
if cookieType == 'xfss':
self.auth = cookieValue
if cookieType == 'xfsts':
self.auth = cookieValue
return
##
    # return the appropriate "headers" for xfilesharing requests, including 1) user agent, 2) authorization cookie
    # returns: dict containing the headers
##
def getHeadersList(self,referer=''):
if ((self.auth != '' or self.auth != 0) and referer == ''):
return { 'User-Agent' : self.user_agent, 'Cookie' : 'lang=english; login='+self.user+'; xfsts='+self.auth+'; xfss='+self.auth+';' }
elif (self.auth != '' or self.auth != 0):
return { 'User-Agent' : self.user_agent, 'Referer': referer, 'Cookie' : 'lang=english; login='+self.user+'; xfsts='+self.auth+'; xfss='+self.auth+';' }
else:
return { 'User-Agent' : self.user_agent }
##
    # return the appropriate "headers" for xfilesharing requests, including 1) user agent, 2) authorization cookie
# returns: URL-encoded header string
##
def getHeadersEncoded(self, referer=''):
return urllib.urlencode(self.getHeadersList(referer))
##
# retrieve a list of videos, using playback type stream
# parameters: prompt for video quality (optional), cache type (optional)
# returns: list of videos
##
def getVideosList(self, folderID=0, cacheType=0):
if 'http://' in self.domain:
url = self.domain
else:
url = 'http://' + self.domain
if 'streamcloud.eu' in self.domain:
url = url + '/'
# retrieve all documents
if folderID == 0:
url = url+'?op=my_files'
else:
url = url+'?op=my_files&fld_id='+folderID
videos = {}
if True:
req = urllib2.Request(url, None, self.getHeadersList())
# if action fails, validate login
try:
response = urllib2.urlopen(req)
except urllib2.URLError, e:
if e.code == 403 or e.code == 401:
self.login()
req = urllib2.Request(url, None, self.getHeadersList())
try:
response = urllib2.urlopen(req)
except urllib2.URLError, e:
log(str(e), True)
return
else:
log(str(e), True)
return
response_data = response.read()
response.close()
for r in re.finditer('placeholder\=\"(Username)\" id\=i\"(nputLoginEmail)\" name\=\"login\"' ,
response_data, re.DOTALL):
loginUsername,loginUsernameName = r.groups()
self.login()
req = urllib2.Request(url, None, self.getHeadersList())
try:
response = urllib2.urlopen(req)
except urllib2.URLError, e:
log(str(e), True)
return
response_data = response.read()
response.close()
# parsing page for videos
# video-entry
for r in re.finditer('<a id="([^\"]+)" href="([^\"]+)">([^\<]+)</a>' ,
response_data, re.DOTALL):
fileID,url,fileName = r.groups()
# streaming
videos[fileName] = {'url': 'plugin://plugin.video.cloudstream?mode=streamURL&instance='+self.instanceName+'&url=' + url, 'mediaType' : self.MEDIA_TYPE_VIDEO}
for r in re.finditer('<input type="checkbox" name="file_id".*?<a href="([^\"]+)">([^\<]+)</a>' ,
response_data, re.DOTALL):
url,fileName = r.groups()
# streaming
videos[fileName] = {'url': 'plugin://plugin.video.cloudstream?mode=streamURL&instance='+self.instanceName+'&url=' + url, 'mediaType' : self.MEDIA_TYPE_VIDEO}
# video-entry - bestream
for r in re.finditer('<TD align=left>[^\<]+<a href="([^\"]+)">([^\<]+)</a>' ,
response_data, re.DOTALL):
url,fileName = r.groups()
# streaming
videos[fileName] = {'url': 'plugin://plugin.video.cloudstream?mode=streamURL&instance='+self.instanceName+'&url=' + url, 'mediaType' : self.MEDIA_TYPE_VIDEO}
# video-entry - uptobox
for r in re.finditer('<td style="[^\"]+"><a href="([^\"]+)".*?>([^\<]+)</a></td>' ,
response_data, re.DOTALL):
url,fileName = r.groups()
# streaming
videos[fileName] = {'url': 'plugin://plugin.video.cloudstream?mode=streamURL&instance='+self.instanceName+'&url=' + url, 'mediaType' : self.MEDIA_TYPE_VIDEO}
if 'realvid.net' in self.domain:
            for r in re.finditer('<a href="([^\"]+)">([^\<]+)</a>\s+</TD>' ,
                response_data, re.DOTALL):
                url,fileName = r.groups()
                # streaming
                videos[fileName] = {'url': 'plugin://plugin.video.cloudstream?mode=streamURL&instance='+self.instanceName+'&url=' + url, 'mediaType' : self.MEDIA_TYPE_VIDEO}
#flatten folders (no clean way of handling subfolders, so just make the root list all folders & subfolders
#therefore, skip listing folders if we're not in root
# if folderID == 0:
# folder-entry
# for r in re.finditer('<a href=".*?fld_id=([^\"]+)"><b>([^\<]+)</b></a>' ,
# folderID = 0
# for r in re.finditer('<option value="(\d\d+)">([^\<]+)</option>' ,
# response_data, re.DOTALL):
# folderID,folderName = r.groups()
#remove from folderName
# folderName = re.sub('\ \;', '', folderName)
# folder
# if int(folderID) != 0:
# videos[folderName] = {'url': 'plugin://plugin.video.cloudstream?mode=folder&instance='+self.instanceName+'&folderID=' + folderID, 'mediaType' : self.MEDIA_TYPE_FOLDER}
# if folderID == 0:
for r in re.finditer('<a href=".*?fld_id=([^\"]+)"><b>([^\<]+)</b></a>' ,
response_data, re.DOTALL):
folderID,folderName = r.groups()
# folder
if int(folderID) != 0 and folderName != ' . . ':
videos[folderName] = {'url': 'plugin://plugin.video.cloudstream?mode=folder&instance='+self.instanceName+'&folderID=' + folderID, 'mediaType' : self.MEDIA_TYPE_FOLDER}
return videos
##
# retrieve a video link
# parameters: title of video, whether to prompt for quality/format (optional), cache type (optional)
# returns: list of URLs for the video or single URL of video (if not prompting for quality)
##
def getPublicLink(self,url,cacheType=0):
fname = ''
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cookiejar))
opener.addheaders = [ ('User-Agent' , self.user_agent)]
req = urllib2.Request(url)
try:
response = opener.open(req)
        except urllib2.URLError, e:
            log(str(e), True)
            return ('', '')
        response.close()
url = response.url
# opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cookiejar), MyHTTPErrorProcessor)
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cookiejar))
opener.addheaders = [ ('User-Agent' , self.user_agent), ('Referer', url), ('Cookie', 'lang=english; login='+self.user+'; xfsts='+self.auth+'; xfss='+self.auth+';')]
req = urllib2.Request(url)
# if action fails, validate login
try:
response = opener.open(req)
except urllib2.URLError, e:
if e.code == 403 or e.code == 401:
self.login()
req = urllib2.Request(url, None, self.getHeadersList())
try:
response = opener.open(req)
except urllib2.URLError, e:
log(str(e), True)
return ('','')
else:
log(str(e), True)
return ('','')
response_data = response.read()
response.close()
for r in re.finditer('\<title\>([^\<]+)\<',
response_data, re.DOTALL | re.I):
title = r.group(1)
if fname == '':
fname = title
url = response.url
req = urllib2.Request(url)
for r in re.finditer('name\=\"(code)\" class\=\"(captcha_code)' ,
response_data, re.DOTALL):
loginUsername,loginUsernameName = r.groups()
self.login()
req = urllib2.Request(url, None, self.getHeadersList())
try:
response = urllib2.urlopen(req)
except urllib2.URLError, e:
log(str(e), True)
return ('','')
response_data = response.read()
response.close()
if self.domain == 'vidzi.tv':
for r in re.finditer('(file)\: \"([^\"]+)\.mp4\"' ,response_data, re.DOTALL):
streamType,streamURL = r.groups()
return (streamURL + '.mp4', fname)
confirmID = 0
values = {}
# fetch video title, download URL and docid for stream link
for r in re.finditer('<input type="hidden" name="op" value="([^\"]+)">.*?<input type="hidden" name="usr_login" value="([^\"]*)">.*?<input type="hidden" name="id" value="([^\"]+)">.*?<input type="hidden" name="fname" value="([^\"]*)">.*?<input type="hidden" name="referer" value="([^\"]*)">' ,response_data, re.DOTALL):
op,usr_login,id,fname,referer = r.groups()
values = {
'op' : op,
'usr_login' : usr_login,
'id' : id,
'fname' : fname,
'referer' : referer,
'method_free' : 'Free Download'
}
for r in re.finditer('<input type="hidden" name="op" value="([^\"]+)">.*?<input type="hidden" name="usr_login" value="([^\"]*)">.*?<input type="hidden" name="id" value="([^\"]+)">.*?<input type="hidden" name="fname" value="([^\"]*)">.*?<input type="hidden" name="referer" value="([^\"]*)">.*?<input type="hidden" name="hash" value="([^\"]*)">.*?<input type="submit" name="imhuman" value="([^\"]*)" id="btn_download">' ,response_data, re.DOTALL):
op,usr_login,id,fname,referer,hash,submit = r.groups()
values = {
'op' : op,
'usr_login' : usr_login,
'id' : id,
'fname' : fname,
'referer' : referer,
'hash' : hash,
'imhuman' : submit
}
for r in re.finditer('<input type="hidden" name="op" value="([^\"]+)">.*?<input type="hidden" name="usr_login" value="([^\"]*)">.*?<input type="hidden" name="id" value="([^\"]+)">.*?<input type="hidden" name="fname" value="([^\"]*)">.*?<input type="hidden" name="referer" value="([^\"]*)">.*?<input type="hidden" name="hash" value="([^\"]*)">.*?<input type="hidden" name="inhu" value="([^\"]*)">.*?<input type="submit" name="imhuman" value="([^\"]*)" id="btn_download">' ,response_data, re.DOTALL):
op,usr_login,id,fname,referer,hash,inhu,submit = r.groups()
values = {
'_vhash' : 'i1102394cE',
'gfk' : 'i22abd2449',
'op' : op,
'usr_login' : usr_login,
'id' : id,
'fname' : fname,
'referer' : referer,
'hash' : hash,
'inhu' : inhu,
'imhuman' : submit
}
for r in re.finditer('<input type="hidden" name="op" value="([^\"]+)">.*?<input type="hidden" name="id" value="([^\"]+)">.*?<input type="hidden" name="referer" value="([^\"]*)">.*?<input type="hidden" name="method_free" value="([^\"]*)">' ,response_data, re.DOTALL):
op,id,referer,submit = r.groups()
values = {
'op' : op,
'id' : id,
'referer' : referer,
'method_free' : submit,
'download_direct' : 1
}
for r in re.finditer('<input type="hidden" name="op" value="([^\"]+)">.*?<input type="hidden" name="id" value="([^\"]+)">.*?<input type="hidden" name="rand" value="([^\"]*)">.*?<input type="hidden" name="referer" value="([^\"]*)">.*?<input type="hidden" name="method_free" value="([^\"]*)">' ,response_data, re.DOTALL):
op,id,rand,referer,submit = r.groups()
values = {
'op' : op,
'id' : id,
'rand' : rand,
'referer' : referer,
'method_free' : submit,
'download_direct' : 1
}
for r in re.finditer('<input type="hidden" name="ipcount_val" id="ipcount_val" value="([^\"]+)">.*?<input type="hidden" name="op" value="([^\"]+)">.*? <input type="hidden" name="usr_login" value="([^\"]*)">.*?<input type="hidden" name="id" value="([^\"]+)">.*?<input type="hidden" name="fname" value="([^\"]*)">.*?<input type="hidden" name="referer" value="([^\"]*)">' ,response_data, re.DOTALL):
ipcount,op,usr_login,id,fname,referer = r.groups()
values = {
'ipcount_val' : ipcount,
'op' : op,
'usr_login' : usr_login,
'id' : id,
'fname' : fname,
'referer' : referer,
'method_free' : 'Slow access'
}
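        # Generic fallback: re-collect the hidden form fields one at a time,
        # since each XFS-based host exposes a different subset of these inputs.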
values = {}
variable = 'op'
for r in re.finditer('<input type="(hidden)" name="'+variable+'" value="([^\"]*)">' ,response_data, re.DOTALL):
hidden,value = r.groups()
values[variable] = value
variable = 'usr_login'
for r in re.finditer('<input type="(hidden)" name="'+variable+'" value="([^\"]*)">' ,response_data, re.DOTALL):
hidden,value = r.groups()
values[variable] = value
variable = 'id'
for r in re.finditer('<input type="(hidden)" name="'+variable+'" value="([^\"]*)">' ,response_data, re.DOTALL):
hidden,value = r.groups()
values[variable] = value
variable = 'fname'
for r in re.finditer('<input type="(hidden)" name="'+variable+'" value="([^\"]*)">' ,response_data, re.DOTALL):
hidden,value = r.groups()
values[variable] = value
variable = 'referer'
for r in re.finditer('<input type="(hidden)" name="'+variable+'" value="([^\"]*)">' ,response_data, re.DOTALL):
hidden,value = r.groups()
values[variable] = value
variable = 'hash'
for r in re.finditer('<input type="(hidden)" name="'+variable+'" value="([^\"]*)">' ,response_data, re.DOTALL):
hidden,value = r.groups()
values[variable] = value
variable = 'inhu'
for r in re.finditer('<input type="(hidden)" name="'+variable+'" value="([^\"]*)">' ,response_data, re.DOTALL):
hidden,value = r.groups()
values[variable] = value
variable = 'method_free'
for r in re.finditer('<input type="(hidden)" name="'+variable+'" value="([^\"]*)">' ,response_data, re.DOTALL):
hidden,value = r.groups()
values[variable] = value
variable = 'method_premium'
for r in re.finditer('<input type="(hidden)" name="'+variable+'" value="([^\"]*)">' ,response_data, re.DOTALL):
hidden,value = r.groups()
values[variable] = value
variable = 'rand'
for r in re.finditer('<input type="(hidden)" name="'+variable+'" value="([^\"]*)">' ,response_data, re.DOTALL):
hidden,value = r.groups()
values[variable] = value
variable = 'down_direct'
for r in re.finditer('<input type="(hidden)" name="'+variable+'" value="([^\"]*)">' ,response_data, re.DOTALL):
hidden,value = r.groups()
values[variable] = value
variable = 'file_size_real'
for r in re.finditer('<input type="(hidden)" name="'+variable+'" value="([^\"]*)">' ,response_data, re.DOTALL):
hidden,value = r.groups()
values[variable] = value
variable = 'imhuman'
for r in re.finditer('<input type="(submit)" name="'+variable+'" value="([^\"]*)" id="btn_download">' ,response_data, re.DOTALL):
hidden,value = r.groups()
values[variable] = value
variable = 'gfk'
for r in re.finditer('(name): \''+variable+'\', value: \'([^\']*)\'' ,response_data, re.DOTALL):
hidden,value = r.groups()
values[variable] = value
variable = '_vhash'
for r in re.finditer('(name): \''+variable+'\', value: \'([^\']*)\'' ,response_data, re.DOTALL):
hidden,value = r.groups()
values[variable] = value
# values['referer'] = ''
for r in re.finditer('<input type="hidden" name="op" value="([^\"]+)">.*?<input type="hidden" name="id" value="([^\"]+)">.*?<input type="hidden" name="rand" value="([^\"]*)">.*?<input type="hidden" name="referer" value="([^\"]*)">.*?<input type="hidden" name="plugins_are_not_allowed" value="([^\"]+)"/>.*?<input type="hidden" name="method_free" value="([^\"]*)">' ,response_data, re.DOTALL):
op,id,rand,referer,plugins,submit = r.groups()
values = {
'op' : op,
'id' : id,
'rand' : rand,
'referer' : referer,
'plugins_are_not_allowed' : plugins,
'method_free' : submit,
'download_direct' : 1
}
# req = urllib2.Request(url, urllib.urlencode(values), self.getHeadersList(url))
req = urllib2.Request(url)
if self.domain == 'thefile.me':
values['method_free'] = 'Free Download'
elif self.domain == 'sharesix.com':
values['method_free'] = 'Free'
elif 'streamcloud.eu' in self.domain:
xbmcgui.Dialog().ok(ADDON.getLocalizedString(30000), ADDON.getLocalizedString(30037) + str(10))
xbmc.sleep((int(10)+1)*1000)
elif self.domain == 'vidhog.com':
xbmcgui.Dialog().ok(ADDON.getLocalizedString(30000), ADDON.getLocalizedString(30037) + str(15))
xbmc.sleep((int(15)+1)*1000)
elif self.domain == 'vidto.me':
xbmcgui.Dialog().ok(ADDON.getLocalizedString(30000), ADDON.getLocalizedString(30037) + str(6))
xbmc.sleep((int(6)+1)*1000)
elif self.domain == 'vodlocker.com':
xbmcgui.Dialog().ok(ADDON.getLocalizedString(30000), ADDON.getLocalizedString(30037) + str(3))
xbmc.sleep((int(3)+1)*1000)
elif self.domain == 'hcbit.com':
try:
# response = urllib2.urlopen(req)
response = opener.open(req, urllib.urlencode(values))
except urllib2.URLError, e:
if e.code == 403 or e.code == 401:
self.login()
try:
response = opener.open(req, urllib.urlencode(values))
except urllib2.URLError, e:
log(str(e), True)
return ('', '')
else:
log(str(e), True)
return ('', '')
try:
if response.info().getheader('Location') != '':
return (response.info().getheader('Location') + '|' + self.getHeadersEncoded(url), fname)
except:
for r in re.finditer('\'(file)\'\,\'([^\']+)\'' ,response_data, re.DOTALL):
streamType,streamURL = r.groups()
return (streamURL + '|' + self.getHeadersEncoded(url), fname)
for r in re.finditer('\<td (nowrap)\>([^\<]+)\<\/td\>' ,response_data, re.DOTALL):
deliminator,fileName = r.groups()
for r in re.finditer('(\|)([^\|]{42})\|' ,response_data, re.DOTALL):
deliminator,fileID = r.groups()
streamURL = 'http://cloud1.hcbit.com/cgi-bin/dl.cgi/'+fileID+'/'+fileName
return (streamURL + '|' + self.getHeadersEncoded(url), fname)
if self.domain == 'bestreams.net':
file_id = ''
aff = ''
variable = 'file_id'
for r in re.finditer('\''+variable+'\', (\')([^\']*)\'' ,response_data, re.DOTALL):
hidden,value = r.groups()
file_id = value
variable = 'aff'
for r in re.finditer('\''+variable+'\', (\')([^\']*)\'' ,response_data, re.DOTALL):
hidden,value = r.groups()
aff = value
xbmc.sleep((int(2)+1)*1000)
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cookiejar))
opener.addheaders = [ ('User-Agent' , self.user_agent), ('Referer', url), ('Cookie', 'lang=1; file_id='+file_id+'; aff='+aff+';')]
elif self.domain == 'thevideo.me':
for r in re.finditer('\,\s+\'file\'\s+\:\s+\'([^\']+)\'',
response_data, re.DOTALL):
streamURL = r.group(1)
return (streamURL,fname)
elif self.domain == 'vidzi.tv':
for r in re.finditer('\s+file:\s+\"([^\"]+)\"',
response_data, re.DOTALL):
streamURL = r.group(1)
return (streamURL,fname)
# if action fails, validate login
try:
# response = urllib2.urlopen(req)
response = opener.open(req, urllib.urlencode(values))
except urllib2.URLError, e:
if e.code == 403 or e.code == 401:
self.login()
try:
response = opener.open(req, urllib.urlencode(values))
except urllib2.URLError, e:
log(str(e), True)
return ('','')
else:
log(str(e), True)
return ('','')
response_data = response.read()
response.close()
op=''
for r in re.finditer('<input type="hidden" name="op" value="([^\"]+)">.*?<input type="hidden" name="id" value="([^\"]+)">.*?<input type="hidden" name="rand" value="([^\"]*)">.*?<input type="hidden" name="referer" value="([^\"]*)">.*?<input type="hidden" name="method_free" value="([^\"]*)">' ,response_data, re.DOTALL):
op,id,rand,referer,submit = r.groups()
values = {
'op' : op,
'id' : id,
'rand' : rand,
'referer' : referer,
'method_free' : submit,
'download_direct' : 1
}
streamURL=''
title = ''
for r in re.finditer('\<(title)\>([^\>]*)\<\/title\>' ,response_data, re.DOTALL):
titleID,title = r.groups()
# for thefile
if self.domain == 'thefile.me':
downloadAddress = ''
for r in re.finditer('\<(img) src\=\"http\:\/\/([^\/]+)\/[^\"]+\" style' ,response_data, re.DOTALL):
downloadTag,downloadAddress = r.groups()
for r in re.finditer('(\|)([^\|]{56})\|' ,response_data, re.DOTALL):
deliminator,fileID = r.groups()
streamURL = 'http://'+str(downloadAddress)+'/d/'+fileID+'/video.mp4'
elif self.domain == 'sharerepo.com':
for r in re.finditer('(file)\: \'([^\']+)\'\,' ,response_data, re.DOTALL):
streamType,streamURL = r.groups()
for r in re.finditer('(\|)([^\|]{60})\|' ,response_data, re.DOTALL):
deliminator,fileID = r.groups()
streamURL = 'http://37.48.80.43/d/'+fileID+'/video.mp4?start=0'
elif self.domain == 'filenuke.com':
for r in re.finditer('(\|)([^\|]{56})\|' ,response_data, re.DOTALL):
deliminator,fileID = r.groups()
streamURL = 'http://37.252.3.244/d/'+fileID+'/video.flv?start=0'
elif self.domain == 'sharerepo.com':
for r in re.finditer('(file)\: \'([^\']+)\'\,' ,response_data, re.DOTALL):
streamType,streamURL = r.groups()
elif self.domain == 'letwatch.us':
for r in re.finditer('\[IMG\]http://([^\/]+)\/',
response_data, re.DOTALL):
IP = r.group(1)
for r in re.finditer('\|([^\|]{60})\|',
response_data, re.DOTALL):
fileID = r.group(1)
streamURL = 'http://'+IP+'/'+fileID+'/v.flv'
elif self.domain == 'thevideo.me':
for r in re.finditer('\,\s+\'file\'\s+\:\s+\'([^\']+)\'',
response_data, re.DOTALL):
streamURL = r.group(1)
elif self.domain == 'vidto.me':
for r in re.finditer('var file_link = \'([^\']+)\'',
response_data, re.DOTALL):
streamURL = r.group(1)
elif self.domain == 'allmyvideos.net':
for r in re.finditer('\"file\" : \"([^\"]+)\"',
response_data, re.DOTALL):
streamURL = r.group(1)
elif self.domain == 'realvid.net':
for r in re.finditer('file:\s?\'([^\']+)\'',
response_data, re.DOTALL):
streamURL = r.group(1)
elif self.domain == 'uptobox.com' or self.domain == 'uptostream.com':
for r in re.finditer('\<a href\=\"([^\"]+)\"\>\s+\<span class\=\"button_upload green\"\>',
response_data, re.DOTALL):
streamURL = r.group(1)
return (streamURL, fname)
for r in re.finditer('\<source src=\'([^\']+)\'',
response_data, re.DOTALL):
streamURL = r.group(1)
return (streamURL, fname)
timeout = 0
if op != "" and streamURL == '':
for r in re.finditer('Wait<strong><span id="(.*?)">(\d+)</span> seconds</strong>' ,response_data, re.DOTALL):
id,timeout = r.groups()
for r in re.finditer('<p class="(err)"><center><b>(.*?)</b>' ,response_data, re.DOTALL):
id,error = r.groups()
xbmcgui.Dialog().ok(ADDON.getLocalizedString(30000), error)
return ('','')
req = urllib2.Request(url)
if timeout > 0:
xbmcgui.Dialog().ok(ADDON.getLocalizedString(30000), ADDON.getLocalizedString(30037) + str(timeout))
xbmc.sleep((int(timeout)+1)*1000)
# if action fails, validate login
try:
response = opener.open(req, urllib.urlencode(values))
except urllib2.URLError, e:
if e.code == 403 or e.code == 401:
self.login()
try:
response = opener.open(req, urllib.urlencode(values))
except urllib2.URLError, e:
log(str(e), True)
return ('','')
else:
log(str(e), True)
return ('','')
response_data = response.read()
response.close()
for r in re.finditer('<a href="([^\"]+)">(Click here to start your download)</a>' ,response_data, re.DOTALL):
streamURL,downloadlink = r.groups()
#vodlocker.com
if streamURL == '':
# fetch video title, download URL and docid for stream link
for r in re.finditer('(file)\: \"([^\"]+)"\,' ,response_data, re.DOTALL):
streamType,streamURL = r.groups()
if 'mp4' in streamURL:
break
# mightyupload.com
if streamURL == '':
# fetch video title, download URL and docid for stream link
for r in re.finditer('var (file_link) = \'([^\']+)\'' ,response_data, re.DOTALL):
streamType,streamURL = r.groups()
# vidhog.com
if streamURL == '':
# fetch video title, download URL and docid for stream link
for r in re.finditer('(product_download_url)=([^\']+)\'' ,response_data, re.DOTALL):
streamType,streamURL = r.groups()
# vidspot.net
if streamURL == '':
# fetch video title, download URL and docid for stream link
for r in re.finditer('"(file)" : "([^\"]+)"\,' ,response_data, re.DOTALL):
streamType,streamURL = r.groups()
# uploadc.com
if streamURL == '':
# fetch video title, download URL and docid for stream link
for r in re.finditer('\'(file)\',\'([^\']+)\'\)\;' ,response_data, re.DOTALL):
streamType,streamURL = r.groups()
streamURL = streamURL + '|' + self.getHeadersEncoded(url)
# return 'http://93.120.27.101:8777/pgjtbhuu6coammfvg5gfae6xogigs5cw6gsx3ey7yt6hmihwhpcixuiaqmza/v.mp4'
return (streamURL, fname)
class MyHTTPErrorProcessor(urllib2.HTTPErrorProcessor):
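    """HTTP error processor that passes 302 responses through unchanged instead of following the redirect."""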
def http_response(self, request, response):
code, msg, hdrs = response.code, response.msg, response.info()
# only add this line to stop 302 redirection.
if code == 302: return response
if not (200 <= code < 300):
response = self.parent.error(
'http', request, response, code, msg, hdrs)
return response
https_response = http_response
| gpl-2.0 | -3,813,098,866,034,628,600 | 39.423261 | 506 | 0.497434 | false |
esarafianou/rupture | backend/breach/models/round.py | 1 | 4368 | from __future__ import unicode_literals
from django.db import models
from django.core.exceptions import ValidationError
from breach.analyzer import decide_next_world_state
from itertools import groupby
class Round(models.Model):
class Meta:
unique_together = (('victim', 'index'),)
def check_block_align(self):
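        """Return the block_align flag, caching it from the victim's target on first access."""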
try:
return self.block_align
except AttributeError:
self.block_align = self.victim.target.block_align
return self.block_align
def check_huffman_pool(self):
try:
return self.huffman_pool
except AttributeError:
self.huffman_pool = self.victim.target.huffman_pool
return self.huffman_pool
def get_method(self):
try:
return self.method
except AttributeError:
self.method = self.victim.target.method
return self.method
def clean(self):
if not self.knownsecret.startswith(self.victim.target.prefix):
raise ValidationError('Knownsecret must start with known target prefix')
if not set(self.knownalphabet) <= set(self.victim.target.alphabet):
raise ValidationError("Knownalphabet must be a subset of target's alphabet")
    # Get the stored samplesets of the round.
    # For each batch of the round, decide the next world state based on the
    # samplesets collected for that batch and save the results in batch_info.
    # Each element of batch_info contains the details for one batch.
def fetch_per_batch_info(self):
samplesets = self.sampleset_set.filter(round_id=self.id, success=True).order_by('batch')
batch_info = []
batches = groupby(samplesets, lambda x: x.batch)
for batch, batch_samplesets in batches:
list_batch_samplesets = list(batch_samplesets)
print batch, list_batch_samplesets
decision = decide_next_world_state(list_batch_samplesets)
batch_details = {
'round': self.index,
'knownsecret': self.knownsecret,
'batch': batch,
'alignmentalphabet': list_batch_samplesets[0].alignmentalphabet,
'possible_knownprefix': decision['state']['knownsecret'],
'confidence': decision['confidence']
}
batch_info.append(batch_details)
return batch_info
victim = models.ForeignKey('breach.Victim')
index = models.IntegerField(
default=1,
        help_text=('Which round of the attack this is. The first round has '
                   'index 1.')
)
batch = models.IntegerField(
default=0,
help_text=('Which batch of the round is currently being attempted. '
'A new batch starts any time samplesets for the round '
'are created, either because the round is starting or '
                   'because not enough confidence was built.')
)
maxroundcardinality = models.IntegerField(
default=1,
help_text=('The maximum amount of symbols that will be tested in this '
'round by a single sampleset. '
'This can be larger or equal to the current sampleset\'s '
'candidatealphabet length, as other samplesets may not '
'have the same candidatealphabet length. This discrepancy '
'occurs when the target alphabet is not a perfect power of '
'2.')
)
minroundcardinality = models.IntegerField(
default=1,
help_text=('The minimum amount of symbols that will be tested in this '
'round by a single sampleset. '
'This can be less or equal to the current sampleset\'s '
'candidatealphabet length.')
)
amount = models.IntegerField(
default=1,
help_text='Number of samples contained in each sampleset of this round.'
)
# sampleset knownstate: knownsecret and knownalphabet
knownsecret = models.CharField(
default='',
max_length=255,
help_text='Known secret before the sample set was collected'
)
knownalphabet = models.CharField(
max_length=255,
help_text='The candidate alphabet for the next unknown character'
)
| mit | 3,845,078,980,088,209,000 | 37.654867 | 96 | 0.619963 | false |
google-research/recsim | recsim/environments/interest_exploration_test.py | 1 | 1847 | # coding=utf-8
# coding=utf-8
# Copyright 2019 The RecSim Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for recsim.environments.interest_exploration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from recsim.environments import interest_exploration
import tensorflow.compat.v1 as tf
class InterestExplorationTest(tf.test.TestCase):
def setUp(self):
super(InterestExplorationTest, self).setUp()
self._num_topics = 2
env_config = {
'num_candidates': 20,
'slate_size': 2,
'resample_documents': False,
'seed': 1,
}
self._env = interest_exploration.create_environment(env_config)
def test_step(self):
self._env.seed(0)
obs0 = self._env.reset()
self.assertEqual((0,), obs0['user'].shape)
slate = np.array([0, 1])
obs, reward, done, _ = self._env.step(slate)
doc_obs0 = list(obs0['doc'].values())
doc_obs = list(obs['doc'].values())
for i, resp in enumerate(obs['response']):
self.assertFalse(resp['click'])
self.assertEqual(doc_obs0[i]['cluster_id'], resp['cluster_id'])
self.assertEqual(doc_obs[i]['cluster_id'], resp['cluster_id'])
self.assertEqual(0, reward)
self.assertFalse(done)
if __name__ == '__main__':
tf.test.main()
| apache-2.0 | 1,407,724,409,636,544,300 | 31.982143 | 74 | 0.685436 | false |
manuelli/director | src/python/director/visualization.py | 1 | 39578 | import director.objectmodel as om
import director.applogic as app
from shallowCopy import shallowCopy
import director.vtkAll as vtk
from director import transformUtils
from director import callbacks
from director import frameupdater
from director.fieldcontainer import FieldContainer
from PythonQt import QtCore, QtGui
import numpy as np
import os
import colorsys
import weakref
import itertools
class PolyDataItem(om.ObjectModelItem):
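    '''Object model item wrapping a vtkPolyData with a mapper/actor pair.

    Exposes display properties (color, alpha, point size, surface mode,
    per-array scalar coloring and an optional scalar bar) and can be added
    to several render views at once.
    '''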
defaultScalarRangeMap = {
# 'intensity' : (400, 4000),
'spindle_angle' : (0, 360),
'azimuth' : (-2.5, 2.5),
'scan_delta' : (0.0, 0.3),
'point distance to plane' : (-0.2, 0.2),
'normal angle to plane' : (0.0, 10.0),
}
def __init__(self, name, polyData, view):
om.ObjectModelItem.__init__(self, name, om.Icons.Robot)
self.views = []
self.polyData = polyData
self.mapper = vtk.vtkPolyDataMapper()
self.mapper.SetInput(self.polyData)
self.actor = vtk.vtkActor()
self.actor.SetMapper(self.mapper)
self.shadowActor = None
self.scalarBarWidget = None
self.extraViewRenderers = {}
self.rangeMap = dict(PolyDataItem.defaultScalarRangeMap)
self.addProperty('Color By', 0, attributes=om.PropertyAttributes(enumNames=['Solid Color']))
self.addProperty('Visible', True)
self.addProperty('Alpha', 1.0,
attributes=om.PropertyAttributes(decimals=2, minimum=0, maximum=1.0, singleStep=0.1, hidden=False))
self.addProperty('Point Size', self.actor.GetProperty().GetPointSize(),
attributes=om.PropertyAttributes(decimals=0, minimum=1, maximum=20, singleStep=1, hidden=False))
self.addProperty('Surface Mode', 0,
attributes=om.PropertyAttributes(enumNames=['Surface', 'Wireframe', 'Surface with edges', 'Points'], hidden=True))
self.addProperty('Color', [1.0, 1.0, 1.0])
self.addProperty('Show Scalar Bar', False)
self._updateSurfaceProperty()
self._updateColorByProperty()
if view is not None:
self.addToView(view)
def _renderAllViews(self):
for view in self.views:
view.render()
def hasDataSet(self, dataSet):
return dataSet == self.polyData
def setPolyData(self, polyData):
self.polyData = polyData
self.mapper.SetInput(polyData)
self._updateSurfaceProperty()
self._updateColorByProperty()
self._updateColorBy(retainColorMap=True)
if self.getProperty('Visible'):
self._renderAllViews()
def setRangeMap(self, key, value):
self.rangeMap[key] = value
def getArrayNames(self):
pointData = self.polyData.GetPointData()
return [pointData.GetArrayName(i) for i in xrange(pointData.GetNumberOfArrays())]
def setSolidColor(self, color):
self.setProperty('Color', [float(c) for c in color])
self.colorBy(None)
def _isPointCloud(self):
return self.polyData.GetNumberOfPoints() and (self.polyData.GetNumberOfCells() == self.polyData.GetNumberOfVerts())
def colorBy(self, arrayName, scalarRange=None, lut=None):
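        '''Color the polydata by the named point data array.

        Passing an empty name or None disables scalar coloring. If no lookup
        table is given, a default one is built from scalarRange, from the
        rangeMap entry for this array, or from the array's own range.
        '''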
if not arrayName:
self.mapper.ScalarVisibilityOff()
self.polyData.GetPointData().SetActiveScalars(None)
return
array = self.polyData.GetPointData().GetArray(arrayName)
if not array:
print 'colorBy(%s): array not found' % arrayName
self.mapper.ScalarVisibilityOff()
self.polyData.GetPointData().SetActiveScalars(None)
return
self.polyData.GetPointData().SetActiveScalars(arrayName)
if not lut:
lut = self._getDefaultColorMap(array, scalarRange)
#self.mapper.SetColorModeToMapScalars()
self.mapper.ScalarVisibilityOn()
self.mapper.SetUseLookupTableScalarRange(True)
self.mapper.SetLookupTable(lut)
self.mapper.SetInterpolateScalarsBeforeMapping(not self._isPointCloud())
if self.getProperty('Visible'):
self._renderAllViews()
def getChildFrame(self):
frameName = self.getProperty('Name') + ' frame'
return self.findChild(frameName)
def addToView(self, view):
if view in self.views:
return
self.views.append(view)
view.renderer().AddActor(self.actor)
if self.shadowActor:
view.renderer().AddActor(self.shadowActor)
view.render()
def _onPropertyChanged(self, propertySet, propertyName):
om.ObjectModelItem._onPropertyChanged(self, propertySet, propertyName)
if propertyName == 'Point Size':
self.actor.GetProperty().SetPointSize(self.getProperty(propertyName))
elif propertyName == 'Alpha':
self.actor.GetProperty().SetOpacity(self.getProperty(propertyName))
if self.shadowActor:
self.shadowActor.GetProperty().SetOpacity(self.getProperty(propertyName))
elif propertyName == 'Visible':
self.actor.SetVisibility(self.getProperty(propertyName))
if self.shadowActor:
self.shadowActor.SetVisibility(self.getProperty(propertyName))
elif propertyName == 'Surface Mode':
mode = self.properties.getPropertyEnumValue(propertyName)
prop = self.actor.GetProperty()
if mode == 'Surface':
prop.SetRepresentationToSurface()
prop.EdgeVisibilityOff()
if mode == 'Wireframe':
prop.SetRepresentationToWireframe()
elif mode == 'Surface with edges':
prop.SetRepresentationToSurface()
prop.EdgeVisibilityOn()
elif mode == 'Points':
prop.SetRepresentationToPoints()
elif propertyName == 'Color':
color = self.getProperty(propertyName)
self.actor.GetProperty().SetColor(color)
elif propertyName == 'Color By':
self._updateColorBy()
elif propertyName == 'Show Scalar Bar':
self._updateScalarBar()
self._renderAllViews()
def setScalarRange(self, rangeMin, rangeMax):
arrayName = self.getPropertyEnumValue('Color By')
if arrayName != 'Solid Color':
lut = self.mapper.GetLookupTable()
self.colorBy(arrayName, scalarRange=(rangeMin, rangeMax))
def _updateSurfaceProperty(self):
enableSurfaceMode = self.polyData.GetNumberOfPolys() or self.polyData.GetNumberOfStrips()
self.properties.setPropertyAttribute('Surface Mode', 'hidden', not enableSurfaceMode)
def _updateColorBy(self, retainColorMap=False):
arrayName = self.getPropertyEnumValue('Color By')
if arrayName == 'Solid Color':
self.colorBy(None)
else:
lut = self.mapper.GetLookupTable() if retainColorMap else None
self.colorBy(arrayName, lut=lut)
self._updateScalarBar()
def _updateColorByProperty(self):
enumNames = ['Solid Color'] + self.getArrayNames()
currentValue = self.properties.getProperty('Color By')
if currentValue >= len(enumNames):
self.setProperty('Color By', 0)
self.properties.setPropertyAttribute('Color By', 'enumNames', enumNames)
def _updateScalarBar(self):
barEnabled = self.getProperty('Show Scalar Bar')
colorBy = self.getProperty('Color By')
if barEnabled and colorBy != 0:
self._showScalarBar()
else:
self._hideScalarBar()
def _hideScalarBar(self):
if self.scalarBarWidget:
self.scalarBarWidget.Off()
self.scalarBarWidget.SetInteractor(None)
self.scalarBarWidget = None
self._renderAllViews()
def _showScalarBar(self):
title = self.properties.getPropertyEnumValue('Color By')
view = self.views[0]
lut = self.mapper.GetLookupTable()
self.scalarBarWidget = createScalarBarWidget(view, lut, title)
self._renderAllViews()
def _setScalarBarTextColor(self, color=(0,0,0)):
act = self.scalarBarWidget.GetScalarBarActor()
act.GetTitleTextProperty().SetColor(color)
act.GetLabelTextProperty().SetColor(color)
def _setScalarBarTitle(self, titleText):
act = self.scalarBarWidget.GetScalarBarActor()
act.SetTitle(titleText)
def getCoolToWarmColorMap(self, scalarRange):
f = vtk.vtkDiscretizableColorTransferFunction()
f.DiscretizeOn()
f.SetColorSpaceToDiverging()
f.SetNumberOfValues(256)
f.AddRGBPoint(scalarRange[0], 0.23, 0.299, 0.754)
f.AddRGBPoint(scalarRange[1], 0.706, 0.016, 0.15)
f.Build()
return f
def _getDefaultColorMap(self, array, scalarRange=None, hueRange=None):
name = array.GetName()
blueToRed = (0.667, 0)
redtoBlue = (0, 0.667)
hueMap = {
'Axes' : redtoBlue
}
scalarRange = scalarRange or self.rangeMap.get(name, array.GetRange())
hueRange = hueRange or hueMap.get(name, blueToRed)
lut = vtk.vtkLookupTable()
lut.SetNumberOfColors(256)
lut.SetHueRange(hueRange)
lut.SetRange(scalarRange)
lut.Build()
return lut
#return self.getCoolToWarmColorMap(scalarRange)
def shadowOn(self):
if self.shadowActor:
return
mat = [[1, 0, -1, 0],
[0, 1, -1, 0],
[0, 0, 0, 0],
[0, 0, 0, 1]]
shadowT = transformUtils.getTransformFromNumpy(mat)
baseTransform = self.actor.GetUserTransform()
if baseTransform:
shadowT.PreMultiply()
shadowT.Concatenate(baseTransform)
self.shadowActor = vtk.vtkActor()
self.shadowActor.SetMapper(self.mapper)
self.shadowActor.SetUserTransform(shadowT)
self.shadowActor.GetProperty().LightingOff()
self.shadowActor.GetProperty().SetColor(0, 0, 0)
for view in self.views:
view.renderer().AddActor(self.shadowActor)
def shadowOff(self):
for view in self.views:
view.renderer().RemoveActor(self.shadowActor)
self.shadowActor = None
def onRemoveFromObjectModel(self):
om.ObjectModelItem.onRemoveFromObjectModel(self)
self.removeFromAllViews()
def removeFromAllViews(self):
for view in list(self.views):
self.removeFromView(view)
assert len(self.views) == 0
self._hideScalarBar()
def removeFromView(self, view):
assert view in self.views
self.views.remove(view)
view.renderer().RemoveActor(self.actor)
if self.shadowActor:
view.renderer().RemoveActor(self.shadowActor)
for renderer in self.extraViewRenderers.get(view, []):
renderer.RemoveActor(self.actor)
view.render()
class TextItem(om.ObjectModelItem):
def __init__(self, name, text='', view=None):
om.ObjectModelItem.__init__(self, name)
self.views = []
self.actor = vtk.vtkTextActor()
prop = self.actor.GetTextProperty()
prop.SetFontSize(18)
self.actor.SetPosition(10,10)
self.actor.SetInput(text)
self.addProperty('Visible', True)
self.addProperty('Text', text)
self.addProperty('Position', [10, 10], attributes=om.PropertyAttributes(minimum=0, maximum=3000, singleStep=1))
self.addProperty('Font Size', 18, attributes=om.PropertyAttributes(minimum=6, maximum=128, singleStep=1))
self.addProperty('Bold', False)
self.addProperty('Italic', False)
if view:
self.addToView(view)
def addToView(self, view):
if view in self.views:
return
self.views.append(view)
view.renderer().AddActor(self.actor)
view.render()
def _renderAllViews(self):
for view in self.views:
view.render()
def onRemoveFromObjectModel(self):
om.ObjectModelItem.onRemoveFromObjectModel(self)
self.removeFromAllViews()
def removeFromAllViews(self):
for view in list(self.views):
self.removeFromView(view)
def removeFromView(self, view):
assert view in self.views
self.views.remove(view)
view.renderer().RemoveActor(self.actor)
view.render()
def _onPropertyChanged(self, propertySet, propertyName):
om.ObjectModelItem._onPropertyChanged(self, propertySet, propertyName)
if propertyName == 'Visible':
self.actor.SetVisibility(self.getProperty(propertyName))
self._renderAllViews()
elif propertyName == 'Text':
view = app.getCurrentRenderView()
self.actor.SetInput(self.getProperty(propertyName))
elif propertyName == 'Position':
pos = self.getProperty(propertyName)
self.actor.SetPosition(pos[0], pos[1])
elif propertyName == 'Font Size':
self.actor.GetTextProperty().SetFontSize(self.getProperty(propertyName))
        elif propertyName == 'Bold':
self.actor.GetTextProperty().SetBold(self.getProperty(propertyName))
elif propertyName == 'Italic':
self.actor.GetTextProperty().SetItalic(self.getProperty(propertyName))
if self.getProperty('Visible'):
self._renderAllViews()
def updateText(text, name, **kwargs):
obj = om.findObjectByName(name)
obj = obj or showText(text, name, **kwargs)
obj.setProperty('Text', text)
return obj
def showText(text, name, fontSize=18, position=(10, 10), parent=None, view=None):
view = view or app.getCurrentRenderView()
assert view
item = TextItem(name, text, view=view)
item.setProperty('Font Size', fontSize)
item.setProperty('Position', list(position))
if isinstance(parent, str):
parentObj = om.getOrCreateContainer(parent)
else:
parentObj = parent
om.addToObjectModel(item, parentObj)
return item
def createAxesPolyData(scale, useTube):
axes = vtk.vtkAxes()
axes.SetComputeNormals(0)
axes.SetScaleFactor(scale)
axes.Update()
if useTube:
tube = vtk.vtkTubeFilter()
tube.SetInput(axes.GetOutput())
tube.SetRadius(0.002)
tube.SetNumberOfSides(12)
tube.Update()
axes = tube
return shallowCopy(axes.GetOutput())
class FrameItem(PolyDataItem):
def __init__(self, name, transform, view):
PolyDataItem.__init__(self, name, vtk.vtkPolyData(), view)
self.transform = transform
self._blockSignals = False
self.actor.SetUserTransform(transform)
self.widget = vtk.vtkFrameWidget()
self.widget.CreateDefaultRepresentation()
self.widget.EnabledOff()
self.rep = self.widget.GetRepresentation()
self.rep.SetTransform(transform)
self.traceData = None
self._frameSync = None
self.addProperty('Scale', 1.0, attributes=om.PropertyAttributes(decimals=2, minimum=0.01, maximum=100, singleStep=0.1, hidden=False))
self.addProperty('Edit', False)
self.addProperty('Trace', False)
self.addProperty('Tube', False)
self.properties.setPropertyIndex('Edit', 0)
self.properties.setPropertyIndex('Trace', 1)
self.properties.setPropertyIndex('Tube', 2)
self.callbacks.addSignal('FrameModified')
self.onTransformModifiedCallback = None
self.observerTag = self.transform.AddObserver('ModifiedEvent', self.onTransformModified)
self._updateAxesGeometry()
self.setProperty('Color By', 'Axes')
self.setProperty('Icon', om.Icons.Axes)
def connectFrameModified(self, func):
return self.callbacks.connect('FrameModified', func)
def disconnectFrameModified(self, callbackId):
self.callbacks.disconnect(callbackId)
def onTransformModified(self, transform, event):
if not self._blockSignals:
if self.onTransformModifiedCallback:
self.onTransformModifiedCallback(self)
self.callbacks.process('FrameModified', self)
def addToView(self, view):
PolyDataItem.addToView(self, view)
def copyFrame(self, transform):
self._blockSignals = True
self.transform.SetMatrix(transform.GetMatrix())
self._blockSignals = False
self.transform.Modified()
parent = self.parent()
if (parent and parent.getProperty('Visible')) or self.getProperty('Visible'):
self._renderAllViews()
def getFrameSync(self):
if self._frameSync is None:
self._frameSync = FrameSync()
self._frameSync.addFrame(self)
return self._frameSync
def _updateAxesGeometry(self):
scale = self.getProperty('Scale')
self.rep.SetWorldSize(scale)
self.setPolyData(createAxesPolyData(scale, self.getProperty('Tube')))
def _onPropertyChanged(self, propertySet, propertyName):
PolyDataItem._onPropertyChanged(self, propertySet, propertyName)
if propertyName == 'Scale':
scale = self.getProperty(propertyName)
self.rep.SetWorldSize(scale)
self._updateAxesGeometry()
elif propertyName == 'Edit':
view = app.getCurrentRenderView()
if view not in self.views:
view = self.views[0]
self.widget.SetInteractor(view.renderWindow().GetInteractor())
self.widget.SetEnabled(self.getProperty(propertyName))
isEditing = self.getProperty(propertyName)
if isEditing:
frameupdater.registerFrame(self)
elif propertyName == 'Trace':
trace = self.getProperty(propertyName)
if trace and not self.traceData:
self.traceData = FrameTraceVisualizer(self)
elif not trace and self.traceData:
om.removeFromObjectModel(self.traceData.getTraceData())
self.traceData = None
elif propertyName == 'Tube':
self._updateAxesGeometry()
def onRemoveFromObjectModel(self):
PolyDataItem.onRemoveFromObjectModel(self)
self.transform.RemoveObserver(self.observerTag)
self.widget.SetInteractor(None)
self.widget.EnabledOff()
for view in self.views:
view.renderer().RemoveActor(self.actor)
view.render()
class FrameTraceVisualizer(object):
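    '''Records the trajectory of a FrameItem as a polyline.

    Whenever the observed frame's position changes, a point and a line segment
    are appended to a '<frame name> trace' polydata child object.
    '''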
def __init__(self, frame):
self.frame = frame
self.traceName = '%s trace' % frame.getProperty('Name')
self.lastPosition = np.array(frame.transform.GetPosition())
self.lineCell = vtk.vtkLine()
frame.connectFrameModified(self.onFrameModified)
def getTraceData(self):
t = self.frame.findChild(self.traceName)
if not t:
pts = vtk.vtkPoints()
pts.SetDataTypeToDouble()
pts.InsertNextPoint(self.frame.transform.GetPosition())
pd = vtk.vtkPolyData()
pd.SetPoints(pts)
pd.SetLines(vtk.vtkCellArray())
t = showPolyData(pd, self.traceName, parent=self.frame)
return t
def addPoint(self, point):
traceData = self.getTraceData()
pd = traceData.polyData
pd.GetPoints().InsertNextPoint(point)
numberOfPoints = pd.GetNumberOfPoints()
line = self.lineCell
ids = line.GetPointIds()
ids.SetId(0, numberOfPoints-2)
ids.SetId(1, numberOfPoints-1)
pd.GetLines().InsertNextCell(line.GetPointIds())
pd.Modified()
traceData._renderAllViews()
def onFrameModified(self, frame):
position = np.array(frame.transform.GetPosition())
if not np.allclose(position, self.lastPosition):
self.addPoint(position)
class FrameSync(object):
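    '''Keeps a group of FrameItems rigidly linked.

    When any registered frame is modified, the other frames are moved so that
    the relative (base) transforms captured at registration time are preserved.
    Frames are held by weak references and dropped once garbage collected.
    '''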
class FrameData(object):
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def __init__(self):
self.frames = {}
self._blockCallbacks = False
self._ids = itertools.count()
def addFrame(self, frame, ignoreIncoming=False):
if frame is None:
return
if self._findFrameId(frame) is not None:
return
frameId = self._ids.next()
callbackId = frame.connectFrameModified(self._onFrameModified)
self.frames[frameId] = FrameSync.FrameData(
ref=weakref.ref(frame),
baseTransform=self._computeBaseTransform(frame),
callbackId=callbackId,
ignoreIncoming=ignoreIncoming)
def removeFrame(self, frame):
frameId = self._findFrameId(frame)
if frameId is None:
raise KeyError(frame)
frame.disconnectFrameModified(self.frames[frameId].callbackId)
self._removeFrameId(frameId)
def _computeBaseTransform(self, frame):
currentDelta = None
for frameId, frameData in self.frames.items():
if frameData.ref() is None:
self._removeFrameId(frameId)
elif frameData.ref() is frame:
continue
else:
currentDelta = transformUtils.copyFrame(frameData.baseTransform.GetLinearInverse())
currentDelta.Concatenate(transformUtils.copyFrame(frameData.ref().transform))
break
t = transformUtils.copyFrame(frame.transform)
t.PostMultiply()
if currentDelta:
t.Concatenate(currentDelta.GetLinearInverse())
return t
def _removeFrameId(self, frameId):
del self.frames[frameId]
def _findFrameId(self, frame):
for frameId, frameData in self.frames.items():
if frameData.ref() is None:
self._removeFrameId(frameId)
elif frameData.ref() is frame:
return frameId
def _moveFrame(self, frameId, modifiedFrameId):
frameData = self.frames[frameId]
modifiedFrameData = self.frames[modifiedFrameId]
t = vtk.vtkTransform()
t.PostMultiply()
t.Concatenate(frameData.baseTransform)
t.Concatenate(modifiedFrameData.baseTransform.GetLinearInverse())
t.Concatenate(modifiedFrameData.ref().transform)
frameData.ref().copyFrame(t)
def _onFrameModified(self, frame):
if self._blockCallbacks:
return
modifiedFrameId = self._findFrameId(frame)
assert modifiedFrameId is not None
#print self, 'onFrameModified:', self.frames[modifiedFrameId].ref().getProperty('Name')
if self.frames[modifiedFrameId].ignoreIncoming:
self.frames[modifiedFrameId].baseTransform = self._computeBaseTransform(frame)
return
self._blockCallbacks = True
for frameId, frameData in self.frames.items():
if frameData.ref() is None:
self._removeFrameId(frameId)
elif frameId != modifiedFrameId:
#print ' ', self, 'moving:', self.frames[frameId].ref().getProperty('Name')
self._moveFrame(frameId, modifiedFrameId)
self._blockCallbacks = False
class ViewOptionsItem(om.ObjectModelItem):
def __init__(self, view):
om.ObjectModelItem.__init__(self, 'view options')
self.view = view
self.addProperty('Camera projection', 0, attributes=om.PropertyAttributes(enumNames=['Perspective', 'Parallel']))
self.addProperty('View angle', view.camera().GetViewAngle(), attributes=om.PropertyAttributes(minimum=2, maximum=180))
self.addProperty('Key light intensity', view.lightKit().GetKeyLightIntensity(), attributes=om.PropertyAttributes(minimum=0, maximum=5, singleStep=0.1, decimals=2))
self.addProperty('Light kit', True)
self.addProperty('Eye dome lighting', False)
self.addProperty('Orientation widget', True)
self.addProperty('Interactive render', True)
self.addProperty('Gradient background', True)
self.addProperty('Background color', view.backgroundRenderer().GetBackground())
self.addProperty('Background color 2', view.backgroundRenderer().GetBackground2())
def _onPropertyChanged(self, propertySet, propertyName):
om.ObjectModelItem._onPropertyChanged(self, propertySet, propertyName)
if propertyName in ('Gradient background', 'Background color', 'Background color 2'):
colors = [self.getProperty('Background color'), self.getProperty('Background color 2')]
if not self.getProperty('Gradient background'):
colors[1] = colors[0]
self.view.renderer().SetBackground(colors[0])
self.view.renderer().SetBackground2(colors[1])
elif propertyName == 'Camera projection':
if self.getPropertyEnumValue(propertyName) == 'Perspective':
self.view.camera().ParallelProjectionOff()
else:
self.view.camera().ParallelProjectionOn()
elif propertyName == 'Orientation widget':
if self.getProperty(propertyName):
self.view.orientationMarkerWidget().On()
else:
self.view.orientationMarkerWidget().Off()
elif propertyName == 'View angle':
angle = self.getProperty(propertyName)
self.view.camera().SetViewAngle(angle)
elif propertyName == 'Key light intensity':
intensity = self.getProperty(propertyName)
self.view.lightKit().SetKeyLightIntensity(intensity)
elif propertyName == 'Light kit':
self.view.setLightKitEnabled(self.getProperty(propertyName))
elif propertyName == 'Eye dome lighting':
if self.getProperty(propertyName):
enableEyeDomeLighting(self.view)
else:
disableEyeDomeLighting(self.view)
elif propertyName == 'Interactive render':
if self.getProperty(propertyName):
self.view.renderWindow().GetInteractor().EnableRenderOn()
else:
self.view.renderWindow().GetInteractor().EnableRenderOff()
self.view.render()
def showGrid(view, cellSize=0.5, numberOfCells=25, name='grid', parent='sensors', color=[1,1,1], alpha=0.05, gridTransform=None):
grid = vtk.vtkGridSource()
grid.SetScale(cellSize)
grid.SetGridSize(numberOfCells)
grid.SetSurfaceEnabled(True)
grid.Update()
gridObj = showPolyData(grid.GetOutput(), 'grid', view=view, alpha=alpha, color=color, visible=True, parent=parent)
gridObj.gridSource = grid
gridObj.actor.GetProperty().LightingOff()
gridObj.actor.SetPickable(False)
gridTransform = gridTransform or vtk.vtkTransform()
gridObj.actor.SetUserTransform(gridTransform)
showFrame(gridTransform, 'grid frame', scale=0.2, visible=False, parent=gridObj, view=view)
gridObj.setProperty('Surface Mode', 'Wireframe')
def computeViewBoundsNoGrid():
if not gridObj.getProperty('Visible'):
return
gridObj.actor.SetUseBounds(False)
bounds = view.renderer().ComputeVisiblePropBounds()
gridObj.actor.SetUseBounds(True)
if vtk.vtkMath.AreBoundsInitialized(bounds):
view.addCustomBounds(bounds)
else:
view.addCustomBounds([-1, 1, -1, 1, -1, 1])
view.connect('computeBoundsRequest(ddQVTKWidgetView*)', computeViewBoundsNoGrid)
return gridObj
def createScalarBarWidget(view, lookupTable, title):
w = vtk.vtkScalarBarWidget()
bar = w.GetScalarBarActor()
bar.SetTitle(title)
bar.SetLookupTable(lookupTable)
w.SetRepositionable(True)
w.SetInteractor(view.renderWindow().GetInteractor())
w.On()
rep = w.GetRepresentation()
rep.SetOrientation(0)
rep.SetPosition(0.77, 0.92)
rep.SetPosition2(0.20, 0.07)
return w
def updatePolyData(polyData, name, **kwargs):
obj = om.findObjectByName(name)
obj = obj or showPolyData(polyData, name, **kwargs)
obj.setPolyData(polyData)
return obj
def updateFrame(frame, name, **kwargs):
obj = om.findObjectByName(name)
obj = obj or showFrame(frame, name, **kwargs)
obj.copyFrame(frame)
return obj
def showFrame(frame, name, view=None, parent='segmentation', scale=0.35, visible=True):
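    '''Create a FrameItem for the given vtkTransform, add it to the object
    model under parent, and display it in the given (or current) view.
    '''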
view = view or app.getCurrentRenderView()
assert view
if isinstance(parent, str):
parentObj = om.getOrCreateContainer(parent)
else:
parentObj = parent
item = FrameItem(name, frame, view)
om.addToObjectModel(item, parentObj)
item.setProperty('Visible', visible)
item.setProperty('Scale', scale)
return item
def showPolyData(polyData, name, color=None, colorByName=None, colorByRange=None, alpha=1.0, visible=True, view=None, parent='segmentation', cls=None):
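    '''Create a PolyDataItem (or the given cls) for polyData, add it to the
    object model under parent, and display it in the given (or current) view.
    The item is colored by colorByName when that array exists, otherwise by
    the given solid color.
    '''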
view = view or app.getCurrentRenderView()
assert view
cls = cls or PolyDataItem
item = cls(name, polyData, view)
if isinstance(parent, str):
parentObj = om.getOrCreateContainer(parent)
else:
parentObj = parent
om.addToObjectModel(item, parentObj)
item.setProperty('Visible', visible)
item.setProperty('Alpha', alpha)
if colorByName and colorByName not in item.getArrayNames():
print 'showPolyData(colorByName=%s): array not found' % colorByName
colorByName = None
if colorByName:
item.setProperty('Color By', colorByName)
item.colorBy(colorByName, colorByRange)
else:
color = [1.0, 1.0, 1.0] if color is None else color
item.setProperty('Color', [float(c) for c in color])
item.colorBy(None)
return item
def addChildFrame(obj, initialTransform=None):
'''
Adds a child frame to the given PolyDataItem. If initialTransform is given,
the object's polydata is transformed using the inverse of initialTransform
and then a child frame is assigned to the object to maintain its original
position.
'''
if obj.getChildFrame():
return
if initialTransform:
pd = transformPolyData(obj.polyData, initialTransform.GetLinearInverse())
obj.setPolyData(pd)
t = obj.actor.GetUserTransform()
if t is None:
t = vtk.vtkTransform()
t.PostMultiply()
frame = showFrame(t, obj.getProperty('Name') + ' frame', parent=obj, scale=0.2, visible=False)
obj.actor.SetUserTransform(t)
return frame
def getRandomColor():
'''
Return a random color as a list of RGB values between 0.0 and 1.0.
'''
return colorsys.hsv_to_rgb(np.random.rand(), 1.0, 0.9)
def showClusterObjects(clusters, parent):
colors = [ QtCore.Qt.red,
QtCore.Qt.blue,
QtCore.Qt.yellow,
QtCore.Qt.green,
QtCore.Qt.magenta,
QtCore.Qt.cyan,
QtCore.Qt.darkCyan,
QtCore.Qt.darkGreen,
QtCore.Qt.darkMagenta ]
colors = [QtGui.QColor(c) for c in colors]
colors = [(c.red()/255.0, c.green()/255.0, c.blue()/255.0) for c in colors]
objects = []
for i, cluster in enumerate(clusters):
name = 'object %d' % i
color = colors[i % len(colors)]
clusterObj = showPolyData(cluster.mesh, name, color=color, parent=parent, alpha=1.0)
clusterFrame = showFrame(cluster.frame, name + ' frame', scale=0.2, visible=False, parent=clusterObj)
clusterBox = showPolyData(cluster.box, name + ' box', color=color, parent=clusterObj, alpha=0.6, visible=False)
clusterPoints = showPolyData(cluster.points, name + ' points', color=color, parent=clusterObj, visible=False, alpha=1.0)
if hasattr(cluster,'oriented_frame'):
orientedFrame = showFrame(cluster.oriented_frame, name + ' oriented frame', scale=0.2, visible=False, parent=clusterObj)
clusterPoints.setProperty('Point Size', 7)
clusterPoints.colorBy(None)
clusterObj.data = cluster
objects.append(clusterObj)
for obj in [clusterObj, clusterBox, clusterPoints]:
obj.actor.SetUserTransform(cluster.frame)
return objects
captionWidget = None
def hideCaptionWidget():
global captionWidget
if captionWidget is not None:
captionWidget.Off()
captionWidget.Render()
def showCaptionWidget(position, text, view=None):
view = view or app.getCurrentRenderView()
assert view
global captionWidget
if not captionWidget:
rep = vtk.vtkCaptionRepresentation()
rep.SetPosition(0.2, 0.8)
w = vtk.vtkCaptionWidget()
w.SetInteractor(view.renderWindow().GetInteractor())
w.SetRepresentation(rep)
w.On()
captionWidget = w
rep = captionWidget.GetRepresentation()
rep.SetAnchorPosition(position)
rep.GetCaptionActor2D().SetCaption(text)
a = rep.GetCaptionActor2D()
pr = a.GetTextActor().GetTextProperty()
pr.SetJustificationToCentered()
pr.SetVerticalJustificationToCentered()
pr.SetItalic(0)
pr.SetBold(0)
pr.SetShadow(0)
pr.SetFontFamilyToArial()
c2 = rep.GetPosition2Coordinate()
c2.SetCoordinateSystemToDisplay()
c2.SetValue(12*len(text),30)
# disable border
#rep.SetShowBorder(0)
a.SetThreeDimensionalLeader(0)
a.SetLeaderGlyphSize(0.005)
captionWidget.On()
captionWidget.Render()
def getRayFromDisplayPoint(view, displayPoint):
'''
Given a view and an XY display point, returns two XYZ world points which
are the display point at the near/far clipping planes of the view.
'''
worldPt1 = [0,0,0,0]
worldPt2 = [0,0,0,0]
renderer = view.renderer()
vtk.vtkInteractorObserver.ComputeDisplayToWorld(renderer, displayPoint[0], displayPoint[1], 0, worldPt1)
vtk.vtkInteractorObserver.ComputeDisplayToWorld(renderer, displayPoint[0], displayPoint[1], 1, worldPt2)
worldPt1 = np.array(worldPt1[:3])
worldPt2 = np.array(worldPt2[:3])
return worldPt1, worldPt2
def pickImage(displayPoint, view, obj=None):
picker = vtk.vtkCellPicker()
if isinstance(obj, str):
obj = om.findObjectByName(obj)
assert obj
if obj:
picker.AddPickList(obj.actor)
picker.PickFromListOn()
picker.Pick(displayPoint[0], displayPoint[1], 0, view.renderer())
pickedDataset = picker.GetDataSet()
if obj:
return picker.GetPointIJK()
else:
return pickedDataset, picker.GetPointIJK()
def pickProp(displayPoint, view):
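    '''Pick the prop under an XY display point, trying a render (prop) pick
    first and then cell picks with increasing tolerance. Returns a tuple of
    (pickedPoint, pickedProp, pickedDataset), or (None, None, None).
    '''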
for tolerance in (0.0, 0.005, 0.01):
pickType = 'render' if tolerance == 0.0 else 'cells'
pickData = pickPoint(displayPoint, view, pickType=pickType, tolerance=tolerance)
pickedPoint = pickData.pickedPoint
pickedProp = pickData.pickedProp
pickedDataset = pickData.pickedDataset
if pickedProp is not None:
return pickedPoint, pickedProp, pickedDataset
return None, None, None
def pickPoint(displayPoint, view, obj=None, pickType='points', tolerance=0.01):
"""
:param displayPoint:
:param view:
:param obj:
:param pickType:
:param tolerance:
:return: FieldContainer with fields
pickedPoint
pickedProp
pickedDataset
        pickedNormal - is None if no normal can be computed
pickedCellId - is None unless pickType="cells"
"""
assert pickType in ('points', 'cells', 'render')
view = view or app.getCurrentRenderView()
assert view
if isinstance(obj, str):
obj = om.findObjectByName(obj)
assert obj
if pickType == 'render':
picker = vtk.vtkPropPicker()
else:
picker = vtk.vtkPointPicker() if pickType == 'points' else vtk.vtkCellPicker()
picker.SetTolerance(tolerance)
if obj is not None:
if isinstance(obj, list):
for o in obj:
picker.AddPickList(o.actor)
obj = None
else:
picker.AddPickList(obj.actor)
picker.PickFromListOn()
picker.Pick(displayPoint[0], displayPoint[1], 0, view.renderer())
pickedProp = picker.GetViewProp()
pickedPoint = np.array(picker.GetPickPosition())
pickedDataset = pickedProp.GetMapper().GetInput() if isinstance(pickedProp, vtk.vtkActor) else None
if pickType == "cells":
pickedCellId = picker.GetCellId()
else:
pickedCellId = None
# populate pickedNormal if possible
pickedNormal = None
if pickType == 'cells':
pickedNormal = np.array(picker.GetPickNormal())
elif pickType == 'points' and pickedDataset:
pointId = picker.GetPointId()
normals = pickedDataset.GetPointData().GetNormals()
if normals:
pickedNormal = np.array(normals.GetTuple3(pointId))
#if pickedDataset and pickType == 'cells':
# print 'point id:', pickedDataset.GetCell(picker.GetCellId()).GetPointIds().GetId(picker.GetSubId())
#if pickType == 'points':
# print 'point id:', picker.GetPointId()
fields = FieldContainer(
pickedPoint=pickedPoint,
pickedProp=pickedProp,
pickedDataset=pickedDataset,
pickedNormal=pickedNormal,
pickedCellId=pickedCellId
)
return fields
def mapMousePosition(widget, mouseEvent):
mousePosition = mouseEvent.pos()
return mousePosition.x(), widget.height - mousePosition.y()
def getObjectByDataSet(polyData):
for obj in om.getObjects():
if obj.hasDataSet(polyData):
return obj
def getObjectByProp(prop):
if not prop:
return None
for obj in om.getObjects():
if isinstance(obj, FrameItem) and obj.widget.GetRepresentation() == prop:
return obj
if isinstance(prop, vtk.vtkActor):
return getObjectByDataSet(prop.GetMapper().GetInput())
def findPickedObject(displayPoint, view):
pickedPoint, pickedProp, pickedDataset = pickProp(displayPoint, view)
obj = getObjectByProp(pickedProp)
return obj, pickedPoint
def enableEyeDomeLighting(view):
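    '''Replace the renderer's default pass with an Eye-Dome Lighting (EDL)
    shading pass built on a sequence of lights, opaque and translucent passes.
    '''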
seq = vtk.vtkSequencePass()
opaque = vtk.vtkOpaquePass()
peeling = vtk.vtkDepthPeelingPass()
peeling.SetMaximumNumberOfPeels(200)
peeling.SetOcclusionRatio(0.1)
translucent = vtk.vtkTranslucentPass()
peeling.SetTranslucentPass(translucent)
volume = vtk.vtkVolumetricPass()
overlay = vtk.vtkOverlayPass()
lights = vtk.vtkLightsPass()
passes=vtk.vtkRenderPassCollection()
passes.AddItem(lights)
passes.AddItem(opaque)
#passes.AddItem(peeling)
passes.AddItem(translucent)
#passes.AddItem(volume)
#passes.AddItem(overlay)
seq.SetPasses(passes)
edlPass = vtk.vtkEDLShading()
cameraPass = vtk.vtkCameraPass()
edlPass.SetDelegatePass(cameraPass)
cameraPass.SetDelegatePass(seq)
view.renderer().SetPass(edlPass)
def disableEyeDomeLighting(view):
view.renderer().SetPass(None)
def showImage(filename):
'''
Returns a QLabel displaying the image contents of given filename.
    Keep a reference to the returned label; it is destroyed when it goes out
    of scope.
'''
image = QtGui.QImage(filename)
assert not image.isNull()
imageLabel = QtGui.QLabel()
imageLabel.setPixmap(QtGui.QPixmap.fromImage(image))
imageLabel.setScaledContents(True)
imageLabel.resize(imageLabel.pixmap.size())
imageLabel.setWindowTitle(os.path.basename(filename))
imageLabel.show()
| bsd-3-clause | 8,944,330,065,811,042,000 | 31.151097 | 171 | 0.645839 | false |
johnboyington/homework | me777/hw5/hw5_p4.py | 1 | 1418 | import numpy as np
from numpy.random import rand
import matplotlib.pyplot as plt
def walk_on_lines(r, n, bounds, bound_temp, tol):
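    """Monte Carlo 'walk on lines' (1-D walk-on-spheres) estimate of u(r).

    Each of the n walkers repeatedly jumps to a uniform random point inside the
    largest sub-interval of `bounds` centred on its current position, until it
    lands within `tol` of a boundary, where it scores that boundary's value
    from `bound_temp`; the sample mean of the scores is returned.
    """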
s = 0
for z in range(n):
x = r
found = False
while not found:
L, R = abs(bounds[0] - x), abs(bounds[1] - x)
if L < R:
line = [bounds[0], x + L]
else:
line = [x - R, bounds[1]]
x = line[0] + rand()*(line[1] - line[0])
if x - bounds[0] < tol:
s += bound_temp[0]
found = True
elif bounds[1] - x < tol:
s += bound_temp[1]
found = True
return s / n
xs = np.linspace(1, 9, 50)
t = [[], [], [], []]
for x in xs:
t[0].append(walk_on_lines(x, 10000, [0, 10], [10, 50], 0.1))
t[1].append(walk_on_lines(x, 1000, [0, 10], [10, 50], 0.1))
t[2].append(walk_on_lines(x, 100, [0, 10], [10, 50], 0.1))
t[3].append(walk_on_lines(x, 10, [0, 10], [10, 50], 0.1))
fig = plt.figure(0)
ax = fig.add_subplot(111)
ax.plot(xs, t[3], color='orange', linestyle=':', label='10 histories')
ax.plot(xs, t[2], color='indigo', linestyle='-.', label='100 histories')
ax.plot(xs, t[1], color='green', linestyle='--', label='1000 histories')
ax.plot(xs, t[0], color='red', linestyle='-', label='10000 histories')
ax.set_xlabel('x')
ax.set_ylabel('u(x)')
ax.legend()
plt.savefig('p4.png', dpi=300) | gpl-3.0 | -8,456,888,572,681,269,000 | 29.847826 | 72 | 0.507757 | false |
sgrvinod/ml4seti-Effsubsee | folds/create_h5_tensors.py | 1 | 11714 | import ibmseti
import os
import pandas as pd
from sklearn.model_selection import StratifiedKFold
from sklearn.utils import shuffle
import numpy as np
import h5py
from tqdm import tqdm
def create_tensors_hdf5_logmod2_ph(dataset_name, dataset_folder, output_folder, number_of_folds,
time_freq_resolution):
"""
Reads signals, divides into stratified folds, performs windowing and FFT, generates two features:
log(amplitude^2) and phase of signal.
Note: to be used for training/val, in h5Dataset object (from h5_dataloaders.py) only!
Writes the folds' tensors, overall mean and standard deviation tensors, to a specified hdf5 file.
Args:
dataset_name (string): name of signal dataset.
dataset_folder (path): folder containing signal files.
output_folder (path): output location of hdf5 file.
number_of_folds (int): number of stratified folds to divide the signals into.
time_freq_resolution (tuple of ints): number of time steps and frequency windows.
"""
features_name = 'logmod2-ph'
number_of_features = 2
# Check for some things
assert time_freq_resolution[0] * time_freq_resolution[1] == 32 * 6144 and len(
time_freq_resolution) == 2, 'Invalid time-frequency resolution!'
assert os.path.isdir(dataset_folder), 'Invalid dataset directory!'
# Read CSV
files_in_dataset_folder = os.listdir(dataset_folder)
signal_classification = pd.read_csv(
os.path.join(dataset_folder, [f for f in files_in_dataset_folder if f.endswith('.csv')][0]))
assert 'UUID' in signal_classification.columns and 'SIGNAL_CLASSIFICATION' in signal_classification.columns
signal_classification = shuffle(signal_classification)
signal_classification = signal_classification.reset_index(drop=True)
# Check distribution of classes
print "Classes' count for all folds:"
print signal_classification.groupby(['SIGNAL_CLASSIFICATION']).count()
# Create folds
uuids = signal_classification['UUID'].to_frame()
classes = signal_classification['SIGNAL_CLASSIFICATION']
skf = StratifiedKFold(n_splits=5).split(uuids, classes)
fold_signals = []
for i, fold in enumerate(skf):
dataframe_slice = signal_classification.iloc[fold[1], :]
print "Classes' count for Fold %d:" % (i + 1)
print dataframe_slice.groupby(['SIGNAL_CLASSIFICATION']).count()
fold_signals.append(zip(list(dataframe_slice['UUID']), list(dataframe_slice['SIGNAL_CLASSIFICATION'])))
# Create/open hdf5 file
hdf5_file_name = dataset_name + '__' + str(number_of_folds) + 'folds__' + str(
time_freq_resolution[0]) + 't__' + str(
time_freq_resolution[1]) + 'f__' + features_name + '.hdf5'
# Create tensors and write to file
print "\nWriting %d folds to %s..." % (number_of_folds, hdf5_file_name)
for i, fold in enumerate(fold_signals):
with h5py.File(os.path.join(output_folder, hdf5_file_name), 'a') as h:
fold_data = h.create_dataset('fold' + str(i + 1) + '_data',
(1, time_freq_resolution[0], time_freq_resolution[1], number_of_features),
maxshape=(
None, time_freq_resolution[0], time_freq_resolution[1],
number_of_features))
fold_target = h.create_dataset('fold' + str(i + 1) + '_target', (1, 1), dtype='|S13', maxshape=(None, 1))
print "\nPopulating data and targets for Fold %d... " % (i + 1)
for j, signal in enumerate(tqdm(fold)):
try:
with open(os.path.join(dataset_folder, signal[0] + '.dat'), 'r') as f:
aca = ibmseti.compamp.SimCompamp(f.read())
except IOError:
continue
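                # Hann-window each time row, FFT along frequency (with fftshift),
                # then stack log-power and phase as the two feature channels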
complex_data = aca.complex_data()
complex_data = complex_data.reshape(time_freq_resolution[0], time_freq_resolution[1])
complex_data = complex_data * np.hanning(complex_data.shape[1])
cpfft = np.fft.fftshift(np.fft.fft(complex_data), 1)
spectrogram = np.abs(cpfft)
features = np.stack((np.log(spectrogram ** 2),
np.arctan(cpfft.imag / cpfft.real)), -1)
fold_data[fold_data.shape[0] - 1] = features
fold_target[fold_data.shape[0] - 1] = np.array([signal[1]])
if j == len(fold) - 1:
break # Don't resize/add an extra row if after the last signal in the fold
fold_data.resize(fold_data.shape[0] + 1, axis=0)
fold_target.resize(fold_target.shape[0] + 1, axis=0)
del features
del fold_data, fold_target
print "\nFolds written to %s" % hdf5_file_name
# Calculate mean tensor (for normalization, later)
print "\nCalculating mean tensor (across frequency bins and channels) without loading folds into memory..."
total_number_of_signals = 0
for i in range(number_of_folds):
print "\nReviewing Fold %d..." % (i + 1)
with h5py.File(os.path.join(output_folder, hdf5_file_name), 'r') as h:
dset = h['fold' + str(i + 1) + '_data']
if i == 0:
sum_tensor = np.zeros(dset.shape[1:], dtype=float)
for j in tqdm(range(dset.shape[0])):
sum_tensor = sum_tensor + dset[j]
total_number_of_signals = total_number_of_signals + dset.shape[0]
del dset
mean_tensor = sum_tensor / total_number_of_signals
mean_tensor = np.mean(mean_tensor, axis=0)
mean_tensor = np.repeat(mean_tensor[np.newaxis, :, :], time_freq_resolution[0], axis=0)
print "\nCalculated mean tensor (across frequency bins and channels)."
# Calculate standard deviation tensor (for normalization, later)
print "\nCalculating std-deviation tensor (across frequency bins and channels) without loading folds into memory..."
total_number_of_signals = 0
for i in range(number_of_folds):
print "\nReviewing Fold %d..." % (i + 1)
with h5py.File(os.path.join(output_folder, hdf5_file_name), 'r') as h:
dset = h['fold' + str(i + 1) + '_data']
if i == 0:
sum_of_squared_differences_tensor = np.zeros(dset.shape[1:], dtype=float)
for j in tqdm(range(dset.shape[0])):
assert mean_tensor.shape == dset[j].shape
sum_of_squared_differences_tensor = sum_of_squared_differences_tensor + (dset[j] - mean_tensor) ** 2
total_number_of_signals = total_number_of_signals + dset.shape[0]
del dset
mean_of_squared_differences_tensor = sum_of_squared_differences_tensor / total_number_of_signals
mean_of_squared_differences_tensor = np.mean(mean_of_squared_differences_tensor, axis=0)
std_deviation_tensor = np.sqrt(mean_of_squared_differences_tensor)
std_deviation_tensor = np.repeat(std_deviation_tensor[np.newaxis, :, :], time_freq_resolution[0], axis=0)
print "\nCalculated std-deviation tensor (across frequency bins and channels)."
assert mean_tensor.shape == std_deviation_tensor.shape
# Store mean and standard deviation tensors to the hdf5 file
print "\nStoring these to the same hdf5 file..."
with h5py.File(os.path.join(output_folder, hdf5_file_name), 'a') as h:
mean = h.create_dataset('mean', mean_tensor.shape)
std_dev = h.create_dataset('std_dev', std_deviation_tensor.shape)
mean[:] = mean_tensor
std_dev[:] = std_deviation_tensor
print "\nAll done!"
def create_test_tensors_hdf5_logmod2_ph(dataset_name, dataset_folder, output_folder, time_freq_resolution):
"""
Reads signals, performs windowing and FFT, generates two features:
log(amplitude^2) and phase of signal.
Note: to be used for testing, in h5TestDataset object (from h5_dataloaders.py) only!
Writes the test data tensor to a specified hdf5 file.
Args:
dataset_name (string): name of signal dataset.
dataset_folder (path): folder containing signal files.
output_folder (path): output location of hdf5 file.
time_freq_resolution (tuple of ints): number of time steps and frequency windows.
"""
features_name = 'logmod2-ph'
number_of_features = 2
# Check for some things
assert time_freq_resolution[0] * time_freq_resolution[1] == 32 * 6144 and len(
time_freq_resolution) == 2, 'Invalid time-frequency resolution!'
assert os.path.isdir(dataset_folder), 'Invalid dataset directory!'
# Read CSV and get UUIDs
files_in_dataset_folder = os.listdir(dataset_folder)
signal_classification = pd.read_csv(
os.path.join(dataset_folder, [f for f in files_in_dataset_folder if f.endswith('.csv')][0]))
assert 'UUID' in signal_classification.columns
uuids = list(signal_classification['UUID'])
print "There are %d signals in this test set." % len(uuids)
# HDF5 file name
hdf5_file_name = 'TEST__' + dataset_name + '__' + str(
time_freq_resolution[0]) + 't__' + str(
time_freq_resolution[1]) + 'f__' + features_name + '.hdf5'
# Create tensors and write to file
print "\nWriting tensors to %s..." % (hdf5_file_name)
with h5py.File(os.path.join(output_folder, hdf5_file_name), 'a') as h:
h_data = h.create_dataset('data',
(len(uuids), time_freq_resolution[0], time_freq_resolution[1], number_of_features))
h_uuids = h.create_dataset('uuids', shape=(len(uuids), 1), dtype='|S50')
for j, uuid in enumerate(tqdm(uuids)):
try:
with open(os.path.join(dataset_folder, uuid + '.dat'), 'r') as f:
aca = ibmseti.compamp.SimCompamp(f.read())
except IOError:
continue
complex_data = aca.complex_data()
complex_data = complex_data.reshape(time_freq_resolution[0], time_freq_resolution[1])
complex_data = complex_data * np.hanning(complex_data.shape[1])
cpfft = np.fft.fftshift(np.fft.fft(complex_data), 1)
spectrogram = np.abs(cpfft)
features = np.stack((np.log(spectrogram ** 2),
np.arctan(cpfft.imag / cpfft.real)), -1)
h_data[j] = features
h_uuids[j] = np.array([uuid])
del features
del h_data, h_uuids
print "\nTest data and UUIDs written to %s" % hdf5_file_name
print "\nVerifying that things are in the same order as in the CSV..."
with h5py.File(os.path.join(output_folder, hdf5_file_name), 'r') as h:
for j, uuid in enumerate(tqdm(uuids)):
if uuid != h['uuids'][:][j][0]:
print uuid, h['uuids'][:][j][0]
raise ValueError("Value at index %d differs - %s != %s!" % (j + 1, uuid, h['uuids'][:][j][0]))
print "All done!"
if __name__ == '__main__':
# create_tensors_hdf5_logmod2_ph(dataset_name='primary_full_v3',
# dataset_folder='../sti raw files/primary_full_dataset_v3',
# output_folder='./',
# number_of_folds=5,
# time_freq_resolution=(32 * 12, 6144 / 12))
create_test_tensors_hdf5_logmod2_ph(dataset_name='testset_final',
dataset_folder='../sti raw files/primary_testset_final_v3',
output_folder='./',
time_freq_resolution=(32 * 12, 6144 / 12))
| apache-2.0 | 3,222,924,417,227,544,600 | 49.930435 | 120 | 0.603807 | false |
ARM-software/mbed-beetle | tools/get_config.py | 1 | 3238 | #! /usr/bin/env python2
"""
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
from os.path import isdir, abspath, dirname, join
from os import _exit
# Be sure that the tools directory is in the search path
ROOT = abspath(join(dirname(__file__), ".."))
sys.path.insert(0, ROOT)
from tools.utils import args_error
from tools.options import get_default_options_parser
from tools.build_api import get_config
from config import Config
try:
import tools.private_settings as ps
except:
ps = object()
if __name__ == '__main__':
# Parse Options
parser = get_default_options_parser(add_clean=False, add_options=False)
parser.add_option("--source", dest="source_dir",
default=None, help="The source (input) directory", action="append")
parser.add_option("--prefix", dest="prefix", action="append",
default=None, help="Restrict listing to parameters that have this prefix")
parser.add_option("-v", "--verbose", action="store_true", dest="verbose",
default=False, help="Verbose diagnostic output")
(options, args) = parser.parse_args()
for path in options.source_dir :
if not isdir(path) :
args_error(parser, "[ERROR] you passed \"{}\" to --source, which does not exist".
format(path))
# Target
if options.mcu is None :
args_error(parser, "[ERROR] You should specify an MCU")
target = options.mcu
# Toolchain
if options.tool is None:
args_error(parser, "[ERROR] You should specify a TOOLCHAIN")
toolchain = options.tool
options.prefix = options.prefix or [""]
try:
params, macros = get_config(options.source_dir, target, toolchain)
if not params and not macros:
print "No configuration data available."
_exit(0)
if params:
print "Configuration parameters"
print "------------------------"
for p in params:
for s in options.prefix:
if p.startswith(s):
print(str(params[p]) if not options.verbose else params[p].get_verbose_description())
break
print ""
print "Macros"
print "------"
if macros:
print 'Defined with "macros":', macros
print "Generated from configuration parameters:", Config.parameters_to_macros(params)
except KeyboardInterrupt, e:
print "\n[CTRL+c] exit"
except Exception,e:
if options.verbose:
import traceback
traceback.print_exc(file=sys.stdout)
else:
print "[ERROR] %s" % str(e)
sys.exit(1)
| apache-2.0 | 6,115,368,512,132,941,000 | 33.446809 | 109 | 0.625077 | false |
blablacar/exabgp | lib/exabgp/reactor/peer.py | 1 | 22086 | # encoding: utf-8
"""
peer.py
Created by Thomas Mangin on 2009-08-25.
Copyright (c) 2009-2015 Exa Networks. All rights reserved.
"""
import time
# import traceback
from exabgp.bgp.timer import ReceiveTimer
from exabgp.bgp.timer import SendTimer
from exabgp.bgp.message import Message
from exabgp.bgp.fsm import FSM
from exabgp.bgp.message.open.capability import Capability
from exabgp.bgp.message.open.capability import REFRESH
from exabgp.bgp.message import NOP
from exabgp.bgp.message import Update
from exabgp.bgp.message.refresh import RouteRefresh
from exabgp.bgp.message import Notification
from exabgp.bgp.message import Notify
from exabgp.reactor.protocol import Protocol
from exabgp.reactor.network.error import NetworkError
from exabgp.reactor.api.processes import ProcessError
from exabgp.rib.change import Change
from exabgp.configuration.environment import environment
from exabgp.logger import Logger
from exabgp.logger import FakeLogger
from exabgp.logger import LazyFormat
from exabgp.util.trace import trace
from exabgp.util.panic import no_panic
from exabgp.util.panic import footer
class ACTION (object):
CLOSE = 0x01
LATER = 0x02
NOW = 0x03
class SEND (object):
DONE = 0x01
NORMAL = 0x02
REFRESH = 0x04
# As we can not know if this is our first start or not, this flag is used to
# always make the program act like it was recovering from a failure
# If set to FALSE, no EOR and OPEN Flags set for Restart will be set in the
# OPEN Graceful Restart Capability
FORCE_GRACEFUL = True
class Interrupted (Exception):
def __init__ (self,direction):
Exception.__init__(self)
self.direction = direction
# ======================================================================== Delay
# Exponential backoff for outgoing connection
class Delay (object):
def __init__ (self):
self._time = time.time()
self._next = 0
def reset (self):
self._time = time.time()
self._next = 0
def increase (self):
self._time = time.time() + self._next
self._next = min(int(1 + self._next * 1.2),60)
def backoff (self):
return self._time > time.time()
# =========================================================================== KA
#
class KA (object):
def __init__ (self, log, proto):
self._generator = self._keepalive(proto)
self.send_timer = SendTimer(log,proto.negotiated.holdtime)
def _keepalive (self, proto):
need_ka = False
generator = None
while True:
# SEND KEEPALIVES
need_ka |= self.send_timer.need_ka()
if need_ka:
if not generator:
generator = proto.new_keepalive()
need_ka = False
if not generator:
yield False
continue
try:
# try to close the generator and raise a StopIteration in one call
generator.next()
generator.next()
# still running
yield True
except NetworkError:
raise Notify(4,0,'problem with network while trying to send keepalive')
except StopIteration:
generator = None
yield False
def __call__ (self):
# True if we need or are trying
# False if we do not need to send one
try:
return self._generator.next()
except StopIteration:
raise Notify(4,0,'could not send keepalive')
# =================================================================== Direction
# Incoming/Outgoing dependent data
class Direction (object):
def __init__ (self,name,code,fsm,proto,enabled,generator):
self.name = name
self.code = code
self.fsm = fsm
self.proto = proto
self.enabled = enabled
self.generator = generator
self.opposite = None
# ======================================================================== Peer
# Present a File like interface to socket.socket
class Peer (object):
def __init__ (self, neighbor, reactor):
try:
self.logger = Logger()
# We only to try to connect via TCP once
self.once = environment.settings().tcp.once
self.bind = True if environment.settings().tcp.bind else False
except RuntimeError:
self.logger = FakeLogger()
self.once = False
self.bind = True
self.reactor = reactor
self.neighbor = neighbor
# The next restart neighbor definition
self._neighbor = None
# The peer should restart after a stop
self._restart = True
# The peer was restarted (to know what kind of open to send for graceful restart)
self._restarted = FORCE_GRACEFUL
		# We want to remove routes which are not in the configuration anymore after a signal to reload
self._reconfigure = True
# We want to send all the known routes
self._resend_routes = SEND.DONE
# We have new routes for the peers
self._have_routes = True
# We have been asked to teardown the session with this code
self._teardown = None
self._delay = Delay()
self.recv_timer = None
self._incoming = Direction (
'in',
self._accept,
FSM(FSM.IDLE),
None,
False,
False
)
self._outgoing = Direction (
'out',
self._connect,
FSM(FSM.IDLE),
None,
None if not self.neighbor.passive else False,
None if not self.neighbor.passive else False
)
self._incoming.opposite = self._outgoing
self._outgoing.opposite = self._incoming
def _reset (self, direction, message='',error=''):
direction.fsm.change(FSM.IDLE)
if not self._restart:
direction.generator = False
direction.proto = None
return
if direction.proto:
direction.proto.close('%s loop, peer reset, message [%s] error[%s]' % (direction.name,message,str(error)))
direction.proto = None
direction.generator = direction.enabled
self._teardown = None
if direction.name == 'out':
self._delay.increase()
self.neighbor.rib.reset()
# If we are restarting, and the neighbor definition is different, update the neighbor
if self._neighbor:
self.neighbor = self._neighbor
self._neighbor = None
def _stop (self, direction, message):
direction.generator = False
direction.proto.close('%s loop, stop, message [%s]' % (direction.name,message))
direction.proto = None
# logging
def me (self, message):
return "peer %s ASN %-7s %s" % (self.neighbor.peer_address,self.neighbor.peer_as,message)
# control
def stop (self):
self._teardown = 3
self._restart = False
self._restarted = False
self._delay.reset()
def resend (self):
self._resend_routes = SEND.NORMAL
self._delay.reset()
def send_new (self, changes=None,update=None):
if changes:
self.neighbor.rib.outgoing.replace(changes)
self._have_routes = self.neighbor.flush if update is None else update
def reestablish (self, restart_neighbor=None):
# we want to tear down the session and re-establish it
self._teardown = 3
self._restart = True
self._restarted = True
self._resend_routes = SEND.NORMAL
self._neighbor = restart_neighbor
self._delay.reset()
def reconfigure (self, restart_neighbor=None):
# we want to update the route which were in the configuration file
self._reconfigure = True
self._neighbor = restart_neighbor
self._resend_routes = SEND.NORMAL
self._neighbor = restart_neighbor
def teardown (self, code, restart=True):
self._restart = restart
self._teardown = code
self._delay.reset()
# sockets we must monitor
def sockets (self):
ios = []
for proto in (self._incoming.proto,self._outgoing.proto):
if proto and proto.connection and proto.connection.io:
ios.append(proto.connection.io)
return ios
def incoming (self, connection):
# if the other side fails, we go back to idle
if self._incoming.proto not in (True,False,None):
self.logger.network('we already have a peer at this address')
return False
# self._incoming.fsm.change(FSM.ACTIVE)
self._incoming.proto = Protocol(self).accept(connection)
# Let's make sure we do some work with this connection
self._incoming.generator = None
return True
def established (self):
return self._incoming.fsm == FSM.ESTABLISHED or self._outgoing.fsm == FSM.ESTABLISHED
def _accept (self):
self._incoming.fsm.change(FSM.CONNECT)
# we can do this as Protocol is a mutable object
proto = self._incoming.proto
# send OPEN
message = Message.CODE.NOP
for message in proto.new_open(self._restarted):
if ord(message.TYPE) == Message.CODE.NOP:
yield ACTION.NOW
proto.negotiated.sent(message)
self._incoming.fsm.change(FSM.OPENSENT)
# Read OPEN
wait = environment.settings().bgp.openwait
opentimer = ReceiveTimer(self.me,wait,1,1,'waited for open too long, we do not like stuck in active')
# Only yield if we have not yet received the OPEN, otherwise the reactor can run the other connection
# which would be bad as we need to do the collision check without going to the other peer
for message in proto.read_open(self.neighbor.peer_address.top()):
opentimer.check_ka(message)
if ord(message.TYPE) == Message.CODE.NOP:
yield ACTION.LATER
self._incoming.fsm.change(FSM.OPENCONFIRM)
proto.negotiated.received(message)
proto.validate_open()
if self._outgoing.fsm == FSM.OPENCONFIRM:
self.logger.network('incoming connection finds the outgoing connection is in openconfirm')
local_id = self.neighbor.router_id.pack()
remote_id = proto.negotiated.received_open.router_id.pack()
if local_id < remote_id:
self.logger.network('closing the outgoing connection')
self._stop(self._outgoing,'collision local id < remote id')
yield ACTION.LATER
else:
self.logger.network('aborting the incoming connection')
raise Interrupted(self._incoming)
# Send KEEPALIVE
for message in self._incoming.proto.new_keepalive('OPENCONFIRM'):
yield ACTION.NOW
# Start the keepalive timer
self.recv_timer = ReceiveTimer(self.me,proto.negotiated.holdtime,4,0)
# Read KEEPALIVE
for message in proto.read_keepalive():
self.recv_timer.check_ka(message)
yield ACTION.NOW
self._incoming.fsm.change(FSM.ESTABLISHED)
# let the caller know that we were successful
yield ACTION.NOW
def _connect (self):
# try to establish the outgoing connection
self._outgoing.fsm.change(FSM.ACTIVE)
proto = Protocol(self)
generator = proto.connect()
connected = False
try:
while not connected:
if self._teardown:
raise StopIteration()
connected = generator.next()
# we want to come back as soon as possible
yield ACTION.LATER
except StopIteration:
# Connection failed
if not connected:
proto.close('connection to %s:%d failed' % (self.neighbor.peer_address,proto.port))
# A connection arrived before we could establish !
if not connected or self._incoming.proto:
yield ACTION.NOW
raise Interrupted(self._outgoing)
self._outgoing.fsm.change(FSM.CONNECT)
self._outgoing.proto = proto
# send OPEN
# Only yield if we have not yet sent the OPEN, otherwise the reactor can run the other connection
# which would be bad as we need to set the state without going to the other peer
message = Message.CODE.NOP
for message in proto.new_open(self._restarted):
if ord(message.TYPE) == Message.CODE.NOP:
yield ACTION.NOW
proto.negotiated.sent(message)
self._outgoing.fsm.change(FSM.OPENSENT)
# Read OPEN
wait = environment.settings().bgp.openwait
opentimer = ReceiveTimer(self.me,wait,1,1,'waited for open too long, we do not like stuck in active')
for message in self._outgoing.proto.read_open(self.neighbor.peer_address.top()):
opentimer.check_ka(message)
# XXX: FIXME: change the whole code to use the ord and not the chr version
# Only yield if we have not yet received the OPEN, otherwise the reactor can run the other connection
# which would be bad as we need to do the collision check
if ord(message.TYPE) == Message.CODE.NOP:
yield ACTION.LATER
self._outgoing.fsm.change(FSM.OPENCONFIRM)
proto.negotiated.received(message)
proto.validate_open()
if self._incoming.fsm == FSM.OPENCONFIRM:
self.logger.network('outgoing connection finds the incoming connection is in openconfirm')
local_id = self.neighbor.router_id.pack()
remote_id = proto.negotiated.received_open.router_id.pack()
if local_id < remote_id:
self.logger.network('aborting the outgoing connection')
raise Interrupted(self._outgoing)
else:
self.logger.network('closing the incoming connection')
self._stop(self._incoming,'collision local id < remote id')
yield ACTION.LATER
# Send KEEPALIVE
for message in proto.new_keepalive('OPENCONFIRM'):
yield ACTION.NOW
# Start the keepalive timer
self.recv_timer = ReceiveTimer(self.me,proto.negotiated.holdtime,4,0)
# Read KEEPALIVE
for message in self._outgoing.proto.read_keepalive():
self.recv_timer.check_ka(message)
yield ACTION.NOW
self._outgoing.fsm.change(FSM.ESTABLISHED)
# let the caller know that we were successful
yield ACTION.NOW
def _main (self, direction):
"""yield True if we want to come back to it asap, None if nothing urgent, and False if stopped"""
if self._teardown:
raise Notify(6,3)
proto = direction.proto
# Announce to the process BGP is up
self.logger.network('Connected to peer %s (%s)' % (self.neighbor.name(),direction.name))
if self.neighbor.api['neighbor-changes']:
try:
self.reactor.processes.up(self.neighbor)
except ProcessError:
# Can not find any better error code than 6,0 !
# XXX: We can not restart the program so this will come back again and again - FIX
# XXX: In the main loop we do exit on this kind of error
raise Notify(6,0,'ExaBGP Internal error, sorry.')
send_eor = not self.neighbor.manual_eor
new_routes = None
self._resend_routes = SEND.NORMAL
send_families = []
# Every last asm message should be re-announced on restart
for family in self.neighbor.asm:
if family in self.neighbor.families():
self.neighbor.messages.appendleft(self.neighbor.asm[family])
operational = None
refresh = None
command_eor = None
number = 0
refresh_enhanced = True if proto.negotiated.refresh == REFRESH.ENHANCED else False
send_ka = KA(self.me,proto)
while not self._teardown:
for message in proto.read_message():
self.recv_timer.check_ka(message)
if send_ka() is not False:
# we need and will send a keepalive
while send_ka() is None:
yield ACTION.NOW
# Received update
if message.TYPE == Update.TYPE:
number += 1
self.logger.routes(LazyFormat(self.me('<< UPDATE (%d)' % number),message.attributes,lambda _: "%s%s" % (' attributes' if _ else '',_)))
for nlri in message.nlris:
self.neighbor.rib.incoming.insert_received(Change(nlri,message.attributes))
self.logger.routes(LazyFormat(self.me('<< UPDATE (%d) nlri ' % number),nlri,str))
elif message.TYPE == RouteRefresh.TYPE:
if message.reserved == RouteRefresh.request:
self._resend_routes = SEND.REFRESH
send_families.append((message.afi,message.safi))
# SEND OPERATIONAL
if self.neighbor.operational:
if not operational:
new_operational = self.neighbor.messages.popleft() if self.neighbor.messages else None
if new_operational:
operational = proto.new_operational(new_operational,proto.negotiated)
if operational:
try:
operational.next()
except StopIteration:
operational = None
# make sure that if some operational messages are received via the API
# that we do not eat memory for nothing
elif self.neighbor.messages:
self.neighbor.messages.popleft()
# SEND REFRESH
if self.neighbor.route_refresh:
if not refresh:
new_refresh = self.neighbor.refresh.popleft() if self.neighbor.refresh else None
if new_refresh:
refresh = proto.new_refresh(new_refresh)
if refresh:
try:
refresh.next()
except StopIteration:
refresh = None
# Take the routes already sent to that peer and resend them
if self._reconfigure:
self._reconfigure = False
# we are here following a configuration change
if self._neighbor:
# see what changed in the configuration
self.neighbor.rib.outgoing.replace(self._neighbor.backup_changes,self._neighbor.changes)
# do not keep the previous routes in memory as they are not useful anymore
self._neighbor.backup_changes = []
self._have_routes = True
# Take the routes already sent to that peer and resend them
if self._resend_routes != SEND.DONE:
enhanced = True if refresh_enhanced and self._resend_routes == SEND.REFRESH else False
self._resend_routes = SEND.DONE
self.neighbor.rib.outgoing.resend(send_families,enhanced)
self._have_routes = True
send_families = []
# Need to send update
if self._have_routes and not new_routes:
self._have_routes = False
# XXX: in proto really. hum to think about ?
new_routes = proto.new_update()
if new_routes:
try:
count = 20
while count:
# This can raise a NetworkError
new_routes.next()
count -= 1
except StopIteration:
new_routes = None
elif send_eor:
send_eor = False
for _ in proto.new_eors():
yield ACTION.NOW
self.logger.message(self.me('>> EOR(s)'))
# SEND MANUAL KEEPALIVE (only if we have no more routes to send)
elif not command_eor and self.neighbor.eor:
new_eor = self.neighbor.eor.popleft()
command_eor = proto.new_eors(new_eor.afi,new_eor.safi)
if command_eor:
try:
command_eor.next()
except StopIteration:
command_eor = None
if new_routes or message.TYPE != NOP.TYPE:
yield ACTION.NOW
elif self.neighbor.messages or operational:
yield ACTION.NOW
elif self.neighbor.eor or command_eor:
yield ACTION.NOW
else:
yield ACTION.LATER
# read_message will loop until new message arrives with NOP
if self._teardown:
break
# If graceful restart, silent shutdown
if self.neighbor.graceful_restart and proto.negotiated.sent_open.capabilities.announced(Capability.CODE.GRACEFUL_RESTART):
self.logger.network('Closing the session without notification','error')
proto.close('graceful restarted negotiated, closing without sending any notification')
raise NetworkError('closing')
# notify our peer of the shutdown
raise Notify(6,self._teardown)
def _run (self, direction):
"""yield True if we want the reactor to give us back the hand with the same peer loop, None if we do not have any more work to do"""
try:
for action in direction.code():
yield action
for action in self._main(direction):
yield action
# CONNECTION FAILURE
except NetworkError,network:
# we tried to connect once, it failed and it was not a manual request, we stop
if self.once and not self._teardown:
self.logger.network('only one attempt to connect is allowed, stopping the peer')
self.stop()
self._reset(direction,'closing connection',network)
return
# NOTIFY THE PEER OF AN ERROR
except Notify,notify:
if direction.proto:
try:
generator = direction.proto.new_notification(notify)
try:
maximum = 20
while maximum:
generator.next()
maximum -= 1
yield ACTION.NOW if maximum > 10 else ACTION.LATER
except StopIteration:
pass
except (NetworkError,ProcessError):
self.logger.network(self.me('NOTIFICATION NOT SENT'),'error')
self._reset(direction,'notification sent (%d,%d)' % (notify.code,notify.subcode),notify)
else:
self._reset(direction)
return
# THE PEER NOTIFIED US OF AN ERROR
except Notification,notification:
# we tried to connect once, it failed and it was not a manual request, we stop
if self.once and not self._teardown:
self.logger.network('only one attempt to connect is allowed, stopping the peer')
self.stop()
self._reset(direction,'notification received (%d,%d)' % (notification.code,notification.subcode),notification)
return
# RECEIVED a Message TYPE we did not expect
except Message,message:
self._reset(direction,'unexpected message received',message)
return
# PROBLEM WRITING TO OUR FORKED PROCESSES
except ProcessError, process:
self._reset(direction,'process problem',process)
return
# ....
except Interrupted,interruption:
self._reset(interruption.direction)
return
# UNHANDLED PROBLEMS
except Exception,exc:
# Those messages can not be filtered in purpose
self.logger.raw('\n'.join([
no_panic,
self.me(''),
'',
str(type(exc)),
str(exc),
trace(),
footer
]))
self._reset(direction)
return
# loop
def run (self):
if self.reactor.processes.broken(self.neighbor):
# XXX: we should perhaps try to restart the process ??
self.logger.processes('ExaBGP lost the helper process for this peer - stopping','error')
self.stop()
return True
back = ACTION.LATER if self._restart else ACTION.CLOSE
for direction in (self._incoming, self._outgoing):
if direction.generator:
try:
# This generator only stops when it raises
r = direction.generator.next()
# if r is ACTION.NOW: status = 'immediately'
# elif r is ACTION.LATER: status = 'next second'
# elif r is ACTION.CLOSE: status = 'stop'
# else: status = 'buggy'
# self.logger.network('%s loop %11s, state is %s' % (direction.name,status,direction.fsm),'debug')
if r == ACTION.NOW:
back = ACTION.NOW
elif r == ACTION.LATER:
back = ACTION.LATER if back != ACTION.NOW else ACTION.NOW
except StopIteration:
# Trying to run a closed loop, no point continuing
direction.generator = direction.enabled
elif direction.generator is None:
if direction.opposite.fsm in [FSM.OPENCONFIRM,FSM.ESTABLISHED]:
self.logger.network('%s loop, stopping, other one is established' % direction.name,'debug')
direction.generator = False
continue
if direction.name == 'out' and self._delay.backoff():
self.logger.network('%s loop, skipping, not time yet' % direction.name,'debug')
back = ACTION.LATER
continue
if self._restart:
self.logger.network('%s loop, initialising' % direction.name,'debug')
direction.generator = self._run(direction)
back = ACTION.LATER # make sure we go through a clean loop
return back
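# --- Editor's sketch (illustrative only, not part of ExaBGP) -----------------
# run() above reports back to the reactor with ACTION.NOW (poll me again
# immediately), ACTION.LATER (poll me on the next pass) or ACTION.CLOSE
# (forget me). The helper below is a minimal, self-contained approximation of
# that scheduling contract; `peers` and `ACTION` are stand-ins for the real
# reactor objects and are assumptions, not ExaBGP API.
def _sketch_poll_peers (peers, ACTION):
    urgent = []
    for peer in peers:
        decision = peer.run()
        if decision == ACTION.NOW:
            # this peer has more work ready right now
            urgent.append(peer)
        elif decision == ACTION.CLOSE:
            # the peer asked to be dropped from the loop
            continue
        # ACTION.LATER peers are simply revisited on the next pass
    return urgent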
| bsd-3-clause | 1,086,304,427,348,369,500 | 29.254795 | 140 | 0.692158 | false |
aagusti/o-sipkd | osipkd/views/ak/New folder/ak_jurnal_ppkd_item.py | 1 | 9421 | import os
import uuid
from osipkd.tools import row2dict, xls_reader
from datetime import datetime
from sqlalchemy import not_, func
from sqlalchemy.orm import aliased
from pyramid.view import (view_config,)
from pyramid.httpexceptions import ( HTTPFound, )
import colander
from deform import (Form, widget, ValidationFailure, )
from osipkd.models import DBSession
from osipkd.models.apbd_tu import AkJurnal, AkJurnalItem
from osipkd.models.pemda_model import Rekening, Sap, RekeningSap
from osipkd.models.apbd_anggaran import KegiatanSub, Kegiatan, KegiatanItem
from datatables import ColumnDT, DataTables
from osipkd.views.base_view import BaseViews
SESS_ADD_FAILED = 'Tambah ak-jurnal-ppkd-item gagal'
SESS_EDIT_FAILED = 'Edit ak-jurnal-ppkd-item gagal'
class view_ak_jurnal_ppkd_item(BaseViews):
########
# List #
########
@view_config(route_name='ak-jurnal-ppkd-item', renderer='templates/ak-jurnal-ppkd-item/list.pt',
permission='read')
def view_list(self):
ses = self.request.session
req = self.request
params = req.params
url_dict = req.matchdict
return dict(project='EIS')
##########
# Action #
##########
@view_config(route_name='ak-jurnal-ppkd-item-act', renderer='json',
permission='read')
def ak_jurnal_ppkd_item_act(self):
ses = self.request.session
req = self.request
params = req.params
url_dict = req.matchdict
pk_id = 'id' in params and params['id'] and int(params['id']) or 0
if url_dict['act']=='grid':
ak_jurnal_id = url_dict['ak_jurnal_id'].isdigit() and url_dict['ak_jurnal_id'] or 0
columns = []
columns.append(ColumnDT('id'))
columns.append(ColumnDT('sapkd'))
columns.append(ColumnDT('sapnm'))
columns.append(ColumnDT('amount', filter=self._number_format))
columns.append(ColumnDT('notes'))
columns.append(ColumnDT('rekkd'))
columns.append(ColumnDT('reknm'))
columns.append(ColumnDT('kegiatan_sub_id'))
columns.append(ColumnDT('rekening_id'))
columns.append(ColumnDT('ak_jurnal_id'))
columns.append(ColumnDT('subkd'))
columns.append(ColumnDT('subnm'))
rek = aliased(Rekening)
sap = aliased(Sap)
sub = aliased(KegiatanSub)
query = DBSession.query(AkJurnalItem.id,
sap.kode.label('sapkd'),
sap.nama.label('sapnm'),
AkJurnalItem.amount,
AkJurnalItem.notes,
rek.kode.label('rekkd'),
rek.nama.label('reknm'),
AkJurnalItem.kegiatan_sub_id,
AkJurnalItem.rekening_id,
AkJurnalItem.ak_jurnal_id,
sub.kode.label('subkd'),
sub.nama.label('subnm'),
).join(AkJurnal,
).outerjoin(rek, AkJurnalItem.rekening_id == rek.id
).outerjoin(sap, AkJurnalItem.sap_id == sap.id
).outerjoin(sub, AkJurnalItem.kegiatan_sub_id == sub.id
).filter(AkJurnalItem.ak_jurnal_id==ak_jurnal_id,
AkJurnalItem.ak_jurnal_id==AkJurnal.id,
).group_by(AkJurnalItem.id,
sap.kode.label('sapkd'),
sap.nama.label('sapnm'),
AkJurnalItem.amount,
AkJurnalItem.notes,
rek.kode.label('rekkd'),
rek.nama.label('reknm'),
AkJurnalItem.kegiatan_sub_id,
AkJurnalItem.rekening_id,
AkJurnalItem.ak_jurnal_id,
sub.kode.label('subkd'),
sub.nama.label('subnm'),
)
rowTable = DataTables(req, AkJurnalItem, query, columns)
return rowTable.output_result()
################
# Quick Add    #
################
@view_config(route_name='ak-jurnal-ppkd-item-add', renderer='json',
permission='add')
def view_add(self):
ses = self.request.session
req = self.request
params = req.params
url_dict = req.matchdict
ak_jurnal_id = 'ak_jurnal_id' in url_dict and url_dict['ak_jurnal_id'] or 0
controls = dict(req.POST.items())
jurnal_item_id = 'jurnal_item_id' in controls and controls['jurnal_item_id'] or 0
if jurnal_item_id:
row = DBSession.query(AkJurnalItem)\
.join(AkJurnal)\
.filter(AkJurnalItem.id==jurnal_item_id,
AkJurnal.unit_id==ses['unit_id'],
AkJurnalItem.ak_jurnal_id==ak_jurnal_id).first()
if not row:
return {"success": False, 'msg':'Jurnal tidak ditemukan'}
else:
row = AkJurnalItem()
row.ak_jurnal_id = ak_jurnal_id
row.kegiatan_sub_id = controls['kegiatan_sub_id'] or 0
row.rekening_id = controls['rekening_id'] or 0
row.sap_id = controls['sap_id'] or 0
row.amount = controls['amount'].replace('.','')
row.notes = controls['notes']
DBSession.add(row)
DBSession.flush()
return {"success": True, 'id': row.id, "msg":'Success Tambah Data'}
try:
pass
except:
return {'success':False, 'msg':'Gagal Tambah Data'}
def query_id(self):
return DBSession.query(AkJurnalItem).filter(AkJurnalItem.id==self.request.matchdict['id'],
AkJurnalItem.ak_jurnal_id==self.request.matchdict['ak_jurnal_id'])
def id_not_found(self):
msg = 'Jurnal Item ID %s Tidak Ditemukan.' % self.request.matchdict['id']
return {'success': False, 'msg':msg}
########
# Edit #
########
@view_config(route_name='ak-jurnal-ppkd-item-edit', renderer='json',
permission='edit')
def view_edit(self):
request = self.request
row = self.query_id().first()
if not row:
return self.id_not_found()
form = self.get_form(EditSchema)
if request.POST:
if 'simpan' in request.POST:
controls = request.POST.items()
try:
c = form.validate(controls)
except ValidationFailure, e:
return dict(form=form)
save_request(dict(controls), row)
return route_list()
elif SESS_EDIT_FAILED in request.session:
return self.session_failed(SESS_EDIT_FAILED)
values = row.to_dict()
r=DBSession.query(Rekening).filter(Rekening.id==row.rekening_id).first()
if r:
values['rekening_kd'] = r.kode
values['rekening_nm'] = r.nama
else:
values['rekening_id'] = 0
values['rekening_kd'] = ""
values['rekening_nm'] = ""
a=DBSession.query(KegiatanSub).filter(KegiatanSub.id==row.kegiatan_sub_id).first()
if a:
values['kegiatan_sub_kd'] = a.kode
values['kegiatan_sub_nm'] = a.nama
else:
values['kegiatan_sub_id'] = 0
values['kegiatan_sub_kd'] = ""
values['kegiatan_sub_nm'] = ""
aa=DBSession.query(Sap).filter(Sap.id==row.sap_id).first()
if aa:
values['sap_kd'] = aa.kode
values['sap_nm'] = aa.nama
else:
values['sap_id'] = 0
values['sap_kd'] = ""
values['sap_nm'] = ""
form.set_appstruct(values)
return dict(form=form)
##########
# Delete #
##########
@view_config(route_name='ak-jurnal-ppkd-item-delete', renderer='json',
permission='delete')
def view_delete(self):
request = self.request
ses = self.session
q = self.query_id().join(AkJurnal).filter(AkJurnal.unit_id==ses['unit_id'])
row = q.first()
if not row:
return self.id_not_found()
q = self.query_id()
q.delete()
DBSession.flush()
return {'success': True, 'msg':'Sukses Hapus Data'}
| mit | 236,121,345,891,928,500 | 40.246637 | 118 | 0.478824 | false |
romanorac/discomll | discomll/examples/ensemble/dwf_rand_breastcancer_url.py | 1 | 1134 | from disco.core import result_iterator
from discomll import dataset
from discomll.ensemble import distributed_weighted_forest_rand
from discomll.utils import accuracy
train = dataset.Data(data_tag=[
["http://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/breast-cancer-wisconsin.data"]],
id_index=0,
X_indices=xrange(1, 10),
X_meta="http://ropot.ijs.si/data/datasets_meta/breastcancer_meta.csv",
y_index=10,
delimiter=",")
fit_model = distributed_weighted_forest_rand.fit(train, trees_per_chunk=3, max_tree_nodes=50, min_samples_leaf=5,
min_samples_split=10, class_majority=1,
measure="info_gain", num_medoids=10, accuracy=1, separate_max=True,
random_state=None, save_results=True)
# predict training dataset
predictions = distributed_weighted_forest_rand.predict(train, fit_model)
# output results
for k, v in result_iterator(predictions):
print k, v[0]
# measure accuracy
ca = accuracy.measure(train, predictions)
print ca
| apache-2.0 | -3,606,847,400,242,093,000 | 38.103448 | 119 | 0.656966 | false |
dahebolangkuan/ToughRADIUS | radiusd/plugins/admin_update_cache.py | 1 | 1839 | #!/usr/bin/env python
#coding=utf-8
from twisted.python import log
from store import store
from settings import *
import logging
import json
def process(req=None,admin=None):
msg_id = req.get("msg_id")
cache_class = req.get("cache_class")
if not cache_class:
reply = json.dumps({'msg_id':msg_id,'data':u'cache_class is empty','code':1})
return admin.sendMessage(reply,False)
def send_ok(op):
reply = json.dumps({'msg_id':msg_id,'data':u'%s ok'%op,'code':0})
admin.sendMessage(reply,False)
if cache_class == 'param':
store.update_param_cache()
send_ok("param cache update")
elif cache_class == 'account' and req.get("account_number"):
store.update_user_cache(req.get("account_number"))
send_ok("account cache update")
elif cache_class == 'bas' and req.get("ip_addr"):
store.update_bas_cache(req.get("ip_addr"))
send_ok("bas cache update")
elif cache_class == 'roster' and req.get("mac_addr"):
store.update_roster_cache(req.get("mac_addr"))
send_ok("roster cache update")
elif cache_class == 'product' and req.get("product_id"):
store.update_product_cache(req.get("product_id"))
send_ok("product cache update")
elif cache_class == 'reject_delay' and req.get("reject_delay"):
try:
_delay = int(req.get("reject_delay"))
if _delay >= 0 and _delay <= 9:
admin.auth_server.auth_delay.reject_delay = _delay
send_ok("reject_delay update")
except:
reply = json.dumps({'msg_id':msg_id,'data':u'error reject_delay param','code':0})
admin.sendMessage(reply,False)
else:
reply = json.dumps({'msg_id':msg_id,'data':u'do nothing','code':0})
admin.sendMessage(reply,False)
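# --- Editor's sketch (illustrative only, not part of ToughRADIUS) ------------
# process() expects an already-decoded request dict and an admin transport
# exposing sendMessage(). The payload shapes below are inferred from the
# handler above and are assumptions, not documented API:
#   {"msg_id": "1", "cache_class": "param"}
#   {"msg_id": "2", "cache_class": "account", "account_number": "user001"}
#   {"msg_id": "3", "cache_class": "bas", "ip_addr": "192.168.0.1"}
#   {"msg_id": "4", "cache_class": "reject_delay", "reject_delay": "5"}
# Every request is answered with a JSON reply of the form
#   {"msg_id": <same id>, "data": <message text>, "code": 0 or 1}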
| bsd-2-clause | -7,725,087,970,054,210,000 | 36.530612 | 93 | 0.604133 | false |
jwhitlock/dsh-orderwrt-bug | setup.py | 1 | 1622 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Packaging setup for dsh-orderwrt-bug."""
from sample import __version__ as version
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
requirements = [
'Django>=1.6',
]
test_requirements = [
'dj_database_url',
'django_nose',
'django_extensions',
'jingo',
]
setup(
name='dsh-orderwrt-bug',
version=version,
description='Demo of OrderWRT bug',
long_description=readme + '\n\n' + history,
author='John Whitlock',
author_email='[email protected]',
url='https://github.com/jwhitlock/dsh-orderwrt-bug',
packages=[
'sample',
],
package_dir={
'sample': 'sample',
},
include_package_data=True,
install_requires=requirements,
license="MPL 2.0",
zip_safe=False,
keywords='dsh-orderwrt-bug',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)',
'Natural Language :: English',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
test_suite='sample_site.runtests.runtests',
tests_require=test_requirements
)
| mpl-2.0 | -2,333,552,451,090,873,000 | 25.590164 | 74 | 0.609125 | false |
bitcraft/pyglet | contrib/wydget/wydget/widgets/drawer.py | 1 | 6309 | from wydget import anim
from wydget.widgets.frame import Frame
class Drawer(Frame):
"""A *transparent container* that may hide and expose its contents.
"""
name = 'drawer'
HIDDEN = 'hidden'
EXPOSED = 'exposed'
LEFT = 'left'
RIGHT = 'right'
TOP = 'top'
BOTTOM = 'bottom'
def __init__(self, parent, state=HIDDEN, side=LEFT,
is_transparent=True, **kw):
super().__init__(parent, is_transparent=is_transparent,
**kw)
self.state = state
self.side = side
if state == self.HIDDEN:
self.setVisible(False)
def toggle_state(self):
if self.state == self.EXPOSED:
self.hide()
else:
self.expose()
_anim = None
def expose(self):
if self.state == self.EXPOSED:
return
if self._anim is not None and self._anim.is_running:
self._anim.cancel()
self._anim = ExposeAnimation(self)
self.setVisible(True)
self.state = self.EXPOSED
def hide(self):
if self.state == self.HIDDEN:
return
if self._anim is not None and self._anim.is_running:
self._anim.cancel()
self._anim = HideAnimation(self)
self.state = self.HIDDEN
class HideAnimation(anim.Animation):
def __init__(self, drawer, duration=.25, function=anim.cosine90):
self.drawer = drawer
self.duration = duration
self.function = function
if drawer.side == Drawer.LEFT:
self.sx = int(drawer.x)
self.ex = int(drawer.x - drawer.width)
self.sw = int(drawer.width)
self.ew = 0
elif drawer.side == Drawer.RIGHT:
self.sx = int(drawer.x)
self.ex = int(drawer.x + drawer.width)
self.sw = int(drawer.width)
self.ew = 0
elif drawer.side == Drawer.TOP:
self.sy = int(drawer.y)
self.ey = int(drawer.y - drawer.height)
self.sh = int(drawer.height)
self.eh = 0
elif drawer.side == Drawer.BOTTOM:
self.sy = int(drawer.y)
self.ey = int(drawer.y + drawer.height)
self.sh = int(drawer.height)
self.eh = 0
super().__init__()
def cancel(self):
self.drawer.setVisible(False)
if self.drawer.side in (Drawer.LEFT, Drawer.RIGHT):
self.drawer.setViewClip((self.sx, 0, self.ew,
self.drawer.height))
self.drawer.x = self.ex
else:
self.drawer.setViewClip((0, self.sy, self.drawer.width,
self.eh))
self.drawer.y = self.ey
super().cancel()
def animate(self, dt):
self.anim_time += dt
if self.anim_time >= self.duration:
self.cancel()
else:
t = self.anim_time / self.duration
if self.drawer.side in (Drawer.LEFT, Drawer.RIGHT):
x = anim.tween(self.sx, self.ex, t, self.function)
w = anim.tween(self.sw, self.ew, t, self.function)
if self.drawer.side == Drawer.LEFT:
vcx = self.sw - w
elif self.drawer.side == Drawer.RIGHT:
vcx = 0
self.drawer.setViewClip((vcx, 0, w, self.drawer.height))
self.drawer.x = x
else:
y = anim.tween(self.sy, self.ey, t, self.function)
h = anim.tween(self.sh, self.eh, t, self.function)
if self.drawer.side == Drawer.TOP:
vcy = self.sh - h
elif self.drawer.side == Drawer.BOTTOM:
vcy = 0
self.drawer.setViewClip((0, vcy, self.drawer.width, h))
self.drawer.y = y
class ExposeAnimation(anim.Animation):
def __init__(self, drawer, duration=.25, function=anim.cosine90):
self.drawer = drawer
self.duration = duration
self.function = function
if drawer.side == Drawer.LEFT:
self.sx = int(drawer.x)
self.ex = int(drawer.x + drawer.width)
self.sw = 0
self.ew = int(drawer.width)
elif drawer.side == Drawer.RIGHT:
self.sx = int(drawer.x)
self.ex = int(drawer.x - drawer.width)
self.sw = 0
self.ew = int(drawer.width)
elif drawer.side == Drawer.TOP:
self.sy = int(drawer.y)
self.ey = int(drawer.y + drawer.height)
self.sh = 0
self.eh = int(drawer.height)
elif drawer.side == Drawer.BOTTOM:
self.sy = int(drawer.y)
self.ey = int(drawer.y - drawer.height)
self.sh = 0
self.eh = int(drawer.height)
super().__init__()
def cancel(self):
if self.drawer.side in (Drawer.LEFT, Drawer.RIGHT):
self.drawer.setViewClip((0, 0, self.ew, self.drawer.height))
self.drawer.x = self.ex
else:
self.drawer.setViewClip((0, 0, self.drawer.width, self.eh))
self.drawer.y = self.ey
super().cancel()
def animate(self, dt):
self.anim_time += dt
if self.anim_time >= self.duration:
self.cancel()
else:
t = self.anim_time / self.duration
if self.drawer.side in (Drawer.LEFT, Drawer.RIGHT):
x = anim.tween(self.sx, self.ex, t, self.function)
w = anim.tween(self.sw, self.ew, t, self.function)
if self.drawer.side == Drawer.LEFT:
vcx = self.ew - w
elif self.drawer.side == Drawer.RIGHT:
vcx = 0
self.drawer.setViewClip((vcx, 0, w, self.drawer.height))
self.drawer.x = x
else:
y = anim.tween(self.sy, self.ey, t, self.function)
h = anim.tween(self.sh, self.eh, t, self.function)
if self.drawer.side == Drawer.TOP:
vcy = self.eh - h
elif self.drawer.side == Drawer.BOTTOM:
vcy = 0
self.drawer.setViewClip((0, vcy, self.drawer.width, h))
self.drawer.y = y
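# --- Editor's sketch (illustrative only, not part of wydget) -----------------
# A hedged example of how a Drawer might be used; `parent` stands for any
# existing wydget container and is an assumption, not a tested setup.
#   drawer = Drawer(parent, state=Drawer.HIDDEN, side=Drawer.LEFT)
#   ...                      # add child widgets to the drawer as usual
#   drawer.toggle_state()    # slides it out via ExposeAnimation
#   drawer.toggle_state()    # slides it back in via HideAnimation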
| bsd-3-clause | -1,548,507,284,185,789,000 | 34.24581 | 72 | 0.511333 | false |
marcelcaraciolo/foursquare | pyfoursquare/test_foursquare.py | 1 | 3753 | import unittest
import urllib
from foursquare import OAuthHandler, API, BasicAuthHandler, FoursquareError
from models import Tip, User
class TestAuthentication(unittest.TestCase):
CLIENT_ID = 'YOUR_CLIENT_ID'
CLIENT_SECRET = 'YOUR_CLIENT_SECRET'
REDIRECT_URI = 'YOUR_CALLBACK'
def _test_create_OAuthHandler(self):
auth = OAuthHandler(TestAuthentication.CLIENT_ID,
TestAuthentication.CLIENT_SECRET,
TestAuthentication.REDIRECT_URI)
self.assertEquals(auth._client_id, TestAuthentication.CLIENT_ID)
self.assertEquals(auth._client_secret, TestAuthentication.CLIENT_SECRET)
self.assertEquals(auth.callback, TestAuthentication.REDIRECT_URI)
def _test_get_authorization_url(self):
auth = OAuthHandler(TestAuthentication.CLIENT_ID, TestAuthentication.CLIENT_SECRET,
TestAuthentication.REDIRECT_URI)
self.assertEquals(auth.get_authorization_url(),
('https://foursquare.com/oauth2/authenticate?redirect_uri=%s' +
'&response_type=code&client_id=%s')
% (urllib.quote(self.REDIRECT_URI).replace('/', '%2F'), self.CLIENT_ID)
)
def _test_get_access_token(self):
auth = OAuthHandler(TestAuthentication.CLIENT_ID, TestAuthentication.CLIENT_SECRET,
TestAuthentication.REDIRECT_URI)
code = 'YOUR_CODE'
self.assert_(auth.get_access_token(code) is not None)
class TestAPI(unittest.TestCase):
CLIENT_ID = 'YOUR_CLIENT_ID'
CLIENT_SECRET = 'YOUR_CLIENT_SECRET'
REDIRECT_URI = 'YOUR_CALLBACK'
def setUp(self):
self.auth = OAuthHandler(TestAuthentication.CLIENT_ID,
TestAuthentication.CLIENT_SECRET,
TestAuthentication.REDIRECT_URI)
self.auth.set_access_token('YOUR_CODE')
def test_create_api(self):
api = API(self.auth)
self.assertEquals(api.auth, self.auth)
self.assertEquals(api.host, 'api.foursquare.com')
self.assertEquals(api.api_root, '/v2')
self.assertEquals(api.retry_errors, None)
self.assertEquals(api.retry_count, 0)
self.assertEquals(api.retry_delay, 0)
def test_venues_search(self):
api = API(self.auth)
result = api.venues_search(query='Burburinho', ll='-8.063542,-34.872891')
self.assertEquals('Burburinho', result[0].name)
#Without authentication
basic_auth = BasicAuthHandler(TestAuthentication.CLIENT_ID,
TestAuthentication.CLIENT_SECRET,
TestAuthentication.REDIRECT_URI)
api = API(basic_auth)
result = api.venues_search(query='Burburinho', ll='-8.063542,-34.872891')
self.assertEquals('Burburinho', result[0].name)
self.assertRaises(FoursquareError, api.venues_tips, id='4bb0e776f964a52099683ce3')
def test_venues(self):
api = API(self.auth)
self.assertEquals('Burburinho', api.venues(id='4bb0e776f964a52099683ce3').name)
def test_venues_tips(self):
api = API(self.auth)
r = api.venues_tips(id='4bb0e776f964a52099683ce3')
self.assert_(isinstance(r[0], Tip))
r = api.venues_tips(id='40a55d80f964a52020f31ee3', limit=10, offset=0)
self.assert_(isinstance(r[0], Tip))
self.assertEquals(len(r), 10)
r = api.venues_tips(id='40a55d80f964a52020f31ee3', limit=10, offset=10)
self.assertEquals(len(r), 10)
def test_users(self):
api = API(self.auth)
r = api.users(id='self')
self.assert_(isinstance(r, User))
self.assert_(isinstance(r.friends()[0], User))
self.assertEquals(len(r.checkins()), 20)
if __name__ == '__main__':
    unittest.main()
| mit | 3,737,443,895,409,420,000 | 39.793478 | 91 | 0.645084 | false |
Mynti207/cs207project | tsdb/tsdb_serialization.py | 1 | 3930 | import json
from collections import OrderedDict
LENGTH_FIELD_LENGTH = 4
def serialize(json_obj):
'''
Turn a JSON object into bytes suitable for writing out to the network.
Includes a fixed-width length field to simplify reconstruction on the other
end of the wire.
Parameters
----------
json_obj : object in json format
The object to be serialized
Returns
-------
buf : bytes
The serialized object
'''
# serialize, i.e. return the bytes on the wire
obj_serialized = bytearray(json.dumps(json_obj), 'utf-8')
# start the buffer based on the fixed-width length field
buf = (len(obj_serialized) +
LENGTH_FIELD_LENGTH).to_bytes(LENGTH_FIELD_LENGTH,
byteorder="little")
# add the serialized json object to the buffer
buf += obj_serialized
# return the buffer
return buf
class Deserializer(object):
'''
A buffering and bytes-to-json engine.
Data can be received in arbitrary chunks of bytes, and we need a way to
reconstruct variable-length JSON objects from that interface. This class
buffers up bytes until it can detect that it has a full JSON object (via
a length field pulled off the wire). To use this, add bytes with the
append() function and call ready() to check if we've reconstructed a JSON
object. If True, then call deserialize to return it. That object will be
removed from this buffer after it is returned.
'''
def __init__(self):
'''
Initializes the Deserializer class.
Parameters
----------
None
Returns
-------
An initialized Deserializer object
'''
# initialize blank buffer
self.buf = b''
self.buflen = -1
def append(self, data):
'''
Appends data to the Deserializer's buffer.
Parameters
----------
data : bytes
Binary data to be deserialized
Returns
-------
Nothing, modifies in-place.
'''
self.buf += data
self._maybe_set_length()
def _maybe_set_length(self):
'''
Calculates and stores the length of the Deserializer's buffer.
Parameters
----------
None
Returns
-------
Nothing, modifies in-place.
'''
# only calculate if there is data in the buffer
if self.buflen < 0 and len(self.buf) >= LENGTH_FIELD_LENGTH:
# update buffer length
self.buflen = int.from_bytes(self.buf[0:LENGTH_FIELD_LENGTH],
byteorder="little")
def ready(self):
'''
Determines whether the buffer is ready to be deserialized, based
on its length.
Parameters
----------
None
Returns
-------
Boolean : whether the buffer is ready to be deserialized
'''
return (self.buflen > 0 and len(self.buf) >= self.buflen)
def deserialize(self):
'''
Deserializes the buffer.
Parameters
----------
None
Returns
-------
json : deserialized buffer
'''
# deserialize the data in the buffer
json_str = self.buf[LENGTH_FIELD_LENGTH:self.buflen].decode()
# remove the deserialized data from the buffer
self.buf = self.buf[self.buflen:]
self.buflen = -1
# preserve the buffer, as there may already be more data in it
self._maybe_set_length()
# try to load the deserialized data as a json object
try:
# if it loads successfully, return it
return json.loads(json_str, object_pairs_hook=OrderedDict)
except json.JSONDecodeError:
# otherwise it is not valid json data, so don't return it
return None
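# --- Editor's sketch (illustrative only, not part of the original module) ----
# Round trip between serialize() and Deserializer, following the usage
# described in the docstrings above; the payload is an arbitrary example.
def _example_round_trip():
    payload = {'op': 'insert_ts', 'pk': 'ts-1'}
    wire_bytes = serialize(payload)
    deserializer = Deserializer()
    # feed the bytes in two arbitrary chunks to mimic partial network reads
    deserializer.append(wire_bytes[:3])
    deserializer.append(wire_bytes[3:])
    if deserializer.ready():
        # returns an OrderedDict equal in content to `payload`
        return deserializer.deserialize()
    return None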
| mit | -9,098,841,175,235,348,000 | 26.103448 | 79 | 0.573537 | false |
praekelt/panya-social | social/class_modifiers.py | 1 | 2132 | from django.contrib import auth
from django.contrib.auth.models import AnonymousUser, User
from panya.utils import modify_class
def _user_has_field_perm(user, perm, obj, field):
anon = user.is_anonymous()
for backend in auth.get_backends():
if not anon or backend.supports_anonymous_user:
if hasattr(backend, "has_field_perm"):
if field is not None:
if (backend.supports_field_permissions and
backend.has_field_perm(user, perm, obj, field)):
return True
else:
if backend.has_field_perm(user, perm):
return True
return False
class UserModifier(object):
"""
Modifies user class to include a has_field_perm method used in a similar manner to has_perm,
except it checks field permissions as opposed to object permissions.
"""
def has_field_perm(self, perm, obj=None, field=None):
"""
Returns True if the user has the specified permission. This method
queries all available auth backends, but returns immediately if any
backend returns True. Thus, a user who has permission from a single
auth backend is assumed to have permission in general. If an field
is provided, permissions for this specific field are checked.
"""
# Inactive users have no permissions.
if not self.is_active:
return False
# Superusers have all permissions.
if self.is_superuser:
return True
# Otherwise we need to check the backends.
return _user_has_field_perm(self, perm, obj, field)
class AnonymousUserModifier(object):
"""
Modifies user class to include a has_field_perm method used in a similar manner to has_perm,
except it checks field permissions as opposed to object permissions.
"""
def has_field_perm(self, perm, obj=None, field=None):
return _user_has_field_perm(self, perm, obj=obj, field=field)
modify_class(User, UserModifier)
modify_class(AnonymousUser, AnonymousUserModifier)
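# --- Editor's sketch (illustrative only, not part of the original module) ----
# Once modify_class() has patched User and AnonymousUser (above), a view can
# ask for a field-level permission. The permission string, object and field
# name here are placeholder examples, not project API:
#   if request.user.has_field_perm('social.change_profile',
#                                  obj=profile, field='biography'):
#       ...  # allow editing just that field
# Only authentication backends that define has_field_perm() (and declare
# supports_field_permissions for object/field checks) take part in the answer.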
| bsd-3-clause | -75,271,136,306,387,950 | 39.226415 | 97 | 0.646811 | false |
aeklant/scipy | scipy/io/matlab/tests/test_mio_funcs.py | 1 | 1382 | ''' Jottings to work out format for __function_workspace__ matrix at end
of mat file.
'''
import os.path
import io
from numpy.compat import asstr
from scipy.io.matlab.mio5 import MatFile5Reader
test_data_path = os.path.join(os.path.dirname(__file__), 'data')
def read_minimat_vars(rdr):
rdr.initialize_read()
mdict = {'__globals__': []}
i = 0
while not rdr.end_of_stream():
hdr, next_position = rdr.read_var_header()
name = asstr(hdr.name)
if name == '':
name = 'var_%d' % i
i += 1
res = rdr.read_var_array(hdr, process=False)
rdr.mat_stream.seek(next_position)
mdict[name] = res
if hdr.is_global:
mdict['__globals__'].append(name)
return mdict
def read_workspace_vars(fname):
fp = open(fname, 'rb')
rdr = MatFile5Reader(fp, struct_as_record=True)
vars = rdr.get_variables()
fws = vars['__function_workspace__']
ws_bs = io.BytesIO(fws.tostring())
ws_bs.seek(2)
rdr.mat_stream = ws_bs
# Guess byte order.
mi = rdr.mat_stream.read(2)
rdr.byte_order = mi == b'IM' and '<' or '>'
rdr.mat_stream.read(4) # presumably byte padding
mdict = read_minimat_vars(rdr)
fp.close()
return mdict
def test_jottings():
# example
fname = os.path.join(test_data_path, 'parabola.mat')
read_workspace_vars(fname)
| bsd-3-clause | 2,204,432,114,049,731,600 | 25.075472 | 72 | 0.60275 | false |
saurabh6790/frappe | frappe/test_runner.py | 1 | 12502 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals, print_function
import frappe
import unittest, json, sys, os
import time
import xmlrunner
import importlib
from frappe.modules import load_doctype_module, get_module_name
import frappe.utils.scheduler
import cProfile, pstats
from six import StringIO
from six.moves import reload_module
from frappe.model.naming import revert_series_if_last
unittest_runner = unittest.TextTestRunner
SLOW_TEST_THRESHOLD = 2
def xmlrunner_wrapper(output):
"""Convenience wrapper to keep method signature unchanged for XMLTestRunner and TextTestRunner"""
def _runner(*args, **kwargs):
kwargs['output'] = output
return xmlrunner.XMLTestRunner(*args, **kwargs)
return _runner
def main(app=None, module=None, doctype=None, verbose=False, tests=(),
force=False, profile=False, junit_xml_output=None, ui_tests=False,
doctype_list_path=None, skip_test_records=False, failfast=False):
global unittest_runner
if doctype_list_path:
app, doctype_list_path = doctype_list_path.split(os.path.sep, 1)
with open(frappe.get_app_path(app, doctype_list_path), 'r') as f:
doctype = f.read().strip().splitlines()
if ui_tests:
print("Selenium testing has been deprecated\nUse bench --site {site_name} run-ui-tests for Cypress tests")
xmloutput_fh = None
if junit_xml_output:
xmloutput_fh = open(junit_xml_output, 'wb')
unittest_runner = xmlrunner_wrapper(xmloutput_fh)
else:
unittest_runner = unittest.TextTestRunner
try:
frappe.flags.print_messages = verbose
frappe.flags.in_test = True
if not frappe.db:
frappe.connect()
# if not frappe.conf.get("db_name").startswith("test_"):
# raise Exception, 'db_name must start with "test_"'
# workaround! since there is no separate test db
frappe.clear_cache()
frappe.utils.scheduler.disable_scheduler()
set_test_email_config()
if not frappe.flags.skip_before_tests:
if verbose:
print('Running "before_tests" hooks')
for fn in frappe.get_hooks("before_tests", app_name=app):
frappe.get_attr(fn)()
if doctype:
ret = run_tests_for_doctype(doctype, verbose, tests, force, profile, junit_xml_output=junit_xml_output)
elif module:
ret = run_tests_for_module(module, verbose, tests, profile, junit_xml_output=junit_xml_output)
else:
ret = run_all_tests(app, verbose, profile, ui_tests, failfast=failfast, junit_xml_output=junit_xml_output)
if frappe.db: frappe.db.commit()
# workaround! since there is no separate test db
frappe.clear_cache()
return ret
finally:
if xmloutput_fh:
xmloutput_fh.flush()
xmloutput_fh.close()
def set_test_email_config():
frappe.conf.update({
"auto_email_id": "[email protected]",
"mail_server": "smtp.example.com",
"mail_login": "[email protected]",
"mail_password": "test",
"admin_password": "admin"
})
class TimeLoggingTestResult(unittest.TextTestResult):
def startTest(self, test):
self._started_at = time.time()
super(TimeLoggingTestResult, self).startTest(test)
def addSuccess(self, test):
elapsed = time.time() - self._started_at
name = self.getDescription(test)
if elapsed >= SLOW_TEST_THRESHOLD:
self.stream.write("\n{} ({:.03}s)\n".format(name, elapsed))
super(TimeLoggingTestResult, self).addSuccess(test)
def run_all_tests(app=None, verbose=False, profile=False, ui_tests=False, failfast=False, junit_xml_output=False):
import os
apps = [app] if app else frappe.get_installed_apps()
test_suite = unittest.TestSuite()
for app in apps:
for path, folders, files in os.walk(frappe.get_pymodule_path(app)):
for dontwalk in ('locals', '.git', 'public', '__pycache__'):
if dontwalk in folders:
folders.remove(dontwalk)
# for predictability
folders.sort()
files.sort()
# print path
for filename in files:
if filename.startswith("test_") and filename.endswith(".py")\
and filename != 'test_runner.py':
# print filename[:-3]
_add_test(app, path, filename, verbose,
test_suite, ui_tests)
if junit_xml_output:
runner = unittest_runner(verbosity=1+(verbose and 1 or 0), failfast=failfast)
else:
runner = unittest_runner(resultclass=TimeLoggingTestResult, verbosity=1+(verbose and 1 or 0), failfast=failfast)
if profile:
pr = cProfile.Profile()
pr.enable()
out = runner.run(test_suite)
if profile:
pr.disable()
s = StringIO()
ps = pstats.Stats(pr, stream=s).sort_stats('cumulative')
ps.print_stats()
print(s.getvalue())
return out
def run_tests_for_doctype(doctypes, verbose=False, tests=(), force=False, profile=False, junit_xml_output=False):
modules = []
if not isinstance(doctypes, (list, tuple)):
doctypes = [doctypes]
for doctype in doctypes:
module = frappe.db.get_value("DocType", doctype, "module")
if not module:
print('Invalid doctype {0}'.format(doctype))
sys.exit(1)
test_module = get_module_name(doctype, module, "test_")
if force:
for name in frappe.db.sql_list("select name from `tab%s`" % doctype):
frappe.delete_doc(doctype, name, force=True)
make_test_records(doctype, verbose=verbose, force=force)
modules.append(importlib.import_module(test_module))
return _run_unittest(modules, verbose=verbose, tests=tests, profile=profile, junit_xml_output=junit_xml_output)
def run_tests_for_module(module, verbose=False, tests=(), profile=False, junit_xml_output=False):
module = importlib.import_module(module)
if hasattr(module, "test_dependencies"):
for doctype in module.test_dependencies:
make_test_records(doctype, verbose=verbose)
return _run_unittest(module, verbose=verbose, tests=tests, profile=profile, junit_xml_output=junit_xml_output)
def _run_unittest(modules, verbose=False, tests=(), profile=False, junit_xml_output=False):
frappe.db.begin()
test_suite = unittest.TestSuite()
if not isinstance(modules, (list, tuple)):
modules = [modules]
for module in modules:
module_test_cases = unittest.TestLoader().loadTestsFromModule(module)
if tests:
for each in module_test_cases:
for test_case in each.__dict__["_tests"]:
if test_case.__dict__["_testMethodName"] in tests:
test_suite.addTest(test_case)
else:
test_suite.addTest(module_test_cases)
if junit_xml_output:
runner = unittest_runner(verbosity=1+(verbose and 1 or 0))
else:
runner = unittest_runner(resultclass=TimeLoggingTestResult, verbosity=1+(verbose and 1 or 0))
if profile:
pr = cProfile.Profile()
pr.enable()
frappe.flags.tests_verbose = verbose
out = runner.run(test_suite)
if profile:
pr.disable()
s = StringIO()
ps = pstats.Stats(pr, stream=s).sort_stats('cumulative')
ps.print_stats()
print(s.getvalue())
return out
def _add_test(app, path, filename, verbose, test_suite=None, ui_tests=False):
import os
if os.path.sep.join(["doctype", "doctype", "boilerplate"]) in path:
# in /doctype/doctype/boilerplate/
return
app_path = frappe.get_pymodule_path(app)
relative_path = os.path.relpath(path, app_path)
if relative_path=='.':
module_name = app
else:
module_name = '{app}.{relative_path}.{module_name}'.format(app=app,
relative_path=relative_path.replace('/', '.'), module_name=filename[:-3])
module = importlib.import_module(module_name)
if hasattr(module, "test_dependencies"):
for doctype in module.test_dependencies:
make_test_records(doctype, verbose=verbose)
is_ui_test = True if hasattr(module, 'TestDriver') else False
if is_ui_test != ui_tests:
return
if not test_suite:
test_suite = unittest.TestSuite()
if os.path.basename(os.path.dirname(path))=="doctype":
txt_file = os.path.join(path, filename[5:].replace(".py", ".json"))
if os.path.exists(txt_file):
with open(txt_file, 'r') as f:
doc = json.loads(f.read())
doctype = doc["name"]
make_test_records(doctype, verbose)
test_suite.addTest(unittest.TestLoader().loadTestsFromModule(module))
def make_test_records(doctype, verbose=0, force=False):
if not frappe.db:
frappe.connect()
if frappe.flags.skip_test_records:
return
for options in get_dependencies(doctype):
if options == "[Select]":
continue
if not options in frappe.local.test_objects:
frappe.local.test_objects[options] = []
make_test_records(options, verbose, force)
make_test_records_for_doctype(options, verbose, force)
def get_modules(doctype):
module = frappe.db.get_value("DocType", doctype, "module")
try:
test_module = load_doctype_module(doctype, module, "test_")
if test_module:
reload_module(test_module)
except ImportError:
test_module = None
return module, test_module
def get_dependencies(doctype):
module, test_module = get_modules(doctype)
meta = frappe.get_meta(doctype)
link_fields = meta.get_link_fields()
for df in meta.get_table_fields():
link_fields.extend(frappe.get_meta(df.options).get_link_fields())
options_list = [df.options for df in link_fields] + [doctype]
if hasattr(test_module, "test_dependencies"):
options_list += test_module.test_dependencies
options_list = list(set(options_list))
if hasattr(test_module, "test_ignore"):
for doctype_name in test_module.test_ignore:
if doctype_name in options_list:
options_list.remove(doctype_name)
options_list.sort()
return options_list
def make_test_records_for_doctype(doctype, verbose=0, force=False):
if not force and doctype in get_test_record_log():
return
module, test_module = get_modules(doctype)
if verbose:
print("Making for " + doctype)
if hasattr(test_module, "_make_test_records"):
frappe.local.test_objects[doctype] += test_module._make_test_records(verbose)
elif hasattr(test_module, "test_records"):
frappe.local.test_objects[doctype] += make_test_objects(doctype, test_module.test_records, verbose, force)
else:
test_records = frappe.get_test_records(doctype)
if test_records:
frappe.local.test_objects[doctype] += make_test_objects(doctype, test_records, verbose, force)
elif verbose:
print_mandatory_fields(doctype)
add_to_test_record_log(doctype)
def make_test_objects(doctype, test_records=None, verbose=None, reset=False):
'''Make test objects from given list of `test_records` or from `test_records.json`'''
records = []
def revert_naming(d):
if getattr(d, 'naming_series', None):
revert_series_if_last(d.naming_series, d.name)
if test_records is None:
test_records = frappe.get_test_records(doctype)
for doc in test_records:
if not doc.get("doctype"):
doc["doctype"] = doctype
d = frappe.copy_doc(doc)
if d.meta.get_field("naming_series"):
if not d.naming_series:
d.naming_series = "_T-" + d.doctype + "-"
if doc.get('name'):
d.name = doc.get('name')
else:
d.set_new_name()
if frappe.db.exists(d.doctype, d.name) and not reset:
frappe.db.rollback()
# do not create test records, if already exists
continue
# submit if docstatus is set to 1 for test record
docstatus = d.docstatus
d.docstatus = 0
try:
d.run_method("before_test_insert")
d.insert()
if docstatus == 1:
d.submit()
except frappe.NameError:
revert_naming(d)
except Exception as e:
if d.flags.ignore_these_exceptions_in_test and e.__class__ in d.flags.ignore_these_exceptions_in_test:
revert_naming(d)
else:
raise
records.append(d.name)
frappe.db.commit()
return records
def print_mandatory_fields(doctype):
print("Please setup make_test_records for: " + doctype)
print("-" * 60)
meta = frappe.get_meta(doctype)
print("Autoname: " + (meta.autoname or ""))
print("Mandatory Fields: ")
for d in meta.get("fields", {"reqd":1}):
print(d.parent + ":" + d.fieldname + " | " + d.fieldtype + " | " + (d.options or ""))
print()
def add_to_test_record_log(doctype):
'''Add `doctype` to site/.test_log
`.test_log` is a cache of all doctypes for which test records are created'''
test_record_log = get_test_record_log()
if not doctype in test_record_log:
frappe.flags.test_record_log.append(doctype)
with open(frappe.get_site_path('.test_log'), 'w') as f:
f.write('\n'.join(filter(None, frappe.flags.test_record_log)))
def get_test_record_log():
'''Return the list of doctypes for which test records have been created'''
if 'test_record_log' not in frappe.flags:
if os.path.exists(frappe.get_site_path('.test_log')):
with open(frappe.get_site_path('.test_log'), 'r') as f:
frappe.flags.test_record_log = f.read().splitlines()
else:
frappe.flags.test_record_log = []
return frappe.flags.test_record_log
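# --- Editor's sketch (illustrative only, not part of the original module) ----
# Typical programmatic entry points, assuming an initialised test site; the
# app, doctype and module names below are placeholders:
#   main(app='frappe', verbose=True)                         # all tests in an app
#   main(doctype='ToDo', force=True)                         # one doctype, records rebuilt
#   main(module='frappe.tests.test_api', tests=('test_insert',))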
| mit | -3,969,909,880,128,501,000 | 28.416471 | 114 | 0.707007 | false |
LandRegistry/digital-register-api | integration_tests/test_es_access.py | 1 | 10335 | import elasticsearch
import mock
import pytest
import requests
from time import sleep
from config import CONFIG_DICT
from service import es_access
PROPERTY_BY_POSTCODE_DOC_TYPE = 'property_by_postcode_3'
PROPERTY_BY_ADDRESS_DOC_TYPE = 'property_by_address'
class TestEsAccess:
def setup_method(self, method):
self._ensure_empty_index()
def test_get_properties_for_postcode_throws_exception_on_unsuccessful_attempt_to_talk_to_es(self):
with mock.patch.dict(es_access.app.config, {'ELASTICSEARCH_ENDPOINT_URI': 'http://non-existing2342345.co.uk'}):
with pytest.raises(Exception) as e:
es_access.get_properties_for_postcode('XX000XX', 10, 0)
assert type(e.value) == elasticsearch.exceptions.ConnectionError
def test_get_properties_for_postcode_returns_properties_with_right_data(self):
postcode = 'XX000XX'
title_number = 'TITLE1001'
address_string = 'address string'
house_number = 123
entry_datetime = '2015-09-09T12:34:56.123+00'
self._create_property_for_postcode(postcode, title_number, address_string, house_number, entry_datetime)
self._wait_for_elasticsearch()
titles = es_access.get_properties_for_postcode(postcode, 10, 0)
assert len(titles) == 1
title = titles[0]
assert title.postcode == postcode
assert title.title_number == title_number
assert title.address_string == address_string
assert title.house_number_or_first_number == house_number
assert title.entry_datetime == entry_datetime
def test_get_properties_for_postcode_returns_properties_sorted_by_number_then_address(self):
postcode = 'XX000XX'
title_number_1, title_number_2, title_number_3 = 'TITLE1001', 'TITLE1002', 'TITLE1003'
self._create_property_for_postcode(postcode, title_number_1, 'address b 1', house_number=2)
self._create_property_for_postcode(postcode, title_number_2, 'address a 1', house_number=1)
self._create_property_for_postcode(postcode, title_number_3, 'address b 1', house_number=1)
self._wait_for_elasticsearch()
titles = es_access.get_properties_for_postcode(postcode, 10, 0)
assert self._get_title_numbers(titles) == [title_number_2, title_number_3, title_number_1]
def test_get_properties_for_postcode_returns_empty_list_when_no_matches(self):
properties = es_access.get_properties_for_postcode('XX000XX', 10, 0)
assert properties == []
def test_get_properties_for_postcode_does_not_return_addresses_from_different_postcodes(self):
postcode_1 = 'XX000XX'
postcode_2 = 'YY000YY'
title_number_1 = 'TITLE1001'
title_number_2 = 'TITLE1002'
self._create_property_for_postcode(postcode_1, title_number_1, 'address a 1')
self._create_property_for_postcode(postcode_2, title_number_2, 'address b 1')
self._wait_for_elasticsearch()
properties_for_postcode_1 = es_access.get_properties_for_postcode(postcode_1, 10, 0)
assert self._get_title_numbers(properties_for_postcode_1) == [title_number_1]
properties_for_postcode_2 = es_access.get_properties_for_postcode(postcode_2, 10, 0)
assert self._get_title_numbers(properties_for_postcode_2) == [title_number_2]
def test_get_properties_for_postcode_returns_the_right_page_of_records(self):
postcode = 'XX000XX'
for i in range(1, 6):
self._create_property_for_postcode('XX000XX', 'TITLE{}'.format(i), 'address {}'.format(i), i)
self._wait_for_elasticsearch()
first_page = es_access.get_properties_for_postcode(postcode, 2, 0)
assert self._get_title_numbers(first_page) == ['TITLE1', 'TITLE2']
second_page = es_access.get_properties_for_postcode(postcode, 2, 1)
assert self._get_title_numbers(second_page) == ['TITLE3', 'TITLE4']
third_page = es_access.get_properties_for_postcode(postcode, 2, 2)
assert self._get_title_numbers(third_page) == ['TITLE5']
def test_get_properties_for_address_throws_exception_on_unsuccessful_attempt_to_talk_to_es(self):
with mock.patch.dict(es_access.app.config, {'ELASTICSEARCH_ENDPOINT_URI': 'http://non-existing2342345.co.uk'}):
with pytest.raises(Exception) as e:
es_access.get_properties_for_address('XX000XX', 10, 0)
assert type(e.value) == elasticsearch.exceptions.ConnectionError
def test_get_properties_for_address_returns_properties_with_right_data(self):
title_number = 'TITLE1001'
address_string = 'address string'
entry_datetime = '2015-09-09T12:34:56.123+00'
self._create_property_for_address(title_number, address_string, entry_datetime)
self._wait_for_elasticsearch()
titles = es_access.get_properties_for_address(address_string, 10, 0)
assert len(titles) == 1
title = titles[0]
assert title.title_number == title_number
assert title.address_string == address_string
assert title.entry_datetime == entry_datetime
def test_get_properties_for_address_returns_properties_sorted_by_match_strength(self):
title_number_1, title_number_2, title_number_3 = 'TITLE1001', 'TITLE1002', 'TITLE1003'
self._create_property_for_address(title_number_1, 'almost same address')
self._create_property_for_address(title_number_2, 'other address')
self._create_property_for_address(title_number_3, 'same address')
self._wait_for_elasticsearch()
titles = es_access.get_properties_for_address('same address', 10, 0)
assert self._get_title_numbers(titles) == [title_number_3, title_number_1, title_number_2]
def test_get_properties_for_address_returns_empty_list_when_no_matches(self):
titles = es_access.get_properties_for_address('non-existing address', 10, 0)
assert titles == []
def test_get_properties_for_address_returns_the_right_page_of_records(self):
search_phrase = 'strongest match first'
address_1 = 'match first'
address_2 = 'weakest match'
address_3 = 'strongest match first'
title_number_1 = "MIDDLE"
title_number_2 = "WEAKEST"
title_number_3 = "STRONGEST"
self._create_property_for_address(title_number_1, address_1)
self._create_property_for_address(title_number_2, address_2)
self._create_property_for_address(title_number_3, address_3)
self._wait_for_elasticsearch()
first_page = es_access.get_properties_for_address(search_phrase, page_size=2, page_number=0)
assert self._get_title_numbers(first_page) == ['STRONGEST', 'MIDDLE']
second_page = es_access.get_properties_for_address(search_phrase, page_size=2, page_number=1)
assert self._get_title_numbers(second_page) == ['WEAKEST']
def test_get_info_throws_exception_on_unsuccessful_attempt_to_talk_to_es(self):
with mock.patch.dict(es_access.app.config, {'ELASTICSEARCH_ENDPOINT_URI': 'http://non-existing2342345.co.uk'}):
with pytest.raises(Exception) as e:
es_access.get_info()
assert type(e.value) == elasticsearch.exceptions.ConnectionError
def test_get_info_returns_cluster_info(self):
result = es_access.get_info()
assert result.get('status') == 200
assert result.get('cluster_name') == 'elasticsearch'
def _drop_index(self):
requests.delete(self._get_index_uri())
def _ensure_empty_index(self):
self._drop_index()
index = {
'mappings': {
PROPERTY_BY_POSTCODE_DOC_TYPE: {
'properties': {
'title_number': {'type': 'string', 'index': 'no'},
'postcode': {'type': 'string', 'index': 'not_analyzed'},
'house_number_or_first_number': {'type': 'integer', 'index': 'not_analyzed'},
'address_string': {'type': 'string', 'index': 'not_analyzed'},
'entry_datetime': {
'type': 'date',
'format': 'date_time',
'index': 'no'
},
}
},
PROPERTY_BY_ADDRESS_DOC_TYPE: {
'properties': {
'title_number': {'type': 'string', 'index': 'no'},
'address_string': {'type': 'string', 'index': 'analyzed'},
'entry_datetime': {'type': 'date', 'format': 'date_time', 'index': 'no'},
}
}
}
}
response = requests.put(self._get_index_uri(), json=index)
assert response.status_code == 200
def _create_property_for_postcode(
self, postcode, title_number, address_string, house_number=1, entry_datetime='2015-09-09T12:34:56.123+00'):
entry_json = {
'title_number': title_number,
'entry_datetime': entry_datetime,
'postcode': postcode,
'house_number_or_first_number': house_number,
'address_string': address_string,
}
uri = '{}/{}/'.format(self._get_index_uri(), PROPERTY_BY_POSTCODE_DOC_TYPE)
response = requests.post(uri, json=entry_json)
assert response.status_code == 201
def _create_property_for_address(
self, title_number, address_string, entry_datetime='2015-09-09T12:34:56.123+00'):
entry_json = {
'title_number': title_number,
'entry_datetime': entry_datetime,
'address_string': address_string,
}
uri = '{}/{}/'.format(self._get_index_uri(), PROPERTY_BY_ADDRESS_DOC_TYPE)
response = requests.post(uri, json=entry_json)
assert response.status_code == 201
def _get_title_numbers(self, search_results):
return list(map(lambda result: result.title_number, search_results))
def _get_index_uri(self):
return self._get_elasticsearch_uri() + '/' + CONFIG_DICT['ELASTICSEARCH_INDEX_NAME']
def _get_elasticsearch_uri(self):
return CONFIG_DICT['ELASTICSEARCH_ENDPOINT_URI']
def _wait_for_elasticsearch(self):
sleep(1.5)
| mit | -1,743,692,951,915,325,200 | 41.883817 | 119 | 0.623803 | false |
surdy/dcos | packages/dcos-integration-test/extra/test_units.py | 1 | 5833 | import glob
import logging
import os
import pathlib
import stat
import subprocess
import pytest
__maintainer__ = 'gpaul'
__contact__ = '[email protected]'
@pytest.mark.supportedwindows
def test_verify_units():
"""Test that all systemd units are valid."""
def _check_units(path):
"""Verify all the units given by `path'"""
for file in glob.glob(path):
cmd = subprocess.run(
["/usr/bin/systemd-analyze", "verify", "--no-pager", file],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True)
# systemd-analyze returns 0 even if there were warnings, so we
# assert that the command output was empty.
if cmd.stdout:
# We specifically allow directives that exist in newer systemd
# versions but will cause older systemd versions to complain.
# The "old" systemd version we are using as a baseline is
# systemd 219, which ships with CentOS 7.2.1511.
def _check_line(line):
# `systemd-analyze verify` checks for errors in the given
# unit files, as well as other files that are loaded
# transitively. We do not want our tests to fail when
# third-party software ships bad unit files, so we
# explicitly check that 'dcos-' is present on a
# line before checking if it is valid.
if "dcos-" not in line:
return True
# The TasksMax directive exists in newer versions of systemd
# where it is important to set. As we want to support multiple
# versions of systemd our tests must ignore errors that
# complain that it is an unknown directive.
ignore_new_directives = ["TasksMax"]
for directive in ignore_new_directives:
# When systemd does not understand a directive it
# prints a line with the following format:
#
# [/etc/systemd/system/foo.service:5] Unknown lvalue 'EExecStat' in section 'Service'
#
# We ignore such errors when the lvalue is one of the
# well-known directives that got added to newer
# versions of systemd.
unknown_lvalue_err = "Unknown lvalue '%s'" % directive
if unknown_lvalue_err in line:
# This version of systemd does not understand this
# directive. It got added in newer versions.
# As systemd ignores directives it does not
# understand this is not a problem and we simply
# ignore this error.
pass
else:
# Whatever problem systemd-analyze sees in this
# line is more significant than a simple
# 'unknown lvalue' complaint. We treat it as a
# valid issue and fail.
return False
return True
for line in cmd.stdout.split("\n"):
if not _check_line(line):
pytest.fail("Invalid systemd unit: " + line)
_check_units("/etc/systemd/system/dcos-*.service")
_check_units("/etc/systemd/system/dcos-*.socket")
@pytest.mark.supportedwindows
def test_socket_units():
"""Test that socket units configure socket files in /run/dcos
that are owned by 'dcos_adminrouter'.
"""
def _check_unit(file):
logging.info("Checking socket unit {}".format(file))
out = subprocess.check_output(
["/usr/bin/systemctl", "show", "--no-pager", os.path.basename(file)],
stderr=subprocess.STDOUT,
universal_newlines=True)
user = ""
group = ""
mode = ""
had_unix_socket = False
for line in out.split("\n"):
parts = line.split("=")
if len(parts) != 2:
continue
k, v = parts
if k == "SocketUser":
user = v
if k == "SocketGroup":
group = v
if k == "ListenStream":
# Unix sockets are distinguished from IP sockets by having a '/' as the first
# character in the value of the ListenStream directive.
if v.startswith("/"):
had_unix_socket = True
assert v.startswith("/run/dcos/"), "DC/OS unix sockets must go in the /run/dcos directory"
if k == "SocketMode":
mode = v
if not had_unix_socket:
# This socket file doesn't declare any unix sockets, ignore.
return
assert user == "root"
assert group == "dcos_adminrouter"
assert mode == "0660"
for file in glob.glob("/etc/systemd/system/dcos-*.socket"):
_check_unit(file)
@pytest.mark.supportedwindows
def test_socket_files():
"""Test that all socket files in /run/dcos are owned by 'dcos_adminrouter'."""
for file in glob.glob("/run/dcos/*"):
path = pathlib.Path(file)
if not path.is_socket():
# This is not a unix socket file, ignore.
continue
logging.info("Checking socket file {}".format(file))
assert path.owner() == "root"
assert path.group() == "dcos_adminrouter"
assert stat.S_IMODE(path.stat().st_mode) == 0o660
| apache-2.0 | 7,323,465,166,184,007,000 | 42.857143 | 112 | 0.527002 | false |
gitcoinco/web | app/avatar/migrations/0009_avatartextoverlayinput.py | 1 | 1176 | # Generated by Django 2.2.4 on 2020-03-26 19:49
from django.db import migrations, models
import economy.models
class Migration(migrations.Migration):
dependencies = [
('avatar', '0008_auto_20200301_2047'),
]
operations = [
migrations.CreateModel(
name='AvatarTextOverlayInput',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_on', models.DateTimeField(db_index=True, default=economy.models.get_time)),
('modified_on', models.DateTimeField(default=economy.models.get_time)),
('active', models.BooleanField(db_index=True, default=False)),
('text', models.TextField(blank=True, default='')),
('coment', models.TextField(blank=True, default='')),
('num_uses_total', models.IntegerField(default=0)),
('num_uses_remaining', models.IntegerField(default=0)),
('current_uses', models.IntegerField(default=0)),
],
options={
'abstract': False,
},
),
]
| agpl-3.0 | 7,582,269,842,014,924,000 | 36.935484 | 114 | 0.571429 | false |
cristianav/PyCRC-old | CRCModules/CRC16SICK.py | 1 | 1820 | #!/usr/bin/env python3
# -*- coding: utf8 -*-
# CRC16SICK MODULE
#
# Cristian NAVALICI cristian.navalici at gmail dot com
#
from ctypes import c_ushort
class CRC16SICK(object):
# The CRC's are computed using polynomials. Here is the most used coefficient for CRC16 SICK
crc16SICK_constant = 0x8005
def __init__(self):
pass
def calculate(self, input_data = None):
try:
is_string = isinstance(input_data, str)
is_bytes = isinstance(input_data, bytes)
if not is_string and not is_bytes:
raise Exception("Please provide a string or a byte sequence as argument for calculation.")
crcValue = 0x0000
for idx, c in enumerate(input_data):
d = ord(c) if is_string else c
short_c = 0x00ff & d
idx_previous = idx - 1
if idx_previous == -1:
prev_c = 0
else:
prev_c = input_data[idx_previous]
prev_c = ord(prev_c) if is_string else prev_c
short_p = ( 0x00ff & prev_c) << 8;
if ( crcValue & 0x8000 ): crcValue = c_ushort(crcValue << 1).value ^ self.crc16SICK_constant
else: crcValue = c_ushort(crcValue << 1).value
crcValue &= 0xffff
crcValue ^= ( short_c | short_p )
            # After processing, the one's complement of the CRC is calculated and the
# two bytes of the CRC are swapped.
low_byte = (crcValue & 0xff00) >> 8
high_byte = (crcValue & 0x00ff) << 8
crcValue = low_byte | high_byte;
return crcValue
except Exception as e:
print ("EXCEPTION(calculate): {}".format(e))
| gpl-3.0 | -2,695,530,379,084,512,000 | 31.5 | 110 | 0.531868 | false |
thehub/hubplus | apps/plus_permissions/types/TgGroup.py | 1 | 19483 | from apps.plus_permissions.interfaces import InterfaceReadProperty, InterfaceWriteProperty, InterfaceCallProperty
from apps.plus_permissions.models import SetSliderOptions, SliderOptions, SetAgentDefaults, SetPossibleTypes, SetSliderAgents, PossibleTypes, get_interface_map, SetVisibleAgents, SetVisibleTypes, SetTypeLabels
from apps.plus_groups.models import TgGroup, MemberInvite
from apps.plus_permissions.OurPost import OurPost
from apps.plus_contacts.models import Application, Contact
from apps.plus_wiki.models import WikiPage
from apps.profiles.models import Profile
from apps.plus_permissions.site import Site
from apps.plus_links.models import Link
from apps.plus_resources.models import Resource
from apps.plus_feed.models import FeedItem, AggregateFeed
from django.db.models.signals import post_save
import datetime
from copy import deepcopy
content_type = TgGroup
from apps.plus_permissions.default_agents import get_or_create_root_location, get_anonymous_group, get_all_members_group, get_creator_agent
def setup_group_security(group, context_agent, context_admin, creator, permission_prototype):
group.to_security_context()
sec_context = group.get_security_context()
sec_context.set_context_agent(context_agent.get_ref())
sec_context.set_context_admin(context_admin.get_ref())
sec_context.save()
group.add_member(creator)
group.add_member(context_admin)
group.save()
group.get_security_context().set_up(permission_prototype)
ref = group.get_ref()
ref.creator = creator
ref.permission_prototype = permission_prototype
ref.save()
#if group.id != get_all_members_group().id :
# get_all_members_group().add_member(group)
# override object managers, filter, get, get_or_create
def get_or_create(group_name=None, display_name=None, place=None, level=None, user=None,
group_type='interest', description='', permission_prototype='public', address='') :
"""get or create a group
"""
# note : we can't use get_or_create for TgGroup, because the created date clause won't match on a different day
# from the day the record was created.
if not user:
raise TypeError("We must have a user to create a group, since otherwise it will be inaccessible")
if not place:
place = get_or_create_root_location()
xs = TgGroup.objects.filter(group_name=group_name)
if len(xs) > 0 :
group = xs[0]
created = False
else :
created = True
group = TgGroup(group_name=group_name, display_name=display_name, level=level,
place=place, description=description, group_type=group_type, address=address)
group.save()
group_post_create(group, user, permission_prototype)
return group, created
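# Minimal usage sketch (hypothetical values; assumes an existing auth user object
# `some_user` and that the root location has been created):
#   group, created = get_or_create(group_name='diabetes_study',
#                                  display_name='Diabetes Study', level='member',
#                                  user=some_user, permission_prototype='public')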
def group_post_create(group, user, permission_prototype=None) :
if not permission_prototype :
permission_prototype = 'public'
group.save() # ensures our post_save signal is fired to create gen_ref, even if we came via syncer
if group.level == 'member':
admin_group, created = TgGroup.objects.get_or_create(
group_name=group.group_name + "_hosts",
display_name=group.display_name + " Hosts",
level='host',
place=group.place,
user=user,
group_type='administrative',
description="Admin Group for %s" % group.display_name,
)
setup_group_security(group, group, admin_group, user, permission_prototype)
elif group.level == 'host' or group.level == 'director' :
setup_group_security(group, group, group, user, 'private')
from django.conf import settings
if group.group_name != settings.VIRTUAL_HUB_NAME+'_hosts' :
site_admin = get_all_members_group().get_admin_group()
group.add_member(site_admin)
site_admin.flush_members_cache()
def get_admin_group(self) :
return self.get_security_context().context_admin.obj
TgGroup.get_admin_group = get_admin_group
# we need a special set_permissions interface which is only editable by the scontext_admin and determines who can set permissions or override them for an object.
class TgGroupViewer:
pk = InterfaceReadProperty
description = InterfaceReadProperty
place = InterfaceReadProperty
website = InterfaceReadProperty
display_name = InterfaceReadProperty
get_display_name = InterfaceCallProperty # huh
group_name = InterfaceReadProperty
group_type = InterfaceReadProperty
address = InterfaceReadProperty
apply = InterfaceCallProperty
leave = InterfaceCallProperty
get_users = InterfaceCallProperty
get_no_members = InterfaceCallProperty
get_no_users = InterfaceCallProperty
get_admin_group = InterfaceCallProperty
homeplace = InterfaceReadProperty
get_group_type_display = InterfaceReadProperty
status = InterfaceReadProperty
is_hub_type = InterfaceCallProperty
users = InterfaceReadProperty
group_type = InterfaceReadProperty
is_group = InterfaceCallProperty
is_user = InterfaceCallProperty
get_url = InterfaceCallProperty
get_display_name = InterfaceCallProperty
get_description = InterfaceCallProperty
get_author_name = InterfaceCallProperty
get_author_copyright = InterfaceCallProperty
get_created_date = InterfaceCallProperty
get_feed_extras = InterfaceCallProperty
class TgGroupEditor:
pk = InterfaceReadProperty
description = InterfaceWriteProperty
place = InterfaceWriteProperty
website = InterfaceWriteProperty
display_name = InterfaceWriteProperty
message_members = InterfaceCallProperty
add_link = InterfaceCallProperty
change_avatar = InterfaceCallProperty
status = InterfaceWriteProperty
group_type = InterfaceWriteProperty
address = InterfaceWriteProperty
get_resources_of_class = InterfaceCallProperty
class TgGroupDelete:
delete = InterfaceCallProperty
class TgGroupJoin:
pk = InterfaceReadProperty
join = InterfaceCallProperty
class TgGroupLeave:
leave = InterfaceCallProperty
class TgGroupComment:
pk = InterfaceReadProperty
comment = InterfaceCallProperty
class TgGroupUploader:
upload = InterfaceCallProperty
class TgGroupMessage:
message_members = InterfaceCallProperty
class TgGroupTypeEditor:
group_type = InterfaceWriteProperty
class TgGroupManageMembers:
pk = InterfaceReadProperty
add_member = InterfaceCallProperty
accept_member = InterfaceCallProperty
remove_member = InterfaceCallProperty
class TgGroupInvite:
invite_member = InterfaceCallProperty
create_Contact = InterfaceCallProperty
class TgGroupStatusViewer:
current_status = InterfaceCallProperty
class SetManagePermissions:
pass
from apps.plus_permissions.models import add_type_to_interface_map
if not get_interface_map(TgGroup):
TgGroupInterfaces = {'Viewer': TgGroupViewer,
'Editor': TgGroupEditor,
'Delete': TgGroupDelete,
'ManageMembers': TgGroupManageMembers,
'Join': TgGroupJoin,
'Leave': TgGroupLeave,
'Commentor':TgGroupComment,
'Uploader':TgGroupUploader,
'Message':TgGroupMessage,
'Invite':TgGroupInvite,
'GroupTypeEditor':TgGroupTypeEditor,
'StatusViewer':TgGroupStatusViewer,
'SetManagePermissions':SetManagePermissions}
add_type_to_interface_map(TgGroup, TgGroupInterfaces)
# use InterfaceOrder to draw the slider and constraints, these are used in rendering the sliders and in validating the results
# these exist on a per type basis and are globals for their type.
# they don't need to be stored in the db
if not SliderOptions.get(TgGroup, False):
SetSliderOptions(TgGroup,
{'InterfaceOrder':['Viewer', 'Editor', 'Join', 'Uploader', 'Commentor', 'Invite', 'Message', 'ManageMembers', 'Delete', 'ManagePermissions'],
'InterfaceLabels':{'Viewer':'View',
'Editor': 'Edit',
'Commentor': 'Comment',
'Message' : 'Message Group',
'ManageMembers': 'Manage Membership',
'ManagePermissions':'Change Permissions'}})
# ChildTypes are used to determine what types of objects can be created in this security context (and acquire security context from this). These are used when creating an explicit security context for an object of this type.
if TgGroup not in PossibleTypes:
child_types = [OurPost, Site, Application, Contact, Profile, WikiPage, Link, Resource,
MemberInvite, AggregateFeed, FeedItem]
SetPossibleTypes(TgGroup, child_types)
SetVisibleTypes(content_type, [TgGroup, WikiPage, Resource, Application, FeedItem])
SetTypeLabels(content_type, 'Group')
# if the security context is in this agent, this set of slider_agents apply, irrespective of the type of resource they are
def get_slider_agents(scontext) :
return [
('anonymous_group', get_anonymous_group().get_ref()),
('all_members_group', get_all_members_group().get_ref()),
('context_agent', scontext.context_agent),
('creator', get_creator_agent()),
('context_admin', scontext.context_admin)
]
SetSliderAgents(TgGroup, get_slider_agents)
def visible_agents():
return ['anonymous_group', 'all_members_group', 'context_agent', 'creator', 'context_admin']
SetVisibleAgents(TgGroup, visible_agents())
# The agent must have a set of default levels for every type which can be created within it. Other objects don't need these as they will be copied from acquired security context according to the possible types available at the "lower" level. We have different AgentDefaults for different group types e.g. standard, public, or private.
#constraints - note that "higher" means wider access. Therefore if "anonymous can't edit" we must set that Editor<$anonymous OR if Editor functionality can't be given to a wider group than Viewer then we must set Editor < Viewer.
def setup_defaults():
public_defaults = {'TgGroup':
{'defaults':
{'Viewer':'anonymous_group',
'Editor':'creator',
'Delete':'creator',
'ManageMembers':'creator',
'Join':'all_members_group',
'Leave':'context_agent',
'Invite':'creator',
'ManagePermissions':'context_admin',
'SetManagePermissions':'context_admin',
'CreateLink':'context_agent',
'CreateComment':'all_members_group',
'CreateWikiPage':'context_agent',
'CreateResource':'context_agent',
'CreateApplication':'all_members_group',
'CreateMemberInvite':'context_agent',
'Message':'context_agent',
'StatusViewer':'anonymous_group',
'GroupTypeEditor':'context_admin',
'Commentor':'context_agent',
'Unknown': 'context_admin'
},
'constraints':
['Viewer>=Editor', 'Join>=ManageMembers', 'ManageMembers<=$anonymous_group', 'ManagePermissions<=$context_agent']
},
'WikiPage':
{'defaults':
{'Viewer':'anonymous_group',
'Editor':'context_agent',
'Creator':'creator',
'Manager':'context_admin',
'Commentor':'all_members_group',
'CommentViewer':'anonymous_group',
'Unknown':'context_admin',
'ManagePermissions':'creator'},
'constraints': ['Viewer>=Editor', 'Editor<$anonymous_group', 'Viewer>=Commentor']
},
'OurPost':
{ 'defaults' :
{'Viewer':'all_members_group',
'Editor':'creator',
'Commentor':'context_agent',
'ManagePermissions':'creator',
'Unknown': 'context_admin'},
'constraints':['Viewer>=Editor', 'Editor<$anonymous_group']
},
'Site' :
{'defaults':
{'CreateApplication':'anonymous_group',
'ManagePermissions':'context_admin',
'CreateVirtual':'all_members_group',
'CreateHub':'context_admin',
'Unknown': 'context_admin'},
'constraints': []
},
'Application':
{'defaults' :
{'Viewer':'context_admin',
'Editor':'creator',
'Accept':'context_admin',
'Reject':'context_admin',
'ManagePermissions':'context_admin',
'Unknown': 'context_admin',
},
'constraints':['Viewer>=Editor', 'Editor<$anonymous_group']
},
'MemberInvite':
{'defaults' :
{'Viewer':'context_admin',
'Editor':'creator',
'Accept':'creator',
'ManagePermissions':'context_admin',
'Unknown': 'context_admin',
},
'constraints':['Viewer>=Editor', 'Editor<$anonymous_group']
},
'Contact':
{'defaults' :
{'ContactAdmin':'context_admin',
'ContactInvite':'all_members_group',
'ManagePermissions':'context_admin',
'Unknown': 'context_admin'
},
'constraints':[]
},
'Link':
{'defaults' :
{ 'Viewer':'anonymous_group',
'Editor':'creator',
'Delete':'context_agent',
'Manager':'context_agent',
'ManagePermissions':'context_admin',
'Unknown':'context_admin'
},
'constraints':['Viewer>=Manager']
},
'Resource':
{'defaults' :
{ 'Viewer': 'all_members_group',
'Editor': 'context_agent',
'Commentor': 'all_members_group',
'Delete' : 'context_admin',
'Manager': 'context_agent',
'ManagePermissions':'context_admin',
'Unknown': 'context_admin',
},
'constraints':['Viewer>=Manager']
},
'FeedItem':
{'defaults':
{'Viewer' : 'all_members_group',
'Delete' : 'context_agent',
'Unknown':'context_agent'},
'constraints': []
},
'AggregateFeed':
{'defaults':
{'Viewer' : 'context_agent',
'Unknown' : 'context_agent'},
             'constraints':[]
},
'Profile':
{'defaults':
{'Viewer': 'anonymous_group',
'Editor': 'creator',
'EmailAddressViewer' : 'context_agent',
'HomeViewer' : 'context_agent',
'WorkViewer' : 'context_agent',
'MobileViewer' : 'context_agent',
'FaxViewer' : 'context_agent',
'AddressViewer' : 'context_agent',
'SkypeViewer' : 'context_agent',
'SipViewer' : 'context_agent',
'ManagePermissions':'context_admin',
'Unknown' : 'creator',
},
'constraints':[]
}
}
    # start by cloning the public defaults, then override the differences using plus_lib/utils/overlay
    # XXX don't need to keep doing this ... make sure it's called only once
from apps.plus_lib.dict_tools import overlay
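    # Note (assumption about plus_lib.dict_tools.overlay semantics): overlay(base, patch)
    # is taken here to deep-merge `patch` into `base` and return the result, e.g.
    #   overlay(defaults, {'TgGroup': {'defaults': {'Viewer': 'all_members_group'}}})
    # only changes the 'Viewer' level of 'TgGroup' and leaves every other default intact.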
open_defaults = deepcopy(public_defaults)
open_defaults = overlay(open_defaults,{'TgGroup':{'defaults':{'Viewer':'all_members_group'}}})
open_defaults = overlay(open_defaults,{'WikiPage':{'defaults':{'Viewer':'all_members_group'}}})
open_defaults = overlay(open_defaults,{'Resource':{'defaults':{'Viewer':'all_members_group'}}})
open_defaults = overlay(open_defaults,{'WikiPage':{'defaults':{'Commentor':'context_agent'}}})
    open_defaults = overlay(open_defaults,{'Resource':{'defaults':{'Commentor':'context_agent'}}})
invite_defaults = deepcopy(open_defaults)
invite_defaults = overlay(invite_defaults,{'TgGroup':{'defaults':{'Join':'context_agent'}}})
invite_defaults = overlay(invite_defaults,{'TgGroup':{'defaults':{'CreateMemberInvite':'context_agent'}}})
hub_defaults = deepcopy(invite_defaults)
hub_defaults = overlay(hub_defaults, {'TgGroup':{'defaults':{'Invite':'context_admin'}}})
hub_defaults = overlay(hub_defaults, {'TgGroup':{'defaults':{'CreateMemberInvite':'context_admin'}}})
private_defaults = deepcopy(invite_defaults)
private_defaults = overlay(private_defaults,{'TgGroup':{'defaults':{'Viewer':'context_agent'}}})
private_defaults = overlay(private_defaults,{'TgGroup':{'defaults':{'Invite':'context_admin'}}})
private_defaults = overlay(private_defaults,{'WikiPage':{'defaults':{'Viewer':'context_agent'}}})
private_defaults = overlay(private_defaults,{'Resource':{'defaults':{'Viewer':'context_agent'}}})
return {'public':public_defaults,
'private':private_defaults,
'open' : open_defaults,
'invite' : invite_defaults}
AgentDefaults = setup_defaults()
SetAgentDefaults(TgGroup, AgentDefaults)
| gpl-3.0 | 4,864,147,382,982,466,000 | 42.488839 | 334 | 0.563312 | false |
mozeq/fros | src/pyfros/controls.py | 1 | 4623 | ## Copyright (C) 2013 ABRT team <[email protected]>
## Copyright (C) 2013 Red Hat, Inc.
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA
# pylint has troubles importing from gi.repository because
# it uses introspection
# pylint: disable=E0611
#from gi.repository import GLib
from gi.repository import Gtk
# pylint: disable=F0401
from gi.repository.Gtk import SizeGroupMode
from gi.repository import Gdk
from pyfros.i18n import _
from pyfros.froslogging import info
class Controls(Gtk.Window):
# selected plugin
controller = None
def _update_progressbar(self, percent):
self.progress.set_visible(True) # don't check, just make sure it's visible
        self.progress.set_fraction(percent / 100)  # set_fraction expects a value between 0 and 1
# xgettext:no-c-format
self.progress.set_text("Encoding: {0!s}% complete".format(percent))
def _area_selected(self, result):
if result is True:
self.rec_button.set_sensitive(True)
def __init__(self, controller):
Gtk.Window.__init__(self)
self.controller = controller
self.controller.SetProgressUpdate(self._update_progressbar)
buttons_size_group = Gtk.SizeGroup(SizeGroupMode.BOTH)
main_vbox = Gtk.VBox()
main_hbox = Gtk.HBox()
# pylint: disable=E1101
self.add(main_vbox)
# pylint: disable=E1101
self.set_decorated(False)
# move away from the UI!
self.wroot = Gdk.get_default_root_window()
self.wwidth = self.wroot.get_width()
self.wheight = self.wroot.get_height()
#progress bar
self.progress = Gtk.ProgressBar()
self.progress.set_no_show_all(True)
#stop button
self.stop_button = Gtk.Button(stock=Gtk.STOCK_MEDIA_STOP)
self.stop_button.connect("clicked", self._stop_recording)
self.stop_button.set_sensitive(False)
buttons_size_group.add_widget(self.stop_button)
main_hbox.pack_start(self.stop_button, False, False, 0)
#start button
self.rec_button = Gtk.Button(stock=Gtk.STOCK_MEDIA_RECORD)
self.rec_button.connect("clicked", self._start_recording)
# have to select window first
self.rec_button.set_sensitive(False)
buttons_size_group.add_widget(self.rec_button)
main_hbox.pack_start(self.rec_button, False, False, 0)
# select button
select_button = Gtk.Button(_("Select window"))
select_button.connect("clicked", self.controller.SelectArea, self._area_selected)
buttons_size_group.add_widget(select_button)
main_hbox.pack_start(select_button, False, False, 0)
# close button
close_button = Gtk.Button(stock=Gtk.STOCK_CLOSE)
close_button.connect("clicked", Gtk.main_quit)
buttons_size_group.add_widget(close_button)
main_hbox.pack_start(close_button, False, False, 0)
main_vbox.pack_start(main_hbox, True, True, 0)
main_vbox.pack_start(self.progress, True, True, 0)
self.connect("destroy", Gtk.main_quit)
def _stop_recording(self, button):
self.controller.StopScreencast(Gtk.main_quit)
button.set_sensitive(False)
self.rec_button.set_sensitive(True)
def _start_recording(self, button):
info("start recording")
res = self.controller.Screencast()
if res.success:
info("Capturing screencast to {0}".format(res.filename))
button.set_sensitive(False)
self.stop_button.set_sensitive(True)
def show_all(self, *args, **kwargs):
# pylint: disable=E1101
super(Controls, self).show_all(*args, **kwargs)
# pylint: disable=E1101
width, height = self.get_size()
# pylint: disable=E1101
self.move(self.wwidth - (width + 50), self.wheight - (height + 50))
# pylint: disable=E1101
self.present() # move it on top or do some magic to drag the attention
| gpl-2.0 | -7,644,101,290,094,040,000 | 38.177966 | 89 | 0.665801 | false |
andreymal/tabun_api | doc/source/conf.py | 1 | 5408 | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import sys
import os
import shlex
import tabun_api
import sphinx_rtd_theme
# -- Project information -----------------------------------------------------
project = 'tabun_api'
copyright = '2019, andreymal'
author = 'andreymal'
# The short X.Y version
version = tabun_api.__version__
# The full version, including alpha/beta/rc tags
release = version
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'ru'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['.*']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'default'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
html_static_path = []
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'tabun_apidoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'tabun_api.tex', 'tabun\\_api Documentation',
'andreymal', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'tabun_api', 'tabun_api Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'tabun_api', u'tabun_api Documentation',
author, 'tabun_api', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
| mit | 8,847,082,609,616,655,000 | 28.075269 | 79 | 0.641642 | false |
jmmnn/Sample_ST_search | randomDocs.py | 1 | 2475 |
# import nltk
# from nltk.corpus import wordnet as wn
import pandas as pd
import string
import random
titles = [
'Analysis: What Xbox One Scorpio means for the future of the console wars',
    "Ancient Humans Didn't Turn to Cannibalism For the Calories",
'Twitter co-founder Ev Williams is selling 30 percent of his stock for personal reasons',
'Comcast to sell unlimited mobile plans that get throttled after 20GB',
]
content = [
'Enlarge / The exploded innards on what will surely be the most powerful game console in existence... until the next one. Eurogamer / Digital Foundry · reader comments 96.',
'Humans have been eating other humans since the beginning of time, but the motivations behind this macabre practice are complex and often unclear.',
'Twitter co-founder and current board member Ev Williams is going to offload some of his Twitter stock. Williams, who was once Twitters CEO and is the companys largest individual shareholder, said Thursday that he plans to sell a “minority of my TWTR',
'Comcast today announced pricing for its forthcoming smartphone data plans and said the Xfinity Mobile service will be available as an add-on for Comcast home Internet customers before the end of June.',
]
#this is the main function
def randomDoc():
document = {
'Country Origin' : random.sample(['Cuba' , 'France' , 'China' , 'Russia', 'Mexico', 'United Kingdom', 'USA'],1),
'Entity Origin' : random.sample(['United Nations' , 'European Union' , 'Ministry of Foreign Affairs' , 'Permanent Mission to the UN'],2),
'Domain Origin' : random.sample(['gov.fr' , 'gouv.fr' , '.gov' , '.go.uk' , 'gov.cn' , '.edu' , 'edu.mx'],1),
'Title' : random.sample(titles,1),
'Content' : random.sample(content,1),
'url' : 'https://unite.un.org/',
'Language' : random.sample(['English', 'French' , 'Spanish' , 'Russian' , 'Arabic' , 'Chinese'],2),
'Resource Type' : random.sample(['Training', 'Publications', 'Software', 'Projects', 'Regulations' , 'Patents' , 'News', 'Database'],4),
'Resource Format' : random.sample(['Video', 'Audio', 'Website', 'Book' , 'Dataset'],2),
'SDG Goal' : random.sample(['Poverty', 'Hunger', 'Water', 'Climate Change', 'Cities' , 'Partnerships'],3),
'Indexed Date' : '2015-31-12'
}
return document
# Tests
#print (randomDoc())
#print (random.sample(['Video', 'Audio', 'Website', 'Book' , 'Dataset'], 2))
## Not used for now.
# randSentences = nltk.corpus.brown.sents() #Brings 50k sentences
| gpl-3.0 | 7,353,274,640,336,475,000 | 55.181818 | 252 | 0.699838 | false |
Jacob-Barhak/MIST | Transitions.py | 1 | 25760 | ################################################################################
###############################################################################
# Copyright (C) 2013-2014 Jacob Barhak
# Copyright (C) 2009-2012 The Regents of the University of Michigan
#
# This file is part of the MIcroSimulation Tool (MIST).
# The MIcroSimulation Tool (MIST) is free software: you
# can redistribute it and/or modify it under the terms of the GNU General
# Public License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
#
# The MIcroSimulation Tool (MIST) is distributed in the
# hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
###############################################################################
#
# ADDITIONAL CLARIFICATION
#
# The MIcroSimulation Tool (MIST) is distributed in the
# hope that it will be useful, but "as is" and WITHOUT ANY WARRANTY of any
# kind, including any warranty that it will not infringe on any property
# rights of another party or the IMPLIED WARRANTIES OF MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. THE AUTHORS assume no responsibilities
# with respect to the use of the MIcroSimulation Tool (MIST).
#
# The MIcroSimulation Tool (MIST) was derived from the Indirect Estimation
# and Simulation Tool (IEST) and uses code distributed under the IEST name.
# The change of the name signifies a split from the original design that
# focuses on microsimulation. For the sake of completeness, the copyright
# statement from the original tool developed by the University of Michigan
# is provided below and is also mentioned above.
#
###############################################################################
############################ Original Copyright ###############################
###############################################################################
# Copyright (C) 2009-2012 The Regents of the University of Michigan
# Initially developed by Deanna Isaman, Jacob Barhak, Donghee Lee
#
# This file is part of the Indirect Estimation and Simulation Tool (IEST).
# The Indirect Estimation and Simulation Tool (IEST) is free software: you
# can redistribute it and/or modify it under the terms of the GNU General
# Public License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
#
# The Indirect Estimation and Simulation Tool (IEST) is distributed in the
# hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
################################################################################
# #
# This file contains a form to define Transition(s) for a Study/Mode #
################################################################################
import DataDef as DB
import CDMLib as cdml
import wx, copy
class RowPanel(cdml.CDMPanel):
""" RowPanel class for Transitions """
def __init__(self, id_model, *args, **kwds):
""" Constructor of RowPanel class """
kwdsnew = copy.copy(kwds)
kwdsnew['style'] = wx.SIMPLE_BORDER | wx.TAB_TRAVERSAL # Set the style of RowPanel class
cdml.CDMPanel.__init__(self, True, *args, **kwdsnew) # Second parameter should be True always for any RowPanel class
# Use userData to save ID of Study/Model
self.userData = id_model
# Create variables using Transition class and initialize those variables
# using initial values defined in Transition Class
self.record = cdml.GetInstanceAttr(DB.Transition)
self.record.StudyModelID = self.userData
# Button and hidden StaticText to display panel status
self.btn_del = cdml.Button(self, wx.ID_DELETE, "x", style=wx.BU_EXACTFIT)
self.st_status = wx.StaticText(self, -1, " ")
# Create controls to enter/display the variables in Transition object
        # For controls that include a text area, always set wx.TE_NOHIDESEL.
        # This style is needed for the Find function
self.cc_state_from = cdml.Combo(self, cdml.IDP_BUTTON1, style=wx.TE_NOHIDESEL, validator=cdml.KeyValidator(cdml.NO_EDIT))
self.cc_state_to = cdml.Combo(self, cdml.IDP_BUTTON2, style=wx.TE_NOHIDESEL, validator=cdml.KeyValidator(cdml.NO_EDIT))
self.tc_probability = cdml.Text(self, cdml.IDP_BUTTON3, '', style=wx.TE_NOHIDESEL|wx.TE_MULTILINE)
self.tc_notes = cdml.Text(self, -1, "", style=wx.TE_NOHIDESEL|wx.TE_MULTILINE)
self.__set_properties()
self.__do_layout()
# Bind an event handler to check/display formulae for the parameters
# self.Bind(wx.EVT_IDLE, self.CheckFormula)
        # To modify the state and parameters, assign event handlers for some controls
        # Because focus management isn't needed for the modification,
# event handlers are assigned directly to the controls instead of the FrameEventHandler method in CDMFrame class
#self.cc_state_from.GetTextCtrl().Bind(wx.EVT_LEFT_DCLICK, self.OnButtonDblClick )
#self.cc_state_to.GetTextCtrl().Bind(wx.EVT_LEFT_DCLICK, self.OnButtonDblClick )
self.tc_probability.Bind(wx.EVT_LEFT_DCLICK, self.OnButtonDblClick )
def __set_properties(self):
""" Set properties of panel and controls """
self.SetSize((960,-1))
self.btn_del.SetMinSize((20, 20))
self.st_status.SetFont(wx.Font(12, wx.DEFAULT, wx.NORMAL, wx.BOLD, 0, ""))
# Define number of columns, column title and width
        # It's a tuple of tuples
# Syntax : ((Column Name, width), Column Name, width), ...)
# Column number is calculated automatically.
columns = (('Name', 150), ('Notes', 332))
StandardSize = (150, -1)
self.cc_state_from.SetMinSize(StandardSize)
self.cc_state_from.SetColumns(columns)
# Set Sort Id and Data Type of a control
# The sortId should be matched to the sortId of field title in the title section
# If sortId is not set, no action occurs when user click the field title
self.cc_state_from.sortId = 0
self.cc_state_to.SetMinSize(StandardSize)
self.cc_state_to.SetColumns(columns)
self.cc_state_to.sortId = 1
self.tc_probability.SetMinSize(StandardSize)
self.tc_probability.sortId = 2
self.tc_notes.SetMinSize(StandardSize)
self.tc_notes.sortId = 3
def __do_layout(self):
""" Set position of each control """
grid_sizer_1 = wx.GridBagSizer(0, 0)
grid_sizer_1.Add(self.btn_del, (0,0), (1,1), wx.ALL, 1) # 'x' button
grid_sizer_1.Add(self.st_status, (2,0), (1,1), wx.ALL, 1) # hidden text for status display
grid_sizer_1.Add(self.cc_state_from, (0,1), (1,1), wx.ALL, 1) # From State
grid_sizer_1.Add(self.cc_state_to, (1,1), (1,1), wx.ALL, 1) # To State
grid_sizer_1.Add(self.tc_probability, (0,2), (3,1), wx.ALL|wx.EXPAND, 1) # Probability
grid_sizer_1.Add(self.tc_notes, (0,4), (3,1), wx.ALL|wx.EXPAND, 1)
self.SetSizer(grid_sizer_1)
grid_sizer_1.Fit(self)
def GetValues(self):
"""
Retrieve current values in a row panel.
RowPanel class must implement this method.
"""
# create a copy of field variables
record = copy.copy(self.record)
record.StudyModelID = (self.userData)
record.FromState = (self.cc_state_from.GetValue())
record.ToState = (self.cc_state_to.GetValue())
record.Probability = str(self.tc_probability.GetValue())
record.Notes = str(self.tc_notes.GetValue())
return record
def SetValues(self, record, init=False):
"""
Write current data in controls on a row panel
RowPanel class must implement this method.
"""
# Since there are combo items in use, first populate their list
self.SetComboItem()
self.userData = (record.StudyModelID)
self.Key = (record.StudyModelID, record.FromState, record.ToState)
self.cc_state_from.SetValue((record.FromState))
self.cc_state_to.SetValue((record.ToState))
self.tc_probability.SetValue(str(record.Probability))
self.tc_notes.SetValue(str(record.Notes))
def SaveRecord(self, record):
"""
Save/Modify the data of StudyModel object
This method is called by CheckFocus method in CDMLib
RowPanel class must implement this method.
"""
# create new Transition object
entry = DB.Transition( StudyModelID = record.StudyModelID,
FromState = record.FromState,
ToState = record.ToState,
Probability = str(record.Probability),
Notes = str(record.Notes) )
frame = self.GetTopLevelParent()
if self.Id == 0: # if previous panel is new, create new object
entry = DB.Transitions.AddNew(entry, ProjectBypassID = frame.idPrj)
elif self.Id > 0: # if previous panel is existing one, replace record
entry = DB.Transitions.Modify(self.Key, entry, ProjectBypassID = frame.idPrj)
return entry
def TextRecordID(self):
""" Returns the identity of the record as text """
if self.Id == 0:
Result = 'New Transition'
FromState = None
ToState = None
else:
Result = 'Transition saved as "'
FromState = self.record.FromState
ToState = self.record.ToState
if DB.States.has_key(FromState):
Result = Result + ' From-State ' + str(DB.States[FromState].Name)
if DB.States.has_key(ToState):
Result = Result + ' To-State ' + str(DB.States[ToState].Name)
        if DB.StudyModels.has_key(self.record.StudyModelID):
Result = Result + ' For Model ' + str(DB.StudyModels[self.record.StudyModelID].Name)
Result = Result + '"'
FromStateEdited = self.cc_state_from.GetValue()
ToStateEdited = self.cc_state_to.GetValue()
if FromState != FromStateEdited or ToState != ToStateEdited:
Result = Result + ' Currently changed to'
if DB.States.has_key(FromStateEdited):
Result = Result + ' From-State ' + str(DB.States[FromStateEdited].Name)
else:
Result = Result + ' From-State is blank'
if DB.States.has_key(ToStateEdited):
Result = Result + ' To-State ' + str(DB.States[ToStateEdited].Name)
else:
Result = Result + ' To-State is blank'
return Result
def SetComboItem(self):
"""
Set items of ComboCtrl in RowPanel class when focus is moved in current RowPanel instance
The items are removed when lost focus --> Implemented in CDMFrame class
RowPanel class that have combo controls must implement this method.
"""
if self.userData != None:
StatesInStudyModel = DB.StudyModels[self.userData].FindStatesInStudyModel()
states = [ (str(state.Name), str(state.Notes), state.ID)
for state in DB.States.values() if state.ID in StatesInStudyModel ]
self.cc_state_from.SetItems(states)
self.cc_state_to.SetItems(states)
# Following methods are dedicated to the instance of RowPanel class for Transitions form
def OnButtonDblClick(self, event):
"""
Event handler to open child form
"""
tc = event.GetEventObject()
cc = tc.GetParent()
if cc.Id in [ cdml.IDP_BUTTON1, cdml.IDP_BUTTON2 ]:
collection = DB.States
key = cc.GetValue()
form = 'States'
type_parm = ''
elif tc.Id == cdml.IDP_BUTTON3:
collection = DB.Params
key = tc.GetValue()
form = 'Parameters'
if tc.Id == cdml.IDP_BUTTON3:
            type_parm = [ 'Number', 'Integer', 'Expression']
else:
raise ValueError, "Assertion Error: Button does not exist"
if key == '' or key == 0:
msg = 'This ' + form[:-1] + ' is not defined yet.\n'
msg += "Do you want to create a new " + form[:-1] + '?'
ans = cdml.dlgSimpleMsg('ERROR', msg, wx.YES_NO, wx.ICON_ERROR, Parent = self)
if ans == wx.ID_NO : return
cdml.OpenForm(form, self, cdml.ID_MODE_SINGL, key, type_parm)
elif not cdml.GetRecordByKey(collection, key) :
msg = 'The entry "' + str(key) + '" does not exist in ' + form + '.'
ans = cdml.dlgSimpleMsg('ERROR', msg, wx.OK, wx.ICON_ERROR, Parent = self)
return
else:
frame = self.GetTopLevelParent()
cdml.OpenForm(form, self, cdml.ID_MODE_SINGL, key, type_parm, frame.idPrj)
class MainFrame(cdml.CDMFrame):
""" MainFrame class for the Transitions """
def __init__(self, mode=None, data=None, type=None, id_prj=0, *args, **kwds):
""" Constructor of the MainFrame class """
self.idPrj = id_prj
cdml.CDMFrame.__init__(self, mode, data, type, *args, **kwds)
        # Define Popup menu items
# Format : tuple of list --> ([Label, Event handler, Id] , [], [], ... )
# Label : label of an item
# Event handler : name of event handler
# Id : Id of current menu item
# Special label : '-'--> separator, '+' --> submenu items
# First item after last '+' marked items is the title of the submenu
# If an item doesn't have event handler, the second parameter should be 'None'
# If an item doesn't have Id, the third item should be -1
        # If a form needs to manage instances of RowPanel class,
# the event handler should be 'self.FrameEventHandler'
# Otherwise, dedicated event handler should be implemented in that class (ex. see Project or PopulationData form)
self.pup_menus = ( ["Undo", self.FrameEventHandler, wx.ID_UNDO ],
["-" , None, -1],
["Add" , self.FrameEventHandler, wx.ID_ADD],
["Delete" , self.FrameEventHandler, wx.ID_DELETE],
["-" , None, -1 ],
["Find" , self.FrameEventHandler, wx.ID_FIND],
["-" , None, -1 ],
["+From State" , self.FrameEventHandler, cdml.IDF_BUTTON4],
["+To State" , self.FrameEventHandler, cdml.IDF_BUTTON5],
["+Probability" , self.FrameEventHandler, cdml.IDF_BUTTON6],
["Sort By", None, -1])
# Define the window menus
cdml.GenerateStandardMenu(self)
# create panel for field titles
# IMPORTANT NOTE:
# In current version, name of a panel for the title section should be "pn_title"
# And should be an instance of CDMPanel class with False as a first argument
self.pn_title = cdml.CDMPanel(False, self, -1)
self.st_title = wx.StaticText(self.pn_title, -1, "Transitions Between States in a Model")
# Create text and combo control to display the list of studies and models
        # Due to these controls, two-step initialization needs to be implemented for the Transition form
# --> transition list could be set up after selecting a study or model using this combo control
self.st_study_model = wx.StaticText(self.pn_title, -1, "Model")
self.cc_study_model = cdml.Combo(self.pn_title, validator = cdml.KeyValidator(cdml.NO_EDIT))
# Create bitmap buttons to display title of each field
# Syntax : cdml.BitmapButton( parent, id, bitmap, label )
# Don't need to set bitmap here. It will be assigned in the event handler when pressed
# For the sort function, the labels need to be same with the variable name in database object
self.button_1 = cdml.BitmapButton(self.pn_title, cdml.IDF_BUTTON1, None, "From State")
self.button_2 = cdml.BitmapButton(self.pn_title, cdml.IDF_BUTTON2, None, "To State")
self.button_3 = cdml.BitmapButton(self.pn_title, cdml.IDF_BUTTON3, None, "Probability")
self.button_4 = cdml.BitmapButton(self.pn_title, cdml.IDF_BUTTON8, None, "Notes")
# Create Add/Find buttons
# Syntax : cdml.Button( parent, ID )
# ID should be wx.ID_ADD for add button and wx.ID_FIND for find button in all forms
self.btn_add = cdml.Button(self.pn_title, wx.ID_ADD)
self.btn_find = cdml.Button(self.pn_title, wx.ID_FIND)
self.btn_copy_from_model = cdml.Button(self.pn_title, cdml.IDF_BUTTON11, 'Copy From Model')
# Scroll window that the RowPanel objects will be placed
# IMPORTANT NOTE:
# In current version, all forms that need to manage the instance(s) of RowPanel class
# should have an instance of wx.ScrolledWindow class.
# Also the name of the panel should be "pn_view"
self.pn_view = wx.ScrolledWindow(self, -1, style=wx.SUNKEN_BORDER|wx.TAB_TRAVERSAL)
self.__set_properties()
self.__do_layout()
# Assign event handler for the buttons in title section -- to check the focus change
self.pn_title.Bind(wx.EVT_BUTTON, self.FrameEventHandler, id=cdml.IDF_BUTTON1, id2=cdml.IDF_BUTTON8)
self.btn_add.Bind(wx.EVT_BUTTON, self.FrameEventHandler)
self.btn_find.Bind(wx.EVT_BUTTON, self.FrameEventHandler)
self.btn_copy_from_model.Bind(wx.EVT_BUTTON, self.CopyTransitionsFromAnotherStudyModel)
self.cc_study_model.Bind(wx.EVT_LEFT_UP, self.FrameEventHandler)
self.cc_study_model.GetTextCtrl().Bind(wx.EVT_LEFT_UP, self.FrameEventHandler)
# The next line was commented since it worked fine on windows yet did
# not work on a Linux system. Therefore instead of handling the mouse
# click we are looking at the selection of the item form the list. For
# some reason this forces repainting of the screen. Yet since it works
# on both Linux and Windows, this solution was a compromise
# self.cc_study_model.Bind(wx.EVT_COMMAND_LEFT_CLICK, self.FrameEventHandler)
self.cc_study_model.Bind(wx.EVT_LIST_ITEM_SELECTED, self.InitTransitions)
self.InitTransitions() # display existing data
# Define the default method to handle the menu selections
OnMenuSelected = cdml.OnMenuSelected
def __set_properties(self):
""" Set properties of frame and controls """
self.SetTitle("TRANSITIONS")
self.SetSize((960, 600))
self.SetCollection('Transitions') # or self.Collection = 'Transitions'
self.HelpContext = 'Transitions'
self.st_title.SetFont(wx.Font(12, wx.DEFAULT, wx.NORMAL, wx.BOLD, 0, ""))
self.cc_study_model.SetColumns( (('Name', 150), ('Notes', 332)) )
self.cc_study_model.SetEvent((None, cdml.ID_EVT_OWN, self.InitTransitions))
self.cc_study_model.typeData = cdml.ID_TYPE_COMBO
self.pn_title.isRow = False
self.pn_view.SetScrollRate(10, 10)
StandardSize = (150, -1)
# set sort id and event id for field titles
for i in range(1,5):
btn = getattr(self, 'button_' + str(i))
btn.SetMinSize(StandardSize)
btn.sortId = i-1
btn.evtID = cdml.ID_EVT_SORT
# Build and assign study/model list for the combo control in title section
study_models = [(sm.Name, sm.Notes, sm.ID) for sm in DB.StudyModels.values() ]
self.cc_study_model.SetItems(study_models, allowBlank = False)
# Set default study or model according to the opening mode
init_id = cdml.iif( self.openData == None, DB.StudyModels.keys()[0], self.openData)
self.cc_study_model.SetValue(init_id)
# If specific Study/Model ID was given, disable the Combo control
self.cc_study_model.Enable(self.openData==None)
def __do_layout(self):
""" Set the position of controls """
sizer_1 = wx.BoxSizer(wx.VERTICAL)
sizer_3 = wx.BoxSizer(wx.VERTICAL)
grid_sizer_1 = wx.GridBagSizer(0, 0)
grid_sizer_2 = wx.BoxSizer(wx.HORIZONTAL)
grid_sizer_2.Add(self.st_study_model, 1, wx.ALL|wx.ALIGN_RIGHT)
grid_sizer_2.Add(self.cc_study_model, 4, wx.ALL|wx.EXPAND)
grid_sizer_2.Add((1,1),1)
grid_sizer_2.Add(self.btn_add, 1)
grid_sizer_2.Add(self.btn_copy_from_model, 2)
grid_sizer_2.Add(self.btn_find, 1)
grid_sizer_1.Add(self.st_title, (0,1), (1,5), wx.ALIGN_LEFT, 10)
grid_sizer_1.Add(grid_sizer_2, (1,1), (1,6), wx.ALL|wx.EXPAND, 0) #
grid_sizer_1.Add((28,0), (2,0))
grid_sizer_1.Add(self.button_1, (2,1), (1,1), wx.ALL, 1) # From
grid_sizer_1.Add(self.button_2, (3,1), (1,1), wx.ALL, 1) # To
grid_sizer_1.Add(self.button_3, (2,2), (3,1), wx.ALL|wx.EXPAND, 1) # Probability
grid_sizer_1.Add(self.button_4, (2,3), (2,1), wx.ALL|wx.EXPAND, 1) # Notes title
self.pn_title.SetSizer(grid_sizer_1)
sizer_1.Add(self.pn_title, 2, wx.EXPAND, 1)
sizer_1.Add(self.pn_view, 10, wx.EXPAND, 1)
self.pn_view.SetSizer(sizer_3)
self.SetSizer(sizer_1)
self.Layout()
# Actual routine to add new panel
# Create an instance of RowPanel object
# this method is called by AddPanel and Initialize method
    # Most forms may not need this method. However, Transitions could be changed according to the value of the Study/Model combo control
    # two-stage initialization needs to be implemented for the Transition form and the Parameter form
def SetupPanel(self, py=0):
""" Addtional code of AddPanel method defined in CDMLib.py """
# Get an ID of study or model from combo control in the title section
id = self.cc_study_model.GetValue()
new_panel = RowPanel(id, self.pn_view, 0, pos=(0,py))
# If the item selected, change the label of a field title.
self.button_3.SetLabel("Probability")
self.button_4.SetLabel("Notes")
self.button_4.Refresh()
# Same as above, according to the type of item(Study or Model)
# 4 titles will be shown or hidden.
return new_panel
def CopyTransitionsFromAnotherStudyModel(self, event=None):
"""
Allow the user to copy all the transitions from an existing study/model
This will bring a dialog box for the user and allow choosing the study
to copy transitions from.
"""
DestinationStudyModelID = self.openData
if DestinationStudyModelID == None or DestinationStudyModelID not in DB.StudyModels.keys():
raise ValueError, "ASSERTION ERROR: invalid destination study model while copying"
return
SortedByNameStudyModelKeys = sorted(DB.StudyModels.keys(), key = lambda Entry: ( DB.StudyModels[Entry].Name , Entry))
# For a study show studies to copy from, for a model show models.
SourceStudyModelNames = map (lambda Entry: str(DB.StudyModels[Entry].Name), SortedByNameStudyModelKeys)
dlg = wx.SingleChoiceDialog(self, 'Please select a Model to copy transitions from', 'Copy all Transitions From a Model', SourceStudyModelNames, wx.CHOICEDLG_STYLE )
if dlg.ShowModal() == wx.ID_OK: # then open blank project form
SelectionIndex = dlg.GetSelection()
if 0 <= SelectionIndex <= (len(SourceStudyModelNames)-1):
SourceStudyModelID = SortedByNameStudyModelKeys[SelectionIndex]
frame = self.GetTopLevelParent()
(RecordsCopied,RecordsToCopy) = DB.StudyModels[DestinationStudyModelID].CopyTransitionsFromAnotherStudyModel(SourceStudyModelID, ProjectBypassID = frame.idPrj)
cdml.dlgSimpleMsg('Completed transition copying from another model', str(RecordsCopied) +' out of ' + str(RecordsToCopy) +' transitions were copied. ', wx.OK, wx.ICON_INFORMATION, Parent = self)
self.InitTransitions()
def InitTransitions(self, event=None):
"""
Display Transitions for selected Study/Model
According to the value of combo control and opening mode, this method build the list of object
then call the Initialize method
"""
if self.openMode == None:
self.openData = self.cc_study_model.GetValue()
StudyModel = DB.StudyModels[self.openData]
SortedTransitionKeys = StudyModel.FindTransitions(SortOption = 'SortByOrderInSubProcess')
objects = map (lambda TransKey: DB.Transitions[TransKey], SortedTransitionKeys )
# decide if study or model
self.Initialize(objects)
if __name__ == "__main__":
app = wx.App(0)
#wx.InitAllImageHandlers() Deprecated
DB.LoadAllData('InData' + DB.os.sep + 'Testing.zip')
frame_1 = MainFrame(mode=None, data=None, type=None, id_prj=0, parent=None)
app.SetTopWindow(frame_1)
frame_1.CenterOnScreen()
frame_1.Show()
app.MainLoop()
| gpl-3.0 | -406,100,797,457,983,550 | 44.352113 | 210 | 0.616227 | false |
Vito2015/tcc3-portal | tcc3portal/tcc_frontend/__init__.py | 1 | 1994 | # coding:utf-8
"""
tcc3portal.tcc_frontend
~~~~~~~~~~~~~~~~~~~
tcc3portal tcc_frontend ui templates package.
:copyright: (c) 2015 by Vito.
:license: GNU, see LICENSE for more details.
"""
from flask import Blueprint, Flask, url_for, current_app
def tcc_frontend_find_resource(filename, cdn, local=True):
"""Resource finding function, also available in templates."""
cdns = current_app.extensions['tcc_frontend']['cdns']
resource_url = cdns[cdn].get_resource_url(filename)
return resource_url
def get_app_config(variable_name):
try:
return current_app.config[variable_name]
except KeyError:
return None
class StaticCDN(object):
"""A CDN that serves content from the local application.
:param static_endpoint: Endpoint to use.
"""
def __init__(self, static_endpoint='static'):
self.static_endpoint = static_endpoint
def get_resource_url(self, filename):
extra_args = {}
return url_for(self.static_endpoint, filename=filename, **extra_args)
class TccFrontend(object):
def __init__(self, app):
if app is not None:
self.init_app(app)
def init_app(self, app):
""" Init Flask app.
:type app: Flask
"""
blueprint = Blueprint('tcc_frontend',
__name__,
static_folder="static",
template_folder="templates",
static_url_path=app.static_url_path+'/tcc_frontend')
app.register_blueprint(blueprint)
app.jinja_env.globals['tcc_frontend_find_resource'] =\
tcc_frontend_find_resource
local = StaticCDN('tcc_frontend.static')
static = StaticCDN()
app.extensions['tcc_frontend'] = {
'cdns': {
'local': local,
'static': static,
},
}
app.jinja_env.globals['get_app_config'] = get_app_config
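# --- Editor's hedged usage sketch (not part of the original package) ---
# Illustrates how TccFrontend is meant to be attached to a Flask app and how a
# template resource lookup resolves afterwards; the resource path "css/app.css"
# is invented for the demonstration.
if __name__ == "__main__":
    app = Flask(__name__)
    TccFrontend(app)
    with app.test_request_context():
        # Resolves against the 'local' StaticCDN registered in init_app.
        print(tcc_frontend_find_resource("css/app.css", "local"))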
| gpl-2.0 | 6,461,207,089,957,628,000 | 27.084507 | 82 | 0.575727 | false |
vacancy/TensorArtist | tartist/data/rflow/push_pipe.py | 1 | 3288 | # -*- coding:utf8 -*-
# File : push_pipe.py
# Author : Jiayuan Mao
# Email : [email protected]
# Date : 4/2/17
#
# This file is part of TensorArtist.
from . import configs, utils
from ...core.utils.meta import notnone_property
import zmq
import threading
import queue
import contextlib
import collections
import pickle
import functools
# import msgpack
# import msgpack_numpy
# msgpack_numpy.patch()
# dumpb = functools.partial(msgpack.dumps, use_bin_type=True)
# loadb = msgpack.loads
import pickle
dumpb = pickle.dumps
loadb = pickle.loads
__all__ = ['PushPipe', 'PullPipe', 'make_push_pair']
class PullPipe(object):
def __init__(self, name, mode='tcp'):
self._name = name
self._mode = mode
self._conn_info = None
self._context = zmq.Context()
self._sock = self._context.socket(zmq.PULL)
self._sock.set_hwm(2)
@notnone_property
def conn_info(self):
return self._conn_info
def initialize(self):
if self._conn_info is not None:
return
if self._mode == 'tcp':
port = self._sock.bind_to_random_port('tcp://*')
self._conn_info = 'tcp://{}:{}'.format(utils.get_addr(), port)
elif self._mode == 'ipc':
self._conn_info = utils.bind_to_random_ipc(self._sock, self._name)
def finalize(self):
utils.graceful_close(self._sock)
self._context.term()
@contextlib.contextmanager
def activate(self):
self.initialize()
try:
yield
finally:
self.finalize()
def recv(self):
try:
return loadb(self._sock.recv(copy=False).bytes)
except zmq.ContextTerminated:
pass
class PushPipe(object):
def __init__(self, conn_info, send_qsize=10):
self._conn_info = conn_info
self._send_qsize = send_qsize
self._context = None
self._sock = None
self._send_queue = None
self._send_thread = None
def initialize(self):
self._context = zmq.Context()
self._sock = self._context.socket(zmq.PUSH)
self._sock.set_hwm(2)
self._sock.connect(self._conn_info)
self._send_queue = queue.Queue(maxsize=self._send_qsize)
self._send_thread = threading.Thread(target=self.mainloop_send, daemon=True)
self._send_thread.start()
def finalize(self):
utils.graceful_close(self._sock)
self._context.term()
@contextlib.contextmanager
def activate(self):
self.initialize()
try:
yield
finally:
self.finalize()
def mainloop_send(self):
try:
while True:
job = self._send_queue.get()
self._sock.send(dumpb(job), copy=False)
except zmq.ContextTerminated:
pass
def send(self, payload):
self._send_queue.put(payload)
return self
def make_push_pair(name, nr_workers=None, mode='tcp', send_qsize=10):
pull = PullPipe(name, mode=mode)
pull.initialize()
nr_pushs = nr_workers or 1
pushs = [PushPipe(pull.conn_info, send_qsize=send_qsize) for i in range(nr_pushs)]
if nr_workers is None:
return pull, pushs[0]
return pull, pushs
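# --- Editor's hedged usage sketch (not part of the original module) ---
# Minimal single-process illustration of the intended producer/consumer wiring;
# the pipe name and payload below are arbitrary demo values.
if __name__ == '__main__':
    pull, push = make_push_pair('demo-pipe', mode='tcp')
    push.initialize()
    # The payload is pickled by the background send thread and received here.
    push.send({'step': 1, 'payload': [1, 2, 3]})
    print(pull.recv())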
| mit | 4,289,777,976,493,546,000 | 24.488372 | 86 | 0.591545 | false |
brianjking/nDeploy | scripts/accountmodify_hook_pre.py | 1 | 3296 | #!/usr/bin/env python
import yaml
import sys
import json
import os
import subprocess
__author__ = "Anoop P Alias"
__copyright__ = "Copyright 2014, PiServe Technologies Pvt Ltd , India"
__license__ = "GPL"
__email__ = "[email protected]"
installation_path = "/opt/nDeploy" # Absolute Installation Path
backend_config_file = installation_path+"/conf/backends.yaml"
nginx_dir = "/etc/nginx/sites-enabled/"
cpjson = json.load(sys.stdin)
mydict = cpjson["data"]
cpanelnewuser = mydict["newuser"]
cpaneluser = mydict["user"]
maindomain = mydict["domain"]
cpuserdatayaml = "/var/cpanel/userdata/" + cpaneluser + "/main"
cpaneluser_data_stream = open(cpuserdatayaml, 'r')
yaml_parsed_cpaneluser = yaml.safe_load(cpaneluser_data_stream)
cpaneluser_data_stream.close()
main_domain = yaml_parsed_cpaneluser.get('main_domain')
sub_domains = yaml_parsed_cpaneluser.get('sub_domains')
if cpanelnewuser != cpaneluser:
subprocess.call("touch "+installation_path+"/lock/todel_"+cpaneluser, shell=True)
fhandle = open(installation_path+"/lock/todel_"+cpaneluser, 'a')
fhandle.write(installation_path+"/domain-data/"+main_domain+"\n")
fhandle.write(installation_path+"/user-data/"+cpaneluser+"\n")
fhandle.write(nginx_dir+main_domain+".conf\n")
fhandle.write(nginx_dir+main_domain+".include\n")
subprocess.call("rm -rf /var/resin/hosts/"+main_domain, shell=True)
if os.path.isfile("/var/cpanel/userdata/" + cpaneluser + "/" + main_domain + "_SSL"):
fhandle.write(installation_path+"/domain-data/"+main_domain+"_SSL\n")
fhandle.write(nginx_dir+main_domain+"_SSL.conf\n")
fhandle.write(nginx_dir+main_domain+"_SSL.include\n")
for domain_in_subdomains in sub_domains:
fhandle.write(installation_path+"/domain-data/"+domain_in_subdomains+"\n")
fhandle.write(nginx_dir+domain_in_subdomains+".conf\n")
fhandle.write(nginx_dir+domain_in_subdomains+".include\n")
subprocess.call("rm -rf /var/resin/hosts/"+domain_in_subdomains, shell=True)
if os.path.isfile("/var/cpanel/userdata/" + cpaneluser + "/" + domain_in_subdomains + "_SSL"):
fhandle.write(installation_path+"/domain-data/"+domain_in_subdomains+"_SSL\n")
fhandle.write(nginx_dir+domain_in_subdomains+"_SSL.conf\n")
fhandle.write(nginx_dir+domain_in_subdomains+"_SSL.include\n")
fhandle.close()
print(("1 nDeploy:olduser:"+cpaneluser+":newuser:"+cpanelnewuser))
elif maindomain != main_domain:
subprocess.call("touch "+installation_path+"/lock/todel_"+cpaneluser, shell=True)
fhandle = open(installation_path+"/lock/todel_"+cpaneluser, 'a')
fhandle.write(installation_path+"/domain-data/"+main_domain+"\n")
fhandle.write(nginx_dir+main_domain+".conf\n")
fhandle.write(nginx_dir+main_domain+".include\n")
if os.path.isfile("/var/cpanel/userdata/" + cpaneluser + "/" + main_domain + "_SSL"):
fhandle.write(installation_path+"/domain-data/"+main_domain+"_SSL\n")
fhandle.write(nginx_dir+main_domain+"_SSL.conf\n")
fhandle.write(nginx_dir+main_domain+"_SSL.include\n")
subprocess.call("/usr/sbin/nginx -s reload", shell=True)
fhandle.close()
print(("1 nDeploy:olddomain:"+main_domain+":newdomain:"+maindomain))
else:
print("1 nDeploy::skiphook")
| gpl-3.0 | -5,765,584,925,556,428,000 | 46.085714 | 102 | 0.691141 | false |
Dawny33/Code | Compiler Design Assignments/Week1/C_literals_paser.py | 1 | 1278 | f = open("C:\\Users\\acer\\Desktop\\Compiler Design\\simple_c_program.c", 'r')
arr = []
for line in f:
for i in line.split():
arr.append(i)
#print type(arr[8])
char_arr = []
for token in arr:
for ch in token:
char_arr.append(ch)
#print char_arr
#For identifying integer literals.
num = "023456789"
symbols = "+-/%*"
det_lits = []
#print type(det_lits)
for fin in range(len(char_arr)):
if (char_arr[fin] in num) and (char_arr[fin-1] not in symbols):
det_lits.append(char_arr[fin])
print det_lits
#for multi-digit numbers.
for fin3 in range(len(char_arr)):
if (char_arr[fin3] in num):
        buff = []
for p in range(len(char_arr[fin3:])):
if (char_arr[p] in num):
buff.append(char_arr[p])
print buff
#For identifying string literals.
str_lits = []
found = False
buff_str = ""
for ij in range(len(char_arr)):
for fin2 in range(ij):
if (char_arr[fin2] == '"'):
flag = fin2+1
while char_arr[flag] != '"' and not found:
str_lits.append(char_arr[flag])
str_lits.append(buff_str)
flag += 1
if char_arr[flag] == '"':
found = True
print str_lits
| gpl-3.0 | -824,359,838,254,555,600 | 21.421053 | 78 | 0.541471 | false |
lilleswing/deepchem | deepchem/feat/molecule_featurizers/raw_featurizer.py | 1 | 1341 | from typing import Union
from deepchem.utils.typing import RDKitMol
from deepchem.feat.base_classes import MolecularFeaturizer
class RawFeaturizer(MolecularFeaturizer):
"""Encodes a molecule as a SMILES string or RDKit mol.
This featurizer can be useful when you're trying to transform a large
  collection of RDKit Mol objects into SMILES strings, or alternatively as a
"no-op" featurizer in your molecular pipeline.
Notes
-----
This class requires RDKit to be installed.
"""
def __init__(self, smiles: bool = False):
"""Initialize this featurizer.
Parameters
----------
smiles: bool, optional (default False)
      If True, encode this molecule as a SMILES string. Else as an RDKit Mol.
"""
try:
from rdkit import Chem # noqa
except ModuleNotFoundError:
raise ImportError("This class requires RDKit to be installed.")
self.smiles = smiles
def _featurize(self, mol: RDKitMol) -> Union[str, RDKitMol]:
"""Calculate either smiles string or pass through raw molecule.
Parameters
----------
mol: rdkit.Chem.rdchem.Mol
RDKit Mol object
Returns
-------
str or rdkit.Chem.rdchem.Mol
SMILES string or RDKit Mol object.
"""
from rdkit import Chem
if self.smiles:
return Chem.MolToSmiles(mol)
else:
return mol
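# --- Editor's hedged usage sketch (not part of the upstream file) ---
# Exercises the _featurize hook defined above in both output modes, assuming
# RDKit is installed; the SMILES string "CCO" is an arbitrary example molecule.
if __name__ == "__main__":
  from rdkit import Chem
  ethanol = Chem.MolFromSmiles("CCO")
  print(RawFeaturizer(smiles=True)._featurize(ethanol))   # canonical SMILES string
  print(RawFeaturizer(smiles=False)._featurize(ethanol))  # RDKit Mol passed through unchanged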
| mit | 7,890,447,776,362,306,000 | 24.788462 | 76 | 0.677852 | false |
vidartf/hyperspy | hyperspy/tests/signal/test_eds_sem.py | 1 | 13067 | # Copyright 2007-2016 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import sys
import numpy as np
import nose.tools as nt
from hyperspy.signals import EDSSEMSpectrum
from hyperspy.defaults_parser import preferences
from hyperspy.components1d import Gaussian
from hyperspy import utils
from hyperspy.misc.test_utils import assert_warns
class Test_metadata:
def setUp(self):
# Create an empty spectrum
s = EDSSEMSpectrum(np.ones((4, 2, 1024)))
s.axes_manager.signal_axes[0].scale = 1e-3
s.axes_manager.signal_axes[0].units = "keV"
s.axes_manager.signal_axes[0].name = "Energy"
s.metadata.Acquisition_instrument.SEM.Detector.EDS.live_time = 3.1
s.metadata.Acquisition_instrument.SEM.beam_energy = 15.0
s.metadata.Acquisition_instrument.SEM.tilt_stage = -38
s.metadata.Acquisition_instrument.SEM.Detector.EDS.azimuth_angle = 63
s.metadata.Acquisition_instrument.SEM.Detector.EDS.elevation_angle = 35
self.signal = s
def test_sum_live_time(self):
s = self.signal
old_metadata = s.metadata.deepcopy()
sSum = s.sum(0)
nt.assert_equal(
sSum.metadata.Acquisition_instrument.SEM.Detector.EDS.live_time,
3.1 * 2)
# Check that metadata is unchanged
print(old_metadata, s.metadata) # Capture for comparison on error
nt.assert_dict_equal(old_metadata.as_dictionary(),
s.metadata.as_dictionary(),
"Source metadata changed")
def test_sum_live_time2(self):
s = self.signal
old_metadata = s.metadata.deepcopy()
sSum = s.sum((0, 1))
nt.assert_equal(
sSum.metadata.Acquisition_instrument.SEM.Detector.EDS.live_time,
3.1 *
2 * 4)
# Check that metadata is unchanged
print(old_metadata, s.metadata) # Capture for comparison on error
nt.assert_dict_equal(old_metadata.as_dictionary(),
s.metadata.as_dictionary(),
"Source metadata changed")
def test_sum_live_time_out_arg(self):
s = self.signal
sSum = s.sum(0)
s.metadata.Acquisition_instrument.SEM.Detector.EDS.live_time = 4.2
s_resum = s.sum(0)
r = s.sum(0, out=sSum)
nt.assert_is_none(r)
nt.assert_equal(
s_resum.metadata.Acquisition_instrument.SEM.Detector.EDS.live_time,
sSum.metadata.Acquisition_instrument.SEM.Detector.EDS.live_time)
np.testing.assert_allclose(s_resum.data, sSum.data)
def test_rebin_live_time(self):
s = self.signal
old_metadata = s.metadata.deepcopy()
dim = s.axes_manager.shape
s = s.rebin([dim[0] / 2, dim[1] / 2, dim[2]])
nt.assert_equal(
s.metadata.Acquisition_instrument.SEM.Detector.EDS.live_time,
3.1 *
2 *
2)
# Check that metadata is unchanged
print(old_metadata, self.signal.metadata) # Captured on error
nt.assert_dict_equal(old_metadata.as_dictionary(),
self.signal.metadata.as_dictionary(),
"Source metadata changed")
def test_add_elements(self):
s = self.signal
s.add_elements(['Al', 'Ni'])
nt.assert_equal(s.metadata.Sample.elements, ['Al', 'Ni'])
s.add_elements(['Al', 'Ni'])
nt.assert_equal(s.metadata.Sample.elements, ['Al', 'Ni'])
s.add_elements(["Fe", ])
nt.assert_equal(s.metadata.Sample.elements, ['Al', "Fe", 'Ni'])
s.set_elements(['Al', 'Ni'])
nt.assert_equal(s.metadata.Sample.elements, ['Al', 'Ni'])
def test_add_lines(self):
s = self.signal
s.add_lines(lines=())
nt.assert_equal(s.metadata.Sample.xray_lines, [])
s.add_lines(("Fe_Ln",))
nt.assert_equal(s.metadata.Sample.xray_lines, ["Fe_Ln"])
s.add_lines(("Fe_Ln",))
nt.assert_equal(s.metadata.Sample.xray_lines, ["Fe_Ln"])
s.add_elements(["Ti", ])
s.add_lines(())
nt.assert_equal(
s.metadata.Sample.xray_lines, ['Fe_Ln', 'Ti_La'])
s.set_lines((), only_one=False, only_lines=False)
nt.assert_equal(s.metadata.Sample.xray_lines,
['Fe_La', 'Fe_Lb3', 'Fe_Ll', 'Fe_Ln', 'Ti_La',
'Ti_Lb3', 'Ti_Ll', 'Ti_Ln'])
s.metadata.Acquisition_instrument.SEM.beam_energy = 0.4
s.set_lines((), only_one=False, only_lines=False)
nt.assert_equal(s.metadata.Sample.xray_lines, ['Ti_Ll'])
def test_add_lines_auto(self):
s = self.signal
s.axes_manager.signal_axes[0].scale = 1e-2
s.set_elements(["Ti", "Al"])
s.set_lines(['Al_Ka'])
nt.assert_equal(
s.metadata.Sample.xray_lines, ['Al_Ka', 'Ti_Ka'])
del s.metadata.Sample.xray_lines
s.set_elements(['Al', 'Ni'])
s.add_lines()
nt.assert_equal(
s.metadata.Sample.xray_lines, ['Al_Ka', 'Ni_Ka'])
s.metadata.Acquisition_instrument.SEM.beam_energy = 10.0
s.set_lines([])
nt.assert_equal(
s.metadata.Sample.xray_lines, ['Al_Ka', 'Ni_La'])
s.metadata.Acquisition_instrument.SEM.beam_energy = 200
s.set_elements(['Au', 'Ni'])
s.set_lines([])
nt.assert_equal(s.metadata.Sample.xray_lines,
['Au_La', 'Ni_Ka'])
def test_default_param(self):
s = self.signal
mp = s.metadata
nt.assert_equal(
mp.Acquisition_instrument.SEM.Detector.EDS.energy_resolution_MnKa,
preferences.EDS.eds_mn_ka)
def test_SEM_to_TEM(self):
s = self.signal.inav[0, 0]
signal_type = 'EDS_TEM'
mp = s.metadata
mp.Acquisition_instrument.SEM.Detector.EDS.energy_resolution_MnKa = \
125.3
sTEM = s.deepcopy()
sTEM.set_signal_type(signal_type)
mpTEM = sTEM.metadata
results = [
mp.Acquisition_instrument.SEM.Detector.EDS.energy_resolution_MnKa,
signal_type]
resultsTEM = [
(mpTEM.Acquisition_instrument.TEM.Detector.EDS.
energy_resolution_MnKa),
mpTEM.Signal.signal_type]
nt.assert_equal(results, resultsTEM)
def test_get_calibration_from(self):
s = self.signal
scalib = EDSSEMSpectrum(np.ones(1024))
energy_axis = scalib.axes_manager.signal_axes[0]
energy_axis.scale = 0.01
energy_axis.offset = -0.10
s.get_calibration_from(scalib)
nt.assert_equal(s.axes_manager.signal_axes[0].scale, energy_axis.scale)
def test_take_off_angle(self):
s = self.signal
nt.assert_almost_equal(s.get_take_off_angle(), 12.886929785732487,
places=sys.float_info.dig - 2)
class Test_get_lines_intensity:
def setUp(self):
# Create an empty spectrum
s = EDSSEMSpectrum(np.zeros((2, 2, 3, 100)))
energy_axis = s.axes_manager.signal_axes[0]
energy_axis.scale = 0.04
energy_axis.units = 'keV'
energy_axis.name = "Energy"
g = Gaussian()
g.sigma.value = 0.05
g.centre.value = 1.487
s.data[:] = g.function(energy_axis.axis)
s.metadata.Acquisition_instrument.SEM.Detector.EDS.live_time = 3.1
s.metadata.Acquisition_instrument.SEM.beam_energy = 15.0
self.signal = s
def test(self):
s = self.signal
sAl = s.get_lines_intensity(["Al_Ka"],
plot_result=False,
integration_windows=5)[0]
np.testing.assert_allclose(24.99516, sAl.data[0, 0, 0], atol=1e-3)
sAl = s.inav[0].get_lines_intensity(
["Al_Ka"], plot_result=False, integration_windows=5)[0]
np.testing.assert_allclose(24.99516, sAl.data[0, 0], atol=1e-3)
sAl = s.inav[0, 0].get_lines_intensity(
["Al_Ka"], plot_result=False, integration_windows=5)[0]
np.testing.assert_allclose(24.99516, sAl.data[0], atol=1e-3)
sAl = s.inav[0, 0, 0].get_lines_intensity(
["Al_Ka"], plot_result=False, integration_windows=5)[0]
np.testing.assert_allclose(24.99516, sAl.data, atol=1e-3)
s.axes_manager[-1].offset = 1.0
with assert_warns(message="C_Ka is not in the data energy range."):
sC = s.get_lines_intensity(["C_Ka"], plot_result=False)
nt.assert_equal(len(sC), 0)
nt.assert_equal(sAl.metadata.Sample.elements, ["Al"])
nt.assert_equal(sAl.metadata.Sample.xray_lines, ["Al_Ka"])
def test_eV(self):
s = self.signal
energy_axis = s.axes_manager.signal_axes[0]
energy_axis.scale = 40
energy_axis.units = 'eV'
sAl = s.get_lines_intensity(["Al_Ka"],
plot_result=False,
integration_windows=5)[0]
np.testing.assert_allclose(24.99516, sAl.data[0, 0, 0], atol=1e-3)
def test_background_substraction(self):
s = self.signal
intens = s.get_lines_intensity(["Al_Ka"], plot_result=False)[0].data
s += 1.
np.testing.assert_allclose(s.estimate_background_windows(
xray_lines=["Al_Ka"])[0, 0], 1.25666201, atol=1e-3)
np.testing.assert_allclose(
s.get_lines_intensity(
["Al_Ka"],
background_windows=s.estimate_background_windows(
[4, 4], xray_lines=["Al_Ka"]),
plot_result=False)[0].data,
intens, atol=1e-3)
def test_estimate_integration_windows(self):
s = self.signal
np.testing.assert_allclose(
s.estimate_integration_windows(3.0, ["Al_Ka"]),
[[1.371, 1.601]], atol=1e-2)
def test_with_signals_examples(self):
from hyperspy.misc.example_signals_loading import \
load_1D_EDS_SEM_spectrum as EDS_SEM_Spectrum
s = EDS_SEM_Spectrum()
np.testing.assert_allclose(
utils.stack(s.get_lines_intensity()).data.squeeze(),
np.array([84163, 89063, 96117, 96700, 99075]))
class Test_tools_bulk:
def setUp(self):
s = EDSSEMSpectrum(np.ones(1024))
s.metadata.Acquisition_instrument.SEM.beam_energy = 5.0
energy_axis = s.axes_manager.signal_axes[0]
energy_axis.scale = 0.01
energy_axis.units = 'keV'
s.set_elements(['Al', 'Zn'])
s.add_lines()
self.signal = s
def test_electron_range(self):
s = self.signal
mp = s.metadata
elec_range = utils.eds.electron_range(
mp.Sample.elements[0],
mp.Acquisition_instrument.SEM.beam_energy,
density='auto',
tilt=mp.Acquisition_instrument.SEM.tilt_stage)
np.testing.assert_allclose(elec_range, 0.41350651162374225)
def test_xray_range(self):
s = self.signal
mp = s.metadata
xr_range = utils.eds.xray_range(
mp.Sample.xray_lines[0],
mp.Acquisition_instrument.SEM.beam_energy,
density=4.37499648818)
np.testing.assert_allclose(xr_range, 0.1900368800933955)
class Test_energy_units:
def setUp(self):
s = EDSSEMSpectrum(np.ones(1024))
s.metadata.Acquisition_instrument.SEM.beam_energy = 5.0
s.axes_manager.signal_axes[0].units = 'keV'
s.set_microscope_parameters(energy_resolution_MnKa=130)
self.signal = s
def test_beam_energy(self):
s = self.signal
nt.assert_equal(s._get_beam_energy(), 5.0)
s.axes_manager.signal_axes[0].units = 'eV'
nt.assert_equal(s._get_beam_energy(), 5000.0)
s.axes_manager.signal_axes[0].units = 'keV'
def test_line_energy(self):
s = self.signal
nt.assert_equal(s._get_line_energy('Al_Ka'), 1.4865)
s.axes_manager.signal_axes[0].units = 'eV'
nt.assert_equal(s._get_line_energy('Al_Ka'), 1486.5)
s.axes_manager.signal_axes[0].units = 'keV'
np.testing.assert_allclose(s._get_line_energy('Al_Ka', FWHM_MnKa='auto'),
(1.4865, 0.07661266213883969))
np.testing.assert_allclose(s._get_line_energy('Al_Ka', FWHM_MnKa=128),
(1.4865, 0.073167615787314))
| gpl-3.0 | 7,186,505,502,727,921,000 | 38.477341 | 81 | 0.586286 | false |
AutohomeRadar/Windows-Agent | utils/log.py | 1 | 1091 | import logging
import os
import inspect
import g
def get_logger(logger_name='[PythonService]', dirpath=None):
logger = logging.getLogger(logger_name)
if dirpath is None:
# dirpath = os.path.join(os.path.dirname(__file__), os.path.abspath('..'))
dirpath = os.path.dirname(__file__)
    # dirpath = "D:\\"
handler = logging.FileHandler(os.path.join(dirpath, "service.log"))
formatter = logging.Formatter(
'%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
    if g.DEBUG:
        logger.setLevel(logging.DEBUG)
    else:
        logger.setLevel(logging.INFO)
return logger
def init_log(path=None):
if g.DEBUG:
logging.basicConfig(filename=os.path.join(path, 'app.log'),level=logging.DEBUG, format='%(asctime)s %(filename)-12s %(levelname)-8s %(message)s')
else:
logging.basicConfig(filename=os.path.join(path, 'app.log'),level=logging.INFO, format='%(asctime)s %(filename)-12s %(levelname)-8s %(message)s')
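# --- Editor's hedged usage sketch (not part of the original module) ---
# Shows the intended call pattern; note that get_logger writes service.log next
# to this module, and init_log expects an existing directory for app.log.
if __name__ == '__main__':
    logger = get_logger('[PythonService]')
    logger.info('service started')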
| apache-2.0 | 1,451,566,442,821,719,300 | 30.088235 | 153 | 0.636114 | false |
psychopy/versions | psychopy/app/pavlovia_ui/search.py | 1 | 11197 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2020 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
from past.builtins import basestring
from .project import DetailsPanel
from .functions import logInPavlovia
from ._base import BaseFrame
from psychopy.localization import _translate
from psychopy.projects import pavlovia
import copy
import wx
from pkg_resources import parse_version
from wx.lib import scrolledpanel as scrlpanel
import wx.lib.mixins.listctrl as listmixin
import requests
starChar = u"\u2B50"
forkChar = u"\u2442"
class SearchFrame(wx.Dialog):
def __init__(self, app, parent=None, style=None,
pos=wx.DefaultPosition):
if style is None:
style = (wx.DEFAULT_DIALOG_STYLE | wx.CENTER |
wx.TAB_TRAVERSAL | wx.RESIZE_BORDER)
title = _translate("Search for projects online")
self.frameType = 'ProjectSearch'
wx.Dialog.__init__(self, parent, -1, title=title, style=style,
size=(800, 500), pos=pos)
self.app = app
self.project = None
self.parent = parent
# info about last search (NB None means no search but [] or '' means empty)
self.lastSearchStr = None
self.lastSearchOwn = None
self.lastSearchGp = None
self.lastSearchPub = None
# self.mainPanel = wx.Panel(self, wx.ID_ANY)
self.searchLabel = wx.StaticText(self, wx.ID_ANY, _translate('Search:'))
self.searchCtrl = wx.TextCtrl(self, wx.ID_ANY)
self.searchCtrl.Bind(wx.EVT_BUTTON, self.onSearch)
self.searchBtn = wx.Button(self, wx.ID_ANY, _translate("Search"))
self.searchBtn.Bind(wx.EVT_BUTTON, self.onSearch)
self.searchBtn.SetDefault()
self.searchInclPublic = wx.CheckBox(self, wx.ID_ANY,
label="Public")
self.searchInclPublic.Bind(wx.EVT_CHECKBOX, self.onSearch)
self.searchInclPublic.SetValue(True)
self.searchInclGroup = wx.CheckBox(self, wx.ID_ANY,
label="My groups")
self.searchInclGroup.Bind(wx.EVT_CHECKBOX, self.onSearch)
self.searchInclGroup.SetValue(True)
self.searchBuilderOnly = wx.CheckBox(self, wx.ID_ANY,
label="Only Builder")
self.searchBuilderOnly.Bind(wx.EVT_CHECKBOX, self.onSearch)
# then the search results
self.searchResults = ProjectListCtrl(self)
# on the right
self.detailsPanel = DetailsPanel(parent=self)
# sizers layout
self.searchBtnSizer = wx.BoxSizer(wx.HORIZONTAL)
self.searchBtnSizer.Add(self.searchCtrl, 1, wx.EXPAND | wx.ALL, 5)
self.searchBtnSizer.Add(self.searchBtn, 0, wx.EXPAND | wx.ALL, 5)
self.optionsSizer = wx.WrapSizer()
self.optionsSizer.AddMany([self.searchInclGroup, self.searchInclPublic,
self.searchBuilderOnly])
self.leftSizer = wx.BoxSizer(wx.VERTICAL)
self.leftSizer.Add(self.searchLabel, 0, wx.EXPAND | wx.ALL, 5)
self.leftSizer.Add(self.optionsSizer)
self.leftSizer.Add(self.searchBtnSizer, 0, wx.EXPAND | wx.ALL, 5)
self.leftSizer.Add(self.searchResults, 1, wx.EXPAND | wx.ALL, 5)
self.mainSizer = wx.BoxSizer(wx.HORIZONTAL)
self.mainSizer.Add(self.leftSizer, 1, wx.EXPAND | wx.ALL, 5)
self.mainSizer.Add(self.detailsPanel, 1, wx.EXPAND | wx.ALL, 5)
self.SetSizer(self.mainSizer) # don't fit until search is populated
if self.parent:
self.CenterOnParent()
self.Layout()
# if projects == 'no user':
# msg = _translate("Log in to search your own projects")
# loginBtn = wx.Button(self, wx.ID_ANY, label=msg)
# loginBtn.Bind(wx.EVT_BUTTON, self.onLoginClick)
def getSearchOptions(self):
opts = {}
opts['inclPublic'] = self.searchInclPublic.GetValue()
opts['builderOnly'] = self.searchBuilderOnly.GetValue()
        opts['inclGroup'] = self.searchInclGroup.GetValue()
return opts
def onSearch(self, evt=None):
opts = self.getSearchOptions()
searchStr = self.searchCtrl.GetValue()
        newSearch = (searchStr != self.lastSearchStr)
        self.lastSearchStr = searchStr
session = pavlovia.getCurrentSession()
# search own
if newSearch:
try:
self.lastSearchOwn = session.gitlab.projects.list(owned=True, search=searchStr)
except requests.exceptions.ConnectionError:
print("Failed to establish a new connection: No internet?")
return None
# search my groups
if opts['inclGroup'] and (newSearch or self.lastSearchGp is None):
# group projects: owned=False, membership=True
self.lastSearchGp = session.gitlab.projects.list(
owned=False, membership=True, search=searchStr)
elif not opts['inclGroup']: # set to None (to indicate non-search not simply empty result)
self.lastSearchGp = None
elif opts['inclGroup'] and not newSearch:
pass # we have last search and we need it so do nothing
else:
print("ERROR: During Pavlovia search we found opts['inclGroup']={}, newSearch={}"
.format(opts['inclGroup'], newSearch))
# search public
if opts['inclPublic'] and (newSearch or self.lastSearchPub is None):
self.lastSearchPub = session.gitlab.projects.list(owned=False, membership=False,
search=searchStr)
elif not opts['inclPublic']: # set to None (to indicate non-search not simply empty result)
self.lastSearchPub = None
elif opts['inclPublic'] and not newSearch:
pass # we have last search and we need it so do nothing
else:
print("ERROR: During Pavlovia search we found opts['inclPublic']={}, newSearch={}"
.format(opts['inclPublic'], newSearch))
projs = copy.copy(self.lastSearchOwn)
if opts['inclGroup']:
projs.extend(self.lastSearchGp)
if opts['inclPublic']:
projs.extend(self.lastSearchPub)
projs = getUniqueByID(projs)
projs = [pavlovia.PavloviaProject(proj) for proj in projs if proj.id]
self.searchResults.setContents(projs)
self.searchResults.Update()
self.Layout()
def onLoginClick(self, event):
user = logInPavlovia(parent=self.parent)
def Show(self):
# show the dialog then start search
wx.Dialog.Show(self)
wx.Yield()
self.onSearch() # trigger the search update
class ProjectListCtrl(wx.ListCtrl, listmixin.ListCtrlAutoWidthMixin):
"""A scrollable panel showing a list of projects. To be used within the
Project Search dialog
"""
def __init__(self, parent, frame=None):
wx.ListCtrl.__init__(self, parent, wx.ID_ANY,
style=wx.LC_REPORT | wx.LC_SINGLE_SEL)
listmixin.ListCtrlAutoWidthMixin.__init__(self)
self.AlwaysShowScrollbars(True)
self.parent = parent
if frame is None:
self.frame = parent
else:
self.frame = frame
self.projList = []
self.columnNames = [starChar, forkChar, 'Group', 'Name', 'Description']
self._currentSortCol = 0
self._currentSortRev = False
# Give it some columns.
# The ID col we'll customize a bit:
for n, columnName in enumerate(self.columnNames):
if len(columnName) < 3: # for short names center the text
self.InsertColumn(n, columnName, wx.LIST_FORMAT_CENTER)
else:
self.InsertColumn(n, columnName)
# set the column sizes *after* adding the items
for n, columnName in enumerate(self.columnNames):
self.SetColumnWidth(n, wx.LIST_AUTOSIZE)
# after creating columns we can create the sort mixin
# listmixin.ColumnSorterMixin.__init__(self, len(columnList))
self.SetAutoLayout(True)
self.Bind(wx.EVT_LIST_COL_CLICK, self.onColumnClick)
self.Bind(wx.EVT_LIST_ITEM_SELECTED, self.onChangeSelection)
def setContents(self, projects):
self.DeleteAllItems()
# first time around we have a list of PavloviaProjects
if projects and isinstance(projects[0], pavlovia.PavloviaProject):
self.projList = []
for index, thisProj in enumerate(projects):
if not hasattr(thisProj, 'id'):
continue
data = (thisProj.star_count, thisProj.forks_count,
thisProj.group, thisProj.name, thisProj.description)
proj = {}
proj[starChar] = thisProj.star_count
proj[forkChar] = thisProj.forks_count
proj['Name'] = thisProj.name
proj['Group'] = thisProj.group
proj['Description'] = thisProj.description
proj['id'] = thisProj.id
self.projList.append(proj)
self.Append(data) # append to the wx table
# subsequent iterations are simple dicts
else:
self.projList = projects
for index, thisProj in enumerate(projects):
data = (thisProj[starChar], thisProj[forkChar],
thisProj['Group'], thisProj['Name'],
thisProj['Description'])
self.Append(data) # append to the wx table
self.resizeCols(finalOnly=False)
self.Update()
def resizeCols(self, finalOnly):
# resize the columns
for n in range(self.ColumnCount):
if not finalOnly:
self.SetColumnWidth(n, wx.LIST_AUTOSIZE_USEHEADER)
if self.GetColumnWidth(n) > 100:
self.SetColumnWidth(n, 100)
def onChangeSelection(self, event):
proj = self.projList[event.GetIndex()]
self.frame.detailsPanel.setProject(proj['id'])
def onColumnClick(self, event=None):
col = event.Column
if col == self._currentSortCol: # toggle direction
self._currentSortRev = not(self._currentSortRev)
self._currentSortCol = col
projs = sortProjects(self.projList, self.columnNames[col],
reverse=self._currentSortRev)
self.setContents(projs)
def sortProjects(seq, name, reverse=False):
return sorted(seq, key=lambda k: k[name], reverse=reverse)
def getUniqueByID(seq):
"""Very fast function to remove duplicates from a list while preserving order
Based on sort f8() by Dave Kirby
benchmarked at https://www.peterbe.com/plog/uniqifiers-benchmark
Requires Python>=2.7 (requires set())
"""
# Order preserving
seen = set()
return [x for x in seq if x.id not in seen and not seen.add(x.id)]
| gpl-3.0 | -3,274,404,417,799,227,000 | 39.422383 | 100 | 0.612753 | false |
mikecrittenden/zen-coding-gedit | zencoding/stparser.py | 1 | 3770 | '''
Zen Coding's settings parser
Created on Jun 14, 2009
@author: Sergey Chikuyonok (http://chikuyonok.ru)
'''
from copy import deepcopy
import re
import types
from .zen_settings import zen_settings
_original_settings = deepcopy(zen_settings)
TYPE_ABBREVIATION = 'zen-tag'
TYPE_EXPANDO = 'zen-expando'
TYPE_REFERENCE = 'zen-reference'
""" Reference to another abbreviation or tag """
re_tag = r'^<([\w\-]+(?:\:[\w\-]+)?)((?:\s+[\w\-]+(?:\s*=\s*(?:(?:"[^"]*")|(?:\'[^\']*\')|[^>\s]+))?)*)\s*(\/?)>'
"Regular expression for XML tag matching"
re_attrs = r'([\w\-]+)\s*=\s*([\'"])(.*?)\2'
"Regular expression for matching XML attributes"
class Entry:
"""
Unified object for parsed data
"""
def __init__(self, entry_type, key, value):
"""
@type entry_type: str
@type key: str
@type value: dict
"""
self.type = entry_type
self.key = key
self.value = value
def _make_expando(key, value):
"""
Make expando from string
@type key: str
@type value: str
@return: Entry
"""
return Entry(TYPE_EXPANDO, key, value)
def _make_abbreviation(key, tag_name, attrs, is_empty=False):
"""
Make abbreviation from string
@param key: Abbreviation key
@type key: str
@param tag_name: Expanded element's tag name
@type tag_name: str
@param attrs: Expanded element's attributes
@type attrs: str
@param is_empty: Is expanded element empty or not
@type is_empty: bool
@return: dict
"""
result = {
'name': tag_name,
'is_empty': is_empty
};
if attrs:
result['attributes'] = [];
for m in re.findall(re_attrs, attrs):
result['attributes'].append({
'name': m[0],
'value': m[2]
})
return Entry(TYPE_ABBREVIATION, key, result)
def _parse_abbreviations(obj):
"""
Parses all abbreviations inside dictionary
@param obj: dict
"""
for key, value in list(obj.items()):
key = key.strip()
if key[-1] == '+':
# this is expando, leave 'value' as is
obj[key] = _make_expando(key, value)
else:
m = re.search(re_tag, value)
if m:
obj[key] = _make_abbreviation(key, m.group(1), m.group(2), (m.group(3) == '/'))
else:
# assume it's reference to another abbreviation
obj[key] = Entry(TYPE_REFERENCE, key, value)
def parse(settings):
"""
Parse user's settings. This function must be called *before* any activity
in zen coding (for example, expanding abbreviation)
@type settings: dict
"""
for p, value in list(settings.items()):
if p == 'abbreviations':
_parse_abbreviations(value)
elif p == 'extends':
settings[p] = [v.strip() for v in value.split(',')]
elif type(value) == dict:
parse(value)
def extend(parent, child):
"""
    Recursively extends parent dictionary with children's keys. Used for merging
default settings with user's
@type parent: dict
@type child: dict
"""
for p, value in list(child.items()):
if type(value) == dict:
if p not in parent:
parent[p] = {}
extend(parent[p], value)
else:
parent[p] = value
def create_maps(obj):
"""
Create hash maps on certain string properties of zen settings
@type obj: dict
"""
for p, value in list(obj.items()):
if p == 'element_types':
for k, v in list(value.items()):
if isinstance(v, str):
value[k] = [el.strip() for el in v.split(',')]
elif type(value) == dict:
create_maps(value)
if __name__ == '__main__':
pass
def get_settings(user_settings=None):
"""
Main function that gather all settings and returns parsed dictionary
@param user_settings: A dictionary of user-defined settings
"""
settings = deepcopy(_original_settings)
create_maps(settings)
if user_settings:
user_settings = deepcopy(user_settings)
create_maps(user_settings)
extend(settings, user_settings)
# now we need to parse final set of settings
parse(settings)
return settings
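# --- Editor's hedged usage sketch (not part of the original module) ---
# Shows how parsed settings are typically obtained; the exact 'html'/'a' keys
# come from the bundled zen_settings and should be treated as an assumption.
if __name__ == '__main__':
    settings = get_settings()
    entry = settings['html']['abbreviations']['a']
    print(entry.type, entry.value)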
| unlicense | 4,056,994,817,509,625,000 | 22.416149 | 113 | 0.648276 | false |
lutcheti/webtext | src/request/backends/BackendMovie.py | 1 | 6501 | # -- coding: utf-8 --
###########################################################################
# #
# WebText #
# #
# Lucca Hirschi #
# <[email protected]> #
# #
# Copyright 2014 Lucca Hirschi #
# #
# This file is part of OwnShare. #
# OwnShare is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# OwnShare is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with OwnShare. If not, see <http://www.gnu.org/licenses/>. #
# #
###########################################################################
from __future__ import unicode_literals # implicitly declaring all strings as unicode strings
import logging
import subprocess # for launching bash programs
import datetime
from mainClass import *
from static import *
# -- Setup Logging --
logging = logging.getLogger(__name__)
def showtimes_zip(movie, zipcode):
logging.info("Starting allocine (zip)")
bashPrefix = "php "+os.path.dirname(os.path.abspath(__file__))+"/allocine/allocine_showtimes_zip.php "
bashC = bashPrefix+str(movie)+" "+str(zipcode)
logging.info("Before subprocess: %s." % bashC)
try:
process = subprocess.Popen(bashC.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except OSError as e:
logging.error("showtimes_zip > Popen | Execution failed:" + str(e))
return(MESS_BUG())
output = unicode(process.communicate()[0], "utf-8")
if "error" in output.lower() or len(output) == 0: # TODO: if error occurs in a cinema/movie ?
logging.info("PHP failed: %s." % output)
return(MESS_BUG())
cine = output.split("THEATER")
day = int(str(datetime.date.today()).split('-')[2])
answer = ""
for c in cine:
lines = c.split("\n")
if len(lines) == 1:
continue
answer += lines[0]+"\n"
for i in xrange(1,len(lines)):
if len(lines[i]) > 4 and int(lines[i].split()[3]) == day :
answer += lines[i]+"\n"
if i < len(lines) -1:
answer += lines[i+1]+"\n"
break
answer = ("J'ai compris que tu voulais avoir "
"les séances de %s dans le %s, voici "
"ce que j'ai trouvé:\n" % (str(movie),str(zipcode)) + answer)
return(answer)
def showtimes_theater(theater):
logging.info("Starting allocine (theater)")
bashPrefix = "php "+os.path.dirname(os.path.abspath(__file__))+"/allocine/allocine_showtimes_theater.php "
bashC = bashPrefix+str(theater)
logging.info("Before subprocess: %s." % bashC)
try:
process = subprocess.Popen(bashC.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except OSError as e:
logging.error("showtimes_theater > Popen | Execution failed:" + str(e))
return(MESS_BUG())
output = unicode(process.communicate()[0], "utf-8")
if "error" in output.lower() or len(output) == 0: # TODO: if error occurs in a cinema/movie ?
logging.info("PHP failed: %s." % output)
return("Erreur avec le backend PHP\nUsage pour cine: 'cine [titre] [zip] ou cine [nom de cinema]'\n")
movies = output.split("MOVIE")
day = int(str(datetime.date.today()).split('-')[2])
answer = ""
for c in movies:
lines = c.split("\n")
if len(lines) == 1:
continue
answer += lines[0]+"\n"
for i in xrange(1,len(lines)):
if len(lines[i]) > 4 and int(lines[i].split()[3]) == day :
answer += lines[i]+"\n"
# let's get the movies of the day for now, otherwise uncomment the two following lines
# if i < len(lines) -1:
# answer += lines[i+1]+"\n"
break
answer = ("J'ai compris que tu voulais avoir "
"les séances au %s, voici "
"ce que j'ai trouvé:\n%s" % (str(theater), answer))
return(answer)
def likelyCorrect(a):
return("séance" in a.lower() or "séance" in a.lower()) # fix this
class BackendMovie(Backend):
backendName = MOVIES # defined in static.py
def answer(self, request, config):
if len(request.argsList) < 1 or len(request.argsList) > 2:
return("Mauvais usage. Rappel: " + self.help())
elif len(request.argsList) == 1:
return(showtimes_theater(request.argsList[0]))
else:
movie = request.argsList[0]
zipcode = request.argsList[1]
return(showtimes_zip(movie, zipcode))
def test(self, user):
r1 = Request(user, "cine", ["louxor"], [], "")
r2 = Request(user, "cine", ["citizen", "75006"], [], "")
for r in [r1,r2]:
logging.info("Checking a request [%s]" % r)
a = self.answer(r, {})
logging.info(a + "\n")
if not(likelyCorrect(a)):
return False
return True
def help(self):
return("Usage pour cine: 'cine; [titre] ; [zip] pour obtenir les séances du filme [titre] "
"autour de [zip]. Autre usage: 'cine; [nom de cinema]' pour obtenir toutes les séances "
"de la journée dans [nom de cinema].")
bMovie = BackendMovie()
| gpl-3.0 | -3,935,210,647,555,079,000 | 45.042553 | 110 | 0.498922 | false |
vipmunot/Data-Analysis-using-Python | Data Analysis with Pandas Intermediate/Data Manipulation with pandas-165.py | 1 | 2073 | ## 1. Overview ##
import pandas as pd
food_info = pd.read_csv('food_info.csv')
col_names = food_info.columns.tolist()
print(food_info.head(3))
## 2. Transforming a Column ##
div_1000 = food_info["Iron_(mg)"] / 1000
add_100 = food_info["Iron_(mg)"] + 100
sub_100 = food_info["Iron_(mg)"] - 100
mult_2 = food_info["Iron_(mg)"]*2
sodium_grams = food_info["Sodium_(mg)"] / 1000
sugar_milligrams = food_info["Sugar_Tot_(g)"] * 1000
## 3. Performing Math with Multiple Columns ##
water_energy = food_info["Water_(g)"] * food_info["Energ_Kcal"]
print(water_energy[0:5])
grams_of_protein_per_gram_of_water = food_info['Protein_(g)']/ food_info['Water_(g)']
milligrams_of_calcium_and_iron = food_info['Calcium_(mg)']+ food_info['Iron_(mg)']
## 4. Create a Nutritional Index ##
weighted_protein = 2 * food_info['Protein_(g)']
weighted_fat = -0.75 * food_info['Lipid_Tot_(g)']
initial_rating = weighted_protein + weighted_fat
## 5. Normalizing Columns in a Data Set ##
print(food_info["Protein_(g)"][0:5])
max_protein = food_info["Protein_(g)"].max()
normalized_protein = food_info["Protein_(g)"]/food_info["Protein_(g)"].max()
normalized_fat = food_info["Lipid_Tot_(g)"]/food_info["Lipid_Tot_(g)"].max()
## 6. Creating a New Column ##
food_info['Normalized_Protein'] = normalized_protein
food_info['Normalized_Fat'] = normalized_fat
## 7. Create a Normalized Nutritional Index ##
food_info["Normalized_Protein"] = food_info["Protein_(g)"] / food_info["Protein_(g)"].max()
food_info["Normalized_Fat"] = food_info["Lipid_Tot_(g)"] / food_info["Lipid_Tot_(g)"].max()
food_info['Norm_Nutr_Index'] = 2 * food_info["Normalized_Protein"] - 0.75 * food_info["Normalized_Fat"]
## 8. Sorting a DataFrame by a Column ##
food_info["Normalized_Protein"] = food_info["Protein_(g)"] / food_info["Protein_(g)"].max()
food_info["Normalized_Fat"] = food_info["Lipid_Tot_(g)"] / food_info["Lipid_Tot_(g)"].max()
food_info["Norm_Nutr_Index"] = 2*food_info["Normalized_Protein"] + (-0.75*food_info["Normalized_Fat"])
food_info.sort_values("Norm_Nutr_Index",inplace =True,ascending = False) | mit | 1,887,816,080,682,771,700 | 38.132075 | 103 | 0.668596 | false |
javaes/sailfish-msg-importer | tests/test_SMSBackup.py | 1 | 2440 | import unittest
from sail_sms import SMSBackup, SMSImporter, SMSParser
import time
import os
class SMSBackupTest(unittest.TestCase):
def setUp(self):
self.backup_tool = SMSBackup("assets/test_commhistory.db", "assets/")
parser = SMSParser("assets/samples.xml")
self.sms_list = parser.get_all_sms_in_sf_format()
self.importer = SMSImporter("assets/test_commhistory.db")
self.empty_backup = None
self.non_empty_backup = None
def tearDown(self):
self.importer.reload_db()
self.importer.remove_all_groups_and_msgs()
if self.empty_backup is not None:
if os.path.isfile(self.empty_backup):
os.remove(self.empty_backup)
if self.non_empty_backup is not None:
if os.path.isfile(self.non_empty_backup):
os.remove(self.non_empty_backup)
if os.path.isdir("backup_folder/"):
os.rmdir("backup_folder/")
def test_create_and_restore_backup(self):
timestamp = int(time.time())
self.backup_tool.create_backup(timestamp)
self.empty_backup = "assets/commhistory-" + str(timestamp) + ".db"
self.assertTrue(os.path.isfile(self.empty_backup))
self.importer.import_sms(self.sms_list[0])
self.importer.reload_db()
time.sleep(1)
timestamp = int(time.time())
self.backup_tool.create_backup(timestamp)
self.non_empty_backup = "assets/commhistory-" + str(timestamp) + ".db"
self.assertTrue(os.path.isfile(self.non_empty_backup))
self.backup_tool.restore_backup(self.empty_backup)
self.importer.reload_db()
self.assertEqual(self.importer.get_msg_count(), 0)
self.assertEqual(self.importer.get_group_count(), 0)
self.backup_tool.restore_backup(self.non_empty_backup)
self.importer.reload_db()
self.assertEqual(self.importer.get_msg_count(), 1)
self.assertEqual(self.importer.get_group_count(), 1)
def test_should_create_backup_folder(self):
bt = SMSBackup("assets/test_commhistory.db", "backup_folder/")
timestamp = int(time.time())
self.empty_backup = "backup_folder/commhistory-" + str(timestamp) + ".db"
bt.create_backup(timestamp)
self.assertTrue(os.path.isfile(self.empty_backup))
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main() | apache-2.0 | 8,166,459,498,394,153,000 | 39.683333 | 81 | 0.634426 | false |
tensorflow/lucid | lucid/recipes/activation_atlas/layout.py | 1 | 2698 | # Copyright 2018 The Lucid Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import, division, print_function
import numpy as np
import logging
from umap import UMAP
log = logging.getLogger(__name__)
def normalize_layout(layout, min_percentile=1, max_percentile=99, relative_margin=0.1):
"""Removes outliers and scales layout to between [0,1]."""
# compute percentiles
mins = np.percentile(layout, min_percentile, axis=(0))
maxs = np.percentile(layout, max_percentile, axis=(0))
# add margins
mins -= relative_margin * (maxs - mins)
maxs += relative_margin * (maxs - mins)
# `clip` broadcasts, `[None]`s added only for readability
clipped = np.clip(layout, mins, maxs)
# embed within [0,1] along both axes
clipped -= clipped.min(axis=0)
clipped /= clipped.max(axis=0)
return clipped
def aligned_umap(activations, umap_options={}, normalize=True, verbose=False):
"""`activations` can be a list of ndarrays. In that case a list of layouts is returned."""
umap_defaults = dict(
n_components=2, n_neighbors=50, min_dist=0.05, verbose=verbose, metric="cosine"
)
umap_defaults.update(umap_options)
# if passed a list of activations, we combine them and later split the layouts
if type(activations) is list or type(activations) is tuple:
num_activation_groups = len(activations)
combined_activations = np.concatenate(activations)
else:
num_activation_groups = 1
combined_activations = activations
try:
layout = UMAP(**umap_defaults).fit_transform(combined_activations)
except (RecursionError, SystemError) as exception:
log.error("UMAP failed to fit these activations. We're not yet sure why this sometimes occurs.")
raise ValueError("UMAP failed to fit activations: %s", exception)
if normalize:
layout = normalize_layout(layout)
if num_activation_groups > 1:
layouts = np.split(layout, num_activation_groups, axis=0)
return layouts
else:
return layout
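# --- Editor's hedged usage sketch (not part of the upstream module) ---
# Runs the aligned layout on two synthetic activation matrices; the shapes and
# feature dimension are arbitrary, and umap-learn must be installed.
if __name__ == "__main__":
  acts_a = np.random.rand(400, 64).astype("float32")
  acts_b = np.random.rand(300, 64).astype("float32")
  layout_a, layout_b = aligned_umap([acts_a, acts_b])
  print(layout_a.shape, layout_b.shape)  # each row is an (x, y) position in [0, 1]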
| apache-2.0 | 7,013,863,676,461,393,000 | 35.459459 | 104 | 0.67828 | false |
geoserver/wps-remote | src/wpsremote/output_file_parameter.py | 1 | 17313 | # (c) 2016 Open Source Geospatial Foundation - all rights reserved
# (c) 2014 - 2015 Centre for Maritime Research and Experimentation (CMRE)
# (c) 2013 - 2014 German Aerospace Center (DLR)
# This code is licensed under the GPL 2.0 license, available at the root
# application directory.
import os
import tempfile
import path
import json
import uuid
__author__ = "Alessio Fabiani"
__copyright__ = "Copyright 2016 Open Source Geospatial Foundation - all rights reserved"
__license__ = "GPL"
# ############################################################################################################# #
# #
# Text/Plain Output Map Format #
# #
# ############################################################################################################# #
class OutputFileParameter(object):
def __init__(self, par_name, d, template_vars_for_param_types=None, wps_execution_shared_dir=None, uploader=None):
# {"type": "string", "description": "xml OAA output file", "filepath" : "%workdir\\\\output_file.xml" }
self._name = par_name
self._type = None
self._description = None
self._title = None
self._filepath = None
self._output_mime_type = None
self._wps_execution_shared_dir = wps_execution_shared_dir
self._uploader = uploader
self._backup_on_wps_execution_shared_dir = None
self._upload_data = None
self._upload_data_root = None
self._publish_as_layer = None
self._publish_layer_name = None
self._publish_default_style = None
self._publish_target_workspace = None
self._publish_metadata = None
for k, v in d.items():
if hasattr(self, "_" + k):
if template_vars_for_param_types is not None and isinstance(v, str):
for var, val in template_vars_for_param_types.items():
if var in v:
v = v.replace("%" + var, val)
setattr(self, "_" + k, v)
self._filepath = path.path(self._filepath)
def get_name(self):
return self._name
def as_json_string(self):
# {"type": "string", "description": "A persons surname", "max": 1, "default": "Meier"}
res = {}
attrib_to_convert = ['_type',
'_description',
'_title',
'_output_mime_type',
'_publish_as_layer',
'_publish_layer_name',
'_publish_default_style',
'_publish_target_workspace']
attribute_list = [a for a in dir(self) if not a.startswith('__') and not callable(getattr(self, a))]
attribute_list_filtered = [x for x in attribute_list if x in attrib_to_convert]
for a in attribute_list_filtered:
res[a[1:]] = getattr(self, a)
return json.dumps(res)
def get_value(self):
if self._backup_on_wps_execution_shared_dir is not None and \
self._backup_on_wps_execution_shared_dir.lower() == "true" and \
self._wps_execution_shared_dir is not None:
unique_dirname = str(uuid.uuid4())
bkp_dir = path.path(self._wps_execution_shared_dir + "/" + unique_dirname)
bkp_dir.makedirs()
dst = bkp_dir.abspath() + "/" + self._filepath.basename()
self._filepath.copy(dst)
dst = path.path(dst)
return dst.text()
elif self._upload_data is not None and self._upload_data.lower() == "true" and self._uploader is not None:
unique_dirname = str(uuid.uuid4())
bkp_dir = path.path(tempfile.gettempdir() + '/' + unique_dirname)
bkp_dir.makedirs()
dst = bkp_dir.abspath() + '/' + self._filepath.basename()
self._filepath.copy(dst)
dst = path.path(dst)
src_path = os.path.abspath(os.path.join(dst.abspath(), os.pardir))
if self._upload_data_root:
unique_dirname = self._upload_data_root + '/' + unique_dirname
self._uploader.Upload(hostdir=unique_dirname, text='', binary='*.*', src=src_path)
return self._filepath.text()
else:
return self._filepath.text()
def get_type(self):
return "textual"
def get_description(self):
return self._description
def get_title(self):
return self._title
def get_output_mime_type(self):
return self._output_mime_type
def is_publish_as_layer(self):
return (self._publish_as_layer is not None and self._publish_as_layer.lower() == "true")
def get_publish_layer_name(self):
return self._publish_layer_name
def get_publish_default_style(self):
return self._publish_default_style
def get_publish_target_workspace(self):
return self._publish_target_workspace
def get_metadata(self):
if self._publish_metadata is not None:
metadata_file = path.path(self._publish_metadata)
if metadata_file.isfile():
return metadata_file.text()
return ' '
# ############################################################################################################# #
# #
# RAW File Output Map Format #
# #
# ############################################################################################################# #
class RawFileParameter(object):
def __init__(self, par_name, d, template_vars_for_param_types=None, wps_execution_shared_dir=None, uploader=None):
# {"type": "string", "description": "xml OAA output file", "filepath" : "%workdir\\\\output_file.xml" }
self._name = par_name
self._type = None
self._description = None
self._title = None
self._filepath = None
self._output_mime_type = None
self._wps_execution_shared_dir = wps_execution_shared_dir
self._uploader = uploader
self._backup_on_wps_execution_shared_dir = None
self._upload_data = None
self._upload_data_root = None
self._publish_as_layer = None
self._publish_layer_name = None
self._publish_default_style = None
self._publish_target_workspace = None
self._publish_metadata = None
for k, v in d.items():
if hasattr(self, "_" + k):
if template_vars_for_param_types is not None and isinstance(v, str):
for var, val in template_vars_for_param_types.items():
if var in v:
v = v.replace("%" + var, val)
setattr(self, "_" + k, v)
self._filepath = path.path(self._filepath)
def get_name(self):
return self._name
def as_json_string(self):
# {"type": "string", "description": "A persons surname", "max": 1, "default": "Meier"}
res = {}
attrib_to_convert = ['_type',
'_description',
'_title',
'_output_mime_type',
'_publish_as_layer',
'_publish_layer_name',
'_publish_default_style',
'_publish_target_workspace']
attribute_list = [a for a in dir(self) if not a.startswith('__') and not callable(getattr(self, a))]
attribute_list_filtered = [x for x in attribute_list if x in attrib_to_convert]
for a in attribute_list_filtered:
res[a[1:]] = getattr(self, a)
return json.dumps(res)
def get_value(self):
if self._backup_on_wps_execution_shared_dir is not None and \
self._backup_on_wps_execution_shared_dir.lower() == "true" and \
self._wps_execution_shared_dir is not None:
unique_dirname = str(uuid.uuid4())
bkp_dir = path.path(self._wps_execution_shared_dir + "/" + unique_dirname)
bkp_dir.makedirs()
dst = bkp_dir.abspath() + "/" + self._filepath.basename()
self._filepath.copy(dst)
dst = path.path(dst)
return dst
elif self._upload_data is not None and self._upload_data.lower() == "true" and self._uploader is not None:
unique_dirname = str(uuid.uuid4())
if self._upload_data_root:
unique_dirname = self._upload_data_root + '/' + unique_dirname
src_path = os.path.abspath(os.path.join(self._filepath.abspath(), os.pardir))
basename = os.path.basename(self._filepath.abspath())
basename = os.path.splitext(basename)[0]
self._uploader.Upload(hostdir=unique_dirname, text='', binary=basename+'*.*', src=src_path)
return path.path(unique_dirname + "/" + self._filepath.basename())
else:
return self._filepath
def get_type(self):
return self._type
def get_description(self):
return self._description
def get_title(self):
return self._title
def get_output_mime_type(self):
return self._output_mime_type
def is_publish_as_layer(self):
return (self._publish_as_layer is not None and self._publish_as_layer.lower() == "true")
def get_publish_layer_name(self):
return self._publish_layer_name
def get_publish_default_style(self):
return self._publish_default_style
def get_publish_target_workspace(self):
return self._publish_target_workspace
def get_metadata(self):
if self._publish_metadata is not None:
metadata_file = path.path(self._publish_metadata)
if metadata_file.isfile():
return metadata_file.text()
return ' '
# ############################################################################################################# #
# #
# OWC Json Output Map Format #
# #
# ############################################################################################################# #
class OWCFileParameter(object):
def __init__(
self,
par_name,
d,
parameters_types_defs,
template_vars_for_param_types=None,
wps_execution_shared_dir=None,
uploader=None):
# {"type": "string", "description": "xml OAA output file", "filepath" : "%workdir\\\\output_file.xml" }
self._name = par_name
self._type = None
self._description = None
self._title = None
self._output_mime_type = None
self._layers_to_publish = None
self._wps_execution_shared_dir = wps_execution_shared_dir
self._uploader = uploader
self._backup_on_wps_execution_shared_dir = None
self._upload_data = None
self._upload_data_root = None
self._publish_as_layer = "true"
self._publish_layer_name = None
self._publish_metadata = None
for k, v in d.items():
if hasattr(self, "_" + k):
if template_vars_for_param_types is not None and isinstance(v, str):
for var, val in template_vars_for_param_types.items():
if var in v:
v = v.replace("%" + var, val)
setattr(self, "_" + k, v)
self._files_to_publish = ''
self._default_styles = ''
self._target_workspaces = ''
if self._layers_to_publish is not None:
self._parameters_types_defs = parameters_types_defs
layer_names = self._layers_to_publish.split(';')
for name in layer_names:
if self._parameters_types_defs[name] is not None:
publish_layer_name = self._parameters_types_defs[name].get('publish_layer_name')
publish_layer_name = publish_layer_name if publish_layer_name is not None else ' '
publish_default_style = self._parameters_types_defs[name].get('publish_default_style')
publish_default_style = publish_default_style if publish_default_style is not None else ' '
publish_target_workspace = self._parameters_types_defs[name].get('publish_target_workspace')
publish_target_workspace = publish_target_workspace if publish_target_workspace is not None else ' '
self._files_to_publish += publish_layer_name + ";"
self._default_styles += publish_default_style + ";"
self._target_workspaces += publish_target_workspace + ";"
def get_name(self):
return self._name
def as_json_string(self):
# {"type": "string", "description": "A persons surname", "max": 1, "default": "Meier"}
res = {}
attrib_to_convert = ['_type',
'_description',
'_title',
'_output_mime_type',
'_publish_as_layer',
'_publish_layer_name',
'_files_to_publish',
'_default_styles',
'_target_workspaces']
attribute_list = [a for a in dir(self) if not a.startswith('__') and not callable(getattr(self, a))]
attribute_list_filtered = [x for x in attribute_list if x in attrib_to_convert]
for a in attribute_list_filtered:
res[a[1:]] = getattr(self, a)
return json.dumps(res)
def get_value(self):
if self._backup_on_wps_execution_shared_dir is not None and \
self._backup_on_wps_execution_shared_dir.lower() == "true" and \
self._wps_execution_shared_dir is not None:
unique_dirname = str(uuid.uuid4())
bkp_dir = path.path(self._wps_execution_shared_dir + "/" + unique_dirname)
bkp_dir.makedirs()
tokens = self._files_to_publish.split(';')
files_to_publish = ""
for token in tokens:
filepath = path.path(token)
dst = bkp_dir.abspath() + "/" + filepath.basename()
filepath.copy(dst)
dst = path.path(dst)
if len(files_to_publish) > 0:
files_to_publish = files_to_publish + ";"
files_to_publish = files_to_publish + dst.abspath()
return files_to_publish
elif self._upload_data is not None and self._upload_data.lower() == "true" and self._uploader is not None:
unique_dirname = str(uuid.uuid4())
bkp_dir = path.path(tempfile.gettempdir() + '/' + unique_dirname)
bkp_dir.makedirs()
tokens = self._files_to_publish.split(';')
if self._upload_data_root:
unique_dirname = self._upload_data_root + '/' + unique_dirname
files_to_publish = ""
for token in tokens:
filepath = path.path(token)
dst = bkp_dir.abspath() + '/' + filepath.basename()
filepath.copy(dst)
dst = path.path(dst)
if len(files_to_publish) > 0:
files_to_publish = files_to_publish + ';'
files_to_publish = files_to_publish + '/' + unique_dirname + '/' + filepath.basename()
self._uploader.Upload(hostdir=unique_dirname, text='', binary='*.*', src=bkp_dir.abspath())
return files_to_publish
else:
return self._files_to_publish
def get_type(self):
return self._type
def get_description(self):
return self._description
def get_title(self):
return self._title
def get_output_mime_type(self):
return self._output_mime_type
def is_publish_as_layer(self):
return (self._publish_as_layer is not None and self._publish_as_layer.lower() == "true")
def get_publish_layer_name(self):
return self._publish_layer_name
def get_publish_default_style(self):
return self._default_styles
def get_publish_target_workspace(self):
return self._target_workspaces
def get_metadata(self):
if self._publish_metadata is not None:
metadata_file = path.path(self._publish_metadata)
if metadata_file.isfile():
return metadata_file.text()
return ' '
| gpl-2.0 | 3,631,209,823,113,164,300 | 39.736471 | 120 | 0.50517 | false |
sivakumar-kailasam/Repeat-Macro | repeatMacro.py | 1 | 1053 | #
# Sivakumar Kailasam and lowliet
#
import sublime, sublime_plugin
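# Added usage note (not part of the original plugin): Sublime Text exposes
# RepeatMacroCommand under the snake_case name "repeat_macro", so after
# recording a macro it can be bound to a key roughly like this (the key
# combination below is only an example):
#
#   { "keys": ["ctrl+shift+m"], "command": "repeat_macro" }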
class RepeatMacroCommand(sublime_plugin.TextCommand):
def run(self, edit):
self.view.window().show_input_panel("Repeat count or [Enter] to run till end of file", "", self.__execute, None, None)
def __execute(self, text):
if not text.isdigit() and len(text) > 0:
print("Repeat Macro | Wrong number")
# elif len(text) > 0 and int(text) > (self.__get_last_line() - self.__get_current_line()):
# print("Repeat Macro | Number too big (bigger than number of lines in file)")
else:
current_line = self.__get_current_line()
last_line = current_line + int(text) if len(text) > 0 else self.__get_last_line()
for i in range(current_line, last_line):
self.view.run_command("run_macro")
def __get_current_line(self):
return self.view.rowcol(self.view.sel()[0].begin())[0] + 1
def __get_last_line(self):
return self.view.rowcol(self.view.size())[0] + 2 | mit | 7,555,469,475,639,688,000 | 39.538462 | 126 | 0.604938 | false |
alexkuz/pg_unidecode | builder/builder.py | 1 | 2589 | # -*- coding: utf-8 -*-
"""
Generates source data
"""
from __future__ import print_function
import imp
import os
import io
import re
import sys
DATA_DIR = os.environ.get("DATA_DIR")
UNIDECODE_REPOPATH = os.environ.get("UNIDECODE_REPOPATH")
if not DATA_DIR and UNIDECODE_REPOPATH:
DATA_DIR = os.path.join(UNIDECODE_REPOPATH, "unidecode")
if not DATA_DIR:
print("You must set the environment variable UNIDECODE_REPOPATH with the path of unidecode repository"
", cloned from https://github.com/avian2/unidecode", file=sys.stderr)
sys.exit(2)
BUILD_DIR = 'src/data'
def create_data(data_file, pos_file):
"""
    Write the flattened transliteration tables as C initialisers: the packed
    character data (chars) and the per-codepoint offset array (pos).
"""
data_file.write(u'char chars[] = "\\\n')
pos_file.write(u'int pos[] = {\n')
pos = 0
ranges = {}
for subdir, _, files in os.walk(DATA_DIR):
ranges = {int(re.sub(r"(^x|\.py$)", "", filename), 16):
os.path.join(subdir, filename)
for filename in files
if filename.startswith('x') and
filename.endswith('py')}
max_rng = max(ranges.keys())
for rng in xrange(0, max_rng + 1):
if rng != 0:
pos_file.write(u',')
if not rng in ranges:
pos_file.write(u",".join([u"0" for i in xrange(0, 256)]))
pos_file.write(u'\n')
continue
path = ranges[rng]
module = imp.load_source('module', path)
data_len = len(module.data)
data = [module.data[i] if i < data_len else ''
for i in xrange(0, 256)]
for (i, char) in enumerate(data):
charlen = len(char)
char = char.replace('\\', '\\\\')
char = ''.join([u'\\x%02x' % ord(c)
if ord(c) < 0x20
else c
for c in char])
char = char.replace('"', '\\"')
char = char.replace('%', '\\%')
char = char.replace('?', '\\?')
data_file.write(unicode(char))
pos_file.write((u',%d' if i else u'%d') % pos)
pos += charlen
data_file.write(u'\\\n')
pos_file.write(u'\n')
data_file.write(u'";\n')
pos_file.write(u'};\n')
def build():
"""
    Create BUILD_DIR if necessary and generate chars.h and pos.h.
"""
if not os.path.exists(BUILD_DIR):
os.makedirs(BUILD_DIR)
chars_path = os.path.join(BUILD_DIR, 'chars.h')
pos_path = os.path.join(BUILD_DIR, 'pos.h')
with io.open(chars_path, mode='w') as data_file:
with io.open(pos_path, mode='w') as pos_file:
create_data(data_file, pos_file)
build()
| gpl-2.0 | 2,638,439,943,840,021,000 | 25.96875 | 106 | 0.526458 | false |
ManchesterIO/mollyproject-next | tests/molly/test_config_loader.py | 1 | 3624 | from flask.ext.script import Manager
from mock import Mock
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
import unittest2 as unittest
from molly.config import ConfigLoader, ConfigError
from tests.test_providers.provider import Provider as TestProvider
from tests.test_services.service import Service as TestService
class ConfigLoaderTestCase(unittest.TestCase):
def setUp(self):
self._flask = Mock()
self._config_loader = ConfigLoader(self._flask)
def tearDown(self):
SIMPLE_TEST_CONFIG.reset()
PROVIDER_TEST_CONFIG.reset()
def test_config_loader_returns_list_of_apps(self):
apps, services = self._config_loader.load_from_config(StringIO(""))
self.assertEquals(0, len(apps))
def test_config_loader_loads_apps(self):
apps, services = self._config_loader.load_from_config(SIMPLE_TEST_CONFIG)
self.assertEquals('test', apps[0].instance_name)
def test_config_loader_passes_config_dict_to_app(self):
apps, services = self._config_loader.load_from_config(SIMPLE_TEST_CONFIG)
self.assertEquals('bar', apps[0].config['foo'])
def test_config_loader_creates_providers_for_app(self):
apps, services = self._config_loader.load_from_config(PROVIDER_TEST_CONFIG)
self.assertIsInstance(apps[0].providers[0], TestProvider)
def test_config_loader_puts_config_into_provider(self):
apps, services = self._config_loader.load_from_config(PROVIDER_TEST_CONFIG)
self.assertEquals('baz', apps[0].providers[0].config['bar'])
def test_config_loader_raises_config_exception_on_no_such_app(self):
self.assertRaises(ConfigError, self._config_loader.load_from_config, BAD_APP_CONFIG)
def test_config_loader_raises_config_exception_on_no_such_provider(self):
self.assertRaises(ConfigError, self._config_loader.load_from_config, BAD_PROVIDER_CONFIG)
def test_module_is_compulsory_field(self):
self.assertRaises(ConfigError, self._config_loader.load_from_config, MISSING_APP_CONFIG)
def test_provider_is_compulsory_field(self):
self.assertRaises(ConfigError, self._config_loader.load_from_config, MISSING_PROVIDER_CONFIG)
def test_services_are_passed_to_app(self):
apps, services = self._config_loader.load_from_config(SERVICES_CONFIG)
self.assertIsInstance(apps[0].services['test'], TestService)
self.assertIsInstance(services['test'], TestService)
def test_global_config_is_extracted(self):
self._config_loader.load_from_config(GLOBAL_CONFIG)
self._flask.config.update.assert_called_once_with({'DEBUG': True})
def test_default_cli_service_is_created(self):
apps, services = self._config_loader.load_from_config(StringIO(""))
self.assertIsInstance(services['cli'], Manager)
SIMPLE_TEST_CONFIG = StringIO("""
[test]
module = tests.test_apps.app
foo = bar
""")
PROVIDER_TEST_CONFIG = StringIO("""
[test]
module = tests.test_apps.app
foo = bar
provider.test = tests.test_providers.provider
provider.test.bar = baz
""")
BAD_APP_CONFIG = StringIO("""
[test]
module = does.not.exist
""")
BAD_PROVIDER_CONFIG = StringIO("""
[test]
module = tests.test_apps.app
provider.test = does.not.exist
""")
MISSING_APP_CONFIG = StringIO("""
[test]
foo = bar
""")
MISSING_PROVIDER_CONFIG = StringIO("""
[test]
module = tests.test_apps.app
provider.test.bar = baz
""")
SERVICES_CONFIG = StringIO("""
[test]
module = tests.test_apps.app
[services]
test = tests.test_services.service
""")
GLOBAL_CONFIG = StringIO("""
[global]
DEBUG = True
""")
| apache-2.0 | 8,892,699,815,454,835,000 | 29.453782 | 101 | 0.707506 | false |
DolphinDream/sverchok | old_nodes/formula_mk4.py | 1 | 9177 | # This file is part of project Sverchok. It's copyrighted by the contributors
# recorded in the version control history of the file, available from
# its original location https://github.com/nortikin/sverchok/commit/master
#
# SPDX-License-Identifier: GPL3
# License-Filename: LICENSE
import ast
from math import *
from collections import defaultdict
import bpy
from bpy.props import BoolProperty, StringProperty, EnumProperty, FloatVectorProperty, IntProperty
import json
import io
from sverchok.node_tree import SverchCustomTreeNode
from sverchok.data_structure import updateNode, match_long_repeat, zip_long_repeat, throttle_and_update_node
from sverchok.utils import logging
from sverchok.utils.modules.eval_formula import get_variables, safe_eval
class SvFormulaNodeMk4(bpy.types.Node, SverchCustomTreeNode):
"""
Triggers: Formula
Tooltip: Calculate by custom formula.
"""
bl_idname = 'SvFormulaNodeMk4'
bl_label = 'Formula+'
bl_icon = 'OUTLINER_OB_EMPTY'
sv_icon = 'SV_FORMULA'
replacement_nodes = [('SvFormulaNodeMk5', None, None)]
@throttle_and_update_node
def on_update(self, context):
self.adjust_sockets()
@throttle_and_update_node
def on_update_dims(self, context):
if self.dimensions < 4:
self.formula4 = ""
if self.dimensions < 3:
self.formula3 = ""
if self.dimensions < 2:
self.formula2 = ""
self.adjust_sockets()
dimensions : IntProperty(name="Dimensions", default=1, min=1, max=4, update=on_update_dims)
formula1: StringProperty(default="x+y", update=on_update)
formula2: StringProperty(update=on_update)
formula3: StringProperty(update=on_update)
formula4: StringProperty(update=on_update)
separate: BoolProperty(name="Separate", default=False, update=updateNode)
wrapping: bpy.props.EnumProperty(
items=[(k, k, '', i) for i, k in enumerate(["-1", "0", "+1"])],
description="+1: adds a set of square brackets around the output\n 0: Keeps result unchanged\n-1: Removes a set of outer square brackets",
default="0", update=updateNode
)
use_ast: BoolProperty(name="AST", description="uses the ast.literal_eval module", update=updateNode)
ui_message: StringProperty(name="ui message")
def formulas(self):
return [self.formula1, self.formula2, self.formula3, self.formula4]
def formula(self, k):
return self.formulas()[k]
def draw_buttons(self, context, layout):
if self.ui_message:
r = layout.row()
r.alert = True
r.label(text=self.ui_message, icon='INFO')
layout.prop(self, "formula1", text="")
if self.dimensions > 1:
layout.prop(self, "formula2", text="")
if self.dimensions > 2:
layout.prop(self, "formula3", text="")
if self.dimensions > 3:
layout.prop(self, "formula4", text="")
row = layout.row()
if self.inputs:
row.prop(self, "separate", text="Split", toggle=True)
else:
row.prop(self, "use_ast", text="", icon="SCRIPTPLUGINS")
row.prop(self, "wrapping", expand=True)
def draw_buttons_ext(self, context, layout):
layout.prop(self, "dimensions")
self.draw_buttons(context, layout)
def sv_init(self, context):
self.inputs.new('SvStringsSocket', "x")
self.outputs.new('SvStringsSocket', "Result")
def get_variables(self):
variables = set()
for formula in self.formulas():
vs = get_variables(formula)
variables.update(vs)
return list(sorted(variables))
def adjust_sockets(self):
variables = self.get_variables()
        # if the current node sockets already match the variables sequence, do nothing and skip the rebuild
# this is the logic path that will be encountered most often.
if len(self.inputs) == len(variables):
if variables == [socket.name for socket in self.inputs]:
# self.info("no UI change: socket inputs same")
return
# else to avoid making things complicated we rebuild the UI inputs, even when it is technically sub optimal
self.hot_reload_sockets()
def clear_and_repopulate_sockets_from_variables(self):
with self.sv_throttle_tree_update():
self.inputs.clear()
variables = self.get_variables()
for v in variables:
self.inputs.new('SvStringsSocket', v)
def hot_reload_sockets(self):
"""
function hoisted from functorb, with deletions and edits
- store current input socket links by name/origin
- wipe all inputs
- recreate new sockets from variables
- relink former links by name on this socket, but by index from their origin.
"""
self.info('handling input wipe and relink')
nodes = self.id_data.nodes
node_tree = self.id_data
# if any current connections... gather them
reconnections = []
for i in (i for i in self.inputs if i.is_linked):
for L in i.links:
link = lambda: None
link.from_node = L.from_socket.node.name
link.from_socket = L.from_socket.index # index used here because these can come from reroute
link.to_socket = L.to_socket.name # this node will always have unique socket names
reconnections.append(link)
self.clear_and_repopulate_sockets_from_variables()
# restore connections where applicable (by socket name), if no links.. this is a no op.
for link in reconnections:
try:
from_part = nodes[link.from_node].outputs[link.from_socket]
to_part = self.inputs[link.to_socket]
node_tree.links.new(from_part, to_part)
except Exception as err:
str_from = f'nodes[{link.from_node}].outputs[{link.from_socket}]'
str_to = f'nodes[{self}].inputs[{link.to_socket}]'
self.exception(f'failed: {str_from} -> {str_to}')
self.exception(err)
def sv_update(self):
'''
update analyzes the state of the node and returns if the criteria to start processing
are not met.
'''
if not any(len(formula) for formula in self.formulas()):
return
self.adjust_sockets()
def get_input(self):
variables = self.get_variables()
inputs = {}
for var in variables:
if var in self.inputs and self.inputs[var].is_linked:
inputs[var] = self.inputs[var].sv_get()
return inputs
def all_inputs_connected(self):
if self.inputs:
if not all(socket.is_linked for socket in self.inputs):
return False
return True
def process(self):
if not self.outputs[0].is_linked:
return
        # if the user specifies a variable, they must also link a value into that socket; this prevents an exception during evaluation
self.ui_message = ""
if not self.all_inputs_connected():
self.ui_message = "node not fully connected"
return
var_names = self.get_variables()
inputs = self.get_input()
results = []
if var_names:
input_values = [inputs.get(name) for name in var_names]
parameters = match_long_repeat(input_values)
for objects in zip(*parameters):
object_results = []
for values in zip_long_repeat(*objects):
variables = dict(zip(var_names, values))
vector = []
for formula in self.formulas():
if formula:
value = safe_eval(formula, variables)
vector.append(value)
if self.separate:
object_results.append(vector)
else:
object_results.extend(vector)
results.append(object_results)
else:
def joined_formulas(f1, f2, f3, f4):
built_string = ""
if f1: built_string += f1
if f2: built_string += f",{f2}"
if f3: built_string += f",{f3}"
if f4: built_string += f",{f4}"
return list(ast.literal_eval(built_string))
if self.use_ast:
results = joined_formulas(*self.formulas())
else:
vector = []
for formula in self.formulas():
if formula:
value = safe_eval(formula, dict())
vector.append(value)
results.extend(vector)
if self.wrapping == "+1":
results = [results]
elif self.wrapping == "-1":
results = results[0] if len(results) else results
self.outputs['Result'].sv_set(results)
classes = [SvFormulaNodeMk4]
register, unregister = bpy.utils.register_classes_factory(classes)
| gpl-3.0 | 2,291,811,314,450,939,000 | 34.847656 | 146 | 0.589517 | false |
uunicorn/pyskype | src/rsa_keygen.py | 1 | 1550 |
import random
def miller_rabin(n, k = 100):
if n > 31:
for p in [3, 5, 7, 11, 13, 17, 19, 23, 29, 31]:
if n % p == 0:
return False
d=n-1
s=0
while d & 1 == 0:
d = d >> 1
s += 1
for i in range(k):
a = random.randint(2,n-1)
x = pow(a, d, n)
if x == 1 or x == n-1:
continue
possiblyprime = False
for j in range(s-1):
x = (x**2)%n
if x == 1:
return False
if x == n - 1:
possiblyprime = True
break
if possiblyprime == False:
return False
return True
def random_prime(low, high):
r = random.randint(low, high)
if r%2 == 0:
r+=1
while True:
if miller_rabin(r) == True:
break
r+=2
return r
def nbit_prime(bits):
    # make sure the random prime is big enough so that p*q will have its msb set
high = (1 << bits) - 1
low = 1 << (bits - 1)
    low += (high-low)//2
return random_prime(low, high)
def get_d(e, m):
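    # Added explanatory comment: this is the iterative extended Euclidean
    # algorithm; assuming gcd(e, m) == 1 it returns d with (d * e) % m == 1,
    # i.e. the modular inverse of e modulo m, used below with
    # m = (p-1)*(q-1) to derive the RSA private exponent.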
tm = m
x = lasty = 0
lastx = y = 1
while tm != 0:
q = e // tm
e, tm = tm, e % tm
x, lastx = lastx - q*x, x
y, lasty = lasty - q*y, y
if lastx < 0:
return lastx + m
else:
return lastx
def make_rsa_keypair(bits=512):
p = nbit_prime(bits)
q = nbit_prime(bits)
n = p*q
e = 0x10001
m = (p-1)*(q-1)
d = get_d(e, m)
return (e, n, d)
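if __name__ == "__main__":
    # Minimal round-trip check, added for illustration (not part of the
    # original module): generate a key pair and verify that decrypting an
    # encrypted small integer recovers it. 256-bit primes keep the demo fast.
    e, n, d = make_rsa_keypair(bits=256)
    message = 42
    ciphertext = pow(message, e, n)
    assert pow(ciphertext, d, n) == message
    print("RSA round-trip OK")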
| mit | -2,289,449,030,673,462,800 | 17.902439 | 77 | 0.433548 | false |
pengli09/Paddle | python/paddle/v2/framework/io.py | 1 | 7239 | import os
import cPickle as pickle
from paddle.v2.framework.framework import Program, Parameter, g_program, \
Variable
__all__ = [
'save_vars', 'save_params', 'save_persistables', 'load_vars', 'load_params',
'load_persistables', "save_inference_model", "load_inference_model"
]
def is_parameter(var):
return isinstance(var, Parameter)
def is_persistable(var):
return var.persistable
def _clone_var_in_block_(block, var):
assert isinstance(var, Variable)
return block.create_var(
name=var.name,
shape=var.shape,
dtype=var.data_type,
type=var.type,
lod_level=var.lod_level,
persistable=True)
def save_vars(executor, dirname, program=None, vars=None, predicate=None):
"""
Save variables to directory by executor.
    :param executor: executor that saves the variables
    :param dirname: directory path
    :param program: program. If vars is None, all variables in this program
        that satisfy `predicate` are saved. Default g_program.
    :param predicate: a callable that takes a variable and returns a bool.
        If it returns True, the variable will be saved.
    :param vars: variables that need to be saved. If vars is specified,
        program & predicate will be ignored
:return: None
"""
if vars is None:
if program is None:
program = g_program
if not isinstance(program, Program):
raise TypeError("program should be as Program type or None")
save_vars(
executor,
dirname=dirname,
vars=filter(predicate, program.list_vars()))
else:
save_program = Program()
save_block = save_program.global_block()
for each_var in vars:
new_var = _clone_var_in_block_(save_block, each_var)
save_block.append_op(
type='save',
inputs={'X': [new_var]},
outputs={},
attrs={'file_path': os.path.join(dirname, new_var.name)})
executor.run(save_program)
def save_params(executor, dirname, program=None):
"""
Save all parameters to directory with executor.
"""
save_vars(
executor,
dirname=dirname,
program=program,
vars=None,
predicate=is_parameter)
def save_persistables(executor, dirname, program=None):
"""
Save all persistables to directory with executor.
"""
save_vars(
executor,
dirname=dirname,
program=program,
vars=None,
predicate=is_persistable)
def load_vars(executor, dirname, program=None, vars=None, predicate=None):
"""
Load variables from directory by executor.
    :param executor: executor that loads the variables
    :param dirname: directory path
    :param program: program. If vars is None, all variables in this program
        that satisfy `predicate` are loaded. Default g_program.
    :param predicate: a callable that takes a variable and returns a bool.
        If it returns True, the variable will be loaded.
    :param vars: variables that need to be loaded. If vars is specified,
        program & predicate will be ignored
:return: None
"""
if vars is None:
if program is None:
program = g_program
if not isinstance(program, Program):
raise TypeError("program's type should be Program")
load_vars(
executor,
dirname=dirname,
vars=filter(predicate, program.list_vars()))
else:
load_prog = Program()
load_block = load_prog.global_block()
for each_var in vars:
assert isinstance(each_var, Variable)
new_var = _clone_var_in_block_(load_block, each_var)
load_block.append_op(
type='load',
inputs={},
outputs={"Out": [new_var]},
attrs={'file_path': os.path.join(dirname, new_var.name)})
executor.run(load_prog)
def load_params(executor, dirname, program=None):
"""
load all parameters from directory by executor.
"""
load_vars(
executor, dirname=dirname, program=program, predicate=is_parameter)
def load_persistables(executor, dirname, program=None):
"""
load all persistables from directory by executor.
"""
load_vars(
executor, dirname=dirname, program=program, predicate=is_persistable)
def save_inference_model(dirname,
feeded_var_names,
target_vars,
executor,
program=None):
"""
    Prune the given program down to what is needed for inference and save the
    resulting program, together with its parameters, to the directory using
    the executor.
:param dirname: directory path
    :param feeded_var_names: Names of variables that need to be fed data during inference
    :param target_vars: Variables from which we can get inference results.
    :param executor: executor that saves the inference model
:param program: original program, which will be pruned to build the inference model.
Default g_program.
:return: None
"""
if program is None:
program = g_program
if not isinstance(target_vars, list):
target_vars = [target_vars]
if not os.path.isdir(dirname):
os.makedirs(dirname)
pruned_program = program.prune(target_vars)
fetch_var_names = [v.name for v in target_vars]
model_file_name = dirname + "/__model__"
with open(model_file_name, "w") as f:
pickle.dump({
"program_desc_str": pruned_program.desc.serialize_to_string(),
"feed_var_names": feeded_var_names,
"fetch_var_names": fetch_var_names
}, f, -1)
save_params(executor, dirname, program)
def load_persistables_if_exist(executor, dirname, program=None):
filenames = next(os.walk(dirname))[2]
filenames = set(filenames)
def _is_presistable_and_exist_(var):
if not is_persistable(var):
return False
else:
return var.name in filenames
load_vars(
executor,
dirname,
program=program,
vars=None,
predicate=_is_presistable_and_exist_)
def load_inference_model(dirname, executor):
"""
Load inference model from a directory
:param dirname: directory path
    :param executor: executor that loads the inference model
    :return: [program, feed_var_names, fetch_vars]
             program: program specialized for inference.
             feed_var_names: Names of variables that need to be fed data
fetch_vars: Variables from which we can get inference results.
"""
if not os.path.isdir(dirname):
raise ValueError("There is no directory named '%s'", dirname)
model_file_name = dirname + "/__model__"
model = pickle.load(open(model_file_name, "r"))
program_desc_str = model["program_desc_str"]
feed_var_names = model["feed_var_names"]
fetch_var_names = model["fetch_var_names"]
program = Program.parse_from_string(program_desc_str)
load_persistables_if_exist(executor, dirname, program)
fetch_vars = [program.global_block().var(name) for name in fetch_var_names]
return [program, feed_var_names, fetch_vars]
| apache-2.0 | 4,832,600,745,266,361,000 | 30.473913 | 92 | 0.626191 | false |
lifanov/cobbler | cobbler/tftpgen.py | 1 | 49135 | """
Generate files provided by TFTP server based on Cobbler object tree.
This is the code behind 'cobbler sync'.
Copyright 2006-2009, Red Hat, Inc and Others
Michael DeHaan <michael.dehaan AT gmail>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
import os
import os.path
import re
import shutil
import socket
import string
from cexceptions import CX
import templar
import utils
class TFTPGen:
"""
Generate files provided by TFTP server
"""
def __init__(self, collection_mgr, logger):
"""
Constructor
"""
self.collection_mgr = collection_mgr
self.logger = logger
self.api = collection_mgr.api
self.distros = collection_mgr.distros()
self.profiles = collection_mgr.profiles()
self.systems = collection_mgr.systems()
self.settings = collection_mgr.settings()
self.repos = collection_mgr.repos()
self.images = collection_mgr.images()
self.templar = templar.Templar(collection_mgr)
self.bootloc = utils.tftpboot_location()
def copy_bootloaders(self):
"""
Copy bootloaders to the configured tftpboot directory
NOTE: we support different arch's if defined in
/etc/cobbler/settings.
"""
dst = self.bootloc
grub_dst = os.path.join(dst, "grub")
boot_dst = os.path.join(dst, "boot/grub")
# copy pxelinux from one of two locations
try:
try:
utils.copyfile_pattern(
'/var/lib/cobbler/loaders/pxelinux.0',
dst, api=self.api, cache=False, logger=self.logger)
utils.copyfile_pattern(
'/var/lib/cobbler/loaders/menu.c32',
dst, api=self.api, cache=False, logger=self.logger)
utils.copyfile_pattern(
'/var/lib/cobbler/loaders/ldlinux.c32',
dst, api=self.api, cache=False, logger=self.logger)
except:
utils.copyfile_pattern(
'/usr/share/syslinux/pxelinux.0',
dst, api=self.api, cache=False, logger=self.logger)
utils.copyfile_pattern(
'/usr/share/syslinux/menu.c32',
dst, api=self.api, cache=False, logger=self.logger)
utils.copyfile_pattern(
'/usr/share/syslinux/ldlinux.c32',
dst, api=self.api, cache=False, logger=self.logger)
except:
utils.copyfile_pattern(
'/usr/lib/syslinux/pxelinux.0',
dst, api=self.api, cache=False, logger=self.logger)
utils.copyfile_pattern(
'/usr/lib/syslinux/menu.c32',
dst, api=self.api, cache=False, logger=self.logger)
utils.copyfile_pattern(
'/usr/lib/syslinux/ldlinux.c32',
dst, api=self.api, cache=False, logger=self.logger)
# copy yaboot which we include for PowerPC targets
utils.copyfile_pattern(
'/var/lib/cobbler/loaders/yaboot', dst,
require_match=False, api=self.api, cache=False, logger=self.logger)
utils.copyfile_pattern(
'/var/lib/cobbler/loaders/boot/grub/*', boot_dst,
require_match=False, api=self.api, cache=False, logger=self.logger)
try:
utils.copyfile_pattern(
'/usr/lib/syslinux/memdisk',
dst, api=self.api, cache=False, logger=self.logger)
except:
utils.copyfile_pattern(
'/usr/share/syslinux/memdisk', dst,
require_match=False, api=self.api, cache=False, logger=self.logger)
# Copy gPXE/iPXE bootloader if it exists
utils.copyfile_pattern(
'/usr/share/*pxe/undionly.kpxe', dst,
require_match=False, api=self.api, cache=False, logger=self.logger)
# Copy grub EFI bootloaders if possible:
utils.copyfile_pattern(
'/var/lib/cobbler/loaders/grub*.efi', grub_dst,
require_match=False, api=self.api, cache=False, logger=self.logger)
pxegrub_imported = False
for i in self.distros:
if 'nexenta' == i.breed and not pxegrub_imported:
                # name_without_arch = i.name[:-7]  # removing -x86_64 from the end of the string
if os.path.isdir(os.path.join(self.bootloc, 'boot')):
shutil.rmtree(os.path.join(self.bootloc, 'boot'))
shutil.copytree(os.path.join('/var', 'www', 'cobbler', 'distro_mirror', i.name, 'boot'),
os.path.join(self.bootloc, 'boot'))
pxegrub_imported = True
def copy_images(self):
"""
Like copy_distros except for images.
"""
errors = list()
for i in self.images:
try:
self.copy_single_image_files(i)
except CX, e:
errors.append(e)
self.logger.error(e.value)
# FIXME: using logging module so this ends up in cobbler.log?
def copy_single_distro_files(self, d, dirtree, symlink_ok):
distros = os.path.join(dirtree, "images")
distro_dir = os.path.join(distros, d.name)
utils.mkdir(distro_dir)
kernel = utils.find_kernel(d.kernel) # full path
initrd = utils.find_initrd(d.initrd) # full path
if kernel is None:
raise CX("kernel not found: %(file)s, distro: %(distro)s" % {"file": d.kernel, "distro": d.name})
if initrd is None:
raise CX("initrd not found: %(file)s, distro: %(distro)s" % {"file": d.initrd, "distro": d.name})
# Koan manages remote kernel itself, but for consistent PXE
# configurations the synchronization is still necessary
if not utils.file_is_remote(kernel):
b_kernel = os.path.basename(kernel)
dst1 = os.path.join(distro_dir, b_kernel)
utils.linkfile(kernel, dst1, symlink_ok=symlink_ok, api=self.api, logger=self.logger)
else:
b_kernel = os.path.basename(kernel)
dst1 = os.path.join(distro_dir, b_kernel)
utils.copyremotefile(kernel, dst1, api=None, logger=self.logger)
if not utils.file_is_remote(initrd):
b_initrd = os.path.basename(initrd)
dst2 = os.path.join(distro_dir, b_initrd)
utils.linkfile(initrd, dst2, symlink_ok=symlink_ok, api=self.api, logger=self.logger)
else:
b_initrd = os.path.basename(initrd)
dst1 = os.path.join(distro_dir, b_initrd)
utils.copyremotefile(initrd, dst1, api=None, logger=self.logger)
if "nexenta" == d.breed:
try:
os.makedirs(os.path.join(distro_dir, 'platform', 'i86pc', 'kernel', 'amd64'))
os.makedirs(os.path.join(distro_dir, 'platform', 'i86pc', 'amd64'))
except OSError:
pass
b_kernel = os.path.basename(kernel)
utils.linkfile(kernel, os.path.join(distro_dir, 'platform', 'i86pc', 'kernel', 'amd64', b_kernel),
symlink_ok=symlink_ok, api=self.api, logger=self.logger)
b_initrd = os.path.basename(initrd)
utils.linkfile(initrd, os.path.join(distro_dir, 'platform', 'i86pc', 'amd64', b_initrd),
symlink_ok=symlink_ok, api=self.api, logger=self.logger)
# the [:-7] removes the architecture
if os.path.isdir(os.path.join('/var', 'www', 'cobbler', 'links', d.name, 'install_profiles')):
shutil.rmtree(os.path.join('/var', 'www', 'cobbler', 'links', d.name, 'install_profiles'))
shutil.copytree(os.path.join('/var', 'lib', 'cobbler', 'autoinstall_templates', 'install_profiles'),
os.path.join('/var', 'www', 'cobbler', 'links', d.name, 'install_profiles'))
def copy_single_image_files(self, img):
images_dir = os.path.join(self.bootloc, "images2")
filename = img.file
if not os.path.exists(filename):
# likely for virtual usage, cannot use
return
if not os.path.exists(images_dir):
os.makedirs(images_dir)
newfile = os.path.join(images_dir, img.name)
utils.linkfile(filename, newfile, api=self.api, logger=self.logger)
def write_all_system_files(self, system, menu_items):
profile = system.get_conceptual_parent()
if profile is None:
raise CX("system %(system)s references a missing profile %(profile)s" % {"system": system.name, "profile": system.profile})
distro = profile.get_conceptual_parent()
image_based = False
image = None
if distro is None:
if profile.COLLECTION_TYPE == "profile":
raise CX("profile %(profile)s references a missing distro %(distro)s" % {"profile": system.profile, "distro": profile.distro})
else:
image_based = True
image = profile
pxe_metadata = {'pxe_menu_items': menu_items}
# generate one record for each described NIC ..
for (name, interface) in system.interfaces.iteritems():
f1 = utils.get_config_filename(system, interface=name)
if f1 is None:
self.logger.warning("invalid interface recorded for system (%s,%s)" % (system.name, name))
continue
if image_based:
working_arch = image.arch
else:
working_arch = distro.arch
if working_arch is None:
raise "internal error, invalid arch supplied"
# for tftp only ...
grub_path = None
if working_arch in ["i386", "x86", "x86_64", "arm", "standard"]:
# pxelinux wants a file named $name under pxelinux.cfg
f2 = os.path.join(self.bootloc, "pxelinux.cfg", f1)
# Only generating grub menus for these arch's:
grub_path = os.path.join(self.bootloc, "grub", f1.upper())
elif working_arch.startswith("ppc"):
# Determine filename for system-specific bootloader config
filename = "%s" % utils.get_config_filename(system, interface=name).lower()
# to inherit the distro and system's boot_loader values correctly
blended_system = utils.blender(self.api, False, system)
if blended_system["boot_loader"] == "pxelinux":
# pxelinux wants a file named $name under pxelinux.cfg
f2 = os.path.join(self.bootloc, "pxelinux.cfg", f1)
elif distro.boot_loader == "grub2" or blended_system["boot_loader"] == "grub2":
f2 = os.path.join(self.bootloc, "boot/grub", "grub.cfg-" + filename)
else:
f2 = os.path.join(self.bootloc, "etc", filename)
# Link to the yaboot binary
f3 = os.path.join(self.bootloc, "ppc", filename)
if os.path.lexists(f3):
utils.rmfile(f3)
os.symlink("../yaboot", f3)
else:
continue
if system.is_management_supported():
if not image_based:
self.write_pxe_file(f2, system, profile, distro, working_arch, metadata=pxe_metadata)
if grub_path:
self.write_pxe_file(grub_path, system, profile, distro, working_arch, format="grub")
else:
self.write_pxe_file(f2, system, None, None, working_arch, image=profile, metadata=pxe_metadata)
else:
# ensure the file doesn't exist
utils.rmfile(f2)
if grub_path:
utils.rmfile(grub_path)
def make_pxe_menu(self):
self.make_actual_pxe_menu()
def get_menu_items(self):
"""
Generates menu items for pxe and grub
"""
# sort the profiles
profile_list = [profile for profile in self.profiles]
def sort_name(a, b):
return cmp(a.name, b.name)
profile_list.sort(sort_name)
# sort the images
image_list = [image for image in self.images]
image_list.sort(sort_name)
# Build out menu items and append each to this master list, used for
# the default menus:
pxe_menu_items = ""
grub_menu_items = ""
# For now, profiles are the only items we want grub EFI boot menu entries for:
for profile in profile_list:
if not profile.enable_menu:
# This profile has been excluded from the menu
continue
distro = profile.get_conceptual_parent()
if distro.name.find('exenta') != -1:
# nexenta has a separate menu
continue
contents = self.write_pxe_file(
filename=None,
system=None, profile=profile, distro=distro, arch=distro.arch,
include_header=False)
if contents is not None:
pxe_menu_items += contents + "\n"
grub_contents = self.write_pxe_file(
filename=None,
system=None, profile=profile, distro=distro, arch=distro.arch,
include_header=False, format="grub")
if grub_contents is not None:
grub_menu_items += grub_contents + "\n"
# image names towards the bottom
for image in image_list:
if os.path.exists(image.file):
contents = self.write_pxe_file(
filename=None,
system=None, profile=None, distro=None, arch=image.arch,
image=image)
if contents is not None:
pxe_menu_items += contents + "\n"
return {'pxe': pxe_menu_items, 'grub': grub_menu_items}
def get_menu_items_nexenta(self):
"""
Generates menu items for nexenta
"""
# sort the profiles
profile_list = [profile for profile in self.profiles]
def sort_name(a, b):
return cmp(a.name, b.name)
profile_list.sort(sort_name)
# Build out menu items and append each to this master list, used for
# the default menus:
pxe_menu_items = ""
grub_menu_items = ""
# For now, profiles are the only items we want grub EFI boot menu entries for:
for profile in profile_list:
if not profile.enable_menu:
# This profile has been excluded from the menu
continue
distro = profile.get_conceptual_parent()
if distro.name.find('nexenta') != -1:
contents = self.write_pxe_file(
filename=None,
system=None, profile=profile, distro=distro, arch=distro.arch,
include_header=False)
if contents is not None:
pxe_menu_items += contents + "\n"
grub_contents = self.write_pxe_file(
filename=None,
system=None, profile=profile, distro=distro, arch=distro.arch,
include_header=False, format="nexenta")
if grub_contents is not None:
grub_menu_items += grub_contents + "\n"
return {'pxe': pxe_menu_items, 'grub': grub_menu_items}
def make_actual_pxe_menu(self):
"""
Generates both pxe and grub boot menus.
"""
# only do this if there is NOT a system named default.
default = self.systems.find(name="default")
if default is None:
timeout_action = "local"
else:
timeout_action = default.profile
menu_items = self.get_menu_items()
# Write the PXE menu:
metadata = {"pxe_menu_items": menu_items['pxe'], "pxe_timeout_profile": timeout_action}
outfile = os.path.join(self.bootloc, "pxelinux.cfg", "default")
template_src = open(os.path.join(self.settings.boot_loader_conf_template_dir, "pxedefault.template"))
template_data = template_src.read()
self.templar.render(template_data, metadata, outfile, None)
template_src.close()
# Write the grub menu:
metadata = {"grub_menu_items": menu_items['grub']}
outfile = os.path.join(self.bootloc, "grub", "efidefault")
template_src = open(os.path.join(self.settings.boot_loader_conf_template_dir, "efidefault.template"))
template_data = template_src.read()
self.templar.render(template_data, metadata, outfile, None)
template_src.close()
# write the nexenta menu
menu_items = self.get_menu_items_nexenta()
metadata = {"grub_menu_items": menu_items['grub']}
outfile = os.path.join(self.bootloc, "boot", 'grub', 'menu.lst')
template_src = open(os.path.join(self.settings.boot_loader_conf_template_dir, "nexenta_grub_menu.template"))
template_data = template_src.read()
self.templar.render(template_data, metadata, outfile, None)
template_src.close()
def write_pxe_file(self, filename, system, profile, distro, arch,
image=None, include_header=True, metadata=None, format="pxe"):
"""
Write a configuration file for the boot loader(s).
More system-specific configuration may come in later, if so
that would appear inside the system object in api.py
Can be used for different formats, "pxe" (default) and "grub".
"""
if arch is None:
raise "missing arch"
if image and not os.path.exists(image.file):
return None # nfs:// URLs or something, can't use for TFTP
if metadata is None:
metadata = {}
(rval, settings) = utils.input_string_or_dict(self.settings.to_dict())
if rval:
for key in settings.keys():
metadata[key] = settings[key]
# ---
# just some random variables
template = None
buffer = ""
# ---
autoinstall_path = None
kernel_path = None
initrd_path = None
img_path = None
if image is None:
# not image based, it's something normalish
img_path = os.path.join("/images", distro.name)
if 'nexenta' == distro.breed:
kernel_path = os.path.join("/images", distro.name, 'platform', 'i86pc', 'kernel', 'amd64', os.path.basename(distro.kernel))
initrd_path = os.path.join("/images", distro.name, 'platform', 'i86pc', 'amd64', os.path.basename(distro.initrd))
elif 'http' in distro.kernel and 'http' in distro.initrd:
kernel_path = distro.kernel
initrd_path = distro.initrd
else:
kernel_path = os.path.join("/images", distro.name, os.path.basename(distro.kernel))
initrd_path = os.path.join("/images", distro.name, os.path.basename(distro.initrd))
# Find the automatic installation file if we inherit from another profile
if system:
blended = utils.blender(self.api, True, system)
else:
blended = utils.blender(self.api, True, profile)
autoinstall_path = blended.get("autoinstall", "")
# update metadata with all known information
# this allows for more powerful templating
metadata.update(blended)
else:
# this is an image we are making available, not kernel+initrd
if image.image_type == "direct":
kernel_path = os.path.join("/images2", image.name)
elif image.image_type == "memdisk":
kernel_path = "/memdisk"
initrd_path = os.path.join("/images2", image.name)
else:
# CD-ROM ISO or virt-clone image? We can't PXE boot it.
kernel_path = None
initrd_path = None
if img_path is not None and "img_path" not in metadata:
metadata["img_path"] = img_path
if kernel_path is not None and "kernel_path" not in metadata:
metadata["kernel_path"] = kernel_path
if initrd_path is not None and "initrd_path" not in metadata:
metadata["initrd_path"] = initrd_path
# ---
# choose a template
if system:
if format == "grub":
if system.netboot_enabled:
template = os.path.join(self.settings.boot_loader_conf_template_dir, "grubsystem.template")
else:
local = os.path.join(self.settings.boot_loader_conf_template_dir, "grublocal.template")
if os.path.exists(local):
template = local
else: # pxe
if system.netboot_enabled:
template = os.path.join(self.settings.boot_loader_conf_template_dir, "pxesystem.template")
if arch.startswith("ppc"):
# to inherit the distro and system's boot_loader values correctly
blended_system = utils.blender(self.api, False, system)
if blended_system["boot_loader"] == "pxelinux":
template = os.path.join(self.settings.boot_loader_conf_template_dir, "pxesystem_ppc.template")
elif distro.boot_loader == "grub2" or blended_system["boot_loader"] == "grub2":
template = os.path.join(self.settings.boot_loader_conf_template_dir, "grub2_ppc.template")
else:
template = os.path.join(self.settings.boot_loader_conf_template_dir, "yaboot_ppc.template")
elif arch.startswith("arm"):
template = os.path.join(self.settings.boot_loader_conf_template_dir, "pxesystem_arm.template")
elif distro and distro.os_version.startswith("esxi"):
# ESXi uses a very different pxe method, using more files than
# a standard automatic installation file and different options -
# so giving it a dedicated PXE template makes more sense than
# shoe-horning it into the existing templates
template = os.path.join(self.settings.boot_loader_conf_template_dir, "pxesystem_esxi.template")
else:
# local booting on ppc requires removing the system-specific dhcpd.conf filename
if arch is not None and arch.startswith("ppc"):
# Disable yaboot network booting for all interfaces on the system
for (name, interface) in system.interfaces.iteritems():
filename = "%s" % utils.get_config_filename(system, interface=name).lower()
# Remove symlink to the yaboot binary
f3 = os.path.join(self.bootloc, "ppc", filename)
if os.path.lexists(f3):
utils.rmfile(f3)
# Remove the interface-specific config file
f3 = os.path.join(self.bootloc, "boot/grub", "grub.cfg-" + filename)
if os.path.lexists(f3):
utils.rmfile(f3)
f3 = os.path.join(self.bootloc, "etc", filename)
if os.path.lexists(f3):
utils.rmfile(f3)
# Yaboot/OF doesn't support booting locally once you've
# booted off the network, so nothing left to do
return None
else:
template = os.path.join(self.settings.boot_loader_conf_template_dir, "pxelocal.template")
else:
# not a system record, so this is a profile record or an image
if arch.startswith("arm"):
template = os.path.join(self.settings.boot_loader_conf_template_dir, "pxeprofile_arm.template")
elif format == "grub":
template = os.path.join(self.settings.boot_loader_conf_template_dir, "grubprofile.template")
elif distro and distro.os_version.startswith("esxi"):
# ESXi uses a very different pxe method, see comment above in the system section
template = os.path.join(self.settings.boot_loader_conf_template_dir, "pxeprofile_esxi.template")
elif 'nexenta' == format:
template = os.path.join(self.settings.boot_loader_conf_template_dir, 'nexenta_profile.template')
else:
template = os.path.join(self.settings.boot_loader_conf_template_dir, "pxeprofile.template")
if kernel_path is not None:
metadata["kernel_path"] = kernel_path
if initrd_path is not None:
metadata["initrd_path"] = initrd_path
# generate the kernel options and append line:
kernel_options = self.build_kernel_options(system, profile, distro,
image, arch, autoinstall_path)
metadata["kernel_options"] = kernel_options
if distro and distro.os_version.startswith("esxi") and filename is not None:
append_line = "BOOTIF=%s" % (os.path.basename(filename))
elif "initrd_path" in metadata and (not arch or arch not in ["ppc", "ppc64", "arm"]):
append_line = "append initrd=%s" % (metadata["initrd_path"])
else:
append_line = "append "
append_line = "%s%s" % (append_line, kernel_options)
if arch.startswith("ppc"):
# remove the prefix "append"
# TODO: this looks like it's removing more than append, really
# not sure what's up here...
append_line = append_line[7:]
if distro and distro.os_version.startswith("xenserver620"):
append_line = "%s" % (kernel_options)
metadata["append_line"] = append_line
# store variables for templating
metadata["menu_label"] = ""
if profile:
if arch not in ["ppc", "ppc64"]:
metadata["menu_label"] = "MENU LABEL %s" % profile.name
metadata["profile_name"] = profile.name
elif image:
metadata["menu_label"] = "MENU LABEL %s" % image.name
metadata["profile_name"] = image.name
if system:
metadata["system_name"] = system.name
# get the template
if kernel_path is not None:
template_fh = open(template)
template_data = template_fh.read()
template_fh.close()
else:
# this is something we can't PXE boot
template_data = "\n"
# save file and/or return results, depending on how called.
buffer = self.templar.render(template_data, metadata, None)
if filename is not None:
self.logger.info("generating: %s" % filename)
fd = open(filename, "w")
fd.write(buffer)
fd.close()
return buffer
def build_kernel_options(self, system, profile, distro, image, arch,
autoinstall_path):
"""
Builds the full kernel options line.
"""
management_interface = None
if system is not None:
blended = utils.blender(self.api, False, system)
# find the first management interface
try:
for intf in system.interfaces.keys():
if system.interfaces[intf]["management"]:
management_interface = intf
break
except:
# just skip this then
pass
elif profile is not None:
blended = utils.blender(self.api, False, profile)
else:
blended = utils.blender(self.api, False, image)
append_line = ""
kopts = blended.get("kernel_options", dict())
kopts = utils.revert_strip_none(kopts)
        # since the network needs to be configured again when the kernel boots (it
        # was already configured during netboot) and we choose to do it dynamically,
        # we need to set 'ksdevice' to one of the interfaces' MAC addresses on ppc systems.
# ksdevice=bootif is not useful in yaboot, as the "ipappend" line is a pxe feature.
if system and arch and "ppc" in arch:
for intf in system.interfaces.keys():
# use first interface with defined IP and MAC, since these are required
# fields in a DHCP entry
mac_address = system.interfaces[intf]['mac_address']
ip_address = system.interfaces[intf]['ip_address']
if mac_address and ip_address:
kopts['BOOTIF'] = '01-' + mac_address
kopts['ksdevice'] = mac_address
break
# support additional initrd= entries in kernel options.
if "initrd" in kopts:
append_line = ",%s" % kopts.pop("initrd")
hkopts = utils.dict_to_string(kopts)
append_line = "%s %s" % (append_line, hkopts)
# automatic installation file path rewriting (get URLs for local files)
if autoinstall_path is not None and autoinstall_path != "":
# FIXME: need to make shorter rewrite rules for these URLs
try:
ipaddress = socket.gethostbyname_ex(blended["http_server"])[2][0]
except socket.gaierror:
ipaddress = blended["http_server"]
URL_REGEX = "[a-zA-Z]*://.*"
local_autoinstall_file = not re.match(URL_REGEX, autoinstall_path)
if local_autoinstall_file:
if system is not None:
autoinstall_path = "http://%s/cblr/svc/op/autoinstall/system/%s" % (ipaddress, system.name)
else:
autoinstall_path = "http://%s/cblr/svc/op/autoinstall/profile/%s" % (ipaddress, profile.name)
if distro.breed is None or distro.breed == "redhat":
append_line += " kssendmac"
append_line = "%s ks=%s" % (append_line, autoinstall_path)
gpxe = blended["enable_gpxe"]
if gpxe:
append_line = append_line.replace('ksdevice=bootif', 'ksdevice=${net0/mac}')
elif distro.breed == "suse":
append_line = "%s autoyast=%s" % (append_line, autoinstall_path)
elif distro.breed == "debian" or distro.breed == "ubuntu":
append_line = "%s auto-install/enable=true priority=critical netcfg/choose_interface=auto url=%s" % (append_line, autoinstall_path)
if management_interface:
append_line += " netcfg/choose_interface=%s" % management_interface
elif distro.breed == "freebsd":
append_line = "%s ks=%s" % (append_line, autoinstall_path)
# rework kernel options for debian distros
translations = {'ksdevice': "interface", 'lang': "locale"}
for k, v in translations.iteritems():
append_line = append_line.replace("%s=" % k, "%s=" % v)
# interface=bootif causes a failure
append_line = append_line.replace("interface=bootif", "")
elif distro.breed == "vmware":
if distro.os_version.find("esxi") != -1:
                # ESXi is very picky, so it's easier just to redo the
                # entire append line here from scratch
append_line = " ks=%s %s" % (autoinstall_path, hkopts)
# ESXi likes even fewer options, so we remove them too
append_line = append_line.replace("kssendmac", "")
else:
append_line = "%s vmkopts=debugLogToSerial:1 mem=512M ks=%s" % \
(append_line, autoinstall_path)
# interface=bootif causes a failure
append_line = append_line.replace("ksdevice=bootif", "")
elif distro.breed == "xen":
if distro.os_version.find("xenserver620") != -1:
img_path = os.path.join("/images", distro.name)
append_line = "append %s/xen.gz dom0_max_vcpus=2 dom0_mem=752M com1=115200,8n1 console=com1,vga --- %s/vmlinuz xencons=hvc console=hvc0 console=tty0 install answerfile=%s --- %s/install.img" % (img_path, img_path, autoinstall_path, img_path)
return append_line
elif distro.breed == "powerkvm":
append_line += " kssendmac"
append_line = "%s kvmp.inst.auto=%s" % (append_line, autoinstall_path)
if distro is not None and (distro.breed in ["debian", "ubuntu"]):
# Hostname is required as a parameter, the one in the preseed is
# not respected, so calculate if we have one here.
# We're trying: first part of FQDN in hostname field, then system
# name, then profile name.
# In Ubuntu, this is at least used for the volume group name when
# using LVM.
domain = "local.lan"
if system is not None:
if system.hostname is not None and system.hostname != "":
# If this is a FQDN, grab the first bit
hostname = system.hostname.split(".")[0]
_domain = system.hostname.split(".")[1:]
if _domain:
domain = ".".join(_domain)
else:
hostname = system.name
else:
# ubuntu at the very least does not like having underscores
# in the hostname.
# FIXME: Really this should remove all characters that are
# forbidden in hostnames
hostname = profile.name.replace("_", "")
# At least for debian deployments configured for DHCP networking
# this values are not used, but specifying here avoids questions
append_line = "%s hostname=%s" % (append_line, hostname)
append_line = "%s domain=%s" % (append_line, domain)
# A similar issue exists with suite name, as installer requires
# the existence of "stable" in the dists directory
append_line = "%s suite=%s" % (append_line, distro.os_version)
elif distro is not None and distro.breed == 'nexenta':
append_line = "-B iso_nfs_path=%s:/var/www/cobbler/links/%s,auto_install=1" % (blended['next_server'], distro.name)
# append necessary kernel args for arm architectures
if arch is not None and arch.startswith("arm"):
append_line = "%s fixrtc vram=48M omapfb.vram=0:24M" % append_line
# do variable substitution on the append line
# promote all of the autoinstall_meta variables
if "autoinstall_meta" in blended:
blended.update(blended["autoinstall_meta"])
append_line = self.templar.render(append_line, utils.flatten(blended), None)
# FIXME - the append_line length limit is architecture specific
if len(append_line) >= 255:
self.logger.warning("warning: kernel option length exceeds 255")
return append_line
def write_templates(self, obj, write_file=False, path=None):
"""
A semi-generic function that will take an object
        with a template_files dict {source:destination}, and
generate a rendered file. The write_file option
allows for generating of the rendered output without
actually creating any files.
The return value is a dict of the destination file
names (after variable substitution is done) and the
data in the file.
"""
self.logger.info("Writing template files for %s" % obj.name)
results = {}
try:
templates = obj.template_files
except:
return results
blended = utils.blender(self.api, False, obj)
if obj.COLLECTION_TYPE == "distro":
if re.search("esxi[56]", obj.os_version) is not None:
realbootcfg = open(os.path.join(os.path.dirname(obj.kernel), 'boot.cfg')).read()
bootmodules = re.findall(r'modules=(.*)', realbootcfg)
for modules in bootmodules:
blended['esx_modules'] = modules.replace('/', '')
autoinstall_meta = blended.get("autoinstall_meta", {})
try:
del blended["autoinstall_meta"]
except:
pass
blended.update(autoinstall_meta) # make available at top level
templates = blended.get("template_files", {})
try:
del blended["template_files"]
except:
pass
blended.update(templates) # make available at top level
(success, templates) = utils.input_string_or_dict(templates)
if not success:
return results
# FIXME: img_path and local_img_path should probably be moved
# up into the blender function to ensure they're consistently
# available to templates across the board
if blended["distro_name"]:
blended['img_path'] = os.path.join("/images", blended["distro_name"])
blended['local_img_path'] = os.path.join(utils.tftpboot_location(), "images", blended["distro_name"])
for template in templates.keys():
dest = templates[template]
if dest is None:
continue
# Run the source and destination files through
# templar first to allow for variables in the path
template = self.templar.render(template, blended, None).strip()
dest = os.path.normpath(self.templar.render(dest, blended, None).strip())
# Get the path for the destination output
dest_dir = os.path.normpath(os.path.dirname(dest))
# If we're looking for a single template, skip if this ones
# destination is not it.
if path is not None and path != dest:
continue
            # If we are writing output to a file, we allow files to be
# written into the tftpboot directory, otherwise force all
# templated configs into the rendered directory to ensure that
# a user granted cobbler privileges via sudo can't overwrite
# arbitrary system files (This also makes cleanup easier).
if os.path.isabs(dest_dir) and write_file:
if dest_dir.find(utils.tftpboot_location()) != 0:
raise CX(" warning: template destination (%s) is outside %s, skipping." % (dest_dir, utils.tftpboot_location()))
continue
elif write_file:
dest_dir = os.path.join(self.settings.webdir, "rendered", dest_dir)
dest = os.path.join(dest_dir, os.path.basename(dest))
if not os.path.exists(dest_dir):
utils.mkdir(dest_dir)
# Check for problems
if not os.path.exists(template):
raise CX("template source %s does not exist" % template)
continue
elif write_file and not os.path.isdir(dest_dir):
raise CX("template destination (%s) is invalid" % dest_dir)
continue
elif write_file and os.path.exists(dest):
raise CX("template destination (%s) already exists" % dest)
continue
elif write_file and os.path.isdir(dest):
raise CX("template destination (%s) is a directory" % dest)
continue
elif template == "" or dest == "":
raise CX("either the template source or destination was blank (unknown variable used?)" % dest)
continue
template_fh = open(template)
template_data = template_fh.read()
template_fh.close()
buffer = self.templar.render(template_data, blended, None)
results[dest] = buffer
if write_file:
self.logger.info("generating: %s" % dest)
fd = open(dest, "w")
fd.write(buffer)
fd.close()
return results
def generate_gpxe(self, what, name):
if what.lower() not in ("profile", "system"):
return "# gpxe is only valid for profiles and systems"
distro = None
if what == "profile":
obj = self.api.find_profile(name=name)
distro = obj.get_conceptual_parent()
else:
obj = self.api.find_system(name=name)
distro = obj.get_conceptual_parent().get_conceptual_parent()
netboot_enabled = obj.netboot_enabled
# For multi-arch distros, the distro name in distro_mirror
# may not contain the arch string, so we need to figure out
# the path based on where the kernel is stored. We do this
# because some distros base future downloads on the initial
# URL passed in, so all of the files need to be at this location
# (which is why we can't use the images link, which just contains
# the kernel and initrd).
distro_mirror_name = string.join(distro.kernel.split('/')[-2:-1], '')
blended = utils.blender(self.api, False, obj)
autoinstall_meta = blended.get("autoinstall_meta", {})
try:
del blended["autoinstall_meta"]
except:
pass
blended.update(autoinstall_meta) # make available at top level
blended['distro'] = distro.name
blended['distro_mirror_name'] = distro_mirror_name
blended['kernel_name'] = os.path.basename(distro.kernel)
blended['initrd_name'] = os.path.basename(distro.initrd)
if what == "profile":
blended['append_line'] = self.build_kernel_options(None, obj, distro, None, None, blended['autoinstall'])
else:
blended['append_line'] = self.build_kernel_options(obj, None, distro, None, None, blended['autoinstall'])
template = None
if distro.breed in ['redhat', 'debian', 'ubuntu', 'suse']:
# all of these use a standard kernel/initrd setup so
# they all use the same gPXE template
template = os.path.join(self.settings.boot_loader_conf_template_dir, "gpxe_%s_linux.template" % what.lower())
elif distro.breed == 'vmware':
if distro.os_version == 'esx4':
# older ESX is pretty much RHEL, so it uses the standard kernel/initrd setup
template = os.path.join(self.settings.boot_loader_conf_template_dir, "gpxe_%s_linux.template" % what.lower())
elif distro.os_version == 'esxi4':
template = os.path.join(self.settings.boot_loader_conf_template_dir, "gpxe_%s_esxi4.template" % what.lower())
elif distro.os_version.startswith('esxi5'):
template = os.path.join(self.settings.boot_loader_conf_template_dir, "gpxe_%s_esxi5.template" % what.lower())
elif distro.os_version.startswith('esxi6'):
template = os.path.join(self.settings.boot_loader_conf_template_dir, "gpxe_%s_esxi6.template" % what.lower())
elif distro.breed == 'freebsd':
template = os.path.join(self.settings.boot_loader_conf_template_dir, "gpxe_%s_freebsd.template" % what.lower())
elif distro.breed == 'windows':
template = os.path.join(self.settings.boot_loader_conf_template_dir, "gpxe_%s_windows.template" % what.lower())
if what == "system":
if not netboot_enabled:
template = os.path.join(self.settings.boot_loader_conf_template_dir, "gpxe_%s_local.template" % what.lower())
if not template:
return "# unsupported breed/os version"
if not os.path.exists(template):
return "# gpxe template not found for the %s named %s (filename=%s)" % (what, name, template)
template_fh = open(template)
template_data = template_fh.read()
template_fh.close()
return self.templar.render(template_data, blended, None)
def generate_bootcfg(self, what, name):
if what.lower() not in ("profile", "system"):
return "# bootcfg is only valid for profiles and systems"
distro = None
if what == "profile":
obj = self.api.find_profile(name=name)
distro = obj.get_conceptual_parent()
else:
obj = self.api.find_system(name=name)
distro = obj.get_conceptual_parent().get_conceptual_parent()
# For multi-arch distros, the distro name in distro_mirror
# may not contain the arch string, so we need to figure out
# the path based on where the kernel is stored. We do this
# because some distros base future downloads on the initial
# URL passed in, so all of the files need to be at this location
# (which is why we can't use the images link, which just contains
# the kernel and initrd).
distro_mirror_name = string.join(distro.kernel.split('/')[-2:-1], '')
blended = utils.blender(self.api, False, obj)
autoinstall_meta = blended.get("autoinstall_meta", {})
try:
del blended["autoinstall_meta"]
except:
pass
blended.update(autoinstall_meta) # make available at top level
blended['distro'] = distro_mirror_name
# FIXME: img_path should probably be moved up into the
# blender function to ensure they're consistently
# available to templates across the board
if obj.enable_gpxe:
blended['img_path'] = 'http://%s:%s/cobbler/links/%s' % (self.settings.server, self.settings.http_port, distro.name)
else:
blended['img_path'] = os.path.join("/images", distro.name)
template = os.path.join(self.settings.boot_loader_conf_template_dir, "bootcfg_%s_%s.template" % (what.lower(), distro.os_version))
if not os.path.exists(template):
return "# boot.cfg template not found for the %s named %s (filename=%s)" % (what, name, template)
template_fh = open(template)
template_data = template_fh.read()
template_fh.close()
return self.templar.render(template_data, blended, None)
def generate_script(self, what, objname, script_name):
if what == "profile":
obj = self.api.find_profile(name=objname)
else:
obj = self.api.find_system(name=objname)
if not obj:
return "# %s named %s not found" % (what, objname)
distro = obj.get_conceptual_parent()
while distro.get_conceptual_parent():
distro = distro.get_conceptual_parent()
blended = utils.blender(self.api, False, obj)
autoinstall_meta = blended.get("autoinstall_meta", {})
try:
del blended["autoinstall_meta"]
except:
pass
blended.update(autoinstall_meta) # make available at top level
# FIXME: img_path should probably be moved up into the
# blender function to ensure they're consistently
# available to templates across the board
if obj.enable_gpxe:
blended['img_path'] = 'http://%s:%s/cobbler/links/%s' % (self.settings.server, self.settings.http_port, distro.name)
else:
blended['img_path'] = os.path.join("/images", distro.name)
template = os.path.normpath(os.path.join("/var/lib/cobbler/autoinstall_scripts", script_name))
if not os.path.exists(template):
return "# script template %s not found" % script_name
template_fh = open(template)
template_data = template_fh.read()
template_fh.close()
return self.templar.render(template_data, blended, None, obj)
| gpl-2.0 | 3,414,772,886,900,080,000 | 44.411275 | 261 | 0.570164 | false |
NuGrid/NuPyCEE | chem_evol.py | 1 | 325358 | # coding=utf-8
from __future__ import (division, print_function, absolute_import,
unicode_literals)
'''
Chemical Evolution - chem_evol.py
Functionality
=============
This is the superclass inherited by the SYGMA and the OMEGA modules. It provides
common functions for initialization and for the evolution of one single timestep.
Made by
=======
MAY2015: B. Cote
The core of this superclass is a reorganization of the functions previously found in
earlier versions of SYGMA:
v0.1 NOV2013: C. Fryer, C. Ritter
v0.2 JAN2014: C. Ritter
v0.3 APR2014: C. Ritter, J. F. Navarro, F. Herwig, C. Fryer, E. Starkenburg,
M. Pignatari, S. Jones, K. Venn, P. A. Denissenkov & the
NuGrid collaboration
v0.4 FEB2015: C. Ritter, B. Cote
v0.5 OCT2016: B. Cote, C. Ritter, A. Paul
Stop keeping track of versions from now on.
MARCH2018: B. Cote
- Switched to Python 3
- Capability to include radioactive isotopes
JULY2018: B. Cote & R. Sarmento
- Re-wrote (improved) yield and lifetime treatment (B. Cote)
- PopIII IMF and yields update (R. Sarmento)
JAN2019: B. Cote
- Re-included radioactive isotopes with the new (improved) yield treatment
FEB2019: A. Yagüe, B. Cote
- Optimized to code to run faster (integration method)
Usage
=====
See sygma.py and omega.py
'''
# Standard packages
import numpy as np
import time as t_module
import copy
import os
import sys
import re
from pylab import polyfit
from scipy.integrate import quad
from scipy.integrate import dblquad
# Define where is the working directory
nupy_path = os.path.dirname(os.path.realpath(__file__))
# Import NuPyCEE codes
import NuPyCEE.read_yields as ry
class chem_evol(object):
'''
Input parameters (chem_evol.py)
================
special_timesteps : integer
Number of special timesteps. This option (already activated by default)
is activated when special_timesteps > 0. It uses a logarithm timestep
scheme which increases the duration of timesteps throughout the simulation.
Default value : 30
dt : float
Duration of the first timestep [yr] if special_timesteps is activated.
Duration of each timestep if special_timesteps is desactivated.
Default value : 1.0e6
tend : float
Total duration of the simulation [yr].
Default value : 13.0e9
dt_split_info : numpy array
Information regarding the creation of a timestep array with varying step size.
Array format : dt_split_info[number of conditions][0->dt,1->upper time limit]
Example : dt_split_info = [[1e6,40e6],[1e8,13e9]] means the timesteps will be
of 1 Myr until the time reaches 40 Myr, after which the timesteps
will be of 100 Myr until the time reaches 13 Gyr. The number of
"split" is unlimited, but the array must be in chronological order.
Default value : [] --> Not taken into account
imf_bdys : list
Upper and lower mass limits of the initial mass function (IMF) [Mo].
Default value : [0.1,100]
imf_yields_range : list
Initial mass of stars that contribute to stellar ejecta [Mo].
Default value : [1,30]
imf_type : string
Choices : 'salpeter', 'chabrier', 'kroupa', 'alphaimf', 'lognormal'
'alphaimf' creates a custom IMF with a single power-law covering imf_bdys.
'lognormal' creates an IMF of the form Exp[-1/(2 sigma^2) Log[x/charMass]^2]
Default value : 'kroupa'
alphaimf : float
Aplha index of the custom IMF, dN/dM = Constant * M^-alphaimf
Default value : 2.35
imf_bdys_pop3 : list
Upper and lower mass limits of the IMF of PopIII stars [Mo].
Default value : [0.1,100]
imf_yields_range_pop3 : list
Initial mass of stars that contribute to PopIII stellar ejecta [Mo].
PopIII stars ejecta taken from Heger et al. (2010)
Default value : [10,30]
imf_pop3_char_mass : float
The characteristic mass in a log normal IMF distribution.
Default value : 40.0
high_mass_extrapolation : string
Extrapolation technique used to extrapolate yields for stars more
massive than the most massive model (MMM) present in the yields table.
Choices:
"copy" --> This will apply the yields of the most massive model
to all more massive stars.
"scale" --> This will scale the yields of the most massive model
using the relation between the total ejected mass and
the initial stellar mass. The later relation is taken
from the interpolation of the two most massive models.
"extrapolate" --> This will extrapolate the yields of the most massive
model using the interpolation coefficients taken from
the interpolation of the two most massive models.
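Example (illustrative) : if the most massive model in the yield table is
25 Mo, a 40 Mo star receives the 25 Mo yields unchanged with "copy",
the 25 Mo yields rescaled by the extrapolated total ejected mass with
"scale", or yields extrapolated from the coefficients of the two most
massive models with "extrapolate".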
Default value : "copy"
iniZ : float
Initial metallicity of the gas in mass fraction (e.g. Solar Z = 0.02).
Choices : 0.0, 0.0001, 0.001, 0.006, 0.01, 0.02
(-1.0 to use non-default yield tables)
Default value : 0.0
Z_trans : float
Variable used when interpolating stellar yields as a function of Z.
Transition Z below which PopIII yields are used, and above which default
yields are used.
Default value : -1 (not active)
mgal : float
Initial mass of gas in the simulation [Mo].
Default value : 1.6e11
sn1a_on : boolean
True or False to include or exclude the contribution of SNe Ia.
Default value : True
sn1a_rate : string
SN Ia delay-time distribution function used to calculate the SN Ia rate.
Choices :
'power_law' - custom power law, set parameter with beta_pow (similar to Maoz & Mannucci 2012)
'gauss' - gaussian DTD, set parameter with gauss_dtd
'exp' - exponential DTD, set parameter with exp_dtd
'maoz' - specific power law from Maoz & Mannucci (2012)
Default value : 'power_law'
sn1a_energy : float
Energy ejected by single SNIa event. Units in erg.
Default value : 1e51
ns_merger_on : boolean
True or False to include or exclude the contribution of neutron star mergers.
Note : If t_nsm_coal or nsm_dtd_power is not used (see below), the delay time
distribution of neutron star mergers is given by the standard population synthesis
models of Dominik et al. (2012), using Z = 0.002 and Z = 0.02. In this case, the
total number of neutron star mergers can be tuned using f_binary and f_merger
(see below).
Default value : False
f_binary : float
Binary fraction for massive stars used to determine the total number of neutron
star mergers in a simple stellar population.
Default value : 1.0
f_merger : float
Fraction of massive star binary systems that lead to neutron star mergers in a
simple stellar population.
Default value : 0.0008
beta_pow : float
Slope of the power law for custom SN Ia rate, R = Constant * t^-beta_pow.
Default value : -1.0
gauss_dtd : list
Contains parameter for the gaussian DTD: first the characteristic time [yrs] (gaussian center)
and then the width of the distribution [yrs].
Default value : [3.3e9,6.6e8]
exp_dtd : float
Characteristic delay time [yrs] for the e-folding DTD.
nb_1a_per_m : float
Number of SNe Ia per stellar mass formed in a simple stellar population.
Default value : 1.0e-03
direct_norm_1a : float
Normalization coefficient for SNIa rate integral.
Default: deactivated, but it replaces the usage of the nb_1a_per_m parameter when its value is larger than zero.
transitionmass : float
Initial mass which marks the transition from AGB to massive stars [Mo].
Default value : 8.0
exclude_masses : list
Contains initial masses in yield tables to be excluded from the simulation;
Default value : []
table : string
Path pointing toward the stellar yield tables for massive and AGB stars.
Default value : 'yield_tables/agb_and_massive_stars_nugrid_MESAonly_fryer12delay.txt' (NuGrid)
sn1a_table : string
Path pointing toward the stellar yield table for SNe Ia.
Default value : 'yield_tables/sn1a_t86.txt' (Tielemann et al. 1986)
nsmerger_table : string
Path pointing toward the r-process yield tables for neutron star mergers
Default value : 'yield_tables/r_process_arnould_2007.txt'
iniabu_table : string
Path pointing toward the table of initial abundances in mass fraction.
Default value : 'yield_tables/iniabu/iniab2.0E-02GN93.ppn'
yield_tables_dir : string
Path to a custom directory that includes yields.
!! It needs to point to the directory where the yields directory is !!
This will bypass the default yields directory.
Default value : '' --> Deactivated
yield_interp : string
if 'None' : no yield interpolation, no interpolation of total ejecta
if 'lin' - Simple linear yield interpolation.
if 'wiersma' - Interpolation method which makes use of net yields
as used e.g. in Wiersma+ (2009); Does not require net yields.
if netyields_on is true than makes use of given net yields
else calculates net yields from given X0 in yield table.
Default : 'lin'
netyields_on : boolean
if true assumes that yields (input from table parameter)
are net yields.
Default : false.
total_ejecta_interp : boolean
if true then interpolates total ejecta given in yield tables
over initial mass range.
Default : True
stellar_param_on : boolean
if true, reads in the additional stellar parameters given in stellar_param_table.
Default : true in sygma and false in omega
stellar_param_table: string
Path pointing toward the table hosting the evolution of stellar parameters
derived from stellar evolution calculations.
Default table : 'yield_tables/stellar_feedback_nugrid_MESAonly.txt'
iolevel : int
Specifies the amount of output for testing purposes (up to 3).
Default value : 0
poly_fit_dtd : list
Array of polynomial coefficients of a customized delay-time distribution
function for SNe Ia. The polynomial can be of any order.
Example : [0.2, 0.3, 0.1] for rate_snIa(t) = 0.2*t**2 + 0.3*t + 0.1
Note : Must be used with the poly_fit_range parameter (see below)
Default value : np.array([]) --> Deactivated
poly_fit_range : list --> [t_min,t_max]
Time range where the customized delay-time distribution function for
SNe Ia will be applied for a simple stellar population.
Default value : np.array([]) --> Deactivated
mass_sampled : list
Stellar masses that are sampled to eject yields in a stellar population.
Warning : The use of this parameter bypasses the IMF calculation and
does not ensure a correlation with the star formation rate. Each sampled
mass will eject the exact amount of mass given in the stellar yields.
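Example (illustrative) : mass_sampled = np.array([12.0, 15.0, 25.0]) ejects
the yields of one 12 Mo, one 15 Mo, and one 25 Mo model.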
Default value : np.array([]) --> Deactivated
scale_cor : 2D list
Determine the fraction of yields ejected for any given stellar mass bin.
Example : [ [1.0,8], [0.5,100] ] means that stars with initial mass between
0 and 8 Msun will eject 100% of their yields, and stars with initial mass
between 8 and 100 will eject 50% of their yields. There is no limit for
the number of [%,M_upper_limit] arrays used.
Default value : np.array([]) --> Deactivated
t_nsm_coal : float
When greater than zero, t_nsm_coal sets the delay time (since star formation)
after which all neutron star mergers occur in a simple stellar population.
Default value : -1 --> Deactivated
nsm_dtd_power : 3-index array --> [t_min, t_max, slope_of_the_power_law]
When used, nsm_dtd_power defines a delay time distribution for neutron
star mergers in the form of a power law, for a simple stellar population.
Example: [1.e7, 1.e10, -1.] --> t^-1 from 10 Myr to 10 Gyr
Default value : [] --> Deactivated
nb_nsm_per_m : float
Number of neutron star mergers per stellar mass formed in a simple
stellar population.
Note : This parameter is only considered when t_nsm_coal or nsm_dtd_power
is used to define the delay time of neutron star mergers.
Default value : -1 --> Deactivated
m_ej_nsm : float
Mass ejected per neutron star merger event.
Default value : 2.5e-02
yield_modifier : list of arrays --> [[iso, M, Z, type, modifier],[...]]
When used, modifies the yields of the listed isotopes for the given M and Z by
multiplying by a given factor (type="multiply") or replacing the
yield by a new value (type="replace"). Modifier will be either the
factor or value depending on type.
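Example (illustrative) : [['C-12', 15.0, 0.02, 'multiply', 2.0]] doubles the
C-12 yield of the M = 15 Mo, Z = 0.02 model, provided that mass and
metallicity exist in the yield table.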
Default value : [] --> Deactivated
Delayed extra source
Adding source that requires delay-time distribution (DTD) functions
-------------------------------------------------------------------
delayed_extra_dtd : multi-D Numpy array --> [nb_sources][nb_Z]
nb_sources is the number of different input astrophysical sites (e.g.,
SNe Ia, neutron star mergers).
nb_Z is the number of available metallicities.
delayed_extra_dtd[i][j] is a 2D array in the form of
[ number_of_times ][ 0-time, 1-rate ].
Default value : np.array([]), deactivated
delayed_extra_dtd_norm : multi-D Numpy array --> [nb_sources][nb_Z]
Total number of delayed sources occurring per Msun formed,
for each source and each metallicity.
Default value : np.array([]), deactivated
delayed_extra_yields : Numpy array of strings
Path to the yields table for each source.
Default value : np.array([]), deactivated
delayed_extra_yields_norm : multi-D Numpy array --> [nb_sources][nb_Z]
Fraction of the yield table (float) that will be ejected per event,
for each source and each metallicity. This will be the mass ejected
per event if the yields are in mass fraction (normalized to 1).
Default value : np.array([]), deactivated
delayed_extra_stochastic : Numpy array of Boolean --> [nb_sources]
Determine whether the DTD provided as an input needs to be
stochastically sampled using a Monte Carlo technique.
Default value : np.array([]), deactivated
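Example (illustrative only; the values and the yield table are placeholders) :
delayed_extra_dtd = np.array([[ [[1.0e7, 1.0e-10], [1.0e10, 1.0e-12]] ]])
delayed_extra_dtd_norm = np.array([[1.0e-3]])
delayed_extra_yields = np.array(['yield_tables/r_process_arnould_2007.txt'])
delayed_extra_yields_norm = np.array([[0.01]])
describes one source with one metallicity, active between 10 Myr and 10 Gyr,
producing 1.0e-3 event per Msun formed and ejecting 1% of the yield table
per event.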
Run example
===========
See sygma.py and omega.py
'''
##############################################
## Constructor ##
##############################################
def __init__(self, imf_type='kroupa', alphaimf=2.35, imf_bdys=[0.1,100],\
sn1a_rate='power_law', iniZ=0.02, dt=1e6, special_timesteps=30,\
nsmerger_bdys=[8, 100], tend=13e9, mgal=1.6e11, transitionmass=8, iolevel=0,\
ini_alpha=True, is_sygma=False,\
table='yield_tables/agb_and_massive_stars_nugrid_MESAonly_fryer12delay.txt',\
f_network='isotopes_modified.prn', f_format=1,\
table_radio='', decay_file='', sn1a_table_radio='',\
nsmerger_table_radio='',\
hardsetZ=-1, sn1a_on=True, sn1a_table='yield_tables/sn1a_t86.txt',\
sn1a_energy=1e51, ns_merger_on=False,\
f_binary=1.0, f_merger=0.0008, t_merger_max=1.3e10,\
m_ej_nsm = 2.5e-02, nb_nsm_per_m=-1.0,\
t_nsm_coal=-1.0, nsm_dtd_power=[],\
nsmerger_table = 'yield_tables/r_process_arnould_2007.txt',\
iniabu_table='', extra_source_on=False,\
extra_source_table=['yield_tables/extra_source.txt'],\
f_extra_source=[1.0], pre_calculate_SSPs=False,\
extra_source_mass_range=[[8,30]],\
extra_source_exclude_Z=[[]], radio_refinement=100,\
pop3_table='yield_tables/popIII_heger10.txt',\
imf_bdys_pop3=[0.1,100], imf_yields_range_pop3=[10,30],\
imf_pop3_char_mass=40.0,\
use_net_yields_stable=False, use_net_yields_radio=False, \
high_mass_extrapolation='copy',\
use_external_integration=False,\
starbursts=[], beta_pow=-1.0,gauss_dtd=[3.3e9,6.6e8],\
exp_dtd=2e9,nb_1a_per_m=1.0e-3,direct_norm_1a=-1,Z_trans=0.0, \
f_arfo=1, imf_yields_range=[1,30],exclude_masses=[],\
netyields_on=False,wiersmamod=False,yield_interp='lin',\
print_off=False, yield_tables_dir='',\
total_ejecta_interp=True, tau_ferrini=False,\
input_yields=False,t_merge=-1.0,stellar_param_on=False,\
stellar_param_table='yield_tables/stellar_feedback_nugrid_MESAonly.txt',\
popIII_info_fast=True, out_follows_E_rate=False,\
t_dtd_poly_split=-1.0, delayed_extra_log=False,\
delayed_extra_yields_log_int=False,\
delayed_extra_log_radio=False, delayed_extra_yields_log_int_radio=False,\
pritchet_1a_dtd=[], ism_ini=np.array([]), ism_ini_radio=np.array([]),\
nsmerger_dtd_array=np.array([]),\
ytables_in=np.array([]), zm_lifetime_grid_nugrid_in=np.array([]),\
isotopes_in=np.array([]), ytables_pop3_in=np.array([]),\
zm_lifetime_grid_pop3_in=np.array([]), ytables_1a_in=np.array([]),\
ytables_nsmerger_in=np.array([]), dt_in_SSPs=np.array([]),\
dt_in=np.array([]),dt_split_info=np.array([]),\
ej_massive=np.array([]), ej_agb=np.array([]),\
ej_sn1a=np.array([]), ej_massive_coef=np.array([]),\
ej_agb_coef=np.array([]), ej_sn1a_coef=np.array([]),\
dt_ssp=np.array([]), poly_fit_dtd_5th=np.array([]),\
mass_sampled_ssp=np.array([]), scale_cor_ssp=np.array([]),\
poly_fit_range=np.array([]), SSPs_in=np.array([]),\
delayed_extra_dtd=np.array([]), delayed_extra_dtd_norm=np.array([]),\
delayed_extra_yields=np.array([]), delayed_extra_yields_norm=np.array([]),\
delayed_extra_yields_radio=np.array([]),\
delayed_extra_yields_norm_radio=np.array([]),\
delayed_extra_stochastic=np.array([]),\
ytables_radio_in=np.array([]), radio_iso_in=np.array([]),\
ytables_1a_radio_in=np.array([]), ytables_nsmerger_radio_in=np.array([]),\
test_clayton=np.array([]), inter_Z_points=np.array([]),\
nb_inter_Z_points=np.array([]), y_coef_M=np.array([]),\
y_coef_M_ej=np.array([]), y_coef_Z_aM=np.array([]),\
y_coef_Z_bM=np.array([]), y_coef_Z_bM_ej=np.array([]),\
tau_coef_M=np.array([]), tau_coef_M_inv=np.array([]),\
tau_coef_Z_aM=np.array([]), tau_coef_Z_bM=np.array([]),\
tau_coef_Z_aM_inv=np.array([]), tau_coef_Z_bM_inv=np.array([]),\
y_coef_M_pop3=np.array([]), y_coef_M_ej_pop3=np.array([]),\
tau_coef_M_pop3=np.array([]), tau_coef_M_pop3_inv=np.array([]),\
inter_lifetime_points_pop3=np.array([]),\
inter_lifetime_points_pop3_tree=np.array([]),\
nb_inter_lifetime_points_pop3=np.array([]),\
inter_lifetime_points=np.array([]), inter_lifetime_points_tree=np.array([]),\
nb_inter_lifetime_points=np.array([]), nb_inter_M_points_pop3=np.array([]),\
inter_M_points_pop3_tree=np.array([]), nb_inter_M_points=np.array([]),\
inter_M_points=np.array([]), y_coef_Z_aM_ej=np.array([]),
yield_modifier=np.array([])):
# Initialize the history class which keeps the simulation in memory
self.history = History()
self.const = Const()
# Define elements for ordering
self.__define_elements()
# If we need to assume the current baryonic ratio ...
if mgal < 0.0:
# Use a temporary mgal value for chem_evol __init__ function
mgal = 1.0e06
self.bar_ratio = True
# If we use the input mgal parameter ...
else:
self.bar_ratio = False
# Attribute the input parameters to the current object
self.history.mgal = mgal
self.history.tend = tend
self.history.dt = dt
self.history.sn1a_rate = sn1a_rate
self.history.imf_bdys = imf_bdys
self.history.transitionmass = transitionmass
self.history.nsmerger_bdys = nsmerger_bdys
self.history.f_binary = f_binary
self.history.f_merger = f_merger
self.mgal = mgal
self.transitionmass = transitionmass
self.iniZ = iniZ
self.imf_bdys=imf_bdys
self.nsmerger_bdys=nsmerger_bdys
self.popIII_info_fast = popIII_info_fast
self.imf_bdys_pop3=imf_bdys_pop3
self.imf_yields_range_pop3=imf_yields_range_pop3
self.imf_pop3_char_mass=imf_pop3_char_mass
self.high_mass_extrapolation = high_mass_extrapolation
self.extra_source_on = extra_source_on
self.f_extra_source= f_extra_source
self.extra_source_mass_range=extra_source_mass_range
self.extra_source_exclude_Z=extra_source_exclude_Z
self.pre_calculate_SSPs = pre_calculate_SSPs
self.SSPs_in = SSPs_in
self.table = table
self.iniabu_table = iniabu_table
self.sn1a_table = sn1a_table
self.nsmerger_table = nsmerger_table
self.extra_source_table = extra_source_table
self.pop3_table = pop3_table
self.hardsetZ = hardsetZ
self.starbursts = starbursts
self.imf_type = imf_type
self.alphaimf = alphaimf
self.sn1a_on = sn1a_on
self.sn1a_energy=sn1a_energy
self.ns_merger_on = ns_merger_on
self.nsmerger_dtd_array = nsmerger_dtd_array
self.len_nsmerger_dtd_array = len(nsmerger_dtd_array)
self.f_binary = f_binary
self.f_merger = f_merger
self.t_merger_max = t_merger_max
self.m_ej_nsm = m_ej_nsm
self.nb_nsm_per_m = nb_nsm_per_m
self.t_nsm_coal = t_nsm_coal
self.nsm_dtd_power = nsm_dtd_power
self.special_timesteps = special_timesteps
self.iolevel = iolevel
self.nb_1a_per_m = nb_1a_per_m
self.direct_norm_1a=direct_norm_1a
self.Z_trans = Z_trans
if sn1a_rate == 'maoz':
self.beta_pow = -1.0
else:
self.beta_pow = beta_pow
self.gauss_dtd = gauss_dtd
self.exp_dtd=exp_dtd
self.normalized = False # To avoid normalizing SN Ia rate more than once
self.nsm_normalized = False # To avoid normalizing NS merger rate more than once
self.f_arfo = f_arfo
self.imf_yields_range = imf_yields_range
self.exclude_masses=exclude_masses
self.netyields_on=netyields_on
self.wiersmamod=wiersmamod
self.yield_interp=yield_interp
self.out_follows_E_rate = out_follows_E_rate
self.total_ejecta_interp=total_ejecta_interp
self.tau_ferrini = tau_ferrini
self.t_merge = t_merge
self.ism_ini = ism_ini
self.ism_ini_radio = ism_ini_radio
self.dt_in = dt_in
self.dt_in_SSPs = dt_in_SSPs
self.dt_split_info = dt_split_info
self.t_dtd_poly_split = t_dtd_poly_split
self.poly_fit_dtd_5th = poly_fit_dtd_5th
self.poly_fit_range = poly_fit_range
self.stellar_param_table = stellar_param_table
self.stellar_param_on = stellar_param_on
self.delayed_extra_log = delayed_extra_log
self.delayed_extra_dtd = delayed_extra_dtd
self.delayed_extra_dtd_norm = delayed_extra_dtd_norm
self.delayed_extra_yields = delayed_extra_yields
self.delayed_extra_yields_norm = delayed_extra_yields_norm
self.delayed_extra_yields_log_int = delayed_extra_yields_log_int
self.delayed_extra_stochastic = delayed_extra_stochastic
self.nb_delayed_extra = len(self.delayed_extra_dtd)
self.pritchet_1a_dtd = pritchet_1a_dtd
self.len_pritchet_1a_dtd = len(pritchet_1a_dtd)
self.use_external_integration = use_external_integration
self.yield_tables_dir = yield_tables_dir
self.print_off = print_off
self.use_net_yields_stable = use_net_yields_stable
self.input_yields = input_yields
self.yield_modifier = yield_modifier
self.is_sygma = is_sygma
self.iolevel = iolevel
# Attributes associated with radioactive species
self.table_radio = table_radio
self.sn1a_table_radio = sn1a_table_radio
self.nsmerger_table_radio = nsmerger_table_radio
self.decay_file = decay_file
self.len_decay_file = len(decay_file)
self.delayed_extra_log_radio = delayed_extra_log_radio
self.delayed_extra_yields_radio = delayed_extra_yields_radio
self.delayed_extra_yields_norm_radio = delayed_extra_yields_norm_radio
self.delayed_extra_yields_log_int_radio = delayed_extra_yields_log_int_radio
self.nb_delayed_extra_radio = len(self.delayed_extra_yields_radio)
self.ytables_radio_in = ytables_radio_in
self.radio_iso_in = radio_iso_in
self.ytables_1a_radio_in = ytables_1a_radio_in
self.ytables_nsmerger_radio_in = ytables_nsmerger_radio_in
self.radio_massive_agb_on = False
self.radio_sn1a_on = False
self.radio_nsmerger_on = False
self.radio_refinement = radio_refinement
self.test_clayton = test_clayton
self.use_net_yields_radio = use_net_yields_radio
# Number of coefficients for the interpolation routine
self.nb_c_needed = 3
# Define the use of the decay_module and the decay_file
is_radio = not (len(self.table_radio) == 0 \
and len(self.sn1a_table_radio) == 0 \
and len(self.nsmerger_table_radio) == 0 \
and self.nb_delayed_extra_radio == 0)
if is_radio and self.len_decay_file == 0:
self.use_decay_module = True
else:
self.use_decay_module = False
# Initialize decay module
if self.use_decay_module:
import NuPyCEE.decay_module as decay_module
self.decay_module = decay_module
self.f_network = f_network
self.f_format = f_format
self.__initialize_decay_module()
# Normalization of the delayed extra sources
if self.nb_delayed_extra > 0:
self.__normalize_delayed_extra()
# Normalization constants for the Kroupa IMF
if self.imf_type == 'kroupa':
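# p1 and p2 are continuity factors that join the Kroupa (2001) power-law
# segments (slopes -0.3, -1.3, -2.3) at the 0.08 and 0.5 Msun break points;
# p3 = 1 since the slope does not change at 1.0 Msun.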
self.p0 = 1.0
self.p1 = 0.08**(-0.3 + 1.3)
self.p2 = 0.5**(-1.3 + 2.3)
self.p3 = 1**(-2.3 +2.3)
self.p1_p2 = self.p1 * self.p2
# Define the broken power-law of Ferrini IMF approximation
self.norm_fer = [3.1,1.929,1.398,0.9113,0.538,0.3641,0.2972,\
0.2814,0.2827,0.298,0.305,0.3269,0.3423,0.3634]
self.alpha_fer = [0.6,0.35,0.15,-0.15,-0.6,-1.05,-1.4,-1.6,-1.7,\
-1.83,-1.85,-1.9,-1.92,-1.94]
self.m_up_fer = [0.15,0.2,0.24,0.31,0.42,0.56,0.76,1.05,1.5,\
3.16,4.0,10.0,20.0,120]
for i_fer in range(0,len(self.norm_fer)):
self.alpha_fer[i_fer] = self.alpha_fer[i_fer] + 1
self.norm_fer[i_fer] = self.norm_fer[i_fer]/(self.alpha_fer[i_fer])
# Normalize the IMF to 1 MSun
self.A_imf = 1.0 / self._imf(self.imf_bdys[0], self.imf_bdys[1], 2)
self.A_imf_pop3 = 1.0 / self._imf(self.imf_bdys_pop3[0], self.imf_bdys_pop3[1], 2)
# Parameter that determines if not enough gas is available for star formation
self.not_enough_gas_count = 0
self.not_enough_gas = False
# Initialisation of the timesteps
if len(self.dt_split_info) > 0: # and len(self.ej_massive) == 0:
timesteps = self.__build_split_dt()
else:
timesteps = self.__get_timesteps()
self.history.timesteps = timesteps
self.nb_timesteps = len(timesteps)
if self.pre_calculate_SSPs:
self.t_ce = np.zeros(self.nb_timesteps)
self.t_ce[0] = self.history.timesteps[0]
for i_init in range(1,self.nb_timesteps):
self.t_ce[i_init] = self.t_ce[i_init-1] + \
self.history.timesteps[i_init]
# Define the decay properties
if self.use_decay_module:
self.__read_isotopes_and_define_decay_info()
elif self.len_decay_file > 0:
self.__define_decay_info()
# If the yield tables have already been read previously ...
if self.input_yields:
# Assign the input yields and lifetimes
self.history.isotopes = isotopes_in
self.nb_isotopes = len(self.history.isotopes)
self.ytables = ytables_in
self.ytables_1a = ytables_1a_in
self.ytables_nsmerger = ytables_nsmerger_in
self.extra_source_on = False
self.ytables_extra = 0
self.inter_Z_points = inter_Z_points
self.nb_inter_Z_points = nb_inter_Z_points
self.y_coef_M = y_coef_M
self.y_coef_M_ej = y_coef_M_ej
self.y_coef_Z_aM = y_coef_Z_aM
self.y_coef_Z_bM = y_coef_Z_bM
self.y_coef_Z_bM_ej = y_coef_Z_bM_ej
self.tau_coef_M = tau_coef_M
self.tau_coef_M_inv = tau_coef_M_inv
self.tau_coef_Z_aM = tau_coef_Z_aM
self.tau_coef_Z_bM = tau_coef_Z_bM
self.tau_coef_Z_aM_inv = tau_coef_Z_aM_inv
self.tau_coef_Z_bM_inv = tau_coef_Z_bM_inv
self.y_coef_M_pop3 = y_coef_M_pop3
self.y_coef_M_ej_pop3 = y_coef_M_ej_pop3
self.tau_coef_M_pop3 = tau_coef_M_pop3
self.tau_coef_M_pop3_inv = tau_coef_M_pop3_inv
self.inter_lifetime_points_pop3 = inter_lifetime_points_pop3
self.inter_lifetime_points_pop3_tree = inter_lifetime_points_pop3_tree
self.nb_inter_lifetime_points_pop3 = nb_inter_lifetime_points_pop3
self.inter_lifetime_points = inter_lifetime_points
self.inter_lifetime_points_tree = inter_lifetime_points_tree
self.nb_inter_lifetime_points = nb_inter_lifetime_points
self.nb_inter_M_points_pop3 = nb_inter_M_points_pop3
self.inter_M_points_pop3_tree = inter_M_points_pop3_tree
self.nb_inter_M_points = nb_inter_M_points
self.inter_M_points = inter_M_points
self.y_coef_Z_aM_ej = y_coef_Z_aM_ej
# Assign the input yields for radioactive isotopes
if self.len_decay_file > 0 or self.use_decay_module:
self.ytables_1a_radio = ytables_1a_radio_in
self.ytables_nsmerger_radio = ytables_nsmerger_radio_in
self.y_coef_M_radio = y_coef_M_radio
self.y_coef_Z_aM_radio = y_coef_Z_aM_radio
self.y_coef_Z_bM_radio = y_coef_Z_bM_radio
# If the yield tables need to be read from the files ...
else:
# Read of the yield tables
self.__read_tables()
# Read radioactive tables
if self.len_decay_file > 0 or self.use_decay_module:
self.__read_radio_tables()
# Modify the yields (ttrueman edit)
if len(self.yield_modifier) > 0:
iso = [i[0] for i in yield_modifier]
M = [i[1] for i in yield_modifier]
Z = [i[2] for i in yield_modifier]
modifier = [i[3] for i in yield_modifier]
val = [i[4] for i in yield_modifier]
for j,specie in enumerate(iso):
if Z[j] not in self.Z_table or M[j] not in self.M_table:
print('Z = %s or M_sun = %s is not in yield table'%(Z[j],M[j]))
print('No modifications will be performed on %s'%iso[j],"\n")
elif specie in self.history.isotopes:
if modifier[j] == "replace":
self.ytables.set(M=M[j],Z=Z[j],specie=iso[j],
value=val[j])
if modifier[j] == "multiply":
original = self.ytables.get(M=M[j],Z=Z[j],
quantity=iso[j])
self.ytables.set(M=M[j],Z=Z[j],specie=iso[j],
value=original*val[j])
elif self.len_decay_file > 0:
if specie in self.radio_iso:
if modifier[j] == "replace":
self.ytables_radio.set(M=M[j],Z=Z[j],specie=iso[j],
value=val[j])
if modifier[j] == "multiply":
original = self.ytables_radio.get(M=M[j],Z=Z[j],
quantity=iso[j])
self.ytables_radio.set(M=M[j],Z=Z[j],specie=iso[j],
value=original*val[j])
else:
print("ERROR 404: %s not found in list of isotopes"%specie,
"\n")
else:
print("ERROR 404: %s not found in list of isotopes"%specie,
"\n")
# Declare the interpolation coefficient arrays
self.__declare_interpolation_arrays()
# Interpolate the yields tables
self.__interpolate_pop3_yields()
self.__interpolate_massive_and_agb_yields()
# Interpolate lifetimes
self.__interpolate_pop3_lifetimes()
self.__interpolate_massive_and_agb_lifetimes()
# Calculate coefficients to interpolate masses from lifetimes
self.__interpolate_pop3_m_from_t()
self.__interpolate_massive_and_agb_m_from_t()
# If radioactive isotopes are used ..
if self.len_decay_file > 0 and len(self.table_radio) > 0:
# Interpolate the radioactive yields tables
self.__interpolate_massive_and_agb_yields(is_radio=True)
# Check whether the initial metallicity is available
if (not self.iniZ in self.ytables.Z_list) and (self.iniZ > 0.0):
print ('Error - iniZ must be an available metallicity in the grid of stellar yields.')
self.need_to_quit = True
return
# Check for incompatible inputs - Error messages
self.__check_inputs()
if self.need_to_quit:
return
# NOTE: This if statement also needs to be in SYGMA and OMEGA!
# Initialisation of the composition of the gas reservoir
if self.is_sygma:
ymgal = np.zeros(self.nb_isotopes)
ymgal[0] = copy.deepcopy(self.mgal)
else:
ymgal = self._get_iniabu()
self.len_ymgal = len(ymgal)
# Initialisation of the storing arrays
mdot, ymgal, ymgal_massive, ymgal_agb, ymgal_1a, ymgal_nsm,\
ymgal_delayed_extra, mdot_massive, mdot_agb, mdot_1a, mdot_nsm,\
mdot_delayed_extra, sn1a_numbers, sn2_numbers, nsm_numbers,\
delayed_extra_numbers, imf_mass_ranges, \
imf_mass_ranges_contribution, imf_mass_ranges_mtot = \
self._get_storing_arrays(ymgal, len(self.history.isotopes))
# Initialisation of the composition of the gas reservoir
if len(self.ism_ini) > 0:
for i_ini in range(0,self.len_ymgal):
ymgal[0][i_ini] = self.ism_ini[i_ini]
# If radioactive isotopes are used ..
if self.len_decay_file > 0 or self.use_decay_module:
# Define initial radioactive gas composition
ymgal_radio = np.zeros(self.nb_radio_iso)
# Initialisation of the storing arrays for radioactive isotopes
mdot_radio, ymgal_radio, ymgal_massive_radio, ymgal_agb_radio,\
ymgal_1a_radio, ymgal_nsm_radio, \
ymgal_delayed_extra_radio, mdot_massive_radio, mdot_agb_radio,\
mdot_1a_radio, mdot_nsm_radio,\
mdot_delayed_extra_radio, dummy, dummy, dummy, dummy, \
dummy, dummy, dummy = \
self._get_storing_arrays(ymgal_radio, self.nb_radio_iso)
# Initialisation of the composition of the gas reservoir
if len(self.ism_ini_radio) > 0:
for i_ini in range(0,len(self.ism_ini_radio)):
ymgal_radio[0][i_ini] = self.ism_ini_radio[i_ini]
# Define indexes to make connection between unstable/stable isotopes
if not self.use_decay_module:
self.__define_unstab_stab_indexes()
# Output information
if self.iolevel >= 1:
print ('Number of timesteps: ', '{:.1E}'.format(len(timesteps)))
# Create empty arrays if on the fast mode
if self.pre_calculate_SSPs:
self.history.gas_mass.append(np.sum(ymgal[0]))
self.history.ism_iso_yield.append(ymgal[0])
self.history.m_locked = []
self.history.m_locked_agb = []
self.history.m_locked_massive = []
self.massive_ej_rate = []
self.sn1a_ej_rate = []
# Add the initialized arrays to the history class
else:
self.history.gas_mass.append(np.sum(ymgal[0]))
self.history.ism_iso_yield.append(ymgal[0])
self.history.ism_iso_yield_agb.append(ymgal_agb[0])
self.history.ism_iso_yield_1a.append(ymgal_1a[0])
self.history.ism_iso_yield_nsm.append(ymgal_nsm[0])
self.history.ism_iso_yield_massive.append(ymgal_massive[0])
self.history.sn1a_numbers.append(0)
self.history.nsm_numbers.append(0)
self.history.sn2_numbers.append(0)
self.history.m_locked = []
self.history.m_locked_agb = []
self.history.m_locked_massive = []
# Keep track of the mass-loss rate of massive stars and SNe Ia
self.massive_ej_rate = []
for k in range(self.nb_timesteps + 1):
self.massive_ej_rate.append(0.0)
self.sn1a_ej_rate = []
for k in range(self.nb_timesteps + 1):
self.sn1a_ej_rate.append(0.0)
# Attribute arrays and variables to the current object
self.mdot = mdot
self.ymgal = ymgal
self.ymgal_massive = ymgal_massive
self.ymgal_agb = ymgal_agb
self.ymgal_1a = ymgal_1a
self.ymgal_nsm = ymgal_nsm
self.ymgal_delayed_extra = ymgal_delayed_extra
self.mdot_massive = mdot_massive
self.mdot_agb = mdot_agb
self.mdot_1a = mdot_1a
self.mdot_nsm = mdot_nsm
self.mdot_delayed_extra = mdot_delayed_extra
self.sn1a_numbers = sn1a_numbers
self.nsm_numbers = nsm_numbers
self.delayed_extra_numbers = delayed_extra_numbers
self.sn2_numbers = sn2_numbers
self.imf_mass_ranges = imf_mass_ranges
self.imf_mass_ranges_contribution = imf_mass_ranges_contribution
self.imf_mass_ranges_mtot = imf_mass_ranges_mtot
# Attribute radioactive arrays and variables to the current object
if self.len_decay_file > 0 or self.use_decay_module:
self.mdot_radio = mdot_radio
self.ymgal_radio = ymgal_radio
self.ymgal_massive_radio = ymgal_massive_radio
self.ymgal_agb_radio = ymgal_agb_radio
self.ymgal_1a_radio = ymgal_1a_radio
self.ymgal_nsm_radio = ymgal_nsm_radio
self.ymgal_delayed_extra_radio = ymgal_delayed_extra_radio
self.mdot_massive_radio = mdot_massive_radio
self.mdot_agb_radio = mdot_agb_radio
self.mdot_1a_radio = mdot_1a_radio
self.mdot_nsm_radio = mdot_nsm_radio
self.mdot_delayed_extra_radio = mdot_delayed_extra_radio
# Declare non-metals for the getmetallicity function
self.nonmetals = ['H-','He-','Li-']
self.i_nonmetals = []
for i_iso in range(self.nb_isotopes):
if 'H-' in self.history.isotopes[i_iso] or\
'He-' in self.history.isotopes[i_iso] or\
'Li-' in self.history.isotopes[i_iso]:
self.i_nonmetals.append(i_iso)
self.len_i_nonmetals = len(self.i_nonmetals)
# Set the initial time and metallicity
# Hardcode the metallicity if Sygma, so it does not require
# a consistent iniabu file for all (unexpected) metallicities
if self.is_sygma:
zmetal = copy.deepcopy(self.iniZ)
else:
zmetal = self._getmetallicity(0)
self.history.metallicity.append(zmetal)
self.t = 0
self.history.age = np.zeros(self.nb_timesteps+1)
for i_tt in range(0,self.nb_timesteps):
self.history.age[i_tt+1] = self.history.age[i_tt] + \
self.history.timesteps[i_tt]
self.zmetal = zmetal
# Calculate stellar initial composition interpolation coefficients
if self.use_net_yields_stable:
self.__calculate_X0_coefs()
# Define the element to isotope index connections
self.__get_elem_to_iso_main()
# Get coefficients for the fraction of white dwarfs fit (2nd poly)
if not self.pre_calculate_SSPs:
self.__get_coef_wd_fit()
# Output information
if self.iolevel > 0:
print ('### Start with initial metallicity of ','{:.4E}'.format(zmetal))
print ('###############################')
##############################################
# Get elem-to-iso Main #
##############################################
def __get_elem_to_iso_main(self):
# Get the list of elements
self.history.elements = []
self.i_elem_for_iso = np.zeros(self.nb_isotopes,dtype=int)
for i_iso in range(self.nb_isotopes):
the_elem = self.history.isotopes[i_iso].split('-')[0]
if not the_elem in self.history.elements:
self.history.elements.append(the_elem)
i_elem = self.history.elements.index(the_elem)
self.i_elem_for_iso[i_iso] = i_elem
self.nb_elements = len(self.history.elements)
##############################################
# Calculate X0 Coefs #
##############################################
def __calculate_X0_coefs(self):
'''
Calculate interpolation coefficients to interpolate the stellar
initial composition (X0) in between metallicities. This assumes
that the initial composition is the same for all masses at a given
metallicity.
log10(X0) = fct(log10(Z))
'''
# Define the interpolation coefficients array
# Xo_coefs[ Z bin index ][ coefs index ]
# NOTE: Z is in increasing order here
self.X0_int_coefs = np.zeros((self.ytables.nb_Z,\
self.nb_c_needed, self.nb_isotopes))
# Create the list of X0 as a function of metallicity
self.X0_vs_Z = []
Z_list = sorted(self.ytables.Z_list)
for i_Z in range(self.ytables.nb_Z):
M_temp = self.ytables.M_list[0]
Z_temp = Z_list[i_Z]
self.X0_vs_Z.append(self.ytables.get(M=M_temp, Z=Z_temp,\
quantity="X0", isotopes=self.history.isotopes))
self.X0_vs_Z = np.array(self.X0_vs_Z)
# Prepare the interpolation
x_arr = np.log10(np.array(Z_list))
y_arr = np.log10(np.array(self.X0_vs_Z))
interp_list = [[None]*len(y) for y in y_arr]
# Interpolate for each metallicity bin
for i_Z in range(self.ytables.nb_Z-1):
self.X0_int_coefs[i_Z] = self.interpolation(x_arr, y_arr, \
None, i_Z, interp_list, return_coefs=True)
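# Note: only the first nb_Z-1 metallicity bins are filled here;
# get_interp_X0 clamps the requested Z to the available range, so the
# coefficients of the last bin index are never used.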
##############################################
# Calculate X0 Coefs #
##############################################
def get_interp_X0(self, Z_gix):
'''
Return the interpolated initial compositions (X0) used
for the input stellar models
log10(X0) = fct(log10(Z))
Argument
========
Z_gix: Metallicity at which X0 is interpolated
'''
# Get the available metallicity range
Z_list = sorted(self.ytables.Z_list)
Z_max = Z_list[-1]
Z_min = Z_list[0]
# Limit the metallicity if out of range
if Z_gix >= Z_max:
Z_interp = Z_max
i_Z = self.ytables.nb_Z - 2
elif Z_gix <= Z_min:
Z_interp = Z_min
i_Z = 0
# If the metallicity is within the range
else:
# Find the lower metallicity limit
i_Z = 0
while Z_list[i_Z+1] < Z_gix:
i_Z += 1
Z_interp = Z_gix
# Get the interpolated value
x_arr = np.log10(np.array(Z_list))
y_arr = np.log10(np.array(self.X0_vs_Z))
X0_interp = 10**(self.interpolation(x_arr, y_arr, \
np.log10(Z_interp), i_Z, self.X0_int_coefs))
# Return the normalized abundances
return X0_interp / sum(X0_interp)
##############################################
# Check Inputs #
##############################################
def __check_inputs(self):
'''
This function checks for incompatible input entries and stops
the simulation if needed.
'''
self.need_to_quit = False
# Check the use of net yields
if self.use_net_yields_stable or self.use_net_yields_radio:
if not self.ytables.net_yields_available:
print("Error - Net yields cannot be use for "+self.table)
print(" --> Make sure X0 is provided and sum(X0) = 1.0")
self.need_to_quit = True
if self.pre_calculate_SSPs:
print("Error - Net yields cannot be used with pre_calculate_SSPs=True")
self.need_to_quit = True
# Total duration of the simulation
if self.history.tend > 1.5e10:
print ('Error - tend must be less than or equal to 1.5e10 years.')
self.need_to_quit = True
# Timestep
if self.history.dt > self.history.tend:
print ('Error - dt must be smaller or equal to tend.')
self.need_to_quit = True
# Transition mass between AGB and massive stars
#if #(self.transitionmass <= 7)or(self.transitionmass > 12):
# print ('Error - transitionmass must be between 7 and 12 Mo.')
# self.need_to_quit = True
# IMF
if not self.imf_type in ['salpeter','chabrier','kroupa','input', \
'alphaimf','chabrieralpha','fpp', 'kroupa93', 'lognormal']:
print ('Error - Selected imf_type is not available.')
self.need_to_quit = True
# IMF yields range
#if self.imf_yields_range[0] < 1:
# print ('Error - imf_yields_range lower boundary must be >= 1.')
#self.need_to_quit = True
#if (self.imf_yields_range[0] >= self.imf_bdys[1]) or \
# (self.imf_yields_range[0] <= self.imf_bdys[0]) or \
# (self.imf_yields_range[1] >= self.imf_bdys[1]):
if ((self.imf_yields_range[0] > self.imf_bdys[1]) or \
(self.imf_yields_range[1] < self.imf_bdys[0])):
print ('Error - part of imf_yields_range must be within imf_bdys.')
self.need_to_quit = True
if (self.transitionmass<self.imf_yields_range[0])\
or (self.transitionmass>self.imf_yields_range[1]):
print ('Error - Transitionmass outside imf yield range')
self.need_to_quit = True
if self.ns_merger_on:
if ((self.nsmerger_bdys[0] > self.imf_bdys[1]) or \
(self.nsmerger_bdys[1] < self.imf_bdys[0])):
print ('Error - part of nsmerger_bdys must be within imf_bdys.')
self.need_to_quit = True
# SN Ia delay-time distribution function
if not self.history.sn1a_rate in \
['exp','gauss','maoz','power_law']:
print ('Error - Selected sn1a_rate is not available.')
self.need_to_quit = True
# Initial metallicity for the gas
#if not self.iniZ in [0.0, 0.0001, 0.001, 0.006, 0.01, 0.02]:
# print ('Error - Selected iniZ is not available.')
# self.need_to_quit = True
# If popIII stars are used ...
if self.iniZ == 0.0:
# IMF and yield boundary ranges
if (self.imf_yields_range_pop3[0] >= self.imf_bdys_pop3[1]) or \
(self.imf_yields_range_pop3[1] <= self.imf_bdys_pop3[0]):
print ('Error - imf_yields_range_pop3 must be within imf_bdys_pop3.')
self.need_to_quit = True
if self.netyields_on == True and self.Z_trans > 0.0:
print ('Error - net yields setting not usable with PopIII at the moment.')
self.need_to_quit = True
# If input poly fit DTD, the applicable range must be specified
if len(self.poly_fit_dtd_5th) > 0:
if not len(self.poly_fit_range) == 2:
print ('Error - poly_fit_range must be specified when ',\
'using the poly_fit_dtd_5th parameter for the SNe Ia DTD.')
self.need_to_quit = True
if self.extra_source_on:
lt=len(self.extra_source_table)
lf=len(self.f_extra_source)
lmr=len(self.extra_source_mass_range)
#leZ=len(self.extra_source_exclude_Z)
if (not lt == lf):
print ('Error - parameter extra_source_table and f_extra_source not of equal size')
self.need_to_quit = True
if (not lt == lmr):
print ('Error - parameter extra_source_table and extra_source_mass_range not of equal size')
self.need_to_quit = True
#if (not lt == leZ):
# print ('Error - parameter extra_source_table and extra_source_exclude_Z not of equal size')
# self.need_to_quit = True
# Use of radioactive isotopes
if (self.len_decay_file > 0 or self.use_decay_module) and \
(len(self.table_radio) == 0 and len(self.sn1a_table_radio) == 0 and \
len(self.nsmerger_table_radio) == 0 and self.nb_delayed_extra_radio == 0):
print ('Error - At least one radioactive yields table must '+\
'be defined when using radioactive isotopes.')
self.need_to_quit = True
elif self.len_decay_file > 0 or self.use_decay_module:
if self.yield_interp == 'wiersma':
print ('Error - Radioactive isotopes cannot be used with net yields .. for now.')
self.need_to_quit = True
if self.Z_trans > 0.0:
print ('Error - Radioactive isotopes cannot be used with PopIII stars .. for now.')
self.need_to_quit = True
##############################################
# Read Decay Info #
##############################################
def __define_decay_info(self):
'''
This function reads decay_file and creates the decay_info array
to be used when radioactive isotopes are used.
'''
# Declare the decay_info array
# decay_info[nb_radio_iso][0] --> Unstable isotope
# decay_info[nb_radio_iso][1] --> Stable isotope where it decays
# decay_info[nb_radio_iso][2] --> Mean-life (half-life/ln2) [yr]
self.decay_info = []
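# Expected line format (illustrative), as parsed below:
# &Al-26 &Mg-26 &7.17e+05
# i.e. '&'-prefixed fields giving the unstable isotope, its stable
# daughter, and the half-life in years (converted to a mean-life below).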
# Open the input file
with open(os.path.join(nupy_path, self.decay_file)) as ddi:
# For each line in the input file ..
for line in ddi:
# Split the line and add the information in the decay_info array
line_split = [str(x) for x in line.split()]
if line_split[0][0] == '&':
self.decay_info.append(\
[line_split[0].split('&')[1],\
line_split[1].split('&')[1],\
float(line_split[2].split('&')[1])/np.log(2.0)])
# Count the number of radioactive isotopes
self.nb_radio_iso = len(self.decay_info)
self.nb_new_radio_iso = len(self.decay_info)
##############################################
# Read isotopes and define decay info #
##############################################
def __read_isotopes_and_define_decay_info(self):
'''
This function reads the yield files and creates the decay_info
array from them to include all isotopes.
'''
allIsotopes = set()
# Massive and AGB stars
if len(self.table_radio) > 0:
path = os.path.join(nupy_path, self.table_radio)
self.__table_isotopes_in_set(allIsotopes, path)
# SNe Ia
if len(self.sn1a_table_radio) > 0:
path = os.path.join(nupy_path, self.sn1a_table_radio)
self.__table_isotopes_in_set(allIsotopes, path)
# NS mergers
if len(self.nsmerger_table_radio) > 0:
path = os.path.join(nupy_path, self.nsmerger_table_radio)
self.__table_isotopes_in_set(allIsotopes, path)
# Delayed extra sources
if self.nb_delayed_extra_radio > 0:
self.ytables_delayed_extra_radio = []
for i_syt in range(0,self.nb_delayed_extra_radio):
path = os.path.join(nupy_path, self.delayed_extra_yields_radio[i_syt])
self.__table_isotopes_in_set(allIsotopes, path)
# Declare the decay_info array
# decay_info[nb_radio_iso][0] --> Unstable isotope
# decay_info[nb_radio_iso][1] --> None
# decay_info[nb_radio_iso][2] --> None
self.decay_info = []
for isotope in allIsotopes:
self.decay_info.append([isotope, None, None])
# Count the number of radioactive isotopes
self.nb_radio_iso = len(self.decay_info)
self.nb_new_radio_iso = len(self.decay_info)
##############################################
# Read yield table to add isotopes in set #
##############################################
def __table_isotopes_in_set(self, allIsotopes, path):
'''
Simply read the isotopes in path and add them to the set "allIsotopes"
'''
# Open the file in path
with open(path, "r") as fread:
for line in fread:
# Make sure we are not taking a header
if line[0] == "H" or "Isotopes" in line:
continue
# Retrieve the isotope name and add it to the set
isoName = line.split("&")[1].strip()
allIsotopes.add(isoName)
##############################################
# Read Radio Tables #
##############################################
def __read_radio_tables(self):
'''
This function reads the radioactive isotopes yields using the
decay_file and decay_info parameters to define which isosoptes
are considered.
'''
# Create the list of radioactive isotopes considered
self.radio_iso = []
for i_r in range(0,self.nb_radio_iso):
self.radio_iso.append(self.decay_info[i_r][0])
# Massive and AGB stars
if len(self.table_radio) > 0:
self.radio_massive_agb_on = True
self.ytables_radio = ry.read_yields_M_Z(os.path.join(nupy_path,\
self.table_radio), excludemass=self.exclude_masses,\
isotopes=self.radio_iso)
# SNe Ia
sys.stdout.flush()
if len(self.sn1a_table_radio) > 0:
self.radio_sn1a_on = True
self.ytables_1a_radio = ry.read_yields_Z( \
os.path.join(nupy_path, self.sn1a_table_radio), isotopes=self.radio_iso)
# NS mergers
if len(self.nsmerger_table_radio) > 0:
self.radio_nsmerger_on = True
self.ytables_nsmerger_radio = ry.read_yields_Z( \
os.path.join(nupy_path, self.nsmerger_table_radio), isotopes=self.radio_iso)
# Delayed extra sources
if self.nb_delayed_extra_radio > 0:
self.ytables_delayed_extra_radio = []
for i_syt in range(0,self.nb_delayed_extra_radio):
self.ytables_delayed_extra_radio.append(ry.read_yields_Z( \
os.path.join(nupy_path, self.delayed_extra_yields_radio[i_syt]),\
isotopes=self.radio_iso))
##############################################
# Select suitable path for path sent. #
##############################################
def __select_path(self, table_path):
'''
Return table_path if it is already absolute; otherwise prepend the NuPyCEE installation directory (nupy_path).
'''
if table_path[0] == '/':
return table_path
else:
return os.path.join(nupy_path, table_path)
##############################################
# Define Elements #
##############################################
def __define_elements(self):
'''
Define the dictionaries storing information about
chemical elements
'''
# Atomic number of elements
self.element_Z_number = dict()
self.element_Z_number["H"] = 1
self.element_Z_number["He"] = 2
self.element_Z_number["Li"] = 3
self.element_Z_number["Be"] = 4
self.element_Z_number["B"] = 5
self.element_Z_number["C"] = 6
self.element_Z_number["N"] = 7
self.element_Z_number["O"] = 8
self.element_Z_number["F"] = 9
self.element_Z_number["Ne"] = 10
self.element_Z_number["Na"] = 11
self.element_Z_number["Mg"] = 12
self.element_Z_number["Al"] = 13
self.element_Z_number["Si"] = 14
self.element_Z_number["P"] = 15
self.element_Z_number["S"] = 16
self.element_Z_number["Cl"] = 17
self.element_Z_number["Ar"] = 18
self.element_Z_number["K"] = 19
self.element_Z_number["Ca"] = 20
self.element_Z_number["Sc"] = 21
self.element_Z_number["Ti"] = 22
self.element_Z_number["V"] = 23
self.element_Z_number["Cr"] = 24
self.element_Z_number["Mn"] = 25
self.element_Z_number["Fe"] = 26
self.element_Z_number["Co"] = 27
self.element_Z_number["Ni"] = 28
self.element_Z_number["Cu"] = 29
self.element_Z_number["Zn"] = 30
self.element_Z_number["Ga"] = 31
self.element_Z_number["Ge"] = 32
self.element_Z_number["As"] = 33
self.element_Z_number["Se"] = 34
self.element_Z_number["Br"] = 35
self.element_Z_number["Kr"] = 36
self.element_Z_number["Rb"] = 37
self.element_Z_number["Sr"] = 38
self.element_Z_number["Y"] = 39
self.element_Z_number["Zr"] = 40
self.element_Z_number["Nb"] = 41
self.element_Z_number["Mo"] = 42
self.element_Z_number["Tc"] = 43
self.element_Z_number["Ru"] = 44
self.element_Z_number["Rh"] = 45
self.element_Z_number["Pd"] = 46
self.element_Z_number["Ag"] = 47
self.element_Z_number["Cd"] = 48
self.element_Z_number["In"] = 49
self.element_Z_number["Sn"] = 50
self.element_Z_number["Sb"] = 51
self.element_Z_number["Te"] = 52
self.element_Z_number["I"] = 53
self.element_Z_number["Xe"] = 54
self.element_Z_number["Cs"] = 55
self.element_Z_number["Ba"] = 56
self.element_Z_number["La"] = 57
self.element_Z_number["Ce"] = 58
self.element_Z_number["Pr"] = 59
self.element_Z_number["Nd"] = 60
self.element_Z_number["Pm"] = 61
self.element_Z_number["Sm"] = 62
self.element_Z_number["Eu"] = 63
self.element_Z_number["Gd"] = 64
self.element_Z_number["Tb"] = 65
self.element_Z_number["Dy"] = 66
self.element_Z_number["Ho"] = 67
self.element_Z_number["Er"] = 68
self.element_Z_number["Tm"] = 69
self.element_Z_number["Yb"] = 70
self.element_Z_number["Lu"] = 71
self.element_Z_number["Hf"] = 72
self.element_Z_number["Ta"] = 73
self.element_Z_number["W"] = 74
self.element_Z_number["Re"] = 75
self.element_Z_number["Os"] = 76
self.element_Z_number["Ir"] = 77
self.element_Z_number["Pt"] = 78
self.element_Z_number["Au"] = 79
self.element_Z_number["Hg"] = 80
self.element_Z_number["Tl"] = 81
self.element_Z_number["Pb"] = 82
self.element_Z_number["Bi"] = 83
self.element_Z_number["Po"] = 84
self.element_Z_number["At"] = 85
self.element_Z_number["Rn"] = 86
self.element_Z_number["Fr"] = 87
self.element_Z_number["Ra"] = 88
self.element_Z_number["Ac"] = 89
self.element_Z_number["Th"] = 90
self.element_Z_number["Pa"] = 91
self.element_Z_number["U"] = 92
self.element_Z_number["Np"] = 93
self.element_Z_number["Pu"] = 94
self.element_Z_number["Am"] = 95
self.element_Z_number["Cm"] = 96
self.element_Z_number["Bk"] = 97
self.element_Z_number["Cf"] = 98
self.element_Z_number["Es"] = 99
self.element_Z_number["Fm"] = 100
self.element_Z_number["Md"] = 101
self.element_Z_number["No"] = 102
self.element_Z_number["Lr"] = 103
self.element_Z_number["Rf"] = 104
self.element_Z_number["Db"] = 105
self.element_Z_number["Sg"] = 106
self.element_Z_number["Bh"] = 107
self.element_Z_number["Hs"] = 108
self.element_Z_number["Mt"] = 109
self.element_Z_number["Ds"] = 110
self.element_Z_number["Rg"] = 111
self.element_Z_number["Cn"] = 112
self.element_Z_number["Nh"] = 113
self.element_Z_number["Fl"] = 114
self.element_Z_number["Mc"] = 115
self.element_Z_number["Lv"] = 116
self.element_Z_number["Ts"] = 117
self.element_Z_number["Og"] = 118
# List of elements in order of appearance in the periodic table
self.elem_list_periodic_table = ["H", "He", "Li", "Be", "B", "C", "N", "O", \
"F", "Ne", "Na", "Mg", "Al", "Si", "P", "S", "Cl", "Ar", "K", "Ca", \
"Sc", "Ti", "V", "Cr", "Mn", "Fe", "Co", "Ni", "Cu", "Zn", "Ga", "Ge", \
"As", "Se", "Br", "Kr", "Rb", "Sr", "Y", "Zr", "Nb", "Mo", "Tc", "Ru", "Rh", \
"Pd", "Ag", "Cd", "In", "Sn", "Sb", "Te", "I", "Xe", "Cs", "Ba", "La", \
"Ce", "Pr", "Nd", "Pm", "Sm", "Eu", "Gd", "Tb", "Dy", "Ho", "Er", "Tm", "Yb", \
"Lu", "Hf", "Ta", "W", "Re", "Os", "Ir", "Pt", "Au", "Hg", "Tl", "Pb", \
"Bi", "Po", "At", "Rn" "Fr", "Ra", "Ac", "Th", "Pa", "U", "Np", "Pu", \
"Am", "Cm", "Bk", "Cf", "Es", "Fm", "Md", "No", "Lr", "Rf", "Db", "Sg", \
"Bh", "Hs", "Mt", "Ds", "Rg", "Cn", "Nh", "Fl", "Mc", "Lv", "Ts", "Og"]
##############################################
# Sort Isotope List #
##############################################
def __sort_isotope_list(self, iso_list):
'''
Sort list of isotopes by atomic number, then by mass number
'''
# Define the list of sorted isotopes
nb_iso_temp = len(iso_list)
iso_list_sorted = []
# For each element in the periodic table ..
for elem in self.elem_list_periodic_table:
# Collect mass numbers for the isotopes of that element
A_temp = []
for iso in iso_list:
split = iso.split("-")
if split[0] == elem:
A_temp.append(split[1])
# Sort the mass numbers numerically (they are stored as strings)
A_temp = sorted(A_temp, key=int)
# Add the isotopes
for the_A in A_temp:
iso_list_sorted.append(elem+"-"+the_A)
# Return the sorted list
return iso_list_sorted
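# A minimal worked example of the sorting convention above (isotope names
# are assumed to follow the "Elem-A" pattern used throughout this class):
#
#   iso_list = ["Fe-56", "H-2", "He-4", "Fe-54", "H-1"]
#   __sort_isotope_list(iso_list)
#   # -> ["H-1", "H-2", "He-4", "Fe-54", "Fe-56"]
#
# Isotopes are grouped by increasing atomic number (H, He, ..., Og) and
# ordered by increasing mass number within each element.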
##############################################
# Read Tables #
##############################################
def __read_tables(self):
'''
This function reads the isotope yields tables of the different enrichment sites
'''
# Get the list of isotopes
allIsotopes = set()
# Massive stars and AGB stars
path = self.__select_path(self.table)
self.__table_isotopes_in_set(allIsotopes, path)
# Pop III massive stars
path = self.__select_path(self.pop3_table)
self.__table_isotopes_in_set(allIsotopes, path)
# SNe Ia
path = self.__select_path(self.sn1a_table)
self.__table_isotopes_in_set(allIsotopes, path)
# Neutron star mergers
path = self.__select_path(self.nsmerger_table)
self.__table_isotopes_in_set(allIsotopes, path)
# Delayed-extra sources
if self.nb_delayed_extra > 0:
for i_syt in range(0,self.nb_delayed_extra):
path = self.__select_path(self.delayed_extra_yields[i_syt])
self.__table_isotopes_in_set(allIsotopes, path)
# Extra yields (on top of massive and AGB yields)
if self.extra_source_on:
for ee in range(len(self.extra_source_table)):
path = self.__select_path(self.extra_source_table[ee])
self.__table_isotopes_in_set(allIsotopes, path)
# Store the isotopes
isotope_list = list(allIsotopes)
self.history.isotopes = self.__sort_isotope_list(isotope_list)
self.nb_isotopes = len(self.history.isotopes)
# Massive stars and AGB stars
path = self.__select_path(self.table)
self.ytables = ry.read_yields_M_Z(path, isotopes=self.history.isotopes)
# PopIII massive stars
path = self.__select_path(self.pop3_table)
self.ytables_pop3 = ry.read_yields_M_Z(path, isotopes=self.history.isotopes)
# SNe Ia
path = self.__select_path(self.sn1a_table)
self.ytables_1a = ry.read_yields_Z(path, isotopes=self.history.isotopes)
# Neutron star mergers
path = self.__select_path(self.nsmerger_table)
self.ytables_nsmerger = ry.read_yields_Z(path, isotopes=self.history.isotopes)
# Delayed-extra sources
if self.nb_delayed_extra > 0:
self.ytables_delayed_extra = []
for i_syt in range(0,self.nb_delayed_extra):
path = self.__select_path(self.delayed_extra_yields[i_syt])
self.ytables_delayed_extra.append(ry.read_yields_Z(path, \
isotopes=self.history.isotopes))
# Extra yields (on top of massive and AGB yields)
if self.extra_source_on:
# Go over all extra sources
self.ytables_extra = []
for ee in range(len(self.extra_source_table)):
# If the path is absolute, do not prepend nupy_path
path = self.__select_path(self.extra_source_table[ee])
self.ytables_extra.append(ry.read_yields_Z(path,\
isotopes=self.history.isotopes))
# Read the stellar parameter table (stellar_param) if requested
if self.stellar_param_on:
path = self.__select_path(self.stellar_param_table)
table_param=ry.read_nugrid_parameter(path)
self.table_param=table_param
# Get the list of mass and metallicities found in the yields tables
self.__get_M_Z_models()
##############################################
# Get M Z Models #
##############################################
def __get_M_Z_models(self):
'''
Get the mass and metallicities of the input stellar yields
'''
# Main massive and AGB star yields
self.Z_table = copy.deepcopy(self.ytables.Z_list)
self.M_table = copy.deepcopy(self.ytables.M_list)
self.nb_Z_table = len(self.Z_table)
self.nb_M_table = len(self.M_table)
# Massive PopIII stars
self.M_table_pop3 = copy.deepcopy(self.ytables_pop3.M_list)
self.nb_M_table_pop3 = len(self.M_table_pop3)
##############################################
# Interpolate Pop3 Yields #
##############################################
def __interpolate_pop3_yields(self):
'''
Interpolate the mass-dependent yields table of massive
PopIII stars. This will create arrays containing interpolation
coefficients. The chemical evolution calculations will then
only use these coefficients instead of the yields table.
Interpolation laws
==================
Interpolation across stellar mass M
log10(yields) = a_M * M + b_M
Interpolation (total mass) across stellar mass
M_ej = a_ej * M + b_ej
Results
=======
a_M and b_M coefficients
------------------------
y_coef_M_pop3[i_coef][i_M_low][i_iso]
- i_coef : 0 and 1 for a_M and b_M, respectively
- i_M_low : Index of the lower mass limit where
the interpolation occurs
- i_iso : Index of the isotope
'''
# For each interpolation lower-mass bin point ..
for i_M in range(self.nb_inter_M_points_pop3-1):
# Get the yields for the lower and upper mass models
yields_low, yields_upp, m_ej_low, m_ej_upp, yields_ej_low,\
yields_ej_upp = self.__get_y_low_upp_pop3(i_M)
# Get the interpolation coefficients a_M, b_M
self.y_coef_M_pop3[0][i_M], self.y_coef_M_pop3[1][i_M],\
self.y_coef_M_ej_pop3[0][i_M], self.y_coef_M_ej_pop3[1][i_M] =\
self.__get_inter_coef_M(self.inter_M_points_pop3[i_M],\
self.inter_M_points_pop3[i_M+1], yields_low, yields_upp,\
m_ej_low, m_ej_upp, yields_ej_low, yields_ej_upp)
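# Illustrative sketch (not part of the calculation): once the coefficients
# are filled, PopIII yields at an arbitrary mass M inside the i_M-th mass
# bin can be recovered as
#
#   a_M = self.y_coef_M_pop3[0][i_M]
#   b_M = self.y_coef_M_pop3[1][i_M]
#   yields_at_M = 10**(a_M * M + b_M)   # log10(yields) = a_M*M + b_M
#
# where i_M is the index of the bin of self.inter_M_points_pop3 that
# brackets M (the evolution routines locate it with the Bin_tree built
# in __create_inter_M_points_pop3).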
##############################################
# Interpolate Massive and AGB Yields #
##############################################
def __interpolate_massive_and_agb_yields(self, is_radio=False):
'''
Interpolate the metallicity- and mass-dependent yields
table of massive and AGB stars. This will create arrays
containing interpolation coefficients. The chemical
evolution calculations will then only use these
coefficients instead of the yields table.
Interpolation laws
==================
Interpolation across stellar mass M
log10(yields) = a_M * M + b_M
Interpolation (total mass) across stellar mass
M_ej = a_ej * M + b_ej
Interpolation of a_M and b_M across metallicity Z
x_M = a_Z * log10(Z) + b_Z
x_M_ej = a_Z * log10(Z) + b_Z
The functions first calculate a_M and b_M for each Z,
and then interpolate these coefficients across Z.
Results
=======
a_M and b_M coefficients
------------------------
y_coef_M[i_coef][i_Z][i_M_low][i_iso]
- i_coef : 0 and 1 for a_M and b_M, respectively
- i_Z : Metallicity index available in the table
- i_M_low : Index of the lower mass limit where
the interpolation occurs
- i_iso : Index of the isotope
a_Z and b_Z coefficients for x_M
--------------------------------
y_coef_Z_xM[i_coef][i_Z_low][i_M_low][i_iso]
- i_coef : 0 and 1 for a_Z and b_Z, respectively
- i_Z_low : Index of the lower metallicity limit where
the interpolation occurs
- i_M_low : Index of the lower mass limit where
the interpolation occurs
- i_iso : Index of the isotope
Note
====
self.Z_table is in decreasing order
but y_coef_... arrays have metallicities in increasing order
'''
# Fill the y_coef_M array
# For each metallicity available in the yields ..
for i_Z_temp in range(self.nb_Z_table):
# Get the metallicity index in increasing order
i_Z = self.inter_Z_points.index(self.Z_table[i_Z_temp])
# For each interpolation lower-mass bin point ..
for i_M in range(self.nb_inter_M_points-1):
# Get the yields for the lower and upper mass models
yields_low, yields_upp, m_ej_low, m_ej_upp, yields_ej_low,\
yields_ej_upp = self.__get_y_low_upp(i_Z_temp, i_M, \
is_radio=is_radio)
# Get the interpolation coefficients a_M, b_M
if is_radio: # Ignore the total mass ejected (done when stable)
self.y_coef_M_radio[0][i_Z][i_M], self.y_coef_M_radio[1][i_Z][i_M],\
dummy, dummy = self.__get_inter_coef_M(self.inter_M_points[i_M],\
self.inter_M_points[i_M+1], yields_low, yields_upp,\
1.0, 2.0, yields_upp, yields_upp)
else:
self.y_coef_M[0][i_Z][i_M], self.y_coef_M[1][i_Z][i_M],\
self.y_coef_M_ej[0][i_Z][i_M], self.y_coef_M_ej[1][i_Z][i_M] =\
self.__get_inter_coef_M(self.inter_M_points[i_M],\
self.inter_M_points[i_M+1], yields_low, yields_upp,\
m_ej_low, m_ej_upp, yields_ej_low, yields_ej_upp)
# Fill the y_coef_Z_xM arrays
# For each interpolation lower-metallicity point ..
for i_Z in range(self.nb_inter_Z_points-1):
# For each interpolation lower-mass bin point ..
for i_M in range(self.nb_inter_M_points-1):
# If radioactive table ..
if is_radio:
# Get the interpolation coefficients a_Z, b_Z for a_M
self.y_coef_Z_aM_radio[0][i_Z][i_M], self.y_coef_Z_aM_radio[1][i_Z][i_M],\
dummy, dummy =\
self.__get_inter_coef_Z(self.y_coef_M_radio[0][i_Z][i_M],\
self.y_coef_M_radio[0][i_Z+1][i_M], self.y_coef_M_ej[0][i_Z][i_M],\
self.y_coef_M_ej[0][i_Z+1][i_M], self.inter_Z_points[i_Z],\
self.inter_Z_points[i_Z+1])
# Get the interpolation coefficients a_Z, b_Z for b_M
self.y_coef_Z_bM_radio[0][i_Z][i_M], self.y_coef_Z_bM_radio[1][i_Z][i_M],\
dummy, dummy =\
self.__get_inter_coef_Z(self.y_coef_M_radio[1][i_Z][i_M],\
self.y_coef_M_radio[1][i_Z+1][i_M], self.y_coef_M_ej[1][i_Z][i_M],\
self.y_coef_M_ej[1][i_Z+1][i_M], self.inter_Z_points[i_Z],\
self.inter_Z_points[i_Z+1])
# If stable table ..
else:
# Get the interpolation coefficients a_Z, b_Z for a_M
self.y_coef_Z_aM[0][i_Z][i_M], self.y_coef_Z_aM[1][i_Z][i_M],\
self.y_coef_Z_aM_ej[0][i_Z][i_M], self.y_coef_Z_aM_ej[1][i_Z][i_M] =\
self.__get_inter_coef_Z(self.y_coef_M[0][i_Z][i_M],\
self.y_coef_M[0][i_Z+1][i_M], self.y_coef_M_ej[0][i_Z][i_M],\
self.y_coef_M_ej[0][i_Z+1][i_M], self.inter_Z_points[i_Z],\
self.inter_Z_points[i_Z+1])
# Get the interpolation coefficients a_Z, b_Z for b_M
self.y_coef_Z_bM[0][i_Z][i_M], self.y_coef_Z_bM[1][i_Z][i_M],\
self.y_coef_Z_bM_ej[0][i_Z][i_M], self.y_coef_Z_bM_ej[1][i_Z][i_M] =\
self.__get_inter_coef_Z(self.y_coef_M[1][i_Z][i_M],\
self.y_coef_M[1][i_Z+1][i_M], self.y_coef_M_ej[1][i_Z][i_M],\
self.y_coef_M_ej[1][i_Z+1][i_M], self.inter_Z_points[i_Z],\
self.inter_Z_points[i_Z+1])
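# Illustrative sketch (not part of the calculation): for a star of mass M
# and metallicity Z falling in the metallicity/mass bin (i_Z, i_M), the
# mass-interpolation coefficients are first rebuilt from the Z coefficients
# and then applied to M,
#
#   lgZ = np.log10(Z)
#   a_M = self.y_coef_Z_aM[0][i_Z][i_M]*lgZ + self.y_coef_Z_aM[1][i_Z][i_M]
#   b_M = self.y_coef_Z_bM[0][i_Z][i_M]*lgZ + self.y_coef_Z_bM[1][i_Z][i_M]
#   yields_at_M_Z = 10**(a_M * M + b_M)
#
# The corresponding _ej coefficient arrays play the same role for the
# total ejected mass.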
##############################################
# Declare Interpolation Arrays #
##############################################
def __declare_interpolation_arrays(self):
'''
Declare the arrays that will contain the interpolation
coefficients used in the chemical evolution calculation.
'''
# Non-zero metallicity models
# ===========================
# Create the stellar mass and lifetime points in between
# which there will be interpolations
self.__create_inter_M_points()
self.__create_inter_lifetime_points()
# Create the stellar metallicity points in between
# which there will be interpolations
self.inter_Z_points = sorted(self.Z_table)
self.nb_inter_Z_points = len(self.inter_Z_points)
# Declare the array containing the coefficients for
# the yields interpolation between masses (a_M, b_M)
self.y_coef_M = np.zeros((2, self.nb_Z_table,\
self.nb_inter_M_points-1, self.nb_isotopes))
# Declare the array containing the coefficients for
# the total-mass-ejected interpolation between masses (a_ej, b_ej)
self.y_coef_M_ej = np.zeros((2, self.nb_Z_table, self.nb_inter_M_points-1))
# Declare the array containing the coefficients for
# the yields interpolation between metallicities (a_Z, b_Z)
self.y_coef_Z_aM = np.zeros((2, self.nb_Z_table-1,\
self.nb_inter_M_points-1, self.nb_isotopes))
self.y_coef_Z_bM = np.zeros((2, self.nb_Z_table-1,\
self.nb_inter_M_points-1, self.nb_isotopes))
# Declare the array containing the coefficients for
# the total-mass-ejected interpolation between metallicities (a_ej, b_ej)
self.y_coef_Z_aM_ej = np.zeros((2, self.nb_Z_table-1, self.nb_inter_M_points-1))
self.y_coef_Z_bM_ej = np.zeros((2, self.nb_Z_table-1, self.nb_inter_M_points-1))
# Declare the array containing the coefficients for
# the lifetime interpolation between masses (a_M, b_M)
self.tau_coef_M = np.zeros((2, self.nb_Z_table, self.nb_M_table-1))
self.tau_coef_M_inv = np.zeros((2, self.nb_Z_table, self.nb_inter_lifetime_points-1))
# Declare the array containing the coefficients for
# the lifetime interpolation between metallicities (a_Z, b_Z)
self.tau_coef_Z_aM = np.zeros((2, self.nb_Z_table-1, self.nb_M_table-1))
self.tau_coef_Z_bM = np.zeros((2, self.nb_Z_table-1, self.nb_M_table-1))
self.tau_coef_Z_aM_inv = np.zeros((2, self.nb_Z_table-1, self.nb_inter_lifetime_points-1))
self.tau_coef_Z_bM_inv = np.zeros((2, self.nb_Z_table-1, self.nb_inter_lifetime_points-1))
# Zero metallicity models
# =======================
# Create the stellar mass and lifetime points in between
# which there will be interpolations
self.__create_inter_M_points_pop3()
self.__create_inter_lifetime_points_pop3()
# Declare the array containing the coefficients for
# the PopIII yields interpolation between masses (a_M, b_M)
self.y_coef_M_pop3 = np.zeros((2,\
self.nb_inter_M_points_pop3-1, self.nb_isotopes))
# Declare the array containing the coefficients for
# the PopIII total-mass-ejected interpolation between masses (a_ej, b_ej)
self.y_coef_M_ej_pop3 = np.zeros((2, self.nb_inter_M_points_pop3-1))
# Declare the array containing the coefficients for
# the lifetime interpolation between masses (a_M, b_M)
self.tau_coef_M_pop3 = np.zeros((2, self.nb_M_table_pop3-1))
self.tau_coef_M_pop3_inv = np.zeros((2, self.nb_inter_lifetime_points_pop3-1))
# Radioactive isotopes (non-zero metallicity)
# ===========================================
if (self.len_decay_file > 0 or self.use_decay_module) and \
len(self.table_radio) > 0:
# Declare the array containing the coefficients for
# the yields interpolation between masses (a_M, b_M)
self.y_coef_M_radio = np.zeros((2, self.nb_Z_table,\
self.nb_inter_M_points-1, self.nb_radio_iso))
# Declare the array containing the coefficients for
# the yields interpolation between metallicities (a_Z, b_Z)
self.y_coef_Z_aM_radio = np.zeros((2, self.nb_Z_table-1,\
self.nb_inter_M_points-1, self.nb_radio_iso))
self.y_coef_Z_bM_radio = np.zeros((2, self.nb_Z_table-1,\
self.nb_inter_M_points-1, self.nb_radio_iso))
##############################################
# Create Inter M Points Pop3 #
##############################################
def __create_inter_M_points_pop3(self):
'''
Create the boundary stellar masses array representing
the mass points in between which there will be yields
interpolations. This is for massive PopIII stars.
'''
# Initialize the array
self.inter_M_points_pop3 = copy.copy(self.M_table_pop3)
# Add the lower and upper IMF yields range limits
if not self.imf_yields_range_pop3[0] in self.M_table_pop3:
self.inter_M_points_pop3.append(self.imf_yields_range_pop3[0])
if not self.imf_yields_range_pop3[1] in self.M_table_pop3:
self.inter_M_points_pop3.append(self.imf_yields_range_pop3[1])
# Remove masses that are below or beyond the IMF yields range
len_temp = len(self.inter_M_points_pop3)
for i_m in range(len_temp):
ii_m = len_temp - i_m - 1
if self.inter_M_points_pop3[ii_m] < self.imf_yields_range_pop3[0] or\
self.inter_M_points_pop3[ii_m] > self.imf_yields_range_pop3[1]:
self.inter_M_points_pop3.remove(self.inter_M_points_pop3[ii_m])
# Sort the list of masses
self.inter_M_points_pop3 = sorted(self.inter_M_points_pop3)
self.inter_M_points_pop3_tree = Bin_tree(self.inter_M_points_pop3)
# Calculate the number of interpolation mass-points
self.nb_inter_M_points_pop3 = len(self.inter_M_points_pop3)
##############################################
# Create Inter M Points #
##############################################
def __create_inter_M_points(self):
'''
Create the boundary stellar masses array representing
the mass points in between which there will be yields
interpolations. This is for massive and AGB stars.
'''
# Initialize the array
self.inter_M_points = copy.copy(self.M_table)
# Add the lower and upper IMF yields range limits
if not self.imf_yields_range[0] in self.M_table:
self.inter_M_points.append(self.imf_yields_range[0])
if not self.imf_yields_range[1] in self.M_table:
self.inter_M_points.append(self.imf_yields_range[1])
# Add the transition mass between AGB and massive stars
if not self.transitionmass in self.M_table:
self.inter_M_points.append(self.transitionmass)
# Remove masses that are below or above the IMF yields range
len_temp = len(self.inter_M_points)
for i_m in range(len_temp):
ii_m = len_temp - i_m - 1
if self.inter_M_points[ii_m] < self.imf_yields_range[0] or\
self.inter_M_points[ii_m] > self.imf_yields_range[1]:
self.inter_M_points.remove(self.inter_M_points[ii_m])
# Sort the list of masses
self.inter_M_points = sorted(self.inter_M_points)
self.inter_M_points_tree = Bin_tree(self.inter_M_points)
# Calculate the number of interpolation mass-points
self.nb_inter_M_points = len(self.inter_M_points)
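# Worked example (values chosen for illustration): with table masses
# [1, 1.65, 2, 3, 4, 5, 6, 7, 12, 15, 20, 25], an IMF yields range of
# [1, 30] and a transition mass of 8, the interpolation grid becomes
#   [1, 1.65, 2, 3, 4, 5, 6, 7, 8, 12, 15, 20, 25, 30]
# i.e. the table masses inside the range plus the IMF limits and the
# AGB/massive-star transition mass.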
##############################################
# Get Y Low Upp Pop3 #
##############################################
def __get_y_low_upp_pop3(self, i_M):
'''
Get the lower and upper boundary yields in between
which there will be a yields interpolation. This is
for massive PopIII star yields.
Argument
========
i_M : Index of the lower mass limit where the
interpolation occurs. This is taken from
the self.inter_M_points array.
'''
# If need to extrapolate on the low-mass end ..
# =============================================
if self.inter_M_points_pop3[i_M] < self.M_table_pop3[0]:
# Copy the two least massive PopIII star yields
y_tables_0 = self.ytables_pop3.get(\
Z=0.0, M=self.M_table_pop3[0], quantity='Yields',\
isotopes=self.history.isotopes)
y_tables_1 = self.ytables_pop3.get(\
Z=0.0, M=self.M_table_pop3[1], quantity='Yields',\
isotopes=self.history.isotopes)
# Extrapolate the lower boundary
yields_low = self.scale_yields_to_M_ej(self.M_table_pop3[0],\
self.M_table_pop3[1], y_tables_0, y_tables_1, \
self.inter_M_points_pop3[i_M], y_tables_0, sum(y_tables_0))
# Take lowest-mass model for the upper boundary
yields_upp = y_tables_0
# Set the yields and mass for total-mass-ejected interpolation
m_ej_low, m_ej_upp, yields_ej_low, yields_ej_upp = \
self.M_table_pop3[0], self.M_table_pop3[1], y_tables_0, y_tables_1
# If need to extrapolate on the high-mass end ..
# ==============================================
elif self.inter_M_points_pop3[i_M+1] > self.M_table_pop3[-1]:
# Take the highest-mass model for the lower boundary
yields_low = self.ytables_pop3.get(Z=0.0,\
M=self.M_table_pop3[-1], quantity='Yields',\
isotopes=self.history.isotopes)
# Extrapolate the upper boundary
yields_upp = self.extrapolate_high_mass(\
self.ytables_pop3, 0.0, self.inter_M_points_pop3[i_M+1])
# Set the yields and mass for total-mass-ejected interpolation
m_ej_low, m_ej_upp, yields_ej_low, yields_ej_upp = \
self.inter_M_points_pop3[i_M], self.inter_M_points_pop3[i_M+1],\
yields_low, yields_upp
# If the mass point is the first one, is higher than the
# least massive mass in the yields table, but is not part
# of the yields table ..
# =======================================================
elif i_M == 0 and not self.inter_M_points_pop3[i_M] in self.M_table_pop3:
# Assign the upper-mass model
yields_upp = self.ytables_pop3.get(Z=0.0,\
M=self.inter_M_points_pop3[i_M+1], quantity='Yields',\
isotopes=self.history.isotopes)
# Interpolate the lower-mass model
i_M_upp = self.M_table_pop3.index(self.inter_M_points_pop3[i_M+1])
aa, bb, dummy, dummy = self.__get_inter_coef_M(self.M_table_pop3[i_M_upp-1],\
self.M_table_pop3[i_M_upp], self.ytables_pop3.get(Z=0.0,\
M=self.M_table_pop3[i_M_upp-1], quantity='Yields',\
isotopes=self.history.isotopes), yields_upp,\
1.0, 2.0, yields_upp, yields_upp)
yields_low = 10**(aa * self.inter_M_points_pop3[i_M] + bb)
# Set the yields and mass for total-mass-ejected interpolation
i_M_ori = self.M_table_pop3.index(self.inter_M_points_pop3[i_M+1])
m_ej_low, m_ej_upp, yields_ej_low, yields_ej_upp = \
self.M_table_pop3[i_M_ori-1], self.inter_M_points_pop3[i_M+1],\
self.ytables_pop3.get(Z=0.0, M=self.M_table_pop3[i_M_ori-1],\
quantity='Yields', isotopes=self.history.isotopes), yields_upp
# If the mass point is the last one, is lower than the
# most massive mass in the yields table, but is not part
# of the yields table ..
# ======================================================
elif i_M == (self.nb_inter_M_points_pop3-2) and \
not self.inter_M_points_pop3[i_M+1] in self.M_table_pop3:
# Assign the lower-mass model
yields_low = self.ytables_pop3.get(Z=0.0,\
M=self.inter_M_points_pop3[i_M], quantity='Yields',\
isotopes=self.history.isotopes)
# Interpolate the upper-mass model
i_M_low = self.M_table_pop3.index(self.inter_M_points_pop3[i_M])
aa, bb, dummy, dummy = self.__get_inter_coef_M(self.M_table_pop3[i_M_low],\
self.M_table_pop3[i_M_low+1], yields_low, self.ytables_pop3.get(\
Z=0.0, M=self.M_table_pop3[i_M_low+1],quantity='Yields',\
isotopes=self.history.isotopes),\
1.0, 2.0, yields_low, yields_low)
yields_upp = 10**(aa * self.inter_M_points_pop3[i_M+1] + bb)
# Set the yields and mass for total-mass-ejected interpolation
i_M_ori = self.M_table_pop3.index(self.inter_M_points_pop3[i_M])
m_ej_low, m_ej_upp, yields_ej_low, yields_ej_upp = \
self.inter_M_points_pop3[i_M], self.M_table_pop3[i_M_ori+1],\
yields_low, self.ytables_pop3.get(Z=0.0,\
M=self.M_table_pop3[i_M_ori+1], quantity='Yields',\
isotopes=self.history.isotopes)
# If this is an interpolation between two models
# originally in the yields table ..
# ==============================================
else:
# Get the original models
yields_low = self.ytables_pop3.get(Z=0.0,\
M=self.inter_M_points_pop3[i_M], quantity='Yields',\
isotopes=self.history.isotopes)
yields_upp = self.ytables_pop3.get(Z=0.0,\
M=self.inter_M_points_pop3[i_M+1], quantity='Yields',\
isotopes=self.history.isotopes)
# Set the yields and mass for total-mass-ejected interpolation
m_ej_low, m_ej_upp, yields_ej_low, yields_ej_upp = \
self.inter_M_points_pop3[i_M], self.inter_M_points_pop3[i_M+1],\
yields_low, yields_upp
# Return the yields for interpolation
return yields_low, yields_upp, m_ej_low, m_ej_upp, \
yields_ej_low, yields_ej_upp
##############################################
# Get Y Low Upp #
##############################################
def __get_y_low_upp(self, i_Z, i_M, is_radio=False):
'''
Get the lower and upper boundary yields in between
which there will be a yields interpolation. This is
for massive and AGB star yields.
Argument
========
i_Z : Metallicity index of the yields table
i_M : Index of the lower mass limit where the
interpolation occurs. This is taken from
the self.inter_M_points array.
'''
# If need to extrapolate on the low-mass end ..
# =============================================
if self.inter_M_points[i_M] < self.M_table[0]:
# Copy the two least massive AGB star yields
y_tables_0 = self.ytables.get(\
Z=self.Z_table[i_Z], M=self.M_table[0], quantity='Yields',\
isotopes=self.history.isotopes)
y_tables_1 = self.ytables.get(\
Z=self.Z_table[i_Z], M=self.M_table[1], quantity='Yields',\
isotopes=self.history.isotopes)
# If radioactive yields table ..
if is_radio:
# Get radioactive yields
y_tables_0_radio = self.ytables_radio.get(\
Z=self.Z_table[i_Z], M=self.M_table[0], quantity='Yields',\
isotopes=self.radio_iso)
# Extrapolate the lower boundary (using stable yields total mass)
yields_low = self.scale_yields_to_M_ej(self.M_table[0],\
self.M_table[1], y_tables_0, y_tables_1, \
self.inter_M_points[i_M], y_tables_0_radio, \
sum(y_tables_0))
# Take lowest-mass model for the upper boundary
yields_upp = y_tables_0_radio
# If stable yields table ..
else:
# Extrapolate the lower boundary
yields_low = self.scale_yields_to_M_ej(self.M_table[0],\
self.M_table[1], y_tables_0, y_tables_1, \
self.inter_M_points[i_M], y_tables_0, sum(y_tables_0))
# Take lowest-mass model for the upper boundary
yields_upp = y_tables_0
# Set the yields and mass for total-mass-ejected interpolation
m_ej_low, m_ej_upp, yields_ej_low, yields_ej_upp = \
self.M_table[0], self.M_table[1], y_tables_0, y_tables_1
# If the upper boundary is the transition mass ..
# ===============================================
elif self.inter_M_points[i_M+1] == self.transitionmass:
# Keep the lower-boundary yields
yields_low_stable = self.ytables.get(Z=self.Z_table[i_Z],\
M=self.inter_M_points[i_M], quantity='Yields',\
isotopes=self.history.isotopes)
# If the transition mass is part of the yields table
if self.transitionmass in self.M_table:
# Prepare to use the transition-mass model
i_M_add = 1
# Set the yields and mass for total-mass-ejected interpolation
m_ej_low, m_ej_upp, yields_ej_low, yields_ej_upp = \
self.inter_M_points[i_M], self.inter_M_points[i_M+1], \
yields_low_stable, self.ytables.get(Z=self.Z_table[i_Z],\
M=self.inter_M_points[i_M+1], quantity='Yields',\
isotopes=self.history.isotopes)
# If the transition mass is not part of the yields table
else:
# Prepare to use the model after the transition mass
i_M_add = 2
# Set the yields and mass for total-mass-ejected interpolation
m_ej_low, m_ej_upp, yields_ej_low, yields_ej_upp = \
self.inter_M_points[i_M], self.inter_M_points[i_M+2], \
yields_low_stable, self.ytables.get(Z=self.Z_table[i_Z],\
M=self.inter_M_points[i_M+2], quantity='Yields',\
isotopes=self.history.isotopes)
# Copy the upper-boundary model used to scale the yields
yields_tr_upp = self.ytables.get(Z=self.Z_table[i_Z],\
M=self.inter_M_points[i_M+i_M_add], quantity='Yields',\
isotopes=self.history.isotopes)
# If radioactive table
if is_radio:
# Keep the lower-boundary yields
yields_low = self.ytables_radio.get(Z=self.Z_table[i_Z],\
M=self.inter_M_points[i_M], quantity='Yields',\
isotopes=self.radio_iso)
# If stable table
else:
# Keep the lower-boundary yields (stable)
yields_low = yields_low_stable
# Scale the most massive AGB yields up to the transition mass for the upper boundary
yields_upp = self.scale_yields_to_M_ej(\
self.inter_M_points[i_M], self.inter_M_points[i_M+i_M_add],\
yields_low_stable, yields_tr_upp, self.transitionmass, \
yields_low, sum(yields_low_stable))
# If the lower boundary is the transition mass ..
# ===============================================
elif self.inter_M_points[i_M] == self.transitionmass:
# Keep the upper-boundary yields
yields_upp_stable = self.ytables.get(Z=self.Z_table[i_Z],\
M=self.inter_M_points[i_M+1], quantity='Yields',\
isotopes=self.history.isotopes)
# If the transition mass is part of the yields table
if self.transitionmass in self.M_table:
# Prepare to use the transition-mass model
i_M_add = 0
# Set the yields and mass for total-mass-ejected interpolation
m_ej_low, m_ej_upp, yields_ej_low, yields_ej_upp = \
self.inter_M_points[i_M], self.inter_M_points[i_M+1], \
self.ytables.get(Z=self.Z_table[i_Z],\
M=self.inter_M_points[i_M], quantity='Yields',\
isotopes=self.history.isotopes), yields_upp_stable
# If the transition mass is not part of the yields table
else:
# Prepare to use the model before the transition mass
i_M_add = -1
# Set the yields and mass for total-mass-ejected interpolation
m_ej_low, m_ej_upp, yields_ej_low, yields_ej_upp = \
self.inter_M_points[i_M-1], self.inter_M_points[i_M+1], \
self.ytables.get(Z=self.Z_table[i_Z],\
M=self.inter_M_points[i_M-1], quantity='Yields',\
isotopes=self.history.isotopes), yields_upp_stable
# Copy the lower-boundary model used to scale the yields
yields_tr_low = self.ytables.get(Z=self.Z_table[i_Z],\
M=self.inter_M_points[i_M+i_M_add], quantity='Yields',\
isotopes=self.history.isotopes)
# If radioactive table
if is_radio:
# Keep the upper-boundary yields
yields_upp = self.ytables_radio.get(Z=self.Z_table[i_Z],\
M=self.inter_M_points[i_M+1], quantity='Yields',\
isotopes=self.radio_iso)
# If stable table
else:
# Keep the upper-boundary yields (stable)
yields_upp = yields_upp_stable
# Scale the lowest-mass massive-star yields down to the transition mass for the lower boundary
yields_low = self.scale_yields_to_M_ej(\
self.inter_M_points[i_M+i_M_add], self.inter_M_points[i_M+1],\
yields_tr_low, yields_upp_stable, self.transitionmass, \
yields_upp, sum(yields_upp_stable))
# If need to extrapolate on the high-mass end ..
# ==============================================
elif self.inter_M_points[i_M+1] > self.M_table[-1]:
# If radioactive table ..
if is_radio:
# Take the highest-mass model for the lower boundary
yields_low = self.ytables_radio.get(Z=self.Z_table[i_Z],\
M=self.M_table[-1], quantity='Yields',\
isotopes=self.radio_iso)
# Extrapolate the upper boundary
yields_upp = self.extrapolate_high_mass(self.ytables_radio,\
self.Z_table[i_Z], self.inter_M_points[i_M+1],\
is_radio = is_radio)
# If stable table ..
else:
# Take the highest-mass model for the lower boundary
yields_low = self.ytables.get(Z=self.Z_table[i_Z],\
M=self.M_table[-1], quantity='Yields',\
isotopes=self.history.isotopes)
# Extrapolate the upper boundary
yields_upp = self.extrapolate_high_mass(self.ytables,\
self.Z_table[i_Z], self.inter_M_points[i_M+1])
# Set the yields and mass for total-mass-ejected interpolation
m_ej_low, m_ej_upp, yields_ej_low, yields_ej_upp = \
self.inter_M_points[i_M], self.inter_M_points[i_M+1],\
yields_low, yields_upp
# If the mass point is the first one, is higher than the
# least massive mass in the yields table, but is not part
# of the yields table ..
# =======================================================
elif i_M == 0 and not self.inter_M_points[i_M] in self.M_table:
# Use the appropriate yield tables ..
if is_radio:
the_ytables = self.ytables_radio
the_isotopes = self.radio_iso
else:
the_ytables = self.ytables
the_isotopes = self.history.isotopes
# Assign the upper-mass model
yields_upp = the_ytables.get(Z=self.Z_table[i_Z],\
M=self.inter_M_points[i_M+1], quantity='Yields',\
isotopes=the_isotopes)
# Interpolate the lower-mass model
i_M_upp = self.M_table.index(self.inter_M_points[i_M+1])
aa, bb, dummy, dummy = self.__get_inter_coef_M(self.M_table[i_M_upp-1],\
self.M_table[i_M_upp], the_ytables.get(Z=self.Z_table[i_Z],\
M=self.M_table[i_M_upp-1], quantity='Yields',\
isotopes=the_isotopes), yields_upp,\
1.0, 2.0, yields_upp, yields_upp)
yields_low = 10**(aa * self.inter_M_points[i_M] + bb)
# Set the yields and mass for total-mass-ejected interpolation
if not is_radio:
i_M_ori = self.M_table.index(self.inter_M_points[i_M+1])
m_ej_low, m_ej_upp, yields_ej_low, yields_ej_upp = \
self.M_table[i_M_ori-1], self.inter_M_points[i_M+1],\
self.ytables.get(Z=self.Z_table[i_Z],\
M=self.M_table[i_M_ori-1], quantity='Yields',\
isotopes=the_isotopes), yields_upp
# If the mass point is the last one, is lower than the
# most massive mass in the yields table, but is not part
# of the yields table ..
# ======================================================
elif i_M == (self.nb_inter_M_points-2) and \
not self.inter_M_points[i_M+1] in self.M_table:
# Use the appropriate yield tables ..
if is_radio:
the_ytables = self.ytables_radio
the_isotopes = self.radio_iso
else:
the_ytables = self.ytables
the_isotopes = self.history.isotopes
# Assign the lower-mass model
yields_low = the_ytables.get(Z=self.Z_table[i_Z],\
M=self.inter_M_points[i_M], quantity='Yields',\
isotopes=the_isotopes)
# Interpolate the upper-mass model
i_M_low = self.M_table.index(self.inter_M_points[i_M])
aa, bb, dummy, dummy = self.__get_inter_coef_M(self.M_table[i_M_low],\
self.M_table[i_M_low+1], yields_low, the_ytables.get(\
Z=self.Z_table[i_Z], M=self.M_table[i_M_low+1],quantity='Yields',\
isotopes=the_isotopes),\
1.0, 2.0, yields_low, yields_low)
yields_upp = 10**(aa * self.inter_M_points[i_M+1] + bb)
# Set the yields and mass for total-mass-ejected interpolation
if not is_radio:
i_M_ori = self.M_table.index(self.inter_M_points[i_M])
m_ej_low, m_ej_upp, yields_ej_low, yields_ej_upp = \
self.inter_M_points[i_M], self.M_table[i_M_ori+1],\
yields_low, self.ytables.get(Z=self.Z_table[i_Z],\
M=self.M_table[i_M_ori+1], quantity='Yields',\
isotopes=the_isotopes)
# If this is an interpolation between two models
# originally in the yields table ..
else:
# Use the appropriate yield tables ..
if is_radio:
the_ytables = self.ytables_radio
the_isotopes = self.radio_iso
else:
the_ytables = self.ytables
the_isotopes = self.history.isotopes
# Get the original models
yields_low = the_ytables.get(Z=self.Z_table[i_Z],\
M=self.inter_M_points[i_M], quantity='Yields',\
isotopes=the_isotopes)
yields_upp = the_ytables.get(Z=self.Z_table[i_Z],\
M=self.inter_M_points[i_M+1], quantity='Yields',\
isotopes=the_isotopes)
# Set the yields and mass for total-mass-ejected interpolation
if not is_radio:
m_ej_low, m_ej_upp, yields_ej_low, yields_ej_upp = \
self.inter_M_points[i_M], self.inter_M_points[i_M+1],\
yields_low, yields_upp
# Return the yields for interpolation
if is_radio:
return yields_low, yields_upp, 1.0, 2.0, 1.0, 1.0
else:
return yields_low, yields_upp, m_ej_low, m_ej_upp, \
yields_ej_low, yields_ej_upp
##############################################
# Get Inter Coef Yields M #
##############################################
def __get_inter_coef_M(self, m_low, m_upp, yields_low, yields_upp,\
m_low_ej, m_upp_ej, yields_low_ej, yields_upp_ej):
'''
Calculate the interpolation coefficients for interpolating
in between two given yields of different mass.
Interpolation law
=================
log10(yields) = a_M * M + b_M
M_ej = a_ej * M + b_ej
Argument
========
m_low : Lower stellar mass boundary (yields)
m_upp : Upper stellar mass boundary (yields)
yields_low : Yields associated to m_low (yields)
yields_upp : Yields associated to m_upp (yields)
m_low_ej : Lower stellar mass boundary (total mass)
m_upp_ej : Upper stellar mass boundary (total mass)
yields_low_ej : Yields associated to m_low (total mass)
yields_upp_ej : Yields associated to m_upp (total mass)
'''
# Convert zeros into 1.0e-30
for i_iso in range(len(yields_upp)):
if yields_upp[i_iso] == 0.0:
yields_upp[i_iso] = 1.0e-30
for i_iso in range(len(yields_low)):
if yields_low[i_iso] == 0.0:
yields_low[i_iso] = 1.0e-30
# Warn before dividing if the mass boundaries are identical
if m_upp == m_low:
print('Problem in __get_inter_coef_M', m_upp, m_low)
# Calculate the coefficients a_M
np_log10_yields_upp = np.log10(yields_upp)
the_a_M = (np_log10_yields_upp - np.log10(yields_low)) /\
(m_upp - m_low)
# Calculate the coefficients b_M
the_b_M = np_log10_yields_upp - the_a_M * m_upp
# Calculate the coefficients a_ej
sum_yields_upp_ej = sum(yields_upp_ej)
the_a_ej = (sum_yields_upp_ej - sum(yields_low_ej)) /\
(m_upp_ej - m_low_ej)
# Calculate the coefficients b_ej
the_b_ej = sum_yields_upp_ej - the_a_ej * m_upp_ej
# Return the coefficients arrays
return the_a_M, the_b_M, the_a_ej, the_b_ej
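# Worked example (numbers chosen for illustration): for one isotope with
# yields_low = 1e-3 at m_low = 10 and yields_upp = 1e-2 at m_upp = 20,
#   a_M = (log10(1e-2) - log10(1e-3)) / (20 - 10) = 0.1
#   b_M = log10(1e-2) - 0.1*20 = -4.0
# so the interpolated yield at M = 15 is 10**(0.1*15 - 4.0) ~ 3.2e-3,
# the geometric mean of the two boundary yields.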
##############################################
# Get Inter Coef Yields M Tau #
##############################################
def __get_inter_coef_M_tau(self, m_low, m_upp, tau_low, tau_upp):
'''
Calculate the interpolation coefficients for interpolating
in between two given lifetimes at different mass.
Interpolation law
=================
log10(tau) = a_M * log10(M) + b_M
Argument
========
m_low : Lower stellar mass boundary
m_upp : Upper stellar mass boundary
tau_low : Lifetime associated to m_low
tau_upp : Lifetime associated to m_upp
'''
# Warn before dividing if the mass boundaries are identical
if m_upp == m_low:
print('Problem in __get_inter_coef_M_tau', m_upp, m_low)
# Calculate the coefficients a_M
np_log10_tau_upp = np.log10(tau_upp)
np_log10_m_upp = np.log10(m_upp)
the_a_M = (np_log10_tau_upp - np.log10(tau_low)) /\
(np_log10_m_upp - np.log10(m_low))
# Calculate the coefficients b_M
the_b_M = np_log10_tau_upp - the_a_M * np_log10_m_upp
# Return the coefficients arrays
return the_a_M, the_b_M
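# Worked example (numbers chosen for illustration): with lifetimes
# tau_low = 1e10 yr at m_low = 1 Msun and tau_upp = 1e7 yr at m_upp = 10 Msun,
#   a_M = (log10(1e7) - log10(1e10)) / (log10(10) - log10(1)) = -3.0
#   b_M = log10(1e7) - (-3.0)*log10(10) = 10.0
# so log10(tau) = -3*log10(M) + 10, i.e. tau ~ 1e10 * M**(-3) yr.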
##############################################
# Scale Yields to M_ej #
##############################################
def scale_yields_to_M_ej(self, m_low, m_upp, yields_low, yields_upp,\
the_m_scale, the_yields, the_yields_m_tot):
'''
Scale yields according to the total ejected mass vs initial
mass relation. This will keep the relative chemical composition
of the yields.
Interpolation law
=================
M_ej = a * M_initial + b
Argument
========
m_low : Initial mass of the lower-mass boundary model
m_upp : Initial mass of the upper-mass boundary model
yields_low : Yields of the lower-mass boundary model
yields_upp : Yields of the upper-mass boundary model
the_m_scale : Initial mass to which the_yields will be scaled
the_yields : Yields that need to be scaled
the_yields_m_tot : Total mass of the yields that need to be scaled
'''
# Get the coefficient for the total-mass-ejected interpolation
m_ej_low = sum(yields_low)
m_ej_upp = sum(yields_upp)
a_temp = (m_ej_upp - m_ej_low) / (m_upp - m_low)
b_temp = m_ej_upp - a_temp * m_upp
# Calculate the interpolated (or extrapolated) total ejected mass
m_ej_temp = a_temp * the_m_scale + b_temp
if m_ej_temp < 0.0:
m_ej_temp = 0.0
# Return the scaled yields
return np.array(the_yields) * m_ej_temp / the_yields_m_tot
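# Worked example (numbers chosen for illustration): if the 10 and 20 Msun
# models eject 8 and 16 Msun in total, then a = 0.8 and b = 0, so a star
# of the_m_scale = 25 Msun is assigned M_ej = 20 Msun and the_yields is
# returned multiplied by 20 / the_yields_m_tot, preserving its relative
# isotopic composition.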
##############################################
# Extrapolate High Mass #
##############################################
def extrapolate_high_mass(self, table_ehm, Z_ehm, m_extra, is_radio=False):
'''
Extrapolate yields for stellar masses larger than what
is provided in the yields table.
Extrapolation choices (input parameter)
=====================
copy : This will apply the yields of the most massive model
to all more massive stars.
scale : This will scale the yields of the most massive model
using the relation between the total ejected mass and
the initial stellar mass. The latter relation is taken
from the interpolation of the two most massive models.
extrapolate : This will extrapolate the yields of the most massive
model using the interpolation coefficients taken from
the interpolation of the two most massive models.
Arguments
=========
table_ehm : Yields table
Z_ehm : Metallicity of the yields table
m_extra : Mass to which the yields will be extrapolated
is_radio : Whether these are radioactive yields
'''
# Get the two most massive masses of the table
if Z_ehm == 0.0:
mass_m2 = self.M_table_pop3[-2]
mass_m1 = self.M_table_pop3[-1]
else:
mass_m2 = self.M_table[-2]
mass_m1 = self.M_table[-1]
# Take into account radio
if is_radio:
the_isotopes = self.radio_iso
else:
the_isotopes = self.history.isotopes
# Copy the yields of most massive model
y_tables_m1 = table_ehm.get(Z=Z_ehm, M=mass_m1, quantity='Yields',\
isotopes=the_isotopes)
# If the yields are copied ..
if self.high_mass_extrapolation == 'copy':
# Return the yields of the most massive model
return y_tables_m1
# If the yields are scaled ..
if self.high_mass_extrapolation == 'scale':
# Make sure we use the stable yields for scaling (table_ehm could be radio)
y_stable_m1 = self.ytables.get(Z=Z_ehm, M=mass_m1, quantity='Yields',\
isotopes=the_isotopes)
y_stable_m2 = self.ytables.get(Z=Z_ehm, M=mass_m2, quantity='Yields',\
isotopes=the_isotopes)
# Calculate the scaled yields
y_scaled = self.scale_yields_to_M_ej(mass_m2,\
mass_m1, y_stable_m2, y_stable_m1, m_extra, y_tables_m1,\
sum(y_stable_m1))
# If the scaled total is non-positive, floor all yields to 1e-30.
# Do not set them to zero, because yields will be interpolated in log.
if sum(y_scaled) <= 0.0:
y_scaled = np.zeros(len(y_scaled))
y_scaled += 1.0e-30
# Return the scaled yields of the most massive model
return y_scaled
# If the yields are extrapolated ..
if self.high_mass_extrapolation == 'extrapolate':
# Copy the yields of the second most massive model
y_tables_m2 = table_ehm.get(Z=Z_ehm, M=mass_m2, quantity='Yields',\
isotopes=the_isotopes)
# Extrapolate the yields
the_a, the_b, the_a_ej, the_b_ej = self.__get_inter_coef_M(\
mass_m2, mass_m1, y_tables_m2, y_tables_m1,\
mass_m2, mass_m1, y_tables_m2, y_tables_m1)
y_extra = 10**(the_a * m_extra + the_b)
m_ej_extra = the_a_ej * m_extra + the_b_ej
y_extra = y_extra * m_ej_extra / sum(y_extra)
# Set non-positive yields to 1e-30. Do not set them
# to zero, because yields will be interpolated in log.
for i_yy in range(len(y_extra)):
if y_extra[i_yy] <= 0.0:
y_extra[i_yy] = 1.0e-30
# Return the extrapolated yields
return y_extra
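# Worked example for the 'scale' option (numbers chosen for illustration):
# if the two most massive models (20 and 25 Msun) eject 15 and 18 Msun in
# total, the M_ej(M_initial) relation gives a = 0.6 and b = 3, so a 30 Msun
# star is assigned M_ej = 0.6*30 + 3 = 21 Msun and the 25 Msun yields are
# rescaled by 21/18 while keeping their relative composition.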
##############################################
# Get Inter Coef Yields Z #
##############################################
def __get_inter_coef_Z(self, x_M_low, x_M_upp, \
x_M_ej_low, x_M_ej_upp, Z_low, Z_upp):
'''
Calculate the interpolation coefficients for interpolating
the mass-interpolation coefficients in between two given
metallicities.
Interpolation laws
==================
log10(yields) = a_M * M + b_M
x_M = a_Z * log10(Z) + b_Z
The function calculates a_Z and b_Z for either a_M or b_M
Argument
========
x_M_low : Lower mass-interpolation coefficient limit (yields)
x_M_upp : Upper mass-interpolation coefficient limit (yields)
x_M_ej_low : Lower mass-interpolation coefficient limit (total mass)
x_M_ej_upp : Upper mass-interpolation coefficient limit (total mass)
Z_low : Lower-metallicity limit of the interpolation
Z_upp : Upper-metallicity limit of the interpolation
'''
# Copy the lower and upper metallicities
lg_Z_low = np.log10(Z_low)
lg_Z_upp = np.log10(Z_upp)
# Calculate the coefficients a_Z and b_Z (yields)
the_a_Z = (x_M_upp - x_M_low) / (lg_Z_upp - lg_Z_low)
the_b_Z = x_M_upp - the_a_Z * lg_Z_upp
# Calculate the coefficients a_Z and b_Z (total mass)
the_a_Z_ej = (x_M_ej_upp - x_M_ej_low) / (lg_Z_upp - lg_Z_low)
the_b_Z_ej = x_M_ej_upp - the_a_Z_ej * lg_Z_upp
# Return the coefficients arrays
return the_a_Z, the_b_Z, the_a_Z_ej, the_b_Z_ej
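# Worked example (numbers chosen for illustration): for a mass coefficient
# x_M = 0.10 at Z_low = 0.001 and x_M = 0.16 at Z_upp = 0.01,
#   a_Z = (0.16 - 0.10) / (log10(0.01) - log10(0.001)) = 0.06
#   b_Z = 0.16 - 0.06*log10(0.01) = 0.28
# so at Z = 0.004 the interpolated coefficient is
#   x_M = 0.06*log10(0.004) + 0.28 ~ 0.136.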
##############################################
# Get Inter Coef Yields Z Tau #
##############################################
def __get_inter_coef_Z_tau(self, x_M_low, x_M_upp, Z_low, Z_upp):
'''
Calculate the interpolation coefficients for interpolating
the mass-interpolation lifetime coefficients in between two
given metallicities.
Interpolation laws
==================
log10(tau) = a_M * log10(M) + b_M
x_M = a_Z * Z + b_Z
The function calculates a_Z and b_Z for either a_M or b_M
Argument
========
x_M_low : Lower mass-interpolation coefficient limit
x_M_upp : Upper mass-interpolation coefficient limit
Z_low : Lower-metallicity limit of the interpolation
Z_upp : Upper-metallicity limit of the interpolation
'''
# Calculate the coefficients a_Z and b_Z (yields)
the_a_Z = (x_M_upp - x_M_low) / (Z_upp - Z_low)
the_b_Z = x_M_upp - the_a_Z * Z_upp
# Return the coefficients arrays
return the_a_Z, the_b_Z
##############################################
# Interpolate Pop3 Lifetimes #
##############################################
def __interpolate_pop3_lifetimes(self):
'''
Interpolate the mass-dependent lifetimes of PopIII stars.
This will create arrays containing interpolation coefficients.
The chemical evolution calculations will then only use these
coefficients instead of the tabulated lifetimes.
Interpolation laws
==================
Interpolation across stellar mass M
log10(tau) = a_M * log10(M) + b_M
Results
=======
a_M and b_M coefficients
------------------------
tau_coef_M_pop3[i_coef][i_M_low]
- i_coef : 0 and 1 for a_M and b_M, respectively
- i_M_low : Index of the lower mass limit where
the interpolation occurs
Note
====
self.Z_table is in decreasing order
but y_coef_... arrays have metallicities in increasing order
'''
# Fill the tau_coef_M_pop3 array
# For each interpolation lower-mass bin point ..
for i_M in range(self.nb_M_table_pop3-1):
# Get the lifetime for the lower and upper mass models
tau_low = self.ytables_pop3.get(\
M=self.M_table_pop3[i_M], Z=0.0, quantity='Lifetime')
tau_upp = self.ytables_pop3.get(\
M=self.M_table_pop3[i_M+1], Z=0.0, quantity='Lifetime')
# Get the interpolation coefficients a_M, b_M
self.tau_coef_M_pop3[0][i_M],\
self.tau_coef_M_pop3[1][i_M] =\
self.__get_inter_coef_M_tau(self.M_table_pop3[i_M],\
self.M_table_pop3[i_M+1], tau_low, tau_upp)
##############################################
# Interpolate Massive and AGB Lifetimes #
##############################################
def __interpolate_massive_and_agb_lifetimes(self):
'''
Interpolate the metallicity- and mass-dependent lifetimes
of massive and AGB stars. This will create arrays containing
interpolation coefficients. The chemical evolution calculations
will then only use these coefficients instead of the tabulated
lifetimes.
Interpolation laws
==================
Interpolation across stellar mass M
log10(tau) = a_M * log10(M) + b_M
log10(M) = a_M * log10(tau) + b_M
Interpolation of a_M and b_M across metallicity Z
x_M = a_Z * Z + b_Z
The functions first calculate a_M and b_M for each Z,
and then interpolate these coefficients across Z.
Results
=======
a_M and b_M coefficients
------------------------
tau_coef_M[i_coef][i_Z][i_M_low]
- i_coef : 0 and 1 for a_M and b_M, respectively
- i_Z : Metallicity index available in the table
- i_M_low : Index of the lower mass limit where
the interpolation occurs
a_Z and b_Z coefficients for x_M
--------------------------------
y_coef_Z_xM_tau[i_coef][i_Z_low][i_M_low]
- i_coef : 0 and 1 for a_Z and b_Z, respectively
- i_Z_low : Index of the lower metallicity limit where
the interpolation occurs
- i_M_low : Index of the lower mass limit where
the interpolation occurs
Note
====
self.Z_table is in decreasing order
but y_coef_... arrays have metallicities in increasing order
'''
# Fill the tau_coef_M array
# For each metallicity available in the yields ..
for i_Z_temp in range(self.nb_Z_table):
# Get the metallicity index in increasing order
i_Z = self.inter_Z_points.index(self.Z_table[i_Z_temp])
# For each interpolation lower-mass bin point ..
for i_M in range(self.nb_M_table-1):
# Get the lifetime for the lower and upper mass models
tau_low = self.ytables.get(M=self.M_table[i_M],\
Z=self.inter_Z_points[i_Z], quantity='Lifetime')
tau_upp = self.ytables.get(M=self.M_table[i_M+1],\
Z=self.inter_Z_points[i_Z], quantity='Lifetime')
# Get the interpolation coefficients a_M, b_M
self.tau_coef_M[0][i_Z][i_M],\
self.tau_coef_M[1][i_Z][i_M] =\
self.__get_inter_coef_M_tau(self.M_table[i_M],\
self.M_table[i_M+1], tau_low, tau_upp)
# Fill the y_coef_Z_xM_tau arrays
# For each interpolation lower-metallicity point ..
for i_Z in range(self.nb_inter_Z_points-1):
# For each interpolation lower-mass bin point ..
for i_M in range(self.nb_M_table-1):
# Get the interpolation coefficients a_Z, b_Z for a_M
self.tau_coef_Z_aM[0][i_Z][i_M],\
self.tau_coef_Z_aM[1][i_Z][i_M] =\
self.__get_inter_coef_Z_tau(self.tau_coef_M[0][i_Z][i_M],\
self.tau_coef_M[0][i_Z+1][i_M], self.inter_Z_points[i_Z],\
self.inter_Z_points[i_Z+1])
# Get the interpolation coefficients a_Z, b_Z for b_M
self.tau_coef_Z_bM[0][i_Z][i_M],\
self.tau_coef_Z_bM[1][i_Z][i_M] =\
self.__get_inter_coef_Z_tau(self.tau_coef_M[1][i_Z][i_M],\
self.tau_coef_M[1][i_Z+1][i_M], self.inter_Z_points[i_Z],\
self.inter_Z_points[i_Z+1])
##############################################
# Interpolate Pop3 M From T #
##############################################
def __interpolate_pop3_m_from_t(self):
'''
Calculate the interpolation coefficients to extract
the mass of stars based on their lifetimes.
Interpolation laws
==================
Interpolation across stellar lifetime tau
log10(M) = a_tau * log10(tau) + b_tau
Interpolation of a_M and b_M across metallicity Z
x_M = a_Z * Z + b_Z
Results
=======
a_tau and b_tau coefficients
----------------------------
tau_coef_M_pop3_inv[i_coef][i_tau_low]
- i_coef : 0 and 1 for a_tau and b_tau, respectively
- i_tau_low : Index of the lower lifetime limit where
the interpolation occurs
a_Z and b_Z coefficients for x_tau
----------------------------------
tau_coef_Z_xM_pop3_inv[i_coef][i_Z_low][i_tau_low]
- i_coef : 0 and 1 for a_Z and b_Z, respectively
- i_tau_low : Index of the lower lifetime limit where
the interpolation occurs
Note
====
self.Z_table is in decreasing order
but y_coef_... arrays have metallicities in increasing order
'''
# Declare the list of lifetimes for each PopIII mass (Z = 0)
self.lifetimes_list_pop3 = np.zeros(self.nb_M_table_pop3)
for i_M in range(self.nb_M_table_pop3):
self.lifetimes_list_pop3[i_M] = self.ytables_pop3.get(\
M=self.M_table_pop3[i_M], Z=0.0, quantity='Lifetime')
# Fill the tau_coef_M_inv array
# For each interpolation lower-lifetime bin point ..
for i_tau in range(self.nb_inter_lifetime_points_pop3-1):
# Get the mass for the lower and upper lifetimes
m_tau_low = self.__get_m_from_tau_pop3(\
self.inter_lifetime_points_pop3[i_tau])
m_tau_upp = self.__get_m_from_tau_pop3(\
self.inter_lifetime_points_pop3[i_tau+1])
# Get the interpolation coefficients a_tau, b_tau
# Here we use __get_inter_coef_M_tau, but we
# swap mass for lifetime and vice versa
self.tau_coef_M_pop3_inv[0][i_tau],\
self.tau_coef_M_pop3_inv[1][i_tau] =\
self.__get_inter_coef_M_tau(self.inter_lifetime_points_pop3[i_tau],\
self.inter_lifetime_points_pop3[i_tau+1], m_tau_low, m_tau_upp)
##############################################
# Interpolate Massive and AGB M From T #
##############################################
def __interpolate_massive_and_agb_m_from_t(self):
'''
Calculate the interpolation coefficients to extract
the mass of stars from metallicity- and mass-dependent
lifetimes. This will fix lifetime intervals that will
be common to all metallicities. This will accelerate
the mass search during the chemical evolution calculation.
Interpolation laws
==================
Interpolation across stellar lifetime tau
log10(M) = a_tau * log10(tau) + b_tau
Interpolation of a_M and b_M across metallicity Z
x_M = a_Z * Z + b_Z
Results
=======
a_tau and b_tau coefficients
----------------------------
tau_coef_M_inv[i_coef][i_Z][i_tau_low]
- i_coef : 0 and 1 for a_tau and b_tau, respectively
- i_Z : Metallicity index available in the table
- i_tau_low : Index of the lower lifetime limit where
the interpolation occurs
a_Z and b_Z coefficients for x_tau
----------------------------------
tau_coef_Z_xM_inv[i_coef][i_Z_low][i_tau_low]
- i_coef : 0 and 1 for a_Z and b_Z, respectively
- i_Z_low : Index of the lower metallicity limit where
the interpolation occurs
- i_tau_low : Index of the lower lifetime limit where
the interpolation occurs
Note
====
self.Z_table is in decreasing order
but y_coef_... arrays have metallicities in increasing order
'''
# Declare list of lifetimes for each mass at each metallicity
self.lifetimes_list = np.zeros((self.nb_Z_table,self.nb_M_table))
for i_Z in range(self.nb_Z_table):
for i_M in range(self.nb_M_table):
self.lifetimes_list[i_Z][i_M] = self.ytables.get(\
M=self.M_table[i_M], Z=self.Z_table[i_Z],\
quantity='Lifetime')
# Fill the tau_coef_M_inv array
# For each metallicity available in the yields ..
for i_Z_temp in range(self.nb_Z_table):
# Get the metallicity index in increasing order
i_Z = self.inter_Z_points.index(self.Z_table[i_Z_temp])
# For each interpolation lower-lifetime bin point ..
for i_tau in range(self.nb_inter_lifetime_points-1):
# Get the mass for the lower and upper lifetimes
m_tau_low = self.__get_m_from_tau(\
i_Z_temp, self.inter_lifetime_points[i_tau])
m_tau_upp = self.__get_m_from_tau(\
i_Z_temp, self.inter_lifetime_points[i_tau+1])
# Get the interpolation coefficients a_tau, b_tau
# Here we use __get_inter_coef_M_tau, but we
# swap mass for lifetime and vice versa
self.tau_coef_M_inv[0][i_Z][i_tau],\
self.tau_coef_M_inv[1][i_Z][i_tau] =\
self.__get_inter_coef_M_tau(self.inter_lifetime_points[i_tau],\
self.inter_lifetime_points[i_tau+1], m_tau_low, m_tau_upp)
# Fill the tau_coef_Z_inv arrays
# For each interpolation lower-metallicity point ..
for i_Z in range(self.nb_inter_Z_points-1):
# For each interpolation lower-lifetime bin point ..
for i_tau in range(self.nb_inter_lifetime_points-1):
# Get the interpolation coefficients a_Z, b_Z for a_M
self.tau_coef_Z_aM_inv[0][i_Z][i_tau],\
self.tau_coef_Z_aM_inv[1][i_Z][i_tau] =\
self.__get_inter_coef_Z_tau(self.tau_coef_M_inv[0][i_Z][i_tau],\
self.tau_coef_M_inv[0][i_Z+1][i_tau], self.inter_Z_points[i_Z],\
self.inter_Z_points[i_Z+1])
# Get the interpolation coefficients a_Z, b_Z for b_M
self.tau_coef_Z_bM_inv[0][i_Z][i_tau],\
self.tau_coef_Z_bM_inv[1][i_Z][i_tau] =\
self.__get_inter_coef_Z_tau(self.tau_coef_M_inv[1][i_Z][i_tau],\
self.tau_coef_M_inv[1][i_Z+1][i_tau], self.inter_Z_points[i_Z],\
self.inter_Z_points[i_Z+1])
##############################################
# Create Inter Lifetime Points Pop3 #
##############################################
def __create_inter_lifetime_points_pop3(self):
'''
Create the lifetime points in between which there will be
interpolations. This is for PopIII stars.
'''
# List all lifetimes for Pop III stars
self.inter_lifetime_points_pop3 = []
for i_M in range(self.nb_M_table_pop3):
the_tau = self.ytables_pop3.get(M=self.M_table_pop3[i_M],\
Z=0.0, quantity='Lifetime')
if not the_tau in self.inter_lifetime_points_pop3:
self.inter_lifetime_points_pop3.append(the_tau)
self.nb_inter_lifetime_points_pop3 = len(self.inter_lifetime_points_pop3)
# Sort the list to have lifetimes in increasing order
self.inter_lifetime_points_pop3 = sorted(self.inter_lifetime_points_pop3)
self.inter_lifetime_points_pop3_tree = Bin_tree(
self.inter_lifetime_points_pop3)
##############################################
# Create Inter Lifetime Points #
##############################################
def __create_inter_lifetime_points(self):
'''
Create the lifetime points in between which there will be
interpolations. This is for metallicity-dependent models.
'''
# List all lifetimes for all metallicities
self.inter_lifetime_points = []
for i_Z in range(self.nb_Z_table):
for i_M in range(self.nb_M_table):
the_tau = self.ytables.get(M=self.M_table[i_M],\
Z=self.Z_table[i_Z], quantity='Lifetime')
if not the_tau in self.inter_lifetime_points:
self.inter_lifetime_points.append(the_tau)
self.nb_inter_lifetime_points = len(self.inter_lifetime_points)
# Sort the list to have lifetimes in increasing order
self.inter_lifetime_points = sorted(self.inter_lifetime_points)
self.inter_lifetime_points_tree = Bin_tree(
self.inter_lifetime_points)
##############################################
# Get lgM from Tau Pop3 #
##############################################
def __get_m_from_tau_pop3(self, the_tau):
'''
Return the interpolated mass of a given lifetime.
This is for PopIII stars
Interpolation law
=================
log10(M) = a_M * log10(tau) + b_M
Arguments
=========
the_tau : Lifetime [yr]
'''
# Find the lower-mass boundary of the interval surrounding
# the given lifetime
if the_tau >= self.lifetimes_list_pop3[0]:
i_M_low = 0
elif the_tau <= self.lifetimes_list_pop3[-1]:
i_M_low = len(self.lifetimes_list_pop3) - 2
else:
i_M_low = 0
while self.lifetimes_list_pop3[i_M_low+1] >= the_tau:
i_M_low += 1
# Get the interpolation coefficients
lg_tau_low = np.log10(self.lifetimes_list_pop3[i_M_low+1])
lg_tau_upp = np.log10(self.lifetimes_list_pop3[i_M_low])
lg_m_low = np.log10(self.M_table_pop3[i_M_low+1])
lg_m_upp = np.log10(self.M_table_pop3[i_M_low])
a_temp = (lg_m_upp - lg_m_low) / (lg_tau_upp - lg_tau_low)
b_temp = lg_m_upp - a_temp * lg_tau_upp
# Return the interpolated mass
return 10**(a_temp * np.log10(the_tau) + b_temp)
##############################################
# Get lgM from Tau #
##############################################
def __get_m_from_tau(self, i_Z, the_tau):
'''
Return the interpolated mass of a given metallicity
that has a given lifetime.
Interpolation law
=================
log10(M) = a_M * log10(tau) + b_M
Arguments
=========
i_Z : Metallicity index of the yields table
the_tau : Lifetime [yr]
'''
# Find the lower-mass boundary of the interval surrounding
# the given lifetime
if the_tau >= self.lifetimes_list[i_Z][0]:
i_M_low = 0
elif the_tau <= self.lifetimes_list[i_Z][-1]:
i_M_low = len(self.lifetimes_list[i_Z]) - 2
else:
i_M_low = 0
while self.lifetimes_list[i_Z][i_M_low+1] >= the_tau:
i_M_low += 1
# Get the interpolation coefficients
lg_tau_low = np.log10(self.lifetimes_list[i_Z][i_M_low+1])
lg_tau_upp = np.log10(self.lifetimes_list[i_Z][i_M_low])
lg_m_low = np.log10(self.M_table[i_M_low+1])
lg_m_upp = np.log10(self.M_table[i_M_low])
a_temp = (lg_m_upp - lg_m_low) / (lg_tau_upp - lg_tau_low)
b_temp = lg_m_upp - a_temp * lg_tau_upp
# Return the interpolated mass
return 10**(a_temp * np.log10(the_tau) + b_temp)
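# Worked example (numbers chosen for illustration): if the table gives
# lifetimes of 1e10 yr at 1 Msun and 1e7 yr at 10 Msun, the inverse law
# log10(M) = a*log10(tau) + b has a = -1/3 and b = 10/3, so a lifetime
# of 1e8 yr maps onto a star of 10**(2/3) ~ 4.6 Msun.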
##############################################
# Get Iniabu #
##############################################
def _get_iniabu(self):
'''
This function returns the initial gas reservoir, ymgal, containing
the mass of all the isotopes considered by the stellar yields.
'''
# Zero metallicity gas reservoir
if self.iniZ == 0:
# If an input iniabu table is provided ...
if len(self.iniabu_table) > 0:
iniabu=ry.iniabu(os.path.join(nupy_path, self.iniabu_table))
if self.iolevel >0:
print ('Use initial abundance of ', self.iniabu_table)
ymgal_gi = np.array(iniabu.iso_abundance(self.history.isotopes)) * \
self.mgal
else:
# Get the primordial composition of Walker et al. (1991)
iniabu_table = 'yield_tables/iniabu/iniab_bb_walker91.txt'
ytables_bb = ry.read_yields_Z( \
os.path.join(nupy_path, iniabu_table), isotopes=self.history.isotopes)
# Assign the composition to the gas reservoir
ymgal_gi = ytables_bb.get(Z=0.0, quantity='Yields',\
isotopes=self.history.isotopes) * self.mgal
# Output information
if self.iolevel > 0:
print ('Use initial abundance of ', iniabu_table)
# Already enriched gas reservoir
else:
# If an input iniabu table is provided ...
if len(self.iniabu_table) > 0:
iniabu=ry.iniabu(os.path.join(nupy_path, self.iniabu_table))
if self.iolevel > 0:
print ('Use initial abundance of ', self.iniabu_table)
# If NuGrid's yields are used ...
else:
# Define all the Z and abundance input files considered by NuGrid
ini_Z = [0.01, 0.001, 0.0001, 0.02, 0.006, 0.00001, 0.000001]
ini_list = ['iniab1.0E-02GN93.ppn', 'iniab1.0E-03GN93_alpha.ppn', \
'iniab1.0E-04GN93_alpha.ppn', 'iniab2.0E-02GN93.ppn', \
'iniab6.0E-03GN93_alpha.ppn', \
'iniab1.0E-05GN93_alpha_scaled.ppn', \
'iniab1.0E-06GN93_alpha_scaled.ppn']
# Pick the composition associated to the input iniZ
for metal in ini_Z:
if metal == float(self.iniZ):
iniabu = ry.iniabu(os.path.join(nupy_path,\
"yield_tables", "iniabu",\
ini_list[ini_Z.index(metal)]))
if self.iolevel>0:
print ('Use initial abundance of ', \
ini_list[ini_Z.index(metal)])
break
# Input file for the initial composition ...
#else:
# iniabu=ry.iniabu(nupy_path + iniabu_table) # TODO this might not work now. Use os.path.join
# print ('Use initial abundance of ', iniabu_table)
# Assign the composition to the gas reservoir
ymgal_gi = np.array(iniabu.iso_abundance(self.history.isotopes)) * \
self.mgal
# Make sure the total mass of gas is exactly mgal
# This is in case we have a few isotopes without H and He
ymgal_gi = ymgal_gi * self.mgal / sum(ymgal_gi)
# if sum(ymgal_gi) > self.mgal:
# ymgal_gi[0] = ymgal_gi[0] - (sum(ymgal_gi) - self.mgal)
# Return the gas reservoir
return ymgal_gi
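    # Illustrative sketch for _get_iniabu() above (not executed): the final
    # normalization with hypothetical numbers. Mass fractions are converted
    # into masses and rescaled so that the total gas mass is exactly mgal.
    #   X0 = np.array([0.75, 0.25, 1.0e-4])          # hypothetical mass fractions
    #   mgal = 1.0e11                                # total gas mass [Msun]
    #   ymgal_gi = X0 * mgal
    #   ymgal_gi = ymgal_gi * mgal / sum(ymgal_gi)   # now sum(ymgal_gi) == mgal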
##############################################
# Get Timesteps #
##############################################
def __get_timesteps(self):
'''
This function calculates and returns the duration of every timestep.
'''
# Declaration of the array containing the timesteps
timesteps_gt = []
# If the timesteps are given as an input ...
if len(self.dt_in) > 0:
# Copy the timesteps
timesteps_gt = self.dt_in
# If the timesteps need to be calculated ...
else:
# If all the timesteps have the same duration ...
if self.special_timesteps <= 0:
# Make sure the last timestep is equal to tend
counter = 0
step = 1
laststep = False
t = 0
t0 = 0
while(True):
counter+=step
if (self.history.tend/self.history.dt)==0:
if (self.history.dt*counter)>self.history.tend:
break
else:
if laststep==True:
break
if (self.history.dt*counter+step)>self.history.tend:
counter=(self.history.tend/self.history.dt)
laststep=True
t=counter
timesteps_gt.append(int(t-t0)*self.history.dt)
t0=t
# If the special timestep option is chosen ...
if self.special_timesteps > 0:
# Use a logarithm scheme
times1 = np.logspace(np.log10(self.history.dt), \
np.log10(self.history.tend), self.special_timesteps)
times1 = [0] + list(times1)
timesteps_gt = np.array(times1[1:]) - np.array(times1[:-1])
# If a timestep needs to be added to be synchronized with
# the external program managing merger trees ...
if self.t_merge > 0.0:
if self.t_merge < (self.history.tend - 1.1):
# Declare the new timestep array
timesteps_new = []
# Find the interval where the step needs to be added
i_temp = 0
t_temp = timesteps_gt[0]
while t_temp < self.t_merge:
timesteps_new.append(timesteps_gt[i_temp])
i_temp += 1
t_temp += timesteps_gt[i_temp]
# Add the extra timestep
dt_up_temp = t_temp - self.t_merge
dt_low_temp = timesteps_gt[i_temp] - dt_up_temp
timesteps_new.append(dt_low_temp)
timesteps_new.append(dt_up_temp)
# Keep the t_merger index in memory
self.i_t_merger = i_temp
# Add the rest of the timesteps
# Skip the current one that just has been split
for i_dtnew in range(i_temp+1,len(timesteps_gt)):
timesteps_new.append(timesteps_gt[i_dtnew])
# Replace the timesteps array to be returned
timesteps_gt = timesteps_new
else:
self.i_t_merger = len(timesteps_gt)-1
# Correct the last timestep if needed
if timesteps_gt[-1] == 0.0:
timesteps_gt[-1] = self.history.tend - sum(timesteps_gt)
# Return the duration of all timesteps
return timesteps_gt
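    # Illustrative sketch for __get_timesteps() above (not executed): the
    # special_timesteps logarithmic scheme with hypothetical values. The
    # timestep durations are differences between log-spaced time points.
    #   dt, tend, n_special = 1.0e6, 1.3e10, 5    # [yr], [yr], number of steps
    #   times1 = [0] + list(np.logspace(np.log10(dt), np.log10(tend), n_special))
    #   timesteps = np.array(times1[1:]) - np.array(times1[:-1])
    #   # sum(timesteps) equals tend up to floating-point rounding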
##############################################
# Get Storing Arrays #
##############################################
def _get_storing_arrays(self, ymgal, nb_iso_gsa):
'''
This function declares and returns all the arrays containing information
about the evolution of the stellar ejecta, the gas reservoir, the star
formation rate, and the number of core-collapse SNe, SNe Ia, neutron star
mergers, and white dwarfs.
Argument
========
ymgal : Initial gas reservoir. This function extends it to all timesteps
        nb_iso_gsa : Number of isotopes (can differ between stable and radioactive isotopes)
'''
# Number of timesteps and isotopes
nb_dt_gsa = self.nb_timesteps
# Stellar ejecta
mdot = np.zeros((nb_dt_gsa,nb_iso_gsa))
# Gas reservoir
temp = copy.copy(ymgal)
ymgal = np.zeros((nb_dt_gsa+1,nb_iso_gsa))
ymgal[0] += np.array(temp)
# Massive stars, AGB stars, SNe Ia ejecta, and neutron star merger ejecta
ymgal_massive = []
ymgal_agb = []
ymgal_1a = []
ymgal_nsm = []
ymgal_delayed_extra = []
if self.pre_calculate_SSPs:
mdot_massive = copy.deepcopy(mdot)
mdot_agb = []
mdot_1a = copy.deepcopy(mdot)
mdot_nsm = []
mdot_delayed_extra = []
sn1a_numbers = []
nsm_numbers = []
sn2_numbers = []
self.wd_sn1a_range = []
self.wd_sn1a_range1 = []
delayed_extra_numbers = []
self.number_stars_born = []
else:
for k in range(nb_dt_gsa + 1):
ymgal_massive.append(np.zeros(nb_iso_gsa))
ymgal_agb.append(np.zeros(nb_iso_gsa))
ymgal_1a.append(np.zeros(nb_iso_gsa))
ymgal_nsm.append(np.zeros(nb_iso_gsa))
for iiii in range(0,self.nb_delayed_extra):
ymgal_delayed_extra.append([])
for k in range(nb_dt_gsa + 1):
ymgal_delayed_extra[iiii].append(np.zeros(nb_iso_gsa))
mdot_massive = copy.deepcopy(mdot)
mdot_agb = copy.deepcopy(mdot)
mdot_1a = copy.deepcopy(mdot)
mdot_nsm = copy.deepcopy(mdot)
mdot_delayed_extra = []
for iiii in range(0,self.nb_delayed_extra):
mdot_delayed_extra.append(copy.deepcopy(mdot))
# Number of SNe Ia, core-collapse SNe, and neutron star mergers
sn1a_numbers = np.zeros(nb_dt_gsa)
nsm_numbers = np.zeros(nb_dt_gsa)
sn2_numbers = np.zeros(nb_dt_gsa)
self.wd_sn1a_range = np.zeros(nb_dt_gsa)
self.wd_sn1a_range1 = np.zeros(nb_dt_gsa)
delayed_extra_numbers = []
for iiii in range(0,self.nb_delayed_extra):
delayed_extra_numbers.append(np.zeros(nb_dt_gsa))
# Star formation
self.number_stars_born = np.zeros(nb_dt_gsa+1)
# Related to the IMF
self.history.imf_mass_ranges = [[]] * (nb_dt_gsa + 1)
imf_mass_ranges = []
imf_mass_ranges_contribution = [[]] * (nb_dt_gsa + 1)
imf_mass_ranges_mtot = [[]] * (nb_dt_gsa + 1)
# Return all the arrays
return mdot, ymgal, ymgal_massive, ymgal_agb, ymgal_1a, ymgal_nsm,\
ymgal_delayed_extra, mdot_massive, mdot_agb, mdot_1a, mdot_nsm,\
mdot_delayed_extra, sn1a_numbers, sn2_numbers, nsm_numbers,\
delayed_extra_numbers, imf_mass_ranges, imf_mass_ranges_contribution,\
imf_mass_ranges_mtot
##############################################
# Define Unstab. Stab. Indexes #
##############################################
def __define_unstab_stab_indexes(self):
'''
Create an array to make the connection between radioactive isotopes
and the stable isotopes they are decaying into. For example, if Al-26
decays into Mg-26, the array will contain the Mg-26 index in the stable
ymgal array.
'''
# Declare the index connection array
self.rs_index = [0]*self.nb_radio_iso
# For each radioactive isotope ..
for i_dusi in range(0,self.nb_radio_iso):
# If stable isotope is in the main yields table ..
if self.decay_info[i_dusi][1] in self.history.isotopes:
# Get the radioactive and stable index
self.rs_index[i_dusi] = \
self.history.isotopes.index(self.decay_info[i_dusi][1])
# If stable isotope is not in the main yields table ..
else:
self.need_to_quit = True
print ('Error - Decayed product '+self.decay_info[i_dusi][1]+\
' is not in the list of considered stable isotopes.')
##############################################
# Build Split dt #
##############################################
def __build_split_dt(self):
'''
Create a timesteps array from the dt_split_info array.
'''
        # Declaration of the timestep array to be returned
dt_in_split = []
        # Initialization of the time for the upcoming simulation
t_bsd = 0.0
# For each split condition ...
for i_bsd in range(0,len(self.dt_split_info)):
# While the time still satisfies the current condition ...
while t_bsd < self.dt_split_info[i_bsd][1]:
# Add the timestep and update the time
dt_in_split.append(self.dt_split_info[i_bsd][0])
t_bsd += dt_in_split[-1]
# Return the timesteps array
return dt_in_split
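    # Illustrative sketch for __build_split_dt() above (not executed): the
    # assumed dt_split_info format, with hypothetical values. Each entry is
    # [timestep duration, time up to which that duration is used].
    #   dt_split_info = [[1.0e6, 1.0e8], [1.0e7, 1.0e9]]
    #   # --> 1 Myr steps until t = 100 Myr, then 10 Myr steps until t = 1 Gyr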
##############################################
# Get Coef WD Fit #
##############################################
def __get_coef_wd_fit(self):
'''
        This function calculates the coefficients of a third-order polynomial
        fit for the fraction of white dwarfs as a function of time (see the
        polyfit call below). Only progenitor stars for SNe Ia are considered.
'''
# Only consider stars between 3 and 8 Mo
lg_m_fit = []
lg_t_fit = []
for Z in self.ytables.Z_list:
for M in self.ytables.M_list:
if M >= 3.0 and M <= 8.0:
lg_m_fit.append(np.log10(M))
lg_t_fit.append(np.log10(\
self.ytables.get(M=M,Z=Z,quantity="Lifetime")))
# Create fit lgt = a*lgM**2 + b*lgM + c
a_fit, b_fit, c_fit = polyfit(lg_m_fit, lg_t_fit, 2)
# Array of lifetimes
t_f_wd = []
m_f_wd = []
t_max_f_wd = 10**(a_fit*0.47712**2 + b_fit*0.47712 + c_fit)
t_min_f_wd = 10**(a_fit*0.90309**2 + b_fit*0.90309 + c_fit)
self.t_3_0 = t_max_f_wd
self.t_8_0 = t_min_f_wd
nb_m = 15
dm_wd = (8.0 - 3.0) / nb_m
m_temp = 3.0
for i_gcwf in range(0,nb_m):
m_f_wd.append(m_temp)
t_f_wd.append(10**(a_fit*np.log10(m_temp)**2 + \
b_fit*np.log10(m_temp) + c_fit))
m_temp += dm_wd
# Calculate the total number of progenitor stars
n_tot_prog_inv = 1.0 / self._imf(3.0,8.0,1)
# For each lifetime ...
f_wd = []
for i_gcwf in range(0,len(t_f_wd)):
# Calculate the fraction of white dwarfs
f_wd.append(self._imf(m_f_wd[i_gcwf],8.0,1)*n_tot_prog_inv)
# Calculate the coefficients for the fit f_wd vs t
self.a_wd, self.b_wd, self.c_wd, self.d_wd = \
polyfit(t_f_wd, f_wd, 3)
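    # Illustrative sketch for __get_coef_wd_fit() above (not executed):
    # evaluating the white dwarf fraction fit at a given time, assuming the
    # polyfit coefficients are ordered from the highest power down.
    #   t = 5.0e8                                  # hypothetical time [yr]
    #   f_wd_t = self.a_wd*t**3 + self.b_wd*t**2 + self.c_wd*t + self.d_wd
    #   # equivalently: np.polyval([self.a_wd, self.b_wd, self.c_wd, self.d_wd], t)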
##############################################
# Evol Stars #
##############################################
def _evol_stars(self, i, f_esc_yields=0.0, mass_sampled=np.array([]), \
scale_cor=np.array([])):
'''
        This function executes part of a single timestep of a simulation
        managed by either OMEGA or SYGMA. It converts gas into stars, calculates
        the stellar ejecta of the new simple stellar population (if any), and adds
        its contribution to the total ejecta coming from all stellar populations.
        Arguments
        =========
i : Index of the current timestep
f_esc_yields: Fraction of non-contributing stellar ejecta
mass_sampled : Stars sampled in the IMF by an external program
scale_cor : Envelope correction for the IMF
'''
# Update the time of the simulation. Here, i is in fact the end point
# of the current timestep which extends from i-1 to i.
self.t += self.history.timesteps[i-1]
# Initialisation of the mass locked into stars
if not self.use_external_integration:
self.m_locked = 0
self.m_locked_agb = 0
self.m_locked_massive = 0
# If stars are forming during the current timestep ..
# Note: self.sfrin is calculated in SYGMA or OMEGA
if self.sfrin > 0:
            # If not using an integration scheme to advance the system ..
if not self.use_external_integration:
# Limit the SFR if there is not enough gas
if self.sfrin > 1.0:
print ('Warning -- Not enough gas to sustain the SFH.', i)
self.sfrin = 1.0
self.not_enough_gas = True
self.not_enough_gas_count += 1
# Lock gas into stars
f_lock_remain = 1.0 - self.sfrin
self.__lock_gas_into_stars(i, f_lock_remain)
# Correction if comparing with Clayton's analytical model
# DO NOT USE unless you know why
if not self.pre_calculate_SSPs:
if len(self.test_clayton) > 0:
i_stable = self.test_clayton[0]
i_unst = self.test_clayton[1]
RR = self.test_clayton[2]
self.ymgal[i][i_stable] = \
(1.0 - self.sfrin*(1.0-RR)) * self.ymgal[i-1][i_stable]
self.ymgal_radio[i][i_unst] = \
(1.0 - self.sfrin*(1.0-RR)) * self.ymgal_radio[i-1][i_unst]
# Add the pre-calculated SSP ejecta .. if fast mode
if self.pre_calculate_SSPs:
self.__add_ssp_ejecta(i)
# Calculate stellar ejecta .. if normal mode
else:
self.__calculate_stellar_ejecta(i, f_esc_yields, mass_sampled, scale_cor)
# If no star is forming during the current timestep ...
else:
# Use the previous gas reservoir for the current timestep
# Done by assuming f_lock_remain = 1.0
if not self.use_external_integration:
self.__lock_gas_into_stars(i, 1.0)
# Initialize array containing no CC SNe for the SSP_i-1
if self.out_follows_E_rate:
self.ssp_nb_cc_sne = np.array([])
# Add stellar ejecta to the gas reservoir
# This needs to be called even if no star formation at the
# current timestep, because older stars may still pollute
if not self.use_external_integration:
self.__pollute_gas_with_ejecta(i)
# Convert the mass ejected by massive stars into rate
if not self.pre_calculate_SSPs:
if self.history.timesteps[i-1] == 0.0:
self.massive_ej_rate[i-1] = 0.0
self.sn1a_ej_rate[i-1] = 0.0
else:
self.massive_ej_rate[i-1] = sum(self.mdot_massive[i-1]) / \
self.history.timesteps[i-1]
self.sn1a_ej_rate[i-1] = sum(self.mdot_1a[i-1]) / \
self.history.timesteps[i-1]
##############################################
# Lock Gas Into Stars #
##############################################
def __lock_gas_into_stars(self, i, f_lock_remain):
'''
        Correct the mass of the different gas reservoirs "ymgal"
        for the mass locked into stars.
Argument
========
i : Index of the current timestep
f_lock_remain: Mass fraction of gas remaining after star formation
'''
# If this is the fast chem_evol version ..
if self.pre_calculate_SSPs:
# Update a limited number of gas components
self.ymgal[i] = f_lock_remain * self.ymgal[i-1]
if self.len_decay_file > 0 or self.use_decay_module:
self.ymgal_radio[i] = f_lock_remain * self.ymgal_radio[i-1]
# Keep track of the mass locked into stars
self.m_locked += (1.0 - f_lock_remain) * sum(self.ymgal[i-1])
# If this is the normal chem_evol version ..
else:
# Update all stable gas components
self.ymgal[i] = f_lock_remain * self.ymgal[i-1]
self.ymgal_massive[i] = f_lock_remain * self.ymgal_massive[i-1]
self.ymgal_agb[i] = f_lock_remain * self.ymgal_agb[i-1]
self.ymgal_1a[i] = f_lock_remain * self.ymgal_1a[i-1]
self.ymgal_nsm[i] = f_lock_remain * self.ymgal_nsm[i-1]
self.m_locked += self.sfrin * sum(self.ymgal[i-1])
for iiii in range(0,self.nb_delayed_extra):
self.ymgal_delayed_extra[iiii][i] = \
f_lock_remain * self.ymgal_delayed_extra[iiii][i-1]
# Update all radioactive gas components
if self.len_decay_file > 0 or self.use_decay_module:
self.ymgal_radio[i] = f_lock_remain * self.ymgal_radio[i-1]
if not self.use_decay_module and self.len_decay_file > 0:
if self.radio_massive_agb_on:
self.ymgal_massive_radio[i] = f_lock_remain * self.ymgal_massive_radio[i-1]
self.ymgal_agb_radio[i] = f_lock_remain * self.ymgal_agb_radio[i-1]
if self.radio_sn1a_on:
self.ymgal_1a_radio[i] = f_lock_remain * self.ymgal_1a_radio[i-1]
if self.radio_nsmerger_on:
self.ymgal_nsm_radio[i] = f_lock_remain * self.ymgal_nsm_radio[i-1]
for iiii in range(0,self.nb_delayed_extra_radio):
self.ymgal_delayed_extra_radio[iiii][i] = \
f_lock_remain * self.ymgal_delayed_extra_radio[iiii][i-1]
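    # Illustrative sketch for __lock_gas_into_stars() above (not executed),
    # with hypothetical numbers: if 1% of the gas forms stars during the
    # timestep, every gas component is scaled by the remaining fraction.
    #   sfrin = 0.01
    #   f_lock_remain = 1.0 - sfrin               # 0.99
    #   ymgal_i = f_lock_remain * ymgal_im1       # gas left in each component
    #   m_locked = sfrin * sum(ymgal_im1)         # mass locked into stars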
##############################################
# Pollute Gas With Ejecta #
##############################################
def __pollute_gas_with_ejecta(self, i):
'''
Add stellar ejecta to the gas components.
Argument
========
i : Index of the current timestep
'''
# If this is the fast chem_evol version ..
if self.pre_calculate_SSPs:
# Pollute a limited number of gas components
self.ymgal[i] += self.mdot[i-1]
if self.len_decay_file > 0 or self.use_decay_module:
self.ymgal_radio[i][:self.nb_radio_iso] += self.mdot_radio[i-1]
# If this is the normal chem_evol version ..
else:
# Pollute all stable gas components
self.ymgal[i] += self.mdot[i-1]
self.ymgal_agb[i] += self.mdot_agb[i-1]
self.ymgal_1a[i] += self.mdot_1a[i-1]
self.ymgal_massive[i] += self.mdot_massive[i-1]
self.ymgal_nsm[i] += self.mdot_nsm[i-1]
if self.nb_delayed_extra > 0:
for iiii in range(0,self.nb_delayed_extra):
self.ymgal_delayed_extra[iiii][i] += \
self.mdot_delayed_extra[iiii][i-1]
# Pollute all radioactive gas components
# Note: ymgal_radio[i] is treated in the decay_radio function
            # However, the contribution of individual sources must be added here!
if not self.use_decay_module:
if self.radio_massive_agb_on:
self.ymgal_agb_radio[i] += \
self.mdot_agb_radio[i-1]
self.ymgal_massive_radio[i] += \
self.mdot_massive_radio[i-1]
if self.radio_sn1a_on:
self.ymgal_1a_radio[i] += \
self.mdot_1a_radio[i-1]
if self.radio_nsmerger_on:
self.ymgal_nsm_radio[i] += \
self.mdot_nsm_radio[i-1]
for iiii in range(0,self.nb_delayed_extra_radio):
self.ymgal_delayed_extra_radio[iiii][i] += \
self.mdot_delayed_extra_radio[iiii][i-1]
##############################################
# Update History #
##############################################
def _update_history(self, i):
'''
        This function adds the state of the current timestep to the history class.
Argument
========
i : Index of the current timestep
Note
====
This function is decoupled from evol_stars() because OMEGA modifies
the quantities between evol_stars() and the update of the history class.
'''
        # Keep the current state in memory
if self.pre_calculate_SSPs:
self.history.metallicity.append(self.zmetal)
#self.history.age.append(self.t)
self.history.gas_mass.append(np.sum(self.ymgal[i]))
self.history.ism_iso_yield.append(self.ymgal[i])
self.history.m_locked.append(self.m_locked)
else:
self.history.metallicity.append(self.zmetal)
#self.history.age.append(self.t)
self.history.gas_mass.append(np.sum(self.ymgal[i]))
self.history.ism_iso_yield.append(self.ymgal[i])
self.history.ism_iso_yield_agb.append(self.ymgal_agb[i])
self.history.ism_iso_yield_1a.append(self.ymgal_1a[i])
self.history.ism_iso_yield_nsm.append(self.ymgal_nsm[i])
self.history.ism_iso_yield_massive.append(self.ymgal_massive[i])
self.history.sn1a_numbers.append(self.sn1a_numbers[i-1])
self.history.nsm_numbers.append(self.nsm_numbers[i-1])
self.history.sn2_numbers.append(self.sn2_numbers[i-1])
self.history.m_locked.append(self.m_locked)
# self.history.m_locked_agb.append(self.m_locked_agb)
# self.history.m_locked_massive.append(self.m_locked_massive)
##############################################
# Update History Final #
##############################################
def _update_history_final(self):
'''
        This function adds the total stellar ejecta to the history class and
        converts isotopes into chemical elements.
'''
# Fill the last bits of the history class
self.history.mdot = self.mdot
self.history.imf_mass_ranges_contribution=self.imf_mass_ranges_contribution
self.history.imf_mass_ranges_mtot = self.imf_mass_ranges_mtot
# Convert isotopes into elements
if self.pre_calculate_SSPs:
for h in range(len(self.history.ism_iso_yield)):
self.history.ism_elem_yield.append(self._iso_abu_to_elem(self.history.ism_iso_yield[h]))
else:
for h in range(len(self.history.ism_iso_yield)):
self.history.ism_elem_yield.append(\
self._iso_abu_to_elem(self.history.ism_iso_yield[h]))
self.history.ism_elem_yield_agb.append(\
self._iso_abu_to_elem(self.history.ism_iso_yield_agb[h]))
self.history.ism_elem_yield_1a.append(\
self._iso_abu_to_elem(self.history.ism_iso_yield_1a[h]))
self.history.ism_elem_yield_nsm.append(\
self._iso_abu_to_elem(self.history.ism_iso_yield_nsm[h]))
self.history.ism_elem_yield_massive.append(\
self._iso_abu_to_elem(self.history.ism_iso_yield_massive[h]))
##############################################
# Calculate Stellar Ejecta #
##############################################
def __calculate_stellar_ejecta(self, i, f_esc_yields, mass_sampled, \
scale_cor, dm_imf=0.5):
'''
For each upcoming timestep, including the current one,
calculate the yields ejected by the new stellar population
that will be deposited in the gas at that timestep. This
function updates the "mdot" arrays, which will eventually
be added to the "ymgal" arrays, corresponding to the gas
component arrays.
Argument
========
i : Index of the timestep after the current timestep
f_esc_yields: Fraction of non-contributing stellar ejecta
mass_sampled : Stars sampled in the IMF by an external program
scale_cor : Envelope correction for the IMF
dm_imf : Mass interval resolution of the IMF. Stars within a
specific mass interval will have the same yields
'''
# If net yields are used ..
if self.use_net_yields_stable:
# Get the interpolated initial stellar composition
# The metallicity is the metallicity at which the
# stars formed, at current timestep
self.X0_stellar = self.get_interp_X0(self.zmetal)
# Calculate the mass fraction of each isotope in the gas
# at the time the stellar population formed
# Note: i-1 is the current timestep
self.X0_gas = self.ymgal[i-1] / sum(self.ymgal[i-1])
# Select the adequate IMF properties
if self.zmetal <= self.Z_trans:
the_A_imf = self.A_imf_pop3
else:
the_A_imf = self.A_imf
# Initialize the age of the newly-formed stars
t_lower = 0.0
# If the IMF is stochastically sampled ..
if len(mass_sampled) > 0:
# Sort the list of masses in decreasing order
# And set the index to point to most massive one
mass_sampled_sort = sorted(mass_sampled)[::-1]
nb_mass_sampled = len(mass_sampled)
stochastic_IMF = True
i_m_sampled = 0
# If the IMF is fully sampled ..
else:
stochastic_IMF = False
        # For each upcoming timestep (including the current one) ..
for i_cse in range(i-1, self.nb_timesteps):
# Get the adapted IMF mass bin information
nb_dm, new_dm_imf, m_lower = \
self.__get_mass_bin(dm_imf, t_lower, i_cse)
# If the population is not active anymore, stop the loop
if nb_dm == -1:
break
# If there are yields to be calculated ..
elif nb_dm > 0:
# If the IMF is stochastically sampled ..
if stochastic_IMF:
# For each sampled mass in that mass bin ..
m_upper = m_lower + new_dm_imf*nb_dm
while i_m_sampled < nb_mass_sampled and \
mass_sampled_sort[i_m_sampled] >= m_lower and \
mass_sampled_sort[i_m_sampled] <= m_upper:
# Get the yields for that star
the_yields = self.get_interp_yields(\
mass_sampled_sort[i_m_sampled], self.zmetal)
# Add that one star in the stellar ejecta array
self.__add_yields_in_mdot(1.0, the_yields, \
mass_sampled_sort[i_m_sampled], i_cse, i)
# Go to the next sampled mass
i_m_sampled += 1
# If the IMF is fully sampled ..
else:
# For each IMF mass bin ..
for i_imf_bin in range(nb_dm):
# Calculate lower, central, and upper masses of this bin
the_m_low = m_lower + i_imf_bin * new_dm_imf
the_m_cen = the_m_low + 0.5 * new_dm_imf
the_m_upp = the_m_low + new_dm_imf
# Get the number of stars in that mass bin
nb_stars = self.m_locked * the_A_imf *\
self._imf(the_m_low, the_m_upp, 1)
# Get the yields for the central stellar mass
the_yields = self.get_interp_yields(the_m_cen, self.zmetal)
# Add yields in the stellar ejecta array. We do this at
# each mass bin to distinguish between AGB and massive.
self.__add_yields_in_mdot(nb_stars, the_yields, \
the_m_cen, i_cse, i, lower_mass = the_m_low, \
upper_mass = the_m_upp)
# If there are radioactive isotopes
if (self.len_decay_file > 0 or self.use_decay_module) and \
len(self.table_radio) > 0:
# Get the yields for the central stellar mass
the_yields = self.get_interp_yields(the_m_cen, \
self.zmetal, is_radio=True)
# Add yields in the stellar ejecta array. We do this at
# each mass bin to distinguish between AGB and massive.
self.__add_yields_in_mdot(nb_stars, the_yields, \
the_m_cen, i_cse, i, is_radio=True)
# Move the lower limit of the lifetime range to the next timestep
t_lower += self.history.timesteps[i_cse]
# Include the ejecta from other enrichment sources
# such as SNe Ia, neutron star mergers, ...
self.__add_other_sources(i)
##############################################
# Get Mass Bin #
##############################################
def __get_mass_bin(self, dm_imf, t_lower, i_cse):
'''
Calculate the new IMF mass bin resolution. This is based on
the input resolution (dm_imf), but adapted to have an integer
number of IMF bins that fits within the stellar mass interval
defined by a given stellar lifetime interval.
Arguments
=========
dm_imf : Mass interval resolution of the IMF. Stars within a
specific mass interval will have the same yields.
t_lower : Lower age limit of the stellar populations.
i_cse : Index of the "future" timestep (see __calculate_stellar_ejecta).
'''
# Copy the adequate IMF yields range
if self.zmetal <= self.Z_trans:
imf_yr = self.imf_yields_range_pop3
else:
imf_yr = self.imf_yields_range
# Calculate the upper age limit of the stars for that timestep
t_upper = t_lower + self.history.timesteps[i_cse]
# Get the lower and upper stellar mass range that will
# contribute to the ejecta in that timestep
m_lower = self.get_interp_lifetime_mass(t_upper, self.zmetal, is_mass=False)
if t_lower == 0.0:
m_upper = 1.0e30
else:
m_upper = self.get_interp_lifetime_mass(t_lower, self.zmetal, is_mass=False)
        # Return a sign that the population is not active anymore
        # if the age is larger than the lifetime of the lowest-mass stellar model ..
if m_upper < imf_yr[0]:
return -1, 0, 0
# Skip the yields calculation if the age is too low to
# activate the most massive star model ..
elif m_lower > imf_yr[1]:
# Skip the yields calculation
nb_dm = 0
new_dm_imf = 0
# If the mass interval is inside or overlapping
# with the IMF yields range ..
else:
# Redefine the boundary to respect the IMF yields range
m_lower = max(m_lower, imf_yr[0])
m_upper = min(m_upper, imf_yr[1])
# Calculate the new IMF resolution, which is based on the
# input resolution, but adapted to have an integer number
# of IMF bins that fits in the redefined stellar mass interval
nb_dm = int(round((m_upper-m_lower)/dm_imf))
if nb_dm < 1:
nb_dm = 1
new_dm_imf = m_upper - m_lower
else:
new_dm_imf = (m_upper - m_lower) / float(nb_dm)
# Return the new IMF bin
return nb_dm, new_dm_imf, m_lower
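    # Illustrative sketch for __get_mass_bin() above (not executed), with
    # hypothetical masses: the input IMF resolution is adapted so that an
    # integer number of bins exactly covers the redefined mass interval.
    #   m_lower, m_upper, dm_imf = 8.3, 12.1, 0.5
    #   nb_dm = int(round((m_upper - m_lower) / dm_imf))   # 8 bins
    #   new_dm_imf = (m_upper - m_lower) / float(nb_dm)    # 0.475 Msun per bin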
##############################################
# Get Interp Yields #
##############################################
def get_interp_yields(self, M_giy, Z_giy, is_radio=False):
'''
Return the interpolated yields for a star with given
mass and metallicity
Interpolation law
=================
log10(yields) = a_M * M + b_M
        x_M = a_Z * log10(Z) + b_Z
Arguments
=========
M_giy : Initial mass of the star
Z_giy : Initial metallicity of the star
Note
====
self.Z_table is in decreasing order
but y_coef_... arrays have metallicities in increasing order
'''
# Select the appropriate interpolation coefficients
if is_radio:
the_y_coef_M = self.y_coef_M_radio
the_y_coef_Z_aM = self.y_coef_Z_aM_radio
the_y_coef_Z_bM = self.y_coef_Z_bM_radio
else:
the_y_coef_M = self.y_coef_M
the_y_coef_Z_aM = self.y_coef_Z_aM
the_y_coef_Z_bM = self.y_coef_Z_bM
# If the metallicity is in the PopIII regime ..
if Z_giy <= self.Z_trans and not is_radio:
# Find the lower-mass boundary of the interpolation
if self.nb_inter_M_points_pop3 < 30:
i_M_low = 0
while M_giy > self.inter_M_points_pop3[i_M_low+1]:
i_M_low += 1
else:
i_M_low = self.inter_M_points_pop3_tree.search_left(M_giy)
# Select the M interpolation coefficients of PopIII yields
a_M = self.y_coef_M_pop3[0][i_M_low]
b_M = self.y_coef_M_pop3[1][i_M_low]
a_M_ej = self.y_coef_M_ej_pop3[0][i_M_low]
b_M_ej = self.y_coef_M_ej_pop3[1][i_M_low]
# If we do not use PopIII yields ..
else:
# Find the lower-mass boundary of the interpolation
if self.nb_inter_M_points < 30:
i_M_low = 0
while M_giy > self.inter_M_points[i_M_low+1]:
i_M_low += 1
else:
i_M_low = self.inter_M_points_tree.search_left(M_giy)
# If the metallicity is below the lowest Z available ..
if Z_giy <= self.inter_Z_points[0]:
# Select the M interpolation coefficients of the lowest Z
a_M = the_y_coef_M[0][0][i_M_low]
b_M = the_y_coef_M[1][0][i_M_low]
if not is_radio:
a_M_ej = self.y_coef_M_ej[0][0][i_M_low]
b_M_ej = self.y_coef_M_ej[1][0][i_M_low]
# If the metallicity is above the highest Z available ..
elif Z_giy > self.inter_Z_points[-1]:
# Select the M interpolation coefficients of the highest Z
a_M = the_y_coef_M[0][-1][i_M_low]
b_M = the_y_coef_M[1][-1][i_M_low]
if not is_radio:
a_M_ej = self.y_coef_M_ej[0][-1][i_M_low]
b_M_ej = self.y_coef_M_ej[1][-1][i_M_low]
# If the metallicity is within the Z interval of the yields table ..
else:
# Find the lower-Z boundary of the interpolation
i_Z_low = 0
while Z_giy > self.inter_Z_points[i_Z_low+1]:
i_Z_low += 1
lg_Z_giy = np.log10(Z_giy)
# Calculate the a coefficient for the M interpolation
a_Z = the_y_coef_Z_aM[0][i_Z_low][i_M_low]
b_Z = the_y_coef_Z_aM[1][i_Z_low][i_M_low]
a_M = a_Z * lg_Z_giy + b_Z
if not is_radio:
a_Z_ej = self.y_coef_Z_aM_ej[0][i_Z_low][i_M_low]
b_Z_ej = self.y_coef_Z_aM_ej[1][i_Z_low][i_M_low]
a_M_ej = a_Z_ej * lg_Z_giy + b_Z_ej
# Calculate the b coefficient for the M interpolation
a_Z = the_y_coef_Z_bM[0][i_Z_low][i_M_low]
b_Z = the_y_coef_Z_bM[1][i_Z_low][i_M_low]
b_M = a_Z * lg_Z_giy + b_Z
if not is_radio:
a_Z_ej = self.y_coef_Z_bM_ej[0][i_Z_low][i_M_low]
b_Z_ej = self.y_coef_Z_bM_ej[1][i_Z_low][i_M_low]
b_M_ej = a_Z_ej * lg_Z_giy + b_Z_ej
# Interpolate the yields
y_interp = 10**(a_M * M_giy + b_M)
# Calculate the correction factor to match the relation
# between the total ejected mass and the stellar initial
# mass. M_ej = a * M_i + b
if is_radio:
f_corr = 1.0
else:
f_corr = (a_M_ej * M_giy + b_M_ej) / sum(y_interp)
if f_corr < 0.0:
f_corr = 0.0
# Return the interpolated and corrected yields
return y_interp * f_corr
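    # Illustrative sketch for get_interp_yields() above (not executed): the
    # two-step interpolation, with coefficients a_M, b_M, a_Z, b_Z, a_M_ej,
    # b_M_ej taken as given (they are precomputed elsewhere in this class).
    #   a_M = a_Z * np.log10(Z_giy) + b_Z          # mass coefficient at this Z
    #   y_interp = 10**(a_M * M_giy + b_M)         # yields, log-linear in mass
    #   f_corr = (a_M_ej * M_giy + b_M_ej) / sum(y_interp)   # match M_ej(M_i)
    #   yields = y_interp * max(f_corr, 0.0)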
##############################################
# Get Interp Lifetime Mass #
##############################################
def get_interp_lifetime_mass(self, the_quantity, Z_giy, is_mass=True):
'''
        Return the interpolated lifetime of a star with a given mass and
        metallicity, or the interpolated mass corresponding to a given
        lifetime and metallicity (depending on is_mass).
Interpolation law
=================
log10(lifetime) = a_M * log10(M) + b_M
log10(M) = a_M * log10(lifetime) + b_M
        x_M = a_Z * Z + b_Z
Arguments
=========
the_quantity : Initial mass or lifetime of the star
Z_giy : Initial metallicity of the star
is_mass : True --> the_quantity = mass
                  False --> the_quantity = lifetime
Note
====
self.Z_table is in decreasing order
but y_coef_... arrays have metallicities in increasing order
'''
# Define the quantity
if is_mass:
quantity_pop3 = self.M_table_pop3
nb_quantity_pop3 = self.nb_M_table_pop3
tau_coef_M_pop3 = self.tau_coef_M_pop3
quantity = self.M_table
nb_quantity = self.nb_M_table
tau_coef_M = self.tau_coef_M
tau_coef_Z_aM = self.tau_coef_Z_aM
tau_coef_Z_bM = self.tau_coef_Z_bM
else:
quantity_pop3 = self.inter_lifetime_points_pop3
quantity_pop3_tree = self.inter_lifetime_points_pop3_tree
nb_quantity_pop3 = self.nb_inter_lifetime_points_pop3
tau_coef_M_pop3 = self.tau_coef_M_pop3_inv
quantity = self.inter_lifetime_points
quantity_tree = self.inter_lifetime_points_tree
nb_quantity = self.nb_inter_lifetime_points
tau_coef_M = self.tau_coef_M_inv
tau_coef_Z_aM = self.tau_coef_Z_aM_inv
tau_coef_Z_bM = self.tau_coef_Z_bM_inv
# If the metallicity is in the PopIII regime ..
if Z_giy <= self.Z_trans:
# Find the lower-quantity boundary of the interpolation
if the_quantity >= quantity_pop3[-1]:
i_q_low = nb_quantity_pop3 - 2
else:
if nb_quantity_pop3 < 30:
i_q_low = 0
while the_quantity > quantity_pop3[i_q_low+1]:
i_q_low += 1
else:
i_q_low = quantity_pop3_tree.search_left(the_quantity)
# Select the M interpolation coefficients of PopIII yields
a_M = tau_coef_M_pop3[0][i_q_low]
b_M = tau_coef_M_pop3[1][i_q_low]
# If we do not use PopIII models ..
else:
# Find the lower-mass boundary of the interpolation
if the_quantity >= quantity[-1]:
i_q_low = nb_quantity - 2
else:
if nb_quantity < 30:
i_q_low = 0
while the_quantity > quantity[i_q_low+1]:
i_q_low += 1
else:
i_q_low = quantity_tree.search_left(the_quantity)
# If the metallicity is below the lowest Z available ..
if Z_giy <= self.inter_Z_points[0]:
# Select the M interpolation coefficients of the lowest Z
a_M = tau_coef_M[0][0][i_q_low]
b_M = tau_coef_M[1][0][i_q_low]
# If the metallicity is above the highest Z available ..
elif Z_giy > self.inter_Z_points[-1]:
# Select the M interpolation coefficients of the highest Z
a_M = tau_coef_M[0][-1][i_q_low]
b_M = tau_coef_M[1][-1][i_q_low]
# If the metallicity is within the Z interval of the yields table ..
else:
# Find the lower-Z boundary of the interpolation
i_Z_low = 0
while Z_giy > self.inter_Z_points[i_Z_low+1]:
i_Z_low += 1
# Calculate the a coefficient for the M interpolation
a_Z = tau_coef_Z_aM[0][i_Z_low][i_q_low]
b_Z = tau_coef_Z_aM[1][i_Z_low][i_q_low]
a_M = a_Z * Z_giy + b_Z
# Calculate the b coefficient for the M interpolation
a_Z = tau_coef_Z_bM[0][i_Z_low][i_q_low]
b_Z = tau_coef_Z_bM[1][i_Z_low][i_q_low]
b_M = a_Z * Z_giy + b_Z
        # Return the interpolated lifetime or mass
return 10**(a_M * np.log10(the_quantity) + b_M)
##############################################
# Add Yields in Mdot #
##############################################
def __add_yields_in_mdot(self, nb_stars, the_yields, the_m_cen, \
i_cse, i, lower_mass = None, upper_mass = None, \
is_radio=False):
'''
Add the IMF-weighted stellar yields in the ejecta "mdot" arrays.
Keep track of the contribution of low-mass and massive stars.
Argument
========
nb_stars : Number of stars in the IMF that eject the yields
the_yields : Yields of the IMF-central-mass-bin star
the_m_cen : Central stellar mass of the IMF bin
i_cse : Index of the "future" timestep (see __calculate_stellar_ejecta)
i : Index of the timestep where the stars originally formed
lower_mass: lower limit of the imf mass bin
upper_mass: upper limit of the imf mass bin
both the upper and lower limit must be provided to calculate the
fractional number of SNe before the imf cut-off
'''
# Calculate the total yields
the_tot_yields = nb_stars * the_yields
# If radioactive yields ..
if is_radio:
# Add the yields in the total ejecta array
self.mdot_radio[i_cse] += the_tot_yields
# Keep track of the contribution of massive and AGB stars
if the_m_cen > self.transitionmass:
self.mdot_massive_radio[i_cse] += the_tot_yields
else:
self.mdot_agb_radio[i_cse] += the_tot_yields
# If stable yields ..
else:
# If net yields are used ..
if self.use_net_yields_stable:
# Calculate the mass lost by the stars
M_ejected = sum(the_tot_yields)
# Correct the total yields to account for the
# initial composition of the stellar model
the_tot_yields = np.maximum(the_tot_yields + \
(self.X0_gas - self.X0_stellar) * M_ejected, 0.0)
# Add the yields in the total ejecta array
self.mdot[i_cse] += the_tot_yields
# Keep track of the contribution of massive and AGB stars
if the_m_cen > self.transitionmass:
self.mdot_massive[i_cse] += the_tot_yields
else:
self.mdot_agb[i_cse] += the_tot_yields
# Count the number of core-collapse SNe
# Calculate the fraction of imf above transitionmass
# But only if upper_mass and lower_mass are provided
if upper_mass is not None and lower_mass is not None:
if upper_mass > self.transitionmass:
ratio = upper_mass - max(self.transitionmass, lower_mass)
ratio /= upper_mass - lower_mass
else:
ratio = 0
elif the_m_cen > self.transitionmass:
ratio = 1
else:
ratio = 0
self.sn2_numbers[i_cse] += nb_stars*ratio
if self.out_follows_E_rate:
self.ssp_nb_cc_sne[i_cse-i-1] += nb_stars*ratio
# Sum the total number of stars born in the timestep
# where the stars originally formed
self.number_stars_born[i] += nb_stars
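    # Illustrative sketch for __add_yields_in_mdot() above (not executed):
    # the net-yields correction with hypothetical arrays. The ejecta are
    # shifted by the difference between the gas composition at formation
    # and the stellar model's initial composition, scaled by the total
    # ejected mass, and floored at zero.
    #   M_ejected = sum(the_tot_yields)
    #   corrected = np.maximum(the_tot_yields
    #                          + (X0_gas - X0_stellar) * M_ejected, 0.0)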
##############################################
# Add Other Sources #
##############################################
def __add_other_sources(self, i):
'''
Add the contribution of enrichment sources other than
massive stars (wind + SNe) and AGB stars to the ejecta
"mdot" array.
Argument
========
i : Index of the timestep where the stars originally formed
'''
# Add the contribution of SNe Ia, if any ...
if self.sn1a_on and self.zmetal > self.Z_trans:
if not (self.imf_bdys[0] > 8 or self.imf_bdys[1] < 3):
                f_esc_yields = 0.0 # temporary, this parameter will disappear
self.__sn1a_contribution(i, f_esc_yields)
# Add the contribution of neutron star mergers, if any...
if self.ns_merger_on:
self.__nsmerger_contribution(i)
# Add the contribution of delayed extra sources, if any...
if len(self.delayed_extra_dtd) > 0:
self.__delayed_extra_contribution(i)
##############################################
# Get Yield Factor #
##############################################
def __get_yield_factor(self, minm1, maxm1, mass_sampled, \
func_total_ejecta, m_table):
'''
        This function calculates the factor by which the input stellar
        yields must be multiplied, given the mass bin implied for the
considered timestep and the stellar masses sampled by an external
program.
Argument
========
minm1 : Minimum stellar mass having ejecta in this timestep j
        maxm1 : Maximum stellar mass having ejecta in this timestep j
mass_sampled : Stellar mass sampled by an external program
func_total_ejecta : Relation between M_tot_ej and stellar mass
m_table : Mass of the star in the table providing the yields
'''
# Initialisation of the number of stars sampled in this mass bin
nb_sampled_stars = 0.0
# Initialisation of the total mass ejected
m_ej_sampled = 0.0
# For all mass sampled ...
for i_gyf in range(0,len(mass_sampled)):
# If the mass is within the mass bin considered in this step ...
if mass_sampled[i_gyf] >= minm1 and mass_sampled[i_gyf] < maxm1:
# Add a star and cumulate the mass ejected
m_ej_sampled += func_total_ejecta(mass_sampled[i_gyf])
nb_sampled_stars += 1.0
# Stop the loop if the mass bin has been covered
if mass_sampled[i_gyf] >= maxm1:
break
# If no star is sampled in the current mass bin ...
if nb_sampled_stars == 0.0:
# No ejecta
return 0.0, 0.0
# If stars have been sampled ...
else:
# Calculate an adapted scalefactor parameter and return yield_factor
return nb_sampled_stars, m_ej_sampled / func_total_ejecta(m_table)
##############################################
# Get Scale Cor #
##############################################
def __get_scale_cor(self, minm1, maxm1, scale_cor):
'''
This function calculates the envelope correction that must be
        applied to the IMF. This correction can be used to increase
        or reduce the number of stars in a particular mass bin, without
        creating a new IMF. It returns the scalefactor_factor, which will
        be multiplied by scalefactor (e.g., 1.0 --> no correction)
Argument
========
minm1 : Minimum stellar mass having ejecta in this timestep j
        maxm1 : Maximum stellar mass having ejecta in this timestep j
scale_cor : Envelope correction for the IMF
'''
# Initialization of the scalefactor correction factor
scalefactor_factor = 0.0
# Calculate the width of the stellar mass bin
m_bin_width_inv = 1.0 / (maxm1 - minm1)
        # Cumulate the number of overlapping array bins
nb_overlaps = 0
# For each mass bin in the input scale_cor array ...
for i_gsc in range(0,len(scale_cor)):
# Copy the lower-mass limit of the current array bin
if i_gsc == 0:
m_low_temp = 0.0
else:
m_low_temp = scale_cor[i_gsc-1][0]
# If the array bin overlaps the considered stellar mass bin ...
if (scale_cor[i_gsc][0] > minm1 and scale_cor[i_gsc][0] <= maxm1)\
or (m_low_temp > minm1 and m_low_temp < maxm1)\
or (scale_cor[i_gsc][0] >= maxm1 and m_low_temp <= minm1):
# Calculate the stellar bin fraction covered by the array bin
frac_temp = (min(maxm1, scale_cor[i_gsc][0]) - \
max(minm1, m_low_temp)) * m_bin_width_inv
# Cumulate the correction
scalefactor_factor += frac_temp * scale_cor[i_gsc][1]
# Increment the number of overlaps
nb_overlaps += 1
        # Warning if there is no overlap
if nb_overlaps == 0:
print ('!!Warning - No overlap with scale_cor!!')
# Return the scalefactor correction factor
return scalefactor_factor
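    # Illustrative sketch for __get_scale_cor() above (not executed): the
    # assumed scale_cor format, with hypothetical values. Each entry is
    # [upper mass of the array bin, correction factor]; the factors are
    # weighted by the fraction of the stellar mass bin each array bin covers.
    #   scale_cor = [[10.0, 1.0], [30.0, 0.5], [100.0, 0.0]]
    #   # a stellar mass bin entirely within 10-30 Msun gets a factor of 0.5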
##############################################
# Decay Radio #
##############################################
def _decay_radio(self, i):
'''
        This function decays the radioactive isotopes present in the
        radioactive gas component and adds the stable decayed product
        to the stable gas component. This uses a simple decay routine
        where an unstable isotope decays into only one stable isotope.
Argument
========
i : Index of the current timestep.
Reminder, here 'i' is the upper-time boundary of the timestep
'''
# Nb of refinement steps
nb_ref = int(self.radio_refinement)
nb_ref_fl = float(nb_ref)
# Copy the duration of the timestep (duration of the decay)
dt_decay = self.history.timesteps[i-1] / nb_ref_fl
# For each radioactive isotope ..
for i_dr in range(0,self.nb_radio_iso):
# Keep track of the mass before the decay
m_copy = self.ymgal_radio[i][i_dr] + self.mdot_radio[i-1][i_dr]
# Get the mass added to the gas
m_added = self.mdot_radio[i-1][i_dr] / nb_ref_fl
# If there is something to decay ..
if m_copy > 0.0:
# Declare variable to keep track of the decayed mass
m_decay = 0.0
# For each refinement step ..
for i_loop in range(nb_ref):
# Add ejecta and decay the isotope
self.ymgal_radio[i][i_dr] += m_added
m_prev = copy.deepcopy(self.ymgal_radio[i][i_dr])
self.ymgal_radio[i][i_dr] *= \
np.exp((-1.0)*dt_decay/self.decay_info[i_dr][2])
# Cumulate the decayed mass
m_decay += m_prev - self.ymgal_radio[i][i_dr]
# Add the decayed stable isotope in the stable gas
self.ymgal[i][self.rs_index[i_dr]] += m_decay
# Calculate the fraction left over in the radioactive gas
f_remain = self.ymgal_radio[i][i_dr] / m_copy
# Correct the contribution of different sources
if not self.pre_calculate_SSPs:
self.ymgal_massive_radio[i][i_dr] *= f_remain
self.ymgal_agb_radio[i][i_dr] *= f_remain
self.ymgal_1a_radio[i][i_dr] *= f_remain
self.ymgal_nsm_radio[i][i_dr] *= f_remain
for iiii in range(0,self.nb_delayed_extra_radio):
self.ymgal_delayed_extra_radio[iiii][i][i_dr] *= f_remain
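    # Illustrative sketch for _decay_radio() above (not executed): one
    # refinement step of the exponential decay, with hypothetical values
    # (a mean lifetime of ~1.0e6 yr is roughly that of Al-26).
    #   tau, dt_decay = 1.0e6, 1.0e5               # [yr]
    #   m_before = 1.0                             # radioactive mass [Msun]
    #   m_after = m_before * np.exp(-dt_decay / tau)
    #   m_decayed = m_before - m_after             # added to the stable product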
##############################################
# Initialize Decay Module #
##############################################
def __initialize_decay_module(self):
'''
        This function imports and initializes the decay module
        used to decay unstable isotopes, and declares the arrays
        used for the communication between the Fortran decay code
        and NuPyCEE.
'''
# Import and declare the decay module
self.decay_module.initialize(self.f_network, self.f_format,\
os.path.join(nupy_path, ""))
# Declare the element names used to return the charge number Z
# Index 0 needs to be NN! H needs to be index 1!
self.element_names = ['NN', 'H', 'He', 'Li', 'Be', 'B', 'C', \
'N', 'O', 'F', 'Ne', 'Na', 'Mg', 'Al', 'Si', 'P',\
'S', 'Cl', 'Ar', 'K', 'Ca', 'Sc', 'Ti', 'V', 'Cr',\
'Mn', 'Fe', 'Co', 'Ni', 'Cu', 'Zn', 'Ga', 'Ge', 'As',\
'Se', 'Br', 'Kr', 'Rb', 'Sr', 'Y', 'Zr', 'Nb', 'Mo',\
'Tc', 'Ru', 'Rh', 'Pd', 'Ag', 'Cd', 'In', 'Sn', 'Sb',\
'Te', 'I', 'Xe', 'Cs', 'Ba', 'La', 'Ce', 'Pr', 'Nd',\
'Pm', 'Sm', 'Eu', 'Gd', 'Tb', 'Dy', 'Ho', 'Er', 'Tm',\
'Yb', 'Lu', 'Hf', 'Ta', 'W', 'Re', 'Os', 'Ir', 'Pt',\
'Au', 'Hg', 'Tl', 'Pb', 'Bi', 'Po', 'At', 'Rn', 'Fr',\
'Ra', 'Ac', 'Th', 'Pa', 'U', 'Np', 'Pu', 'Am', 'Cm',\
'Bk', 'Cf', 'Es', 'Fm', 'Md', 'No', 'Lr', 'Rf', 'Db',\
'Sg', 'Bh', 'Hs', 'Mt', 'Uun', 'Uuu', 'Uub', 'zzz', \
'Uuq']
# Number of isotope entry in the fortran decay module
self.len_iso_module = len(self.decay_module.iso.z)
# Find the isotope name associated with each isotope entry
# Isolate the mass number A
self.iso_decay_module = ['']*self.len_iso_module
self.A_mass_iso_decay_module = np.zeros(self.len_iso_module)
for i_iso in range(self.len_iso_module):
if self.decay_module.iso.z[i_iso] == 0:
self.iso_decay_module[i_iso] = 'Nn-1'
self.A_mass_iso_decay_module[i_iso] = 1.0
else:
self.iso_decay_module[i_iso] = \
self.element_names[self.decay_module.iso.z[i_iso]] + '-' + \
str(self.decay_module.iso.z[i_iso]+self.decay_module.iso.n[i_iso])
self.A_mass_iso_decay_module[i_iso] = \
float(self.decay_module.iso.z[i_iso]+self.decay_module.iso.n[i_iso])
# Year to second conversion
self.yr_to_sec = 3.154e+7
##############################################
# Decay Radio With Module #
##############################################
def _decay_radio_with_module(self, i):
'''
        This function decays the radioactive isotopes present in the
        radioactive gas component and adds the stable decayed product
        to the stable gas component. This uses the decay module
        to account for all decay channels.
Argument
========
i : Index of the current timestep.
Reminder, here 'i' is the upper-time boundary of the timestep
'''
# Nb of refinement steps
nb_ref = int(self.radio_refinement)
nb_ref_fl = float(nb_ref)
# Copy the duration of the timestep (duration of the decay)
dt_decay = self.history.timesteps[i-1] / nb_ref_fl
# Keep track of the mass before the decay
sum_ymgal_temp = sum(self.ymgal_radio[i])
sum_mdot_temp = sum(self.mdot_radio[i-1])
# If there is something to decay ..
if sum_ymgal_temp > 0.0 or sum_mdot_temp > 0.0:
# Get the mass added to the gas at each refined timesteps
m_added = self.mdot_radio[i-1][:self.nb_radio_iso] / nb_ref_fl
# Declare variable to keep track of the decayed mass
m_decay = 0.0
# For each refinement step ..
for i_loop in range(nb_ref):
# Add ejecta
self.ymgal_radio[i][:self.nb_radio_iso] += m_added
# Call the decay module
self.__run_decay_module(i, dt_decay)
##############################################
# Run Decay Module #
##############################################
def __run_decay_module(self, i, dt_decay):
'''
Decay the current radioactive abundances using
the decay module.
Argument
========
i : Index of the current timestep.
dt_decay: Duration of the decay [yr]
'''
# Get the initial abundances of radioactive isotopes
# This is in number of particles, not mass
init_abun = self.__get_init_abun_decay(i)
# Call the decay module
self.decay_module.run_decay(dt_decay*self.yr_to_sec, 1, init_abun)
# For each relevant isotope in the decay module ..
need_resize = False
for i_iso in range(self.len_iso_module):
if self.decay_module.iso.abundance[i_iso] > 0.0 or init_abun[i_iso] > 0.0:
# Convert number of particles into masses
self.decay_module.iso.abundance[i_iso] *= self.A_mass_iso_decay_module[i_iso]
# Replace the unstable component by the decayed product
if self.iso_decay_module[i_iso] in self.radio_iso:
k_temp = self.radio_iso.index(self.iso_decay_module[i_iso])
self.ymgal_radio[i][k_temp] = \
copy.deepcopy(self.decay_module.iso.abundance[i_iso])
# Add decayed product to the stable component
elif self.iso_decay_module[i_iso] in self.history.isotopes:
k_temp = self.history.isotopes.index(self.iso_decay_module[i_iso])
self.ymgal[i][k_temp] += self.decay_module.iso.abundance[i_iso]
                # If this is a new, so-far-unaccounted isotope ..
else:
# Add the new isotope name
self.radio_iso.append(self.iso_decay_module[i_iso])
# Add the entry and the abundance of the new isotope
self.ymgal_radio = np.concatenate((self.ymgal_radio,\
np.zeros((1,self.nb_timesteps+1)).T), axis=1)
self.ymgal_radio[i][-1] = \
copy.deepcopy(self.decay_module.iso.abundance[i_iso])
need_resize = True
# Resize all radioactive arrays
if need_resize:
self.nb_new_radio_iso = len(self.radio_iso)
##############################################
# Get Init Abun Decay #
##############################################
def __get_init_abun_decay(self, i):
'''
Calculate and return the initial abundance of radioactive
isotopes in the format required by the fortran decay module.
Argument
========
i : Index of the current timestep.
'''
# Initially set abundances to zero
init_abun_temp = np.zeros(self.len_iso_module)
# For each radioactive isotope ..
for i_iso in range(self.nb_new_radio_iso):
# Find the isotope index for the decay module
i_temp = self.iso_decay_module.index(self.radio_iso[i_iso])
# Copy the mass of the isotope
init_abun_temp[i_temp] = self.ymgal_radio[i][i_iso] / \
self.A_mass_iso_decay_module[i_temp]
# Return the initial abundance
return init_abun_temp
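    # Illustrative sketch for __get_init_abun_decay() above (not executed),
    # with hypothetical values: the decay module expects abundances as mass
    # divided by the mass number A (proportional to the number of particles).
    #   m_iso, A_iso = 1.0e-5, 26.0                # mass [Msun], mass number
    #   init_abun_entry = m_iso / A_iso            # value passed to the module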
##############################################
# SN Ia Contribution #
##############################################
def __sn1a_contribution(self, i, f_esc_yields):
'''
        This function calculates the contribution of SNe Ia to the stellar ejecta
        and adds it to the mdot array.
Argument
========
i : Index of the current timestep.
'''
# Set the IMF normalization constant for a 1 Mo stellar population
# Normalization constant is only used if inte = 0 in the IMF call
self._imf(0, 0, -1, 0)
# Get SN Ia yields
tables_Z = sorted(self.ytables_1a.Z_list,reverse=True)
if self.radio_sn1a_on:
tables_Z_radio = sorted(self.ytables_1a_radio.Z_list,reverse=True)
# Pick the metallicity
for tz in tables_Z:
if self.zmetal <= tables_Z[-1]:
yields1a = self.ytables_1a.get(Z=tables_Z[-1], quantity='Yields',\
isotopes=self.history.isotopes)
break
if self.zmetal >= tables_Z[0]:
yields1a = self.ytables_1a.get(Z=tables_Z[0], quantity='Yields',\
isotopes=self.history.isotopes)
break
if self.zmetal > tz:
yields1a = self.ytables_1a.get(Z=tz, quantity='Yields',\
isotopes=self.history.isotopes)
break
# Pick the metallicity (for radioactive yields)
if self.radio_sn1a_on:
for tz in tables_Z_radio:
if self.zmetal <= tables_Z_radio[-1]:
yields1a_radio = \
self.ytables_1a_radio.get(Z=tables_Z_radio[-1], quantity='Yields',\
isotopes=self.radio_iso)
break
if self.zmetal >= tables_Z_radio[0]:
yields1a_radio = \
self.ytables_1a_radio.get(Z=tables_Z_radio[0], quantity='Yields',\
isotopes=self.radio_iso)
break
if self.zmetal > tz:
yields1a_radio = self.ytables_1a_radio.get(Z=tz, quantity='Yields',\
isotopes=self.radio_iso)
break
# If the selected SN Ia rate depends on the number of white dwarfs ...
if self.history.sn1a_rate == 'exp' or \
self.history.sn1a_rate == 'gauss' or \
self.history.sn1a_rate == 'maoz' or \
self.history.sn1a_rate == 'power_law':
# Get the lifetimes of the considered stars (if needed ...)
if len(self.poly_fit_dtd_5th) == 0:
lifetime_min = self.inter_lifetime_points[0]
# Normalize the SN Ia rate if not already done
if len(self.poly_fit_dtd_5th) > 0 and not self.normalized:
self.__normalize_poly_fit()
if self.history.sn1a_rate == 'exp' and not self.normalized:
self.__normalize_efolding(lifetime_min)
elif self.history.sn1a_rate == 'gauss' and not self.normalized:
self.__normalize_gauss(lifetime_min)
elif (self.history.sn1a_rate == 'maoz' or \
self.history.sn1a_rate == 'power_law') and not self.normalized:
self.__normalize_maoz(lifetime_min)
# Initialisation of the cumulated time and number of SNe Ia
sn1a_output = 0
tt = 0
# For every upcoming timestep j, starting with the current one ...
for j in range(i-1, self.nb_timesteps):
# Set the upper and lower time boundary of the timestep j
timemin = tt
tt += self.history.timesteps[j]
timemax = tt
# For an input polynomial DTD ...
if len(self.poly_fit_dtd_5th) > 0:
# If no SN Ia ...
if timemax < self.poly_fit_range[0] or \
timemin > self.poly_fit_range[1]:
n1a = 0.0
# If SNe Ia occur during this timestep j ...
else:
# Calculate the number of SNe Ia and white dwarfs (per Mo)
wd_number = 0.0 # Could be calculated if needed
n1a = self.__poly_dtd(timemin, timemax)
# If we use Chris Pritchet's prescription ...
#elif self.len_pritchet_1a_dtd > 0:
# If no SN Ia ...
# if timemax < self.pritchet_1a_dtd[0] or \
# timemin > self.pritchet_1a_dtd[[1]:
# n1a = 0.0
# If SNe Ia occur during this timestep j ...
# else:
# Calculate the number of SNe Ia and white dwarfs (per Mo)
# wd_number = 0.0 # Could be calculated if needed
# n1a = self.__pritchet_dtd(timemin, timemax)
# For other DTDs ...
else:
# Calculate the number of SNe Ia if with Vogelsberger SN Ia rate
if self.history.sn1a_rate=='vogelsberger':
n1a = self.__vogelsberger13(timemin, timemax)
# No SN Ia if the minimum current stellar lifetime is too long
if lifetime_min > timemax:
n1a = 0
# If SNe Ia occur during this timestep j ...
else:
# Set the lower time limit for the integration
if timemin < lifetime_min:
timemin = lifetime_min
# For an exponential SN Ia rate ...
if self.history.sn1a_rate == 'exp':
# Calculate the number of SNe Ia and white dwarfs (per Mo)
n1a, wd_number = self.__efolding(timemin, timemax)
# For a power law SN Ia rate ...
elif self.history.sn1a_rate == 'maoz' or \
self.history.sn1a_rate == 'power_law':
# Calculate the number of SNe Ia and white dwarfs (per Mo)
n1a, wd_number = self.__maoz12_powerlaw(timemin, timemax)
# For a gaussian SN Ia rate ...
elif self.history.sn1a_rate == 'gauss':
# Calculate the number of SNe Ia and white dwarfs (per Mo)
n1a, wd_number = self.__gauss(timemin, timemax)
# Cumulate the number of white dwarfs in the SN Ia mass range
self.wd_sn1a_range[j] += (wd_number * self.m_locked)
# Convert number of SNe Ia per Mo into real number of SNe Ia
n1a = n1a * self.m_locked
# Cumulate the number of SNe Ia
self.sn1a_numbers[j] += n1a
# add SNIa energy
if self.sn1a_on and self.stellar_param_on:
idx=self.stellar_param_attrs.index('SNIa energy')
self.stellar_param[idx][j] = self.stellar_param[idx][j] + \
n1a * self.sn1a_energy/(self.history.timesteps[j]*self.const.syr)
# Output information
if sn1a_output == 0 :
if self.iolevel >= 2:
print ('SN1a (pop) start to contribute at time ', \
'{:.3E}'.format((timemax)))
sn1a_output = 1
# Add the contribution of SNe Ia to the timestep j
f_contr_yields = 1.0 - f_esc_yields
self.mdot[j] = self.mdot[j] + n1a * f_contr_yields * yields1a
self.mdot_1a[j] = self.mdot_1a[j] + n1a * f_contr_yields * yields1a
if self.radio_sn1a_on:
self.mdot_radio[j] += n1a * f_contr_yields * yields1a_radio
self.mdot_1a_radio[j] += n1a * f_contr_yields * yields1a_radio
#############################################
# NS Merger Contribution #
#############################################
def __nsmerger_contribution(self, i):
'''
        This function calculates the contribution of neutron star mergers
        to the stellar ejecta and adds it to the mdot array.
Arguments
=========
i : index of the current timestep
'''
# Get NS merger yields
tables_Z = self.ytables_nsmerger.Z_list
for tz in tables_Z:
if self.zmetal > tz:
yieldsnsm = self.ytables_nsmerger.get(Z=tz, quantity='Yields', \
isotopes=self.history.isotopes)
break
if self.zmetal <= tables_Z[-1]:
yieldsnsm = self.ytables_nsmerger.get(Z=tables_Z[-1], quantity='Yields',\
isotopes=self.history.isotopes)
break
# Get NS merger radioactive yields
if self.radio_nsmerger_on:
tables_Z_radio = self.ytables_nsmerger_radio.Z_list
for tz in tables_Z_radio:
if self.zmetal > tz:
yieldsnsm_radio = \
self.ytables_nsmerger_radio.get(Z=tz, quantity='Yields',\
isotopes=self.radio_iso)
break
if self.zmetal <= tables_Z_radio[-1]:
yieldsnsm_radio = \
self.ytables_nsmerger_radio.get(Z=tables_Z_radio[-1], quantity='Yields',\
isotopes=self.radio_iso)
break
# initialize variables which cumulate in loop
tt = 0
# Normalize ...
if not self.nsm_normalized:
self.__normalize_nsmerger(1) # NOTE: 1 is a dummy variable right now
# For every upcoming timestep j, starting with the current one...
for j in range(i-1, self.nb_timesteps):
# Set the upper and lower time boundary of the timestep j
timemin = tt
tt += self.history.timesteps[j]
timemax = tt
            # Stop if no more NS mergers occur for this SSP
if timemin >= self.t_merger_max:
break
# Calculate the number of NS mergers per stellar mass
nns_m = self.__nsmerger_num(timemin, timemax)
# Calculate the number of NS mergers in the current SSP
nns_m = nns_m * self.m_locked
self.nsm_numbers[j] += nns_m
# Add the contribution of NS mergers to the timestep j
self.mdot[j] = np.array(self.mdot[j]) + \
np.array(nns_m * self.m_ej_nsm * yieldsnsm)
self.mdot_nsm[j] = np.array(self.mdot_nsm[j]) + \
np.array(nns_m * self.m_ej_nsm * yieldsnsm)
if self.radio_nsmerger_on:
self.mdot_radio[j] += nns_m * self.m_ej_nsm * yieldsnsm_radio
self.mdot_nsm_radio[j] += nns_m * self.m_ej_nsm * yieldsnsm_radio
##############################################
# NS merger number #
##############################################
def __nsmerger_num(self, timemin, timemax):
'''
This function returns the number of neutron star mergers occurring within a given time
interval using the Dominik et al. (2012) delay-time distribution function.
Arguments
=========
timemin : Lower boundary of time interval.
timemax : Upper boundary of time interval.
'''
# If an input DTD array is provided ...
if self.len_nsmerger_dtd_array > 0:
# Find the lower and upper Z boundaries
if self.zmetal <= self.Z_nsmerger[0]:
i_Z_low = 0
i_Z_up = 0
elif self.zmetal >= self.Z_nsmerger[-1]:
i_Z_low = -1
i_Z_up = -1
else:
i_Z_low = 0
i_Z_up = 1
while self.zmetal > self.Z_nsmerger[i_Z_up]:
i_Z_low += 1
i_Z_up += 1
# Get the number of NSMs at the lower Z boundary
nb_NSMs_low = self.__get_nb_nsm_array(timemin, timemax, i_Z_low)
# Return the number of NSM .. if no interpolation is needed
if i_Z_up == i_Z_low:
return nb_NSMs_low
# Interpolate the number of NSMs .. if needed
else:
nb_NSMs_up = self.__get_nb_nsm_array(timemin, timemax, i_Z_up)
lg_Z_low = np.log10(self.Z_nsmerger[i_Z_low])
lg_Z_up = np.log10(self.Z_nsmerger[i_Z_up])
lg_Z_metal = np.log10(self.zmetal)
a = (nb_NSMs_up - nb_NSMs_low) / (lg_Z_up - lg_Z_low)
b = nb_NSMs_low - a * lg_Z_low
return a * lg_Z_metal + b
# If all NSMs occur after a time t_NSM_coal ...
if self.t_nsm_coal > 0.0:
# Return all NSMs if t_NSM_coal is in the current time interval
if timemin <= self.t_nsm_coal and self.t_nsm_coal < timemax:
return self.nb_nsm_per_m
else:
return 0.0
# If the NSM DTD is a power law ...
if len(self.nsm_dtd_power) > 0:
# Copy the power law characteristics
t_min_temp = self.nsm_dtd_power[0]
t_max_temp = self.nsm_dtd_power[1]
alpha_temp = self.nsm_dtd_power[2]
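            # Analytic integral of the power-law DTD A*t**alpha over the overlap of
            # [timemin, timemax] with [t_min_temp, t_max_temp]; the alpha = -1 case
            # reduces to a natural logarithm.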
# Return the number of NSMs
if timemax < t_min_temp or timemin > t_max_temp:
return 0.0
elif alpha_temp == -1.0:
return self.A_nsmerger * \
(np.log(min(t_max_temp,timemax)) - np.log(max(t_min_temp,timemin)))
else:
return self.A_nsmerger / (1.0+alpha_temp) * \
(min(t_max_temp,timemax)**(1.0+alpha_temp) - \
max(t_min_temp,timemin)**(1.0+alpha_temp))
# Values of bounds on the piecewise DTDs, in Myr
lower = 10
a02bound = 22.2987197486
a002bound = 39.7183036496
#upper = 10000
# convert time bounds into Myr, since DTD is in units of Myr
timemin = timemin/1.0e6
timemax = timemax/1.0e6
# initialise the number of neutron star mergers in the current time interval
nns_m = 0.0
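        # The Dominik et al. (2012) DTD is fitted piecewise: a polynomial below the
        # a02bound/a002bound transition times and a 1/t power law above, so each branch
        # below evaluates the corresponding definite integrals term by term.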
# Integrate over solar metallicity DTD
if self.zmetal >= 0.019:
# Define a02 DTD fit parameters
a = -0.0138858377011
b = 1.0712569392
c = -32.1555682584
d = 468.236521089
e = -3300.97955814
f = 9019.62468302
a_pow = 1079.77358975
# Manually compute definite integral values over DTD with bounds timemin and timemax
# DTD doesn't produce until 10 Myr
if timemax < lower:
nns_m = 0.0
# if timemin is below 10 Myr and timemax is in the first portion of DTD
elif timemin < lower and timemax <= a02bound:
up = ((a/6.)*(timemax**6))+((b/5.)*(timemax**5))+((c/4.)*(timemax**4))+((d/3.)*(timemax**3))+((e/2.)*(timemax**2))+(f*timemax)
down = ((a/6.)*(lower**6))+((b/5.)*(lower**5))+((c/4.)*(lower**4))+((d/3.)*(lower**3))+((e/2.)*(lower**2))+(f*lower)
nns_m = up - down
# if timemin is below 10 Myr and timemax is in the power law portion of DTD
elif timemin < lower and timemax >= a02bound:
up1 = a_pow * np.log(timemax)
down1 = a_pow * np.log(a02bound)
up = up1 - down1
up2 = ((a/6.)*(a02bound**6))+((b/5.)*(a02bound**5))+((c/4.)*(a02bound**4))+((d/3.)*(a02bound**3))+((e/2.)*(a02bound**2))+(f*a02bound)
down2 = ((a/6.)*(lower**6))+((b/5.)*(lower**5))+((c/4.)*(lower**4))+((d/3.)*(lower**3))+((e/2.)*(lower**2))+(f*lower)
down = up2 - down2
nns_m = up + down # + because we are adding the contribution of the two integrals on either side of the piecewise discontinuity
# if both timemin and timemax are in initial portion of DTD
elif timemin >= lower and timemax <= a02bound:
up = ((a/6.)*(timemax**6))+((b/5.)*(timemax**5))+((c/4.)*(timemax**4))+((d/3.)*(timemax**3))+((e/2.)*(timemax**2))+(f*timemax)
down = ((a/6.)*(timemin**6))+((b/5.)*(timemin**5))+((c/4.)*(timemin**4))+((d/3.)*(timemin**3))+((e/2.)*(timemin**2))+(f*timemin)
nns_m = up - down
# if timemin is in initial portion of DTD and timemax is in power law portion
elif timemin <= a02bound and timemax > a02bound:
up1 = a_pow * np.log(timemax)
down1 = a_pow * np.log(a02bound)
up = up1 - down1
up2 = ((a/6.)*(a02bound**6))+((b/5.)*(a02bound**5))+((c/4.)*(a02bound**4))+((d/3.)*(a02bound**3))+((e/2.)*(a02bound**2))+(f*a02bound)
down2 = ((a/6.)*(timemin**6))+((b/5.)*(timemin**5))+((c/4.)*(timemin**4))+((d/3.)*(timemin**3))+((e/2.)*(timemin**2))+(f*timemin)
down = up2 - down2
nns_m = up + down # + because we are adding the contribution of the two integrals on either side of the piecewise discontinuity
# if both timemin and timemax are in power law portion of DTD
elif timemin > a02bound:
up = a_pow * np.log(timemax)
down = a_pow * np.log(timemin)
nns_m = up - down
# normalize
nns_m *= self.A_nsmerger_02
# Integrate over 0.1 solar metallicity
elif self.zmetal <= 0.002:
# Define a002 DTD fit parameters
a = -2.88192413434e-5
b = 0.00387383125623
c = -0.20721471544
d = 5.64382310405
e = -82.6061154979
f = 617.464778362
g = -1840.49386605
a_pow = 153.68106991
# Manually compute definite integral values over DTD with bounds timemin and timemax, procedurally identical to a02 computation above
if timemax < lower:
nns_m = 0.0
elif timemin < lower and timemax <= a002bound:
up = ((a/7.)*(timemax**7))+((b/6.)*(timemax**6))+((c/5.)*(timemax**5))+((d/4.)*(timemax**4))+((e/3.)*(timemax**3))+((f/2.)*(timemax**2))+(g*timemax)
down = ((a/7.)*(lower**7))+((b/6.)*(lower**6))+((c/5.)*(lower**5))+((d/4.)*(lower**4))+((e/3.)*(lower**3))+((f/2.)*(lower**2))+(g*lower)
nns_m = up - down
elif timemin < lower and timemax >= a002bound:
up1 = a_pow * np.log(timemax)
down1 = a_pow * np.log(a002bound)
up = up1 - down1
up2 = ((a/7.)*(a002bound**7))+((b/6.)*(a002bound**6))+((c/5.)*(a002bound**5))+((d/4.)*(a002bound**4))+((e/3.)*(a002bound**3))+((f/2.)*(a002bound**2))+(g*a002bound)
down2 = ((a/7.)*(lower**7))+((b/6.)*(lower**6))+((c/5.)*(lower**5))+((d/4.)*(lower**4))+((e/3.)*(lower**3))+((f/2.)*(lower**2))+(g*lower)
down = up2 - down2
nns_m = up + down # + because we are adding the contribution of the two integrals on either side of the piecewise discontinuity
elif timemin >= lower and timemax <= a002bound:
up = ((a/7.)*(timemax**7))+((b/6.)*(timemax**6))+((c/5.)*(timemax**5))+((d/4.)*(timemax**4))+((e/3.)*(timemax**3))+((f/2.)*(timemax**2))+(g*timemax)
down = ((a/7.)*(timemin**7))+((b/6.)*(timemin**6))+((c/5.)*(timemin**5))+((d/4.)*(timemin**4))+((e/3.)*(timemin**3))+((f/2.)*(timemin**2))+(g*timemin)
nns_m = up - down
elif timemin <= a002bound and timemax > a002bound:
up1 = a_pow * np.log(timemax)
down1 = a_pow * np.log(a002bound)
up = up1 - down1
up2 = ((a/7.)*(a002bound**7))+((b/6.)*(a002bound**6))+((c/5.)*(a002bound**5))+((d/4.)*(a002bound**4))+((e/3.)*(a002bound**3))+((f/2.)*(a002bound**2))+(g*a002bound)
down2 = ((a/7.)*(timemin**7))+((b/6.)*(timemin**6))+((c/5.)*(timemin**5))+((d/4.)*(timemin**4))+((e/3.)*(timemin**3))+((f/2.)*(timemin**2))+(g*timemin)
down = up2 - down2
nns_m = up + down # + because we are adding the contribution of the two integrals on either side of the piecewise discontinuity
elif timemin > a002bound:
up = a_pow*np.log(timemax)
down = a_pow*np.log(timemin)
nns_m = up - down
# normalize
nns_m *= self.A_nsmerger_002
# Interpolate between the two metallicities
else:
# Define a002 DTD fit parameters
a = -2.88192413434e-5
b = 0.00387383125623
c = -0.20721471544
d = 5.64382310405
e = -82.6061154979
f = 617.464778362
g = -1840.49386605
a_pow = 153.68106991
# 0.1 solar metallicity integration
if timemax < lower:
nns_m002 = 0.0
elif timemin < lower and timemax <= a002bound:
up = ((a/7.)*(timemax**7))+((b/6.)*(timemax**6))+((c/5.)*(timemax**5))+((d/4.)*(timemax**4))+((e/3.)*(timemax**3))+((f/2.)*(timemax**2))+(g*timemax)
down = ((a/7.)*(lower**7))+((b/6.)*(lower**6))+((c/5.)*(lower**5))+((d/4.)*(lower**4))+((e/3.)*(lower**3))+((f/2.)*(lower**2))+(g*lower)
nns_m002 = up - down
elif timemin < lower and timemax >= a002bound:
up1 = a_pow * np.log(timemax)
down1 = a_pow * np.log(a002bound)
up = up1 - down1
up2 = ((a/7.)*(a002bound**7))+((b/6.)*(a002bound**6))+((c/5.)*(a002bound**5))+((d/4.)*(a002bound**4))+((e/3.)*(a002bound**3))+((f/2.)*(a002bound**2))+(g*a002bound)
down2 = ((a/7.)*(lower**7))+((b/6.)*(lower**6))+((c/5.)*(lower**5))+((d/4.)*(lower**4))+((e/3.)*(lower**3))+((f/2.)*(lower**2))+(g*lower)
down = up2 - down2
nns_m002 = up + down # + because we are adding the contribution of the two integrals on either side of the piecewise discontinuity
elif timemin >= lower and timemax <= a002bound:
up = ((a/7.)*(timemax**7))+((b/6.)*(timemax**6))+((c/5.)*(timemax**5))+((d/4.)*(timemax**4))+((e/3.)*(timemax**3))+((f/2.)*(timemax**2))+(g*timemax)
down = ((a/7.)*(timemin**7))+((b/6.)*(timemin**6))+((c/5.)*(timemin**5))+((d/4.)*(timemin**4))+((e/3.)*(timemin**3))+((f/2.)*(timemin**2))+(g*timemin)
nns_m002 = up - down
elif timemin <= a002bound and timemax > a002bound:
up1 = a_pow * np.log(timemax)
down1 = a_pow * np.log(a002bound)
up = up1 - down1
up2 = ((a/7.)*(a002bound**7))+((b/6.)*(a002bound**6))+((c/5.)*(a002bound**5))+((d/4.)*(a002bound**4))+((e/3.)*(a002bound**3))+((f/2.)*(a002bound**2))+(g*a002bound)
down2 = ((a/7.)*(timemin**7))+((b/6.)*(timemin**6))+((c/5.)*(timemin**5))+((d/4.)*(timemin**4))+((e/3.)*(timemin**3))+((f/2.)*(timemin**2))+(g*timemin)
down = up2 - down2
nns_m002 = up + down # + because we are adding the contribution of the two integrals on either side of the piecewise discontinuity
elif timemin > a002bound:
up = a_pow*np.log(timemax)
down = a_pow*np.log(timemin)
nns_m002 = up - down
# Define a02 DTD fit parameters
a = -0.0138858377011
b = 1.0712569392
c = -32.1555682584
d = 468.236521089
e = -3300.97955814
f = 9019.62468302
a_pow = 1079.77358975
# solar metallicity integration
if timemax < lower:
nns_m02 = 0.0
elif timemin < lower and timemax <= a02bound:
up = ((a/6.)*(timemax**6))+((b/5.)*(timemax**5))+((c/4.)*(timemax**4))+((d/3.)*(timemax**3))+((e/2.)*(timemax**2))+(f*timemax)
down = ((a/6.)*(lower**6))+((b/5.)*(lower**5))+((c/4.)*(lower**4))+((d/3.)*(lower**3))+((e/2.)*(lower**2))+(f*lower)
nns_m02 = up - down
elif timemin < lower and timemax >= a02bound:
up1 = a_pow * np.log(timemax)
down1 = a_pow * np.log(a02bound)
up = up1 - down1
up2 = ((a/6.)*(a02bound**6))+((b/5.)*(a02bound**5))+((c/4.)*(a02bound**4))+((d/3.)*(a02bound**3))+((e/2.)*(a02bound**2))+(f*a02bound)
down2 = ((a/6.)*(lower**6))+((b/5.)*(lower**5))+((c/4.)*(lower**4))+((d/3.)*(lower**3))+((e/2.)*(lower**2))+(f*lower)
down = up2 - down2
nns_m02 = up + down # + because we are adding the contribution of the two integrals on either side of the piecewise discontinuity
elif timemin >= lower and timemax <= a02bound:
up = ((a/6.)*(timemax**6))+((b/5.)*(timemax**5))+((c/4.)*(timemax**4))+((d/3.)*(timemax**3))+((e/2.)*(timemax**2))+(f*timemax)
down = ((a/6.)*(timemin**6))+((b/5.)*(timemin**5))+((c/4.)*(timemin**4))+((d/3.)*(timemin**3))+((e/2.)*(timemin**2))+(f*timemin)
nns_m02 = up - down
elif timemin <= a02bound and timemax > a02bound:
up1 = a_pow * np.log(timemax)
down1 = a_pow * np.log(a02bound)
up = up1 - down1
up2 = ((a/6.)*(a02bound**6))+((b/5.)*(a02bound**5))+((c/4.)*(a02bound**4))+((d/3.)*(a02bound**3))+((e/2.)*(a02bound**2))+(f*a02bound)
down2 = ((a/6.)*(timemin**6))+((b/5.)*(timemin**5))+((c/4.)*(timemin**4))+((d/3.)*(timemin**3))+((e/2.)*(timemin**2))+(f*timemin)
down = up2 - down2
nns_m02 = up + down # + because we are adding the contribution of the two integrals on either side of the piecewise discontinuity
elif timemin > a02bound:
up = a_pow * np.log(timemax)
down = a_pow * np.log(timemin)
nns_m02 = up - down
# normalize
nns_m02 *= self.A_nsmerger_02
nns_m002 *= self.A_nsmerger_002
# interpolate between nns_m002 and nns_m02
metallicities = np.asarray([0.002, 0.02])
nsm_array = np.asarray([nns_m002, nns_m02])
nns_m = np.interp(self.zmetal, metallicities, nsm_array)
# return the number of neutron star mergers produced in this time interval
return nns_m
##############################################
# Get Nb NSM Array #
##############################################
def __get_nb_nsm_array(self, timemin, timemax, i_Z_temp):
'''
This function returns the number of NSMs that occur within
a specific time interval for the input DTD array.
Arguments
=========
        timemin : Lower boundary of the OMEGA time interval
        timemax : Upper boundary of the OMEGA time interval
i_Z_temp : Index of the considered Z in the DTD array
'''
# If there are some NSMs ...
nb_NSMs_temp = 0.0
if timemin < max(self.nsmerger_dtd_array[i_Z_temp][0]) and \
timemax > min(self.nsmerger_dtd_array[i_Z_temp][0]):
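            # The input DTD array is treated as piecewise constant: each input interval
            # overlapping [timemin, timemax] contributes its rate times the overlap duration.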
# Find the lower time boundary of the first input interval
i_t_low = 0
while timemin > self.nsmerger_dtd_array[i_Z_temp][0][i_t_low+1]:
i_t_low += 1
# While the current input interval is still within timemin - timemax ...
while timemax > self.nsmerger_dtd_array[i_Z_temp][0][i_t_low]:
# Cumulate the number of NSMs
dt_NSM_temp = \
min(timemax, self.nsmerger_dtd_array[i_Z_temp][0][i_t_low+1]) - \
max(timemin, self.nsmerger_dtd_array[i_Z_temp][0][i_t_low])
nb_NSMs_temp += \
self.nsmerger_dtd_array[i_Z_temp][1][i_t_low] * dt_NSM_temp
# Go to the next interval
i_t_low += 1
# Return the number of NSMs
return nb_NSMs_temp
##############################################
# NS Merger Rate #
##############################################
def __nsmerger_rate(self, t):
'''
This function returns the rate of neutron star mergers occurring at a given
stellar lifetime. It uses the delay time distribution
of Dominik et al. (2012).
Arguments
=========
        t : lifetime of stellar population in question
            (the metallicity is taken from self.zmetal)
'''
# if solar metallicity...
if self.zmetal == 0.02:
# piecewise defined DTD
if t < 25.7:
func = (-0.0138858377011*(t**5))+(1.10712569392*(t**4))-(32.1555682584*(t**3))+(468.236521089*(t**2))-(3300.97955814*t)+(9019.62468302)
elif t >= 25.7:
func = 1079.77358975/t
# if 0.1 solar metallicity...
elif self.zmetal == 0.002:
# piecewise defined DTD
if t < 45.76:
func = ((-2.88192413434e-5)*(t**6))+(0.00387383125623*(t**5))-(0.20721471544*(t**4))+(5.64382310405*(t**3))-(82.6061154979*(t**2))+(617.464778362*t)-(1840.49386605)
elif t >= 45.76:
func = 153.68106991 / t
# return the appropriate NS merger rate for time t
return func
##############################################
# NS merger normalization #
##############################################
def __normalize_nsmerger(self, lifetime_min):
'''
This function normalizes the Dominik et al. (2012) delay time distribution
to appropriately compute the total number of neutron star mergers in an SSP.
Arguments
=========
lifetime_min : minimum stellar lifetime
'''
# Compute the number of massive stars (NS merger progenitors)
N = self._imf(self.nsmerger_bdys[0], self.nsmerger_bdys[1], 1) # IMF integration
# Compute total mass of system
M = self._imf(self.imf_bdys[0], self.imf_bdys[1], 2)
# multiply number by fraction in binary systems
N *= self.f_binary / 2.
# multiply number by fraction which will form neutron star mergers
N *= self.f_merger
# Define the number of NSM per Msun formed .. if not already given
if self.nb_nsm_per_m < 0.0:
self.nb_nsm_per_m = N / M
# Calculate the normalization constants for Z_o and 0.1Z_o
self.A_nsmerger_02 = N / ((196.4521885+6592.893564)*M)
self.A_nsmerger_002 = N / ((856.0742532+849.6301493)*M)
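        # The denominators appear to be the integrals of the un-normalized piecewise
        # DTD fits (polynomial part + power-law part, in Myr) for Z = 0.02 and Z = 0.002,
        # so that A * integral(DTD) * M recovers N.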
# Initialization for the input DTD .. if chosen
if self.len_nsmerger_dtd_array > 0:
self.Z_nsmerger = np.zeros(self.len_nsmerger_dtd_array)
for i_dtd in range(0,self.len_nsmerger_dtd_array):
self.Z_nsmerger[i_dtd] = self.nsmerger_dtd_array[i_dtd][2]
if max(self.nsmerger_dtd_array[i_dtd][0]) < self.history.tend:
self.nsmerger_dtd_array[i_dtd][0].append(2.*self.history.tend)
self.nsmerger_dtd_array[i_dtd][1].append(0.0)
# Calculate the normalization of the power law .. if chosen
elif len(self.nsm_dtd_power) > 0:
t_min_temp = self.nsm_dtd_power[0]
t_max_temp = self.nsm_dtd_power[1]
alpha_temp = self.nsm_dtd_power[2]
if alpha_temp == -1.0:
self.A_nsmerger = self.nb_nsm_per_m / \
( np.log(t_max_temp) - np.log(t_min_temp) )
else:
self.A_nsmerger = self.nb_nsm_per_m * (1.0+alpha_temp) / \
( t_max_temp**(1.0+alpha_temp) - t_min_temp**(1.0+alpha_temp) )
# Ensure normalization only occurs once
self.nsm_normalized = True
#############################################
# Normalize Delayed Extra #
#############################################
def __normalize_delayed_extra(self):
'''
        This function normalizes the DTD of all input delayed extra sources.
'''
# Create the normalization factor array
self.delayed_extra_dtd_A_norm = []
# Create the un-normalized maximum dtd value array
self.delayed_extra_dtd_max = []
# For each delayed source ...
for i_e_nde in range(0,self.nb_delayed_extra):
self.delayed_extra_dtd_A_norm.append([])
self.delayed_extra_dtd_max.append([])
# For each metallicity ...
for i_Z_nde in range(0,len(self.delayed_extra_dtd[i_e_nde])):
# Copy the lower and upper time boundaries
t_low_nde = self.delayed_extra_dtd[i_e_nde][i_Z_nde][0][0]
t_up_nde = self.delayed_extra_dtd[i_e_nde][i_Z_nde][-1][0]
# Integrate the entire DTD
N_tot_nde = self.__delayed_extra_num(t_low_nde,t_up_nde,i_e_nde,i_Z_nde)
                # Assign the normalization factor
self.delayed_extra_dtd_A_norm[i_e_nde].append(\
self.delayed_extra_dtd_norm[i_e_nde][i_Z_nde] / N_tot_nde)
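                # A_norm scales the raw DTD so that its integral over the full delay-time
                # range equals the requested number of events per Msun (delayed_extra_dtd_norm).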
# Find the max DTD value (un-normalized)
#yy_max_temp = 0.0
#for i_t_nde in range(len(self.delayed_extra_dtd[i_e_nde][i_Z_nde])):
# if self.delayed_extra_dtd[i_e_nde][i_Z_nde][i_t_nde][1] > yy_max_temp:
# yy_max_temp = self.delayed_extra_dtd[i_e_nde][i_Z_nde][i_t_nde][1]
#self.delayed_extra_dtd_max[i_e_nde].append(yy_max_temp)
#############################################
# Delayed Extra Contribution #
#############################################
def __delayed_extra_contribution(self, i):
'''
This function calculates the contribution of delayed extra source
and adds it to the mdot array.
Arguments
=========
i : index of the current timestep
'''
# For each delayed extra source ...
for i_extra in range(0,self.nb_delayed_extra):
# Get the yields and metallicity indexes of the considered source
if self.len_decay_file > 0 or self.use_decay_module:
Z_extra, yextra_low, yextra_up, yextra_low_radio, \
yextra_up_radio, iZ_low, iZ_up = \
self.__get_YZ_delayed_extra(i_extra, return_radio=True)
else:
Z_extra, yextra_low, yextra_up, iZ_low, iZ_up = \
self.__get_YZ_delayed_extra(i_extra)
            # Initialize the age of the SSP, which accumulates in the loop below
tt = 0
# Define the ultimate min and max times
tmax_extra = max(self.delayed_extra_dtd[i_extra][iZ_low][-1][0],\
self.delayed_extra_dtd[i_extra][iZ_up ][-1][0])
tmin_extra = min(self.delayed_extra_dtd[i_extra][iZ_low][0][0],\
self.delayed_extra_dtd[i_extra][iZ_up ][0][0])
# For every upcoming timestep j, starting with the current one...
for j in range(i-1, self.nb_timesteps):
# Set the upper and lower time boundary of the timestep j
timemin = tt
tt += self.history.timesteps[j]
timemax = tt
                # Stop if the SSP no longer contributes to the delayed extra source
if timemin >= tmax_extra:
break
                # If there is something to eject during this timestep j ...
if timemax > tmin_extra:
# Get the total number of sources and yields (interpolated)
if self.len_decay_file > 0 or self.use_decay_module:
nb_sources_extra_tot, yields_extra_interp, yields_extra_interp_radio = \
self.__get_nb_y_interp(timemin, timemax, i_extra, iZ_low, iZ_up,\
yextra_low, yextra_up, Z_extra, yextra_low_radio=yextra_low_radio,\
yextra_up_radio=yextra_up_radio, return_radio=True)
else:
nb_sources_extra_tot, yields_extra_interp = \
self.__get_nb_y_interp(timemin, timemax, i_extra, iZ_low, iZ_up,\
yextra_low, yextra_up, Z_extra)
# Calculate the number of sources in the current SSP (not per Msun)
self.delayed_extra_numbers[i_extra][j] += nb_sources_extra_tot
# Add the contribution of the sources to the timestep j
self.mdot_delayed_extra[i_extra][j] = \
np.array(self.mdot_delayed_extra[i_extra][j]) + yields_extra_interp
self.mdot[j] = np.array(self.mdot[j]) + yields_extra_interp
# Add the radioactive contribution
if self.len_decay_file > 0 or self.use_decay_module:
self.mdot_delayed_extra_radio[i_extra][j] = \
np.array(self.mdot_delayed_extra_radio[i_extra][j]) + yields_extra_interp_radio
self.mdot_radio[j] = np.array(self.mdot_radio[j]) + yields_extra_interp_radio
#############################################
# Get YZ Delayed Extra #
#############################################
def __get_YZ_delayed_extra(self, i_extra, return_radio=False):
'''
This function returns the yields, metallicities, and Z boundary indexes
for a considered delayed extra source (according to the ISM metallicity).
Arguments
=========
i_extra : index of the extra source
return_radio : whether to calculate radioactive yields
'''
        # Get the metallicities available for the considered source (in decreasing order)
Z_extra = self.ytables_delayed_extra[i_extra].Z_list
nb_Z_extra = len(Z_extra)
# Set the Z indexes if only one metallicity is provided
if nb_Z_extra == 1:
iZ_low = iZ_up = 0
# If several metallicities are provided ...
else:
            # Search for the input metallicity interval that brackets self.zmetal
            # Use the lowest input Z if zmetal is lower (and the highest if zmetal is higher)
if self.zmetal <= Z_extra[-1]:
iZ_low = iZ_up = -1
elif self.zmetal >= Z_extra[0]:
iZ_low = iZ_up = 0
else:
iZ_low = 1
iZ_up = 0
while self.zmetal < Z_extra[iZ_low]:
iZ_low += 1
iZ_up += 1
# Get the yields table for the lower and upper Z boundaries
yextra_low = self.ytables_delayed_extra[i_extra].get( \
Z=Z_extra[iZ_low], quantity='Yields', isotopes=self.history.isotopes)
yextra_up = self.ytables_delayed_extra[i_extra].get( \
Z=Z_extra[iZ_up], quantity='Yields', isotopes=self.history.isotopes)
if return_radio:
yextra_low_radio = self.ytables_delayed_extra_radio[i_extra].get( \
Z=Z_extra[iZ_low], quantity='Yields', isotopes=self.radio_iso)
yextra_up_radio = self.ytables_delayed_extra_radio[i_extra].get( \
Z=Z_extra[iZ_up], quantity='Yields', isotopes=self.radio_iso)
# Return the metallicities and the yields and Z boundaries
if return_radio:
return Z_extra, yextra_low, yextra_up, yextra_low_radio, yextra_up_radio, iZ_low, iZ_up
else:
return Z_extra, yextra_low, yextra_up, iZ_low, iZ_up
#############################################
# Get Nb Y Interp #
#############################################
def __get_nb_y_interp(self, timemin, timemax, i_extra, iZ_low, iZ_up,\
yextra_low, yextra_up, Z_extra, yextra_low_radio=[],\
yextra_up_radio=[], return_radio=False):
'''
        This function returns the total number of delayed extra sources and the
        associated ejecta (interpolated in metallicity if needed) for a time interval.
Arguments
=========
timemin : Lower boundary of the time interval.
timemax : Upper boundary of the time interval.
i_extra : Index of the extra source.
iZ_low : Lower index of the provided input Z in the delayed extra yields table.
iZ_up : Upper index of the provided input Z in the delayed extra yields table.
yextra_low : Delayed extra yields of the lower Z.
yextra_up : Delayed extra yields of the upper Z.
Z_extra : List of provided Z in the delayed extra yields table.
'''
# Calculate the number of sources per unit of Msun formed (lower Z)
nb_sources_low = self.__delayed_extra_num(timemin, timemax, i_extra, iZ_low)
# Normalize the number of sources (still per unit of Msun formed)
# This needs to be before calculating ejecta_Z_low!
nb_sources_low *= self.delayed_extra_dtd_A_norm[i_extra][iZ_low]
# Calculate the total ejecta (yields) for the lower Z
ejecta_Z_low = np.array(nb_sources_low * self.m_locked *
yextra_low * self.delayed_extra_yields_norm[i_extra][iZ_low])
if return_radio:
ejecta_Z_low_radio = np.array(nb_sources_low * self.m_locked *
yextra_low_radio * self.delayed_extra_yields_norm_radio[i_extra][iZ_low])
# If we do not need to interpolate between Z
if iZ_up == iZ_low:
# Return the total number of sources and ejecta for the lower Z
if return_radio:
return nb_sources_low * self.m_locked, ejecta_Z_low, ejecta_Z_low_radio
else:
return nb_sources_low * self.m_locked, ejecta_Z_low
# If we need to interpolate between Z
else:
# Calculate the number of sources per unit of Msun formed (upper Z)
nb_sources_up = self.__delayed_extra_num(timemin, timemax, i_extra, iZ_up)
# Normalize the number of sources (still per unit of Msun formed)
# This needs to be before calculating ejecta_Z_up!
nb_sources_up *= self.delayed_extra_dtd_A_norm[i_extra][iZ_up]
# Calculate the total ejecta (yields) for the upper Z
ejecta_Z_up = np.array(nb_sources_up * self.m_locked *
yextra_up * self.delayed_extra_yields_norm[i_extra][iZ_up])
if return_radio:
ejecta_Z_up_radio = np.array(nb_sources_up * self.m_locked *
yextra_up_radio * self.delayed_extra_yields_norm_radio[i_extra][iZ_up])
# Interpolate the number of sources (N = aa*log10(Z) + bb)
aa = (nb_sources_up - nb_sources_low) / \
(np.log10(Z_extra[iZ_up]) - np.log10(Z_extra[iZ_low]))
bb = nb_sources_up - aa * np.log10(Z_extra[iZ_up])
nb_sources_interp = aa * np.log10(self.zmetal) + bb
# Convert yields into log if needed ..
if self.delayed_extra_yields_log_int:
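                # Zero yields are replaced by 1.0e-50 (log10 = -50) so the interpolation
                # in log space stays finite.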
for i_iso_temp in range(self.nb_isotopes):
if ejecta_Z_low[i_iso_temp] == 0.0:
ejecta_Z_low[i_iso_temp] = -50.0
else:
ejecta_Z_low[i_iso_temp] = np.log10(ejecta_Z_low[i_iso_temp])
if ejecta_Z_up[i_iso_temp] == 0.0:
ejecta_Z_up[i_iso_temp] = -50.0
else:
ejecta_Z_up[i_iso_temp] = np.log10(ejecta_Z_up[i_iso_temp])
if return_radio:
for i_iso_temp in range(self.nb_radio_iso):
if ejecta_Z_low_radio[i_iso_temp] == 0.0:
ejecta_Z_low_radio[i_iso_temp] = -50.0
else:
ejecta_Z_low_radio[i_iso_temp] = np.log10(ejecta_Z_low_radio[i_iso_temp])
if ejecta_Z_up_radio[i_iso_temp] == 0.0:
ejecta_Z_up_radio[i_iso_temp] = -50.0
else:
ejecta_Z_up_radio[i_iso_temp] = np.log10(ejecta_Z_up_radio[i_iso_temp])
# Interpolate the yields (Y = aa*log10(Z) + bb)
aa = (ejecta_Z_up - ejecta_Z_low) / \
(np.log10(Z_extra[iZ_up]) - np.log10(Z_extra[iZ_low]))
bb = ejecta_Z_up - aa * np.log10(Z_extra[iZ_up])
ejecta_interp = aa * np.log10(self.zmetal) + bb
if return_radio:
aa = (ejecta_Z_up_radio - ejecta_Z_low_radio) / \
(np.log10(Z_extra[iZ_up]) - np.log10(Z_extra[iZ_low]))
bb = ejecta_Z_up_radio - aa * np.log10(Z_extra[iZ_up])
ejecta_interp_radio = aa * np.log10(self.zmetal) + bb
# Convert interpolated yields back into linear scale if needed ..
if self.delayed_extra_yields_log_int:
for i_iso_temp in range(self.nb_isotopes):
ejecta_interp[i_iso_temp] = 10**(ejecta_interp[i_iso_temp])
if return_radio:
for i_iso_temp in range(self.nb_radio_iso):
ejecta_interp_radio[i_iso_temp] = 10**(ejecta_interp_radio[i_iso_temp])
# Return the total number of sources and ejecta for the interpolation
if return_radio:
return nb_sources_interp * self.m_locked, ejecta_interp, ejecta_interp_radio
else:
return nb_sources_interp * self.m_locked, ejecta_interp
#############################################
# Delayed Extra Num #
#############################################
def __delayed_extra_num(self, timemin, timemax, i_extra, i_ZZ):
'''
This function returns the integrated number of delayed extra source within
a given OMEGA time interval for a given source and metallicity
Arguments
=========
timemin : Lower boundary of the OMEGA time interval.
timemax : Upper boundary of the OMEGA time interval.
i_extra : Index of the extra source.
        i_ZZ : Index of the provided input Z in the delayed extra yields table.
'''
# Initialize the number of sources that occur between timemin and timemax
N_den = 0
# Search the lower boundary input time interval
i_search = 0
while timemin > self.delayed_extra_dtd[i_extra][i_ZZ][i_search+1][0]:
#while timemin > self.delayed_extra_dtd[i_extra][0][i_search+1][0]:
i_search += 1
        # Initialize the current time (start of the overlap between the DTD and the timestep)
t_cur = max(self.delayed_extra_dtd[i_extra][i_ZZ][0][0], timemin)
timemax_cor = min(timemax,self.delayed_extra_dtd[i_extra][i_ZZ][-1][0])
        # While there is still time to consider in the OMEGA timestep ...
while abs(timemax_cor - t_cur) > 0.01:
# Integrate the DTD
t_min_temp = max(t_cur,self.delayed_extra_dtd[i_extra][i_ZZ][i_search][0])
t_max_temp = min(timemax_cor,self.delayed_extra_dtd[i_extra][i_ZZ][i_search+1][0])
N_den += self.__integrate_delayed_extra_DTD(\
t_min_temp, t_max_temp, i_extra, i_ZZ, i_search)
# Go to the next delayed input timestep
t_cur += t_max_temp - t_min_temp
i_search += 1
        # Return the number of occurring sources
return N_den
#############################################
# Integrate Delayed Extra DTD #
#############################################
def __integrate_delayed_extra_DTD(self, t_min_temp, t_max_temp, i_extra, i_ZZ, i_search):
'''
This function returns the integrated number of delayed extra source within
a given time interval for a given source and metallicity.
        Note: There is no normalization here, as this function is actually used for
the normalization process.
Arguments
=========
t_min_temp : Lower boundary of the delayed extra input time interval.
t_max_temp : Upper boundary of the delayed extra time interval.
i_extra : Index of the extra source.
        i_ZZ : Index of the provided input Z in the delayed extra yields table.
i_search : Index of the lower input timestep interval
'''
# If we integrate in the log-log space
# Rate = R = bt^a --> logR = a*logt + logb
if self.delayed_extra_log:
# Copy the boundary conditions of the input DTD interval
lg_t_max_tmp = np.log10(self.delayed_extra_dtd[i_extra][i_ZZ][i_search+1][0])
lg_t_min_tmp = np.log10(self.delayed_extra_dtd[i_extra][i_ZZ][i_search][0])
lg_R_max_tmp = np.log10(self.delayed_extra_dtd[i_extra][i_ZZ][i_search+1][1])
lg_R_min_tmp = np.log10(self.delayed_extra_dtd[i_extra][i_ZZ][i_search][1])
# Calculate the coefficients "a" and "b"
a_ided = (lg_R_max_tmp - lg_R_min_tmp) / (lg_t_max_tmp - lg_t_min_tmp)
b_ided = 10**(lg_R_max_tmp - a_ided * lg_t_max_tmp)
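            # Integrate R = b*t**a analytically: b/(a+1)*(t2**(a+1) - t1**(a+1)) for
            # a != -1, and b*ln(t2/t1) for a = -1.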
# If not a power law with an index of -1
if a_ided > -0.999999 or a_ided < -1.0000001:
# Integrate
N_ided = (b_ided / (a_ided+1.0)) * \
(t_max_temp**(a_ided+1.0) - t_min_temp**(a_ided+1.0))
# If a power law with an index of -1
else:
# Integrate with a natural logarithm
N_ided = b_ided * (np.log(t_max_temp) - np.log(t_min_temp))
# If we integrate NOT in the log-log space
# Rate = R = a * t + b
else:
# Copy the boundary conditions of the input DTD interval
t_max_tmp = self.delayed_extra_dtd[i_extra][i_ZZ][i_search+1][0]
t_min_tmp = self.delayed_extra_dtd[i_extra][i_ZZ][i_search][0]
R_max_tmp = self.delayed_extra_dtd[i_extra][i_ZZ][i_search+1][1]
R_min_tmp = self.delayed_extra_dtd[i_extra][i_ZZ][i_search][1]
# Calculate the coefficients "a" and "b"
a_ided = (R_max_tmp - R_min_tmp) / (t_max_tmp - t_min_tmp)
b_ided = R_max_tmp - a_ided * t_max_tmp
# Integrate
N_ided = 0.5 * a_ided * (t_max_temp**2 - t_min_temp**2) + \
b_ided * (t_max_temp - t_min_temp)
# Return the number of extra sources
return N_ided
##############################################
# Vogelsberger 13 #
##############################################
def __vogelsberger13(self, timemin,timemax):
'''
        This function returns the number of SNe Ia occurring within a given time
interval using the Vogelsberger et al. (2013) delay-time distribution
function.
Arguments
=========
timemin : Lower boundary of the time interval.
timemax : Upper boundary of the time interval.
'''
# Define the minimum age for a stellar population to host SNe Ia
fac = 4.0e7
# If stars are too young ...
if timemax < fac:
# No SN Ia
n1a = 0
# If the age fac is in between the given time interval ...
elif timemin <= fac:
# Limit the lower time boundary to fac
timemin = fac
n1a = quad(self.__vb, timemin, timemax, args=(fac))[0]
# If SNe Ia occur during the whole given time interval ...
else:
# Use the full time range
n1a = quad(self.__vb, timemin, timemax, args=(fac))[0]
        # Exit if the IMF boundaries do not cover 3 - 8 Mo (SN Ia progenitors)
        if not ( (self.imf_bdys[0] < 3) and (self.imf_bdys[1] > 8)):
            print ('!!!!!IMPORTANT!!!!')
            print ('The selected mass range is not possible with the', \
                'Vogelsberger SNIa implementation.')
            sys.exit('Choose a mass range which either fully includes ' + \
                'the range from 3 to 8 Msun or fully excludes it, ' + \
                'or use another SNIa implementation.')
# Return the number of SNe Ia per Mo
return n1a
##############################################
# Vogelsberger 13 - DTD #
##############################################
def __vb(self, tt, fac1):
'''
This function returns the rate of SNe Ia using the delay-time distribution
of Vogelsberger et al. (2013) at a given time
Arguments
=========
tt : Age of the stellar population
fac1 : Minimum age for the stellar population to host SNe Ia
'''
# Return the rate of SN
fac2 = 1.12
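        # The DTD is normalized such that its integral from fac1 to infinity equals
        # 1.3e-3 SNe Ia per Msun formed.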
return 1.3e-3 * (tt / fac1)**(-fac2) * (fac2 - 1.0) / fac1
##############################################
# Spline 1 #
##############################################
def __spline1(self, t_s):
'''
This function returns the lower mass boundary of the SN Ia progenitors
from a given stellar population age.
Arguments
=========
t_s : Age of the considered stellar population
'''
# Set the very minimum mass for SN Ia progenitors
minm_prog1a = 3.0
# Limit the minimum mass to the lower mass limit of the IMF if needed
if self.imf_bdys[0] > minm_prog1a:
minm_prog1a = self.imf_bdys[0]
# Return the minimum mass
the_m_ts = self.get_interp_lifetime_mass(t_s, self.zmetal, is_mass=False)
return float(max(minm_prog1a, the_m_ts))
##############################################
# WD Number #
##############################################
def __wd_number(self, m, t):
'''
This function returns the number of white dwarfs, at a given time, which
had stars of a given initial mass as progenitors. The number is
normalized to a stellar population having a total mass of 1 Mo.
Arguments
=========
m : Initial stellar mass of the white dwarf progenitors
t : Age of the considered stellar population
'''
# Calculate the stellar mass associated to the lifetime t
mlim = self.get_interp_lifetime_mass(t, self.zmetal, is_mass=False)
# Set the maximum mass for SN Ia progenitor
maxm_prog1a = 8.0
# Limit the maximum progenitor mass to the IMF upper limit, if needed
if 8.0 > self.imf_bdys[1]:
maxm_prog1a = self.imf_bdys[1]
# Return the number of white dwarfs, if any
if mlim > maxm_prog1a:
return 0
else:
mmin=0
mmax=0
inte=0
return float(self._imf(mmin,mmax,inte,m))
##############################################
# Maoz SN Rate #
##############################################
def __maoz_sn_rate(self, m, t):
'''
This function returns the rate of SNe Ia, at a given stellar population
age, coming from stars having a given initial mass. It uses the delay-
time distribution of Maoz & Mannucci (2012).
Arguments
=========
m : Initial stellar mass of the white dwarf progenitors
t : Age of the considered stellar population
'''
# Factors 4.0e-13 and 1.0e9 need to stay there !
# Even if the rate is re-normalized.
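        # Rate(t) = N_WD(m, t) * 4.0e-13 * (t / 1 Gyr)**beta_pow; 4.0e-13 corresponds
        # to the fiducial Maoz & Mannucci (2012) rate at t = 1 Gyr.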
return self.__wd_number(m,t) * 4.0e-13 * (t/1.0e9)**self.beta_pow
##############################################
# Maoz SN Rate Int #
##############################################
def __maoz_sn_rate_int(self, t):
'''
This function returns the rate of SNe Ia, at a given stellar population
age, coming from all the possible progenitors. It uses the delay-time
distribution of Maoz & Mannucci (2012).
Arguments
=========
t : Age of the considered stellar population
'''
# Return the SN Ia rate integrated over all possible progenitors
return quad(self.__maoz_sn_rate, self.__spline1(t), 8, args=t)[0]
##############################################
# Maoz12 PowerLaw #
##############################################
def __maoz12_powerlaw(self, timemin, timemax):
'''
This function returns the total number of SNe Ia (per Mo formed) and
white dwarfs for a given time interval. It uses the delay-time
distribution of Maoz & Mannucci (2012).
Arguments
=========
timemin : Lower limit of the time (age) interval
timemax : Upper limit of the time (age) interval
'''
# Avoid the zero in the integration
if timemin == 0:
timemin = 1
# Maximum mass for SN Ia progenitor
maxm_prog1a = 8.0
# Get stellar masses associated with lifetimes of timemax and timemin
spline1_timemax = float(self.__spline1(timemax))
spline1_timemin = float(self.__spline1(timemin))
# Calculate the number of SNe Ia per Mo of star formed
#n1a = self.A_maoz * quad(self.__maoz_sn_rate_int, timemin, timemax)[0]
# Initialisation of the number of SNe Ia (IMPORTANT)
n1a = 0.0
# If SNe Ia occur during this time interval ...
if timemax > self.t_8_0 and timemin < 13.0e9:
# If the fraction of white dwarfs needs to be integrated ...
if timemin < self.t_3_0:
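                # The a_wd .. d_wd coefficients presumably describe a cubic fit to the
                # white-dwarf fraction between t_8_0 and t_3_0; multiplying by t**beta_pow
                # and integrating yields the beta_pow+4 ... beta_pow+1 terms below.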
# Get the upper and lower time limits for this integral part
t_temp_up = min(self.t_3_0,timemax)
t_temp_low = max(self.t_8_0,timemin)
# Calculate a part of the integration
temp_up = self.a_wd * t_temp_up**(self.beta_pow+4.0) / \
(self.beta_pow+4.0) + \
self.b_wd * t_temp_up**(self.beta_pow+3.0) / \
(self.beta_pow+3.0) + \
self.c_wd * t_temp_up**(self.beta_pow+2.0) / \
(self.beta_pow+2.0)
temp_low = self.a_wd * t_temp_low**(self.beta_pow+4.0) / \
(self.beta_pow+4.0) + \
self.b_wd * t_temp_low**(self.beta_pow+3.0) / \
(self.beta_pow+3.0) + \
self.c_wd * t_temp_low**(self.beta_pow+2.0) / \
(self.beta_pow+2.0)
# Natural logarithm if beta_pow == -1.0
if self.beta_pow == -1.0:
temp_up += self.d_wd*np.log(t_temp_up)
temp_low += self.d_wd*np.log(t_temp_low)
# Normal integration if beta_pow != -1.0
else:
temp_up += self.d_wd * t_temp_up**(self.beta_pow+1.0) / \
(self.beta_pow+1.0)
temp_low += self.d_wd * t_temp_low**(self.beta_pow+1.0) / \
(self.beta_pow+1.0)
# Add the number of SNe Ia (with the wrong units)
n1a = (temp_up - temp_low)
# If the integration continues beyond the point where all
# progenitor white dwarfs are present (this should not be an elif)
if timemax > self.t_3_0:
# Get the upper and lower time limits for this integral part
t_temp_up = min(13.0e9,timemax)
t_temp_low = max(self.t_3_0,timemin)
# Natural logarithm if beta_pow == -1.0
if self.beta_pow == -1.0:
temp_int = np.log(t_temp_up) - np.log(t_temp_low)
# Normal integration if beta_pow != -1.0
else:
temp_int = (t_temp_up**(self.beta_pow+1.0) - \
t_temp_low**(self.beta_pow+1.0)) / (self.beta_pow+1.0)
# Add the number of SNe Ia (with the wrong units)
n1a += temp_int
# Add the right units
n1a = n1a * self.A_maoz * 4.0e-13 / 10**(9.0*self.beta_pow)
# Calculate the number of white dwarfs
#number_wd = quad(self.__wd_number, spline1_timemax, maxm_prog1a, \
# args=timemax)[0] - quad(self.__wd_number, spline1_timemin, \
# maxm_prog1a, args=timemin)[0]
number_wd = 1.0 # Temporary .. should be modified if nb_wd is needed..
# Return the number of SNe Ia (per Mo formed) and white dwarfs
return n1a, number_wd
##############################################
# Exp SN Rate #
##############################################
def __exp_sn_rate(self, m,t):
'''
This function returns the rate of SNe Ia, at a given stellar population
age, coming from stars having a given initial mass. It uses the exponential
delay-time distribution of Wiersma et al. (2009).
Arguments
=========
m : Initial stellar mass of the white dwarf progenitors
t : Age of the considered stellar population
'''
# E-folding timescale of the exponential law
tau=self.exp_dtd #Wiersma default: 2e9
mmin=0
mmax=0
inte=0
# Return the SN Ia rate at time t coming from stars of mass m
return self.__wd_number(m,t) * np.exp(-t/tau) / tau
##############################################
# Wiersma09 E-Folding #
##############################################
def __efolding(self, timemin, timemax):
'''
This function returns the total number of SNe Ia (per Mo formed) and
white dwarfs for a given time interval. It uses the exponential delay-
time distribution of Wiersma et al. (2009).
Arguments
=========
timemin : Lower limit of the time (age) interval
timemax : Upper limit of the time (age) interval
'''
# Avoid the zero in the integration (exp function)
if timemin == 0:
timemin = 1
# Set the maximum mass of the progenitors of SNe Ia
maxm_prog1a = 8.0
if 8 > self.imf_bdys[1]:
maxm_prog1a = self.imf_bdys[1]
# Calculate the number of SNe Ia per Mo of star formed
n1a = self.A_exp * dblquad(self.__exp_sn_rate, timemin, timemax, \
lambda x: self.__spline1(x), lambda x: maxm_prog1a)[0]
# Calculate the number of white dwarfs per Mo of star formed
number_wd = quad(self.__wd_number, self.__spline1(timemax), maxm_prog1a, \
args=timemax)[0] - quad(self.__wd_number, self.__spline1(timemin), \
maxm_prog1a, args=timemin)[0]
# Return the number of SNe Ia and white dwarfs
return n1a, number_wd
##############################################
# Normalize WEfolding #
##############################################
def __normalize_efolding(self, lifetime_min):
'''
        This function normalizes the SN Ia rate of the exponential (e-folding) DTD.
Argument
========
lifetime_min : Minimum stellar lifetime.
'''
# Set the maximum mass of progenitors of SNe Ia
maxm_prog1a = 8.0
if maxm_prog1a > self.imf_bdys[1]:
maxm_prog1a = self.imf_bdys[1]
# Maximum time of integration
ageofuniverse = 1.3e10
# Calculate the normalisation constant
self.A_exp = self.nb_1a_per_m / dblquad(self.__exp_sn_rate, \
lifetime_min, ageofuniverse, \
lambda x:self.__spline1(x), lambda x:maxm_prog1a)[0]
if self.direct_norm_1a >0:
self.A_exp=self.direct_norm_1a
# Avoid renormalizing during the next timesteps
self.normalized = True
##############################################
# Gauss SN Rate #
##############################################
def __gauss_sn_rate(self, m, t):
'''
This function returns the rate of SNe Ia, at a given stellar population
age, coming from stars having a given initial mass. It uses a gaussian
delay-time distribution similar to Wiersma09.
Arguments
=========
m : Initial stellar mass of the white dwarf progenitors
t : Age of the considered stellar population
'''
# Gaussian characteristic delay timescale, and its sigma value
tau = self.gauss_dtd[0] #Wiersma09 defaults:1.0e9
sigma = self.gauss_dtd[1] #Wiersma09 defaults: 0.66e9
# Return the SN Ia rate at time t coming from stars of mass m
return self.__wd_number(m,t) * 1.0 / np.sqrt(2.0 * np.pi * sigma**2) * \
np.exp(-(t - tau)**2 / (2.0 * sigma**2))
##############################################
# Wiersma09 Gauss #
##############################################
def __gauss(self, timemin, timemax):
'''
This function returns the total number of SNe Ia (per Mo formed) and
white dwarfs for a given time interval. It uses the gaussian delay-
time distribution of Wiersma et al. (2009).
Arguments
=========
timemin : Lower limit of the time (age) interval
timemax : Upper limit of the time (age) interval
'''
# Set the maximum mass of the progenitors of SNe Ia
maxm_prog1a = 8.0
if 8 > self.imf_bdys[1]:
maxm_prog1a=self.imf_bdys[1]
# Calculate the number of SNe Ia per Mo of star formed
n1a = self.A_gauss * dblquad(self.__gauss_sn_rate, timemin, timemax, \
lambda x:self.__spline1(x), lambda x:maxm_prog1a)[0]
# Calculate the number of white dwarfs per Mo of star formed
number_wd = quad(self.__wd_number, self.__spline1(timemax), maxm_prog1a, \
args=timemax)[0] - quad(self.__wd_number, self.__spline1(timemin), \
maxm_prog1a, args=timemin)[0]
# Return the number of SNe Ia and white dwarfs
return n1a, number_wd
##############################################
# Normalize WGauss #
##############################################
def __normalize_gauss(self, lifetime_min):
'''
This function normalizes the SN Ia rate of a gaussian (similar to Wiersma09).
Argument
========
lifetime_min : Minimum stellar lifetime.
'''
# Set the maximum mass of progenitors of SNe Ia
maxm_prog1a = 8.0
if maxm_prog1a > self.imf_bdys[1]:
maxm_prog1a = self.imf_bdys[1]
# Maximum time of integration
ageofuniverse = 1.3e10
# Calculate the normalisation constant
self.A_gauss = self.nb_1a_per_m / dblquad(self.__gauss_sn_rate, \
lifetime_min, ageofuniverse, \
lambda x:self.__spline1(x), lambda x:maxm_prog1a)[0]
# Avoid renormalizing during the next timesteps
self.normalized = True
##############################################
# Normalize Maoz #
##############################################
def __normalize_maoz(self, lifetime_min):
'''
This function normalizes the SN Ia rate of Maoz or any power law.
Argument
========
lifetime_min : Minimum stellar lifetime.
'''
# Set the maximum mass of progenitors of SNe Ia
# maxm_prog1a = 8.0
# if maxm_prog1a > self.imf_bdys[1]:
# maxm_prog1a = self.imf_bdys[1]
# Maximum time of integration
# ageofuniverse = 1.3e10
# Calculate the normalisation constant
# self.A_maoz = self.nb_1a_per_m / quad(self.__maoz_sn_rate_int, \
# lifetime_min, ageofuniverse)[0]
# print (self.A_maoz)
# Calculate the first part of the integral
temp_8_0 = self.a_wd*self.t_8_0**(self.beta_pow+4.0)/(self.beta_pow+4.0)+\
self.b_wd*self.t_8_0**(self.beta_pow+3.0)/(self.beta_pow+3.0)+\
self.c_wd*self.t_8_0**(self.beta_pow+2.0)/(self.beta_pow+2.0)
temp_3_0 = self.a_wd*self.t_3_0**(self.beta_pow+4.0)/(self.beta_pow+4.0)+\
self.b_wd*self.t_3_0**(self.beta_pow+3.0)/(self.beta_pow+3.0)+\
self.c_wd*self.t_3_0**(self.beta_pow+2.0)/(self.beta_pow+2.0)
# Natural logarithm if beta_pow == -1.0
if self.beta_pow == -1.0:
temp_8_0 += self.d_wd*np.log(self.t_8_0)
temp_3_0 += self.d_wd*np.log(self.t_3_0)
temp_13gys = np.log(13.0e9) - np.log(self.t_3_0)
# Normal integration if beta_pow != -1.0
else:
temp_8_0 += self.d_wd*self.t_8_0**(self.beta_pow+1.0)/(self.beta_pow+1.0)
temp_3_0 += self.d_wd*self.t_3_0**(self.beta_pow+1.0)/(self.beta_pow+1.0)
temp_13gys = (13.0e9**(self.beta_pow+1.0) - \
self.t_3_0**(self.beta_pow+1.0)) / (self.beta_pow+1.0)
# Calculate the normalization constant
self.A_maoz = self.nb_1a_per_m * 10**(9.0*self.beta_pow) / 4.0e-13 / \
(temp_3_0 - temp_8_0 + temp_13gys)
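        # A_maoz rescales the DTD so that the integrated number of SNe Ia between t_8_0
        # and 13 Gyr equals nb_1a_per_m; the 10**(9*beta_pow) / 4.0e-13 factor cancels
        # the fiducial constants used in __maoz_sn_rate and __maoz12_powerlaw.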
# Avoid renormalizing during the next timesteps
self.normalized = True
##############################################
# Poly DTD #
##############################################
def __poly_dtd(self, timemin, timemax):
'''
This function returns the total number of SNe Ia (per Mo formed) for
a given time interval. It uses an input DTD polynomial function of
any order.
Arguments
=========
timemin : Lower limit of the time (age) interval
timemax : Upper limit of the time (age) interval
'''
# Initialization of the integrated DTD with upper and lower mass limit
int_poly_up = 0.0
int_poly_low = 0.0
# Set the upper and lower time limit of the integration
t_up_int = min(timemax, self.poly_fit_range[1])
t_low_int = max(timemin, self.poly_fit_range[0])
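        # Coefficients in poly_fit_dtd_5th are ordered from the highest power of t to the
        # lowest, so the i-th coefficient multiplies t**(n-i-1) and integrates to
        # t**(n-i)/(n-i).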
# If this is a split poly DTD ...
if self.t_dtd_poly_split > 0.0:
# If in the first section ...
if t_up_int <= self.t_dtd_poly_split:
# For each order of the polynomial fit ...
for i_npf in range(0,len(self.poly_fit_dtd_5th[0])):
# Cumulate with the upper and lower limits
exp_poly = len(self.poly_fit_dtd_5th[0]) - i_npf - 1.0
int_poly_up += self.poly_fit_dtd_5th[0][i_npf] * \
t_up_int**(exp_poly+1.0) / (exp_poly+1.0)
int_poly_low += self.poly_fit_dtd_5th[0][i_npf] * \
t_low_int**(exp_poly+1.0) / (exp_poly+1.0)
# If in the second section ...
elif t_low_int >= self.t_dtd_poly_split:
# For each order of the polynomial fit ...
for i_npf in range(0,len(self.poly_fit_dtd_5th[1])):
# Cumulate with the upper and lower limits
exp_poly = len(self.poly_fit_dtd_5th[1]) - i_npf - 1.0
int_poly_up += self.poly_fit_dtd_5th[1][i_npf] * \
t_up_int**(exp_poly+1.0) / (exp_poly+1.0)
int_poly_low += self.poly_fit_dtd_5th[1][i_npf] * \
t_low_int**(exp_poly+1.0) / (exp_poly+1.0)
# If overlap ...
else:
# For each order of the polynomial fit ...
for i_npf in range(0,len(self.poly_fit_dtd_5th[0])):
# Cumulate with the upper and lower limits
exp_poly = len(self.poly_fit_dtd_5th[0]) - i_npf - 1.0
int_poly_up += self.poly_fit_dtd_5th[0][i_npf] * \
self.t_dtd_poly_split**(exp_poly+1.0) / (exp_poly+1.0)
int_poly_low += self.poly_fit_dtd_5th[0][i_npf] * \
t_low_int**(exp_poly+1.0) / (exp_poly+1.0)
exp_poly = len(self.poly_fit_dtd_5th[1]) - i_npf - 1.0
int_poly_up += self.poly_fit_dtd_5th[1][i_npf] * \
t_up_int**(exp_poly+1.0) / (exp_poly+1.0)
int_poly_low += self.poly_fit_dtd_5th[1][i_npf] * \
self.t_dtd_poly_split**(exp_poly+1.0) / (exp_poly+1.0)
# If this is not a split poly DTD ...
else:
# For each order of the polynomial fit ...
for i_npf in range(0,len(self.poly_fit_dtd_5th)):
# Cumulate with the upper and lower limits
exp_poly = len(self.poly_fit_dtd_5th) - i_npf - 1.0
int_poly_up += self.poly_fit_dtd_5th[i_npf] * \
t_up_int**(exp_poly+1.0) / (exp_poly+1.0)
int_poly_low += self.poly_fit_dtd_5th[i_npf] * \
t_low_int**(exp_poly+1.0) / (exp_poly+1.0)
        # Return the number of SNe Ia in this time bin
if (int_poly_up - int_poly_low) < 0.0: # can happen since it's a fit
return 0.0
else:
return self.A_poly * (int_poly_up - int_poly_low)
##############################################
# Normalize Poly Fit #
##############################################
def __normalize_poly_fit(self):
'''
This function normalizes the polynomial input DTD function. Can
be any polynomial order.
'''
# Initialization of the integrated DTD with upper and lower mass limit
int_poly_up = 0.0
int_poly_low = 0.0
# If it is a split poly DTD ...
if self.t_dtd_poly_split > 0.0:
# For each order of the polynomial fit ...
for i_npf in range(0,len(self.poly_fit_dtd_5th[0])):
# Cumulate with the upper and lower limits
exp_poly = len(self.poly_fit_dtd_5th[0]) - i_npf - 1.0
int_poly_up += self.poly_fit_dtd_5th[0][i_npf] * \
self.t_dtd_poly_split**(exp_poly+1.0) / (exp_poly+1.0)
int_poly_low += self.poly_fit_dtd_5th[0][i_npf] * \
self.poly_fit_range[0]**(exp_poly+1.0) / (exp_poly+1.0)
exp_poly = len(self.poly_fit_dtd_5th[1]) - i_npf - 1.0
int_poly_up += self.poly_fit_dtd_5th[1][i_npf] * \
self.poly_fit_range[1]**(exp_poly+1.0) / (exp_poly+1.0)
int_poly_low += self.poly_fit_dtd_5th[1][i_npf] * \
self.t_dtd_poly_split**(exp_poly+1.0) / (exp_poly+1.0)
        # If it is not a split poly DTD ...
else:
# For each order of the polynomial fit ...
for i_npf in range(0,len(self.poly_fit_dtd_5th)):
# Cumulate with the upper and lower limits
exp_poly = len(self.poly_fit_dtd_5th) - i_npf - 1.0
int_poly_up += self.poly_fit_dtd_5th[i_npf] * \
self.poly_fit_range[1]**(exp_poly+1.0) / (exp_poly+1.0)
int_poly_low += self.poly_fit_dtd_5th[i_npf] * \
self.poly_fit_range[0]**(exp_poly+1.0) / (exp_poly+1.0)
# Calculate the normalization constant
self.A_poly = self.nb_1a_per_m / (int_poly_up - int_poly_low)
# Avoid renormalizing during the next timesteps
self.normalized = True
##############################################
# element list #
##############################################
def _i_elem_lists(self, elem):
'''
Finds and returns the list of indices for isotopes of
element 'elem'. Also returns a list of the indices for
        H and He to facilitate metallicity calculations.
Arguments
=========
elem : a string identifying the element requested.
Returns 2 lists
=========
indices of isotopes of elem,
indices of isotopes of H and He
'''
# Declare the list of isotope indexes associated with this element
i_iso_list = []
# Declare the list of isotope indexes associated with H and He
i_H_He_list = []
# Find the isotopes associated with this element
for i_iso in range(self.nb_isotopes):
if self.history.isotopes[i_iso].split('-')[0] == elem:
i_iso_list.append(i_iso)
if 'H-' in self.history.isotopes[i_iso] or 'He-' in self.history.isotopes[i_iso]:
i_H_He_list.append(i_iso)
return i_iso_list, i_H_He_list
##############################################
# Compute metal fraction #
##############################################
def Z_x(self, elem, t_step=-1):
'''
Compute the metal fraction for a list of elements.
The metal fraction is defined as mass_element/mass_metals.
Arguments
=========
elem : the name of the element to use. All isotopes
will be found.
        t_step : the index of the time step at which to do the calculation.
if t_step = -1, or not specified, the last
time_step is used
Returns
=========
        the metal fraction of element 'elem' (summed over its isotopes),
        returned as a single number
'''
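        # Example (hypothetical usage on an evolved instance `o`):
        #   fe_frac = o.Z_x('Fe')           # Fe mass / total metal mass at the last timestep
        #   o_frac  = o.Z_x('O', t_step=10) # same ratio for oxygen at timestep 10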
# Get the list of isotopes indices for element elem
# along with a list of indices for H and He
i_iso_list, i_H_He_list = self._i_elem_lists(elem)
        if len(i_iso_list) == 0:
            print("Element {} not found. Returning -1".format(elem))
            return -1.0
        if t_step > self.nb_timesteps:
            print("t_step must be <= nb_timesteps")
            return -1.0
if t_step == -1:
t_step = self.nb_timesteps
# Calculate the total mass of gas at that timestep
m_tot = self.ymgal[t_step].sum()
m_Z_tot = m_tot
# Calculate the total mass of metals at that timestep
for i_iso in range(len(i_H_He_list)):
m_Z_tot = m_Z_tot - self.ymgal[t_step][i_H_He_list[i_iso]]
# Make sure there is something in the gas reservoir ..
if m_Z_tot > 0.0:
# Sum the mass of each isotope associated with the desired element
m_tot_elem = 0.0
for i_iso in range(len(i_iso_list)):
m_tot_elem += self.ymgal[t_step][ i_iso_list[i_iso] ]
# Calculate the mass fraction of metals
return m_tot_elem / m_Z_tot
else:
return 0.0
##############################################
# IMF #
##############################################
def _imf(self, mmin, mmax, inte, mass=0):
'''
This function returns, using the IMF, the number or the mass of all
the stars within a certain initial stellar mass interval.
Arguments
=========
mmin : Lower mass limit of the interval.
mmax : Upper mass limit of the interval.
inte : 1 - Return the number of stars.
2 - Return the stellar mass.
0 - Return the number of stars having a mass 'mass'
-1 - Return the IMF proportional constant when normalized to 1 Mo.
mass : Mass of a star (if inte == 0).
'''
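        # Example (hypothetical call, values follow the IMF's internal normalization):
        #   n_massive = self._imf(8.0, 100.0, 1)   # number of stars between 8 and 100 Msun
        #   m_total   = self._imf(0.1, 100.0, 2)   # stellar mass contained in that interval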
# Return zero if there is an error in the mass boundary
if mmin>mmax:
if self.iolevel > 1:
print ('Warning in _imf function')
print ('mmin:',mmin)
print ('mmax',mmax)
print ('mmin>mmax')
print ('Assume mmin == mmax')
return 0
# Salpeter IMF or any power law
if self.imf_type == 'salpeter' or self.imf_type == 'alphaimf':
# Choose the right option
if inte == 0:
return self.imfnorm * self.__g1_power_law(mass)
if inte == 1:
return quad(self.__g1_power_law, mmin, mmax)[0]
if inte == 2:
return quad(self.__g2_power_law, mmin, mmax)[0]
if inte == -1:
self.imfnorm = 1.0 / quad(self.__g2_power_law, \
self.imf_bdys[0], self.imf_bdys[1])[0]
# Custom IMF with the file imf_input.py
if self.imf_type=='input':
# Load the file
ci = load_source('custom_imf', os.path.join(nupy_path, 'imf_input.py'))
self.ci = ci
# Choose the right option
if inte == 0:
return self.imfnorm * self.__g1_custom(mass)
if inte == 1:
return quad(self.__g1_custom, mmin, mmax)[0]
if inte == 2:
return quad(self.__g2_custom, mmin, mmax)[0]
if inte == -1:
self.imfnorm = 1.0 / quad(self.__g2_custom, \
self.imf_bdys[0], self.imf_bdys[1])[0]
# Chabrier IMF
elif self.imf_type=='chabrier':
# Choose the right option
if inte == 0:
return self.imfnorm * self.__g1_chabrier(mass)
if inte == 1:
return quad(self.__g1_chabrier, mmin, mmax)[0]
if inte == 2:
return quad(self.__g2_chabrier, mmin, mmax)[0]
if inte == -1:
self.imfnorm = 1.0 / quad(self.__g2_chabrier, \
self.imf_bdys[0], self.imf_bdys[1])[0]
# Chabrier - Alpha custom - IMF
elif self.imf_type=='chabrieralpha':
# Choose the right option
if inte == 0:
return self.imfnorm * self.__g1_chabrier_alphaimf(mass)
if inte == 1:
return quad(self.__g1_chabrier_alphaimf, mmin, mmax)[0]
if inte == 2:
return quad(self.__g2_chabrier_alphaimf, mmin, mmax)[0]
if inte == -1:
self.imfnorm = 1.0 / quad(self.__g2_chabrier_alphaimf, \
self.imf_bdys[0], self.imf_bdys[1])[0]
# Kroupa 1993 - IMF
elif self.imf_type=='kroupa93':
# Choose the right option
if inte == 0:
return self.imfnorm * self.__g1_kroupa93_alphaimf(mass)
if inte == 1:
return quad(self.__g1_kroupa93_alphaimf, mmin, mmax)[0]
if inte == 2:
return quad(self.__g2_kroupa93_alphaimf, mmin, mmax)[0]
if inte == -1:
self.imfnorm = 1.0 / quad(self.__g2_kroupa93_alphaimf, \
self.imf_bdys[0], self.imf_bdys[1])[0]
# Kroupa IMF
elif self.imf_type=='kroupa':
# Choose the right option
if inte == 0:
return self.imfnorm * self.__g1_kroupa(mass)
if inte == 1:
return self.__integrate_g1_kroupa(mmin, mmax)
if inte == 2:
return quad(self.__g2_kroupa, mmin, mmax)[0]
if inte == -1:
self.imfnorm = 1.0 / quad(self.__g2_kroupa, \
self.imf_bdys[0], self.imf_bdys[1])[0]
elif self.imf_type == 'lognormal':
# Choose the right option
if inte == 0:
return self.imfnorm * self.__g1_log_normal(mass)
if inte == 1:
return quad(self.__g1_log_normal, mmin, mmax)[0]
if inte == 2:
return quad(self.__g2_log_normal, mmin, mmax)[0]
if inte == -1:
self.imfnorm = 1.0 / quad(self.__g2_log_normal, \
self.imf_bdys[0], self.imf_bdys[1])[0]
# Ferrini, Pardi & Penco (1990)
elif self.imf_type=='fpp':
# Choose the right option
if inte == 0:
return self.imfnorm * self.__g1_fpp(mass)
if inte == 1:
return quad(self.__g1_fpp, mmin, mmax)[0]
if inte == 2:
#return quad(self.__g2_fpp, mmin, mmax)[0]
#if mmin < 0.8:
# print ('!!Error - Ferrini IMF not fitted below 0.8 Msun!!')
# Find the lower mass bin
i_fer = 0
while mmin >= self.m_up_fer[i_fer]:
i_fer += 1
# Integrate this mass bin ...
imf_int = 0.0
imf_int += self.norm_fer[i_fer] * \
(min(mmax,self.m_up_fer[i_fer])**self.alpha_fer[i_fer]\
- mmin**self.alpha_fer[i_fer])
# For the remaining mass bin ...
if not mmax <= self.m_up_fer[i_fer]:
for i_fer2 in range((i_fer+1),len(self.m_up_fer)):
if mmax >= self.m_up_fer[i_fer2-1]:
imf_int += self.norm_fer[i_fer2] * \
(min(mmax,self.m_up_fer[i_fer2])**self.alpha_fer[i_fer2]\
- self.m_up_fer[i_fer2-1]**self.alpha_fer[i_fer2])
# Return the integration
return imf_int
if inte == -1:
self.imfnorm = 1.0 / quad(self.__g2_fpp, \
self.imf_bdys[0], self.imf_bdys[1])[0]
##############################################
# G1 Power Law #
##############################################
def __g1_power_law(self, mass):
'''
This function returns the number of stars having a certain stellar mass
with a Salpeter IMF or a similar power law.
Arguments
=========
mass : Stellar mass.
'''
# Select the right alpha index
if self.imf_type == 'salpeter':
return mass**(-2.35)
elif self.imf_type == 'alphaimf':
return mass**(-self.alphaimf)
else:
return 0
##############################################
# G2 Power Law #
##############################################
def __g2_power_law(self, mass):
'''
This function returns the total mass of stars having a certain initial
mass with a Salpeter IMF or a similar power law.
Arguments
=========
mass : Stellar mass.
'''
# Select the right alpha index
if self.imf_type == 'salpeter':
return mass * mass**(-2.35)
elif self.imf_type == 'alphaimf':
return mass * mass**(-self.alphaimf)
else:
return 0
##############################################
# G1 Log Normal #
##############################################
def __g1_log_normal(self, mass):
'''
This function returns the number of stars having a certain stellar mass
with a log normal IMF with characteristic mass self.imf_pop3_char_mass.
Arguments
=========
mass : Stellar mass.
** Future addition: sigma. Assuming sigma = 1 for now. **
'''
# Select the right alpha index
return np.exp(-1.0/2.0 * np.log(mass/self.imf_pop3_char_mass)**2) * 1/mass
##############################################
# G2 Log Normal #
##############################################
def __g2_log_normal(self, mass):
'''
This function returns the total mass of stars having a certain initial
mass with a log normal IMF with characteristic mass self.imf_pop3_char_mass.
Arguments
=========
mass : Stellar mass.
** Future addition: sigma. Assuming sigma = 1 for now. **
'''
return np.exp(-1.0/2.0 * np.log(mass/self.imf_pop3_char_mass)**2)
##############################################
# G1 Custom #
##############################################
def __g1_custom(self, mass):
'''
This function returns the number of stars having a certain stellar mass
with a custom IMF.
Arguments
=========
mass : Stellar mass.
ci : File containing the custom IMF.
'''
# Return the number of stars
return self.ci.custom_imf(mass)
##############################################
# G2 Custom #
##############################################
def __g2_custom(self, mass):
'''
This function returns the total mass of stars having a certain stellar
mass with a custom IMF.
Arguments
=========
mass : Stellar mass.
ci : File containing the custom IMF.
'''
# Return the total mass of stars
return mass * self.ci.custom_imf(mass)
##############################################
# G1 Chabrier #
##############################################
def __g1_chabrier(self, mass):
'''
This function returns the number of stars having a certain stellar mass
with a Chabrier IMF.
Arguments
=========
mass : Stellar mass.
'''
# Select the right mass regime
if mass <= 1:
return 0.158 * (1.0 / mass) * \
np.exp(-np.log10(mass/0.079)**2 / (2.0 * 0.69**2))
else:
return 0.0443 * mass**(-2.3)
##############################################
# G2 Chabrier #
##############################################
def __g2_chabrier(self, mass):
'''
This function returns the total mass of stars having a certain stellar
mass with a Chabrier IMF.
Arguments
=========
mass : Stellar mass.
'''
# Select the right mass regime
if mass <= 1:
return 0.158 * np.exp( -np.log10(mass/0.079)**2 / (2.0 * 0.69**2))
else:
return 0.0443 * mass * mass**(-2.3)
##############################################
# G1 Chabrier AlphaIMF #
##############################################
def __g1_chabrier_alphaimf(self, mass):
'''
This function returns the number of stars having a certain stellar mass
with a Chabrier IMF.
Arguments
=========
mass : Stellar mass.
'''
# Select the right mass regime
if mass <= 1:
return 0.158 * (1.0 / mass) * \
np.exp(-np.log10(mass/0.079)**2 / (2.0 * 0.69**2))
else:
return 0.0443 * mass**(-self.alphaimf)
##############################################
# G2 Chabrier AlphaIMF #
##############################################
def __g2_chabrier_alphaimf(self, mass):
'''
This function returns the total mass of stars having a certain stellar
mass with a Chabrier IMF.
Arguments
=========
mass : Stellar mass.
'''
# Select the right mass regime
if mass <= 1:
return 0.158 * np.exp( -np.log10(mass/0.079)**2 / (2.0 * 0.69**2))
else:
return 0.0443 * mass * mass**(-self.alphaimf)
##############################################
# G1 Kroupa93 AlphaIMF #
##############################################
def __g1_kroupa93_alphaimf(self, mass):
'''
This function returns the number of stars having a certain stellar mass
with a Kroupa et al. (1993) IMF.
Arguments
=========
mass : Stellar mass.
'''
# Select the right mass regime
if mass < 0.5:
return 0.035 * mass**(-1.3)
elif mass < 1.0:
return 0.019 * mass**(-2.2)
else:
return 0.019 * mass**(-2.7)
##############################################
# G2 Kroupa93 AlphaIMF #
##############################################
def __g2_kroupa93_alphaimf(self, mass):
'''
This function returns the total mass of stars having a certain stellar
mass with a Kroupa et al. (1993) IMF.
Arguments
=========
mass : Stellar mass.
'''
# Select the right mass regime
if mass < 0.5:
return 0.035 * mass * mass**(-1.3)
elif mass < 1.0:
return 0.019 * mass * mass**(-2.2)
else:
return 0.019 * mass * mass**(-2.7)
##############################################
# G1 Kroupa #
##############################################
def __g1_kroupa(self, mass):
'''
This function returns the number of stars having a certain stellar mass
with a Kroupa IMF.
Arguments
=========
mass : Stellar mass.
'''
# Select the right mass regime
if mass < 0.08:
return self.p0 * mass**(-0.3)
elif mass < 0.5:
return self.p1 * mass**(-1.3)
else:
return self.p1_p2 * mass**(-2.3)
##############################################
# Integrate G1 Kroupa #
##############################################
def __integrate_g1_kroupa(self, mmin, mmax):
'''
This function returns the integration of the Kroupa (2001)
IMF. Number of stars.
Arguments
=========
mmin : Lower-boundary mass of the integration
mmax : Upper-boundary mass
'''
# Declare the integral result
integral_sum = 0.0
# Integrate the lower-mass regime if needed
# 1.42857 = 1.0 / 0.7
if mmin < 0.08:
integral_sum += self.p0 * 1.42857 * \
( min(mmax,0.08)**0.7 - mmin**0.7 )
# Integrate the intermediate-mass regime if needed
if mmax > 0.08 and mmin < 0.5:
# 3.33333 = 1.0 / 0.3
integral_sum += self.p1 * 3.33333 * \
( max(mmin,0.08)**(-0.3) - min(mmax,0.5)**(-0.3) )
# Integrate the high-mass regime if needed
if mmax > 0.5:
# 0.769231 = 1.0 / 1.3
integral_sum += self.p1*self.p2 * 0.769231 * \
( max(mmin,0.5)**(-1.3) - mmax**(-1.3) )
# Return the integral of all mass regime combined
return integral_sum
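# Illustrative consistency check (added, not part of the original source): the
# closed-form integration above should agree with a direct numerical quadrature
# of __g1_kroupa; e.g. for the 0.1-1.0 Msun interval only the two upper mass
# regimes contribute:
#   analytic = self.__integrate_g1_kroupa(0.1, 1.0)
#   numeric  = quad(self.__g1_kroupa, 0.1, 1.0)[0]
# Both values are expected to match to within the quadrature tolerance
# (assuming self.p1_p2 equals self.p1 * self.p2).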
##############################################
# G2 Kroupa #
##############################################
def __g2_kroupa(self, mass):
'''
This function returns the total mass of stars having a certain stellar
mass with a Kroupa IMF.
Arguments
=========
mass : Stellar mass.
'''
# Select the right mass regime
if mass < 0.08:
return self.p0 * mass**(0.7)
elif mass < 0.5:
return self.p1 * mass**(-0.3)
else:
return self.p1_p2 * mass**(-1.3)
##############################################
# G1 FPP #
##############################################
def __g1_fpp(self, mass):
'''
This function returns the number of stars having a certain stellar mass
with a Ferrini, Pardi & Penco (1990) IMF.
Arguments
=========
mass : Stellar mass.
'''
# Calculate the number of stars
lgmm = np.log10(mass)
return 2.01 * mass**(-1.52) / 10**((2.07*lgmm**2+1.92*lgmm+0.73)**0.5)
##############################################
# G2 FPP #
##############################################
def __g2_fpp(self, mass):
'''
This function returns the total mass of stars having a certain stellar
mass with a Ferrini, Pardi & Penco (1990) IMF.
Arguments
=========
mass : Stellar mass.
'''
# Calculate the mass of stars
lgmm = np.log10(mass)
return 2.01 * mass**(-0.52) / 10**((2.07*lgmm**2+1.92*lgmm+0.73)**0.5)
##############################################
# Get Z Wiersma #
##############################################
def __get_Z_wiersma(self, Z, Z_grid):
'''
This function returns the closest available metallicity grid point
for a given Z. It always favours the lower boundary.
Arguments
=========
Z : Current metallicity of the gas reservoir.
Z_grid : Available metallicity grid points.
'''
import decimal
# For every available metallicity ...
for tz in Z_grid:
# If Z is above the grid range, use max available Z
if Z >= Z_grid[0]:
Z_gridpoint = Z_grid[0]
if self.iolevel >= 2:
print ('Z > Zgrid')
break
# If Z is below the grid range, use min available Z
if Z <= Z_grid[-1]:
Z_gridpoint = Z_grid[-1]
if self.iolevel >= 2:
print ('Z < Zgrid')
break
# If Z is exactly one of the available Z, use the given Z
# round here to precision given in yield table
if round(Z, abs(decimal.Decimal(str(tz)).as_tuple().exponent)) == tz:
Z_gridpoint = tz
if self.iolevel >= 2:
print ('Z = Zgrid')
break
# If Z is above the grid point at index tz, use this last point
if Z > tz:
Z_gridpoint = tz
if self.iolevel >= 2:
print ('interpolation necessary')
break
# Return the closest metallicity grid point
return Z_gridpoint
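# Illustrative example (added, not part of the original source): with a
# descending grid such as Z_grid = [0.02, 0.01, 0.001], the loop above returns
# 0.02 for Z = 0.05 (above the grid range), 0.001 for Z = 0.0001 (below the
# range), 0.01 for Z = 0.01 (exact match after rounding), and 0.001 for
# Z = 0.004 (closest grid point from below, reported as 'interpolation
# necessary' when self.iolevel >= 2).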
##############################################
# Correct Iniabu #
##############################################
def __correct_iniabu(self, ymgal_t, ytables, Z_gridpoint, m_stars):
'''
This function returns yields that are corrected for the difference
between the initial abundances used in the stellar model calculations
and the ones in the gas reservoir at the moment of star formation.
See Wiersma et al. (2009) for more information on this approach.
Note that tabulated net yields are not required for this approach.
Arguments
=========
ymgal_t : Current composition (isotope masses) of the gas reservoir (for the 'wiersma' setting).
ytables : Object containing the yield tables.
Z_gridpoint : Metallicity where the correction is made.
m_stars : Stellar mass grid point at metallicity Z_gridpoint.
'''
# Calculate the isotope mass fractions of the gas reservoir
X_ymgal_t = []
for p in range(len(ymgal_t)):
X_ymgal_t.append(ymgal_t[p] / np.sum(ymgal_t))
if Z_gridpoint != 0: # X0 is not in popIII tables and not needed for the popIII setting
# Get the initial abundances used for the stellar model calculation
X0 = ytables.get(Z=Z_gridpoint, M=m_stars[0], quantity='X0',\
isotopes=self.history.isotopes)
# Declaration of the corrected yields
yields = []
# For every stellar model at metallicity Z_gridpoint ...
for m in m_stars:
# Get its yields
y = ytables.get(Z=Z_gridpoint, M=m, quantity='Yields',\
isotopes=self.history.isotopes)
#print ('test Z: ',Z_gridpoint,' M: ',m)
mfinal = ytables.get(Z=Z_gridpoint, M=m, quantity='M_final')
iso_name=copy.deepcopy(self.history.isotopes)
yi_all=[]
# Correct every isotope and make sure the ejecta is always positive
for p in range(len(X_ymgal_t)):
#assume your yields are net yields
if (self.netyields_on==True):
if self.wiersmamod: #for Wiesma09 tests
# initial amount depending on the simulation Z + net production factors
if (m>8) and (iso_name[p] in ['C-12','Mg-24','Fe-56']):
yi = (X_ymgal_t[p]*(m-mfinal) + y[p]) #total yields, Eq. 4 in Wiersma09
if iso_name[p] in ['C-12','Fe-56']:
#print ('M=',m,' Reduce ',iso_name[p],' by 0.5 ',yi,yi*0.5)
yi = yi*0.5
else:
#print ('M=',m,' Multiply ',iso_name[p],' by 2.')
yi = yi*2.
else:
yi = (X_ymgal_t[p]*(m-mfinal) + y[p])
else:
yi = (X_ymgal_t[p]*(m-mfinal) + y[p])
#print (yi,(m-mfinal),y[p],X_ymgal_t[p])
else:
#assume your yields are NOT net yields
#if iso_name[p] in ['C-12']:
#print ('C12: Current gas fraction and X0: ',X_ymgal_t[p],X0[p])
#introduce relative correction check of term X_ymgal_t[p] - X0[p]
#since small difference (e.g. due to lack of precision in X0) can
#lead to big differences in yi; yield table X0 has only limited digits
relat_corr=abs(X_ymgal_t[p] - X0[p])/X_ymgal_t[p]
if (relat_corr - 1.)>1e-3:
yi = y[p] + ( X_ymgal_t[p] - X0[p]) * (m-mfinal) #sum(y) #total yields yi, Eq. 7 in Wiersma09
else:
yi = y[p]
if yi < 0:
if self.iolevel>0:
if abs(yi/y[p])>0.1:
print (iso_name[p],'star ',m,' set ',yi,' to 0, ', \
'netyields: ',y[p],'Xsim: ',X_ymgal_t[p],X0[p])
yi = 0
yi_all.append(yi)
# we do not do the normalization
#norm = (m-mfinal)/sum(yi_all)
yi_all= np.array(yi_all) #* norm
yields.append(yi_all)
# save calculated net yields and corresponding masses
self.history.netyields=yields
self.history.netyields_masses=m_stars
#print ('star ',m,(m-mfinal),sum(yields[-1]))
# Return the corrected yields
return yields
##############################################
# Mass Ejected Fit #
##############################################
def __fit_mej_mini(self, m_stars, yields):
'''
This function calculates and returns the coefficients of the linear fit
regarding the total mass ejected as a function of the initial mass at
the low-mass end of massive stars (up to 15 Mo).
Arguments
=========
m_stars : Stellar mass grid point at a specific metallicity.
yields : Stellar yields at a specific metallicity.
'''
import matplotlib.pyplot as plt
# Linear fit coefficients
slope = []
intercept = []
# Get the actual stellar masses and total mass ejected
x_all = np.array(m_stars)
y_all = np.array([np.sum(a) for a in yields])
if self.iolevel>0:
plt.figure()
# Calculate the linear fit for all stellar mass bins
for h in range(len(x_all)-1):
x=np.array([x_all[h],x_all[h+1]])
y=np.array([y_all[h],y_all[h+1]])
a,b=polyfit(x=x,y=y,deg=1)
slope.append(a)
intercept.append(b)
if self.iolevel>0:
mtests=np.arange(x[0],x[1],0.1)
plt.plot(mtests,slope[-1]*np.array(mtests)+intercept[-1])
plt.title('Total mass fit')
plt.xlabel('Minis');plt.ylabel('Meject')
# Return the linear fit coefficients
return slope, intercept
##############################################
# Get Metallicity #
##############################################
def _getmetallicity(self, i):
'''
Returns the metallicity of the gas reservoir at step i.
Metals are defined as everything heavier than lithium.
Argument
========
i : Index of the timestep
'''
# Return the input Z if the code is forced to always use a specific Z
if self.hardsetZ >= 0:
zmetal = self.hardsetZ
return zmetal
# Calculate the total mass
mgastot = np.sum(self.ymgal[i])
# In the case where there is no gas left
if mgastot == 0.0:
zmetal = 0.0
# If gas left, calculate the mass fraction of metals
else:
m_non_metal = 0.0
for i_nm in range(self.len_i_nonmetals):
m_non_metal += self.ymgal[i][self.i_nonmetals[i_nm]]
zmetal = 1.0 - m_non_metal / mgastot
# Return the metallicity of the gas reservoir
return zmetal
##############################################
# Iso Abu to Elem #
##############################################
def _iso_abu_to_elem(self, yields_iso):
'''
This function converts isotope yields in elements and returns the result.
'''
# Combine isotopes into elements
yields_ele = np.zeros(self.nb_elements)
for i_iso in range(self.nb_isotopes):
yields_ele[self.i_elem_for_iso[i_iso]] += yields_iso[i_iso]
# Return the list of elements, and the associated yields
return yields_ele
##############################################
# GetTime #
##############################################
def _gettime(self):
'''
Return the elapsed run time as a formatted string. This is for keeping track of the computational time.
'''
out = 'Run time: '+str(round((t_module.time() - self.start_time),2))+"s"
return out
##############################################
# Add SSP Ejecta #
##############################################
def __add_ssp_ejecta(self, i):
'''
Distribute the SSP ejecta. The SSP that forms during this step
is treated by interpolating SYGMA results that were kept in memory.
The SSP still deposits its ejecta in the upcoming timesteps, as in the
original chem_evol.py class.
Argument
========
i : Index of the current timestep
'''
# Interpolate the SSP ejecta
self.__interpolate_ssp_ej( i-1 )
# Copy the initial simulation step that will be increased
i_sim = i-1
if i_sim == 0:
t_form = 0.0
else:
t_form = self.t_ce[i_sim-1]
# For each SSP step ...
for i_ssp in range(0,self.nb_steps_table):
# Declare the array that contains the time covered by
# the SSP step on each simulation step
time_frac = []
# Keep the initial current simulation step in memory
i_sim_low = i_sim
# While all simulation steps covered by the SSP step
# have not been treated ...
not_complete = True
while not_complete:
# Calculate the time lower-boundary of the SSP time bin
if i_ssp == 0:
t_low_ssp = 0.0
else:
t_low_ssp = self.t_ssp[i_ssp-1]
if i_sim == 0:
t_low_ce = 0.0
else:
t_low_ce = self.t_ce[i_sim-1]
# Calculate the time covered by the SSP step on
# the considered simulation step
time_frac.append( \
min((self.t_ce[i_sim]-t_form), self.t_ssp[i_ssp]) - \
max((t_low_ce-t_form), t_low_ssp))
# If all the simulation steps have been covered ...
if (self.t_ce[i_sim]-t_form) >= self.t_ssp[i_ssp] or \
(i_sim + 1) == self.nb_timesteps:
# Stop the while loop
not_complete = False
# If we still need to cover simulation steps ...
else:
# Move to the next one
i_sim += 1
# Convert the time into time fraction
dt_temp_inv = 1.0 / (self.t_ssp[i_ssp] - t_low_ssp)
for i_tf in range(0,len(time_frac)):
time_frac[i_tf] = time_frac[i_tf] * dt_temp_inv
# For each simulation step ...
for j_ase in range(0,len(time_frac)):
# Add the ejecta
self.mdot[i_sim_low+j_ase] += \
self.ej_SSP_int[i_ssp] * time_frac[j_ase]
if self.len_decay_file > 0 or self.use_decay_module:
self.mdot_radio[i_sim_low+j_ase] += \
self.ej_SSP_int_radio[i_ssp] * time_frac[j_ase]
# Break if the end of the simulation is reached
if (i_sim + 1) == self.nb_timesteps:
break
##############################################
# Interpolate SSP Ej. #
##############################################
def __interpolate_ssp_ej(self, i):
'''
Interpolate all the isotopes for each step to create an SSP with the
desired metallicity. The ejecta is scaled to the mass of the SSP.
Arguments
=========
i : Index of the current timestep
'''
# Use the lowest metallicity
if self.zmetal <= self.Z_trans:
self.ej_SSP_int = self.ej_SSP[0] * self.m_locked
if self.len_decay_file > 0 or self.use_decay_module:
self.ej_SSP_int_radio = self.ej_SSP_radio[0] * self.m_locked
# Use the highest metallicity
elif self.zmetal >= self.Z_table_SSP[-1]:
self.ej_SSP_int = self.ej_SSP[-1] * self.m_locked
if self.len_decay_file > 0 or self.use_decay_module:
self.ej_SSP_int_radio = self.ej_SSP_radio[-1] * self.m_locked
# If the metallicity is between Z_trans and lowest non-zero Z_table ..
elif self.zmetal <= self.Z_table_first_nzero:
if self.Z_table_SSP[0] == self.Z_table_first_nzero:
self.ej_SSP_int = self.ej_SSP[0] * self.m_locked
else:
self.ej_SSP_int = self.ej_SSP[1] * self.m_locked
if self.len_decay_file > 0 or self.use_decay_module:
if self.Z_table_SSP[0] == self.Z_table_first_nzero:
self.ej_SSP_int_radio = self.ej_SSP_radio[0] * self.m_locked
else:
self.ej_SSP_int_radio = self.ej_SSP_radio[1] * self.m_locked
# If we need to interpolate the ejecta ...
else:
# Find the metallicity lower boundary
i_Z_low = 0
while self.zmetal >= self.Z_table_SSP[i_Z_low+1]:
i_Z_low += 1
# Calculate the log of the current gas metallicity
log_Z_cur = np.log10(self.zmetal)
# Calculate the time left to the simulation
t_left_ce = self.t_ce[-1] - self.t_ce[i]
# For each step and each isotope ...
for j_ise in range(0,self.nb_steps_table):
# Interpolate the isotopes
self.ej_SSP_int[j_ise] = (self.ej_SSP_coef[0][i_Z_low][j_ise] * \
log_Z_cur + self.ej_SSP_coef[1][i_Z_low][j_ise]) * self.m_locked
if self.len_decay_file > 0 or self.use_decay_module:
self.ej_SSP_int_radio[j_ise] = (\
self.ej_SSP_coef_radio[0][i_Z_low][j_ise] * log_Z_cur + \
self.ej_SSP_coef_radio[1][i_Z_low][j_ise]) * self.m_locked
# Break if the SSP time exceed the simulation time
if self.t_ssp[j_ise] > t_left_ce:
break
##############################################
# Interpolation routine #
##############################################
def interpolation(self, x_arr, y_arr, xx, indx, interp_list, return_coefs=False):
'''
This function interpolates with the Steffen 1990 algorithm, adding
linear extra points at both ends of the interval.
Argument
========
x_arr: coordinate array
y_arr: 1-D or 2-D numpy array for interpolation
xx: value for which y_arr must be interpolated
indx: interpolation index such that
x_arr[indx] < xx < x_arr[indx + 1].
The minimum value is 0 and the maximum is len(x_arr) - 1.
interp_list: list holding the interpolation coefficients.
it should have the same size and dimensions as y_arr and
initialized to None.
return_coefs: If True, return the calculated interp_list[indx]
instead of returning the interpolated y_arr
'''
# Get the dimensions and catch non-numpy arrays
try:
dimensions = y_arr.ndim
except AttributeError:
raise Exception("The interpolation routine uses numpy arrays")
except:
raise
# Return if last extreme
if indx == len(x_arr) - 1:
return y_arr[indx]
# Check that the indx is lower than the maximum
if indx > len(x_arr) - 1:
raise Exception("Interpolating outside of range!")
# Return the calculation with coefficients if exists
if dimensions == 1:
coefCheck = interp_list[indx]
elif dimensions == 2:
coefCheck = interp_list[indx][0]
else:
raise Exception("Current support for up to 2-d in interpolation method")
if coefCheck is not None:
if return_coefs:
return interp_list[indx]
else:
coefs = interp_list[indx]
deltx = xx - x_arr[indx]
return coefs[0]*deltx**3 + coefs[1]*deltx**2 + coefs[2]*deltx + y_arr[indx]
# If not, we have to calculate the coefficients for this region
x0 = x_arr[indx]; xp1 = x_arr[indx + 1]
# Store yp1 and y0
yp1 = y_arr[indx + 1]; y0 = y_arr[indx]
hi0 = (xp1 - x0)
si0 = (yp1 - y0)/hi0
# Calculate sim1 and deriv0
if indx > 0:
# Store x, y
ym1 = y_arr[indx - 1]
xm1 = x_arr[indx - 1]
else:
# We are in the lowest extreme, create an extra point
dx = x_arr[indx + 1] - x_arr[indx]
dy = y_arr[indx + 1] - y_arr[indx]
xm1 = x_arr[indx] - dx*1e-5
ym1 = y_arr[indx + 1] - dy/dx*1e-5
him1 = (x0 - xm1)
sim1 = (y0 - ym1)/him1
# Pi0 calculation
pi0 = (sim1*hi0 + si0*him1)/(him1 + hi0)
# Derivative
deriv0 = np.sign(sim1) + np.sign(si0)
if dimensions == 1:
deriv0 = deriv0*min(abs(sim1), abs(si0), 0.5*abs(pi0))
elif dimensions == 2:
deriv0 = deriv0*np.minimum(abs(sim1),\
np.minimum(abs(si0), 0.5*abs(pi0)))
# Calculate sip1, pip1 and derivp1
if indx < len(x_arr) - 2:
yp2 = y_arr[indx + 2]
xp2 = x_arr[indx + 2]
else:
# We are in the highest extreme, create an extra point
dx = x_arr[indx + 1] - x_arr[indx]
dy = y_arr[indx + 1] - y_arr[indx]
xp2 = x_arr[indx + 1] + dx*1e-5
yp2 = y_arr[indx + 1] + dy/dx*1e-5
hip1 = (xp2 - xp1)
sip1 = (yp2 - yp1)/hip1
# Pip1 calculation
pip1 = (si0*hip1 + sip1*hi0)/(hi0 + hip1)
# Derivative
derivp1 = np.sign(si0) + np.sign(sip1)
if dimensions == 1:
derivp1 = derivp1*min(abs(si0), abs(sip1), 0.5*abs(pip1))
elif dimensions == 2:
derivp1 = derivp1*np.minimum(abs(si0), \
np.minimum(abs(sip1), 0.5*abs(pip1)))
# Now calculate coefficients (ci = deriv0; di = y0)
ai = (deriv0 + derivp1 - 2*si0)/(hi0*hi0)
bi = (3*si0 - 2*deriv0 - derivp1)/hi0
interp_list[indx] = (ai, bi, deriv0)
return self.interpolation(x_arr, y_arr, xx, indx, interp_list, return_coefs=return_coefs)
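# Illustrative usage sketch (added, not part of the original source): the caller
# keeps one coefficient cache per interpolated array, initialised to None
# entries, and locates 'indx' so that x_arr[indx] <= xx < x_arr[indx + 1]:
#   x_arr = np.array([0.0, 1.0, 2.0, 4.0])
#   y_arr = np.array([0.0, 1.0, 4.0, 16.0])
#   coefs = [None] * len(x_arr)
#   y_at_3 = self.interpolation(x_arr, y_arr, 3.0, 2, coefs)
# Subsequent calls with the same 'indx' reuse the cached Steffen coefficients
# instead of recomputing them.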
##############################################
# History CLASS #
##############################################
class History():
'''
Class tracking the evolution of composition, model parameters, etc.
Allows separation of tracking variables from original code.
'''
#############################
# Constructor #
#############################
def __init__(self):
'''
Initialize the variables tracking the history.
'''
self.age = []
self.sfr = []
self.gas_mass = []
self.metallicity = []
self.ism_iso_yield = []
self.ism_iso_yield_agb = []
self.ism_iso_yield_massive = []
self.ism_iso_yield_1a = []
self.ism_iso_yield_nsm = []
self.isotopes = []
self.elements = []
self.ism_elem_yield = []
self.ism_elem_yield_agb = []
self.ism_elem_yield_massive = []
self.ism_elem_yield_1a = []
self.ism_elem_yield_nsm = []
self.sn1a_numbers = []
self.nsm_numbers = []
self.sn2_numbers = []
self.t_m_bdys = []
##############################################
# Const CLASS #
##############################################
class Const():
'''
Holds the physical constants.
Please add further constants if required.
'''
#############################
# Constructor #
#############################
def __init__(self):
'''
Initialize the physical constants.
'''
self.syr = 31536000 #seconds in a year
self.c= 2.99792458e10 #speed of light in vacuum (cm s^-1)
self.pi = 3.1415926535897932384626433832795029e0
self.planck_h = 6.62606896e-27 # Planck's constant (erg s)
self.ev2erg = 1.602176487e-12 # electron volt (erg)
self.rsol = 6.9598e10 # solar radius (cm)
self.lsol = 3.8418e33 #erg/s
self.msol = 1.9892e33 # solar mass (g)
self.ggrav = 6.67428e-8 #(g^-1 cm^3 s^-2)
##############################################
# BinTree CLASS #
##############################################
class Bin_tree():
'''
Class for the construction and search in a binary tree.
'''
#############################
# Constructor #
#############################
def __init__(self, sorted_array):
'''
Initialize the balanced tree
'''
self.head = self._create_tree(sorted_array)
#############################
# Tree creation #
#############################
def _create_tree(self, sorted_array, index = 0):
'''
Create the tree itself
'''
# Sort edge cases
len_array = len(sorted_array)
if len_array == 0:
return None
elif len_array == 1:
return Node(sorted_array[0], index)
# Find middle value and index, introduce them
# and recursively create the children
mid_index = len_array//2
mid_value = sorted_array[mid_index]
new_node = Node(mid_value, mid_index + index)
new_node.lchild = self._create_tree(sorted_array[0:mid_index], index)
new_node.rchild = self._create_tree(sorted_array[mid_index + 1:],
mid_index + 1 + index)
return new_node
#############################
# Wrapper Tree search left #
#############################
def search_left(self, value):
'''
Wrapper for search_left
Search for the rightmost index whose value is lower than or equal to the query value
'''
# Call function and be careful with lowest case
index = self._search_left_rec(value, self.head)
if index is None:
return 0
return index
#############################
# Tree search left #
#############################
def _search_left_rec(self, value, node):
'''
Search for the rightmost index whose value is lower than or equal to the query value
'''
# Sort edge case
if node is None:
return None
# If to the left, we can always return the index, even if None.
# If to the right and none, we return current index, as it will
# be the closest to value from the left
if value < node.value:
return self._search_left_rec(value, node.lchild)
else:
index = self._search_left_rec(value, node.rchild)
if index is None:
return node.index
return index
##############################################
# Node CLASS #
##############################################
class Node():
'''
Class for the bin_tree nodes.
'''
#############################
# Constructor #
#############################
def __init__(self, value, index):
'''
Initialize the node with its value and index
'''
self.value = value
self.index = index
self.lchild = None
self.rchild = None
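# Illustrative usage sketch (added, not part of the original source): Bin_tree
# builds a balanced search tree over a sorted array, and search_left returns the
# rightmost index whose value is lower than or equal to the query, e.g.
#   tree = Bin_tree([0.0, 1.0, 2.5, 4.0])
#   tree.search_left(3.0)   # -> 2, because 2.5 <= 3.0 < 4.0
#   tree.search_left(-1.0)  # -> 0, values below the range fall back to index 0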
| bsd-3-clause | -8,643,699,521,290,951,000 | 38.303817 | 180 | 0.51382 | false |
hale36/SRTV | sickbeard/providers/torrentz.py | 1 | 5614 | # Author: Dustyn Gibson <[email protected]>
# URL: https://github.com/SiCKRAGETV/SickRage
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import re
import time
import traceback
import xmltodict
import HTMLParser
from six.moves import urllib
from xml.parsers.expat import ExpatError
import sickbeard
from sickbeard import logger
from sickbeard import tvcache
from sickbeard.providers import generic
from sickbeard.common import cpu_presets
class TORRENTZProvider(generic.TorrentProvider):
def __init__(self):
generic.TorrentProvider.__init__(self, "Torrentz")
self.public = True
self.supportsBacklog = True
self.confirmed = True
self.ratio = None
self.minseed = None
self.minleech = None
self.cache = TORRENTZCache(self)
self.urls = {'verified': 'https://torrentz.eu/feed_verified',
'feed': 'https://torrentz.eu/feed',
'base': 'https://torrentz.eu/'}
self.url = self.urls['base']
def isEnabled(self):
return self.enabled
def seedRatio(self):
return self.ratio
@staticmethod
def _split_description(description):
match = re.findall(r'[0-9]+', description)
return (int(match[0]) * 1024**2, int(match[1]), int(match[2]))
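# Illustrative example (added, not part of the original source): the Torrentz
# feed description is expected to contain three integers in order (size in MB,
# seeders, leechers). A description such as "Size: 512 Mb Seeds: 17 Peers: 4"
# (the exact wording is an assumption) would be parsed by the regex above into
# (512 * 1024**2, 17, 4); only the order of the numbers matters here.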
def _doSearch(self, search_strings, search_mode='eponly', epcount=0, age=0, epObj=None):
results = []
items = {'Season': [], 'Episode': [], 'RSS': []}
for mode in search_strings:
for search_string in search_strings[mode]:
search_url = self.urls['verified'] if self.confirmed else self.urls['feed']
if mode != 'RSS':
search_url += '?q=' + urllib.parse.quote_plus(search_string)
logger.log(search_url)
data = self.getURL(search_url)
if not data:
logger.log('Seems to be down right now!')
continue
if not data.startswith("<?xml"):
logger.log('Wrong data returned from: ' + search_url, logger.DEBUG)
continue
if not data.startswith('<?xml'):
logger.log(u'Expected xml but got something else, is your proxy failing?', logger.INFO)
continue
try:
data = xmltodict.parse(HTMLParser.HTMLParser().unescape(data.encode('utf-8')).decode('utf-8').replace('&', '&amp;'))
except ExpatError:
logger.log(u"Failed parsing provider. Traceback: %r\n%r" % (traceback.format_exc(), data), logger.ERROR)
continue
if not all([data, 'rss' in data, 'channel' in data['rss'], 'item' in data['rss']['channel']]):
logger.log(u"Malformed rss returned, skipping", logger.DEBUG)
continue
time.sleep(cpu_presets[sickbeard.CPU_PRESET])
# https://github.com/martinblech/xmltodict/issues/111
entries = data['rss']['channel']['item']
entries = entries if isinstance(entries, list) else [entries]
for item in entries:
if 'tv' not in item.get('category', ''):
continue
title = item.get('title', '').rsplit(' ', 1)[0].replace(' ', '.')
t_hash = item.get('guid', '').rsplit('/', 1)[-1]
if not all([title, t_hash]):
continue
# TODO: Add method to generic provider for building magnet from hash.
download_url = "magnet:?xt=urn:btih:" + t_hash + "&dn=" + title + "&tr=udp://tracker.openbittorrent.com:80&tr=udp://tracker.coppersurfer.tk:6969&tr=udp://open.demonii.com:1337&tr=udp://tracker.leechers-paradise.org:6969&tr=udp://exodus.desync.com:6969"
size, seeders, leechers = self._split_description(item.get('description', ''))
#Filter unseeded torrent
if seeders < self.minseed or leechers < self.minleech:
if mode != 'RSS':
logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers), logger.DEBUG)
continue
items[mode].append((title, download_url, size, seeders, leechers))
#For each search mode sort all the items by seeders if available
items[mode].sort(key=lambda tup: tup[3], reverse=True)
results += items[mode]
return results
class TORRENTZCache(tvcache.TVCache):
def __init__(self, provider_obj):
tvcache.TVCache.__init__(self, provider_obj)
# only poll every 15 minutes max
self.minTime = 15
def _getRSSData(self):
return {'entries': self.provider._doSearch({'RSS': ['']})}
provider = TORRENTZProvider()
| gpl-3.0 | -9,218,043,349,983,745,000 | 38.535211 | 272 | 0.584788 | false |
munhyunsu/Hobby | 2018F_SCSCAlgorithm/week5/change_money_dp_list_test.py | 1 | 1356 | import unittest
import random
from change_money_dp_list import make_change
class TestMakechange(unittest.TestCase):
def test_makechange(self):
coin_value_list = [1, 5, 10, 21, 25]
for change in range(0, 101, 1):
random.shuffle(coin_value_list)
known_result = list()
for index in range(0, change + 1):
known_result.append([])
result = make_change(coin_value_list, change, known_result)
print(sum(known_result[change]) == change, known_result[change], change)
self.assertEqual(sum(known_result[change]), change)
def test_63(self):
coin_value_list = [1, 5, 10, 21, 25]
random.shuffle(coin_value_list)
known_result = list()
for index in range(0, 63 + 1):
known_result.append([])
make_change(coin_value_list, 63, known_result)
self.assertEqual([21, 21, 21], known_result[63])
def test_12(self):
coin_value_list = [1, 2, 3, 5, 7]
random.shuffle(coin_value_list)
known_result = list()
for index in range(0, 12 + 1):
known_result.append([])
make_change(coin_value_list, 12, known_result)
known_result[12].sort()
self.assertEqual([5, 7], known_result[12])
if __name__ == '__main__':
unittest.main(verbosity=2)
| gpl-3.0 | -569,924,681,379,773,600 | 32.9 | 84 | 0.580383 | false |
memsharded/conan | conans/model/requires.py | 1 | 5387 | from collections import OrderedDict
import six
from conans.errors import ConanException
from conans.model.ref import ConanFileReference
from conans.util.env_reader import get_env
class Requirement(object):
""" A reference to a package plus some attributes of how to
depend on that package
"""
def __init__(self, ref, private=False, override=False):
"""
param override: True means that this is not an actual requirement, but something to
be passed upstream and override possible existing values
"""
self.ref = ref
self.range_ref = ref
self.override = override
self.private = private
self.build_require = False
@property
def version_range(self):
""" returns the version range expression, without brackets []
or None if it is not an expression
"""
version = self.range_ref.version
if version.startswith("[") and version.endswith("]"):
return version[1:-1]
@property
def is_resolved(self):
""" returns True if the version_range reference has been already resolved to a
concrete reference
"""
return self.ref != self.range_ref
def __repr__(self):
return ("%s" % str(self.ref) + (" P" if self.private else ""))
def __eq__(self, other):
return (self.override == other.override and
self.ref == other.ref and
self.private == other.private)
def __ne__(self, other):
return not self.__eq__(other)
class Requirements(OrderedDict):
""" {name: Requirement} in order, e.g. {"Hello": Requirement for Hello}
"""
def __init__(self, *args):
super(Requirements, self).__init__()
for v in args:
if isinstance(v, tuple):
override = private = False
ref = v[0]
for elem in v[1:]:
if elem == "override":
override = True
elif elem == "private":
private = True
else:
raise ConanException("Unknown requirement config %s" % elem)
self.add(ref, private=private, override=override)
else:
self.add(v)
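# Illustrative usage (added, not part of the original source): plain reference
# strings and tuples with config flags are both accepted, e.g.
#   Requirements("zlib/1.2.11@conan/stable",
#                ("openssl/1.0.2@conan/stable", "override"),
#                ("bzip2/1.0.6@conan/stable", "private"))
# creates three Requirement entries keyed by package name, with the
# corresponding override/private flags set.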
def copy(self):
""" We need a custom copy as the normal one requires __init__ to be
properly defined. This is not a deep-copy, in fact, requirements in the dict
are changed by RangeResolver, and are propagated upstream
"""
result = Requirements()
for name, req in self.items():
result[name] = req
return result
def iteritems(self): # FIXME: Just a trick to not change default testing conanfile for py3
return self.items()
def add(self, reference, private=False, override=False):
""" to define requirements by the user in text, prior to any propagation
"""
assert isinstance(reference, six.string_types)
ref = ConanFileReference.loads(reference)
name = ref.name
new_requirement = Requirement(ref, private, override)
old_requirement = self.get(name)
if old_requirement and old_requirement != new_requirement:
raise ConanException("Duplicated requirement %s != %s"
% (old_requirement, new_requirement))
else:
self[name] = new_requirement
def update(self, down_reqs, output, own_ref, down_ref):
""" Compute actual requirement values when downstream values are defined
param down_reqs: the current requirements as coming from downstream to override
current requirements
param own_ref: ConanFileReference of the current conanfile
param down_ref: ConanFileReference of the downstream that is overriding values or None
return: new Requirements() value to be passed upstream
"""
assert isinstance(down_reqs, Requirements)
assert isinstance(own_ref, ConanFileReference) if own_ref else True
assert isinstance(down_ref, ConanFileReference) if down_ref else True
error_on_override = get_env("CONAN_ERROR_ON_OVERRIDE", False)
new_reqs = down_reqs.copy()
if own_ref:
new_reqs.pop(own_ref.name, None)
for name, req in self.items():
if req.private:
continue
if name in down_reqs:
other_req = down_reqs[name]
# update dependency
other_ref = other_req.ref
if other_ref and other_ref != req.ref:
msg = "requirement %s overridden by %s to %s " \
% (req.ref, down_ref or "your conanfile", other_ref)
if error_on_override and not other_req.override:
raise ConanException(msg)
msg = "%s %s" % (own_ref, msg)
output.warn(msg)
req.ref = other_ref
new_reqs[name] = req
return new_reqs
def __call__(self, reference, private=False, override=False):
self.add(reference, private, override)
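# Illustrative note (added, not part of the original source): __call__ is what
# lets a conanfile declare dependencies through the callable syntax, e.g.
#   def requirements(self):
#       self.requires("zlib/1.2.11@conan/stable")
#       self.requires("openssl/1.0.2@conan/stable", override=True)
# where self.requires is an instance of this Requirements class.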
def __repr__(self):
result = []
for req in self.values():
result.append(str(req))
return '\n'.join(result)
| mit | 4,285,353,454,071,079,000 | 35.154362 | 95 | 0.573046 | false |
vmont/specfem3d | utils/Cubit_or_Gmsh/LibGmsh2Specfem_convert_Gmsh_to_Specfem3D.py | 1 | 17706 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#-------------------------------------------------------------
# GMSH mesh convertion for SPECFEM3D
#
# by Thomas CURTELIN
# Centrale Marseille, France, July 2012
#
# Based on Paul Cristini's equivalent script for 2D meshes
#
# For now it can only handle volumes meshed with hexahedra and boundaries meshed with quadrangles
# (boundary 3D elements must have four nodes on the boundary).
# I tested it with the "homogeneous_halfspace" example provided in the distribution.
#
#--------------------------------------------------------------
#
# /!\ IMPORTANT REMARKS /!\
# - Only first order hexahedral 3D-elements are handled by this script
# - Boundary 2D-elements must thus be first order quadrangles
# - "xmax", "xmin", "ymax", "ymin", "top" and "bottom" boundaries must be defined as physical surfaces in GMSH
# - Propagation media "M1", "M2", ... must be defined as physical volumes in GMSH
#
#--------------------------------------------------------------
#--------------------------------------------------------------
# PACKAGES
####################################################
import sys, string, time
from os.path import splitext, isfile
try:
from numpy import *
except ImportError:
print "error: package python-numpy is not installed"
sys.exit()
###################################################
# Save file function (ASCII format)
###################################################
def SauvFicSpecfem(Ng, Ct, Var, Fv):
# Ng is the name of the file to be written
# Ct is the number of entries, written alone on the first line of the output file
# Var is the name of the variable containing data to be written
# Fv is data format (%i for indexes and %f for coordinates)
savetxt(Ng,(Ct,), fmt='%i')
fd = open(Ng,'a')
savetxt(fd, Var, fmt=Fv, delimiter=' ')
fd.close()
return
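# Illustrative example (added, not part of the original source): calling
# SauvFicSpecfem('nodes_coords_file', 3, Nodes, ['%i','%.9f','%.9f','%.9f'])
# writes a file whose first line is the count (3), followed by one line per
# node, e.g.
#   3
#   1 0.000000000 0.000000000 0.000000000
#   2 1.000000000 0.000000000 0.000000000
#   3 1.000000000 1.000000000 0.000000000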
###################################################
# Read mesh function
###################################################
def OuvreGmsh(Dir,Nom):
# Get mesh name
if splitext(Nom)[-1]=='.msh':
fic=Nom
elif splitext(Nom)[-1]=='':
fic=Nom+'.msh'
else:
print 'File extension is not correct'
print 'script aborted'
sys.exit()
#
# Open the file and get the lines
####################################################
f = file(Dir+fic,'r')
lignes= f.readlines()
f.close()
# Locate information (elements, nodes and physical entities)
####################################################
for ii in range(len(lignes)):
if lignes[ii]=='$Nodes\n': PosNodes=ii
if lignes[ii]=='$PhysicalNames\n': PosPhys=ii
if lignes[ii]=='$Elements\n':
PosElem=ii
break
# Element type : first order ONLY
# 2D elt = 4-node quadrangle : GMSH flag = 3
# 3D elt = 8-node hexahedron : GMSH flag = 5
# cf. GMSH documentation available online
####################################################
Ngnod, surfElem, volElem = 4, 3, 5
len2D, len3D = 4, 8 # number of nodes per element according to type
###################################################
# PHYSICAL NAMES
###################################################
# Get physical surfaces names (borders) and physical volumes names (propagation volumes)
NbPhysNames = int(string.split(lignes[PosPhys+1])[0])
# Variable type
dt = dtype([('dimension',int), ('zone', int), ('name', str, 16)])
PhysCar=zeros((NbPhysNames,), dtype=dt)
for Ip in range(NbPhysNames):
Dim = int(string.split(lignes[PosPhys+2+Ip])[0]) # 2D or 3D
Zon = int(string.split(lignes[PosPhys+2+Ip])[1]) # Physical number
Nam = string.split(lignes[PosPhys+2+Ip])[2][1:-1] # Name (xmax, xmin ...)
PhysCar[Ip] = (Dim, Zon, Nam) # Sorting data
if Nam == 'xmax':
Bord_xmax=Zon
if Nam == 'xmin':
Bord_xmin=Zon
if Nam == 'ymin':
Bord_ymin=Zon
if Nam == 'ymax':
Bord_ymax=Zon
if Nam == 'bottom':
Bord_bottom=Zon
if Nam == 'top' :
Bord_top=Zon
###################################################
print 'Physical Names', PhysCar
###################################################
# GMSH file info
####################################################
Ver=float(string.split(lignes[1])[0])
File_Type=int(string.split(lignes[1])[1])
Data_Size=int(string.split(lignes[1])[2])
####################################################
# Nodes
####################################################
NbNodes=int(string.split(lignes[PosNodes+1])[0]) # Total number of nodes
print 'Number of nodes: ',NbNodes
Nodes=zeros((NbNodes,4),dtype=float) # Array receiving nodes index and coordinates
for Ninc in range(NbNodes):
Nodes[Ninc][0] = int(Ninc+1)
Nodes[Ninc][1:4] = [float(val) for val in (string.split(lignes[PosNodes+2+Ninc])[1:4])]
# Save in SPECFEM file format
####################################################
SauvFicSpecfem('nodes_coords_file', NbNodes, Nodes, ['%i','%.9f','%.9f','%.9f'])
####################################################
# Elements
####################################################
NbElements=int(string.split(lignes[PosElem+1])[0]) # Total number of elements
# Initializing arrays
Elements = empty((NbElements,len3D+1),dtype=int) # 3D elements
Milieu = empty((NbElements,2),dtype=int) # Media index
Elements3DBord = empty((NbElements),dtype=int) # Volume element next to borders
Elements2D = empty((NbElements,len2D),dtype=int) # Surface elements (borders)
#---------------------------------------------------------------------------
Elements2DBordTop = empty((NbElements,len2D),dtype=int)
Elements2DBordBottom = empty((NbElements,len2D),dtype=int)
Elements3DBordTop = zeros((NbElements,len2D+1),dtype=int)
Elements3DBordBottom = zeros((NbElements,len2D+1),dtype=int)
Elements2DBordxmin = empty((NbElements,len2D),dtype=int)
Elements2DBordxmax = empty((NbElements,len2D),dtype=int)
Elements3DBordxmin = zeros((NbElements,len2D+1),dtype=int)
Elements3DBordxmax = zeros((NbElements,len2D+1),dtype=int)
Elements2DBordymin = empty((NbElements,len2D),dtype=int)
Elements2DBordymax = empty((NbElements,len2D),dtype=int)
Elements3DBordymin = zeros((NbElements,len2D+1),dtype=int)
Elements3DBordymax = zeros((NbElements,len2D+1),dtype=int)
#---------------------------------------------------------------------------
# Initializing run through elements (surfaces and volumes)
Ninc2D, Ninc3D = 0, 0
# Initializing run through boundaries
Ninc2DBordTop, Ninc2DBordBottom, Ninc2DBordxmax, Ninc2DBordxmin, Ninc2DBordymax, Ninc2DBordymin, = 0, 0, 0, 0, 0, 0
print 'Number of elements: ', NbElements
for Ninc in range(NbElements):
# Line position
Pos = PosElem+Ninc+2
# Element type position on line
TypElem = int(string.split(lignes[Pos])[1])
# Physical entity number position on line
ZonP = int(string.split(lignes[Pos])[3])
# Initializing material index for Materials_file
Milieu[Ninc3D]= 1
# First case : Surface element
#print 'elem ',Ninc,Pos,TypElem,ZonP,'xmax,xmin,ymax,ymin,top,bottom', \
# Bord_xmax,Bord_xmin,Bord_ymax,Bord_ymin,Bord_top,Bord_bottom
if TypElem==surfElem:
# Get nodes indexes of the surface element
#Elements2D[Ninc2D] = [int(val) for val in (string.split(lignes[Pos])[6:])]
Elements2D[Ninc2D] = [int(val) for val in (string.split(lignes[Pos])[5:])] # MeshFormat 2.2
# Choosing boundary
if ZonP==Bord_xmax:
Elements2DBordxmax[Ninc2DBordxmax] = Elements2D[Ninc2D]
Ninc2DBordxmax+=1
if ZonP==Bord_xmin:
Elements2DBordxmin[Ninc2DBordxmin] = Elements2D[Ninc2D]
Ninc2DBordxmin+=1
if ZonP==Bord_ymax:
Elements2DBordymax[Ninc2DBordymax] = Elements2D[Ninc2D]
Ninc2DBordymax+=1
if ZonP==Bord_ymin:
Elements2DBordymin[Ninc2DBordymin] = Elements2D[Ninc2D]
Ninc2DBordymin+=1
if ZonP==Bord_top:
Elements2DBordTop[Ninc2DBordTop] = Elements2D[Ninc2D]
Ninc2DBordTop+=1
if ZonP==Bord_bottom:
Elements2DBordBottom[Ninc2DBordBottom] = Elements2D[Ninc2D]
Ninc2DBordBottom+=1
Ninc2D+=1
# Second case : Volume element
elif TypElem==volElem:
Elements[Ninc3D,0] = Ninc3D+1
#Elements[Ninc3D,1:]= [int(val) for val in (string.split(lignes[Pos])[6:])]
Elements[Ninc3D,1:]= [int(val) for val in (string.split(lignes[Pos])[5:])] # MeshFormat 2.2
Milieu[Ninc3D,0] = Ninc3D+1
Milieu[Ninc3D,1] = ZonP # - 6
Ninc3D+=1
else:
print "ERROR : wrong element type flag (3 or 5 only)"
# Reduce arrays (exclude zeros elements)
print 'number of elements: xmax,xmin,ymax,ymin,top,bottom = ',Ninc2DBordxmax,Ninc2DBordxmin,Ninc2DBordymax,Ninc2DBordymin, \
Ninc2DBordTop,Ninc2DBordBottom
Elements = Elements[:Ninc3D,:]
Milieu = Milieu[:Ninc3D,:]
Elements2D = Elements2D[:Ninc2D,:]
Elements2DBordxmin = Elements2DBordxmin[:Ninc2DBordxmin,:]
Elements2DBordxmax = Elements2DBordxmax[:Ninc2DBordxmax,:]
Elements2DBordymin = Elements2DBordymin[:Ninc2DBordymin,:]
Elements2DBordymax = Elements2DBordymax[:Ninc2DBordymax,:]
Elements2DBordTop = Elements2DBordTop[:Ninc2DBordTop,:]
Elements2DBordBottom = Elements2DBordBottom[:Ninc2DBordBottom,:]
# Get nodes from 2D boundary elements
Elements2DBordFlat=ravel(Elements2D)
NodesBordC=set(Elements2DBordFlat)
#-------------------------------------------------------
NodesBordxmax = set(ravel(Elements2DBordxmax))
NodesBordxmin = set(ravel(Elements2DBordxmin))
NodesBordymax = set(ravel(Elements2DBordymax))
NodesBordymin = set(ravel(Elements2DBordymin))
NodesBordTop = set(ravel(Elements2DBordTop))
NodesBordBottom = set(ravel(Elements2DBordBottom))
#-------------------------------------------------------
ctBord=0
ctxmax, ctxmin, ctymax, ctymin, ctt, ctb = 0, 0, 0, 0, 0, 0
for Ct3D in xrange(Ninc3D):
# Test if 3D element contains nodes on boundary
nodes3DcurrentElement = set(Elements[Ct3D,1:])
if not set.isdisjoint(nodes3DcurrentElement, NodesBordC): # True if there is nodes in common
# Choose boundary
if not set.isdisjoint(nodes3DcurrentElement, NodesBordxmax):
# Nodes in common between 3D current element and boundary
rr = set.intersection(nodes3DcurrentElement, NodesBordxmax)
if len(rr) != 4:
print "WARNING : wrong 2D boundary element type : ONLY QUADRANGLES"
print "Size of wrong intersection :"+str(len(rr))
print "Nodes :"
print rr
sys.exit()
else:
el = concatenate(([Ct3D+1], list(rr)))
Elements3DBordxmax[ctxmax,:] = el
ctxmax+=1
if not set.isdisjoint(nodes3DcurrentElement, NodesBordxmin):
rr = set.intersection(nodes3DcurrentElement, NodesBordxmin)
if len(rr) != 4:
print "WARNING : wrong 2D boundary element type : ONLY QUADRANGLES"
print "Size of wrong intersection :"+str(len(rr))
print "Nodes :"
print rr
sys.exit()
else:
el = concatenate(([Ct3D+1], list(rr)))
Elements3DBordxmin[ctxmin,:] = el
ctxmin+=1
if not set.isdisjoint(nodes3DcurrentElement, NodesBordymax):
rr = set.intersection(nodes3DcurrentElement, NodesBordymax)
if len(rr) != 4:
print "WARNING : wrong 2D boundary element type : ONLY QUADRANGLES"
print "Size of wrong intersection :"+str(len(rr))
print "Nodes :"
print rr
sys.exit()
else:
el = concatenate(([Ct3D+1], list(rr)))
Elements3DBordymax[ctymax,:] = el
ctymax+=1
if not set.isdisjoint(nodes3DcurrentElement, NodesBordymin):
rr = set.intersection(nodes3DcurrentElement, NodesBordymin)
if len(rr) != 4:
print "WARNING : wrong 2D boundary element type : ONLY QUADRANGLES"
print "Size of wrong intersection :"+str(len(rr))
print "Nodes :"
print rr
sys.exit()
else:
el = concatenate(([Ct3D+1], list(rr)))
Elements3DBordymin[ctymin,:] = el
ctymin+=1
if not set.isdisjoint(nodes3DcurrentElement, NodesBordTop):
rr = set.intersection(nodes3DcurrentElement, NodesBordTop)
if len(rr) != 4:
print "WARNING : wrong 2D boundary element type : ONLY QUADRANGLES"
print "Size of wrong intersection :"+str(len(rr))
print "Nodes :"
print rr
sys.exit()
else:
el = concatenate(([Ct3D+1], list(rr)))
Elements3DBordTop[ctt,:] = el
ctt+=1
if not set.isdisjoint(nodes3DcurrentElement, NodesBordBottom):
rr = set.intersection(nodes3DcurrentElement, NodesBordBottom)
if len(rr) != 4:
print "WARNING : wrong 2D boundary element type : ONLY QUADRANGLES"
print "Size of wrong intersection :"+str(len(rr))
print "Nodes :"
print rr
sys.exit()
else:
el = concatenate(([Ct3D+1], list(rr)))
Elements3DBordBottom[ctb,:] = el
ctb+=1
# Reducing arrays (exclude zeros elements)
Elements3DBord=Elements3DBord[:ctBord]
#----------------------------------------------------------------------
Elements3DBordTop = Elements3DBordTop[:ctt,:]
Elements3DBordxmax = Elements3DBordxmax[:ctxmax,:]
Elements3DBordxmin = Elements3DBordxmin[:ctxmin,:]
Elements3DBordymax = Elements3DBordymax[:ctymax,:]
Elements3DBordymin = Elements3DBordymin[:ctymin,:]
Elements3DBordBottom = Elements3DBordBottom[:ctb,:]
#-----------------------------------------------------------------------
# Save in SPECFEM file format
SauvFicSpecfem('mesh_file', Ninc3D, Elements, '%i')
#
savetxt('materials_file',Milieu, fmt='%i')
#
SauvFicSpecfem('free_or_absorbing_surface_file_zmax', ctt, Elements3DBordTop, '%i')
#
SauvFicSpecfem('absorbing_surface_file_xmax', ctxmax, Elements3DBordxmax, '%i')
#
SauvFicSpecfem('absorbing_surface_file_xmin', ctxmin, Elements3DBordxmin, '%i')
#
SauvFicSpecfem('absorbing_surface_file_ymax', ctymax, Elements3DBordymax, '%i')
#
SauvFicSpecfem('absorbing_surface_file_ymin', ctymin, Elements3DBordymin, '%i')
#
SauvFicSpecfem('absorbing_surface_file_bottom', ctb, Elements3DBordBottom, '%i')
return
if __name__=='__main__':
set_printoptions(precision=6, threshold=None, edgeitems=None, linewidth=200, suppress=None, nanstr=None, infstr=None)
#
Fic = sys.argv[1]; del sys.argv[1]
#
OuvreGmsh('',Fic)
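# Example invocation (illustrative, assuming the GMSH mesh file sits in the
# current directory):
#   python LibGmsh2Specfem_convert_Gmsh_to_Specfem3D.py mymesh.msh
# This writes nodes_coords_file, mesh_file, materials_file and the absorbing /
# free surface files, i.e. the mesh files read by SPECFEM3D (e.g. by
# xdecompose_mesh).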
| gpl-3.0 | 6,999,215,924,848,763,000 | 42.826733 | 130 | 0.486841 | false |
asm-products/pants-party | ppuser/forms.py | 1 | 1367 | from django import forms
from django.contrib.auth.forms import UserChangeForm
from ppuser.models import CustomUser
class CustomUserCreationForm(forms.ModelForm):
password1 = forms.CharField(label='Password', widget=forms.PasswordInput)
password2 = forms.CharField(label='Password confirmation', widget=forms.PasswordInput)
class Meta:
model = CustomUser
fields = ('username', 'display_name', 'email')
def clean_password2(self):
# Check that the two password entries match
password1 = self.cleaned_data.get("password1")
password2 = self.cleaned_data.get("password2")
if password1 and password2 and password1 != password2:
raise forms.ValidationError("Passwords don't match")
return password2
def __init__(self, *args, **kwargs):
super(CustomUserCreationForm, self).__init__(*args, **kwargs)
def save(self, commit=True):
# Save the provided password in hashed format
user = super(CustomUserCreationForm, self).save(commit=False)
user.set_password(self.cleaned_data["password1"])
if commit:
user.save()
return user
class CustomUserChangeForm(UserChangeForm):
def __init__(self, *args, **kargs):
super(CustomUserChangeForm, self).__init__(*args, **kargs)
class Meta:
model = CustomUser
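# Illustrative usage sketch (added, not part of the original source): a
# registration view could validate and persist a new user with
#   form = CustomUserCreationForm(request.POST)
#   if form.is_valid():
#       user = form.save()
# while CustomUserChangeForm is typically wired into the admin (e.g. via a
# UserAdmin subclass) for editing existing CustomUser instances.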
| agpl-3.0 | -5,837,199,713,151,674,000 | 34.051282 | 90 | 0.673007 | false |
diegocortassa/TACTIC | src/tactic/ui/panel/__init__.py | 1 | 1169 | ###########################################################
#
# Copyright (c) 2005-2008, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
from layout_util import *
from action_wdg import *
from layout_wdg import *
from print_layout_wdg import *
from planner_layout_wdg import *
from edit_wdg import *
from edit_cmd import *
from panel_wdg import *
from simple_side_bar_wdg import *
from freeform_layout_wdg import *
from custom_layout_wdg import *
from table_layout_wdg import *
from tile_layout_wdg import *
from sobject_panel_wdg import *
from search_type_manager_wdg import *
from schema_section_wdg import *
from manage_view_panel_wdg import *
from security_manager_wdg import *
from sql_panel_wdg import *
from swf_wdg import *
#from element_definition_wdg import *
from view_manager_wdg import *
from custom_search_wdg import *
from hash_panel_wdg import *
from static_table_layout_wdg import *
from tool_layout_wdg import *
from collection_wdg import *
| epl-1.0 | 3,939,677,892,655,107,000 | 23.87234 | 64 | 0.711719 | false |