# Copyright (c) 2012-2021, Mark Peek <[email protected]>
# All rights reserved.
#
# See LICENSE file for full license.
#
# *** Do not modify - this file is autogenerated ***
# Resource specification version: 31.0.0


from troposphere import Tags

from . import AWSObject, AWSProperty


class IamRole(AWSProperty):
    props = {
        "arn": (str, False),
    }


class IamUser(AWSProperty):
    props = {
        "arn": (str, False),
    }


class User(AWSProperty):
    props = {
        "id": (str, False),
    }


class AccessPolicyIdentity(AWSProperty):
    props = {
        "IamRole": (IamRole, False),
        "IamUser": (IamUser, False),
        "User": (User, False),
    }


class PortalProperty(AWSProperty):
    props = {
        "id": (str, False),
    }


class Project(AWSProperty):
    props = {
        "id": (str, False),
    }


class AccessPolicyResource(AWSProperty):
    props = {
        "Portal": (PortalProperty, False),
        "Project": (Project, False),
    }


class AccessPolicy(AWSObject):
    resource_type = "AWS::IoTSiteWise::AccessPolicy"

    props = {
        "AccessPolicyIdentity": (AccessPolicyIdentity, True),
        "AccessPolicyPermission": (str, True),
        "AccessPolicyResource": (AccessPolicyResource, True),
    }


class AssetHierarchy(AWSProperty):
    props = {
        "ChildAssetId": (str, True),
        "LogicalId": (str, True),
    }


class AssetProperty(AWSProperty):
    props = {
        "Alias": (str, False),
        "LogicalId": (str, True),
        "NotificationState": (str, False),
    }


class Asset(AWSObject):
    resource_type = "AWS::IoTSiteWise::Asset"

    props = {
        "AssetHierarchies": ([AssetHierarchy], False),
        "AssetModelId": (str, True),
        "AssetName": (str, True),
        "AssetProperties": ([AssetProperty], False),
        "Tags": (Tags, False),
    }


class Attribute(AWSProperty):
    props = {
        "DefaultValue": (str, False),
    }


class VariableValue(AWSProperty):
    props = {
        "HierarchyLogicalId": (str, False),
        "PropertyLogicalId": (str, True),
    }


class ExpressionVariable(AWSProperty):
    props = {
        "Name": (str, True),
        "Value": (VariableValue, True),
    }


class TumblingWindow(AWSProperty):
    props = {
        "Interval": (str, True),
    }


class MetricWindow(AWSProperty):
    props = {
        "Tumbling": (TumblingWindow, False),
    }


class Metric(AWSProperty):
    props = {
        "Expression": (str, True),
        "Variables": ([ExpressionVariable], True),
        "Window": (MetricWindow, True),
    }


class Transform(AWSProperty):
    props = {
        "Expression": (str, True),
        "Variables": ([ExpressionVariable], True),
    }


class PropertyType(AWSProperty):
    props = {
        "Attribute": (Attribute, False),
        "Metric": (Metric, False),
        "Transform": (Transform, False),
        "TypeName": (str, True),
    }


class AssetModelProperty(AWSProperty):
    props = {
        "DataType": (str, True),
        "DataTypeSpec": (str, False),
        "LogicalId": (str, True),
        "Name": (str, True),
        "Type": (PropertyType, True),
        "Unit": (str, False),
    }


class AssetModelCompositeModel(AWSProperty):
    props = {
        "CompositeModelProperties": ([AssetModelProperty], False),
        "Description": (str, False),
        "Name": (str, True),
        "Type": (str, True),
    }


class AssetModelHierarchy(AWSProperty):
    props = {
        "ChildAssetModelId": (str, True),
        "LogicalId": (str, True),
        "Name": (str, True),
    }


class AssetModel(AWSObject):
    resource_type = "AWS::IoTSiteWise::AssetModel"

    props = {
        "AssetModelCompositeModels": ([AssetModelCompositeModel], False),
        "AssetModelDescription": (str, False),
        "AssetModelHierarchies": ([AssetModelHierarchy], False),
        "AssetModelName": (str, True),
        "AssetModelProperties": ([AssetModelProperty], False),
        "Tags": (Tags, False),
    }


class Dashboard(AWSObject):
    resource_type = "AWS::IoTSiteWise::Dashboard"

    props = {
        "DashboardDefinition": (str, True),
        "DashboardDescription": (str, True),
        "DashboardName": (str, True),
        "ProjectId": (str, False),
        "Tags": (Tags, False),
    }


class GatewayCapabilitySummary(AWSProperty):
    props = {
        "CapabilityConfiguration": (str, False),
        "CapabilityNamespace": (str, True),
    }


class Greengrass(AWSProperty):
    props = {
        "GroupArn": (str, True),
    }


class GatewayPlatform(AWSProperty):
    props = {
        "Greengrass": (Greengrass, True),
    }


class Gateway(AWSObject):
    resource_type = "AWS::IoTSiteWise::Gateway"

    props = {
        "GatewayCapabilitySummaries": ([GatewayCapabilitySummary], False),
        "GatewayName": (str, True),
        "GatewayPlatform": (GatewayPlatform, True),
        "Tags": (Tags, False),
    }


class Portal(AWSObject):
    resource_type = "AWS::IoTSiteWise::Portal"

    props = {
        "PortalAuthMode": (str, False),
        "PortalContactEmail": (str, True),
        "PortalDescription": (str, False),
        "PortalName": (str, True),
        "RoleArn": (str, True),
        "Tags": (Tags, False),
    }
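For orientation, a minimal sketch of how these autogenerated classes are typically used in a troposphere template. The resource title, asset name and asset model ID are illustrative placeholders, and the import path assumes this module is installed as troposphere.iotsitewise as usual:

from troposphere import Template
from troposphere.iotsitewise import Asset, AssetProperty

template = Template()

# Declare an asset built from an existing asset model (IDs below are placeholders).
asset = Asset(
    "DemoAsset",
    AssetName="demo-asset",
    AssetModelId="asset-model-id-placeholder",
    AssetProperties=[
        AssetProperty(LogicalId="TemperatureProperty", NotificationState="ENABLED"),
    ],
)
template.add_resource(asset)

print(template.to_json())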
Compare Epson T5594 Ink Cartridge prices! On this page you will find companies that sell the Epson T5594 Ink Cartridge. Make sure to check the product information, company reviews and selected coupons when you comparison shop for the best possible deal on your desired cartridge. Use the links below for more details or to purchase the Epson T5594 Ink Cartridge online.
""" pglookout - test configuration Copyright (c) 2016 Ohmu Ltd See LICENSE for details """ from pglookout import logutil, pgutil from pglookout.pglookout import PgLookout from py import path as py_path # pylint: disable=no-name-in-module from unittest.mock import Mock import os import pytest import signal import subprocess import tempfile import time PG_VERSIONS = ["13", "12", "11", "10", "9.6", "9.5", "9.4", "9.3", "9.2"] logutil.configure_logging() @pytest.yield_fixture def pgl(): pgl_ = PgLookout("pglookout.json") pgl_.config["remote_conns"] = {} pgl_.check_for_maintenance_mode_file = Mock() pgl_.check_for_maintenance_mode_file.return_value = False pgl_.cluster_monitor._connect_to_db = Mock() # pylint: disable=protected-access pgl_.create_alert_file = Mock() pgl_.execute_external_command = Mock() try: yield pgl_ finally: pgl_.quit() class TestPG: def __init__(self, pgdata): self.pgbin = self.find_pgbin() self.pgdata = pgdata self.pg = None @staticmethod def find_pgbin(versions=None): pathformats = ["/usr/pgsql-{ver}/bin", "/usr/lib/postgresql/{ver}/bin"] for ver in versions or PG_VERSIONS: for pathfmt in pathformats: pgbin = pathfmt.format(ver=ver) if os.path.exists(pgbin): return pgbin return "/usr/bin" @property def pgver(self): with open(os.path.join(self.pgdata, "PG_VERSION"), "r") as fp: return fp.read().strip() def connection_string(self, user="testuser", dbname="postgres"): return pgutil.create_connection_string({ "dbname": dbname, "host": self.pgdata, "port": 5432, "user": user, }) def createuser(self, user="testuser"): self.run_cmd("createuser", "-h", self.pgdata, "-p", "5432", "-s", user) def run_cmd(self, cmd, *args): argv = [os.path.join(self.pgbin, cmd)] argv.extend(args) subprocess.check_call(argv) def run_pg(self): self.pg = subprocess.Popen([ os.path.join(self.pgbin, "postgres"), "-D", self.pgdata, "-k", self.pgdata, "-p", "5432", "-c", "listen_addresses=", ]) time.sleep(1.0) # let pg start def kill(self, force=True, immediate=True): if self.pg is None: return if force: os.kill(self.pg.pid, signal.SIGKILL) elif immediate: os.kill(self.pg.pid, signal.SIGQUIT) else: os.kill(self.pg.pid, signal.SIGTERM) timeout = time.monotonic() + 10 while (self.pg.poll() is None) and (time.monotonic() < timeout): time.sleep(0.1) if not force and self.pg.poll() is None: raise Exception("PG pid {} not dead".format(self.pg.pid)) # NOTE: cannot use 'tmpdir' fixture here, it only works in 'function' scope @pytest.yield_fixture(scope="session") def db(): tmpdir_obj = py_path.local(tempfile.mkdtemp(prefix="pglookout_dbtest_")) tmpdir = str(tmpdir_obj) # try to find the binaries for these versions in some path pgdata = os.path.join(tmpdir, "pgdata") db = TestPG(pgdata) # pylint: disable=redefined-outer-name db.run_cmd("initdb", "-D", pgdata, "--encoding", "utf-8") # NOTE: point $HOME to tmpdir - $HOME shouldn't affect most tests, but # psql triest to find .pgpass file from there as do our functions that # manipulate pgpass. By pointing $HOME there we make sure we're not # making persistent changes to the environment. 
os.environ["HOME"] = tmpdir # allow replication connections with open(os.path.join(pgdata, "pg_hba.conf"), "w") as fp: fp.write( "local all all trust\n" "local replication all trust\n" ) with open(os.path.join(pgdata, "postgresql.conf"), "a") as fp: fp.write( "max_wal_senders = 2\n" "wal_level = archive\n" # disable fsync and synchronous_commit to speed up the tests a bit "fsync = off\n" "synchronous_commit = off\n" # don't need to wait for autovacuum workers when shutting down "autovacuum = off\n" ) if db.pgver < "13": fp.write("wal_keep_segments = 100\n") db.run_pg() try: db.createuser() db.createuser("otheruser") yield db finally: db.kill() try: tmpdir_obj.remove(rec=1) except: # pylint: disable=bare-except pass
Today we index as DOCS_AND_POSITIONS, which is necessary because we stuff the payload into one of those tokens. If we indexed under two fields instead, then we could make the drill-down field DOCS_ONLY. But... once/if we cut over to doc values, then we could use one field again.
import os

from OpenSSL import crypto


class TLS:
    def __init__(self, id=None, certificate=None, private_key=None,
                 passphrase=None, intermediates=None, primary_cn=None):
        self.id = id
        self.certificate = certificate
        self.private_key = private_key
        self.passphrase = passphrase
        self.intermediates = intermediates
        self.primary_cn = primary_cn

    def build_pem(self):
        # Concatenate intermediates, certificate and private key into one PEM blob.
        parts = []
        if self.intermediates:
            parts.extend(self.intermediates)
        if self.certificate:
            parts.append(self.certificate)
        if self.private_key:
            parts.append(self.private_key)
        return "\n".join(parts)

    @staticmethod
    def get_primary_cn(certificate):
        cert = crypto.load_certificate(crypto.FILETYPE_PEM, certificate)
        return cert.get_subject().CN

    def create_pem_file(self, dest_dir):
        pem = self.build_pem()
        pem_file_name = os.path.join(dest_dir, self.primary_cn + '.pem')
        with open(pem_file_name, 'w') as f:
            f.write(pem)
        # The file contains private key material, so restrict it to the owner.
        os.chmod(pem_file_name, 0o600)
        return pem_file_name
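A hypothetical usage sketch of the class above; the file names and output directory are placeholders for a real certificate chain:

# Illustrative only: 'server.crt' and 'server.key' stand in for real PEM files.
with open('server.crt') as f:
    cert_pem = f.read()
with open('server.key') as f:
    key_pem = f.read()

tls = TLS(certificate=cert_pem, private_key=key_pem)
tls.primary_cn = TLS.get_primary_cn(cert_pem)
print(tls.create_pem_file('/tmp/certs'))  # e.g. /tmp/certs/www.example.com.pem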
The brake pads on a Toyota 4Runner need to be changed every year. Usually, this is because the pad material grinds down to about 1/8 inch thick. At this thickness, the pads will have a hard time gripping the brake rotor. When you notice a vibration or "shimmy" in the steering wheel while braking, this will be a clear indication that the brake pads on your 4Runner need changing.

1. Loosen the lug nuts on the wheels you'll be working on. Turn the lug nuts 40 degrees counterclockwise with a tire wrench.

2. Lift the 4Runner off the ground and put it on jack stands. Use the floor jack to lift up on the front and rear (if needed) jack points. On the 4Runner, these will be the front and rear cross member. Place jack stands underneath the front and rear pinch welds, and lower the 4Runner onto the stands.

3. Finish removing the lug nuts, and pull the wheel off the hub. The wheels may be stuck on there, but if you give them a good kick at the top or the bottom, you should be able to pull them right off.

4. Slide the c-clamp over the caliper assembly, which will expose the back of the outboard brake pad. Put the screw-end of the clamp against the outboard pad and the other end of the clamp against the back of the rotor. Tighten the clamp to push the caliper piston back into the caliper. You can't see the piston at this point, but you will see a space developing between the outboard pad and the caliper bracket. When the piston bottoms out in the caliper, stop tightening the clamp.

5. Remove the caliper pin bolt and upper and lower mounting bolts. The pin bolt is the bottom-most bolt on the caliper. The mounting bolts connect the caliper to the wheel hub. Slide the caliper off the brake rotor.

6. Open the caliper from the bottom and pull out the brake pads. Then, insert the new pads.

7. Reassemble the caliper. Assembly is the reverse of disassembly. Make sure that when tightening the bolts, you put a dab of thread locker on the threads of all bolts. Tighten the pin bolt to 23 lbs.-ft., and the mounting bolts to between 50 and 55 lbs.-ft. depending on the year of your 4Runner.

8. Spray the caliper and rotor down with brake parts cleaner to remove any oils from your fingers and dirt.

9. Put the wheels back on, lower the 4Runner to the ground, and tighten the lug nuts to 100 lbs.-ft.
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).

from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
                        unicode_literals, with_statement)

import os
import re
import shutil

from twitter.common.collections import OrderedSet

from pants.backend.codegen.thrift.lib.thrift import Thrift
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TaskError
from pants.base.workunit import WorkUnitLabel
from pants.option.custom_types import target_option
from pants.task.simple_codegen_task import SimpleCodegenTask
from pants.util.memo import memoized_property
from pants.util.process_handler import subprocess


class ApacheThriftGenBase(SimpleCodegenTask):
  # The name of the thrift generator to use. Subclasses must set.
  # E.g., java, py (see `thrift -help` for all available generators).
  thrift_generator = None

  # Subclasses may set their own default generator options.
  default_gen_options_map = None

  @classmethod
  def register_options(cls, register):
    super(ApacheThriftGenBase, cls).register_options(register)

    # NB: As of thrift 0.9.2 there is 1 warning that -strict promotes to an error - missing a
    # struct field id.  If an artifact was cached with strict off, we must re-gen with strict on
    # since this case may be present and need to generate a thrift compile error.
    register('--strict', default=True, fingerprint=True, type=bool,
             help='Run thrift compiler with strict warnings.')

    # The old --gen-options was string-typed, so we keep it that way for backwards compatibility,
    # and reluctantly use the clunky name --gen-options-map for the new, map-typed options.
    # TODO: Do a deprecation cycle to restore the old name.
    register('--gen-options-map', type=dict, advanced=True, fingerprint=True,
             default=cls.default_gen_options_map,
             help='Use these options for the {} generator.'.format(cls.thrift_generator))

    register('--deps', advanced=True, type=list, member_type=target_option,
             help='A list of specs pointing to dependencies of thrift generated code.')
    register('--service-deps', advanced=True, type=list, member_type=target_option,
             help='A list of specs pointing to dependencies of thrift generated service '
                  'code. If not supplied, then --deps will be used for service deps.')

  @classmethod
  def subsystem_dependencies(cls):
    return super(ApacheThriftGenBase, cls).subsystem_dependencies() + (Thrift.scoped(cls),)

  def synthetic_target_extra_dependencies(self, target, target_workdir):
    for source in target.sources_relative_to_buildroot():
      if self._declares_service(os.path.join(get_buildroot(), source)):
        return self._service_deps
    return self._deps

  def execute_codegen(self, target, target_workdir):
    target_cmd = self._thrift_cmd[:]

    bases = OrderedSet(tgt.target_base for tgt in target.closure() if self.is_gentarget(tgt))
    for base in bases:
      target_cmd.extend(('-I', base))

    if hasattr(target, 'compiler_args'):
      target_cmd.extend(list(target.compiler_args or []))

    target_cmd.extend(('-o', target_workdir))

    for source in target.sources_relative_to_buildroot():
      cmd = target_cmd[:]
      cmd.append(os.path.join(get_buildroot(), source))
      with self.context.new_workunit(name=source,
                                     labels=[WorkUnitLabel.TOOL],
                                     cmd=' '.join(cmd)) as workunit:
        result = subprocess.call(cmd,
                                 stdout=workunit.output('stdout'),
                                 stderr=workunit.output('stderr'))
        if result != 0:
          raise TaskError('{} ... exited non-zero ({})'.format(self._thrift_binary, result))

    # The thrift compiler generates sources to a gen-[lang] subdir of the `-o` argument.  We
    # relocate the generated sources to the root of the `target_workdir` so that our base class
    # maps them properly.
    gen_dir = os.path.join(target_workdir, 'gen-{}'.format(self.thrift_generator))
    for path in os.listdir(gen_dir):
      shutil.move(os.path.join(gen_dir, path), target_workdir)
    os.rmdir(gen_dir)

  @memoized_property
  def _thrift_binary(self):
    return Thrift.scoped_instance(self).select(context=self.context)

  @memoized_property
  def _deps(self):
    deps = self.get_options().deps
    return list(self.resolve_deps(deps))

  @memoized_property
  def _service_deps(self):
    service_deps = self.get_options().service_deps
    return list(self.resolve_deps(service_deps)) if service_deps else self._deps

  SERVICE_PARSER = re.compile(r'^\s*service\s+(?:[^\s{]+)')

  def _declares_service(self, source):
    with open(source) as thrift:
      return any(line for line in thrift if self.SERVICE_PARSER.search(line))

  @memoized_property
  def _thrift_cmd(self):
    cmd = [self._thrift_binary]

    def opt_str(item):
      return item[0] if not item[1] else '{}={}'.format(*item)

    gen_opts_map = self.get_options().gen_options_map or {}
    gen_opts = [opt_str(item) for item in gen_opts_map.items()]

    generator_spec = ('{}:{}'.format(self.thrift_generator, ','.join(gen_opts)) if gen_opts
                      else self.thrift_generator)
    cmd.extend(('--gen', generator_spec))

    if self.get_options().strict:
      cmd.append('-strict')
    if self.get_options().level == 'debug':
      cmd.append('-verbose')
    return cmd
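To illustrate the service-detection logic that decides between --deps and --service-deps, here is a small self-contained sketch of the SERVICE_PARSER regex applied to an example thrift source; the thrift snippet itself is made up:

import re

SERVICE_PARSER = re.compile(r'^\s*service\s+(?:[^\s{]+)')

thrift_source = """
namespace py example

struct Ping { 1: string message }

service PingService {
  Ping ping(1: Ping request)
}
"""

# Mirrors _declares_service(): a thrift file that declares a service causes the
# generated target to depend on --service-deps instead of plain --deps.
declares_service = any(SERVICE_PARSER.search(line) for line in thrift_source.splitlines())
print(declares_service)  # True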
FrustratedAndAnnoyed – A 100% NIGHTMARE. AVOID! – If you want constant email problems, like receiving tons of spam and NOT receiving legitimate emails. If you want terrible customer service, the kind that takes 2-4 HOURS to ‘solve’ a problem and then SURPRISE! it’s not solved. If you want a slow website despite doing everything you can to optimise. If you want a user interface that will leave you completely befuddled and annoyed. If you want to waste tons of time trying to fix the same problems over and over again. If you want to waste your money. If you want to tear your hair out with frustration. Then go with #Justhost. Otherwise, avoid like the plague.
import sys
import os

from path import path

XQUEUE_INTERFACE = {
    "url": "http://127.0.0.1:3032",
    "django_auth": {
        "username": "lms",
        "password": "abcd"
    },
    "basic_auth": ('anant', 'agarwal'),
}

MITX_FEATURES = {
    'SAMPLE': False,
    'USE_DJANGO_PIPELINE': True,
    'DISPLAY_HISTOGRAMS_TO_STAFF': True,
    'REROUTE_ACTIVATION_EMAIL': False,  # nonempty string = address for all activation emails
    'DEBUG_LEVEL': 0,  # 0 = lowest level, least verbose, 255 = max level, most verbose

    ## DO NOT SET TO True IN THIS FILE
    ## Doing so will cause all courses to be released on production
    'DISABLE_START_DATES': False,  # When True, all courses will be active, regardless of start date

    # When True, will only publicly list courses by the subdomain. Expects you
    # to define COURSE_LISTINGS, a dictionary mapping subdomains to lists of
    # course_ids (see dev_int.py for an example)
    'SUBDOMAIN_COURSE_LISTINGS': False,

    # When True, will override certain branding with university specific values
    # Expects a SUBDOMAIN_BRANDING dictionary that maps the subdomain to the
    # university to use for branding purposes
    'SUBDOMAIN_BRANDING': False,

    # set this to the university domain to use, as an override to HTTP_HOST
    # set to None to do no university selection
    'FORCE_UNIVERSITY_DOMAIN': False,

    'ENABLE_TEXTBOOK': True,
    'ENABLE_DISCUSSION_SERVICE': True,

    'ENABLE_PSYCHOMETRICS': False,  # real-time psychometrics (eg item response theory analysis in instructor dashboard)

    'ENABLE_SQL_TRACKING_LOGS': False,
    'ENABLE_LMS_MIGRATION': False,
    'ENABLE_MANUAL_GIT_RELOAD': False,

    'DISABLE_LOGIN_BUTTON': False,  # used in systems where login is automatic, eg MIT SSL

    'STUB_VIDEO_FOR_TESTING': False,  # do not display video when running automated acceptance tests

    # external access methods
    'ACCESS_REQUIRE_STAFF_FOR_COURSE': False,
    'AUTH_USE_OPENID': False,
    'AUTH_USE_MIT_CERTIFICATES': False,
    'AUTH_USE_OPENID_PROVIDER': False,

    # analytics experiments
    'ENABLE_INSTRUCTOR_ANALYTICS': False,

    # Flip to True when the YouTube iframe API breaks (again)
    'USE_YOUTUBE_OBJECT_API': False,

    # Give a UI to show a student's submission history in a problem by the
    # Staff Debug tool.
    'ENABLE_STUDENT_HISTORY_VIEW': True
}

############################# SET PATH INFORMATION #############################
ENV_ROOT = os.path.abspath(os.path.join(__file__, "..", "..", "..", "..", ".."))
COURSES_ROOT = "{0}/{1}".format(ENV_ROOT, "data")

DATA_DIR = COURSES_ROOT

MODULESTORE = {
    'default': {
        'ENGINE': 'xmodule.modulestore.xml.XMLModuleStore',
        'OPTIONS': {
            'data_dir': DATA_DIR,
            'default_class': 'xmodule.hidden_module.HiddenDescriptor',
        }
    }
}

GENERATE_PROFILE_SCORES = False
Our medical intern Claudia talks about her fantastic experience working at Karan Hospital in Palampur, India. The truly inspiring work that the doctors and staff in these hospitals do is something that is great to witness and experience first-hand. Read to find out about how a day in this beautiful area of the world would go. Most days we were lucky enough to wake up to an amazing view of the mountains. We could enjoy this as we walked up the hill to catch the bus to the hospitals. During our time in Palampur we went to three different hospitals. A women and children’s hospital, a general hospital, and an internal medicine clinic (which was sort of like the GP). When we were at the women’s hospital, we were split into two groups of two. One pair with the gynaecologist and one pair with the paediatrician. With the paediatrician, we got to see lots of cute babies and young children. With the gynaecologist you see lots of pregnancy check-ups. The staff are all really friendly and they are very open to answering questions. Within the first half an hour of my placement in the hospital we got called upstairs to see a baby being born! An amazing way to start my internship. The general hospital (called Karan Hospital) is much busier than the women’s and children’s hospital. So much so that whilst one patient is being seen by the doctor, the following patients on the list will be sitting waiting in the same room! This is something that we found very odd, coming from UK hospitals where patient confidentiality is extremely important. Karan is a drop-in hospital and has to deal with many different types of health problems. It made the experience really exciting as you never know what you’re going to see next. At this hospital we saw ultrasounds, endoscopies, x-rays, labs and surgery so there was a lot to observe in our short time there. At the internal medicine clinic, we got to see ultrasounds and treadmill tests, (used to measure heart rate whilst exercising for those with potential heart problems). Although not like the busy hospital, the clinic was still varied and exciting. The doctor always made sure to explain to us in detail about the problem the patient presented with and how they treat it. At about 2pm we headed back to the accommodation for lunch. This usually consisted of rice, chickpeas, vegetables and chapatti. On many afternoons, there was surgery at the Karan hospital so a group of four of us would head up to see an operation after lunch. I got to see a gall bladder removal due to gall stones. It was keyhole surgery so we got to see it all on the screen. It was really interesting to watch. If you’re not going to surgery you might take a trip up to the market. Here we could wander round and pick up some of the western foods we missed most from home. Cadbury’s chocolate, Oreos and digestives were just some of them!. We also bought locally grown tea to take home. For the rest of the afternoon we relaxed, read, played cards and a lot of games of Uno! It was nice to spend time with the group and this way you get to know everyone really well. Once a week we had yoga and a lecture in the evening. Yoga was a really fun experience and we all really enjoyed the lectures where we got to learn more about the Indian culture and the medical system. We had dinner at 7pm and after dinner (weather permitting!) we all grabbed a yoga mat and went up to the roof. Here we chatted and watched the stars before heading to bed to get some well-earned sleep.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -*- Python -*-

"""
 @file ActroidKinematics.py
 @brief ModuleDescription
 @date $Date$
"""
from __future__ import print_function

import sys
import time
import math
import traceback

import numpy as np
import scipy as sp

sys.path.append(".")

# Import RTM module
import RTC
import OpenRTM_aist

# Import Service implementation class
# <rtc-template block="service_impl">
# </rtc-template>

# Import Service stub modules
# <rtc-template block="consumer_import">
# </rtc-template>

# This module's specification
# <rtc-template block="module_spec">
actroidkinematics_spec = ["implementation_id", "ActroidKinematics",
                          "type_name",         "ActroidKinematics",
                          "description",       "ModuleDescription",
                          "version",           "1.0.0",
                          "vendor",            "VenderName",
                          "category",          "Category",
                          "activity_type",     "STATIC",
                          "max_instance",      "1",
                          "language",          "Python",
                          "lang_type",         "SCRIPT",
                          ""]
# </rtc-template>


##
# @class ActroidKinematics
# @brief ModuleDescription
#
class ActroidKinematics(OpenRTM_aist.DataFlowComponentBase):

    ##
    # @brief constructor
    # @param manager Manager Object
    #
    def __init__(self, manager):
        OpenRTM_aist.DataFlowComponentBase.__init__(self, manager)

        self._d_posein = RTC.TimedDoubleSeq(RTC.Time(0, 0), [])
        self._poseinIn = OpenRTM_aist.InPort("posein", self._d_posein)
        self._d_poseout = RTC.TimedPose3D(RTC.Time(0, 0), 0)
        self._poseoutOut = OpenRTM_aist.OutPort("poseout", self._d_poseout)

        # initialize of configuration-data.
        # <rtc-template block="init_conf_param">
        # </rtc-template>

    ##
    # The initialize action (on CREATED->ALIVE transition)
    # formerly rtc_init_entry()
    #
    # @return RTC::ReturnCode_t
    #
    def onInitialize(self):
        # Bind variables and configuration variable
        # Set InPort buffers
        self.addInPort("posein", self._poseinIn)
        # Set OutPort buffers
        self.addOutPort("poseout", self._poseoutOut)
        # Set service provider to Ports
        # Set service consumers to Ports
        # Set CORBA Service Ports
        return RTC.RTC_OK

    # Commented-out template callbacks generated by rtc-template; enable as needed:
    #
    # def onFinalize(self, ec_id):     # finalize action (ALIVE->END), formerly rtc_exiting_entry()
    # def onStartup(self, ec_id):      # startup action when ExecutionContext starts
    # def onShutdown(self, ec_id):     # shutdown action when ExecutionContext stops
    # def onActivated(self, ec_id):    # Active state entry action, formerly rtc_active_entry()
    # def onDeactivated(self, ec_id):  # Active state exit action, formerly rtc_active_exit()
    # (each simply returns RTC.RTC_OK)

    ##
    # The execution action that is invoked periodically
    # formerly rtc_active_do()
    #
    # @param ec_id target ExecutionContext Id
    # @return RTC::ReturnCode_t
    #
    def onExecute(self, ec_id):
        th = []
        try:
            def rotationXandOffset(x, y, z, th):
                s = math.sin(th)
                c = math.cos(th)
                P = np.array([[1, 0, 0, x], [0, c, s, y], [0, -s, c, z], [0, 0, 0, 1]])
                return P

            def rotationYandOffset(x, y, z, th):
                s = math.sin(th)
                c = math.cos(th)
                P = np.array([[c, 0, s, x], [0, 1, 0, y], [-s, 0, c, z], [0, 0, 0, 1]])
                return P

            def rotationZandOffset(x, y, z, th):
                s = math.sin(th)
                c = math.cos(th)
                P = np.array([[c, s, 0, x], [-s, c, 0, y], [0, 0, 1, z], [0, 0, 0, 1]])
                return P

            if self._poseinIn.isNew():
                data = self._poseinIn.read()
                for num in range(8, 15):
                    value = data.data[num]
                    th.append(value)

                l1 = 10
                l2 = 12
                l3 = 15

                T1 = rotationYandOffset(0, 0, 0, th[0])
                T2 = rotationXandOffset(0, 0, 0, th[1])
                T3 = rotationZandOffset(0, 0, l1, th[2])
                T4 = rotationYandOffset(0, 0, 0, th[3])
                T5 = rotationZandOffset(0, 0, l2, th[4])
                T6 = rotationYandOffset(0, 0, 0, th[5])
                T7 = rotationXandOffset(l3, 0, 0, th[6])
                Hand = np.array([[0], [0], [0], [1]])
                T = [T1, T2, T3, T4, T5, T6, T7]

                target_T = sp.dot(T1, sp.dot(T2, sp.dot(T3, sp.dot(T4, sp.dot(T5, sp.dot(T6, sp.dot(T7, Hand)))))))
                print('Hand Position is ', target_T)
                # Only the result computed from the first data sample is shown.
                # raw_input()  # block until the Return key is pressed

            return RTC.RTC_OK
        except Exception as e:
            print('Exception : ', e)
            traceback.print_exc()
            # shorthand for print_exception(sys.exc_type, sys.exc_value, sys.exc_traceback, limit, file)

        return RTC.RTC_OK

    # Further commented-out template callbacks generated by rtc-template; enable as needed:
    #
    # def onAborting(self, ec_id):     # aborting action on main logic error, formerly rtc_aborting_entry()
    # def onError(self, ec_id):        # error action in ERROR state, formerly rtc_error_do()
    # def onReset(self, ec_id):        # reset action invoked on resetting
    # def onStateUpdate(self, ec_id):  # state update action invoked after onExecute()
    # def onRateChanged(self, ec_id):  # invoked when the execution context's rate is changed
    # (each simply returns RTC.RTC_OK)


def ActroidKinematicsInit(manager):
    profile = OpenRTM_aist.Properties(defaults_str=actroidkinematics_spec)
    manager.registerFactory(profile, ActroidKinematics, OpenRTM_aist.Delete)


def MyModuleInit(manager):
    ActroidKinematicsInit(manager)

    # Create a component
    comp = manager.createComponent("ActroidKinematics")


def main():
    mgr = OpenRTM_aist.Manager.init(sys.argv)
    mgr.setModuleInitProc(MyModuleInit)
    mgr.activateManager()
    mgr.runManager()


if __name__ == "__main__":
    main()
Dr. Wade is a native and current resident of Baton Rouge, Louisiana. She currently serves as Assistant Chief Administrative Officer for the EBR Office of Mayor-President. Dr. Wade is a licensed educator and has served as a founding Dean and Adjunct Professor. She has taught at every level of the education spectrum from kindergarten to doctoral degree.
import gevent
from geventwebsocket import WebSocketApplication

from guacamole.client import GuacamoleClient, PROTOCOL_NAME

try:
    # Add local_settings.py with RDP connection variables
    from local_settings import (
        PROTOCOL, USERNAME, PASSWORD, HOST, PORT, DOMAIN, APP, SEC)
except ImportError:
    PROTOCOL = 'rdp'  # assumed default; the original fallback left PROTOCOL undefined
    USERNAME = ''
    PASSWORD = ''
    HOST = ''
    PORT = 3389
    DOMAIN = ''
    APP = ''
    SEC = ''


class GuacamoleApp(WebSocketApplication):

    def __init__(self, ws):
        self.client = None
        self._listener = None

        super(GuacamoleApp, self).__init__(ws)

    @classmethod
    def protocol_name(cls):
        """
        Return our protocol.
        """
        return PROTOCOL_NAME

    def on_open(self, *args, **kwargs):
        """
        New Web socket connection opened.
        """
        if self.client:
            # we have a running client?!
            self.client.close()

        # @TODO: get guacd host and port!
        self.client = GuacamoleClient('localhost', 4822)

        # @TODO: get Remote server connection properties
        self.client.handshake(protocol=PROTOCOL, hostname=HOST, port=PORT,
                              username=USERNAME, password=PASSWORD,
                              domain=DOMAIN, security=SEC, remote_app=APP)

        self._start_listener()

    def on_message(self, message):
        """
        New message received on the websocket.
        """
        # send message to guacd server
        self.client.send(message)

    def on_close(self, reason):
        """
        Websocket closed.
        """
        # @todo: consider reconnect from client. (network glitch?!)
        self._stop_listener()
        self.client.close()
        self.client = None

    def _start_listener(self):
        if self._listener:
            self._stop_listener()
        self._listener = gevent.spawn(self.guacd_listener)
        self._listener.start()

    def _stop_listener(self):
        if self._listener:
            self._listener.kill()
            self._listener = None

    def guacd_listener(self):
        """
        A listener that would handle any messages sent from Guacamole server
        and push directly to browser client (over websocket).
        """
        while True:
            instruction = self.client.receive()
            self.ws.send(instruction)
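For context, a minimal sketch of how such a WebSocketApplication is typically mounted with gevent-websocket's WebSocketServer and Resource; the listen address and URL path are illustrative:

from collections import OrderedDict

from geventwebsocket import WebSocketServer, Resource

# Serve GuacamoleApp on an illustrative path; a real deployment would pick its own
# bind address, port and tunnel path.
server = WebSocketServer(
    ('0.0.0.0', 8080),
    Resource(OrderedDict([('/websocket-tunnel', GuacamoleApp)])),
)
server.serve_forever()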
“MIHI REKMA” (Protecting Mother Earth) – Environmental Unit of Sarvodaya was ceremonially re-established at the Sarvodaya Head Quarters on 3rd August 2007 with the intention of re-commencing Sarvodaya Environmental Activities that are deeply rooted with spiritual values. On this auspicious day, as the initial step, an ecological awareness programme was conducted for the full-time staff of Sarvodaya Headquarters. The destructive trends of the industrial growth society, the importance of reconnecting with the web of life and cultivation of ecological awareness through various activities that are rooted in spiritual values were discussed at the gathering. Mrs. Udani Mendis, Deputy Executive Director (Technical Programmes) briefly explained the purpose of re-establishing the unit, its role and the present environmental activities undertaken by the movement. Dr. Vinya Ariyaratne, Executive Director enlightened the participants on the importance of protecting environment for the survival of humanity. Mrs. Kanchana Weerakoon, an environmentalist addressed the gathering on the responsibilities of the present generation to conserve natural resources for posterity. Mr. Achala Navaratne, an Environmental Engineer also made a presentation and screened “An Inconvenient Truth” a film on “Global Warming”. The main objective of the Environmental Unit is to create a shift in consciousness of people through various actions and activities which are rooted in spiritual values in order to build a life-sustaining, environmentally friendly society. Initially such ecological awareness programmes will be conducted at Sarvodaya Head Quarters and will be progressively expanded to Sarvodaya District Centers and then to Sarvodaya Villages and other villages. So the following activity plan for the Head quaretres suggested by the project team. Gathering and dissemination of information and knowledge on all environmental issues and application of best practices, Promoting Eco-village concept, Solid Waste Management, Home Gardening, Composting, Managing waste water systems, Environment friendly energy options, are few activities planned by the unit for the near future. Networking with national/international resource persons and forming an advisory committee, creating a data base of resource persons who are willing to support on voluntary basis and obtaining paid services of environmentalists engaged in related activities will also be planned by the new unit. Solid Waste Management Programme of Sarvodaya is now being implemented through 25 selected schools and five communities in five major towns of Western Province and this awareness programme will also be handled by the new unit. Sarvodaya Damniyamgama Eco-village at Lagoswatte, a housing project for Tsunami victims will also come under Mihirekma unit from now onwards.
# Settings for ColombiaTransparente
#
# Use local_settings.py to initialize installation-specific variables.
# See the end of this file.

import os.path

PROJECT_DIR = os.path.dirname(__file__)

# These users are emailed when exceptions occur
ADMINS = (
    ('Julian Pulgarin', '[email protected]'),
)

SEND_BROKEN_LINK_EMAILS = True

MANAGERS = ADMINS

TIME_ZONE = 'America/Bogota'
LANGUAGE_CODE = 'es-co'
SITE_ID = 1
USE_I18N = True
USE_L10N = True

MEDIA_ROOT = os.path.join(PROJECT_DIR, 'media/')

TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)

ROOT_URLCONF = 'urls'

FIXTURE_DIRS = (
    os.path.join(PROJECT_DIR, 'fixtures'),
)

TEMPLATE_DIRS = (
    os.path.join(PROJECT_DIR, 'templates'),
)

TEMPLATE_CONTEXT_PROCESSORS = (
    "django.core.context_processors.request",
    "django.contrib.auth.context_processors.auth",
    "django.core.context_processors.debug",
    "django.core.context_processors.i18n",
    "django.core.context_processors.media",
    "django.contrib.messages.context_processors.messages"
)

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.admin',
    'django.contrib.admindocs',
    'transparencia',
    'sorl.thumbnail',
    'django_bcrypt',
    'django.contrib.markup',
)

# local_settings.py initializes settings specific to this CT installation.
try:
    from local_settings import *  # located in the same directory as this file
except ImportError:
    import sys
    sys.stderr.write("Error: Must use a local_settings.py file to set specific settings for this CT installation.")
    sys.exit(1)
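Since the settings module above exits unless a local_settings.py is present, a minimal hypothetical sketch of such a file might look like this; all values are placeholders, not the project's real configuration:

# local_settings.py - illustrative values only; each installation supplies its own.
DEBUG = True
TEMPLATE_DEBUG = DEBUG

SECRET_KEY = 'replace-me-with-a-random-string'

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'colombiatransparente',
        'USER': 'ct',
        'PASSWORD': '',
        'HOST': 'localhost',
    }
}

MEDIA_URL = '/media/'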
The value of farmland has doubled since 2008, according to the latest figures from the Tompkins County Department of Assessment. In 2008, after the last reappraisal, the average price was $1000 per acre; now it is $2300 per acre. For me the interesting part is that this is not due to commercial development or natural gas speculation, but a higher demand for agricultural land. There are about 500 farms in Tompkins County and 100 full time farmers. Farms constitute about one-third of the county’s 300,000 acres. ©2019 Margaret Hobbie. All Rights Reserved.
from pytest import fixture
from sqlalchemy.orm import sessionmaker

from pycds import Contact, Network, Station, History, Variable


@fixture
def empty_sesh(base_engine, set_search_path):
    """Test-function scoped database session, with no schema or content.
    All session actions are rolled back on teardown.
    """
    sesh = sessionmaker(bind=base_engine)()
    set_search_path(sesh)
    yield sesh
    sesh.rollback()
    sesh.close()


@fixture
def pycds_sesh_with_small_data(pycds_sesh):
    # TODO: Use add_then_delete_objs (which ought to be renamed) here so that objects
    # are removed after test
    moti = Network(name="MoTIe")
    ec = Network(name="EC")
    wmb = Network(name="FLNROW-WMB")
    pycds_sesh.add_all([moti, ec, wmb])

    simon = Contact(name="Simon", networks=[moti])
    eric = Contact(name="Eric", networks=[wmb])
    pat = Contact(name="Pat", networks=[ec])
    pycds_sesh.add_all([simon, eric, pat])

    stations = [
        Station(
            native_id="11091",
            network=moti,
            histories=[
                History(
                    station_name="Brandywine",
                    the_geom="SRID=4326;POINT(-123.11806 50.05417)",
                )
            ],
        ),
        Station(
            native_id="1029",
            network=wmb,
            histories=[
                History(
                    station_name="FIVE MILE",
                    the_geom="SRID=4326;POINT(-122.68889 50.91089)",
                )
            ],
        ),
        Station(
            native_id="2100160",
            network=ec,
            histories=[
                History(
                    station_name="Beaver Creek Airport",
                    the_geom="SRID=4326;POINT(-140.866667 62.416667)",
                )
            ],
        ),
    ]
    pycds_sesh.add_all(stations)

    variables = [
        Variable(name="CURRENT_AIR_TEMPERATURE1", unit="celsius", network=moti),
        Variable(name="precipitation", unit="mm", network=ec),
        Variable(name="relative_humidity", unit="percent", network=wmb),
    ]
    pycds_sesh.add_all(variables)

    yield pycds_sesh
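As a usage illustration, a hypothetical test that consumes the pycds_sesh_with_small_data fixture; the expected network names come from the small data set populated above:

from pycds import Network

def test_small_data_has_three_networks(pycds_sesh_with_small_data):
    sesh = pycds_sesh_with_small_data
    names = {network.name for network in sesh.query(Network).all()}
    assert names == {"MoTIe", "EC", "FLNROW-WMB"}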
In recent years, extreme climate events have negatively impacted many parts of the globe, but due to its already high vulnerability, Sub Saharan Africa has been the theatre for some of the early and more dramatic climate impacts. This has affected most significantly the livelihoods and health of the most deprived people. As observed in the countries concerned by this case study (Malawi, South Africa, Kenya and Zimbabwe), droughts, floods, extreme temperatures have caused successive crop failures, the drying up of water sources and the spread of malaria to locations where it was not endemic (Koelle et al 2010; Wakhungu et al 2010; Zvigadza et al 2010).
"""Tests for sysconfig.""" import unittest import sys import os import shutil import subprocess from copy import copy, deepcopy from test.test_support import run_unittest, TESTFN, unlink, get_attribute import sysconfig from sysconfig import (get_paths, get_platform, get_config_vars, get_path, get_path_names, _INSTALL_SCHEMES, _get_default_scheme, _expand_vars, get_scheme_names, get_config_var) import _osx_support class TestSysConfig(unittest.TestCase): def setUp(self): """Make a copy of sys.path""" super(TestSysConfig, self).setUp() self.sys_path = sys.path[:] self.makefile = None # patching os.uname if hasattr(os, 'uname'): self.uname = os.uname self._uname = os.uname() else: self.uname = None self._uname = None os.uname = self._get_uname # saving the environment self.name = os.name self.platform = sys.platform self.version = sys.version self.sep = os.sep self.join = os.path.join self.isabs = os.path.isabs self.splitdrive = os.path.splitdrive self._config_vars = copy(sysconfig._CONFIG_VARS) self.old_environ = deepcopy(os.environ) def tearDown(self): """Restore sys.path""" sys.path[:] = self.sys_path if self.makefile is not None: os.unlink(self.makefile) self._cleanup_testfn() if self.uname is not None: os.uname = self.uname else: del os.uname os.name = self.name sys.platform = self.platform sys.version = self.version os.sep = self.sep os.path.join = self.join os.path.isabs = self.isabs os.path.splitdrive = self.splitdrive sysconfig._CONFIG_VARS = copy(self._config_vars) for key, value in self.old_environ.items(): if os.environ.get(key) != value: os.environ[key] = value for key in os.environ.keys(): if key not in self.old_environ: del os.environ[key] super(TestSysConfig, self).tearDown() def _set_uname(self, uname): self._uname = uname def _get_uname(self): return self._uname def _cleanup_testfn(self): path = TESTFN if os.path.isfile(path): os.remove(path) elif os.path.isdir(path): shutil.rmtree(path) def test_get_path_names(self): self.assertEqual(get_path_names(), sysconfig._SCHEME_KEYS) def test_get_paths(self): scheme = get_paths() default_scheme = _get_default_scheme() wanted = _expand_vars(default_scheme, None) wanted = wanted.items() wanted.sort() scheme = scheme.items() scheme.sort() self.assertEqual(scheme, wanted) def test_get_path(self): # xxx make real tests here for scheme in _INSTALL_SCHEMES: for name in _INSTALL_SCHEMES[scheme]: res = get_path(name, scheme) def test_get_config_vars(self): cvars = get_config_vars() self.assertIsInstance(cvars, dict) self.assertTrue(cvars) def test_get_platform(self): # windows XP, 32bits os.name = 'nt' sys.version = ('2.4.4 (#71, Oct 18 2006, 08:34:43) ' '[MSC v.1310 32 bit (Intel)]') sys.platform = 'win32' self.assertEqual(get_platform(), 'win32') # windows XP, amd64 os.name = 'nt' sys.version = ('2.4.4 (#71, Oct 18 2006, 08:34:43) ' '[MSC v.1310 32 bit (Amd64)]') sys.platform = 'win32' self.assertEqual(get_platform(), 'win-amd64') # windows XP, itanium os.name = 'nt' sys.version = ('2.4.4 (#71, Oct 18 2006, 08:34:43) ' '[MSC v.1310 32 bit (Itanium)]') sys.platform = 'win32' self.assertEqual(get_platform(), 'win-ia64') # macbook os.name = 'posix' sys.version = ('2.5 (r25:51918, Sep 19 2006, 08:49:13) ' '\n[GCC 4.0.1 (Apple Computer, Inc. 
build 5341)]') sys.platform = 'darwin' self._set_uname(('Darwin', 'macziade', '8.11.1', ('Darwin Kernel Version 8.11.1: ' 'Wed Oct 10 18:23:28 PDT 2007; ' 'root:xnu-792.25.20~1/RELEASE_I386'), 'PowerPC')) _osx_support._remove_original_values(get_config_vars()) get_config_vars()['MACOSX_DEPLOYMENT_TARGET'] = '10.3' get_config_vars()['CFLAGS'] = ('-fno-strict-aliasing -DNDEBUG -g ' '-fwrapv -O3 -Wall -Wstrict-prototypes') maxint = sys.maxint try: sys.maxint = 2147483647 self.assertEqual(get_platform(), 'macosx-10.3-ppc') sys.maxint = 9223372036854775807 self.assertEqual(get_platform(), 'macosx-10.3-ppc64') finally: sys.maxint = maxint self._set_uname(('Darwin', 'macziade', '8.11.1', ('Darwin Kernel Version 8.11.1: ' 'Wed Oct 10 18:23:28 PDT 2007; ' 'root:xnu-792.25.20~1/RELEASE_I386'), 'i386')) _osx_support._remove_original_values(get_config_vars()) get_config_vars()['MACOSX_DEPLOYMENT_TARGET'] = '10.3' get_config_vars()['CFLAGS'] = ('-fno-strict-aliasing -DNDEBUG -g ' '-fwrapv -O3 -Wall -Wstrict-prototypes') maxint = sys.maxint try: sys.maxint = 2147483647 self.assertEqual(get_platform(), 'macosx-10.3-i386') sys.maxint = 9223372036854775807 self.assertEqual(get_platform(), 'macosx-10.3-x86_64') finally: sys.maxint = maxint # macbook with fat binaries (fat, universal or fat64) _osx_support._remove_original_values(get_config_vars()) get_config_vars()['MACOSX_DEPLOYMENT_TARGET'] = '10.4' get_config_vars()['CFLAGS'] = ('-arch ppc -arch i386 -isysroot ' '/Developer/SDKs/MacOSX10.4u.sdk ' '-fno-strict-aliasing -fno-common ' '-dynamic -DNDEBUG -g -O3') self.assertEqual(get_platform(), 'macosx-10.4-fat') _osx_support._remove_original_values(get_config_vars()) get_config_vars()['CFLAGS'] = ('-arch x86_64 -arch i386 -isysroot ' '/Developer/SDKs/MacOSX10.4u.sdk ' '-fno-strict-aliasing -fno-common ' '-dynamic -DNDEBUG -g -O3') self.assertEqual(get_platform(), 'macosx-10.4-intel') _osx_support._remove_original_values(get_config_vars()) get_config_vars()['CFLAGS'] = ('-arch x86_64 -arch ppc -arch i386 -isysroot ' '/Developer/SDKs/MacOSX10.4u.sdk ' '-fno-strict-aliasing -fno-common ' '-dynamic -DNDEBUG -g -O3') self.assertEqual(get_platform(), 'macosx-10.4-fat3') _osx_support._remove_original_values(get_config_vars()) get_config_vars()['CFLAGS'] = ('-arch ppc64 -arch x86_64 -arch ppc -arch i386 -isysroot ' '/Developer/SDKs/MacOSX10.4u.sdk ' '-fno-strict-aliasing -fno-common ' '-dynamic -DNDEBUG -g -O3') self.assertEqual(get_platform(), 'macosx-10.4-universal') _osx_support._remove_original_values(get_config_vars()) get_config_vars()['CFLAGS'] = ('-arch x86_64 -arch ppc64 -isysroot ' '/Developer/SDKs/MacOSX10.4u.sdk ' '-fno-strict-aliasing -fno-common ' '-dynamic -DNDEBUG -g -O3') self.assertEqual(get_platform(), 'macosx-10.4-fat64') for arch in ('ppc', 'i386', 'x86_64', 'ppc64'): _osx_support._remove_original_values(get_config_vars()) get_config_vars()['CFLAGS'] = ('-arch %s -isysroot ' '/Developer/SDKs/MacOSX10.4u.sdk ' '-fno-strict-aliasing -fno-common ' '-dynamic -DNDEBUG -g -O3'%(arch,)) self.assertEqual(get_platform(), 'macosx-10.4-%s'%(arch,)) # linux debian sarge os.name = 'posix' sys.version = ('2.3.5 (#1, Jul 4 2007, 17:28:59) ' '\n[GCC 4.1.2 20061115 (prerelease) (Debian 4.1.1-21)]') sys.platform = 'linux2' self._set_uname(('Linux', 'aglae', '2.6.21.1dedibox-r7', '#1 Mon Apr 30 17:25:38 CEST 2007', 'i686')) self.assertEqual(get_platform(), 'linux-i686') # XXX more platforms to tests here def test_get_config_h_filename(self): config_h = sysconfig.get_config_h_filename() 
self.assertTrue(os.path.isfile(config_h), config_h) def test_get_scheme_names(self): wanted = ('nt', 'nt_user', 'os2', 'os2_home', 'osx_framework_user', 'posix_home', 'posix_prefix', 'posix_user') self.assertEqual(get_scheme_names(), wanted) @unittest.skipIf(sys.platform.startswith('win'), 'Test is not Windows compatible') def test_get_makefile_filename(self): makefile = sysconfig.get_makefile_filename() self.assertTrue(os.path.isfile(makefile), makefile) # Issue 22199 self.assertEqual(sysconfig._get_makefile_filename(), makefile) def test_symlink(self): # Issue 7880 symlink = get_attribute(os, "symlink") def get(python): cmd = [python, '-c', 'import sysconfig; print sysconfig.get_platform()'] p = subprocess.Popen(cmd, stdout=subprocess.PIPE) return p.communicate() real = os.path.realpath(sys.executable) link = os.path.abspath(TESTFN) symlink(real, link) try: self.assertEqual(get(real), get(link)) finally: unlink(link) def test_user_similar(self): # Issue #8759: make sure the posix scheme for the users # is similar to the global posix_prefix one base = get_config_var('base') user = get_config_var('userbase') # the global scheme mirrors the distinction between prefix and # exec-prefix but not the user scheme, so we have to adapt the paths # before comparing (issue #9100) adapt = sys.prefix != sys.exec_prefix for name in ('stdlib', 'platstdlib', 'purelib', 'platlib'): global_path = get_path(name, 'posix_prefix') if adapt: global_path = global_path.replace(sys.exec_prefix, sys.prefix) base = base.replace(sys.exec_prefix, sys.prefix) user_path = get_path(name, 'posix_user') self.assertEqual(user_path, global_path.replace(base, user, 1)) @unittest.skipUnless(sys.platform == "darwin", "test only relevant on MacOSX") def test_platform_in_subprocess(self): my_platform = sysconfig.get_platform() # Test without MACOSX_DEPLOYMENT_TARGET in the environment env = os.environ.copy() if 'MACOSX_DEPLOYMENT_TARGET' in env: del env['MACOSX_DEPLOYMENT_TARGET'] with open('/dev/null', 'w') as devnull_fp: p = subprocess.Popen([ sys.executable, '-c', 'import sysconfig; print(sysconfig.get_platform())', ], stdout=subprocess.PIPE, stderr=devnull_fp, env=env) test_platform = p.communicate()[0].strip() test_platform = test_platform.decode('utf-8') status = p.wait() self.assertEqual(status, 0) self.assertEqual(my_platform, test_platform) # Test with MACOSX_DEPLOYMENT_TARGET in the environment, and # using a value that is unlikely to be the default one. env = os.environ.copy() env['MACOSX_DEPLOYMENT_TARGET'] = '10.1' p = subprocess.Popen([ sys.executable, '-c', 'import sysconfig; print(sysconfig.get_platform())', ], stdout=subprocess.PIPE, stderr=open('/dev/null'), env=env) test_platform = p.communicate()[0].strip() test_platform = test_platform.decode('utf-8') status = p.wait() self.assertEqual(status, 0) self.assertEqual(my_platform, test_platform) def test_main(): run_unittest(TestSysConfig) if __name__ == "__main__": test_main()
In the latest eruption of mass violence in the United States, a dozen people were shot dead and at least 14 others injured Monday morning when a gunman opened fire on military and civilian employees at the Washington Navy Yard, located in southeast Washington DC, just a few miles from the White House and capitol building. Police killed the alleged gunman—identified as a military contractor—in a shootout, bringing the total number of fatalities to 13. This is the largest loss of life in a single incident in the American capital in over 30 years. The attack occurred at a building housing the Naval Sea Systems Command for the US Navy, which is responsible for building, buying and maintaining naval ships and equipment. There are 3,000 employees at the location, which was locked down while police SWAT teams and other uniformed personnel swept the building. There were initial reports of multiple gunmen throughout the day, but by the evening officials said it was likely the act of a lone perpetrator. Late into the day, however, DC police said there was one other possible suspect, described as wearing a military-style uniform. Police identified the dead suspect as 34-year-old Aaron Alexis, a civilian information technology contractor for the Navy from Fort Worth, Texas. According to reports, Alexis had served for four years—from 2007 to 2011—in the Navy Reserve until being discharged for “a pattern of misbehavior,” which officials refused to detail. Alexis reached the rank of Aviation Electrician’s Mate 3rd Class and was decorated with the National Defense Service Medal and the Global War on Terrorism Service Medal, a Navy official told Reuters. Family members reported that he had been sent overseas while in the armed services. Officials said it was unclear whether Alexis was working at the Washington Navy Yard in a civilian capacity at the time of the incident. They also said it was not known how the alleged gunman brought weapons into the building. While military personnel are generally banned from carrying weapons on military installations, it has been reported those with credentials are not routinely checked. According to several media accounts, Alexis had earlier encounters with police. The most recent was in 2010, when Fort Worth police arrested him for discharging a firearm within city limits. Misdemeanor charges were dropped after investigators accepted his story that the weapon had gone off accidentally while Alexis was cleaning it. Reuters reported that Alexis had also been arrested in Seattle in 2004 for shooting out a construction worker’s car tires in an anger-fueled “blackout” triggered by perceived “disrespect,” according to the Seattle Police Department. Alexis told detectives “he was present during the tragic events of September 11th, 2001, and how those events had disturbed him,” according to a police report. Employees at the Navy Yard were given orders to “shelter in place,” as authorities also shut down streets and bridges in the vicinity. Admiral Jonathan W. Greenert, chief of naval operations and a member of the Joint Chiefs of Staff was reportedly evacuated along with his family from his residence on base upon the initial shots being fired. The shooting rampage sent shockwaves throughout Washington DC. Federal buildings in the Washington area were immediately placed on alert, with increased police presence at all locations. Flights were briefly halted at Reagan National Airport, and as many as ten DC public and charter schools were placed on lockdown. 
An announcement by the Senate Sergeant-at-Arms informed employees at the US Capitol building that “out of an abundance of caution,” the legislative headquarters of Congress would be shut down, with no one allowed to enter or leave the premises. This was the worst attack at a US military installation since US Army Major Nidal Hasan opened fire killing 13 soldiers and wounding 31 others at Fort Hood, Texas, in 2009. Hasan said he acted in retaliation for US wars in Muslim countries. In August, he was convicted and sentenced to death by a military jury. Speaking of the regularity of mass killings in America, Janis Orlowski, the chief medical officer at MedStar Washington Hospital Center, where several of the victims from the Navy Yard were admitted, commented, “There’s something evil in our society … when we have these multiple shootings, these multiple injuries. I would like you to put my trauma center out of business,” she added. After decades of mass shootings—from Columbine to Newtown, Connecticut—President Obama responded yesterday with routine platitudes. “We are confronting yet another mass shooting, and today it happened at another military installation, in our nation’s capital,” said President Barack Obama in a brief statement. In fact, the endless military violence perpetrated by Obama and his predecessors is having its inevitable repercussions in America, as hundreds of thousands of soldiers return home after being physically and psychologically damaged from the wars carried out by the US government. A recent study released by the American Medical Association (AMA) showed that since 2005, suicides within the US military have skyrocketed. The report tracks the increase of such fatalities as being up from 10.3 to 11.3 per 100,000 in the base year to being over 16.3 per 100,000 just three years later. The increased rate of depression, suicide, and other forms of mental breakdown has been directly attributed to increased deployments for troops into combat zones.
from __future__ import unicode_literals

import sys

import pytest

from test.integration.misc import populate_dapath, run_da


class TestHelp(object):
    top_level_help = '\n'.join([
        'You can either run assistants with:',
        '\033[1mda [--debug] {create,tweak,prepare,extras} [ASSISTANT [ARGUMENTS]] ...\033[0m',
        '',
        'Where:',
        '\033[1mcreate \033[0mused for creating new projects',
        '\033[1mtweak \033[0mused for working with existing projects',
        '\033[1mprepare \033[0mused for preparing environment for upstream projects',
        '\033[1mextras \033[0mused for performing custom tasks not related to a specific project',
        'You can shorten "create" to "crt", "tweak" to "twk" and "extras" to "extra".',
        '',
        'Or you can run a custom action:',
        '\033[1mda [--debug] [ACTION] [ARGUMENTS]\033[0m',
        '',
        'Available actions:',
        '\033[1mdoc \033[0mDisplay documentation for a DAP package.',
        '\033[1mhelp \033[0mPrint detailed help.',
        '\033[1mpkg \033[0mLets you interact with online DAPI service and your local DAP packages.',
        '\033[1mversion \033[0mPrint version',
        ''])

    no_assistant_help_newlines = '\n'.join([
        'No subassistants available.',
        '',
        'To search DevAssistant Package Index (DAPI) for new assistants,',
        'you can either browse https://dapi.devassistant.org/ or run',
        '',
        '"da pkg search <term>".',
        '',
        'Then you can run',
        '',
        '"da pkg install <DAP-name>"',
        '',
        'to install the desired DevAssistant package (DAP).'
    ])

    no_assistants_help_singleline = '\n'.join([
        ' No subassistants available. To search DevAssistant Package Index (DAPI)',
        ' for new assistants, you can either browse https://dapi.devassistant.org/',
        ' or run "da pkg search <term>". Then you can run "da pkg install <DAP-',
        ' name>" to install the desired DevAssistant package (DAP).'
    ])

    def test_top_level_help(self):
        res = run_da('-h')
        # use repr because of bash formatting chars
        assert repr(res.stdout) == repr(self.top_level_help)

    def test_top_level_without_arguments(self):
        res = run_da('', expect_error=True)
        msg = 'Couldn\'t parse input, displaying help ...\n\n'
        # use repr because of bash formatting chars
        assert repr(res.stdout) == repr(msg + self.top_level_help)

    @pytest.mark.parametrize('alias', [
        # test both assistant primary name and an alias
        'crt',
        'create',
    ])
    def test_category_with_no_assistants_without_arguments(self, alias):
        res = run_da(alias, expect_error=True, expect_stderr=True)
        assert self.no_assistant_help_newlines in res.stderr

    @pytest.mark.parametrize('alias', [
        # test both assistant primary name and an alias
        'crt',
        'create',
    ])
    def test_category_with_no_assistants_help(self, alias):
        res = run_da(alias + ' -h')
        assert self.no_assistants_help_singleline in res.stdout

    def test_didnt_choose_subassistant(self):
        env = populate_dapath({'assistants': {'crt': ['a.yaml', {'a': ['b.yaml']}]}})
        res = env.run_da('create a', expect_error=True, expect_stderr=True)
        assert 'You have to select a subassistant' in res.stderr

    def test_subassistants_help(self):
        env = populate_dapath({'assistants': {'crt': ['a.yaml', {'a': ['b.yaml']}]}})
        res = env.run_da('create a -h')
        assert res.stdout == '\n'.join([
            'usage: create a [-h] {b} ...',
            '',
            'optional arguments:',
            ' -h, --help show this help message and exit',
            '',
            'subassistants:',
            ' Following subassistants will help you with setting up your project.',
            '',
            ' {b}',
            ''])

    def test_didnt_choose_subaction(self):
        res = run_da('pkg', expect_error=True, expect_stderr=True)
        assert 'You have to select a subaction' in res.stderr

    def test_subactions_help(self):
        res = run_da('pkg -h')
        # TODO: seems that subparsers order cannot be influenced in 2.6
        # investigate and possibly improve this test
        if sys.version_info[:2] == (2, 6):
            return
        assert res.stdout == '\n'.join([
            'usage: pkg [-h] {info,install,lint,list,remove,search,uninstall,update} ...',
            '',
            'Lets you interact with online DAPI service and your local DAP packages.',
            '',
            'optional arguments:',
            ' -h, --help show this help message and exit',
            '',
            'subactions:',
            ' This action has following subactions.',
            '',
            ' {info,install,lint,list,remove,search,uninstall,update}',
            ''])
Shammi Kapoor was part of a family that went on to become Bollywood blue blood. For sheer charisma and star quality, this Kapoor was unmatched in his generation. He talks about his famous family, his brand of movies, and what makes him tick.
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('instance', '0038_merge'), ] operations = [ migrations.RenameField( model_name='openstackserver', old_name='progress', new_name='_progress', ), migrations.RenameField( model_name='openstackserver', old_name='status', new_name='_status', ), migrations.AlterField( model_name='openstackserver', name='_progress', field=models.CharField(choices=[('failed', 'type'), ('running', 'type'), ('success', 'type')], default='running', db_column='progress', max_length=7), ), migrations.AlterField( model_name='openstackserver', name='_status', field=models.CharField(choices=[('active', 'type'), ('booted', 'type'), ('new', 'type'), ('provisioning', 'type'), ('ready', 'type'), ('rebooting', 'type'), ('started', 'type'), ('terminated', 'type')], default='new', db_column='status', max_length=20, db_index=True), ), migrations.AlterField( model_name='generallogentry', name='level', field=models.CharField(choices=[('DEBUG', 'Debug'), ('INFO', 'Info'), ('WARNING', 'Warning'), ('ERROR', 'Error'), ('CRITICAL', 'Critical')], default='INFO', max_length=9, db_index=True), ), migrations.AlterField( model_name='instancelogentry', name='level', field=models.CharField(choices=[('DEBUG', 'Debug'), ('INFO', 'Info'), ('WARNING', 'Warning'), ('ERROR', 'Error'), ('CRITICAL', 'Critical')], default='INFO', max_length=9, db_index=True), ), migrations.AlterField( model_name='serverlogentry', name='level', field=models.CharField(choices=[('DEBUG', 'Debug'), ('INFO', 'Info'), ('WARNING', 'Warning'), ('ERROR', 'Error'), ('CRITICAL', 'Critical')], default='INFO', max_length=9, db_index=True), ), ]
How To Successfully Conquer A New Language On A Budget - The National Memo - Smart. Sharp. Funny. Fearless. As telecommunications and travel methods get better, our world continues to get smaller. Whether you’re engaging in international business or just looking to relate to the broader planet around you, the benefits of being a citizen of the world — with the multilingual talents, to boot — will only help differentiate you from the crowd. Not to mention that multilingualism has been said to have a range of benefits including better brain functionality. Luckily, learning a new language is easier than ever thanks to technology. Right now, Rocket Languages is offering up introductory language training combo packs that’ll get you speaking the most popular languages around for just $49, over 80 percent off the regular price. Just pick a language, even if you have no experience at all, and Rocket will get you started. Each package in this award-winning system includes dozens of lessons and hundreds of hours of training content, helping you grow from rudimentary beginner to fluent speaker at your own pace. With on-the-go audio lessons for any iOS or Android powered device, all you need to do is spend a few minutes a day training to start building useable language skills quickly. You’ll go from knowing a handful of words to assembling simple sentences to understanding and answering more complex exchanges. Choose from lesson 1 & 2 bundles covering Spanish, French, Italian, German, Japanese and Chinese, each for just $49 (a $299 value) while this offer lasts.
from PyQt4 import QtGui, Qt class Question(QtGui.QFrame): def __init__(self, qnumber, question, card, *args): super(QtGui.QFrame, self).__init__(*args) print "qnumber:", qnumber print "question:", question self.card = card self.main_layout = QtGui.QGridLayout() self.answers_layout = QtGui.QVBoxLayout() #row = cur.execute("select valore from domande where id=%d" % qid).fetchone() title = QtGui.QLabel("Domanda %d" % qnumber) title.setFont(QtGui.QFont("Arial",9,75)) self.main_layout.addWidget(title, 0, 0) self.question = QtGui.QLabel(question["name"]) self.main_layout.addWidget(self.question, 1, 0) self.setFixedHeight(200) rows = [(qnumber,i,a) for i,a in enumerate(question["answers"])] # cur = self.conn.cursor() # rows = cur.execute("select id_domanda,id,valore from risposte where id_domanda=%d" % qid).fetchall() # cur.close() self.showButtons(rows) self.setLayout(self.main_layout) self.main_layout.addLayout(self.answers_layout, 2, 0) def updateValue(self): pass def showButtons(self, rows): pass
Hello, my name is Jessica. I am 25 years old, I am from Manchester, and I started my teaching career in Barcelona. I have a very positive, friendly attitude and I am punctual and professional at all times. Teaching is my passion and I enjoy seeing my students learn the English language. I am a CELTA-qualified native English teacher with 2 years' experience teaching children, adults, company classes and business classes. I have worked in a private school and an English academy. I have experience teaching exam preparation as well as conversation classes.
# -*- coding: utf-8 -*- """ Created on Sun Feb 26 13:20:51 2017 @author: Thautwarm """ def EntityWrapper(func): def _func(**kw2): attrs=kw2['attrs'] types=kw2['types'] return func(attrs,types) return _func def InitWithTypeMapSet(Name,attrs,types): Unit = lambda x,y:\ """ TypeMap.put("%s","%s");"""%(x,y) body=\ """ public static Map<String,String> %s(){ Map<String,String> TypeMap=new HashMap<String,String>(); %s return TypeMap; } """%("getTypeMap",'\n'.join(Unit(attr_i,type_i) for attr_i,type_i in zip(attrs,types) )) return body @EntityWrapper def toSQLValuesSet(attrs,types): tosql=[attr_i if type_i!="Date" else '(new Timestamp (%s.getTime()))'%attr_i for attr_i,type_i in zip(attrs,types)] body=\ """ public String toSQLValues(){ return %s; } """%("""+","+""".join(tosql)) return body @EntityWrapper def toSQLColumnsSet(attrs,types): body=\ """ public static String toSQLColumns(){ return "%s"; } """%(','.join(attrs)) return body
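# A small usage sketch appended for illustration; the attribute/type lists below are
# invented examples, not taken from any real entity definition.
if __name__ == '__main__':
    attrs = ["id", "name", "birthday"]
    types = ["int", "String", "Date"]
    # static Java method mapping each attribute name to its type
    print(InitWithTypeMapSet("Person", attrs, types))
    # instance method joining the attribute values for an SQL VALUES clause
    # (Date attributes are wrapped in new Timestamp(...))
    print(toSQLValuesSet(attrs=attrs, types=types))
    # static method returning the comma-separated column list
    print(toSQLColumnsSet(attrs=attrs, types=types))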
I am a total and complete novice when it comes to knitting. It's never too late to try something new, so I thought I would learn! My stepmother loves to knit. It relaxes her at the end of a long day. I figured I'd learn too so that we'd have a few more things in common! The problem is, I live in Rochester, and she lives in Pittsburgh - so I couldn't learn from her. I had to find another way to score lessons, and I found them at Kelley's Quality Sewing in NW Rochester. They regularly host two-hour classes at their store, and most of them welcome beginners like me. I chose to sit in on a knitting class this week to accomplish my goal, and I picked it up fast! If you don't know how to get started, check out these photos. I learned early on that you're almost making a "cradle" with your hands (like my instructor Heralda did) when you start a new row of knitting. It all starts with a slipknot. I already got started on a scarf I plan on wearing this winter! Lord knows it'll creep up on us again soon! Wanna learn a new craft? Kelley's is definitely the spot to go! They were more than patient with me, and I couldn't be more grateful. Check out their upcoming classes here! Having someone by my side while I learned ensured I didn't pick up bad habits! See you guys with a new craft in June!
#
# me: Will Martin
# date: 3.12.2015
# license: BSD
#
"""
Run this program to look at the amazon-meta data interactively.
"""
# future
from __future__ import print_function, division

# standard
import os

# works on unix machines
FILENAME = os.path.abspath(r"../amazon-meta/amazon-meta.txt")

# Parameters for the interactive prompt
PROMPT = "[amazon-meta] : "


# data printing routine
def print_till_blank(amfile):
    """
    Amazon data entry printer.
    in: amazon-meta-file
    out: None
    """
    # print lines until a blank is found.
    while True:
        line = amfile.readline().strip()
        print(line)
        if line == "":
            break
    return None


# interactive data viewer
print("Running the interactive data viewer.\n", end='\n')

# open the amazon database
with open(FILENAME, 'r') as amfile:
    # Print the header
    print_till_blank(amfile)
    print("\nKeep pressing enter to view data:")

    # start the prompt
    while True:
        # Run the prompt
        print(PROMPT, end="")
        ui = raw_input()
        if ui == "":
            print_till_blank(amfile)
        else:
            break
The Nike Air Fear of God colab is one of the cleanest ball silhouettes we’ve seen on the hardwood in recent times, with Jerry Lorenzo pulling out all the stops to drop perhaps the best basketball sneaker in 2018 (sorry, Kyrie). We’ve now got a second look at the silhouette with the ‘Shoot Around’ remix, a theme taken from the players’ inclination to switch out shoes during their pre-game warmups. The ‘Shoot Around’ design removes the laces cage for an even more minimalist rendition. Get your hands on the Nike Air Fear of God ‘Shoot Around’ on December 15 for $300.
# =============================================================================== # Copyright 2013 Jake Ross # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # =============================================================================== # ============= enthought library imports ======================= from traits.api import Str # ============= standard library imports ======================== # ============= local library imports ========================== from pychron.loggable import Loggable class AnalysisHealth(Loggable): analysis_type = Str # ============= EOF =============================================
Matthew: I love free flow juice!!! Side note: Matthew is really naughty. After going to the toilet (note: he came with us to the ladies' room) he grabbed a sanitary bag and kept playing with it... Oh no!!! People who don't know Matthew often assume he is 4 or 5 years old, so even at dim sum we have been charged quite a bit of extra tea money... ^_^"
#!/usr/bin/env python """ ListeningSocketHandler ====================== A python logging handler. logging.handlers.SocketHandler is a TCP Socket client that sends log records to a tcp server. This class is the opposite. When a TCP client connects (e.g. telnet or netcat), log records are streamed through the connection. """ import logging import sys import socket import threading # Workaround for http://bugs.python.org/issue14308 # http://stackoverflow.com/questions/13193278/understand-python-threading-bug threading._DummyThread._Thread__stop = lambda x: 42 class ListeningSocketHandler(logging.Handler): def __init__(self, port=0, ipv6=False): super(ListeningSocketHandler, self).__init__() self.port = port self.ipv6 = ipv6 self.clients = set() if self.ipv6: self.socket = socket.socket(socket.AF_INET6) self.socket.bind(("::", self.port)) else: self.socket = socket.socket(socket.AF_INET) self.socket.bind(("0.0.0.0", self.port)) self.socket.listen(5) print ("ListeningSocketHandler on port: {}".format(self.socket.getsockname()[1])) def start_accepting(self): while True: conn, addr = self.socket.accept() self.clients.add(conn) self._accept_thread = threading.Thread(target=start_accepting, args=(self,)) self._accept_thread.daemon = True self._accept_thread.start() def emit(self, record): closed_clients = set() for client in self.clients: try: try: # Python3 message = bytes(record.getMessage() + "\r\n", 'UTF-8') except TypeError: # Python2 message = bytes(record.getMessage() + "\r\n").encode('UTF-8') client.sendall(message) except socket.error: closed_clients.add(client) for client in closed_clients: client.close() # just to be sure self.clients.remove(client) def getsockname(self): return self.socket.getsockname()
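# A minimal demo of the handler above (the logger name and sleep interval are arbitrary
# choices for illustration): run it, note the port printed at start-up, then connect
# with e.g. `nc localhost <port>` to watch the records stream in.
if __name__ == "__main__":
    import time
    demo_log = logging.getLogger("listening_socket_demo")
    demo_log.setLevel(logging.INFO)
    demo_log.addHandler(ListeningSocketHandler())  # port=0 lets the OS pick a free port
    while True:
        demo_log.info("tick %s", time.time())
        time.sleep(1.0)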
XP boasts the creation of XP-DEUS, the first wireless metal detector as well as the first completely telescopic S-shaped stem. Clearly, at first glance, the XP Deus receives a couple of points for its innovative metal detector. It is now time to take a look at whether the device lives up to its full potential. For the most part, the XP Deus resembles most other metal detectors. It consists of a coil, stem, and control box. I like the quality of this product, I know it doesn’t come cheap but it’s made for the metal detector enthusiasts. There is the very obvious lack of the wire wrapped around the lower part of the stem. The audio headphones that are also a part of this piece of equipment do not require wires either which is a great addition to the mobility when you are moving in fields. The remote control and the headphones receive signals by means of a digital radio link. The search coil has the elements to process the signals. This is then digitised and analyzed at the source via an ultra-miniature digital circuit. Once the information is processed, it is sent to the remote control and the headphones with the help of radio link. The XP DEUS can easily be folded due to the design of the S-stem. This way it can be stored without needing to be dismantled. Once collapsed, it also takes up less space when stored. The rubber handle also makes it more comfortable to manage. The shape and the structure of the telescopic stem make it much easier to move the metal detector around. It also has that very important factor that is required for manoeuvrable devices – weight. This is where the lack of wires really comes in handy. Overall, the XP Deus weighs a mere 987 grams. When you take away the control, it stands at 887 grams. This is definitely something to take into consideration if you are planning on using it for long periods of time. The XP DEUS remote control has six buttons. These are the Power, Menu, Ground Balance (GB), Change Program, and Pinpoint buttons. Situated above these buttons is the LED screen. The screen gives you access to the time, name of active program, battery level of the coil and remote, and analog scale of target conductivity. It also shows ground mineralization index, mineralization strength, level of ground effect corrections, profile, and detection frequency. Using the Ground Balance Button, you have access to four different modes – Manual, Pumping, Tracking, and Beach ON/OFF. These are not to be confused with the four non-motion modes also present with this device. Non-motion Audio Disc: distinguishes between “good” targets and ferrous ones. Can be used for both general use as well as mineralised ground. The XP Deus is not a metal detector that you can simply turn on and begin using. You are going to need to go through the manual carefully and figure out how to use each setting. The discrimination, reactivity, iron volume, audio response, and frequency can all be altered with the MENU button. This can be done by using the buttons for increasing and decreasing. The frequency can be manipulated from 4 kHz to 18 kHz. When compared with its predecessors, the XP Deus does have a few superior qualities, wireless technology notwithstanding. One such feature is the Tonal Discrimination. This allows the user to manipulate sizes and tone pitches up to four different conductivity ranges. This way, the user can reject or accept the targets based on the conductivities that you have decided upon. Another advantage of the XP Deus is with its audio headphones. 
Due to the wireless nature of this metal detector, the signals are sent to both the audio headphones and the remote control in real time. This means that you can work with just the audio headphones alone. In addition to being weatherproof, these headphones actually allow you to change the functions with the headphones themselves. The Reactivity option of this metal detector is also incredibly useful. It allows you to alter the width of the coil’s electromagnetic field depending on the target that you are hoping to find. There is a high-speed sweep option available with the XP Deus. This should be used with caution, however, as it can cause you to miss out on certain targets. When used at a typical speed, however, there is an impressive distinction between ferrous and non-ferrous items. Particularly if run at a reactivity of 2 or 3. Overall, the XP Deus seems to get glowing recommendations. In an age where everything is going wireless, your metal detector might as well too. If you do not mind the price tag, this is certainly not a metal detector that you should miss out on. While there are not too many complaints about this machine, it is certainly not something that should be wielded by a beginner. This is a technologically advanced metal detector and it shows with the options and the controls. If you are willing to be patient and go over the manual with a fine toothed comb, you will eventually get the hang of it. While there is no doubt that this a truly useful detector, you will need to get used to it first. For an advanced metal detector that is a cut above the rest, the XP Deus is definitely a good purchase. I’m one of the founders of this blog. A 32 yr old from Lincolnshire & proud father of 3 beautiful children. My first metal detector was a garret ace 150 but I am currently using the Garrett ace 400i. My best find was a roman coin. Feel free to ask me any questions. When will we have one with ground penetrating radar and a nine in. screen?
# -*- coding: utf-8 -*- """ garage.html_utils Utility functions to handle html-text conversions. * FIXME: need to reorganize, refactor and delete unnecessary code. * created: 2008-06-22 kevin chan <[email protected]> * updated: 2014-11-21 kchan """ from __future__ import (absolute_import, unicode_literals) import six import re from htmlentitydefs import codepoint2name, name2codepoint from markdown import markdown from textile import textile # functions to escape html special characters def html_escape(text): """ Escape reserved html characters within text. """ htmlchars = { "&": "&amp;", '"': "&quot;", "'": "&apos;", ">": "&gt;", "<": "&lt;", } if isinstance(text, six.text_type): text = ''.join([htmlchars.get(c, c) for c in text]) return text def html_entities(u): """ Convert non-ascii characters to old-school html entities. """ result = [] for c in u: if ord(c) < 128: result.append(c) else: try: result.append('&%s;' % codepoint2name[ord(c)]) except KeyError: result.append("&#%s;" % ord(c)) return ''.join(result) def escape(txt): """ Escapes html reserved characters (<>'"&) and convert non-ascii text to html entities. * To escape only html reserved characters (<>'"&), use `html_escape`. """ return html_escape(html_entities(txt)) def unescape(text): """ Removes HTML or XML character references and entities from a text string. * Note: does not strip html tags (use `strip_tags` instead for that). :Info: http://effbot.org/zone/re-sub.htm#unescape-html :param text: The HTML (or XML) source text. :return: The plain text, as a Unicode string, if necessary. """ def fixup(m): text = m.group(0) if text[:2] == "&#": # character reference try: if text[:3] == "&#x": return unichr(int(text[3:-1], 16)) else: return unichr(int(text[2:-1])) except ValueError: pass else: # named entity try: text = unichr(name2codepoint[text[1:-1]]) except KeyError: pass return text # leave as is return re.sub("&#?\w+;", fixup, text) def strip_tags(html_txt): """ Strip tags from html text (uses strip_tags from django.utils.html). * also unescapes html entities * fall back on using `re.sub` if django's `strip_tags` is not importable for some reason. 
""" try: from django.utils.html import strip_tags as _strip_tags except ImportError: stripped = re.sub(r'<[^>]*?>', '', html_txt) else: stripped = _strip_tags(html_txt) return unescape(stripped) # functions for converting plain text content to html # * available conversion methods: # * no conversion # * markdown # * textile # * simple conversion of line breaks # * visual editor (using wysiwyg editor like TinyMCE) NO_CONVERSION = 1 MARKDOWN_CONVERSION = 2 TEXTILE_CONVERSION = 3 SIMPLE_CONVERSION = 4 VISUAL_EDITOR = 5 CONVERSION_CHOICES = ( (NO_CONVERSION, 'None'), (MARKDOWN_CONVERSION, 'Markdown'), (TEXTILE_CONVERSION, 'Textile'), (SIMPLE_CONVERSION, 'Simple (Convert Line Breaks)'), (VISUAL_EDITOR, 'Visual (WYSIWYG) Editor'), ) CONVERSION_METHODS = ( (NO_CONVERSION, 'none'), (MARKDOWN_CONVERSION, 'markdown'), (TEXTILE_CONVERSION, 'textile'), (SIMPLE_CONVERSION, 'markdown'), (VISUAL_EDITOR, 'visual') ) def txt2html(txt, method): try: assert txt is not None and len(txt) > 0 if method == MARKDOWN_CONVERSION: txt = markdown(txt) elif method == TEXTILE_CONVERSION: txt = textile(txt) elif method == SIMPLE_CONVERSION: txt = markdown(txt) else: # NO_CONVERSION pass except (TypeError, AssertionError): pass return txt def get_cvt_method(name): """ Get conversion method "code" corresponding to name """ c = { 'none': NO_CONVERSION, 'markdown': MARKDOWN_CONVERSION, 'textile': TEXTILE_CONVERSION } try: method = c.get(name.lower(), NO_CONVERSION) except (TypeError, AttributeError): method = NO_CONVERSION return method def get_cvt_method_name(code): """ Get conversion method name corresponding to "code" """ if code > 0: code -= 1 try: codenum, name = CONVERSION_METHODS[code] except: codenum, name = CONVERSION_METHODS[NO_CONVERSION] return name def to_html(txt, cvt_method='markdown'): """ Convert text block to html * cvt_method is name of method (markdown, textile, or none) * cf. txt2html where method is the conversion "code" (number) """ return txt2html(txt, get_cvt_method(cvt_method)) # html-to-text utility function def html_to_text(html): """ Utility function to convert html content to plain text. * uses Django `strip_tags` function to convert html to text; * multiple blank lines are reduced to 2; * strips beginning and ending white space. * does not perform any kind of formatting or structuring to the plain text result. :param html: html content :returns: plain text content after conversion """ from garage.text_utils import tidy_txt txt = tidy_txt(strip_tags(html)) lines = [] for line in txt.splitlines(): s = line.strip() if len(s) > 0: lines.append(s) txt = '\n\n'.join(lines) txt = '%s\n' % txt return txt
Koda is a sweet 17 month old male mantle with natural ears. He is a very energetic puppy who will require guidance, training, routine and reinforcement. Koda sometimes responds to redirection if you can get his attention. He is highly treat motivated which really helps to get his attention. Koda does need to learn some manners. He does not know what personal space is and he likes to jump when he is excited. His foster home is working on his manners. Koda does well with the 2 male Danes and 1 female Dane in his foster home. He can be a little over bearing sometimes with the other Danes. He will bark at them and sometimes nudge them to try to get them to play. Koda is very excited to meet new dogs and can be a little much for them right away. He does have a high prey drive with small animals such as squirrels and cats. He seems to fine with small dogs though. Koda is crate trained. He will run for the crate if there is a treat involved. He’s doing much better on a leash, but does get very excited when he sees other dogs and people. He is all puppy and so curious about anything new. Training will be a condition of his adoption. Donations are always welcome, and help us to continue to be able to bring Danes that need our help into our rescue program. Your donations help cover vet costs, transportation, etc.
# # This file is part of Plinth. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # """ Forms for radicale module. """ from django import forms from django.utils.translation import ugettext_lazy as _ from plinth.forms import ServiceForm CHOICES = [ ('owner_only', _('Only the owner of a calendar/addressbook can view or ' 'make changes.')), ('owner_write', _('Any user can view any calendar/addressbook, but only ' 'the owner can make changes.')), ('authenticated', _('Any user can view or make changes to any ' 'calendar/addressbook.')), ] class RadicaleForm(ServiceForm): """Specialized configuration form for radicale service.""" access_rights = forms.ChoiceField(choices=CHOICES, required=True, widget=forms.RadioSelect())
Bangalore, December 27 2016: Tata Docomo, the unified telecom brand of Tata Teleservices Limited (TTL), is now offering customers across Karnataka & Kerala an all new bouquet of “Unlimited Calling Plans” with unlimited STD and Local calls priced as low as Rs. 148. The unlimited calling plan provides pre-pay customers unrestrained voice bandwidth both within and outside of the network coupled with 3G data (on select packs) at the best available market rates.
"""Bika's browser views are based on this one, for a nice set of utilities. """ from Products.CMFCore.utils import getToolByName from AccessControl import ClassSecurityInfo from Products.CMFPlone.i18nl10n import ulocalized_time from Products.Five.browser import BrowserView from bika.lims import logger from zope.cachedescriptors.property import Lazy as lazy_property from zope.i18n import translate import plone, json class BrowserView(BrowserView): security = ClassSecurityInfo() logger = logger def __init__(self, context, request): super(BrowserView, self).__init__(context, request) security.declarePublic('ulocalized_time') def ulocalized_time(self, time, long_format=None, time_only=None): if time: # no printing times if they were not specified in inputs if time.second() + time.minute() + time.hour() == 0: long_format = False time_str = ulocalized_time(time, long_format, time_only, self.context, 'bika', self.request) return time_str @lazy_property def portal(self): return getToolByName(self.context, 'portal_url').getPortalObject() @lazy_property def portal_url(self): return self.portal.absolute_url().split("?")[0] @lazy_property def portal_catalog(self): return getToolByName(self.context, 'portal_catalog') @lazy_property def reference_catalog(self): return getToolByName(self.context, 'reference_catalog') @lazy_property def bika_analysis_catalog(self): return getToolByName(self.context, 'bika_analysis_catalog') @lazy_property def bika_setup_catalog(self): return getToolByName(self.context, 'bika_setup_catalog') @lazy_property def bika_catalog(self): return getToolByName(self.context, 'bika_catalog') @lazy_property def portal_membership(self): return getToolByName(self.context, 'portal_membership') @lazy_property def portal_groups(self): return getToolByName(self.context, 'portal_groups') @lazy_property def portal_workflow(self): return getToolByName(self.context, 'portal_workflow') @lazy_property def checkPermission(self, perm, obj): return self.portal_membership.checkPermission(perm, obj) def user_fullname(self, userid): member = self.portal_membership.getMemberById(userid) if member is None: return userid member_fullname = member.getProperty('fullname') c = self.portal_catalog(portal_type = 'Contact', getUsername = userid) contact_fullname = c[0].getObject().getFullname() if c else None return contact_fullname or member_fullname or userid def user_email(self, userid): member = self.portal_membership.getMemberById(userid) if member is None: return userid member_email = member.getProperty('email') c = self.portal_catalog(portal_type = 'Contact', getUsername = userid) contact_email = c[0].getObject().getEmailAddress() if c else None return contact_email or member_email or '' def python_date_format(self, long_format=None, time_only=False): """This convert bika domain date format msgstrs to Python strftime format strings, by the same rules as ulocalized_time. XXX i18nl10n.py may change, and that is where this code is taken from. 
""" # get msgid msgid = long_format and 'date_format_long' or 'date_format_short' if time_only: msgid = 'time_format' # get the formatstring formatstring = translate(msgid, domain='bika', mapping={}, context=self.request) if formatstring is None or formatstring.startswith('date_') or formatstring.startswith('time_'): self.logger.error("bika/%s/%s could not be translated" % (self.request.get('LANGUAGE'), msgid)) # msg catalog was not able to translate this msgids # use default setting properties = getToolByName(self.context, 'portal_properties').site_properties if long_format: format = properties.localLongTimeFormat else: if time_only: format = properties.localTimeOnlyFormat else: format = properties.localTimeFormat return format return formatstring.replace(r"${", '%').replace('}', '') @lazy_property def date_format_long(self): fmt = self.python_date_format(long_format=1) if fmt == "date_format_long": fmt = "%Y-%m-%d %I:%M %p" return fmt @lazy_property def date_format_short(self): fmt = self.python_date_format() if fmt == "date_format_short": fmt = "%Y-%m-%d" return fmt @lazy_property def time_format(self): fmt = self.python_date_format(time_only=True) if fmt == "time_format": fmt = "%I:%M %p" return fmt
Cor, this is the sort of look that will turn grown men giddy! Bianca's vampy Valentino dress happens to be floaty and feminine at the exact same time. Like Little Red Riding Hood's sexier and sultrier older sister. Plus it's lace and sheer. All the better to see you with, my dear! Maddison reminds us of a spring chicken: young, bright and fluffy. She's got her legs out too (which takes GUTS in January). And hang on - is that a bomber jacket? Not the floral type preferred by Mary Berry or the butch bouncer number worn by the Mitchell brothers. This is the Dior version, darling, and it's quite different. There's a whiff of the pretty Parisian schoolgirl about Alma's white-rollneck-and-blue-tunic look, paired with her butter-wouldn't-melt ponytail. Then you glance down at her wrists. Are they CUFFS? Not so innocent after all, are we?
import argparse import json import pifacedigitalio as pfdio import zmq PINS = ( 0b00000001, 0b00000010, 0b00000100, 0b00001000, 0b00010000, 0b00100000, 0b01000000, 0b10000000 ) def parse_args(): """ Specify and parse command line arguments. """ p = argparse.ArgumentParser() p.add_argument("pub_uri") p.add_argument("--prefix", default="INPUT") return p.parse_args() def set_up_pub_socket(uri): """ Create ZeroMQ PUB socket and bind it to the specified uri. """ context = zmq.Context() socket = context.socket(zmq.PUB) socket.bind(uri) return socket def input_changed(event): """ Handler for input changes. Forms a dictionary containing event information and PUBlishes it using the global ZeroMQ PUB socket. """ input_port = event.chip.input_port.value data = { "state": {i: bool(input_port & PINS[i]) for i, _ in enumerate(PINS)} } socket.send("%s%s" % (args.prefix, json.dumps(data))) if __name__ == "__main__": args = parse_args() socket = set_up_pub_socket(args.pub_uri) listener = pfdio.InputEventListener() for i, _ in enumerate(PINS): listener.register(i, pfdio.IODIR_BOTH, input_changed) listener.activate()
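A matching consumer, meant to run as a separate process, might look like the sketch below. The endpoint URI is an assumed example, and the subscriber has to strip the topic prefix before parsing the JSON payload, because the publisher concatenates the prefix and the payload into a single message.

import json

import zmq

PREFIX = "INPUT"  # must match the --prefix the publisher was started with

context = zmq.Context()
sub = context.socket(zmq.SUB)
sub.connect("tcp://127.0.0.1:5556")  # assumed pub_uri used when launching the publisher
sub.setsockopt(zmq.SUBSCRIBE, PREFIX.encode())

while True:
    message = sub.recv()
    payload = json.loads(message[len(PREFIX):].decode("utf-8"))
    print(payload["state"])  # e.g. {"0": true, "1": false, ...}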
Workaround is to find the problematic recipe (the recipe id is mentioned in the debug logs as per above), go to the Beaker web ui for that recipe, click through to the job which contains it, and edit the whiteboard to delete any non-ASCII characters. In this case we replaced the U+3000 with a normal space character and beaker-watchdog was able to proceed as usual. We should just switch it over to lxml. It looks to be one of the last remaining usages of xmltramp we have left.
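For reference, the character cleanup described above can be done with a one-liner along these lines (the whiteboard string here is only an illustration; in practice the edit was made by hand through the web UI):

# -*- coding: utf-8 -*-
whiteboard = u"provision\u3000smoke test"  # contains U+3000 IDEOGRAPHIC SPACE
cleaned = u"".join(c if ord(c) < 128 else u" " for c in whiteboard)
print(cleaned)  # "provision smoke test"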
#!/usr/bin/python """ Generate encrypted messages wrapped in a self-decrypting python script usage: python enc.py password > out.py where password is the encryption password and out.py is the message/script file to decrypt use: python out.py password this will print the message to stdout. """ import sys, random def encrypt(key, msg): encrypted = [] for i, c in enumerate(msg): key_c = ord(key[i % len(key)])-32 msg_c = ord(c)-32 encrypted.append(chr(((msg_c + key_c) % 95)+32)) return ''.join(encrypted) def decrypt(key, enc): msg=[] for i, c in enumerate(enc): key_c = ord(key[i % len(key)])-32 enc_c = ord(c)-32 msg.append(chr(((enc_c - key_c) % 95)+32)) return ''.join(msg) def make_randstr(msg_len): sl = [] r = random.SystemRandom() for i in range(msg_len): sl.append(chr(r.randint(32,126))) return ''.join(sl) if __name__ == '__main__': msg = sys.stdin.read().replace("\n","\\n").replace("\t","\\t") randstr = make_randstr(len(msg)) key = encrypt(sys.argv[1], randstr) encrypted = encrypt(key, msg) decrypted = decrypt(key, encrypted) if not msg == decrypted: raise Exception("Encryption Fail") print """ #!/usr/bin/python import sys def encrypt(key, msg): encrypted = [] for i, c in enumerate(msg): key_c = ord(key[i % len(key)])-32 msg_c = ord(c)-32 encrypted.append(chr(((msg_c + key_c) % 95)+32)) return ''.join(encrypted) def decrypt(key, enc): msg=[] for i, c in enumerate(enc): key_c = ord(key[i % len(key)])-32 enc_c = ord(c)-32 msg.append(chr(((enc_c - key_c) % 95)+32)) return ''.join(msg) if __name__ == '__main__':""" print "\trandstr = ", repr(randstr) print "\tenc = ", repr(encrypted) print "\tkey = encrypt(sys.argv[1], randstr)" print "\tdecrypted = decrypt(key, enc).replace(\"\\\\n\",\"\\n\").replace(\"\\\\t\",\"\\t\")" print "\tprint decrypted"
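# A quick round-trip check of the primitives above, shown as an interactive-session
# sketch so it does not interfere with the script's generated output; the password
# and message are arbitrary examples.
#
#   >>> msg = "attack at dawn"
#   >>> key = encrypt("hunter2", make_randstr(len(msg)))
#   >>> decrypt(key, encrypt(key, msg)) == msg
#   True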
Combines performance and style with ultra-portability. Dell has unveiled its ultraportable XPS 13 laptop, the most compact 13.3-inch Ultrabook featuring an edge-to-edge glass, near “frameless” display, all-day battery life, and the latest innovative technology for a superb overall user experience. Starting at 1.36kg, and less than a quarter-inch at its thinnest point, the XPS 13 combines performance-oriented thoughtful design with the latest Intel technology, such as Rapid Start and Smart Connecti, to enable users to be productive, connected and responsive anywhere. The latest and most mobile laptop in Dell’s portfolio, the XPS 13 is part of Dell’s high-end XPS brand.The XPS 13 delivers second-generation Intel Core i5 or i7 processors, Intel HD 3000 graphics and a bright high definition WLED 300-nit display for outstanding viewing experiences, packaged in an elegantly-designed, ultraportable laptop. For users who store movies, music and photos, the XPS 13 has 128GB and 256GB solid state hard drive options, plus an additional 100GB of cloud storage through Dell DataSafe for seamless content backup and sharing at no additional cost. Purposefully designed and artfully built, the XPS 13 maximizes the Ultrabook experience for users: edge-to-edge display with hardened Gorilla Glass; a full-size backlit keyboard with a large glass touchpad with integrated buttons and multi-gestural support; and up to eight hours, fifty-three minutes of battery life. All in a package starting at 1.36 kilograms. The 13.3-inch high definition display with slim bezel fits in a body size similar to the form factor of an 11-inch product, making it the most compact Ultrabook available. (Other 13.3-inch laptops offer the same viewing area but with up to a 15 percent larger footprint.) The carbon fiber composite base extends the design process, offering a premium visual appeal, and is lighter and cooler to the touch than aluminum. Keeping connected and getting online quickly is now a reality. The XPS 13 is one of the first Ultrabooks to feature Intel Smart Connect technology, which wakes periodically to detect known networks and update calendar and email. With solid state drives and Intel Rapid Start technology, the XPS 13 boots in seconds, giving customers the performance of a laptop with the instant gratification experience of a smartphone. Dell will also integrate location awareness via Skyhook and Google Places shortly after launch. Because the ultrathin and compact XPS 13 is ideal for use on the go, staying connected will be even easier for those who are first to buy one.
__problem_title__ = "Cyclic numbers" __problem_url___ = "https://projecteuler.net/problem=358" __problem_description__ = "A with digits has a very interesting property: When it is multiplied " \ "by 1, 2, 3, 4, ... , all the products have exactly the same digits, " \ "in the same order, but rotated in a circular fashion! The smallest " \ "cyclic number is the 6-digit number 142857 : 142857 × 1 = 142857 " \ "142857 × 2 = 285714 142857 × 3 = 428571 142857 × 4 = 571428 142857 × " \ "5 = 714285 142857 × 6 = 857142 The next cyclic number is " \ "0588235294117647 with 16 digits : 0588235294117647 × 1 = " \ "0588235294117647 0588235294117647 × 2 = 1176470588235294 " \ "0588235294117647 × 3 = 1764705882352941 ... 0588235294117647 × 16 = " \ "9411764705882352 Note that for cyclic numbers, leading zeros are " \ "important. There is only one cyclic number for which, the eleven " \ "leftmost digits are 00000000137 and the five rightmost digits are " \ "56789 (i.e., it has the form 00000000137...56789 with an unknown " \ "number of digits in the middle). Find the sum of all its digits." import timeit class Solution(): @staticmethod def solution1(): pass @staticmethod def time_solutions(): setup = 'from __main__ import Solution' print('Solution 1:', timeit.timeit('Solution.solution1()', setup=setup, number=1)) if __name__ == '__main__': s = Solution() print(s.solution1()) s.time_solutions()
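A quick illustration of the cyclic property described in the problem statement, using the 142857 example: every product of 142857 by 1 through 6 is a rotation of the original digit string.

n = 142857
doubled = str(n) * 2  # contains every rotation of "142857" as a substring
for k in range(1, 7):
    assert str(n * k) in doubled, (k, n * k)
print("142857 is cyclic for multipliers 1-6")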
Home East Side Re-Rides Consignment Leathers and Motorcycle Gear How Does Consignment Work? Let’s pretend this is your jacket. Ain’t it beautiful? Let’s sell it! You loved this thing, there’s nothing wrong with this thing — but now you want something smaller, or bigger, or brown, or with less zippers, or with more zippers, or with more armor. Or you ordered it online and it came out of the package shaped like a human being who is completely dissimilar from you. Whatever the reason, here’s this thing, and you’re not wearing it. You can always try putting a classified ad up yourself. Sometimes that works really well! But sometimes people are (ahem) less polite than they could be. And sometimes it’s hard for someone to travel a distance to a stranger’s house and try on their clothing in front of them. Awkward. Once a month, at month-end (actually just after month-end, but you get the picture) we shake the database, which takes the prices each Thing sold for, apply the consignment percentage, and gives us a total amount that we’re going to send you. Basically, all the preceding month’s sales turn into a huge pile of personal letters itemizing which Things sold that month from your collection, and how much we’re sending you for each, and the cheques to go with the letters. Those cheques and letters go into the Big Accordion-File-of-Picking-Up, and if you don’t drop by to get it that month, we stuff it in an envelope and send it to you by Canada Post. But what if I want my Thing back? But what if the Thing doesn’t sell? We call you and ask you to reclaim your item. We’ve tried selling it, and given up because we see people are not interested. When I bring in the Thing, how do we determine the selling price? It’s sort of a balancing act between getting you what it’s worth, and offering it at a price which will make buyers happy to purchase it and take it home. On our side, we consider what the item would have cost new, what a similar item is now fetching new, what that type of item is fetching currently in the North American secondhand markets, how old the thing is, how much wear there is, or how many major or minor faults it may have, how collectable the brand is, whether it’s vintage-just-old or vintage-must-have-it, and whether that class of items is much in demand (cafe racer-style jackets) or not at all in demand (90s-style lambskin jackets). We also may know from experience (and statistics!) that a particular thing in this store sells best at a certain best range. On your side, you think about whether you want to hold out for the highest return, or whether you want to price a thing attractively so its delighted new owners will take it home as fast as possible (which gets you your consignment cheque sooner). Does it sound complicated? Yeah, it kind of is.
"""Python wrappers around TensorFlow ops. This file is MACHINE GENERATED! Do not edit. """ import collections as _collections from google.protobuf import text_format as _text_format from tensorflow.core.framework import op_def_pb2 as _op_def_pb2 # Needed to trigger the call to _set_call_cpp_shape_fn. from tensorflow.python.framework import common_shapes as _common_shapes from tensorflow.python.framework import op_def_registry as _op_def_registry from tensorflow.python.framework import ops as _ops from tensorflow.python.framework import op_def_library as _op_def_library def gather_tree(step_ids, parent_ids, sequence_length, name=None): r"""Calculates the full beams from the per-step ids and parent beam ids. This op implements the following mathematical equations: ```python TODO(ebrevdo): fill in ``` Args: step_ids: A `Tensor`. Must be one of the following types: `int32`. `[max_time, batch_size, beam_width]`. parent_ids: A `Tensor`. Must have the same type as `step_ids`. `[max_time, batch_size, beam_width]`. sequence_length: A `Tensor`. Must have the same type as `step_ids`. `[batch_size, beam_width]`. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `step_ids`. `[max_time, batch_size, beam_width]`. """ result = _op_def_lib.apply_op("GatherTree", step_ids=step_ids, parent_ids=parent_ids, sequence_length=sequence_length, name=name) return result _ops.RegisterShape("GatherTree")(None) def _InitOpDefLibrary(): op_list = _op_def_pb2.OpList() _text_format.Merge(_InitOpDefLibrary.op_list_ascii, op_list) _op_def_registry.register_op_list(op_list) op_def_lib = _op_def_library.OpDefLibrary() op_def_lib.add_op_list(op_list) return op_def_lib _InitOpDefLibrary.op_list_ascii = """op { name: "GatherTree" input_arg { name: "step_ids" type_attr: "T" } input_arg { name: "parent_ids" type_attr: "T" } input_arg { name: "sequence_length" type_attr: "T" } output_arg { name: "beams" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_INT32 } } } } """ _op_def_lib = _InitOpDefLibrary()
An anti-SB 1070 protester with a bullhorn walks past a line of Phoenix police, who have blocked the street in front of Sheriff Joe Arpaio's office. No counter-protesters have shown up, though a few downtown office workers have stopped by to watch. Most have been reluctant to comment. "This is just craziness," one woman said.
from django.contrib.auth.models import User from rest_framework import serializers from rest_framework_jwt.settings import api_settings from .models import EmailVerification, modelresolver class UserSerializer(serializers.ModelSerializer): class Meta: model = User fields = ('id', 'username', 'first_name', 'last_name', 'email', 'verified', 'is_staff', 'is_superuser', 'is_active', 'date_joined') read_only_fields = ('is_staff', 'is_superuser', 'is_active', 'date_joined',) verified = serializers.SerializerMethodField() def get_verified(self, obj): try: return obj.verification.verified except EmailVerification.DoesNotExist: return True class UserCreateSerializer(serializers.ModelSerializer): jwt_token = serializers.CharField(read_only=True) class Meta: model = User fields = ('id', 'username', 'first_name', 'last_name', 'email', 'password', 'jwt_token') extra_kwargs = {'password': {'write_only': True}} def create(self, validated_data): user = self.Meta.model( email=validated_data['email'], username=validated_data['username'], first_name=validated_data['first_name'], last_name=validated_data['last_name'] ) user.set_password(validated_data['password']) user.save() # XXX should be jwt / token agnostic! jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER payload = jwt_payload_handler(user) token = jwt_encode_handler(payload) user.jwt_token = token return user class OrganizationSerializer(serializers.ModelSerializer): class Meta: model = modelresolver('Organization') fields = ("id", "name") class PasswordResetSerializer(serializers.Serializer): class Meta: model = modelresolver('Organization') token = serializers.CharField() password = serializers.CharField() class InviteSerializer(serializers.Serializer): handle = serializers.CharField() strict = serializers.BooleanField() role = serializers.IntegerField() class JoinSerializer(serializers.Serializer): JOIN_ACCEPT = 1 JOIN_REJECT = 2 token = serializers.CharField() action = serializers.ChoiceField(choices=(JOIN_ACCEPT, JOIN_REJECT), default=JOIN_ACCEPT)
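A short usage sketch for the create flow above; the field values are made up, and in the real application this would normally run inside a DRF view rather than standalone.

serializer = UserCreateSerializer(data={
    'username': 'jdoe',
    'email': '[email protected]',
    'first_name': 'Jane',
    'last_name': 'Doe',
    'password': 's3cret',
})
serializer.is_valid(raise_exception=True)
user = serializer.save()   # create() hashes the password and attaches a JWT
print(user.jwt_token)      # token built from JWT_PAYLOAD_HANDLER / JWT_ENCODE_HANDLER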
Ten brain-damaged veterans have been discharged from their special therapeutic group homes because Congress has failed to renew their rehabilitation program, The Wall Street Journal reported Friday. The program expires October 6, but 53 veterans have been told by their Veterans Affairs office case workers that they'll have to leave their private homes by September 15 if Congress doesn't act. Meanwhile, the 10 discharged vets were sent "to nursing homes, state veterans homes or to live with family members," according to The Journal. The five-year pilot program was created to see if veterans with brain injuries would improve more with intense therapy. The VA hasn't assessed how successful the program is, but "all indications are that the satisfaction is high among the veterans with the services they're receiving, and they seem to be making gains," Sharon Benedict, the program's manager, told The Journal. Democrats and Republicans agree that the program should be renewed; they just haven't figured out how to do it. This is just one of several veterans health care problems coming to the forefront, in addition to the widespread problem of long waiting lists and patients dying before their first appointments. NPR reported Thursday that the VA and the Pentagon are worried about the rise of painkiller addiction among troops, which contributes to homelessness and suicide rates. Part of the problem is that soldiers are prescribed the painkillers by military doctors. And while Congress is dragging its feet on renewing the group-home program, Rep. Jeff Miller, the chairman of the House VA committee, introduced a bill that would reduce the red tape veterans face when trying to access mental health services, according to The Washington Post.
import logging import socket from email.mime.text import MIMEText from smtplib import SMTP, SMTPAuthenticationError from torrt.base_notifier import BaseNotifier from torrt.utils import NotifierClassesRegistry LOGGER = logging.getLogger(__name__) class EmailNotifier(BaseNotifier): alias = 'email' def __init__(self, email, host='localhost', port=25, user=None, password=None, use_tls=False, sender=None): self.email = email self.sender = sender self.host = host self.port = int(port) self.user = user self.password = password self.use_tls = str(use_tls) == 'True' self.connection = self.get_connection() def get_connection(self): try: connection = SMTP(self.host, self.port) connection.ehlo() except socket.error as e: LOGGER.error('Could not connect to SMTP server: %s' % e) return if self.use_tls: try: connection.starttls() connection.ehlo() except Exception as e: LOGGER.error(e) return if self.user and self.password: try: connection.login(self.user, self.password) except SMTPAuthenticationError as e: LOGGER.error(e) return return connection def send_message(self, msg): self.connection.sendmail(self.sender, [self.email], msg) def test_configuration(self): return bool(self.connection) def make_message(self, torrent_data): text = '''The following torrents were updated:\n%s\n\nBest regards,\ntorrt.''' \ % '\n'.join(map(lambda t: t['name'], torrent_data.values())) msg = MIMEText(text) msg['Subject'] = 'New torrents were added to download queue.' msg['From'] = self.sender msg['To'] = self.email LOGGER.info('Notification message was sent to user %s' % self.email) return msg.as_string() NotifierClassesRegistry.add(EmailNotifier)
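A configuration sketch for the notifier above; the SMTP host, credentials and torrent-data structure are placeholders, and torrt would normally instantiate the class from its own configuration rather than by hand.

notifier = EmailNotifier(
    email='[email protected]', sender='[email protected]',
    host='smtp.example.com', port=587,
    user='torrt', password='secret', use_tls=True)

if notifier.test_configuration():  # True only if the SMTP connection was established
    torrent_data = {'abc123': {'name': 'Some.Show.S01E02.720p'}}  # hypothetical structure
    notifier.send_message(notifier.make_message(torrent_data))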
Getting Reviews from Customers, Clients or Patients. – Tools to find your customer's level of satisfaction. Get More Customer Feedback and Online Reviews. You just provide a list of their email addresses and their first and last names. We will slowly send out email (to a few at a time) and ask them to give us feedback for the question: “How are we doing?” In that email we will include a link to a webpage that is branded to your business. That webpage is setup to make it easy for your customer to give their rating and leave any comments for you. It can be done in less than a minute! You will be sent an email showing you the name, email address and what each customer said and how they rated you. Then, all you need to do is to email those customers with a link to your Google page and ask them to post their review to the online review site of your choice. You could make it easier for them by including their original comments so that they can copy and paste them into Google. I guarantee that you will be both surprised and pleased at the information you will receive from your customers. It is simple, really. All you need to do to get more customer feedback and online reviews is to ask more customers how they feel about your products or services. All customer feedback can make us better. We have been doing this successfully for others for over 6 years. We have a safe, proven system. I am confident that you will be pleased with the results. Why am I offering to do this at such reasonable price? Because when you see the results, I believe that you will want me to continue asking your customers for feedback.
# -*- coding: utf-8 -*- # (c) 2015 Pedro M. Baeza # License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html from openerp import models, fields, api, exceptions, _ class AccountMoveMakeNetting(models.TransientModel): _name = "account.move.make.netting" journal = fields.Many2one( comodel_name="account.journal", required=True, domain="[('type', '=', 'general')]") move_lines = fields.Many2many(comodel_name="account.move.line") balance = fields.Float(readonly=True) balance_type = fields.Selection( selection=[('pay', 'To pay'), ('receive', 'To receive')], readonly=True) @api.model def default_get(self, fields): if len(self.env.context.get('active_ids', [])) < 2: raise exceptions.ValidationError( _("You should compensate at least 2 journal entries.")) move_lines = self.env['account.move.line'].browse( self.env.context['active_ids']) if (any(x not in ('payable', 'receivable') for x in move_lines.mapped('account_id.type'))): raise exceptions.ValidationError( _("All entries must have a receivable or payable account")) if any(move_lines.mapped('reconcile_id')): raise exceptions.ValidationError( _("All entries mustn't been reconciled")) partner_id = None for move in move_lines: if (not move.partner_id or ( move.partner_id != partner_id and partner_id is not None)): raise exceptions.ValidationError( _("All entries should have a partner and the partner must " "be the same for all.")) partner_id = move.partner_id res = super(AccountMoveMakeNetting, self).default_get(fields) res['move_lines'] = [(6, 0, move_lines.ids)] balance = (sum(move_lines.mapped('debit')) - sum(move_lines.mapped('credit'))) res['balance'] = abs(balance) res['balance_type'] = 'pay' if balance < 0 else 'receive' return res @api.multi def button_compensate(self): self.ensure_one() # Create account move move = self.env['account.move'].create( { 'ref': _('AR/AP netting'), 'journal_id': self.journal.id, }) # Group amounts by account account_groups = self.move_lines.read_group( [('id', 'in', self.move_lines.ids)], ['account_id', 'debit', 'credit'], ['account_id']) debtors = [] creditors = [] total_debtors = 0 total_creditors = 0 for account_group in account_groups: balance = account_group['debit'] - account_group['credit'] group_vals = { 'account_id': account_group['account_id'][0], 'balance': abs(balance), } if balance > 0: debtors.append(group_vals) total_debtors += balance else: creditors.append(group_vals) total_creditors += abs(balance) # Create move lines move_line_model = self.env['account.move.line'] netting_amount = min(total_creditors, total_debtors) field_map = {1: 'debit', 0: 'credit'} for i, group in enumerate([debtors, creditors]): available_amount = netting_amount for account_group in group: if account_group['balance'] > available_amount: amount = available_amount else: amount = account_group['balance'] move_line_vals = { field_map[i]: amount, 'move_id': move.id, 'partner_id': self.move_lines[0].partner_id.id, 'date': move.date, 'period_id': move.period_id.id, 'journal_id': move.journal_id.id, 'name': move.ref, 'account_id': account_group['account_id'], } move_line_model.create(move_line_vals) available_amount -= account_group['balance'] if available_amount <= 0: break # Make reconciliation for move_line in move.line_id: to_reconcile = move_line + self.move_lines.filtered( lambda x: x.account_id == move_line.account_id) to_reconcile.reconcile_partial() # Open created move action = self.env.ref('account.action_move_journal_line').read()[0] action['view_mode'] = 'form' del action['views'] del action['view_id'] 
action['res_id'] = move.id return action
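As a worked example of the netting above (amounts invented): if the selected lines leave a 600 debit balance on receivable accounts and a 450 credit balance on payable accounts, then netting_amount = min(600, 450) = 450. The wizard books 450 of credit against the receivable account(s) and 450 of debit against the payable account(s), partially reconciles those lines with the originals, and the remaining 150 stays open as an amount to receive.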
DUI News: Denver Nuggets’ Ty Lawson Arrested for 2nd DUI in 6 Mos. > Criminal Law > DUI > DUI News > DUI News: Denver Nuggets’ Ty Lawson Arrested for 2nd DUI in 6 Mos. Ty Lawson, point guard for the Denver Nuggets, was reportedly arrested for drunk driving yesterday morning in Los Angeles. According to court records, this is Lawson’s second DUI arrest since January 2015. Boulder DUI attorneys take a look at some recent DUI news involving Ty Lawson, point guard for the Denver Nuggets, being arrested for DUI in LA. Contact us for the best DUI defense. The police report for Lawson’s recent DUI arrest in Los Angeles explains that he was pulled over after a California Highway Patrol officer witnessed him speeding down Route 101 at about 2 a.m. During the DUI stop, the officer alleges that Lawson exhibited various indications of impairment. Although details have not been disclosed regarding whether Lawson submitted to BAC testing, he was reportedly arrested just after 3:40 a.m. Lawson spent about five hours in custody and was subsequently released on $5,000 bail. His first court date for this case has been set for August 4th. Lawson’s second DUI arrest could impact the proceedings of his first DUI case. In fact, there is a court hearing for the first DUI case set for this Friday in Denver. Mr. Lawson posted a $1,500 bond in the Denver case earlier this year and is currently on pretrial supervision… The conditions of his bond include no alcohol consumption and monitored sobriety. According to the police report for this arrest, Lawson was pulled over after an officer witnessed him speeding and driving carelessly on the night of Friday Jan. 23rd. Following both arrests, the Denver Nuggets stated that it was aware of the incident and that it would not comment on it. Share your opinions about this DUI news with us on Facebook & Google+. If you or someone you love is facing DUI charges, it is time to contact the experienced Adams County and Boulder DUI attorneys at Peter Loyd Weber & Associates. Our attorneys are dedicated to aggressively defending the rights of the accused while helping them obtain the best possible outcomes to their cases. We have a proven record of success and excellence in helping our clients favorably resolve their criminal cases, and we are ready to put our experience, knowledge and skills to work defending you.
# Collect full name (Ad_Soyad), age (Yas) and height (Boy) for the father (baba),
# mother (anne), grandfather (dede) and grandmother (nine), printing each list.
babaListe = list()
Ad_Soyad = input('Ad_Soyad:')
babaListe.insert(1, Ad_Soyad)
Yas = input('Yas:')
babaListe.insert(2, Yas)
Boy = input('Boy:')
babaListe.insert(3, Boy)
print(babaListe)
print('======================')

anneListe = list()
Ad_Soyad = input('Ad_Soyad:')
anneListe.insert(1, Ad_Soyad)
Yas = input('Yas:')
anneListe.insert(2, Yas)
Boy = input('Boy:')
anneListe.insert(3, Boy)
print(anneListe)
print('======================')

dedeListe = list()
Ad_Soyad = input('Ad_Soyad:')
dedeListe.insert(1, Ad_Soyad)
Yas = input('Yas:')
dedeListe.insert(2, Yas)
Boy = input('Boy:')
dedeListe.insert(3, Boy)
print(dedeListe)
print('======================')

nineListe = list()
Ad_Soyad = input('Ad_Soyad:')
nineListe.insert(1, Ad_Soyad)
Yas = input('Yas:')
nineListe.insert(2, Yas)
Boy = input('Boy:')
nineListe.insert(3, Boy)
print(nineListe)
print('=======================')
After two years of arduous research and planning, San Francisco’s Fine Arts Museum has announced the launch date of this year’s most highly anticipated exhibition. The exhibition, entitled Contemporary Muslim Fashions—due to open on 22 September—is the first of its kind in scale and scope and will explore the style evolution of Muslim fashion around the globe, from the abayas of the Gulf to the turbans widely worn in America and Europe. Although the exhibition’s intention is more than likely to showcase the lives and dress of past and contemporary Muslims (and possibly even an attempt at celebrating Muslims and Islam as a whole), it raises a question about the premise behind it that neither the museum nor the exhibition’s curators have clearly answered. On its website, the museum speaks of “an increased awareness of Muslim dress as an important segment of the global fashion industry”. When asked about the intention behind the project in an interview with the French publication Les Inrocks, the exhibition’s curators, Jill D’Alessandro and Laura Camerlengo (neither of whom is Arab or Muslim), cited the large Muslim population living in the area surrounding the museum as a qualifier for the institution to host the exhibition. But most telling was what followed: an attestation to what is likely their biggest motivator. D’Alessandro and Camerlengo go on to discuss the fact that “modest fashion” has recently become one of the largest markets globally. They cite the Thomson Reuters and DinarStandard report detailing the 18% share of purchasing power modest fashion has amassed as of this year. This information is neither new nor surprising. At this point, the extent of Muslims’ purchasing power is known to everyone around the globe. The number of international brands centring their focus on Middle Eastern and Muslim customers has steadily increased over the last few years. 2017 saw the launch of Nike’s first hijab, and a few years before that, Dolce & Gabbana—and a few other luxury fashion houses—turned their attention towards the Gulf by creating exclusive collections in celebration of Ramadan and Eid. Despite the blatantly capitalistic intentions behind these projects, this shift has been largely celebrated. And in a sense, we shouldn’t necessarily reject these attempts to include our region—and religion—in global conversations. At the end of the day, the purchasing power our region holds isn’t something that anyone can realistically ignore or push to the back burner. However, it’s important that we steer clear of false narratives and not fall into the tropes of capitalism masked behind a face of ‘inclusivity’. When it comes to the San Francisco Fine Arts Museum’s exhibition—perhaps offering a wider audience a perspective on Muslim style could prove beneficial in a country where a Muslim ban is in effect. Simultaneously, however, it’s difficult as an Arab Muslim to ignore the fact that it is just as much a capitalistic opportunity the institution is cashing in on—the same way others have done.
from core.himesis import Himesis, HimesisPreConditionPatternLHS import uuid class HUnitR03b_ConnectedLHS(HimesisPreConditionPatternLHS): def __init__(self): """ Creates the himesis graph representing the AToM3 model HUnitR03b_ConnectedLHS """ # Flag this instance as compiled now self.is_compiled = True super(HUnitR03b_ConnectedLHS, self).__init__(name='HUnitR03b_ConnectedLHS', num_nodes=0, edges=[]) # Add the edges self.add_edges([]) # Set the graph attributes self["mm__"] = ['MT_pre__FamiliesToPersonsMM', 'MoTifRule'] self["MT_constraint__"] = """return True""" self["name"] = """""" self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'HUnitR03b_ConnectedLHS') self["equations"] = [] # Set the node attributes # match class State(3.1.m.0State) node self.add_node() self.vs[0]["MT_pre__attr1"] = """return True""" self.vs[0]["MT_label__"] = """1""" self.vs[0]["mm__"] = """MT_pre__State""" self.vs[0]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'3.1.m.0State') # match class State(3.1.m.1State) node self.add_node() self.vs[1]["MT_pre__attr1"] = """return True""" self.vs[1]["MT_label__"] = """2""" self.vs[1]["mm__"] = """MT_pre__State""" self.vs[1]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'3.1.m.1State') # match association State--states-->Statenode self.add_node() self.vs[2]["MT_pre__attr1"] = """return attr_value == "states" """ self.vs[2]["MT_label__"] = """3""" self.vs[2]["mm__"] = """MT_pre__directLink_S""" self.vs[2]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'3.1.m.0Stateassoc23.1.m.1State') # Add the edges self.add_edges([ (0,2), # match class State(3.1.m.0State) -> association states (2,1), # association State -> match class State(3.1.m.1State) ]) # define evaluation methods for each match class. def eval_attr11(self, attr_value, this): return True def eval_attr12(self, attr_value, this): return True # define evaluation methods for each match association. def eval_attr13(self, attr_value, this): return attr_value == "states" def constraint(self, PreNode, graph): return True
According to a report released on Tuesday, the state of Delaware will embark on a new Medicaid policy in an attempt to treat hepatitis C patients. Since the Obama administration, and amid a wave of lawsuits, most states are considered to be under the same pressure as Delaware. For instance, Harvard Law School's Center for Health Law and Policy Innovation had threatened litigation in response to policies that limit the treatment of sick patients with expensive new medications. Meanwhile, many US citizens are waiting for their respective states to pass legislation that would cover the high cost of hepatitis C treatment. Delaware is not an isolated case; but there is a long way from legislation to actual treatment. Hepatitis C patients are advised to seek treatment as quickly as possible in order to prevent further liver decay - and by far the quickest method of getting hepatitis C medicines at a considerable discount is by using Fix Hep C Buyers Club. Here is Dr. Freeman's contact, where you can inquire about your treatment options and how to import hepatitis C medicines at low cost. Over three million Americans are likely to be infected with hepatitis C, a bloodborne virus spread via blood and body-fluid contact such as blood, semen and other body fluids, IV drug abusers sharing needles, or someone using tainted needles. Hepatitis C used to be the most common type of hepatitis acquired through blood transfusions until a test for it became available in the 1980s. Hepatitis C causes inflammation of the liver, resulting in liver damage that can lead to cancer. It also commonly leads to chronic liver inflammation and slowly damages the liver over a long period of time before leading to cirrhosis of the liver, which means scar tissue replacing normal, healthy tissue, blocking the flow of blood through the liver and preventing it from working as it should. The Delaware issue can be likened to that of New Jersey in that evidence of liver damage is still required before treatment is approved. The new policy relaxes the severity restrictions by July 1 and revises the requirements for patients with a history of drug abuse, bringing the program in line with the medical standards recommended for the treatment and prevention of such infectious diseases. Further changes will apply to all eligible hepatitis C-infected patients on Medicaid in Delaware; the new treatment policy takes effect on Jan. 1, 2018. The change, announced on Tuesday at Harvard, was confirmed by a spokeswoman for the Delaware Department of Health and Human Services, who said that the state will definitely change its policy. Similar policy changes have already been adopted by other states. As it stands, an advisory committee in Pennsylvania has recommended that the state's Medicaid program treat all such patients; that decision is yet to be confirmed. For all other patients, it is best to seek help using Fix Hep C Buyers Club. Contact us today and get your medications within a month.
# -*- coding: utf-8 -*- # # This file is part of Glances. # # Copyright (C) 2015 Nicolargo <[email protected]> # # Glances is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Glances is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. """ I am your father... ...for all Glances exports IF. """ # Import system libs # None... # Import Glances lib from glances.core.glances_logging import logger class GlancesExport(object): """Main class for Glances export IF.""" def __init__(self, config=None, args=None): """Init the export class.""" # Export name (= module name without glances_) self.export_name = self.__class__.__module__[len('glances_'):] logger.debug("Init export interface %s" % self.export_name) # Init the config & args self.config = config self.args = args # By default export is disable # Had to be set to True in the __init__ class of child self.export_enable = False def exit(self): """Close the export module.""" logger.debug("Finalise export interface %s" % self.export_name) def plugins_to_export(self): """Return the list of plugins to export.""" return ['cpu', 'percpu', 'load', 'mem', 'memswap', 'network', 'diskio', 'fs', 'processcount', 'ip', 'system', 'uptime', 'sensors', 'docker'] def get_item_key(self, item): """Return the value of the item 'key'.""" try: ret = item[item['key']] except KeyError: logger.error("No 'key' available in {0}".format(item)) if isinstance(ret, list): return ret[0] else: return ret def parse_tags(self): """ Parses some tags into a dict""" if self.tags: try: self.tags = dict([x.split(':') for x in self.tags.split(',')]) except ValueError: # one of the keyvalue pairs was missing logger.info('invalid tags passed: %s', self.tags) self.tags = {} else: self.tags = {} def update(self, stats): """Update stats to a server. The method builds two lists: names and values and calls the export method to export the stats. Be aware that CSV export overwrite this class and use a specific one. """ if not self.export_enable: return False # Get all the stats & limits all_stats = stats.getAllExports() all_limits = stats.getAllLimits() # Get the plugins list plugins = stats.getAllPlugins() # Loop over available plugins for i, plugin in enumerate(plugins): if plugin in self.plugins_to_export(): if isinstance(all_stats[i], dict): all_stats[i].update(all_limits[i]) elif isinstance(all_stats[i], list): all_stats[i] += all_limits[i] else: continue export_names, export_values = self.__build_export(all_stats[i]) self.export(plugin, export_names, export_values) return True def __build_export(self, stats): """Build the export lists.""" export_names = [] export_values = [] if isinstance(stats, dict): # Stats is a dict # Is there a key ? 
if 'key' in list(stats.keys()): pre_key = '{0}.'.format(stats[stats['key']]) else: pre_key = '' # Walk through the dict try: iteritems = stats.iteritems() except AttributeError: iteritems = stats.items() for key, value in iteritems: if isinstance(value, list): try: value = value[0] except IndexError: value = '' if isinstance(value, dict): item_names, item_values = self.__build_export(value) item_names = [pre_key + key.lower() + str(i) for i in item_names] export_names += item_names export_values += item_values else: export_names.append(pre_key + key.lower()) export_values.append(value) elif isinstance(stats, list): # Stats is a list (of dict) # Recursive loop through the list for item in stats: item_names, item_values = self.__build_export(item) export_names += item_names export_values += item_values return export_names, export_values
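The base class above only defines the plumbing; concrete exporters subclass it and implement export(), which receives one plugin's stats as parallel name/value lists. Below is a minimal, hypothetical child exporter sketch, assuming the usual Glances layout where exporters live in a module named glances_<name>; the import path and the idea of a logging-only exporter are assumptions, not taken from the Glances source.

# Hypothetical child exporter following the pattern described above: it would live in
# a module named glances_<something> so export_name is derived correctly, enables
# itself in __init__, and implements export(name, columns, points).
from glances.core.glances_logging import logger
from glances.exports.glances_export import GlancesExport  # import path is an assumption


class Export(GlancesExport):
    """Toy exporter that just logs the stats it receives."""

    def __init__(self, config=None, args=None):
        GlancesExport.__init__(self, config=config, args=args)
        # Child classes are responsible for switching the export on.
        self.export_enable = True

    def export(self, name, columns, points):
        """Log each <plugin>.<field> = <value> pair built by update()."""
        for key, value in zip(columns, points):
            logger.info("{0}.{1} = {2}".format(name, key, value))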
Series 6 - Episode 3 - Reality series. The guest list for Dawn's blessing is confirmed, but who is invited? Ester attempts to heal old wounds. Nermina hosts a party to remember.
import time, sys, os from mote import detectAllPossible from moteCache import MoteCache from mote import Mote import traceback from connectToMote import connectToMote from moteX11 import MoteX11, Rect class MoteMouseProcessor: def __init__(self, connectedMote): self.mote = connectedMote def processMouse(self): # print "processAndUpdateMouse", self.mote, self.mote.connected, self.mote.irMode if self.mote == None or not self.mote.irMode != None: return None, None # Use standard bar with 2 dots # Must receive at least 2 dots to be valid (and change mouse pos) # if self.mote.isIrModeFull(): print "Unimplemented" elif self.mote.isIrModeExt(): print "Unimplemented" elif self.mote.isIrModeBasic(): # the wiimote can report up to 4 points # we'll to convert the two brightest into "base" position so # we can generate a mouse x,y from them pointList = self.mote.extractNormalizedPoints() if len(pointList) > 0: # print "points:", pointList #pointSet = pointList[-1] for pointSet in pointList: # self.updateMinMaxEdge(pointSet) if len(pointSet) > 1: # just use the frst two points (we're assuming they're the brightest) # We're going to require at least two valid led coordinates (i.e. not 0) if not (pointSet[0][0] == 0.0 or pointSet[1][0] == 0.0 or pointSet[0][1] == 0.0 or pointSet[1][1] == 0.0 or pointSet[0][0] == 1.0 or pointSet[1][0] == 1.0 or pointSet[0][1] == 1.0 or pointSet[1][1] == 1.0): midpoint = ( (pointSet[0][0] + pointSet[1][0]) / 2. , (pointSet[0][1] + pointSet[1][1]) / 2. ) scale = 1.4 scaledMidpoint = ( ((midpoint[0]-.5) * scale) + 0.5, ((midpoint[1]-.5) * scale) + 0.5) # print "Setting mouse pos:", scaledMidpoint #self.moteX11.setMousePosNormalized(1.0 - scaledMidpoint[0], scaledMidpoint[1]) return (1.0-scaledMidpoint[0], scaledMidpoint[1]) """ pt = self.mote.extractLastNormalizedPoint() if pt != None: scale = 1.4 scaledPoint = ( ((pt[0]-.5) * scale) + 0.5, ((pt[1]-.5) * scale) + 0.5) self.moteX11.setMousePosNormalized(1.0 - scaledPoint[0], scaledPoint[1]) """ else: # basic print "Unhandled ir mode:", self.mote.irMode print "DEBUG:", self.mote, self.mote.connected raise Exception("Unhandled ir mode") return None, None class MoteMouse(MoteMouseProcessor): def __init__(self, connectedMote, moteX11): MoteMouseProcessor.__init__(self, connectedMote) # self.mote = connectedMote # done in parent class self.moteX11 = moteX11 def processAndUpdateMouse(self): x, y = MoteMouseProcessor.processMouse(self) if x != None: self.moteX11.setMousePosNormalized(x, y) if __name__ == "__main__": mote = connectToMote() x = MoteX11() try: x.connectToX() moteMouse = MoteMouse(mote,x) while 1: moteMouse.processAndUpdateMouse() time.sleep(0.0001) except: traceback.print_exc() finally: mote.disconnect() if mote.readThread != None: print "Exiting, joining thread" mote.readThread.join()
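The interesting part of processMouse is the mapping from two normalized IR dots to a mouse position: take the midpoint of the two brightest points, scale it about the centre of the camera image, and mirror the X axis. A small stand-alone sketch of that mapping follows; the sample points and the 1.4 scale factor are illustrative only.

# Stand-alone sketch of the midpoint-and-scale mapping used in processMouse above.
def ir_points_to_mouse(p0, p1, scale=1.4):
    """Map two normalized IR points (x, y in 0..1) to a normalized mouse position."""
    # Reject readings stuck on the sensor border, as the original code does.
    for x, y in (p0, p1):
        if x in (0.0, 1.0) or y in (0.0, 1.0):
            return None
    mid_x = (p0[0] + p1[0]) / 2.0
    mid_y = (p0[1] + p1[1]) / 2.0
    # Scale around the centre so small movements cover the whole screen.
    scaled_x = (mid_x - 0.5) * scale + 0.5
    scaled_y = (mid_y - 0.5) * scale + 0.5
    # X is mirrored because the camera looks back at the sensor bar.
    return 1.0 - scaled_x, scaled_y

# Example: two dots slightly left of centre map to a mouse position right of centre.
print(ir_points_to_mouse((0.40, 0.55), (0.48, 0.53)))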
Finding a chiropractor in Mountlake Terrace can be overwhelming, but your search doesn’t have to be. If you are looking for a chiropractor in Mountlake Terrace, you have options. A referral from your primary care doctor or specialist should point you toward a reputable Mountlake Terrace chiropractor. A doctor should only offer recommendations that they would use for themselves and their family members. This can help you narrow down your search. If you have special criteria, such as location or technique, let your doctor know that too. Once you’ve finished asking around, compare how many people have recommended the same Mountlake Terrace chiropractor. Chances are that is a great place to focus. Your chiropractor can treat mechanical issues of the musculoskeletal system. However, your Mountlake Terrace chiropractor can’t treat all pain associated with these areas. Severe arthritis, osteoporosis, broken or fractured bones, infected bones, and bone tumor-related pain are a few conditions your chiropractor may not treat. Keep in mind you might not be aware of what you prefer or dislike until after you’ve had your first few treatments. You should be comfortable expressing yourself. Your Mountlake Terrace chiropractor should listen to your wishes. Skill and technique do improve with time, so you might prefer an experienced Mountlake Terrace chiropractor. A few years or longer, in addition to their education, is a decent amount of time for a chiropractor to hone their skills. You should get along well with your Mountlake Terrace chiropractor and feel comfortable around them. This includes speaking to them about your care as well as how they touch you. If you don’t feel at ease, you should consider finding a new chiropractor. Mountlake Terrace is a suburban city in Snohomish County, Washington, United States. It lies on the southern border of the county, adjacent to Shoreline and Lynnwood, and is 13 miles (21 km) north of Seattle. As of the 2010 census, the city had a population of 19,909 people. Mountlake Terrace was founded in 1949 by suburban developers on the site of a disused airfield. Within five years, the community had grown to over 5,000 people, and it was incorporated as a city in 1954 to provide municipal services. In recent decades, Mountlake Terrace has begun development of its own downtown with mixed-use buildings and large employers in lieu of remaining a bedroom community for Seattle commuters. Interstate 5 runs north–south through the city and connects Mountlake Terrace to Seattle and Everett.
from uuid import uuid1 from .exception import OMDataStoreError, OMRequiredHashlessIRIError class IriGenerator(object): """An :class:`~oldman.iri.IriGenerator` object generates the IRIs of some new :class:`~oldman.resource.Resource` objects. """ def __init__(self): pass def generate(self, **kwargs): """Generates an IRI. :return: Unique IRI (unicode string). """ raise NotImplementedError() class PrefixedUUIDIriGenerator(IriGenerator): """Uses a prefix, a fragment and a unique UUID1 number to generate IRIs. Recommended generator because UUID1 is robust and fast (no DB access). :param prefix: IRI prefix. :param fragment: IRI fragment to append to the hash-less IRI. Defaults to `None`. """ def __init__(self, prefix, fragment=None): self._prefix = prefix self._fragment = fragment def generate(self, **kwargs): """See :func:`oldman.iri.IriGenerator.generate`.""" partial_iri = _skolemize(prefix=self._prefix) if self._fragment is not None: return u"%s#%s" % (partial_iri, self._fragment) return partial_iri class BlankNodeIriGenerator(PrefixedUUIDIriGenerator): """Generates skolem IRIs that denote blank nodes. :param hostname: Defaults to `"localhost"`. """ def __init__(self, hostname=u"localhost"): prefix = u"http://%s/.well-known/genid/" % hostname PrefixedUUIDIriGenerator.__init__(self, prefix=prefix) class IncrementalIriGenerator(IriGenerator): """Generates IRIs with short numbers. Beautiful but **slow** in concurrent settings. The number generation implies a critical section and a sequence of two SPARQL requests, which represents a significant bottleneck. :param prefix: IRI prefix. :param graph: :class:`rdflib.Graph` object where to store the counter. :param class_iri: IRI of the RDFS class of which new :class:`~oldman.resource.Resource` objects are instance of. Usually corresponds to the class IRI of the :class:`~oldman.model.Model` object that owns this generator. :param fragment: IRI fragment to append to the hash-less IRI. Defaults to `None`. """ def __init__(self, prefix, data_store, class_iri, fragment=None): self._prefix = prefix self._data_store = data_store self._class_iri = class_iri self._fragment = fragment self._data_store.check_and_repair_counter(class_iri) def generate(self, **kwargs): """See :func:`oldman.iri.IriGenerator.generate`.""" number = self._data_store.generate_instance_number(self._class_iri) partial_iri = u"%s%d" % (self._prefix, number) if self._fragment is not None: return u"%s#%s" % (partial_iri, self._fragment) return partial_iri def reset_counter(self): """ For test purposes only """ self._data_store.reset_instance_counter(self._class_iri) class UUIDFragmentIriGenerator(IriGenerator): """Generates an hashed IRI from a hash-less IRI. Its fragment is a unique UUID1 number. """ def generate(self, hashless_iri, **kwargs): """See :func:`oldman.iri.IriGenerator.generate`.""" if hashless_iri is None: raise OMRequiredHashlessIRIError(u"Hash-less IRI is required to generate an IRI") if '#' in hashless_iri: raise OMRequiredHashlessIRIError(u"%s is not a valid hash-less IRI" % hashless_iri) return u"%s#%s" % (hashless_iri, uuid1().hex) def _skolemize(prefix=u"http://localhost/.well-known/genid/"): return u"%s%s" % (prefix, uuid1().hex)
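A short usage sketch for the generators above; the prefix, hostname and hash-less IRI are made-up examples, and the incremental generator is omitted because it needs a concrete data store.

# Usage sketch (example values only).
gen = PrefixedUUIDIriGenerator(prefix=u"http://example.org/resources/", fragment=u"me")
print(gen.generate())
# -> http://example.org/resources/<uuid1-hex>#me

bnode_gen = BlankNodeIriGenerator(hostname=u"example.org")
print(bnode_gen.generate())
# -> http://example.org/.well-known/genid/<uuid1-hex>

frag_gen = UUIDFragmentIriGenerator()
print(frag_gen.generate(hashless_iri=u"http://example.org/doc"))
# -> http://example.org/doc#<uuid1-hex>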
Can you tell me the walk-in interview date of Electronics Corporation of India Limited (ECIL) for the recruitment of Technical Officer posts? I want to apply for this recruitment. Also, what documents are required for verification at the time of the interview?
""" This module implements the upload and remove endpoints of the profile image api. """ from contextlib import closing import datetime import logging from django.utils.translation import ugettext as _ from django.utils.timezone import utc from rest_framework import permissions, status from rest_framework.parsers import MultiPartParser, FormParser from rest_framework.response import Response from rest_framework.views import APIView from openedx.core.djangoapps.user_api.errors import UserNotFound from openedx.core.lib.api.authentication import ( OAuth2AuthenticationAllowInactiveUser, SessionAuthenticationAllowInactiveUser, ) from openedx.core.lib.api.permissions import IsUserInUrl, IsUserInUrlOrStaff from openedx.core.djangoapps.user_api.accounts.image_helpers import get_profile_image_names, set_has_profile_image from .images import validate_uploaded_image, create_profile_images, remove_profile_images, ImageValidationError log = logging.getLogger(__name__) LOG_MESSAGE_CREATE = 'Generated and uploaded images %(image_names)s for user %(user_id)s' LOG_MESSAGE_DELETE = 'Deleted images %(image_names)s for user %(user_id)s' def _make_upload_dt(): """ Generate a server-side timestamp for the upload. This is in a separate function so its behavior can be overridden in tests. """ return datetime.datetime.utcnow().replace(tzinfo=utc) class ProfileImageUploadView(APIView): """ **Use Cases** Upload an image to be used for the user's profile. The requesting user must be signed in. The signed in user can only upload his or her own profile image. **Example Requests** POST /api/profile_images/v1/{username}/upload **Response for POST** If the requesting user tries to upload the image for a different user: * If the requesting user has staff access, the request returns a 403 error. * If the requesting user does not have staff access, the request returns a 404 error. If no user matches the "username" parameter, the request returns a 404 error. If the upload could not be performed, the request returns a 400 error is with details. If the upload is successful, the request returns a 204 status with no additional content. """ parser_classes = (MultiPartParser, FormParser,) authentication_classes = (OAuth2AuthenticationAllowInactiveUser, SessionAuthenticationAllowInactiveUser) permission_classes = (permissions.IsAuthenticated, IsUserInUrl) def post(self, request, username): """ POST /api/profile_images/v1/{username}/upload """ # validate request: # verify that the user's # ensure any file was sent if 'file' not in request.FILES: return Response( { "developer_message": u"No file provided for profile image", "user_message": _(u"No file provided for profile image"), }, status=status.HTTP_400_BAD_REQUEST ) # process the upload. uploaded_file = request.FILES['file'] # no matter what happens, delete the temporary file when we're done with closing(uploaded_file): # image file validation. try: validate_uploaded_image(uploaded_file) except ImageValidationError as error: return Response( {"developer_message": error.message, "user_message": error.user_message}, status=status.HTTP_400_BAD_REQUEST, ) # generate profile pic and thumbnails and store them profile_image_names = get_profile_image_names(username) create_profile_images(uploaded_file, profile_image_names) # update the user account to reflect that a profile image is available. set_has_profile_image(username, True, _make_upload_dt()) log.info( LOG_MESSAGE_CREATE, {'image_names': profile_image_names.values(), 'user_id': request.user.id} ) # send client response. 
return Response(status=status.HTTP_204_NO_CONTENT) class ProfileImageRemoveView(APIView): """ **Use Cases** Remove all of the profile images associated with the user's account. The requesting user must be signed in. Users with staff access can remove profile images for other user accounts. Users without staff access can only remove their own profile images. **Example Requests** POST /api/profile_images/v1/{username}/remove **Response for POST** Requesting users who do not have staff access and try to remove another user's profile image receive a 404 error. If no user matches the "username" parameter, the request returns a 404 error. If the request could not remove the image, the request returns a 400 error with details. If the request successfully removes the image, the request returns a 204 status with no additional content. """ authentication_classes = (OAuth2AuthenticationAllowInactiveUser, SessionAuthenticationAllowInactiveUser) permission_classes = (permissions.IsAuthenticated, IsUserInUrlOrStaff) def post(self, request, username): # pylint: disable=unused-argument """ POST /api/profile_images/v1/{username}/remove """ try: # update the user account to reflect that the images were removed. set_has_profile_image(username, False) # remove physical files from storage. profile_image_names = get_profile_image_names(username) remove_profile_images(profile_image_names) log.info( LOG_MESSAGE_DELETE, {'image_names': profile_image_names.values(), 'user_id': request.user.id} ) except UserNotFound: return Response(status=status.HTTP_404_NOT_FOUND) # send client response. return Response(status=status.HTTP_204_NO_CONTENT)
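From a client's point of view, the two views above boil down to a multipart POST for upload and a bare POST for removal, both answering 204 on success. The sketch below uses the requests library; the host, username and authorization header are placeholders, not values from the codebase.

# Hypothetical client-side calls against the endpoints above (all names are placeholders).
import requests

BASE = "https://lms.example.com/api/profile_images/v1"
session = requests.Session()
session.headers["Authorization"] = "Bearer <access-token>"  # placeholder credential

# Upload: multipart form with a single "file" field; expect 204 on success,
# 400 if the image fails validation, 403/404 if the username does not match.
with open("avatar.png", "rb") as image:
    resp = session.post("{0}/jdoe/upload".format(BASE), files={"file": image})
print(resp.status_code)

# Remove: plain POST; also returns 204 once the stored images are deleted.
resp = session.post("{0}/jdoe/remove".format(BASE))
print(resp.status_code)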
The off-shore tax haven Anguilla is a British overseas territory located in the northern Caribbean. The territory consists of mainland Anguilla and several smaller islands and cays. The capital of Anguilla is called The Valley. The collective population of Anguilla is about 15,000 occupying a main area of 35 sq. miles. The official language is English. The main economic activities of off-shore tax haven Anguilla are tourism, offshore company incorporation and management, offshore banking and offshore financial services, fishing and boat building. Anguilla is internally self governing, under a parliamentary representative democratic dependency framework. The Chief Minister is the head of the Government. Off-shore tax haven Anguilla has an excellent communications infrastructure. The main international airport can service modern sized aircraft and other narrow body jets. Regional air travel is provided by LIAT Airlines; there are regularly scheduled flights from Continental America and Europe. There is a reliable ferry network providing service between Anguilla and neighbouring island of St. Maarten. The fiscal importance of the Anguilla off-shore tax haven sector is noteworthy. The government continues to ensure that Anguilla adopts and implements all the necessary legislation to encourage growth in the offshore sector and to win the interest of offshore investors. Anguilla has offshore legislation for asset protecting entities such as offshore companies IBC’s and LLC’s and offshore foundations. When you choose Anguilla as your off-shore tax shelter, you choose stability for your investment, tax-free income and profits, and no exchange controls for the offshore IBC or the LLC. Anguilla is an ideal tax haven that does not discriminate between local and offshore companies, all Anguilla companies are tax free companies. An offshore IBC and offshore LLC must maintain a registered office in off-shore tax haven Anguilla and must appoint a registered agent, who is a resident of Anguilla. All these services are provided by TH Limited. A copy of the corporate documents such as the Certificate of Incorporation and the Memorandum & Articles of Association must also be kept at the registered office in the off-shore tax haven Anguilla. Note. Although off-shore incorporation in off-shore tax haven Anguilla will be finished in one working day upon receipt of the application form and appropriate fee, please allow for an additional 2 to 5 working days for the delivery of the documents depending upon your location.
"""Django settings for eventhunt project.""" import datetime import os BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(__file__))) # remove /sswmain/settings to get base folder # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'ajsdgas7&*kosdsa21[]jaksdhlka-;kmcv8l$#diepsm8&ah^' # Eventbrite OAUTH_TOKEN EVENTBRITE_OAUTH_TOKEN = os.environ.get('EVENTBRITE_OAUTH_TOKEN') DEBUG = True TEMPLATE_DEBUG = True ALLOWED_HOSTS = [''] # Application definition INSTALLED_APPS = ( 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.staticfiles', 'rest_framework', 'django_extensions', 'api', 'base' ) MIDDLEWARE_CLASSES = ( 'django.middleware.security.SecurityMiddleware', 'whitenoise.middleware.WhiteNoiseMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', 'django.middleware.common.CommonMiddleware' ) ROOT_URLCONF = 'eventhunt.urls' WSGI_APPLICATION = 'eventhunt.wsgi.application' LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True ACCOUNT_ACTIVATION_DAYS = 7 # days STATIC_URL = '/static/' STATIC_ROOT = os.path.join(BASE_DIR, 'static_root') STATICFILES_DIRS = ( os.path.join(BASE_DIR, 'static_dist'), ) # store static files locally and serve with whitenoise STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage' # ############# REST FRAMEWORK ################### REST_FRAMEWORK = { 'DEFAULT_PERMISSION_CLASSES': (), 'DEFAULT_AUTHENTICATION_CLASSES': ( 'rest_framework.authentication.SessionAuthentication', 'rest_framework.authentication.BasicAuthentication', ), 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination', 'PAGE_SIZE': 20, 'DEFAULT_PARSER_CLASSES': ( 'rest_framework.parsers.JSONParser', 'rest_framework.parsers.FormParser', 'rest_framework.parsers.MultiPartParser', ), }
That had to be fun to see. I tell you, I get a kick over the opening of "A Haunting". "In this world, there is real evil." Each time I hear that, I think "And as this show demostrates, there is also real ignorance/stupidity." I recall one episode, a child had problems breathing (this occured after a string of "events"). A clergy man was present, and when he saw it, he exclaimed "Demon attack!" I had to keep myself from laughing. I think this story was from Peter Jones, the biologist, but I may be mistaken. In one of his books he describes vividly how he found himself running for his life from a polar bear, absolutely convinced that at any second he would be caught and torn apart. When this hadn't happened for an improbably long time, he risked a look over his shoulder, and realised he had been fleeing a shadow on the ice. You don't have to see it or think you saw it. In me yoof, a friend and myself were out after mushrooms. Farmer was a family friend so access no problem but we knew he had just bought a bull at market recently. Said friend decides to wait until I am getting a huge one (mushroom!) and nudges me and legs it yelling BULL! I passed him and noticed he was laughing but I still did not stop until I had cleared the fence in a single bound (superman, me). No bull in the field, it was all bull. ETA meant to add, the triggers are already there. I think some have more than others and depending of life experiences or what you are expecting. For example, your example, polar bears very dangerous and you are in their territory. My example with the bull and my mate playing on it. Or if you go on a show hoping to see something, etc. You old guys got to have all the fun! Most mushrooms of any sort do for me is calm my digestive tract. it is all good harmless fun until the "ghost" starts scratching and hurting people. people do anything to get on TV, I guess. I'm in yo' planet, abducting yo' farmers. I personally find paranormal stories and parapsychology fun and interesting. Not so much in a "All this must be true" kind of way, I just think it's a cool subject, once you get past some of the bull clogging everything up. Take the show "Haunted History" for example. At first glance, it seems like your average ghost story show, but it's interesting point is how it talks a lot about the history of certain places, and how these ghost stories might have come to be. It's just fun to watch, even if you don't really believe. But I can see how people can be turned away by these kinds of shows. The ghost hunters like to claim they have this "scientific" way of looking at their cases, when they are just as bias and unscientific as any other guy that does this. I like paranormal stories too (if they're told well) because I have a vivid imagination. Kind of a temporary suspension of disbelief maybe. Also I confess that years ago I had a summer job in a museum and a fellow employee and I told some ghost stories to tourists, so it might be hypocritical for me to complain about this subject. Welcome to the board, Tidesofeuropa. Cool name. Welcome to the board, tidesofeuropa. The difficulty with the supernatural tales shows is that if they are willing present such a credulous story as ghosts and haunting, there is little reason to believe anything else they say about the history of the place. One can reasonably surmise that any history is slanted to give credence to the ghost stories. My time is just too limited to spend it on that kind of show. 
If I want fictional entertainment, then I'll choose good acting over breathless narration. As far as I can tell, Ghosthunters just consists of some would-be scientists letting their imaginations get the better of them. I agree. There's not one bonafide "smoking gun"... not one. The IR & EM toys, tape recorders, and [name your gizmo] are just more modern props to an age-old superstition and don't make sense if you put any thought into it. I chuckle when it's taken for granted that "lights out" is a prerequisite for one's dear departed Uncle Fred to pay a visit. You are very generous with your characterization. I will say that an episode of Haunted History made me regret, at least a little, not having gone to my prom. I could have lurked in the bathroom of the hotel it was held in and looked for the ghost of Marilyn Monroe. Certainly it would have been better than listening to the music they would have been playing. And I think that's just a matter of personal taste, really. I enjoy the show, although, I don't believe everything they come up with. Sure, the show is built up to give more suspense to the stories, and they do tend to play it off like they're real, but it's all in good fun for me. I can find enjoyment in it. But TAPS (The Ghost hunters) tend to think that if you can't explain something, it's evidence of a haunting. There's a lot of facepalming moments in Ghost Hunters, to say the least. Yes it is ultimately a matter of personal tastes. Any bodies ghost would have been more fun than a High School prom. People kept telling me I'd regret it if I didn't go. I haven't yet!
import unittest from Skoarcery import langoids, terminals, nonterminals, dragonsets, parsetable, emissions from Skoarcery.langoids import Terminal, Nonterminal class Code_Parser_Py(unittest.TestCase): def setUp(self): terminals.init() nonterminals.init() langoids.init() dragonsets.init() parsetable.init() emissions.init() def test_pyrdpp(self): from Skoarcery.dragonsets import FIRST, FOLLOW from Skoarcery.terminals import Empty fd = open("../pymp/rdpp.py", "w") PY = emissions.PY PY.fd = fd # Header # Imports # class SkoarParseException # class SkoarParser: # __init__ # fail self.code_start() PY.tab += 1 N = nonterminals.nonterminals.values() # write each nonterminal as a function for A in N: R = A.production_rules #PY.cmt(str(A)) PY.stmt("def " + A.name + "(self, parent):") PY.tab += 1 PY.stmt("self.tab += 1") if A.intermediate: PY.stmt("noad = parent") else: PY.stmt("noad = SkoarNoad('" + A.name + "', None, parent)") PY.nl() #PY.code_line("print('" + A.name + "')") for P in R: if P.derives_empty: continue # A -> alpha alpha = P.production desires = FIRST(alpha) if Empty in desires: desires.discard(Empty) desires.update(FOLLOW(A)) PY.cmt(str(P)) i = 0 n = len(desires) PY.stmt("desires = [", end="") for toke in desires: PY.raw(toke.toker_name) i += 1 if i != n: if i % 5 == 0: PY.raw(",\n") PY.stmt(" ", end="") else: PY.raw(", ") else: PY.raw("]\n") PY.if_("self.toker.sees(desires)") #PY.print(str(P)) for x in alpha: if isinstance(x, Terminal): PY.stmt("noad.add_toke('" + x.toker_name + "', self.toker.burn(" + x.toker_name + "))") #PY.print("burning: " + x.name) else: if x.intermediate: PY.stmt("self." + x.name + "(noad)") else: PY.stmt("noad.add_noad(self." + x.name + "(noad))") else: PY.return_("noad") PY.tab -= 1 PY.nl() if A.derives_empty: PY.cmt("<e>") #PY.print("burning empty") PY.return_("noad") else: PY.cmt("Error State") PY.stmt("self.fail()") PY.tab -= 1 PY.nl() PY.tab -= 1 fd.close() def code_start(self): from Skoarcery.terminals import Empty PY = emissions.PY PY.file_header("rdpp.py", "PyRDPP - Create Recursive Descent Predictive Parser") s = "from Skoarcery.pymp.apparatus import SkoarNoad\n"\ "from Skoarcery.pymp.lex import " T = terminals.tokens.values() n = len(T) i = 0 for t in T: if t == Empty: n -= 1 continue s += t.toker_name i += 1 if i < n: if i % 5 == 0: s += ", \\\n " else: s += ", " PY.raw(s + """ class SkoarParseException(Exception): pass class SkoarParser: def __init__(self, runtime): self.runtime = runtime self.toker = runtime.toker self.tab = 0 def fail(self): self.toker.dump() raise SkoarParseException @property def tabby(self): if self.tab == 0: return "" return ("{:>" + str(self.tab * 2) + "}").format(" ") def print(self, line, end): print(self.tabby + line, end=end) """)
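For reference, the emitter above writes one method per nonterminal: build a desires list from FIRST (plus FOLLOW when the production can vanish), burn terminals, recurse into nonterminals, and either accept the empty derivation or fail. A rough, hypothetical sketch of the shape of such a generated method follows; the nonterminal and token names are invented, since the real ones come from the grammar tables.

# Hypothetical output of the generator above for a nonterminal "phrase" with one
# production "phrase -> NOTE melody" and an empty alternative.
def phrase(self, parent):
    self.tab += 1
    noad = SkoarNoad('phrase', None, parent)

    # phrase -> NOTE melody
    desires = [NOTE, REST]   # FIRST of the production, extended with FOLLOW if it can derive <e>
    if self.toker.sees(desires):
        noad.add_toke('NOTE', self.toker.burn(NOTE))
        noad.add_noad(self.melody(noad))
        return noad

    # <e>
    return noad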
18 Inch Bathroom Vanity With Top - Antique Bedroom Vanity With Mirror. Vintage Bedroom Vanity With Mirror. vanity lighting ideas. bedroom lighting design guide. cube storage for clothes.
from __future__ import absolute_import, print_function, division import theano import theano.tensor as T import numpy as np class Model(object): def __init__(self, name=""): self.name = name self.layers = [] self.params = [] self.other_updates = {} def add_layer(self, layer): self.layers.append(layer) for p in layer.params: self.params.append(p) if hasattr(layer, 'other_updates'): for y in layer.other_updates: self.other_updates[y[0]] = y[1] def get_params(self): return self.params def uniform(stdev, size): """uniform distribution with the given stdev and size""" return np.random.uniform( low=-stdev * np.sqrt(3), high=stdev * np.sqrt(3), size=size ).astype(theano.config.floatX) def linear_transform_weights(input_dim, output_dim, param_list=None, name=""): "theano shared variable given input and output dimension" weight_inialization = uniform(np.sqrt(2.0 / input_dim), (input_dim, output_dim)) W = theano.shared(weight_inialization, name=name) assert(param_list is not None) param_list.append(W) return W def bias_weights(length, param_list=None, name=""): "theano shared variable for bias unit, given length" bias_initialization = np.zeros(length).astype(theano.config.floatX) bias = theano.shared( bias_initialization, name=name ) if param_list is not None: param_list.append(bias) return bias class Layer(object): '''Generic Layer Template which all layers should inherit''' def __init__(self, name=""): self.name = name self.params = [] def get_params(self): return self.params class GRU(Layer): def __init__(self, input_dim, output_dim, input_layer, s0=None, name=""): '''Layers information''' self.name = name self.input_dim = input_dim self.hidden_dim = output_dim self.output_dim = output_dim self.input_layer = input_layer self.X = input_layer.output() self.s0 = s0 self.params = [] '''Layers weights''' '''self.params is passed so that any paramters could be appended to it''' self.W_r = linear_transform_weights(input_dim, output_dim, param_list=self.params, name=name + ".W_r") self.b_wr = bias_weights((output_dim,), param_list=self.params, name=name + ".b_wr") self.W_i = linear_transform_weights(input_dim, output_dim, param_list=self.params, name=name + ".W_i") self.b_wi = bias_weights((output_dim,), param_list=self.params, name=name + ".b_wi") self.W_h = linear_transform_weights(input_dim, output_dim, param_list=self.params, name=name + ".W_h") self.b_wh = bias_weights((output_dim,), param_list=self.params, name=name + ".b_wh") self.R_r = linear_transform_weights(output_dim, output_dim, param_list=self.params, name=name + ".R_r") self.b_rr = bias_weights((output_dim,), param_list=self.params, name=name + ".b_rr") self.R_i = linear_transform_weights(output_dim, output_dim, param_list=self.params, name=name + ".R_i") self.b_ru = bias_weights((output_dim,), param_list=self.params, name=name + ".b_ru") self.R_h = linear_transform_weights(output_dim, output_dim, param_list=self.params, name=name + ".R_h") self.b_rh = bias_weights((output_dim,), param_list=self.params, name=name + ".b_rh") '''step through processed input to create output''' def step(inp, s_prev): i_t = T.nnet.sigmoid( T.dot(inp, self.W_i) + T.dot(s_prev, self.R_i) + self.b_wi + self.b_ru) r_t = T.nnet.sigmoid( T.dot(inp, self.W_r) + T.dot(s_prev, self.R_r) + self.b_wr + self.b_rr) h_hat_t = T.tanh( T.dot(inp, self.W_h) + (r_t * (T.dot(s_prev, self.R_h) + self.b_rh)) + self.b_wh) s_curr = ((1.0 - i_t) * h_hat_t) + (i_t * s_prev) return s_curr outputs_info = self.s0 states, updates = theano.scan( fn=step, sequences=[self.X], 
outputs_info=outputs_info ) self.Y = states def output(self): return self.Y class LSTM(Layer): def __init__(self, input_dim, output_dim, input_layer, s0=None, c0=None, name=""): '''Layers information''' self.name = name self.input_dim = input_dim self.hidden_dim = output_dim self.output_dim = output_dim self.input_layer = input_layer self.X = input_layer.output() self.s0 = s0 self.c0 = c0 self.params = [] '''Layers weights''' '''self.params is passed so that any paramters could be appended to it''' self.W_i = linear_transform_weights(input_dim, output_dim, param_list=self.params, name=name + ".W_i") self.b_wi = bias_weights((output_dim,), param_list=self.params, name=name + ".b_wi") self.W_f = linear_transform_weights(input_dim, output_dim, param_list=self.params, name=name + ".W_f") self.b_wf = bias_weights((output_dim,), param_list=self.params, name=name + ".b_wf") self.W_c = linear_transform_weights(input_dim, output_dim, param_list=self.params, name=name + ".W_c") self.b_wc = bias_weights((output_dim,), param_list=self.params, name=name + ".b_wc") self.W_o = linear_transform_weights(input_dim, output_dim, param_list=self.params, name=name + ".W_o") self.b_wo = bias_weights((output_dim,), param_list=self.params, name=name + ".b_wo") self.R_i = linear_transform_weights(output_dim, output_dim, param_list=self.params, name=name + ".R_i") self.b_ri = bias_weights((output_dim,), param_list=self.params, name=name + ".b_ri") self.R_f = linear_transform_weights(output_dim, output_dim, param_list=self.params, name=name + ".R_f") self.b_rf = bias_weights((output_dim,), param_list=self.params, name=name + ".b_rf") self.R_c = linear_transform_weights(output_dim, output_dim, param_list=self.params, name=name + ".R_c") self.b_rc = bias_weights((output_dim,), param_list=self.params, name=name + ".b_rc") self.R_o = linear_transform_weights(output_dim, output_dim, param_list=self.params, name=name + ".R_o") self.b_ro = bias_weights((output_dim,), param_list=self.params, name=name + ".b_ro") '''step through processed input to create output''' def step(x_t, h_tm1, c_tm1): i_t = T.nnet.sigmoid( T.dot(x_t, self.W_i) + T.dot(h_tm1, self.R_i) + self.b_wi + self.b_ri) f_t = T.nnet.sigmoid( T.dot(x_t, self.W_f) + T.dot(h_tm1, self.R_f) + self.b_wf + self.b_rf) o_t = T.nnet.sigmoid( T.dot(x_t, self.W_o) + T.dot(h_tm1, self.R_o) + self.b_ro + self.b_wo) c_hat_t = T.tanh( T.dot(x_t, self.W_c) + T.dot(h_tm1, self.R_c) + self.b_wc + self.b_rc) c_t = f_t * c_tm1 + i_t * c_hat_t h_t = o_t * T.tanh(c_t) return h_t, c_t outputs_info = [self.s0, self.c0] states, updates = theano.scan( fn=step, sequences=[self.X], outputs_info=outputs_info ) self.Y = states[0] self.C = states[1] def output(self): return self.Y class FC(Layer): def __init__(self, input_dim, output_dim, input_layer, name=""): self.input_layer = input_layer self.name = name self.params = [] self.input_dim = input_dim self.output_dim = output_dim self.X = self.input_layer.output() self.W = linear_transform_weights(input_dim, output_dim, param_list=self.params, name=name + ".W") self.b = bias_weights((output_dim,), param_list=self.params, name=name + ".b") def output(self): return T.dot(self.X, self.W) + self.b class WrapperLayer(Layer): def __init__(self, X, name=""): self.params = [] self.name = name self.X = X def output(self): return self.X
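A tiny wiring sketch for the layers above, assuming time-major input of shape (time, batch, features); the layer sizes are arbitrary and the snippet presumes the classes above are importable from this module.

# Wire WrapperLayer -> GRU -> FC and compile a forward pass (sizes are made up).
X = T.tensor3('X')    # (time, batch, input_dim)
s0 = T.matrix('s0')   # (batch, hidden_dim) initial recurrent state

model = Model(name="demo")
inp = WrapperLayer(X, name="input")
gru = GRU(input_dim=16, output_dim=32, input_layer=inp, s0=s0, name="gru")
fc = FC(input_dim=32, output_dim=8, input_layer=gru, name="fc")
model.add_layer(gru)
model.add_layer(fc)

f = theano.function([X, s0], fc.output())
out = f(np.zeros((5, 4, 16), dtype=theano.config.floatX),
        np.zeros((4, 32), dtype=theano.config.floatX))
print(out.shape)  # expected (5, 4, 8)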
With world-class facilities and great sporting legacy, East Midlands is the perfect place to enjoy sport all year round. Few know, for instance, that Nottingham is the home to the oldest association football team in the world that is still playing at a professional level, namely Notts County Football Club. Another important football team from the area is Leicester City Football Club, which competes in the Premier League, England’s top division of football. We are now exactly half-way through this season’s Premier League and the team is only on the 12th position in the charts, with 13 points after eleven matches played. Though a successful team last season, when they won their first ever Premier League title, Leicester City are now in danger of relegation and have the serious task of trying to stay in the Premier League. They need to keep up with Liverpool, which has some time ago had some interesting additions to the team, which have actually strengthened them as a club. They have only won four games this season, against Swansea City, Burnley, Crystal Palace and Manchester City. The most recent encounter on the field was with Stoke City, and Leicester City led twice, through Vicente Iborra and Riyad Mahrez, but the game ended in a 2-2 draw as Stoke came from behind and Xherdan Shaqiri and Peter Crouch both equalised. At the moment, Leicester City manager is Claude Puel and the Foxes have scored two goals against both Everton and Stoke since he took over. The man of the match was Riyad Mahrez, who managed two goals and two assists in his last four games, reminding everyone of the ability that made him a key part of Leicester’s Premier League triumph last year. The international break is set to begin, so the teams are not in action for a couple of weeks, but up next, Leicester City will host Manchester City on Saturday, November 18. We’ll keep an eye out to see their progress in the tournament. Top three in the Premier League at the moment is made up of Manchester City with 31 points, Manchester United and Tottenham Hotspur with 23 points each, followed by Chelsea with 22 points. Even though Manchester City has a big lead, there is still a possibility of many interesting things happening. Aside from Leicester City and Liverpool, there are some other teams that may still surprise us. Matches are set to rekindle on November 18, so stay tuned.
# coding=utf-8 # Copyright 2021 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function from __future__ import absolute_import from __future__ import division import tensorflow.compat.v1 as tf import sonnet as snt import numpy as np from ebp.common.tf_utils import MLP from ebp.common.flow_family import iCondResFlow from ebp.common.flow_family import HyperNet, NormFlow class Generator(snt.AbstractModule): def __init__(self, pc_dim=(2048, 3), fc_dims=(64, 128, 512, 1024), act=tf.nn.relu, entropy_reg=True, batch_norm=False, name='gen'): super(Generator, self).__init__(name=name) self.pc_dim = pc_dim self.act = act self.batch_norm = batch_norm self.entropy_reg = entropy_reg self.fc_body = [] self.fc_sigma_body = [] self.bn_body = [] self.bn_sigma_body = [] with self._enter_variable_scope(): for i, fc_dim in enumerate(fc_dims): fc = snt.Linear(fc_dim, name='fc_%d' % i) self.fc_body.append(fc) self.bn_body.append( snt.BatchNorm(offset=True, scale=True, name='bn_%d' % i)) self.fc_final = snt.Linear(np.prod(pc_dim), name='fc_final') for i, fc_dim in enumerate(fc_dims): fc = snt.Linear(fc_dim, name='fc_sigma_%d' % i) self.fc_sigma_body.append(fc) self.bn_sigma_body.append( snt.BatchNorm(offset=True, scale=True, name='bn_sigma_%d' % i)) self.fc_sigma_final = snt.Linear(np.prod(pc_dim), name='fc_sigma_final') def _build(self, z, is_training=True): x = self.fc_body[0](z) if self.batch_norm: x = self.bn_body[0](x, is_training) for i in range(1, len(self.fc_body)): x = self.act(x) x = self.fc_body[i](x) if self.batch_norm: x = self.bn_body[i](x, is_training) x = self.act(x) x = self.fc_final(x) logprob = None if self.entropy_reg: sigma = self.fc_sigma_body[0](z) for fc in self.fc_sigma_body[1:]: sigma = self.act(sigma) sigma = fc(sigma) sigma = self.act(sigma) sigma = self.fc_sigma_final(sigma) sigma = tf.sigmoid(sigma) #sigma = tf.abs(1e-3 * tf.sigmoid(sigma)) logprob = tf.reduce_sum(-tf.log(sigma + 1e-6), axis=1) x = x + sigma * tf.random_normal(tf.shape(sigma)) x = tf.reshape(x, (-1,) + self.pc_dim) #with tf.control_dependencies([tf.print('ent', tf.reduce_mean(logprob))]): return x, tf.identity(logprob) def generate_noise(self, num_samples, z_dim=128, mu=0, sigma=0.2): return np.random.normal(mu, sigma, (num_samples, *z_dim)) class LVMBlock(snt.AbstractModule): def __init__(self, gauss_dim, depth=3, act_hidden=tf.nn.relu, name='lvm_block'): super(LVMBlock, self).__init__(name=name) hidden_dims = [min(gauss_dim, 256)] * depth with self._enter_variable_scope(): self.mlp = snt.nets.MLP( output_sizes=hidden_dims, activation=act_hidden, activate_final=True) self.w_mu = tf.get_variable('w_mu', shape=[hidden_dims[-1], gauss_dim]) self.b_mu = tf.get_variable('b_mu', shape=[1, gauss_dim]) self.w_logsig = tf.get_variable( 'w_logsig', shape=[hidden_dims[-1], gauss_dim]) self.b_logsig = tf.get_variable('b_logsig', shape=[1, gauss_dim]) def _build(self, inputs): z = self.mlp(inputs) mu = tf.matmul(z, self.w_mu) + self.b_mu logsig = tf.matmul(z, self.w_logsig) + 
self.b_logsig sigma = tf.sigmoid(logsig) sigma = tf.exp(logsig) eps = tf.random.normal( shape=tf.shape(mu), mean=0, stddev=1, dtype=tf.float32) x = mu + sigma * eps ent = tf.reduce_sum(-tf.log(sigma + 1e-6), axis=-1) return x, mu, logsig, ent class RNNGenerator(snt.AbstractModule): def __init__(self, block_size, rnn_input_dim=128, state_dim=128, pc_dim=(2048, 3), cell_type='lstm', act_hidden=tf.nn.relu, gen_depth=3, name='rnn_generator'): """Args: state_dim: dimensionality of hidden states of the RNN cell block_size: number of points to generate at once pc_dim: a single point cloud's dimension cell_type: one of [lstm, gru]. """ assert (pc_dim[0] % block_size == 0) super(RNNGenerator, self).__init__(name=name) self.rnn_input_dim = rnn_input_dim self.pc_dim = pc_dim self.gauss_dim = block_size * pc_dim[-1] self.block_size = block_size self.num_blocks = pc_dim[0] // block_size self.state_dim = state_dim self.cell_type = cell_type with self._enter_variable_scope(): self.input_proj = snt.nets.MLP( output_sizes=[rnn_input_dim * 2, rnn_input_dim], activation=act_hidden, activate_final=True) if cell_type == 'lstm': self.rnn_cell = tf.nn.rnn_cell.BasicLSTMCell(state_dim) elif cell_type == 'gru': self.rnn_cell = tf.nn.rnn_cell.GRUCell(state_dim) else: raise ValueError('cell_type {} not implemented'.format(cell_type)) self.output_lvm = LVMBlock( self.gauss_dim, act_hidden=act_hidden, depth=gen_depth) def _build(self, z): x, mu, logsig, ent = self.output_lvm(z) state_input = self.input_proj(tf.concat([x, mu, logsig], axis=-1)) sample_output = tf.expand_dims(x, 0) ent_output = tf.expand_dims(ent, 0) if self.cell_type == 'lstm': init_state = tf.nn.rnn_cell.LSTMStateTuple(z, z) else: init_state = z def loop_body(prev_state, state_input, sample_output, ent_output): state_output, next_state = self.rnn_cell(state_input, prev_state) x, mu, logsig, ent = self.output_lvm(state_output) sample_output = tf.concat([sample_output, tf.expand_dims(x, 0)], axis=0) ent_output = tf.concat([ent_output, tf.expand_dims(ent, 0)], axis=0) # prep for next iteration state_input = self.input_proj(tf.concat([x, mu, logsig], axis=-1)) return next_state, state_input, sample_output, ent_output def loop_cond(prev_state, state_input, sample_output, ent_output): return tf.shape(ent_output)[0] < self.num_blocks if self.cell_type == 'lstm': shape_invariant = tf.nn.rnn_cell.LSTMStateTuple( tf.TensorShape((None, self.state_dim)), tf.TensorShape((None, self.state_dim))) else: shape_invariant = tf.TensorShape((None, self.state_dim)) _, _, sample_output, ent_output = tf.while_loop( loop_cond, loop_body, [init_state, state_input, sample_output, ent_output], shape_invariants=[ shape_invariant, tf.TensorShape((None, self.rnn_input_dim)), tf.TensorShape((None, None, self.gauss_dim)), tf.TensorShape((None, None)) ]) sample_output = tf.reshape( tf.transpose(sample_output, [1, 0, 2]), (-1,) + self.pc_dim) ent_output = tf.reduce_sum(ent_output, axis=0) return sample_output, ent_output class GPRNN(snt.AbstractModule): def __init__(self, block_size, act_hidden=tf.nn.relu, pc_dim=(2048, 3), init_z_dim=128, name='rnn_generator'): super(GPRNN, self).__init__(name=name) self.pc_dim = pc_dim gauss_dim = block_size * pc_dim[-1] assert (pc_dim[0] % block_size == 0) self.num_blocks = pc_dim[0] // block_size - 1 with self._enter_variable_scope(): self.first_block = LVMBlock( init_z_dim, gauss_dim, act_hidden=self.act_hidden) if self.num_blocks > 0: self.lvm_block = LVMBlock( gauss_dim * pc_dim[-1], gauss_dim, act_hidden=self.act_hidden) def _build(self, z): 
list_x = [] list_ent = [] x, mu, logsig, ent = self.first_block(z) list_x.append(x) list_ent.append(ent) for _ in range(self.num_blocks): x, mu, logsig, ent = self.lvm_block(tf.concat([x, mu, logsig], axis=-1)) list_x.append(x) list_ent.append(ent) x = tf.reshape(tf.concat(list_x, axis=-1), (-1,) + self.pc_dim) ent = tf.reduce_sum(list_ent, axis=0) return x, tf.identity(ent) class DeterministicEncoder(snt.AbstractModule): """The Encoder.""" def __init__(self, output_sizes): super(DeterministicEncoder, self).__init__(name='DeterministicEncoder') """CNP encoder. Args: output_sizes: An iterable containing the output sizes of the encoding MLP. """ self._output_sizes = output_sizes def _build(self, context_x, context_y, num_context_points): """Encodes the inputs into one representation. Args: context_x: Tensor of size bs x observations x m_ch. For this 1D regression task this corresponds to the x-values. context_y: Tensor of size bs x observations x d_ch. For this 1D regression task this corresponds to the y-values. num_context_points: A tensor containing a single scalar that indicates the number of context_points provided in this iteration. Returns: representation: The encoded representation averaged over all context points. """ # Concatenate x and y along the filter axes encoder_input = tf.concat([context_x, context_y], axis=-1) # Get the shapes of the input and reshape to parallelise across observations batch_size, _, filter_size = encoder_input.shape.as_list() hidden = tf.reshape(encoder_input, (batch_size * num_context_points, -1)) hidden.set_shape((None, filter_size)) # Pass through MLP with tf.variable_scope('encoder', reuse=tf.AUTO_REUSE): for i, size in enumerate(self._output_sizes[:-1]): hidden = tf.nn.relu( tf.layers.dense(hidden, size, name='Encoder_layer_{}'.format(i))) # Last layer without a ReLu hidden = tf.layers.dense( hidden, self._output_sizes[-1], name='Encoder_layer_{}'.format(i + 1)) # Bring back into original shape hidden = tf.reshape(hidden, (batch_size, num_context_points, size)) # Aggregator: take the mean over all points representation = tf.reduce_mean(hidden, axis=1) return representation class DeterministicDecoder(snt.AbstractModule): """The Decoder.""" def __init__(self, output_sizes): """CNP decoder. Args: output_sizes: An iterable containing the output sizes of the decoder MLP as defined in `basic.Linear`. """ super(DeterministicDecoder, self).__init__(name='DeterministicDecoder') self._output_sizes = output_sizes def _build(self, representation, target_x, num_total_points): """Decodes the individual targets. Args: representation: The encoded representation of the context target_x: The x locations for the target query num_total_points: The number of target points. Returns: dist: A multivariate Gaussian over the target points. mu: The mean of the multivariate Gaussian. sigma: The standard deviation of the multivariate Gaussian. 
""" # Concatenate the representation and the target_x representation = tf.tile( tf.expand_dims(representation, axis=1), [1, num_total_points, 1]) input = tf.concat([representation, target_x], axis=-1) # Get the shapes of the input and reshape to parallelise across observations batch_size, _, filter_size = input.shape.as_list() hidden = tf.reshape(input, (batch_size * num_total_points, -1)) hidden.set_shape((None, filter_size)) # Pass through MLP with tf.variable_scope('decoder', reuse=tf.AUTO_REUSE): for i, size in enumerate(self._output_sizes[:-1]): hidden = tf.nn.relu( tf.layers.dense(hidden, size, name='Decoder_layer_{}'.format(i))) # Last layer without a ReLu hidden = tf.layers.dense( hidden, self._output_sizes[-1], name='Decoder_layer_{}'.format(i + 1)) # Bring back into original shape hidden = tf.reshape(hidden, (batch_size, num_total_points, -1)) # Get the mean an the variance mu, log_sigma = tf.split(hidden, 2, axis=-1) # Bound the variance sigma = 0.1 + 0.9 * tf.nn.softplus(log_sigma) # Get the distribution dist = tf.contrib.distributions.MultivariateNormalDiag( loc=mu, scale_diag=sigma) return dist, mu, sigma class MLPGen(snt.AbstractModule): def __init__(self, dim, hidden_dim, depth, output_dim, act_hidden=tf.nn.relu, sp_iters=0, mlp=None, name='mlp_gauss'): super(MLPGen, self).__init__(name=name) self.dim = dim with self._enter_variable_scope(): if mlp is None: self.mlp = MLP(self.dim + 31, hidden_dim, depth, 1, act_hidden, sp_iters) else: self.mlp = mlp def _build(self, raw_x): z = tf.random.normal( shape=[tf.shape(raw_x)[0], tf.shape(raw_x)[1], 32], mean=0, stddev=1, dtype=tf.float32) x = tf.concat([raw_x, z], -1) y = self.mlp(x) y = tf.reshape(y, [-1, tf.shape(raw_x)[1], 1]) return tf.concat([raw_x, y], -1) class iCondGen(snt.AbstractModule): def __init__(self, dim, cond_dim, num_layers, act_hidden='tanh', sp_iters=1, name='icondres_flow'): super(iCondGen, self).__init__(name=name) self.dim = dim self.cond_dim = cond_dim self.i_cond_flow = iCondResFlow(dim, cond_dim, num_layers, act_hidden, sp_iters) def _build(self, raw_x): x = tf.reshape(raw_x, [-1, self.dim]) z = tf.random.normal( shape=[tf.shape(x)[0], self.cond_dim], mean=0, stddev=1, dtype=tf.float32) y, logp = self.i_cond_flow(z, x, 0) y = tf.reshape(y, [-1, tf.shape(raw_x)[1], 1]) logp = tf.reshape(logp, [-1, 1]) return tf.concat([raw_x, y], -1), logp class iDoubleCondGen(snt.AbstractModule): def __init__(self, dim, condx_dim, condz_dim, num_layers, act_hidden='tanh', sp_iters=1, name='icondres_flow'): super(iDoubleCondGen, self).__init__(name=name) self.dim = dim self.condx_dim = condx_dim self.condz_dim = condz_dim with self._enter_variable_scope(): self.i_cond_flow = iCondResFlow(dim, condz_dim + condz_dim, num_layers, act_hidden, sp_iters) self.fc = snt.Linear(condz_dim) self.mlp = MLP(condz_dim, condz_dim, 2, condz_dim, tf.nn.relu) def _build(self, raw_x, z_cond): x = tf.reshape(raw_x, [-1, self.dim]) z_cond = tf.tile(z_cond, [1, tf.shape(raw_x)[1]]) z_cond = tf.reshape(z_cond, [-1, self.condz_dim]) z_cond = self.mlp(z_cond) z = tf.random.normal( shape=[tf.shape(x)[0], self.condx_dim], mean=0, stddev=1, dtype=tf.float32) x = self.fc(x) ctx = tf.concat([x, z_cond], axis=-1) y, logp = self.i_cond_flow(z, ctx, 0) y = tf.reshape(y, [-1, tf.shape(raw_x)[1], 1]) logp = tf.reshape(logp, [-1, tf.shape(raw_x)[1], 1]) logp = tf.reduce_sum(logp, axis=1, keepdims=False) return tf.concat([raw_x, y], -1), logp class HyperGen(snt.AbstractModule): def __init__(self, dim, condx_dim, condz_dim, num_layers, name='HyperGen'): 
super(HyperGen, self).__init__(name=name) self.dim = dim self.condx_dim = condx_dim self.condz_dim = condz_dim with self._enter_variable_scope(): self.fc = snt.Linear(condz_dim) self.norm_flow = NormFlow(self.dim, num_layers, 'planar') self.hnet = HyperNet( 2 * condz_dim, 256, self.norm_flow.num_params, depth=2) def _build(self, raw_x, z_cond): x = tf.reshape(raw_x, [-1, self.dim]) z_cond = tf.tile(z_cond, [1, tf.shape(raw_x)[1]]) z_cond = tf.reshape(z_cond, [-1, self.condz_dim]) z = tf.random.normal( shape=[tf.shape(x)[0], 1, self.dim], mean=0, stddev=1, dtype=tf.float32) x = self.fc(x) ctx = tf.concat([x, z_cond], axis=-1) params = self.hnet(ctx) y, logp = self.norm_flow(z, 0, params) y = tf.reshape(y, [-1, tf.shape(raw_x)[1], 1]) logp = tf.reshape(logp, [-1, tf.shape(raw_x)[1], 1]) logp = tf.reduce_sum(logp, axis=1, keepdims=False) return tf.concat([raw_x, y], -1), logp
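# --- Usage sketch (not part of the original module) ---------------------------
# A minimal, hedged example of wiring RNNGenerator into a TF1.x graph. The
# batch size and block size are illustrative, and it assumes the latent code
# `z` has width `state_dim`, since `z` is used directly as the initial LSTM
# state inside _build.
if __name__ == '__main__':
    import numpy as np

    state_dim = 128
    generator = RNNGenerator(block_size=256, state_dim=state_dim, pc_dim=(2048, 3))

    # Latent code / initial RNN state, one row per point cloud to generate.
    z = tf.placeholder(tf.float32, shape=[None, state_dim])
    # Expected: (batch, 2048, 3) point-cloud samples and a per-sample entropy term.
    point_clouds, entropy = generator(z)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        samples, ent = sess.run([point_clouds, entropy],
                                feed_dict={z: np.random.randn(4, state_dim)})
        print(samples.shape)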
"""Validator for Flow123D data structure .. codeauthor:: Tomas Krizek <[email protected]> """ from ..notifications import Notification from gm_base.geomop_util import TextValue, Span, Parameter from . import checks from ..data_node import DataNode from ..format import is_scalar, is_param class Validator: """Handles data structure validation.""" def __init__(self, notification_handler): """Initializes the validator with a NotificationHandler.""" self.notification_handler = notification_handler self.valid = True self.params = [] def validate(self, node, input_type): """ Performs data validation of node with the specified input_type. Validation is performed recursively on all children nodes as well. Options are added to nodes where applicable (record keys, selection, ...). Returns True when all data was correctly validated, False otherwise. Attribute errors contains a list of occurred errors. """ self.valid = True self.params = [] self._validate_node(node, input_type) return self.valid def _validate_node(self, node, input_type): """ Determines if node contains correct value. Method verifies node recursively. All descendant nodes are checked. """ if node is None: raise Notification.from_name('ValidationError', 'Invalid node (None)') # parameters # TODO: enable parameters in unknown IST? if hasattr(node, 'value'): match = is_param(node.value) if match: # extract parameters new_param = Parameter(match.group(1)) exists = False for param in self.params: if param.name == new_param.name: exists = True break if not exists: self.params.append(new_param) node.input_type = input_type # assume parameters are correct, do not validate further return if input_type['base_type'] != 'Abstract' and hasattr(node, 'type') \ and node.type is not None and 'implemented_abstract_record' not in input_type: notification = Notification.from_name('UselessTag', node.type.value) notification.span = node.type.span self.notification_handler.report(notification) node.input_type = input_type if is_scalar(input_type): self._validate_scalar(node, input_type) elif input_type['base_type'] == 'Record': self._validate_record(node, input_type) elif input_type['base_type'] == 'Abstract': self._validate_abstract(node, input_type) elif input_type['base_type'] == 'Array': self._validate_array(node, input_type) else: notification = Notification.from_name('InputTypeNotSupported', input_type['base_type']) self._report_notification(notification) def _validate_scalar(self, node, input_type): """Validates a Scalar node.""" if input_type['base_type'] == 'Selection': node.options = input_type['values'] try: checks.check_scalar(node, input_type) except Notification as notification: if notification.name in ['InvalidSelectionOption', 'ValueTooBig', 'ValueTooSmall', 'ValidationTypeError']: notification.span = node.span else: notification.span = get_node_key(node).notification_span self._report_notification(notification) def _validate_record(self, node, input_type): """Validates a Record node.""" if not node.implementation == DataNode.Implementation.mapping: notification = Notification.from_name('ValidationTypeError', 'Record') notification.span = get_node_key(node).notification_span self._report_notification(notification) return keys = node.children_keys node.options = input_type['keys'].keys() keys.extend(input_type['keys'].keys()) for key in set(keys): if node.origin == DataNode.Origin.error: continue child = node.get_child(key) if child is not None and \ child.origin==DataNode.Origin.duplicit: notification = 
Notification.from_name('DuplicateRecord') notification.span = child.key.span self._report_notification(notification) continue if child is not None and \ child.origin==DataNode.Origin.redefination: notification = Notification.from_name('RedefinateRecord') notification.span = child.key.span self._report_notification(notification) continue try: checks.check_record_key(node.children_keys, key, input_type) except Notification as notification: if notification.name == 'UnknownRecordKey': notification.span = child.notification_span else: notification.span = get_node_key(node).notification_span self._report_notification(notification) else: if child is not None: child_input_type = input_type['keys'][key]['type'] self._validate_node(child, child_input_type) def _validate_abstract(self, node, input_type): """Validates an AbtractRecord node.""" try: concrete_type = checks.get_abstractrecord_type(node, input_type) except Notification as notification: if notification.name == 'InvalidAbstractType': notification.span = node.type.span else: notification.span = get_node_key(node).notification_span self._report_notification(notification) else: if node.type is None: # if default_descendant defines the Abstract type, add it to data structure node.type = TextValue() node.type.value = concrete_type.get('name') node.type.span = Span(node.span.start, node.span.start) concrete_type['implemented_abstract_record'] = input_type node.input_type = concrete_type self._validate_record(node, concrete_type) def _validate_array(self, node, input_type): """Validates an Array node.""" if not node.implementation == DataNode.Implementation.sequence: notification = Notification.from_name('ValidationTypeError', 'Array') notification.span = get_node_key(node).notification_span self._report_notification(notification) return try: checks.check_array(node.children, input_type) except Notification as notification: notification.span = get_node_key(node).notification_span self._report_notification(notification) for child in node.children: self._validate_node(child, input_type['subtype']) def _report_notification(self, notification): """Reports a notification.""" if notification.severity.value >= Notification.Severity.error.value: self.valid = False self.notification_handler.report(notification) def get_node_key(node): """Return node that has originated from the text structure (not autoconversion).""" while node.origin != DataNode.Origin.structure: node = node.parent return node
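# --- Usage sketch (not part of the original module) ---------------------------
# Illustrates the intended call sequence. The notification handler is assumed
# to expose a list of collected notifications (the attribute name below is an
# assumption); `root_node` and `root_input_type` would come from the data
# parser and the loaded input-structure tree (IST), respectively.
def validate_tree(notification_handler, root_node, root_input_type):
    """Validate a parsed data tree and return (is_valid, extracted_params)."""
    validator = Validator(notification_handler)
    is_valid = validator.validate(root_node, root_input_type)
    if not is_valid:
        # every error or warning raised during validation was reported to the handler
        for notification in getattr(notification_handler, 'notifications', []):
            print(notification)
    # parameters discovered while validating (e.g. <PARAM> placeholders)
    return is_valid, validator.params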
A good sign. When you’re building a brewery you have some good weeks and you have some long, stressful weeks. We’re hoping that this new sign (being installed by the masterful @leamansigns) that you’ll see when you’re driving down Commonwealth (between Richard Nolan and Old Placentia) is a sign of better weeks ahead. Design, as always, by @perfectday_ca. Keep your eyes peeled next week for the word brewery to join the party and for some beautiful nighttime shots when it's all lit up!
"""Test suite for phlgit_fetch.""" # ============================================================================= # TEST PLAN # ----------------------------------------------------------------------------- # Here we detail the things we are concerned to test and specify which tests # cover those concerns. # # Concerns: # TODO # ----------------------------------------------------------------------------- # Tests: # TODO # ============================================================================= from __future__ import absolute_import from __future__ import division from __future__ import print_function import contextlib import unittest import phlgit_fetch import phlgitu_fixture class Test(unittest.TestCase): def setUp(self): pass def tearDown(self): pass def testBreathing(self): # pychecker won't recognise the attributes on 'f' if we create it in # the closing parameter list and use 'as', at least not if we create an # alias to the CentralisedWithTwoWorkers class # f = phlgitu_fixture.CentralisedWithTwoWorkers() with contextlib.closing(f): phlgit_fetch.prune_safe(f.w0.repo, 'origin') def _setupBranchBomb(self, fixture): """Setup a situation where fetching on w0 will fail. :fixture: a phlgitu_fixture.CentralisedWithTwoWorkers :returns: None """ fixture.w1.repo('push', 'origin', 'HEAD:refs/heads/mybranch') fixture.w0.repo('fetch', '--prune') fixture.w1.repo('push', 'origin', ':refs/heads/mybranch') fixture.w1.repo('push', 'origin', 'HEAD:refs/heads/mybranch/bomb') def testBranchBomb(self): f = phlgitu_fixture.CentralisedWithTwoWorkers() with contextlib.closing(f): self._setupBranchBomb(f) phlgit_fetch.prune_safe(f.w0.repo, 'origin') f.w0.repo('fetch', '--prune') phlgit_fetch.all_prune(f.w0.repo) def testFetchSpec(self): fetchspec = ["+refs/heads/*:refs/remotes/origin/*"] fetchspec_nonexistant = ["+refs/nope/*:refs/heads/__private_nope/*"] f = phlgitu_fixture.CentralisedWithTwoWorkers() with contextlib.closing(f): phlgit_fetch.prune_safe(f.w0.repo, 'origin', []) phlgit_fetch.prune_safe(f.w0.repo, 'origin', fetchspec) phlgit_fetch.prune_safe(f.w0.repo, 'origin', fetchspec_nonexistant) # ----------------------------------------------------------------------------- # Copyright (C) 2014-2015 Bloomberg Finance L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ------------------------------ END-OF-FILE ----------------------------------
from PIL import Image, ImageOps import urllib.request from io import StringIO import boto from boto.s3.key import Key from configs import settings from application.cel import celery @celery.task def upload(space, path, image=None, url=None, async=True, make_thumbnails=True): conn = boto.connect_s3(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY) bucket_name = space bucket = conn.get_bucket(bucket_name) k = Key(bucket) def make_thumb(image): im = Image.open(image) for size in [(400, 400), (150, 150)]: output = StringIO() im2 = ImageOps.fit(im, size, Image.ANTIALIAS) im2.save(output, "JPEG") k.key = "thumbnails/%sx%s/%s"%(size[0], size[1], path) k.set_contents_from_string(output.getvalue()) k.make_public() output.close() # save original img if image is None and url: fd = urllib.request.urlopen(url) image = StringIO(fd.read()) else: image = StringIO(image) k.key = path k.set_contents_from_file(image) k.make_public() # make thumbnails if make_thumbnails: make_thumb(image) image.close() orig_url = "http://assets.maybi.cn/%s"%path return orig_url @celery.task def make_thumbnails(space, path, url, async=True): conn = boto.connect_s3(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY) bucket_name = space bucket = conn.get_bucket(bucket_name) k = Key(bucket) # save original img fd = urllib.request.urlopen(url) image = StringIO(fd.read()) im = Image.open(image) for size in [(480, 480), (180, 180)]: output = StringIO() im2 = ImageOps.fit(im, size, Image.ANTIALIAS) im2.save(output, "JPEG") k.key = "post_thumbs/%sx%s/%s"%(size[0], size[1], path) k.set_contents_from_string(output.getvalue()) k.make_public() output.close() @celery.task def save_avatar(space, path, url, save_original=False, async=True): conn = boto.connect_s3(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY) bucket_name = space bucket = conn.get_bucket(bucket_name) k = Key(bucket) fd = urllib.request.urlopen(url) image = StringIO(fd.read()) # save original img if save_original: k.key = path k.set_contents_from_file(image) k.make_public() im = Image.open(image) for size in [(200, 200), (80, 80)]: output = StringIO() im2 = ImageOps.fit(im, size, Image.ANTIALIAS) im2.save(output, "JPEG") k.key = "avatar_thumbs/%sx%s/%s"%(size[0], size[1], path) k.set_contents_from_string(output.getvalue()) k.make_public() output.close()
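# --- Usage sketch (not part of the original module) ---------------------------
# Enqueueing the tasks from application code; the space, path and URL values
# are illustrative. Note that with Python 3's urllib.request the downloaded
# bytes generally need io.BytesIO rather than io.StringIO before PIL can open
# them.
def example_enqueue():
    # Upload an image fetched from a URL and build the 400x400 / 150x150 thumbnails.
    upload.delay('my-space', 'uploads/2016/photo.jpg',
                 url='http://example.com/photo.jpg')
    # Build only the 480x480 / 180x180 post thumbnails for an already-stored image.
    make_thumbnails.delay('my-space', 'posts/2016/photo.jpg',
                          'http://assets.maybi.cn/posts/2016/photo.jpg')
    # Store only the resized avatars for a user.
    save_avatar.delay('my-space', 'avatars/user_42.jpg',
                      'http://example.com/avatar.jpg', save_original=False)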
Our Winemaker Wedding Menu is our exclusive, plated dinner option that offers a culinary vision beyond compare. Your guests will savor rich and flavorful hors d'oeuvres, freshly prepared salads with locally grown produce and house-made dressings, a choice of scrumptious entrees prepared especially for your guests, rich sides, and fresh, seasonal vegetable selections. A 20% service charge and applicable sales tax will be added to the prices above.
from xml.dom import minidom import numpy as np import os.path as osp # A map from xml`s sample_type string to numpy.fromfile function argument. # Numpy treats 'float' as float64 (=double) and 'float32' as float32 NP_TYPES = {'double': 'float', 'float': 'float32'} class signalParser(object): """This class can extract some information from signal and it's xml descriptors""" def __init__(self, file_prefix): """Check for file and it's descriptors This function initializes class and checks for files: .raw - contains the signal .xml - contains signal description .tag - contains experiment tags """ file_prefix = osp.expanduser(file_prefix) if osp.exists(file_prefix+'.raw'): self.raw_file = file_prefix + '.raw' else: raise IOError(file_prefix+'.raw doest exist!') if osp.exists(file_prefix+'.xml'): self.xml_file = file_prefix + '.xml' else: raise IOError(file_prefix+'.xml does not exist!') if osp.exists(file_prefix+'.tag'): self.tag_file = file_prefix + '.tag' else: print "Warning: "+file_prefix+".tag does not exist!" self.montage = 0 self.channel_count, self.sample_count, self.sampling_frequency = self.__get_xml_elems() self.channel_list = self.__get_channel_list() def extract_channel(self,channel_list, filt = None): """This extracts channels from .raw file The filt parameter should be a function of len(channel_list) parameters. if filt is None than raw signals are returned. If not, the output of filt function is returned""" return self.__get_filtered_channels(self.__channels_no(channel_list), filt) def __get_xml_elems(self): """Returns number of channels""" fxml = minidom.parse(self.xml_file) return int(fxml.getElementsByTagName('rs:channelCount')[0].firstChild.data), \ int(fxml.getElementsByTagName('rs:sampleCount')[0].firstChild.data), \ float(fxml.getElementsByTagName('rs:samplingFrequency')[0].firstChild.data) def getSamplingFrequency(self): return self.sampling_frequency def __get_channel_list(self): """Returns list of channels from .xml file""" fxml = minidom.parse(self.xml_file) return [x.firstChild.data for x in fxml.getElementsByTagName('rs:label')] def getChannelList(self): return self.__get_channel_list() def __get_filtered_channels(self, channel_list, filt): """Returns channels filtered wit filt function""" fxml = minidom.parse(self.xml_file) sample_type = fxml.getElementsByTagName('rs:sampleType')[0].firstChild.data ch_no = self.channel_count sig = np.fromfile(self.raw_file, NP_TYPES[sample_type.lower()]) signal = np.zeros([len(channel_list), self.sample_count]) print ("DEBUG GET FILTERED: "+str(sample_type)+ " / "+str(ch_no)+" / "+str(sig.shape)+" / "+str(signal.shape)+" / "+str(channel_list)) for i,v in enumerate(channel_list): signal[i] = sig[v::ch_no][0:self.sample_count] if filt != None: return filt(signal) else: return signal def __channels_no(self, ch_list): """If in ch_list is string describing a channel, it is converted to channel no using .xml file""" ch_str_list = self.channel_list real_ch_list = [] for i in ch_list: if isinstance(i, int): real_ch_list.append(i) elif isinstance(i, str) or isinstance(i, unicode): try: real_ch_list.append(ch_str_list.index(i)) except ValueError: print "Wrong channel name "+i raise else: raise ValueError("Channel name must be a string or integer") return real_ch_list def get_channel(self, channel): """Returns number of channel (if channel is a string) or channel name (if channel is an integer)""" ch_str_list = self.channel_list if isinstance(channel, int): return ch_str_list[channel] elif isinstance(channel, str): try: return 
ch_str_list.index(channel) except ValueError: print "Can not find this channel" raise else: raise ValueError("Channel must be a string or an integer") def setMontage(self, montage): self.montage = self.extract_channel(montage).mean(axis=0) def getData(self, channels): s = self.extract_channel(channels) return s - self.montage def getAllTags(self,inSamples=True): ftags = minidom.parse(self.tag_file) tagArray = [] for tag in ftags.getElementsByTagName('tag'): tagTime = float(tag.attributes['position'].value) if tagTime - t > approxTimeDiff: tagArray.append(tagTime) if inSamples: return np.array(tagArray)*self.sampling_frequency else: return np.array(tagArray) def getTrialTags(self, approxTimeDiff=2, inSamples=True): ftags = minidom.parse(self.tag_file) tagArray = [] t = 0 for tag in ftags.getElementsByTagName('tag'): tagTime = float(tag.attributes['position'].value) if tagTime - t > approxTimeDiff: tagArray.append(tagTime) t = tagTime if inSamples: return np.array(tagArray)*self.sampling_frequency else: return np.array(tagArray) def get_train_tags(self, trial_separator_name='trial', screen = False, tag_filter = None, ccof = False ): """Extracts positions an stimulation frequencies from .tag file Parameters: =========== screen [= False] : bool if True a 'freq' tag will be considered when choosing stimulation frequency tag_filter [= None] : tuple a tuple of strings. First element is a name of a tag, second is value of the tag. This will limit the function to consider only tags specified in the tuple. ccof [= Flase] : bool if True a concentrating_on_field tag will be considered when choosing stimulation frequency Returns: ======== tags : list a list of tuples. First element is time (in seconds) denoting start of the stimulation. Second element is frequency of the stimulation """ ftags = minidom.parse(self.tag_file) exp_update_list_all = [e for e in ftags.getElementsByTagName('tag')\ if e.attributes['name'].value == trial_separator_name]# \ #or e.attributes['name'].value == 'experiment__screen_break'] if tag_filter is None: exp_update_list = exp_update_list_all else: exp_update_list = [e for e in exp_update_list_all \ if e.getElementsByTagName(tag_filter[0])[0].firstChild.data == tag_filter[1]] tag_list = [] for i,exp in enumerate(exp_update_list): position = float(exp.attributes['position'].value) if screen: #cor_tab = [36, 38, 40, 42] scr = exp.getElementsByTagName('freq')[0].firstChild.data #scr_idx = int(scr)#int(scr.split('_')[-1]) #frq = cor_tab[scr_idx - 1] #frq = np.array(eval(exp.getElementsByTagName('freqs')[0].firstChild.data))[scr_idx - 1] frq = int(scr) elif ccof: scr = exp.getElementsByTagName('concentrating_on_field')[0].firstChild.data frq = np.array(eval(exp.getElementsByTagName('freqs')[0].firstChild.data))[int(scr)] else: f1 = exp.getElementsByTagName('freqs')[0].firstChild.data frq = eval(f1)[1] #frq = frq_list[int(exp.getElementsByTagName('concentrating_on_field')[0].firstChild.data)] #tag = screen_tag[0].firstChild.data tag_list.append((position, frq)) return tag_list ####################################################### def get_all_tags(self, idx=1, samples = True, Fs = None): ftag = minidom.parse(self.tag_file) tag_list = [e for e in ftag.getElementsByTagName('tag') \ if e.attributes['name'].value == 'blink'] exp_list = {} fsp = self.sampling_frequency if(samples): if Fs != None: fsp = Fs else: fsp = 1.0 for e in tag_list: index = e.getElementsByTagName('index')[0].firstChild.data timestamp = float(e.attributes['position'].value) exp_list[timestamp*fsp] = int(index) 
return exp_list def get_not_p300_tags(self, idx=1, samples = True, Fs = None): """Returns tags with words from different groups Parameters: ----------- idx [= 1]: int defines which tags to return samples : bool if true, positions will be returned as samples not in seconds Fs : float or None the sampling frequency used to convert positions to samples Returns: -------- exp_list : list a list of positions of target """ ftag = minidom.parse(self.tag_file) tag_list = [e for e in ftag.getElementsByTagName('tag') \ if e.attributes['name'].value == 'blink'] exp_list = [] fsp = self.sampling_frequency if(samples): if Fs != None: fsp = Fs else: fsp = 1.0 # If int passed as target -> change it into list if isinstance(idx, int): idx = [idx] for e in tag_list: index = e.getElementsByTagName('index')[0].firstChild.data if int(index) not in idx: exp_list.append(float(e.attributes['position'].value)) return np.array(exp_list) * fsp def get_p300_tags(self, idx=1, samples = True, Fs = None): """Returns tags with words from different groups Parameters: ----------- idx [= 1]: int defines which tags to return samples : bool if true, positions will be returned as samples not in seconds Fs : float or None the sampling frequency used to convert positions to samples Returns: -------- exp_list : list a list of positions of target """ ftag = minidom.parse(self.tag_file) tag_list = [e for e in ftag.getElementsByTagName('tag') \ if e.attributes['name'].value == 'blink'] exp_list = [] fsp = self.sampling_frequency if(samples): if Fs != None: fsp = Fs else: fsp = 1.0 for e in tag_list: index = e.getElementsByTagName('index')[0].firstChild.data # If int passed as target -> change it into list if isinstance(idx, int): idx = [idx] for e in tag_list: index = e.getElementsByTagName('index')[0].firstChild.data if int(index) in idx: exp_list.append(float(e.attributes['position'].value)) return np.array(exp_list) * fsp def getTargetNontarget(self, signal, trgTags, ntrgTags): self.chL = signal.shape[0] self.Fs = self.getSamplingFrequency() print "self.Fs: ", self.Fs ## Get target data and stuck it into numpy arrays target = np.zeros((len(trgTags), self.chL, self.Fs)) nontarget = np.zeros((len(ntrgTags), self.chL, self.Fs)) # Target trials for idx, tag in enumerate(trgTags): index = int(tag) target[idx] = signal[:,index:index+self.Fs] # Nontarget trials for idx, tag in enumerate(ntrgTags): index = int(tag) nontarget[idx] = signal[:, index:index+self.Fs] return target, nontarget
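# --- Usage sketch (not part of the original module) ---------------------------
# Python 2 style, matching the prints above. The file prefix and channel names
# are illustrative; the parser expects <prefix>.raw, <prefix>.xml and
# <prefix>.tag to exist side by side.
def example_read_session(prefix='~/data/session01'):
    parser = signalParser(prefix)

    print "Sampling frequency:", parser.getSamplingFrequency()
    print "Channels:", parser.getChannelList()

    # Re-reference to the mean of two reference channels, then pull EEG data.
    parser.setMontage(['A1', 'A2'])
    eeg = parser.getData(['Cz', 'Pz', 'O1'])   # shape: (n_channels, n_samples)

    # Stimulus onsets (in samples) for target index 1 versus everything else.
    targets = parser.get_p300_tags(idx=1, samples=True)
    nontargets = parser.get_not_p300_tags(idx=1, samples=True)
    return parser.getTargetNontarget(eeg, targets, nontargets)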
THESE wrestling champs are some of the top athletes who will be helping pupils set and achieve personal ambitions. The Champions in Schools programme uses the power of role models to create positive change in the lives and lifestyles of children. And teenagers, like these ones from Calderhead High School in Shotts, will hopefully pick up a winning attitude from some of the country’s leading sportsmen and women. Twin sisters Fiona Robertson and Donna Robertson represented Scotland at judo in the Commonwealth Games and are now competing in wrestling. They delivered an inspirational workshop to the pupils. The programme will now spread out to other schools in North Lanarkshire. Champions in Schools is a role-model programme which places Scotland’s top international athletes in the classroom to help pupils set and achieve personal ambitions, pursue a healthy and active lifestyle, appreciate the value of hard work and adopt a positive, winning attitude. It is aimed at pupils in late primary and early secondary school years. An athlete will visit the youngsters three times over the school year. The champions guide, advise and motivate pupils to become winners who fulfil their potential and who believe that what is important is not how good they are now, but how good they want to become.
# Copyright (c) 2015, Javier Gonzalez # Copyright (c) 2015, the GPy Authors (see GPy AUTHORS.txt) # Licensed under the BSD 3-clause license (see LICENSE.txt) import numpy as np import time from ..util.general import best_value, reshape, spawn from ..core.optimization import lp_batch_optimization, random_batch_optimization, predictive_batch_optimization try: from ..plotting.plots_bo import plot_acquisition, plot_convergence except: pass class BO(object): def __init__(self, acquisition_func): self.acquisition_func = acquisition_func def _init_model(self): pass def run_optimization(self, max_iter = None, n_inbatch=1, acqu_optimize_method='fast_random', acqu_optimize_restarts=200, batch_method='predictive', eps = 1e-8, n_procs=1, true_gradients = True, verbose=True): """ Runs Bayesian Optimization for a number 'max_iter' of iterations (after the initial exploration data) :param max_iter: exploration horizon, or number of acquisitions. It nothing is provided optimizes the current acquisition. :param n_inbatch: number of samples to collected everytime *f* is evaluated (one by default). :param acqu_optimize_method: method to optimize the acquisition function -'DIRECT': uses the DIRECT algorithm of Jones and Stuckmann. -'CMA': uses the Covariance Matrix Adaptation Algorithm. -'brute': Run local optimizers in a grid of points. -'random': Run local optimizers started at random locations. -'fast_brute': the same as brute but runs only one optimizer in the best location. It is used by default. -'fast_random': the same as random but runs only one optimizer in the best location. :param acqu_optimize_restarts: numbers of random restarts in the optimization of the acquisition function, default = 20. :param batch_method: method to collect samples in batches -'predictive': uses the predicted mean in the selected sample to update the acquisition function. -'lp': used a penalization of the acquisition function to based on exclusion zones. -'random': collects the element of the batch randomly :param eps: minimum distance between two consecutive x's to keep running the model :param n_procs: The number of processes used for evaluating the given function *f* (ideally nprocs=n_inbatch). :param true_gradients: If the true gradients (can be slow) of the acquisition ar an approximation is used (True, default). :param save_interval: number of iterations after which a file is produced with the current results. """ # --- Load the parameters of the function into the object. 
if max_iter == None: self.max_iter = 10*self.input_dim else: self.max_iter = max_iter self.num_acquisitions = 0 self.n_inbatch=n_inbatch self.batch_method = batch_method if batch_method=='lp': from .acquisition import AcquisitionMP if not isinstance(self.acquisition_func, AcquisitionMP): self.acquisition_func = AcquisitionMP(self.acquisition_func, self.acquisition_func.acquisition_par) self.eps = eps self.acqu_optimize_method = acqu_optimize_method self.acqu_optimize_restarts = acqu_optimize_restarts self.acquisition_func.set_model(self.model) self.n_procs = n_procs # --- Decide wether we use the true gradients to optimize the acquitision function if true_gradients !=True: self.true_gradients = False self.acquisition_func.d_acquisition_function = None else: self.true_gradients = true_gradients # --- Get starting of running time self.time = time.time() # --- If this is the first time to optimization is run - update the model and normalize id needed if self.first_time_optimization: self._update_model() prediction = self.model.predict(self.X) self.s_in_min = np.sqrt(abs(prediction[1])) self.first_time_optimization = False # --- Initialization of stop conditions. k=0 distance_lastX = np.sqrt(sum((self.X[self.X.shape[0]-1,:]-self.X[self.X.shape[0]-2,:])**2)) # --- BO loop: this loop does the hard work. while k<self.max_iter and distance_lastX > self.eps: # --- Augment X self.X = np.vstack((self.X,self.suggested_sample)) # --- Evaluate *f* in X and augment Y if self.n_procs==1: self.Y = np.vstack((self.Y,self.f(np.array(self.suggested_sample)))) else: try: # --- Parallel evaluation of *f* if several cores are available from multiprocessing import Process, Pipe from itertools import izip divided_samples = [self.suggested_sample[i::self.n_procs] for i in xrange(self.n_procs)] pipe=[Pipe() for i in xrange(self.n_procs)] proc=[Process(target=spawn(self.f),args=(c,x)) for x,(p,c) in izip(divided_samples,pipe)] [p.start() for p in proc] [p.join() for p in proc] rs = [p.recv() for (p,c) in pipe] self.Y = np.vstack([self.Y]+rs) except: if not hasattr(self, 'parallel_error'): print 'Error in parallel computation. Fall back to single process!' self.parallel_error = True self.Y = np.vstack((self.Y,self.f(np.array(self.suggested_sample)))) # --- Update internal elements (needed for plotting) self.num_acquisitions += 1 pred_min = self.model.predict(reshape(self.suggested_sample,self.input_dim)) self.s_in_min = np.vstack((self.s_in_min,np.sqrt(abs(pred_min[1])))) # --- Update model try: self._update_model() except np.linalg.linalg.LinAlgError: break # --- Update stop conditions k +=1 distance_lastX = np.sqrt(sum((self.X[self.X.shape[0]-1,:]-self.X[self.X.shape[0]-2,:])**2)) # --- Stop messages and execution time self.Y_best = best_value(self.Y) self.x_opt = self.X[np.argmin(self.Y),:] self.fx_opt = min(self.Y) self.time = time.time() - self.time # --- Print stopping reason if verbose: print '*Optimization completed:' if k==self.max_iter and distance_lastX > self.eps: if verbose: print ' -Maximum number of iterations reached.' return 1 else: if verbose: print ' -Method converged.' 
return 0 def change_to_sparseGP(self, num_inducing): """ Changes standard GP estimation to sparse GP estimation :param num_inducing: number of inducing points for sparse-GP modeling """ if self.sparse == True: raise 'Sparse GP is already in use' else: self.num_inducing = num_inducing self.sparse = True self._init_model(self.X,self.Y) def change_to_standardGP(self): """ Changes sparse GP estimation to standard GP estimation """ if self.sparse == False: raise 'Sparse GP is already in use' else: self.sparse = False self._init_model(self.X,self.Y) def _optimize_acquisition(self): """ Optimizes the acquisition function. This function selects the type of batch method and passes the arguments for the rest of the optimization. """ # ------ Elements of the acquisition function acqu_name = self.acqu_name acquisition = self.acquisition_func.acquisition_function d_acquisition = self.acquisition_func.d_acquisition_function acquisition_par = self.acquisition_par model = self.model # ------ Parameters to optimize the acquisition acqu_optimize_restarts = self.acqu_optimize_restarts acqu_optimize_method = self.acqu_optimize_method n_inbatch = self.n_inbatch bounds = self.bounds # ------ Selection of the batch method (if any, predictive used when n_inbathc=1) if self.batch_method == 'predictive': X_batch = predictive_batch_optimization(acqu_name, acquisition_par, acquisition, d_acquisition, bounds, acqu_optimize_restarts, acqu_optimize_method, model, n_inbatch) elif self.batch_method == 'lp': X_batch = lp_batch_optimization(self.acquisition_func, bounds, acqu_optimize_restarts, acqu_optimize_method, model, n_inbatch) elif self.batch_method == 'random': X_batch = random_batch_optimization(acquisition, d_acquisition, bounds, acqu_optimize_restarts,acqu_optimize_method, model, n_inbatch) return X_batch def _update_model(self): """ Updates X and Y in the model and re-optimizes the parameters of the new model """ # ------- Normalize acquisition function (if needed) if self.normalize: self.model.set_XY(self.X,(self.Y-self.Y.mean())/(self.Y.std())) else: self.model.set_XY(self.X,self.Y) # ------- Optimize model when required if (self.num_acquisitions%self.model_optimize_interval)==0: self.model.optimization_runs = [] # clear previous optimization runs so they don't get used. self.model.optimize_restarts(num_restarts=self.model_optimize_restarts, verbose=self.verbosity) # ------- Optimize acquisition function self.suggested_sample = self._optimize_acquisition() def plot_acquisition(self,filename=None): """ Plots the model and the acquisition function. if self.input_dim = 1: Plots data, mean and variance in one plot and the acquisition function in another plot if self.input_dim = 2: as before but it separates the mean and variance of the model in two different plots :param filename: name of the file where the plot is saved """ return plot_acquisition(self.bounds,self.input_dim,self.model,self.model.X,self.model.Y,self.acquisition_func.acquisition_function,self.suggested_sample,filename) def plot_convergence(self,filename=None): """ Makes three plots to evaluate the convergence of the model plot 1: Iterations vs. distance between consecutive selected x's plot 2: Iterations vs. the mean of the current model in the selected sample. plot 3: Iterations vs. the variance of the current model in the selected sample. 
:param filename: name of the file where the plot is saved """ return plot_convergence(self.X,self.Y_best,self.s_in_min,filename) def get_evaluations(self): return self.X.copy(), self.Y.copy() def save_report(self, report_file= 'GPyOpt-results.txt ' ): """ Save a report with the results of the optimization. A file is produced every :param report_file: name of the file in which the results of the optimization are saved. """ with open(report_file,'w') as file: file.write('---------------------------------' + ' Results file ' + '--------------------------------------\n') file.write('GPyOpt Version 1.0.0 \n') file.write('Date and time: ' + time.strftime("%c")+'\n') if self.num_acquisitions==self.max_iter: file.write('Optimization completed: ' +'YES, ' + str(self.X.shape[0]).strip('[]') + ' samples collected.\n') else: file.write('Optimization completed: ' +'NO,' + str(self.X.shape[0]).strip('[]') + ' samples collected.\n') file.write('Optimization time: ' + str(self.time).strip('[]') +' seconds.\n') file.write('---------------------------------' + ' Problem set up ' + '------------------------------------\n') file.write('Problem Dimension: ' + str(self.input_dim).strip('[]') +'\n') file.write('Problem bounds: ' + str(self.bounds).strip('[]') +'\n') file.write('Batch size: ' + str(self.n_inbatch).strip('[]') +'\n') file.write('Acquisition: ' + self.acqu_name + '\n') file.write('Acquisition optimizer: ' + self.acqu_optimize_method+ '\n') file.write('Sparse GP: ' + str(self.sparseGP).strip('[]') + '\n') file.write('---------------------------------' + ' Summary ' + '------------------------------------------\n') file.write('Best found minimum: ' + str(min(self.Y)).strip('[]') +'\n') file.write('Minumum location: ' + str(self.X[np.argmin(self.Y),:]).strip('[]') +'\n') file.close()
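# --- Usage sketch (not part of the original module) ---------------------------
# This base class expects a subclass to provide self.model, self.X, self.Y,
# self.f, self.bounds, etc. In the released package that role is played by the
# public wrapper (the class name GPyOpt.methods.BayesianOptimization and its
# constructor arguments are assumptions about the surrounding package); the
# run_optimization keywords below are the ones documented above.
def example_minimize():
    import GPyOpt  # assumed package entry point

    def f(x):
        # toy objective: squared norm, returned as a column vector
        return np.sum(np.square(x), axis=1)[:, None]

    bounds = [(-1, 1), (-1, 1)]
    bo = GPyOpt.methods.BayesianOptimization(f=f, bounds=bounds)
    bo.run_optimization(max_iter=15,
                        n_inbatch=3,                 # evaluate f on 3 points per step
                        batch_method='lp',           # local-penalization batches
                        acqu_optimize_method='fast_random')
    print 'minimum %s found at %s' % (bo.fx_opt, bo.x_opt)
    bo.save_report('GPyOpt-results.txt')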
The F5 private cloud solution package for OpenStack provides joint certification and testing with Red Hat to orchestrate F5® BIG-IP® Application Delivery Controllers (ADCs) with OpenStack Networking services. The validated solutions and use cases are based on customer requirements utilizing BIG-IP ADC and OpenStack integrations. F5’s OpenStack LBaaSv2 integration provides under-the-cloud L4–L7 services for OpenStack Networking tenants. F5’s OpenStack orchestration (HEAT) templates provide over-the-cloud, single-tenant onboarding of BIG-IP virtual edition (VE) ADC clusters and F5 iApps® templating for application services deployment. So why did we spend the time to develop this Private Cloud Solution Package for Red Hat OpenStack Platform v9? And why do I think it is valuable? Well several different reasons. First, if you are like me, six months ago I had no idea where to start. How do I build an OpenStack cloud so I can test and understand OpenStack? Do I build a Faux-penStack in a single VM on my laptop? Do I purchase a labs worth of machines to build it out? Those were my initial questions. It seems to be the question a lot of enterprises are also facing. In a study commissioned by Suse, 50% of those who have started an OpenStack initiative have failed. The fact is, OpenStack is difficult. There are many, many options. There are many, many configuration files. Until you are grounded in what each does and how they interact, it all seems like a bunch of gibberish. So, we first created this Private Cloud Solution Package with Red Hat to provide that starting point. It is intended to be the ‘You Are Here’ marker for a successful deployment of a real-world production cloud which meets the needs of an enterprise. The deployment guide marries the Red Hat install guide with specific instruction gained through setting up our Red Hat OpenStack Platform many times. The aim isn’t to provide answers to questions about each configuration option, or to provide multiple paths with a decision tree for options which differ and many times conflict with each other. Our guidance is specific, and prescriptive. We wanted a documentation path that ensures a functioning cloud. Follow this step by step using the variable inputs we did and you will end up with a validated, known-good cloud. I hope you will find the effort we put into it to be of value. John, Mark, Dave, and I (Our “Pizza team”, as John called it—although we didn’t eat any pizza as we were all working remotely from one another) spent many hours getting to the point where we could create documentation, that when followed, creates a reproducible, functioning, and validated Red Hat OpenStack Platform v9 Overcloud with F5 LBaaS v2 functionality connected to a pair of F5’s new iSeries 5800 appliances. Get that, REPRODUCABLE, and VALIDATED. We can wipe our overcloud away, and redeploy the overcloud in our environment in 44 minutes. That includes reinstalling the LBaaS components and validating that they are working using OpenStack community tests. VALIDATED. That is the second reason we spent all this time creating this solution package. We wanted to define a way for our customers, the Red Hat and F5 support organizations, and for our two awesome Professional Services organizations to KNOW that what has been built is validated, and will function. To that end, as part of the package development we created a test and troubleshooting Docker container, and have released it on Github here. 
This container bundles up all the software requirements and specific software versions required to run the community OpenStack tempest tests against any newly installed OpenStack Red Hat Platform environment. These tests let you know definitely whether the cloud was built correctly. We’ll run a set of test against your cloud installation assuring networking is working before we install any F5 service components. We’ll run a set of tests after we install F5 service components assuring the proper functionality of these services we provide. We’ll leave you with the test results in a common testing format as documentation that your cloud tenants should be good to go. As we develop certified use cases of our technologies with our customers, we’ll write tests for those and run those too. Cool huh? This is DevOps after all. It’s all about tested solutions. You don’t have to wait for our professional services to test your own cloud. By default, the test client includes all of the community tempest tests needed to validate LBaaS v2 on Liberty Red Hat OSP v8 (just for fun and to demonstrate the extensibility of the toolset) and Mitaka Red Hat OSPv9. Not only does it include the tests, and the toolsets to run them, get this, John even created a script which will generate the required tempest configuration files on the fly. Simply provide your overcloudrc file and the environment will be interrogated and the proper settings will be added to the config files. Again Cool. Testing is king, and we’re doing our best to hand out crowns.
""" A stationary rectangular solid that something may collide with """ import numpy as np from mpl_toolkits.mplot3d.art3d import Poly3DCollection from static import StaticObject class Obstacle(StaticObject): def __init__(self, pt1, pt2, color='r'): """Create a 3D Rectangle from 2 points :param pt1: The first point (x, y, z) defining the rect :type pt1: numpy.ndarray :param pt2: The second point (x, y, z) defining the rect :type pt2: numpy.ndarray :param color: color of the obstacle :type color: str :rtype: None """ self.pt1 = pt1 self.pt2 = pt2 self.color = color # TODO: Make this use numpy arrays instead of lists def get_paths(self): """Returns the paths for each of the surfaces of the rectangle for plotting. :returns (bottom, top, front, back, left, right) :rtype: list of 6 4x3 numpy.ndarrays """ [x1, y1, z1] = self.pt1 [x2, y2, z2] = self.pt2 pt1 = [x1, y1, z1] pt2 = [x1, y1, z2] pt3 = [x1, y2, z1] pt4 = [x1, y2, z2] pt5 = [x2, y1, z1] pt6 = [x2, y1, z2] pt7 = [x2, y2, z1] pt8 = [x2, y2, z2] bottom = [pt1, pt3, pt7, pt5] top = [pt2, pt4, pt8, pt6] front = [pt1, pt2, pt6, pt5] back = [pt3, pt4, pt8, pt7] left = [pt1, pt2, pt4, pt3] right = [pt5, pt6, pt8, pt7] paths = [bottom, top, front, back, left, right] return paths def is_hit(self, position): """Checks if the rectangle is hit by a point or path :param position: An objects position (x, y, z) or positions if it is a path([x1, x2, ..], [y1, y2, ..], [z1, z2, ..] :type position: numpy.ndarray or numpy.matrix :returns: Whether the obstacle was hit by a point or path :rtype: bool """ is_point = len(position.shape) == 1 if is_point: x, y, z = position else: assert position.shape[1] == 3 x = position[:, 0] y = position[:, 1] z = position[:, 2] [x1, y1, z1] = self.pt1 [x2, y2, z2] = self.pt2 x_hit = (x >= x1) & (x <= x2) y_hit = (y >= y1) & (y <= y2) z_hit = (z >= z1) & (z <= z2) all_hit = x_hit & y_hit & z_hit if is_point: return (x_hit and y_hit and z_hit) else: return np.any(all_hit) def is_hit_by_sphere(self, center, radius): """Checks if the rectangle is hit by a sphere :param center: Sphere's center (x, y, z) :type center: numpy.ndarray :param radius: The sphere's radius :type radius: int :returns: Whether obstacle was hit by a sphere :rtype: bool """ [x1, y1, z1] = self.pt1 [x2, y2, z2] = self.pt2 x, y, z = center x_hit = (x + radius >= x1) & (x - radius <= x2) y_hit = (y + radius >= y1) & (y - radius <= y2) z_hit = (z + radius >= z1) & (z - radius <= z2) return x_hit and y_hit and z_hit def display(self): """Display obstacle properties :rtype: None """ print "Center: {}".format(self.center) print "Width: {}".format(self.width) print "Height: {}".format(self.height) print "Depth: {}".format(self.depth) def plot(self, ax): """Plots the obstacle at its location :param ax: Figure to plot on :type ax: matplotlib.axes :rtpye: None """ paths = self.get_paths() rectangle = Poly3DCollection(paths, facecolors=self.color) ax.add_collection3d(rectangle)
After all the hearings, demonstrations and opinion polls confirming that public opinion is against nuclear power, the ruling Democratic Party of Japan (民主党 minshuto), gearing up for a general election, cobbled together the main points of its new energy policy last week. The goal was to be no nuclear plants by 2030 (genpatsu zero 原発ゼロ), and the main points were: nuclear power plants to be taken out of service after 40 years, the decision to restart plants to be taken by the new nuclear standards agency, and no new nuclear plants to be built. The very next day local officials from Rokkasho in Aomori in the very north of the country and home to the nation's only reprocessing plant, went to Tokyo to protest that if all the nuclear plants are going to be closed and not use the fuel they’ve been reprocessing then they’re going to send the fuel back to where it came from - nuclear plants all over the country. They don’t want their area turned into a dump for nuclear waste. America expressed concern too. What’s going to happen to the 30 tons of plutonium that's been extracted from the spent fuel and was to be re-used? Plutonium that could be used for nuclear weapons if it got into the wrong hands. The UK and France aren’t happy either. They’ve been reprocessing fuel for Japan and have stocks that they want taking off their hands. Then yesterday the Minister of Economy and Industry went to Aomori and gave the green light to two new reactors under construction. If these are built they will be in operation until the mid 2050’s. What’s going on here? The ‘new energy policy’ has been exposed as a shallow, vote-getting policy. The LDP (Liberal Democratic Party, 自民党 jiminto) which was in power since the war but lost three years ago has been parading its five aged leadership candidates (including one ex prime minister). The five appear together on TV and say more or less the same things. More like an advert for the party than rivals. Odd. But they're all saying that though they sympathise with the sentiments for zero nuclear the reality is much more complex. Even the PM no longer talks of 'zero nuclear' but 'reducing the dependence on nuclear'. Maybe the tide has turned and there’s going to be more measured debate from now on. The 'miracle pine tree' (奇跡の一本松 kiseki no ippon matsu) in Rikuzen Takada, the only one left standing after the tsunami, is terminally ill and has been cut down. It’s going to be stuffed and put back in time for the anniversary next year! We moved the factory a year ago – in lovely autumn weather. This year it’s still hot, over 30’C every day. When will it end? Tomorrow, Monday, is a holiday. Respect the Aged Day. My mother-in-law turned 100 in January and joins the 50,000 people in this country aged over 100. She's sound in body and mind. Amazing.
#!/usr/bin/env python # -*- coding: utf-8 -*- # # PCR-GLOBWB (PCRaster Global Water Balance) Global Hydrological Model # # Copyright (C) 2016, Ludovicus P. H. (Rens) van Beek, Edwin H. Sutanudjaja, Yoshihide Wada, # Joyce H. C. Bosmans, Niels Drost, Inge E. M. de Graaf, Kor de Jong, Patricia Lopez Lopez, # Stefanie Pessenteiner, Oliver Schmitz, Menno W. Straatsma, Niko Wanders, Dominik Wisser, # and Marc F. P. Bierkens, # Faculty of Geosciences, Utrecht University, Utrecht, The Netherlands # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import datetime import subprocess import os import types from pcraster.framework import * import pcraster as pcr import logging logger = logging.getLogger(__name__) import waterBodies_for_modflow as waterBodies import virtualOS as vos from ncConverter import * class GroundwaterModflow(object): def getState(self): result = {} # groundwater head (unit: m) for all layers for i in range(1, self.number_of_layers+1): var_name = 'groundwaterHeadLayer'+str(i) result[var_name] = vars(self)[var_name] return result def getGroundwaterDepth(self): result = {} # groundwater head (unit: m) for all layers for i in range(1, self.number_of_layers+1): var_name = 'groundwaterDepthLayer'+str(i) headname = 'groundwaterHeadLayer' +str(i) result[var_name] = self.dem_average - vars(self)[headname] return result def getVariableValuesForPCRGLOBWB(self): result = {} result['relativeGroundwaterHead'] = pcr.ifthen(self.landmask, self.relativeGroundwaterHead) result['baseflow'] = pcr.ifthen(self.landmask, self.baseflow) result['storGroundwater'] = pcr.ifthen(self.landmask, self.storGroundwater) return result def __init__(self, iniItems, landmask): object.__init__(self) # cloneMap, temporary directory for the resample process, temporary directory for the modflow process, absolute path for input directory, landmask self.cloneMap = iniItems.cloneMap self.tmpDir = iniItems.tmpDir self.tmp_modflow_dir = iniItems.tmp_modflow_dir self.inputDir = iniItems.globalOptions['inputDir'] self.landmask = landmask # configuration from the ini file self.iniItems = iniItems # number of modflow layers: self.number_of_layers = int(iniItems.modflowParameterOptions['number_of_layers']) # topography properties: read several variables from the netcdf file for var in ['dem_minimum','dem_maximum','dem_average','dem_standard_deviation',\ 'slopeLength','orographyBeta','tanslope',\ 'dzRel0000','dzRel0001','dzRel0005',\ 'dzRel0010','dzRel0020','dzRel0030','dzRel0040','dzRel0050',\ 'dzRel0060','dzRel0070','dzRel0080','dzRel0090','dzRel0100']: vars(self)[var] = vos.netcdf2PCRobjCloneWithoutTime(self.iniItems.modflowParameterOptions['topographyNC'], \ var, self.cloneMap) vars(self)[var] = pcr.cover(vars(self)[var], 0.0) # channel properties: read several variables from the netcdf file for var in ['lddMap','cellAreaMap','gradient','bankfull_width', 'bankfull_depth','dem_floodplain','dem_riverbed']: vars(self)[var] = 
vos.netcdf2PCRobjCloneWithoutTime(self.iniItems.modflowParameterOptions['channelNC'], \ var, self.cloneMap) vars(self)[var] = pcr.cover(vars(self)[var], 0.0) # minimum channel width minimum_channel_width = 5.0 # TODO: Define this one in the configuration file self.bankfull_width = pcr.max(minimum_channel_width, self.bankfull_width) #~ # cell fraction if channel water reaching the flood plain # NOT USED YET #~ self.flood_plain_fraction = self.return_innundation_fraction(pcr.max(0.0, self.dem_floodplain - self.dem_minimum)) # coefficient of Manning self.manningsN = vos.readPCRmapClone(self.iniItems.modflowParameterOptions['manningsN'],\ self.cloneMap,self.tmpDir,self.inputDir) # minimum channel gradient minGradient = 0.00005 # TODO: Define this one in the configuration file minGradient = 0.000005 # 24 March 2016: I lower this so that we don't have too deep water table. # TODO: Define this one in the configuration file self.gradient = pcr.max(minGradient, pcr.cover(self.gradient, minGradient)) # correcting lddMap self.lddMap = pcr.ifthen(pcr.scalar(self.lddMap) > 0.0, self.lddMap) self.lddMap = pcr.lddrepair(pcr.ldd(self.lddMap)) # channelLength = approximation of channel length (unit: m) # This is approximated by cell diagonal. cellSizeInArcMin = np.round(pcr.clone().cellSize()*60.) # FIXME: This one will not work if you use the resolution: 0.5, 1.5, 2.5 arc-min verticalSizeInMeter = cellSizeInArcMin*1852. horizontalSizeInMeter = self.cellAreaMap/verticalSizeInMeter self.channelLength = ((horizontalSizeInMeter)**(2)+\ (verticalSizeInMeter)**(2))**(0.5) # option for lakes and reservoir self.onlyNaturalWaterBodies = False if self.iniItems.modflowParameterOptions['onlyNaturalWaterBodies'] == "True": self.onlyNaturalWaterBodies = True ####### options for sensitivity analysis on river depth and conductivites : Inge jan 2019 ########## self.factor_cond = vos.readPCRmapClone(self.iniItems.modflowParameterOptions['factorcond'],\ self.cloneMap,self.tmpDir,self.inputDir) self.factor_riverdepth = vos.readPCRmapClone(self.iniItems.routingOptions['depthFactor'],\ self.cloneMap,self.tmpDir,self.inputDir) self.dem_riverbed= pcr.cover(self.dem_riverbed*self.factor_riverdepth) ###################################################################################### # a netcdf file containing the groundwater properties if iniItems.groundwaterOptions['groundwaterPropertiesNC'] != "None": groundwaterPropertiesNC = vos.getFullPath(\ iniItems.groundwaterOptions[\ 'groundwaterPropertiesNC'],self.inputDir) ###################################################################################### ##################################################################################################################################################### # assign aquifer specific yield (dimensionless) if iniItems.groundwaterOptions['groundwaterPropertiesNC'] == "None" or 'specificYield' in iniItems.groundwaterOptions.keys(): self.specificYield = vos.readPCRmapClone(\ iniItems.groundwaterOptions['specificYield'],self.cloneMap,self.tmpDir,self.inputDir) else: self.specificYield = vos.netcdf2PCRobjCloneWithoutTime(\ groundwaterPropertiesNC,'specificYield',self.cloneMap) self.specificYield = pcr.cover(self.specificYield,0.0) self.specificYield = pcr.max(0.010,self.specificYield) # TODO: Set the minimum values of specific yield. 
self.specificYield = pcr.min(1.000,self.specificYield) ##################################################################################################################################################### ##################################################################################################################################################### # assign aquifer hydraulic conductivity (unit: m/day) if iniItems.groundwaterOptions['groundwaterPropertiesNC'] == "None" or 'kSatAquifer' in iniItems.groundwaterOptions.keys(): self.kSatAquifer = vos.readPCRmapClone(\ iniItems.groundwaterOptions['kSatAquifer'],self.cloneMap,self.tmpDir,self.inputDir) else: self.kSatAquifer = vos.netcdf2PCRobjCloneWithoutTime(\ groundwaterPropertiesNC,'kSatAquifer',self.cloneMap) self.kSatAquifer = pcr.cover(self.kSatAquifer,0.0) self.kSatAquifer = pcr.max(0.010,self.kSatAquifer) ##################################################################################################################################################### ##################################################################################################################################################### # try to assign the reccesion coefficient (unit: day-1) from the netcdf file of groundwaterPropertiesNC try: self.recessionCoeff = vos.netcdf2PCRobjCloneWithoutTime(\ groundwaterPropertiesNC,'recessionCoeff',\ cloneMapFileName = self.cloneMap) except: self.recessionCoeff = None msg = "The 'recessionCoeff' cannot be read from the file: "+groundwaterPropertiesNC logger.warning(msg) # assign the reccession coefficient based on the given pcraster file if 'recessionCoeff' in iniItems.groundwaterOptions.keys(): if iniItems.groundwaterOptions['recessionCoeff'] != "None":\ self.recessionCoeff = vos.readPCRmapClone(iniItems.groundwaterOptions['recessionCoeff'],self.cloneMap,self.tmpDir,self.inputDir) # calculate the reccession coefficient based on the given parameters if isinstance(self.recessionCoeff,types.NoneType) and\ 'recessionCoeff' not in iniItems.groundwaterOptions.keys(): msg = "Calculating the groundwater linear reccesion coefficient based on the given parameters." logger.info(msg) # reading the 'aquiferWidth' value from the landSurfaceOptions (slopeLength) if iniItems.landSurfaceOptions['topographyNC'] == None: aquiferWidth = vos.readPCRmapClone(iniItems.landSurfaceOptions['slopeLength'],self.cloneMap,self.tmpDir,self.inputDir) else: topoPropertiesNC = vos.getFullPath(iniItems.landSurfaceOptions['topographyNC'],self.inputDir) aquiferWidth = vos.netcdf2PCRobjCloneWithoutTime(topoPropertiesNC,'slopeLength',self.cloneMap) # covering aquiferWidth with its maximum value aquiferWidth = pcr.ifthen(self.landmask, pcr.cover(aquiferWidth, pcr.mapmaximum(aquiferWidth))) # aquifer thickness (unit: m) for recession coefficient aquiferThicknessForRecessionCoeff = vos.readPCRmapClone(iniItems.groundwaterOptions['aquiferThicknessForRecessionCoeff'],\ self.cloneMap,self.tmpDir,self.inputDir) # calculate recessionCoeff (unit; day-1) self.recessionCoeff = (math.pi**2.) 
* aquiferThicknessForRecessionCoeff / \ (4.*self.specificYield*(aquiferWidth**2.)) # assign the reccession coefficient based on the given pcraster file if 'recessionCoeff' in iniItems.groundwaterOptions.keys(): if iniItems.groundwaterOptions['recessionCoeff'] != "None":\ self.recessionCoeff = vos.readPCRmapClone(iniItems.groundwaterOptions['recessionCoeff'],self.cloneMap,self.tmpDir,self.inputDir) # minimum and maximum values for groundwater recession coefficient (day-1) self.recessionCoeff = pcr.cover(self.recessionCoeff,0.00) self.recessionCoeff = pcr.min(0.9999,self.recessionCoeff) if 'minRecessionCoeff' in iniItems.groundwaterOptions.keys(): minRecessionCoeff = float(iniItems.groundwaterOptions['minRecessionCoeff']) else: minRecessionCoeff = 1.0e-4 # This is the minimum value used in Van Beek et al. (2011). self.recessionCoeff = pcr.max(minRecessionCoeff,self.recessionCoeff) ##################################################################################################################################################### ##################################################################################################################################################### # assign the river/stream/surface water bed conductivity # - the default value is equal to kSatAquifer self.riverBedConductivity = self.kSatAquifer # - assign riverBedConductivity coefficient based on the given pcraster file if 'riverBedConductivity' in iniItems.groundwaterOptions.keys(): if iniItems.groundwaterOptions['riverBedConductivity'] != "None":\ self.riverBedConductivity = vos.readPCRmapClone(iniItems.groundwaterOptions['riverBedConductivity'],self.cloneMap,self.tmpDir,self.inputDir) # # surface water bed thickness (unit: m) bed_thickness = 0.1 # surface water bed resistance (unit: day) bed_resistance = bed_thickness / (self.riverBedConductivity) minimum_bed_resistance = 1.0 self.bed_resistance = pcr.max(minimum_bed_resistance,\ bed_resistance,) ############################################################################################################################################## ##################################################################################################################################################### # total groundwater thickness (unit: m) # - For PCR-GLOBWB, the estimate of total groundwater thickness is needed to estimate for the following purpose: # - productive aquifer areas (where capillary rise can occur and groundwater depletion can occur) # - and also to estimate fossil groundwater capacity (the latter is needed only for run without MODFLOW) totalGroundwaterThickness = None if 'estimateOfTotalGroundwaterThickness' in iniItems.groundwaterOptions.keys(): totalGroundwaterThickness = vos.readPCRmapClone(iniItems.groundwaterOptions['estimateOfTotalGroundwaterThickness'], self.cloneMap, self.tmpDir, self.inputDir) # extrapolation of totalGroundwaterThickness # - TODO: Make a general extrapolation option as a function in the virtualOS.py totalGroundwaterThickness = pcr.cover(totalGroundwaterThickness, pcr.windowaverage(totalGroundwaterThickness, 0.75)) totalGroundwaterThickness = pcr.cover(totalGroundwaterThickness, pcr.windowaverage(totalGroundwaterThickness, 0.75)) totalGroundwaterThickness = pcr.cover(totalGroundwaterThickness, pcr.windowaverage(totalGroundwaterThickness, 0.75)) totalGroundwaterThickness = pcr.cover(totalGroundwaterThickness, pcr.windowaverage(totalGroundwaterThickness, 1.00)) totalGroundwaterThickness = pcr.cover(totalGroundwaterThickness, 0.0) # set minimum 
thickness if 'minimumTotalGroundwaterThickness' in iniItems.groundwaterOptions.keys(): minimumThickness = pcr.scalar(float(\ iniItems.groundwaterOptions['minimumTotalGroundwaterThickness'])) totalGroundwaterThickness = pcr.max(minimumThickness, totalGroundwaterThickness) # set maximum thickness if 'maximumTotalGroundwaterThickness' in iniItems.groundwaterOptions.keys(): maximumThickness = float(self.iniItems.groundwaterOptions['maximumTotalGroundwaterThickness']) totalGroundwaterThickness = pcr.min(maximumThickness, totalGroundwaterThickness) # estimate of total groundwater thickness (unit: m) self.totalGroundwaterThickness = totalGroundwaterThickness ##################################################################################################################################################### ############################################################################################################################################## # confining layer thickness (for more than one layer) self.usePreDefinedConfiningLayer = False if self.number_of_layers > 1 and self.iniItems.modflowParameterOptions['usePreDefinedConfiningLayer'] == "True": self.usePreDefinedConfiningLayer = True # confining layer thickness (unit: m) self.confiningLayerThickness = pcr.cover(\ vos.readPCRmapClone(self.iniItems.modflowParameterOptions['confiningLayerThickness'],\ self.cloneMap, self.tmpDir, self.inputDir), 0.0) # maximum confining layer vertical conductivity (unit: m/day) self.maximumConfiningLayerVerticalConductivity = pcr.cover(\ vos.readPCRmapClone(self.iniItems.modflowParameterOptions['maximumConfiningLayerVerticalConductivity'],\ self.cloneMap, self.tmpDir, self.inputDir), 0.0) # confining layer resistance (unit: day) self.maximumConfiningLayerResistance = pcr.cover(\ vos.readPCRmapClone(self.iniItems.modflowParameterOptions['maximumConfiningLayerResistance'],\ self.cloneMap, self.tmpDir, self.inputDir), 0.0) #confining layer location #* self.estimateConfinedLayers = pcr.cover(\ vos.readPCRmapClone(self.iniItems.modflowParameterOptions['estimateConfinedLayers'],\ self.cloneMap, self.tmpDir, self.inputDir), 0.0) ############################################################################################################################################## ##################################################################################################################################################### # extent of the productive aquifer (a boolean map) # - Principle: In non-productive aquifer areas, no capillary rise and groundwater abstraction should not exceed recharge # self.productive_aquifer = pcr.ifthen(self.landmask, pcr.boolean(1.0)) excludeUnproductiveAquifer = True if excludeUnproductiveAquifer: if 'minimumTransmissivityForProductiveAquifer' in iniItems.groundwaterOptions.keys() and\ (iniItems.groundwaterOptions['minimumTransmissivityForProductiveAquifer'] != "None" or\ iniItems.groundwaterOptions['minimumTransmissivityForProductiveAquifer'] != "False"): minimumTransmissivityForProductiveAquifer = \ vos.readPCRmapClone(iniItems.groundwaterOptions['minimumTransmissivityForProductiveAquifer'],\ self.cloneMap, self.tmpDir, self.inputDir) self.productive_aquifer = pcr.cover(\ pcr.ifthen(self.kSatAquifer * totalGroundwaterThickness > minimumTransmissivityForProductiveAquifer, pcr.boolean(1.0)), pcr.boolean(0.0)) # - TODO: Check and re-calculate the GLHYMPS map to confirm the kSatAquifer value in groundwaterPropertiesNC (e.g. we miss some parts of HPA). 
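        # The productive-aquifer test above is a simple transmissivity threshold,
        #   T = kSatAquifer * totalGroundwaterThickness   (unit: m2/day).
        # As a rough worked example with hypothetical numbers: a kSatAquifer of 5 m/day and a thickness of 40 m
        # give T = 200 m2/day, so such a cell counts as productive whenever the configured
        # minimumTransmissivityForProductiveAquifer is below 200 m2/day.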
##################################################################################################################################################### ##################################################################################################################################################### # option to ignore capillary rise self.ignoreCapRise = False if 'ignoreCapRise' in self.iniItems.modflowParameterOptions.keys() and \ self.iniItems.modflowParameterOptions['ignoreCapRise'] == "True": self.ignoreCapRise = True ##################################################################################################################################################### ##################################################################################################################################################### # assumption for the thickness (m) of accessible groundwater (needed for coupling to PCR-GLOBWB) # - Note that this assumption value does not affect the modflow calculation. The values is needed merely for reporting "accesibleGroundwaterVolume". accesibleDepth = 1000.0 if 'accesibleDepth' in self.iniItems.modflowParameterOptions.keys(): if self.iniItems.modflowParameterOptions['accesibleDepth'] != "None": accesibleDepth = float(self.iniItems.modflowParameterOptions['accesibleDepth']) self.max_accesible_elevation = self.dem_average - accesibleDepth # list of the convergence criteria for HCLOSE (unit: m) # - Deltares default's value is 0.001 m # check this value with Jarno #~ self.criteria_HCLOSE = [0.001, 0.005, 0.01, 0.02, 0.05, 0.1, 0.15, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0] #~ self.criteria_HCLOSE = [0.001, 0.01, 0.1, 0.5, 1.0] #~ self.criteria_HCLOSE = [0.001, 0.01, 0.1, 0.15, 0.2, 0.5, 1.0] #~ self.criteria_HCLOSE = [0.001, 0.005, 0.01, 0.1, 0.15, 0.2, 0.5, 1.0] #~ self.criteria_HCLOSE = [0.001, 0.005, 0.01, 0.1, 0.2, 0.5, 1.0] self.criteria_HCLOSE = [0.001, 0.005, 0.01, 0.1, 0.2, 0.3, 0.5, 0.75, 1.0] #self.criteria_HCLOSE = [0.01, 0.1, 0.15, 0.2, 0.5, 1.0] #self.criteria_HCLOSE = [0.5, 1.0] #self.criteria_HCLOSE = [1.0, 1.0] self.criteria_HCLOSE = sorted(self.criteria_HCLOSE) # list of the convergence criteria for RCLOSE (unit: m3) # - Deltares default's value for their 25 and 250 m resolution models is 10 m3 # check this value with Jarno cell_area_assumption = verticalSizeInMeter * float(pcr.cellvalue(pcr.mapmaximum(horizontalSizeInMeter),1)[0]) #~ self.criteria_RCLOSE = [10., 100., 10.* cell_area_assumption/(250.*250.), 10.* cell_area_assumption/(25.*25.), 100.* cell_area_assumption/(25.*25.)] #~ self.criteria_RCLOSE = [10.* cell_area_assumption/(250.*250.), 10.* cell_area_assumption/(25.*25.), 100.* cell_area_assumption/(25.*25.)] #~ self.criteria_RCLOSE = [10.* cell_area_assumption/(25.*25.), 100.* cell_area_assumption/(25.*25.)] self.criteria_RCLOSE = [10.* cell_area_assumption/(25.*25.), 100.* cell_area_assumption/(25.*25.), 10000.* cell_area_assumption/(25.*25.)] #~ self.criteria_RCLOSE = [10.* cell_area_assumption/(25.*25.), 10000.* cell_area_assumption/(25.*25.)] self.criteria_RCLOSE = sorted(self.criteria_RCLOSE) # initiate somes variables/objects/classes to None # - lakes and reservoir objects (they will be constant for the entrie year, only change at the beginning of the year) self.WaterBodies = None # - surface water bed conductance (also only change at the beginning of the year) self.bed_conductance = None # initiate pcraster modflow object to None self.pcr_modflow = None # the following condition is needed if we have to 
self.valuesRechargeAndAbstractionInMonthlyTotal = False if 'valuesRechargeAndAbstractionInMonthlyTotal' in self.iniItems.modflowTransientInputOptions.keys(): if self.iniItems.modflowTransientInputOptions['valuesRechargeAndAbstractionInMonthlyTotal'] == "True":\ self.valuesRechargeAndAbstractionInMonthlyTotal = True # minimum and maximum transmissivity values (unit: m2/day) self.minimumTransmissivity = 10.0 # assumption used by Deltares self.maximumTransmissivity = 100000.0 # ridiculosly high (for 20 m/day with the thickness = 5 km) if 'minimumTransmissivity' in self.iniItems.modflowParameterOptions.keys(): self.minimumTransmissivity = float(self.iniItems.modflowParameterOptions['minimumTransmissivity']) if 'maximumTransmissivity' in self.iniItems.modflowParameterOptions.keys(): self.maximumTransmissivity = float(self.iniItems.modflowParameterOptions['maximumTransmissivity']) # option for online coupling purpose, we also need to know the location of pcrglobwb output self.online_coupling = self.iniItems.online_coupling_between_pcrglobwb_and_modflow # initiate old style reporting (this is usually used for debugging process) self.initiate_old_style_reporting(iniItems) def initiate_modflow(self): logger.info("Initializing pcraster modflow.") # TODO: removing all previous pcraster modflow files: # initialise pcraster modflow self.pcr_modflow = pcr.initialise(pcr.clone()) # setup the DIS package specifying the grids/layers used for the groundwater model # - Note the layer specification must start with the bottom layer (layer 1 is the lowermost layer) if self.number_of_layers == 1: self.set_grid_for_one_layer_model() if self.number_of_layers == 2: self.set_grid_for_two_layer_model() # specification for the boundary condition (ibound) # - active cells only in landmask # - constant head for outside the landmask ibound = pcr.ifthen(self.landmask, pcr.nominal(1)) ibound = pcr.cover(ibound, pcr.nominal(-1)) self.ibound = ibound for i in range(1, self.number_of_layers+1): self.pcr_modflow.setBoundary(self.ibound, i) # setup the BCF package if self.number_of_layers == 1: self.set_bcf_for_one_layer_model() if self.number_of_layers == 2: self.set_bcf_for_two_layer_model() # TODO: defining/incorporating anisotrophy values def set_grid_for_one_layer_model(self): # grid specification - one layer model top = self.dem_average bottom = top - self.totalGroundwaterThickness self.pcr_modflow.createBottomLayer(bottom, top) # make the following value(s) available for the other modules/methods: self.thickness_of_layer_1 = top - bottom self.total_thickness = self.thickness_of_layer_1 self.bottom_layer_1 = bottom def set_grid_for_two_layer_model(self): # grid specification - two layer model # - top upper layer is elevation top_layer_2 = self.dem_average # - thickness of layer 2 is at least 10% of totalGroundwaterThickness bottom_layer_2 = self.dem_average - 0.10 * self.totalGroundwaterThickness # - thickness of layer 2 should be until 5 m below the river bed bottom_layer_2 = pcr.min(self.dem_riverbed - 5.0, bottom_layer_2) # - make sure that the minimum thickness of layer 2 is at least 0.1 m thickness_of_layer_2 = pcr.max(0.1, top_layer_2 - bottom_layer_2) bottom_layer_2 = top_layer_2 - thickness_of_layer_2 # - thickness of layer 1 is at least 5.0 m thickness_of_layer_1 = pcr.max(5.0, self.totalGroundwaterThickness - thickness_of_layer_2) bottom_layer_1 = bottom_layer_2 - thickness_of_layer_1 if self.usePreDefinedConfiningLayer: # make sure that totalGroundwaterThickness is at least 50 m thicker than 
confiningLayerThickness total_thickness = pcr.max(self.totalGroundwaterThickness, self.confiningLayerThickness + 50.0) # - top upper layer is elevation top_layer_2 = self.dem_average # - thickness of layer 2 is based on the predefined confiningLayerThickness bottom_layer_2 = self.dem_average - self.confiningLayerThickness # - thickness of layer 2 should be until 5 m below the river bed elevation bottom_layer_2 = pcr.min(self.dem_riverbed - 5.0, bottom_layer_2) # - make sure that the minimum thickness of layer 2 is at least 0.1 m thickness_of_layer_2 = pcr.max(0.1, top_layer_2 - bottom_layer_2) bottom_layer_2 = top_layer_2 - thickness_of_layer_2 # - thickness of layer 1 is at least 5.0 m thickness_of_layer_1 = pcr.max(5.0, total_thickness - thickness_of_layer_2) bottom_layer_1 = bottom_layer_2 - thickness_of_layer_1 # set grid in modflow self.pcr_modflow.createBottomLayer(bottom_layer_1, bottom_layer_2) self.pcr_modflow.addLayer(top_layer_2) # make the following value(s) available for the other modules/methods: self.thickness_of_layer_1 = thickness_of_layer_1 self.thickness_of_layer_2 = thickness_of_layer_2 self.total_thickness = self.thickness_of_layer_1 + self.thickness_of_layer_2 self.bottom_layer_1 = bottom_layer_1 self.bottom_layer_2 = bottom_layer_2 self.top_layer_2 = top_layer_2 #~ # report elevation in pcraster map #~ pcr.report(pcr.ifthen(self.landmask, self.top_layer_2), "top_uppermost_layer.map") #~ pcr.report(pcr.ifthen(self.landmask, self.bottom_layer_2), "bottom_uppermost_layer.map") #~ pcr.report(pcr.ifthen(self.landmask, self.bottom_layer_1), "bottom_lowermost_layer.map") def set_bcf_for_one_layer_model(self): # specification for storage coefficient (BCF package) # - correction due to the usage of lat/lon coordinates primary = pcr.cover(self.specificYield * self.cellAreaMap/(pcr.clone().cellSize()*pcr.clone().cellSize()), 0.0) primary = pcr.max(1e-10, primary) secondary = primary # dummy values as we used the layer type 00 self.pcr_modflow.setStorage(primary, secondary, 1) # specification for horizontal conductivities (BCF package) horizontal_conductivity = self.kSatAquifer # unit: m/day # set the minimum value for transmissivity horizontal_conductivity = pcr.max(self.minimumTransmissivity, \ horizontal_conductivity * self.total_thickness) / self.total_thickness # set the maximum value for transmissivity horizontal_conductivity = pcr.min(self.maximumTransmissivity, \ horizontal_conductivity * self.total_thickness) / self.total_thickness # specification for horizontal conductivities (BCF package) vertical_conductivity = horizontal_conductivity # dummy values, as one layer model is used #~ # for areas with ibound <= 0, we set very high horizontal conductivity values: # TODO: Check this, shall we implement this? 
#~ horizontal_conductivity = pcr.ifthenelse(self.ibound > 0, horizontal_conductivity, \ #~ pcr.mapmaximum(horizontal_conductivity)) # set BCF package self.pcr_modflow.setConductivity(00, horizontal_conductivity, \ vertical_conductivity, 1) # make the following value(s) available for the other modules/methods: self.specific_yield_1 = self.specificYield def set_bcf_for_two_layer_model(self): # specification for storage coefficient (BCF package) # - correction due to the usage of lat/lon coordinates primary = pcr.cover(self.specificYield * self.cellAreaMap/(pcr.clone().cellSize()*pcr.clone().cellSize()), 0.0) primary = pcr.max(1e-20, primary) #~ secondary = pcr.max(1e-5, primary * 0.001) # dummy values if we use layer type 00 secondary = pcr.cover(pcr.min(0.005, self.specificYield) * self.cellAreaMap/(pcr.clone().cellSize()*pcr.clone().cellSize()), 0.0) # dummy values if we use layer type 00 secondary = pcr.max(1e-20, secondary) self.pcr_modflow.setStorage(primary, secondary, 1) self.pcr_modflow.setStorage(primary, secondary, 2) # specification for conductivities (BCF package) horizontal_conductivity = self.kSatAquifer # unit: m/day # layer 2 (upper layer) - horizontal conductivity horizontal_conductivity_layer_2 = pcr.max(self.minimumTransmissivity, \ horizontal_conductivity * self.thickness_of_layer_2) / self.thickness_of_layer_2 horizontal_conductivity_layer_2 = pcr.min(self.maximumTransmissivity, \ horizontal_conductivity * self.thickness_of_layer_2) / self.thickness_of_layer_2 # layer 2 (upper layer) - vertical conductivity # INGE: kh:kv = 1:0.1 vertical_conductivity_layer_2 = (self.kSatAquifer*0.1) * self.cellAreaMap/\ (pcr.clone().cellSize()*pcr.clone().cellSize()) if self.usePreDefinedConfiningLayer: # vertical conductivity values are limited by the predefined minimumConfiningLayerVerticalConductivity and maximumConfiningLayerResistance vertical_conductivity_layer_2 = pcr.min(self.kSatAquifer, self.maximumConfiningLayerVerticalConductivity) vertical_conductivity_layer_2 = pcr.ifthenelse(self.confiningLayerThickness > 0.0, vertical_conductivity_layer_2, self.kSatAquifer) vertical_conductivity_layer_2 = pcr.max(self.thickness_of_layer_2/self.maximumConfiningLayerResistance, \ vertical_conductivity_layer_2) # minimum resistance is one day vertical_conductivity_layer_2 = pcr.min(self.thickness_of_layer_2/1.0,\ vertical_conductivity_layer_2) # correcting vertical conductivity vertical_conductivity_layer_2 *= self.cellAreaMap/(pcr.clone().cellSize()*pcr.clone().cellSize()) # layer 1 (lower layer) horizontal_conductivity_layer_1 = pcr.max(self.minimumTransmissivity, \ horizontal_conductivity * self.thickness_of_layer_1) / self.thickness_of_layer_1 horizontal_conductivity_layer_1 = pcr.min(self.maximumTransmissivity, \ horizontal_conductivity * self.thickness_of_layer_1) / self.thickness_of_layer_1 # ignoring the vertical conductivity in the lower layer # such that the values of resistance (1/vcont) depend only on vertical_conductivity_layer_2 vertical_conductivity_layer_1 = pcr.spatial(pcr.scalar(1e99)) * self.cellAreaMap/\ (pcr.clone().cellSize()*pcr.clone().cellSize()) vertical_conductivity_layer_2 *= 0.5 # see: http://inside.mines.edu/~epoeter/583/08/discussion/vcont/modflow_vcont.htm #~ # for areas with ibound <= 0, we set very high horizontal conductivity values: # TODO: Check this, shall we implement this? 
#~ horizontal_conductivity_layer_2 = pcr.ifthenelse(self.ibound > 0, horizontal_conductivity_layer_2, \ #~ pcr.mapmaximum(horizontal_conductivity_layer_2)) #~ horizontal_conductivity_layer_1 = pcr.ifthenelse(self.ibound > 0, horizontal_conductivity_layer_1, \ #~ pcr.mapmaximum(horizontal_conductivity_layer_1)) # set conductivity values to MODFLOW self.pcr_modflow.setConductivity(00, horizontal_conductivity_layer_2, \ vertical_conductivity_layer_2, 2) self.pcr_modflow.setConductivity(00, horizontal_conductivity_layer_1, \ vertical_conductivity_layer_1, 1) #~ self.pcr_modflow.setConductivity(02, horizontal_conductivity_layer_1, \ #~ vertical_conductivity_layer_1, 1) # make the following value(s) available for the other modules/methods: #self.specific_yield_1 = self.specificYield #self.specific_yield_2 = self.specificYield # INGE : increase specific yields of aquifers (not confining layers) self.specific_yield_2 = pcr.cover(pcr.ifthenelse(self.estimateConfinedLayers >0.0, 0.11, self.specificYield), self.specificYield) self.specific_yield_1 = pcr.cover(pcr.ifthenelse(self.estimateConfinedLayers >0.0, self.specificYield * 3, self.specificYield), self.specificYield) def get_initial_heads(self): if self.iniItems.modflowTransientInputOptions['usingPredefinedInitialHead'] == "True": msg = "Using pre-defined groundwater head(s) given in the ini/configuration file." logger.info(msg) # using pre-defined groundwater head(s) described in the ini/configuration file for i in range(1, self.number_of_layers+1): var_name = 'groundwaterHeadLayer'+str(i) vars(self)[var_name] = vos.readPCRmapClone(self.iniItems.modflowTransientInputOptions[var_name+'Ini'],\ self.cloneMap, self.tmpDir, self.inputDir) vars(self)[var_name] = pcr.cover(vars(self)[var_name], 0.0) else: msg = "Estimating initial conditions based on the steady state simulation using the input as defined in the ini/configuration file." 
logger.info(msg) # using the digital elevation model as the initial heads for i in range(1, self.number_of_layers+1): var_name = 'groundwaterHeadLayer'+str(i) vars(self)[var_name] = self.dem_average # using initial head estimate given in the configuration file if 'usingInitialHeadEstimate' in self.iniItems.modflowSteadyStateInputOptions.keys() and\ self.iniItems.modflowSteadyStateInputOptions['usingInitialHeadEstimate'] == "True": for i in range(1, self.number_of_layers+1): var_name = 'groundwaterHeadLayer'+str(i) vars(self)[var_name] = vos.readPCRmapClone(self.iniItems.modflowSteadyStateInputOptions[var_name+'Estimate'],\ self.cloneMap, self.tmpDir, self.inputDir) vars(self)[var_name] = pcr.cover(vars(self)[var_name], 0.0) # calculate/simulate a steady state condition (until the modflow converges) # get the current state(s) of groundwater head and put them in a dictionary groundwaterHead = self.getState() self.modflow_simulation("steady-state", groundwaterHead, None, 1, 1) # An extra steady state simulation using transient simulation with constant input self.transient_simulation_with_constant_input() # extrapolating the calculated heads for areas/cells outside the landmask (to remove isolated cells) # # - the calculate groundwater head within the landmask region for i in range(1, self.number_of_layers+1): var_name = 'groundwaterHeadLayer'+str(i) vars(self)[var_name] = pcr.ifthen(self.landmask, vars(self)[var_name]) # keep the ocean values (dem <= 0.0) - this is in order to maintain the 'behaviors' of sub marine groundwater discharge vars(self)[var_name] = pcr.cover(vars(self)[var_name], pcr.ifthen(self.dem_average <= 0.0, self.dem_average)) # extrapolation vars(self)[var_name] = pcr.cover(vars(self)[var_name], pcr.windowaverage(vars(self)[var_name], 3.*pcr.clone().cellSize())) vars(self)[var_name] = pcr.cover(vars(self)[var_name], pcr.windowaverage(vars(self)[var_name], 5.*pcr.clone().cellSize())) vars(self)[var_name] = pcr.cover(vars(self)[var_name], pcr.windowaverage(vars(self)[var_name], 7.*pcr.clone().cellSize())) vars(self)[var_name] = pcr.cover(vars(self)[var_name], self.dem_average) # TODO: Define the window sizes as part of the configuration file. Also consider to use the inverse distance method. # TODO: Also please consider to use Deltares's trick to remove isolated cells. def transient_simulation_with_constant_input(self): time_step_length = 30 # unit: days number_of_extra_years = 10 if "extraSpinUpYears" in self.iniItems.modflowSteadyStateInputOptions.keys() and\ self.iniItems.modflowSteadyStateInputOptions['extraSpinUpYears'] != "None": number_of_extra_years = int(\ self.iniItems.modflowSteadyStateInputOptions['extraSpinUpYears']) number_of_extra_months = 12 * number_of_extra_years # maximum number of months = 999 if number_of_extra_months > 999: msg = "To avoid a very long spin up, we limit the number of extra months to 999 months." 
logger.info(msg) number_of_extra_months = min(999, number_of_extra_months) if number_of_extra_months > 0: # preparing extra spin up folder/directory: extra_spin_up_directory = self.iniItems.endStateDir + "/extra_spin_up/" if os.path.exists(extra_spin_up_directory): shutil.rmtree(extra_spin_up_directory) os.makedirs(extra_spin_up_directory) for i_month in range(1, number_of_extra_months + 1): msg = "\n" msg += "\n" msg += "Extra steady state simulation (transient simulation with constant input and monthly stress period): " + str(i_month) + " from " + str(number_of_extra_months) msg += "\n" msg += "\n" logger.info(msg) groundwaterHead = self.getState() self.modflow_simulation("steady-state-extra", groundwaterHead, None, time_step_length, time_step_length) # reporting the calculated head to pcraster files # - extension for output file: extension = "00" + str(i_month) if i_month > 9: extension = "0" + str(i_month) if i_month > 99: extension = str(i_month) for i in range(1, self.number_of_layers+1): var_name = 'groundwaterHeadLayer' + str(i) file_name = extra_spin_up_directory + "/gwhead" + str(i) + "_." + extension pcr.report(groundwaterHead[var_name], file_name) def estimate_bottom_of_bank_storage(self): # influence zone depth (m) # TODO: Define this one as part of influence_zone_depth = 5.0 # bottom_elevation = flood_plain elevation - influence zone bottom_of_bank_storage = self.dem_floodplain - influence_zone_depth # reducing noise (so we will not introduce unrealistic sinks) # TODO: Define the window size as part of the configuration/ini file bottom_of_bank_storage = pcr.max(bottom_of_bank_storage,\ pcr.windowaverage(bottom_of_bank_storage, 3.0 * pcr.clone().cellSize())) # bottom_elevation > river bed bottom_of_bank_storage = pcr.max(self.dem_riverbed, bottom_of_bank_storage) # reducing noise by comparing to its downstream value (so we will not introduce unrealistic sinks) bottom_of_bank_storage = pcr.max(bottom_of_bank_storage, \ (bottom_of_bank_storage + pcr.cover(pcr.downstream(self.lddMap, bottom_of_bank_storage), bottom_of_bank_storage))/2.) # bottom_elevation >= 0.0 (must be higher than sea level) bottom_of_bank_storage = pcr.max(0.0, bottom_of_bank_storage) # bottom_elevation <= dem_average (this is to drain overland flow) bottom_of_bank_storage = pcr.min(bottom_of_bank_storage, self.dem_average) bottom_of_bank_storage = pcr.cover(bottom_of_bank_storage, self.dem_average) # for the mountainous region, the bottom of bank storage equal to its lowest point # - extent of mountainous region mountainous_extent = pcr.ifthen((self.dem_average - self.dem_floodplain) > 50.0, pcr.boolean(1.0)) # - sub_catchment classes sub_catchment_class = pcr.ifthen(mountainous_extent, \ pcr.subcatchment(self.lddMap, pcr.nominal(pcr.uniqueid(mountainous_extent)))) # - bottom of bak storage bottom_of_bank_storage = pcr.cover(pcr.areaminimum(bottom_of_bank_storage, sub_catchment_class), \ bottom_of_bank_storage) # rounding down bottom_of_bank_storage = pcr.rounddown(bottom_of_bank_storage * 1000.)/1000. 
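        # The rounddown expression above simply truncates the elevation to millimetre precision
        # (e.g. 12.34567 m becomes 12.345 m), which keeps the resulting drain elevations compact and reproducible.
        # An equivalent illustrative one-liner with a hypothetical scalar value:
        #~ example = pcr.rounddown(pcr.scalar(12.34567) * 1000.)/1000.   # yields 12.345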
# TODO: We may want to improve this concept - by incorporating the following: # - smooth bottom_elevation # - upstream areas in the mountainous regions and above perrenial stream starting points may also be drained (otherwise water will be accumulated and trapped there) # - bottom_elevation > minimum elevation that is estimated from the maximum of S3 from the PCR-GLOBWB simulation return bottom_of_bank_storage def initiate_old_style_reporting(self,iniItems): self.report = True try: self.outDailyTotNC = iniItems.oldReportingOptions['outDailyTotNC'].split(",") self.outMonthTotNC = iniItems.oldReportingOptions['outMonthTotNC'].split(",") self.outMonthAvgNC = iniItems.oldReportingOptions['outMonthAvgNC'].split(",") self.outMonthEndNC = iniItems.oldReportingOptions['outMonthEndNC'].split(",") self.outAnnuaTotNC = iniItems.oldReportingOptions['outAnnuaTotNC'].split(",") self.outAnnuaAvgNC = iniItems.oldReportingOptions['outAnnuaAvgNC'].split(",") self.outAnnuaEndNC = iniItems.oldReportingOptions['outAnnuaEndNC'].split(",") except: self.report = False if self.report == True: self.outNCDir = iniItems.outNCDir self.netcdfObj = PCR2netCDF(iniItems) # # daily output in netCDF files: if self.outDailyTotNC[0] != "None": for var in self.outDailyTotNC: # creating the netCDF files: self.netcdfObj.createNetCDF(str(self.outNCDir)+"/"+ \ str(var)+"_dailyTot.nc",\ var,"undefined") # MONTHly output in netCDF files: # - cummulative if self.outMonthTotNC[0] != "None": for var in self.outMonthTotNC: # initiating monthlyVarTot (accumulator variable): vars(self)[var+'MonthTot'] = None # creating the netCDF files: self.netcdfObj.createNetCDF(str(self.outNCDir)+"/"+ \ str(var)+"_monthTot.nc",\ var,"undefined") # - average if self.outMonthAvgNC[0] != "None": for var in self.outMonthAvgNC: # initiating monthlyTotAvg (accumulator variable) vars(self)[var+'MonthTot'] = None # initiating monthlyVarAvg: vars(self)[var+'MonthAvg'] = None # creating the netCDF files: self.netcdfObj.createNetCDF(str(self.outNCDir)+"/"+ \ str(var)+"_monthAvg.nc",\ var,"undefined") # - last day of the month if self.outMonthEndNC[0] != "None": for var in self.outMonthEndNC: # creating the netCDF files: self.netcdfObj.createNetCDF(str(self.outNCDir)+"/"+ \ str(var)+"_monthEnd.nc",\ var,"undefined") # YEARly output in netCDF files: # - cummulative if self.outAnnuaTotNC[0] != "None": for var in self.outAnnuaTotNC: # initiating yearly accumulator variable: vars(self)[var+'AnnuaTot'] = None # creating the netCDF files: self.netcdfObj.createNetCDF(str(self.outNCDir)+"/"+ \ str(var)+"_annuaTot.nc",\ var,"undefined") # - average if self.outAnnuaAvgNC[0] != "None": for var in self.outAnnuaAvgNC: # initiating annualyVarAvg: vars(self)[var+'AnnuaAvg'] = None # initiating annualyTotAvg (accumulator variable) vars(self)[var+'AnnuaTot'] = None # creating the netCDF files: self.netcdfObj.createNetCDF(str(self.outNCDir)+"/"+ \ str(var)+"_annuaAvg.nc",\ var,"undefined") # - last day of the year if self.outAnnuaEndNC[0] != "None": for var in self.outAnnuaEndNC: # creating the netCDF files: self.netcdfObj.createNetCDF(str(self.outNCDir)+"/"+ \ str(var)+"_annuaEnd.nc",\ var,"undefined") def update(self,currTimeStep): # at the end of the month, calculate/simulate a steady state condition and obtain its calculated head values if currTimeStep.isLastDayOfMonth(): # get the previous state groundwaterHead = self.getState() # length of a stress period PERLEN = currTimeStep.day if currTimeStep.startTime.day != 1 and currTimeStep.monthIdx == 1: PERLEN = currTimeStep.day - 
currTimeStep.startTime.day + 1 # number of time step within a stress period NSTP = PERLEN * 3 self.PERLEN = PERLEN # number of days within a stress period self.NSTP = NSTP # number of time steps within a stress period self.modflow_simulation("transient", groundwaterHead, currTimeStep, PERLEN, NSTP) # old-style reporting (this is usually used for debugging process) self.old_style_reporting(currTimeStep) def modflow_simulation(self,\ simulation_type,\ initialGroundwaterHeadInADictionary,\ currTimeStep = None,\ PERLEN = 1.0, NSTP = 1, \ MXITER = 1500,\ ITERI = 1250,\ NPCOND = 1,\ RELAX = 0.98,\ NBPOL = 2,\ DAMP = 1,\ ITMUNI = 4, LENUNI = 2, TSMULT = 1.0): # initiate pcraster modflow object including its grid/layer/elevation: # - constant for the entire simulation if self.pcr_modflow == None: self.initiate_modflow() if simulation_type == "transient": logger.info("Preparing MODFLOW input for a transient simulation.") SSTR = 0 if simulation_type == "steady-state": logger.info("Preparing MODFLOW input for a steady-state simulation.") SSTR = 1 if simulation_type == "steady-state-extra": msg = "Preparing MODFLOW input for an 'extra' steady-state simulation: " msg += "a transient simulation with constant input for 30 day (monthly) stress period with daily time step." logger.info(msg) SSTR = 0 # extract and set initial head for modflow simulation groundwaterHead = initialGroundwaterHeadInADictionary for i in range(1, self.number_of_layers+1): var_name = 'groundwaterHeadLayer'+str(i) initial_head = pcr.scalar(groundwaterHead[var_name]) self.pcr_modflow.setInitialHead(initial_head, i) # read input files (for the steady-state condition, we use pcraster maps): if simulation_type == "steady-state" or simulation_type == "steady-state-extra": # - discharge (m3/s) from PCR-GLOBWB discharge = vos.readPCRmapClone(self.iniItems.modflowSteadyStateInputOptions['avgDischargeInputMap'],\ self.cloneMap, self.tmpDir, self.inputDir) # - recharge/capillary rise (unit: m/day) from PCR-GLOBWB gwRecharge = vos.readPCRmapClone(self.iniItems.modflowSteadyStateInputOptions['avgGroundwaterRechargeInputMap'],\ self.cloneMap, self.tmpDir, self.inputDir) # # - groundwater abstraction (unit: m/day) from PCR-GLOBWB gwAbstraction = pcr.spatial(pcr.scalar(0.0)) gwAbstraction = vos.readPCRmapClone(self.iniItems.modflowSteadyStateInputOptions['avgGroundwaterAbstractionInputMap'],\ self.cloneMap, self.tmpDir, self.inputDir) # - average channel storage (unit: m3) from PCR-GLOBWB channelStorage = None if 'avgChannelStorageInputMap' in self.iniItems.modflowSteadyStateInputOptions.keys() and\ self.iniItems.modflowSteadyStateInputOptions['avgChannelStorageInputMap'][-4:] != "None": channelStorage = pcr.cover(\ vos.readPCRmapClone(self.iniItems.modflowSteadyStateInputOptions['avgChannelStorageInputMap'],\ self.cloneMap, self.tmpDir, self.inputDir), 0.0) # read input files if simulation_type == "transient": if self.online_coupling: # for online coupling, we will read files from pcraster maps directory = self.iniItems.main_output_directory + "/global/maps/" # - discharge (m3/s) from PCR-GLOBWB discharge_file_name = directory + "monthly_discharge_cubic_meter_per_second_" + str(currTimeStep.fulldate) + ".map" discharge = pcr.cover(vos.readPCRmapClone(discharge_file_name, self.cloneMap, self.tmpDir), 0.0) # - recharge/capillary rise (unit: m/day) from PCR-GLOBWB gwRecharge_file_name = directory + "groundwater_recharge_meter_per_day_" + str(currTimeStep.fulldate) + ".map" gwRecharge = pcr.cover(vos.readPCRmapClone(gwRecharge_file_name, 
self.cloneMap, self.tmpDir), 0.0) # - groundwater abstraction (unit: m/day) from PCR-GLOBWB gwAbstraction_file_name = directory + "groundwater_abstraction_meter_per_day_" + str(currTimeStep.fulldate) + ".map" gwAbstraction = pcr.cover(vos.readPCRmapClone(gwAbstraction_file_name, self.cloneMap, self.tmpDir), 0.0) # - channel storage (unit: m/day) channel_storage_file_name = directory + "channel_storage_cubic_meter_" + str(currTimeStep.fulldate) + ".map" channelStorage = pcr.cover(vos.readPCRmapClone(channel_storage_file_name, self.cloneMap, self.tmpDir), 0.0) # TODO: Try to read from netcdf files, avoid reading from pcraster maps (avoid resampling using gdal) else: # for offline coupling, we will read files from netcdf files # - discharge (m3/s) from PCR-GLOBWB discharge = vos.netcdf2PCRobjClone(self.iniItems.modflowTransientInputOptions['dischargeInputNC'], "discharge", str(currTimeStep.fulldate), None, self.cloneMap) # - recharge/capillary rise (unit: m/day) from PCR-GLOBWB gwRecharge = vos.netcdf2PCRobjClone(self.iniItems.modflowTransientInputOptions['groundwaterRechargeInputNC'],\ "groundwater_recharge", str(currTimeStep.fulldate), None, self.cloneMap) # - groundwater abstraction (unit: m/day) from PCR-GLOBWB gwAbstraction = pcr.spatial(pcr.scalar(0.0)) if self.iniItems.modflowTransientInputOptions['groundwaterAbstractionInputNC'][-4:] != "None": gwAbstraction = vos.netcdf2PCRobjClone(self.iniItems.modflowTransientInputOptions['groundwaterAbstractionInputNC'],\ "total_groundwater_abstraction", str(currTimeStep.fulldate), None, self.cloneMap) # - for offline coupling, the provision of channel storage (unit: m3) is only optional channelStorage = None if 'channelStorageInputNC' in self.iniItems.modflowTransientInputOptions.keys() and\ self.iniItems.modflowTransientInputOptions['channelStorageInputNC'][-4:] != "None": channelStorage = vos.netcdf2PCRobjClone(self.iniItems.modflowTransientInputOptions['channelStorageInputNC'],\ "channel_storage", str(currTimeStep.fulldate), None, self.cloneMap) ##################################################################################################################################################### # for a steady-state simulation, the capillary rise is usually ignored: if (simulation_type == "steady-state" or\ simulation_type == "steady-state-extra"): self.ignoreCapRise = True if 'ignoreCapRiseSteadyState' in self.iniItems.modflowSteadyStateInputOptions.keys() and\ self.iniItems.modflowSteadyStateInputOptions['ignoreCapRiseSteadyState'] == "False": self.ignoreCapRise = False ##################################################################################################################################################### # ignore capillary rise if needed: if self.ignoreCapRise: gwRecharge = pcr.max(0.0, gwRecharge) # convert the values of abstraction and recharge to daily average if self.valuesRechargeAndAbstractionInMonthlyTotal: gwAbstraction = gwAbstraction/currTimeStep.day gwRecharge = gwRecharge/currTimeStep.day # set recharge, river, well and drain packages self.set_drain_and_river_package(discharge, channelStorage, currTimeStep, simulation_type) self.set_recharge_package(gwRecharge) self.set_well_package(gwAbstraction) # set parameter values for the DIS package self.pcr_modflow.setDISParameter(ITMUNI, LENUNI, PERLEN, NSTP, TSMULT, SSTR) # # Some notes about the values # # ITMUNI = 4 # indicates the time unit (0: undefined, 1: seconds, 2: minutes, 3: hours, 4: days, 5: years) # LENUNI = 2 # indicates the length unit (0: undefined, 1: feet, 2: 
        #                                   meters, 3: centimeters)
        # PERLEN = 1.0 # duration of a stress period
        # NSTP   = 1   # number of time steps in a stress period
        # TSMULT = 1.0 # multiplier for the length of the successive iterations
        # SSTR   = 1   # 0 - transient, 1 - steady state

        # initiate the index for HCLOSE and RCLOSE for the iteration until modflow_converged
        self.iteration_HCLOSE = 0
        self.iteration_RCLOSE = 0
        self.modflow_converged = False

        # execute MODFLOW
        while self.modflow_converged == False:

            # convergence criteria
            HCLOSE = self.criteria_HCLOSE[self.iteration_HCLOSE]
            RCLOSE = self.criteria_RCLOSE[self.iteration_RCLOSE]

            # set PCG solver
            self.pcr_modflow.setPCG(MXITER, ITERI, NPCOND, HCLOSE, RCLOSE, RELAX, NBPOL, DAMP)

            # some notes for PCG solver values
            #
            # MXITER = 50             # maximum number of outer iterations                    # Deltares use 50
            # ITERI  = 30             # number of inner iterations                            # Deltares use 30
            # NPCOND = 1              # 1 - Modified Incomplete Cholesky, 2 - Polynomial matrix conditioning method;
            # HCLOSE = 0.01           # HCLOSE (unit: m)
            # RCLOSE = 10.* 400.*400. # RCLOSE (unit: m3)
            # RELAX  = 1.00           # relaxation parameter used with NPCOND = 1
            # NBPOL  = 2              # indicates whether the estimate of the upper bound on the maximum eigenvalue is 2.0 (but we do not use it, since NPCOND = 1)
            # DAMP   = 1              # no damping (DAMP introduced in MODFLOW 2000)

            msg = "Executing MODFLOW with HCLOSE = "+str(HCLOSE)+" and RCLOSE = "+str(RCLOSE)+" and MXITER = "+str(MXITER)+" and ITERI = "+str(ITERI)+" and PERLEN = "+str(PERLEN)+" and NSTP = "+str(NSTP)
            logger.info(msg)

            try:
                self.pcr_modflow.run()
                self.modflow_converged = self.pcr_modflow.converged()
                # TODO: Ask Oliver to fix the non-convergence issue that can appear before reaching the end of stress period.
                #~ self.modflow_converged = self.old_check_modflow_convergence()
            except:
                self.modflow_converged = False

            print(self.modflow_converged)

            if self.modflow_converged == False:

                logger.info('')
                msg = "MODFLOW FAILED TO CONVERGE with HCLOSE = "+str(HCLOSE)+" and RCLOSE = "+str(RCLOSE)
                logger.info(msg)
                logger.info('')

                ####################################################################################################################### OPTIONAL ######
                # for the steady state simulation, we still save the calculated head(s)
                # so that we can use them as the initial estimate for the next iteration
                # (by doing this, it may ease the convergence?? - TODO: check this)
                # NOTE: We must NOT extract the calculated heads of a transient simulation result that does not converge.
                if simulation_type == "steady-state":

                    msg = "Set the result from the unconverged modflow simulation as the initial new estimate (for a steady-state simulation only)."
                    logger.info(msg)

                    # obtain the result from the unconverged modflow simulation
                    for i in range(1, self.number_of_layers+1):
                        var_name = 'groundwaterHeadLayer'+str(i)
                        vars(self)[var_name] = None
                        vars(self)[var_name] = self.pcr_modflow.getHeads(i)

                    # set the result from the unconverged modflow simulation as the initial new estimate
                    for i in range(1, self.number_of_layers+1):
                        var_name = 'groundwaterHeadLayer'+str(i)
                        initial_head = pcr.scalar(vars(self)[var_name])
                        self.pcr_modflow.setInitialHead(initial_head, i)
                ####################################################################################################################### OPTIONAL ######

                # set a new iteration index for the RCLOSE
                self.iteration_RCLOSE += 1
                # reset if the index has reached the length of available criteria
                if self.iteration_RCLOSE > (len(self.criteria_RCLOSE)-1): self.iteration_RCLOSE = 0

                # set a new iteration index for the HCLOSE
                if self.iteration_RCLOSE == 0: self.iteration_HCLOSE += 1

                # if we are already using all available HCLOSE values
                if self.iteration_RCLOSE == 0 and self.iteration_HCLOSE == len(self.criteria_HCLOSE):

                    msg  = "\n\n\n"
                    msg += "NOT GOOD!!! MODFLOW STILL FAILED TO CONVERGE with HCLOSE = "+str(HCLOSE)+" and RCLOSE = "+str(RCLOSE)
                    msg += "\n\n"

                    # for a steady-state simulation, we give up
                    if simulation_type == "steady-state":

                        msg += "But, we give up and we can only decide/suggest to use the last calculated groundwater heads."
                        msg += "\n\n"
                        logger.warning(msg)

                        # force MODFLOW to converge
                        self.modflow_converged = True

                    else:

                        additional_HCLOSE = HCLOSE * 2.0
                        msg += "We will try again using the HCLOSE: " + str(additional_HCLOSE)
                        msg += "\n\n"
                        logger.warning(msg)

                        self.criteria_HCLOSE.append(additional_HCLOSE)
                        self.criteria_HCLOSE = sorted(self.criteria_HCLOSE)

                        # TODO: Shall we also increase RCLOSE ??

            else:

                msg  = "\n\n\n"
                msg += "HURRAY!!! 
MODFLOW CONVERGED with HCLOSE = "+str(HCLOSE)+" and RCLOSE = "+str(RCLOSE) msg += "\n\n" logger.info(msg) # obtaining the results from modflow simulation if self.modflow_converged: self.get_all_modflow_results(simulation_type) # clear modflow object self.pcr_modflow = None # calculate some variables that will be accessed from PCR-GLOBWB (for online coupling purpose) self.calculate_values_for_pcrglobwb() def calculate_values_for_pcrglobwb(self): logger.info("Calculate some variables for PCR-GLOBWB (needed for online coupling purpose: 'relativeGroundwaterHead', 'baseflow', and 'storGroundwater'") # relative uppermost groundwater head (unit: m) above the minimum elevation within grid uppermost_head = vars(self)['groundwaterHeadLayer'+str(self.number_of_layers)] self.relativeGroundwaterHead = uppermost_head - self.dem_minimum # baseflow (unit: m/day) # - initiate the (accumulated) volume rate (m3/day) (for accumulating the fluxes from all layers) totalBaseflowVolumeRate = pcr.scalar(0.0) # - accumulating fluxes from all layers for i in range(1, self.number_of_layers+1): # from the river leakage var_name = 'riverLeakageLayer'+str(i) totalBaseflowVolumeRate += pcr.cover(vars(self)[var_name], 0.0) # from the drain package var_name = 'drainLayer'+str(i) totalBaseflowVolumeRate += pcr.cover(vars(self)[var_name], 0.0) # use only in the landmask region if i == self.number_of_layers: totalBaseflowVolumeRate = pcr.ifthen(self.landmask, totalBaseflowVolumeRate) # - convert the unit to m/day and convert the flow direction # for this variable, positive values indicates flow leaving aquifer (following PCR-GLOBWB assumption, opposite direction from MODFLOW) self.baseflow = pcr.scalar(-1.0) * (totalBaseflowVolumeRate/self.cellAreaMap) # storGroundwater (unit: m) # - from the lowermost layer accesibleGroundwaterThickness = pcr.ifthen(self.landmask, \ self.specific_yield_1 * \ pcr.max(0.0, self.groundwaterHeadLayer1 - pcr.max(self.max_accesible_elevation, \ self.bottom_layer_1))) # - from the uppermost layer if self.number_of_layers == 2:\ accesibleGroundwaterThickness += pcr.ifthen(self.landmask, \ self.specific_yield_2 * \ pcr.max(0.0, self.groundwaterHeadLayer2 - pcr.max(self.max_accesible_elevation, \ self.bottom_layer_2))) # - TODO: Make this flexible for a model that has more than two layers. 
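        # The accessible thickness computed above is, per layer, roughly
        #   specific_yield * max(0, head - max(max_accesible_elevation, layer_bottom))
        # As a rough worked example with hypothetical numbers: a specific yield of 0.1, a head of 20 m,
        # a layer bottom of -100 m and a max_accesible_elevation of -900 m give
        # 0.1 * (20 - (-100)) = 12 m of accessible water for that layer.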
# - storGroundwater (unit: m) that can be accessed for abstraction self.storGroundwater = accesibleGroundwaterThickness def get_all_modflow_results(self, simulation_type): logger.info("Get all modflow results.") # obtaining the results from modflow simulation for i in range(1, self.number_of_layers+1): # groundwater head (unit: m) var_name = 'groundwaterHeadLayer'+str(i) vars(self)[var_name] = None vars(self)[var_name] = self.pcr_modflow.getHeads(i) # river leakage (unit: m3/day) var_name = 'riverLeakageLayer'+str(i) vars(self)[var_name] = None vars(self)[var_name] = self.pcr_modflow.getRiverLeakage(i) # drain (unit: m3/day) var_name = 'drainLayer'+str(i) vars(self)[var_name] = None vars(self)[var_name] = self.pcr_modflow.getDrain(i) # bdgfrf - cell-by-cell flows right (m3/day) var_name = 'flowRightFaceLayer'+str(i) vars(self)[var_name] = None vars(self)[var_name] = self.pcr_modflow.getRightFace(i) # bdgfff - cell-by-cell flows front (m3/day) var_name = 'flowFrontFaceLayer'+str(i) vars(self)[var_name] = None vars(self)[var_name] = self.pcr_modflow.getFrontFace(i) # bdgflf - cell-by-cell flows lower (m3/day) # Note: No flow through the lower face of the bottom layer if i > 1: var_name = 'flowLowerFaceLayer'+str(i) vars(self)[var_name] = None vars(self)[var_name] = self.pcr_modflow.getLowerFace(i) # flow to/from constant head cells (unit: m3/day) var_name = 'flowConstantHeadLayer'+str(i) vars(self)[var_name] = None vars(self)[var_name] = self.pcr_modflow.getConstantHead(i) # cell-by-cell storage flow term (unit: m3) if simulation_type == "transient": var_name = 'flowStorageLayer'+str(i) vars(self)[var_name] = None vars(self)[var_name] = self.pcr_modflow.getStorage(i) #~ # for debuging only #~ pcr.report(self.groundwaterHeadLayer1 , "gw_head_layer_1.map") #~ pcr.report(self.groundwaterDepthLayer1, "gw_depth_layer_1.map") def old_check_modflow_convergence(self, file_name = "pcrmf.lst"): # open and read the lst file file_name = self.tmp_modflow_dir + "/" + file_name f = open(file_name) ; all_lines = f.read() ; f.close() # split the content of the file into several lines all_lines = all_lines.replace("\r","") all_lines = all_lines.split("\n") # scan the last 200 lines and check if the model modflow_converged = True for i in range(0,200): if 'FAILED TO CONVERGE' in all_lines[-i]: modflow_converged = False print modflow_converged return modflow_converged def set_drain_and_river_package(self, discharge, channel_storage, currTimeStep, simulation_type): logger.info("Set the river package.") # set WaterBodies class to define the extent of lakes and reservoirs (constant for the entie year, annual resolution) # and also set drain package (constant for the entire year, unless there are changes in the WaterBodies class) if simulation_type == "steady-state" or simulation_type == "steady-state-extra": self.WaterBodies = waterBodies.WaterBodies(self.iniItems,\ self.landmask,\ self.onlyNaturalWaterBodies) self.WaterBodies.getParameterFiles(date_given = self.iniItems.globalOptions['startTime'],\ cellArea = self.cellAreaMap, \ ldd = self.lddMap) if simulation_type == "transient": if self.WaterBodies == None: self.WaterBodies = waterBodies.WaterBodies(self.iniItems,\ self.landmask,\ self.onlyNaturalWaterBodies) self.WaterBodies.getParameterFiles(date_given = str(currTimeStep.fulldate),\ cellArea = self.cellAreaMap, \ ldd = self.lddMap) if currTimeStep.month == 1: self.WaterBodies.getParameterFiles(date_given = str(currTimeStep.fulldate),\ cellArea = self.cellAreaMap, \ ldd = self.lddMap) # reset bed conductance 
at the first month (due to possibility of new inclusion of lakes/reservoirs) if currTimeStep == None or currTimeStep.month == 1: self.bed_conductance = None if isinstance(self.bed_conductance, types.NoneType): logger.info("Estimating surface water bed elevation.") #~ # - for lakes and resevoirs, alternative 1: make the bottom elevation deep --- Shall we do this? NOTE: This will provide unrealistic groundwater depth. Need further investigations (consider to use US). #~ additional_depth = 1500. #~ surface_water_bed_elevation = pcr.ifthen(pcr.scalar(self.WaterBodies.waterBodyIds) > 0.0, \ #~ self.dem_riverbed - additional_depth) # #~ # - for lakes and resevoirs, alternative 2: estimate bed elevation from dem and bankfull depth #~ surface_water_bed_elevation = pcr.ifthen(pcr.scalar(self.WaterBodies.waterBodyIds) > 0.0, self.dem_average) #~ surface_water_bed_elevation = pcr.areaaverage(surface_water_bed_elevation, self.WaterBodies.waterBodyIds) #~ surface_water_bed_elevation -= pcr.areamaximum(self.bankfull_depth, self.WaterBodies.waterBodyIds) # - for lakes and resevoirs, alternative 3: estimate bed elevation from DEM only # This is to avoid that groundwater heads fall too far below DEM # This will also smooth groundwater heads. surface_water_bed_elevation = pcr.ifthen(pcr.scalar(self.WaterBodies.waterBodyIds) > 0.0, self.dem_average) # surface water bed elevation for rivers, lakes and reservoirs surface_water_bed_elevation = pcr.cover(surface_water_bed_elevation, self.dem_riverbed) #~ surface_water_bed_elevation = self.dem_riverbed # This is an alternative, if we do not want to introduce very deep bottom elevations of lakes and/or reservoirs. # rounding values for surface_water_bed_elevation self.surface_water_bed_elevation = pcr.rounddown(surface_water_bed_elevation * 100.)/100. logger.info("Estimating surface water bed conductance.") ############################################################################################################################################ # lake and reservoir fraction (dimensionless) lake_and_reservoir_fraction = pcr.cover(\ pcr.ifthen(pcr.scalar(self.WaterBodies.waterBodyIds) > 0.0, \ self.WaterBodies.fracWat), 0.0) # river fraction (dimensionless) river_fraction = (1.0 - lake_and_reservoir_fraction) * (self.bankfull_width * self.channelLength)/self.cellAreaMap # lake and reservoir resistance (day) lake_and_reservoir_resistance = self.bed_resistance # - assuming a minimum resistance (due to the sedimentation, conductivity: 0.001 m/day and thickness 0.50 m) lake_and_reservoir_resistance = pcr.max(0.50 / 0.001, self.bed_resistance) #~ # to further decrease bed conductance in lakes and reservoir, we limit the lake and reservoir fraction as follows: #~ lake_and_reservoir_fraction = pcr.cover(\ #~ pcr.min(lake_and_reservoir_fraction,\ #~ pcr.ifthen(pcr.scalar(self.WaterBodies.waterBodyIds) > 0.0, \ #~ pcr.areaaverage(self.bankfull_width * self.channelLength, self.WaterBodies.waterBodyIds))), 0.0) #~ # make the lake and reservor resistance even higher (to avoid too high seepage) # TODO: Investigate this !!!! #~ lake_and_reservoir_resistance *= 10. 
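            # The bed conductances computed below follow
            #   conductance = (1/bed_resistance) * wetted_fraction * cellArea   (unit: m2/day).
            # As a rough worked example with hypothetical numbers: a bed resistance of 500 day, a lake fraction
            # of 0.2 and a cell area of 2.5e7 m2 give (1/500) * 0.2 * 2.5e7 = 10000 m2/day.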
            # lake and reservoir conductance (m2/day)
            lake_and_reservoir_conductance = (1.0/lake_and_reservoir_resistance) * lake_and_reservoir_fraction * \
                                             self.cellAreaMap
            # river conductance (m2/day)
            river_conductance = (1.0/self.bed_resistance) * river_fraction * \
                                self.cellAreaMap
            # surface water bed conductance (unit: m2/day)
            bed_conductance = lake_and_reservoir_conductance + river_conductance
            self.bed_conductance = pcr.cover(bed_conductance, 0.0)
            ############################################################################################################################################

            # set minimum conductance values (to remove water above surface level)
            # - assume all cells have minimum river width
            minimum_width = 2.0   # Sutanudjaja et al. (2011)
            minimum_conductance = (1.0/self.bed_resistance) * \
                                  pcr.max(minimum_width, self.bankfull_width) * self.channelLength/self.cellAreaMap
            self.bed_conductance = pcr.max(minimum_conductance, self.bed_conductance)

            logger.info("Estimating outlet widths of lakes and/or reservoirs.")
            # - 'channel width' for lakes and reservoirs
            channel_width = pcr.areamaximum(self.bankfull_width, self.WaterBodies.waterBodyIds)
            self.channel_width = pcr.cover(channel_width, self.bankfull_width)

        logger.info("Estimating surface water elevation.")

        # - convert discharge value to surface water elevation (m) using Manning's equation
        #   (float literals in the exponents avoid Python 2 integer division)
        river_water_height = (self.channel_width**(-3./5.)) * (discharge**(3./5.)) * ((self.gradient)**(-3./10.)) * (self.manningsN**(3./5.))
        surface_water_elevation = self.dem_riverbed + \
                                  river_water_height
        #
        # - calculating water level (unit: m) above the flood plain
        # TODO: Improve this concept (using Rens's latest inundation scheme)
        #----------------------------------------------------------
        water_above_fpl  = pcr.max(0.0, surface_water_elevation - self.dem_floodplain)   # unit: m, water level above the floodplain (not distributed)
        water_above_fpl *= self.bankfull_depth * self.channel_width / self.cellAreaMap   # unit: m, water level above the floodplain (distributed within the cell)
        # TODO: Improve this concept using Rens's latest scheme
        #
        # - corrected surface water elevation
        surface_water_elevation = pcr.ifthenelse(surface_water_elevation > self.dem_floodplain, \
                                                 self.dem_floodplain + water_above_fpl, \
                                                 surface_water_elevation)
        # - surface water elevation for lakes and reservoirs:
        lake_reservoir_water_elevation = pcr.ifthen(self.WaterBodies.waterBodyOut, pcr.min(surface_water_elevation, self.dem_floodplain))
        lake_reservoir_water_elevation = pcr.areamaximum(lake_reservoir_water_elevation, self.WaterBodies.waterBodyIds)
        lake_reservoir_water_elevation = pcr.cover(lake_reservoir_water_elevation, \
                                                   pcr.areaaverage(surface_water_elevation, self.WaterBodies.waterBodyIds))
        # - maximum and minimum values for lake_reservoir_water_elevation
        lake_reservoir_water_elevation = pcr.min(self.dem_floodplain, lake_reservoir_water_elevation)
        lake_reservoir_water_elevation = pcr.max(self.surface_water_bed_elevation, lake_reservoir_water_elevation)
        # - smoothing
        lake_reservoir_water_elevation = pcr.areaaverage(surface_water_elevation, self.WaterBodies.waterBodyIds)
        lake_reservoir_water_elevation = pcr.ifthen(pcr.scalar(self.WaterBodies.waterBodyIds) > 0.0, lake_reservoir_water_elevation)
        #
        # - to minimize negative channel storage, ignore river infiltration in smaller rivers ; no infiltration if HRIV = RBOT (and h < RBOT)
        minimum_channel_width = 5.0
        surface_water_elevation = pcr.ifthenelse(self.channel_width > minimum_channel_width, surface_water_elevation, \
                                                 self.surface_water_bed_elevation)
        #
        # - merge lake and
reservoir water elevation surface_water_elevation = pcr.cover(lake_reservoir_water_elevation, surface_water_elevation) # # - covering missing values and rounding surface_water_elevation = pcr.cover(surface_water_elevation, self.dem_average) surface_water_elevation = pcr.rounddown(surface_water_elevation * 1000.)/1000. # # - make sure that HRIV >= RBOT ; no infiltration if HRIV = RBOT (and h < RBOT) surface_water_elevation = pcr.max(surface_water_elevation, self.surface_water_bed_elevation) # - to minimize negative channel storage, ignore river infiltration with low surface_water_elevation minimum_water_height = 0.50 surface_water_elevation = pcr.ifthenelse((surface_water_elevation - self.surface_water_bed_elevation) > minimum_water_height, surface_water_elevation, \ self.surface_water_bed_elevation) # - to minimize negative channel storage, ignore river infiltration with low channel storage if not isinstance(channel_storage, types.NoneType): minimum_channel_storage = pcr.max(0.0, 0.10 * self.bankfull_depth * self.bankfull_width * self.channelLength) # unit: m3 surface_water_elevation = pcr.ifthenelse(channel_storage > minimum_channel_storage, surface_water_elevation, self.surface_water_bed_elevation) # - also ignore river infiltration in the mountainous region mountainous_extent = pcr.cover(\ pcr.ifthen((self.dem_average - self.dem_floodplain) > 50.0, pcr.boolean(1.0)), pcr.boolean(0.0)) surface_water_elevation = pcr.ifthenelse(mountainous_extent, self.surface_water_bed_elevation, surface_water_elevation) # make sure that HRIV >= RBOT ; no infiltration if HRIV = RBOT (and h < RBOT) surface_water_elevation = pcr.rounddown(surface_water_elevation * 1000.)/1000. surface_water_elevation = pcr.max(surface_water_elevation, self.surface_water_bed_elevation) # reducing the size of table by ignoring cells outside the landmask region bed_conductance_used = pcr.ifthen(self.landmask, self.bed_conductance) bed_conductance_used = pcr.cover(bed_conductance_used, 0.0) #~ # for the case HRIV == RBOT, we can use drain package --------- NOT NEEDED #~ additional_drain_elevation = pcr.cover(\ #~ pcr.ifthen(surface_water_elevation <= self.surface_water_bed_elevation, self.surface_water_bed_elevation), 0.0) #~ additional_drain_conductance = pcr.cover(\ #~ pcr.ifthen(surface_water_elevation <= self.surface_water_bed_elevation, bed_conductance_used), 0.0) #~ bed_conductance_used = \ #~ pcr.ifthenelse(surface_water_elevation <= self.surface_water_bed_elevation, 0.0, bed_conductance_used) #~ # #~ # set the DRN package only to the uppermost layer #~ self.pcr_modflow.setDrain(additional_drain_elevation, \ #~ additional_drain_conductance, self.number_of_layers) # set the RIV package only to the uppermost layer self.pcr_modflow.setRiver(surface_water_elevation, self.surface_water_bed_elevation, bed_conductance_used, self.number_of_layers) # TODO: Improve the concept of RIV package, particularly while calculating surface water elevation in lakes and reservoirs # set drain package self.set_drain_package() def set_recharge_package(self, \ gwRecharge, gwAbstraction = 0.0, gwAbstractionReturnFlow = 0.0): # Note: We ignored the latter as MODFLOW should capture this part as well. 
# We also moved the abstraction to the WELL package logger.info("Set the recharge package.") # specify the recharge package # + recharge/capillary rise (unit: m/day) from PCR-GLOBWB # - groundwater abstraction (unit: m/day) from PCR-GLOBWB # + return flow of groundwater abstraction (unit: m/day) from PCR-GLOBWB net_recharge = gwRecharge - gwAbstraction + \ gwAbstractionReturnFlow # - correcting values (considering MODFLOW lat/lon cell properties) # and pass them to the RCH package net_RCH = pcr.cover(net_recharge * self.cellAreaMap/(pcr.clone().cellSize()*pcr.clone().cellSize()), 0.0) net_RCH = pcr.cover(pcr.ifthenelse(pcr.abs(net_RCH) < 1e-20, 0.0, net_RCH), 0.0) # put the recharge to the top grid/layer self.pcr_modflow.setRecharge(net_RCH, 1) #~ # if we want to put RCH in the lower layer #~ self.pcr_modflow.setIndicatedRecharge(net_RCH, pcr.spatial(pcr.nominal(1))) def set_well_package(self, gwAbstraction): logger.info("Set the well package.") if self.number_of_layers == 1: self.set_well_package_for_one_layer_model(gwAbstraction) if self.number_of_layers == 2: self.set_well_package_for_two_layer_model(gwAbstraction) def set_well_package_for_one_layer_model(self, gwAbstraction): gwAbstraction = pcr.cover(gwAbstraction, 0.0) gwAbstraction = pcr.max(gwAbstraction, 0.0) # abstraction volume (negative value, unit: m3/day) abstraction = pcr.cover(gwAbstraction, 0.0) * self.cellAreaMap * pcr.scalar(-1.0) # set the well package self.pcr_modflow.setWell(abstraction, 1) def set_well_package_for_two_layer_model(self, gwAbstraction): gwAbstraction = pcr.cover(gwAbstraction, 0.0) gwAbstraction = pcr.max(gwAbstraction, 0.0) # abstraction for the layer 1 (lower layer) is limited only in productive aquifer abstraction_layer_1 = pcr.cover(pcr.ifthen(self.productive_aquifer, gwAbstraction), 0.0) # abstraction for the layer 2 (upper layer) abstraction_layer_2 = pcr.max(0.0, gwAbstraction - abstraction_layer_1) # abstraction volume (negative value, unit: m3/day) abstraction_layer_1 = abstraction_layer_1 * self.cellAreaMap * pcr.scalar(-1.0) abstraction_layer_2 = abstraction_layer_2 * self.cellAreaMap * pcr.scalar(-1.0) # set the well package self.pcr_modflow.setWell(abstraction_layer_1, 1) self.pcr_modflow.setWell(abstraction_layer_2, 2) def set_well_package_OLD(self, gwAbstraction): logger.info("Set the well package.") # reducing the size of table by ignoring cells with zero abstraction gwAbstraction = pcr.ifthen(gwAbstraction > 0.0, gwAbstraction) # abstraction only in productive aquifer gwAbstraction = pcr.ifthen(self.productive_aquifer, gwAbstraction) # abstraction volume (negative value, unit: m3/day) abstraction = gwAbstraction * self.cellAreaMap * pcr.scalar(-1.0) # FIXME: The following cover operations should not be necessary (Oliver should fix this). 
abstraction = pcr.cover(abstraction, 0.0) # set the well based on number of layers if self.number_of_layers == 1: self.pcr_modflow.setWell(abstraction, 1) if self.number_of_layers == 2: self.pcr_modflow.setWell(abstraction, 1) # at the bottom layer #~ print('test') def set_drain_package(self): logger.info("Set the drain package (for the release of over bank storage).") # specify the drain package the drain package is used to simulate the drainage of bank storage # - estimate bottom of bank storage for flood plain areas drain_elevation = self.estimate_bottom_of_bank_storage() # unit: m # - for lakes and/or reservoirs, ignore the drainage drain_conductance = pcr.ifthen(pcr.scalar(self.WaterBodies.waterBodyIds) > 0.0, pcr.scalar(0.0)) # - drainage conductance is a linear reservoir coefficient drain_conductance = pcr.cover(drain_conductance, \ self.recessionCoeff * self.specificYield * self.cellAreaMap) # unit: m2/day #~ drain_conductance = pcr.ifthenelse(drain_conductance < 1e-20, 0.0, \ #~ drain_conductance) #~ drain_conductance = pcr.rounddown(drain_conductance*10000.)/10000. # It is not a good idea to round the values down (water can be trapped). # reducing the size of table by ignoring cells outside landmask region drain_conductance = pcr.ifthen(self.landmask, drain_conductance) drain_conductance = pcr.cover(drain_conductance, 0.0) #~ # set the DRN package only to the uppermost layer #~ self.pcr_modflow.setDrain(drain_elevation, drain_conductance, self.number_of_layers) # set the DRN package only to both layers ------ 4 January 2016: I think that we should use this as we want all recharge will be released as baseflow. self.pcr_modflow.setDrain(drain_elevation, drain_conductance, 1) self.pcr_modflow.setDrain(drain_elevation, drain_conductance, 2) #~ # set the DRN package only to the lowermost layer #~ self.pcr_modflow.setDrain(drain_elevation, drain_conductance, 1) #~ self.pcr_modflow.setDrain(pcr.spatial(pcr.scalar(0.0)),pcr.spatial(pcr.scalar(0.0)), 2) def return_innundation_fraction(self,relative_water_height): # - fractions of flooded area (in percentage) based on the relative_water_height (above the minimum dem) DZRIV = relative_water_height CRFRAC_RIV = pcr.min(1.0,1.00-(self.dzRel0100-DZRIV)*0.10/pcr.max(1e-3,self.dzRel0100-self.dzRel0090) ) CRFRAC_RIV = pcr.ifthenelse(DZRIV<self.dzRel0090,0.90-(self.dzRel0090-DZRIV)*0.10/pcr.max(1e-3,self.dzRel0090-self.dzRel0080),CRFRAC_RIV ) CRFRAC_RIV = pcr.ifthenelse(DZRIV<self.dzRel0080,0.80-(self.dzRel0080-DZRIV)*0.10/pcr.max(1e-3,self.dzRel0080-self.dzRel0070),CRFRAC_RIV ) CRFRAC_RIV = pcr.ifthenelse(DZRIV<self.dzRel0070,0.70-(self.dzRel0070-DZRIV)*0.10/pcr.max(1e-3,self.dzRel0070-self.dzRel0060),CRFRAC_RIV ) CRFRAC_RIV = pcr.ifthenelse(DZRIV<self.dzRel0060,0.60-(self.dzRel0060-DZRIV)*0.10/pcr.max(1e-3,self.dzRel0060-self.dzRel0050),CRFRAC_RIV ) CRFRAC_RIV = pcr.ifthenelse(DZRIV<self.dzRel0050,0.50-(self.dzRel0050-DZRIV)*0.10/pcr.max(1e-3,self.dzRel0050-self.dzRel0040),CRFRAC_RIV ) CRFRAC_RIV = pcr.ifthenelse(DZRIV<self.dzRel0040,0.40-(self.dzRel0040-DZRIV)*0.10/pcr.max(1e-3,self.dzRel0040-self.dzRel0030),CRFRAC_RIV ) CRFRAC_RIV = pcr.ifthenelse(DZRIV<self.dzRel0030,0.30-(self.dzRel0030-DZRIV)*0.10/pcr.max(1e-3,self.dzRel0030-self.dzRel0020),CRFRAC_RIV ) CRFRAC_RIV = pcr.ifthenelse(DZRIV<self.dzRel0020,0.20-(self.dzRel0020-DZRIV)*0.10/pcr.max(1e-3,self.dzRel0020-self.dzRel0010),CRFRAC_RIV ) CRFRAC_RIV = pcr.ifthenelse(DZRIV<self.dzRel0010,0.10-(self.dzRel0010-DZRIV)*0.05/pcr.max(1e-3,self.dzRel0010-self.dzRel0005),CRFRAC_RIV ) CRFRAC_RIV = 
pcr.ifthenelse(DZRIV<self.dzRel0005,0.05-(self.dzRel0005-DZRIV)*0.04/pcr.max(1e-3,self.dzRel0005-self.dzRel0001),CRFRAC_RIV ) CRFRAC_RIV = pcr.ifthenelse(DZRIV<self.dzRel0001,0.01-(self.dzRel0001-DZRIV)*0.01/pcr.max(1e-3,self.dzRel0001) ,CRFRAC_RIV ) CRFRAC_RIV = pcr.ifthenelse(DZRIV<=0,0, CRFRAC_RIV) # - minimum value of innundation fraction is river/channel area CRFRAC_RIV = pcr.cover(pcr.max(0.0,pcr.min(1.0,pcr.max(CRFRAC_RIV,(self.bankfull_depth*self.bankfull_width/self.cellAreaMap)))),scalar(0)) ; # TODO: Improve this concept using Rens's latest scheme def old_style_reporting(self,currTimeStep): if self.report == True: timeStamp = datetime.datetime(currTimeStep.year,\ currTimeStep.month,\ currTimeStep.day,\ 0) # writing daily output to netcdf files timestepPCR = currTimeStep.timeStepPCR if self.outDailyTotNC[0] != "None": for var in self.outDailyTotNC: self.netcdfObj.data2NetCDF(str(self.outNCDir)+"/"+ \ str(var)+"_dailyTot.nc",\ var,\ pcr2numpy(self.__getattribute__(var),vos.MV),\ timeStamp,timestepPCR-1) # writing monthly output to netcdf files # -cummulative if self.outMonthTotNC[0] != "None": for var in self.outMonthTotNC: # introduce variables at the beginning of simulation or # reset variables at the beginning of the month if currTimeStep.timeStepPCR == 1 or \ currTimeStep.day == 1:\ vars(self)[var+'MonthTot'] = pcr.scalar(0.0) # accumulating vars(self)[var+'MonthTot'] += vars(self)[var] # reporting at the end of the month: if currTimeStep.endMonth == True: self.netcdfObj.data2NetCDF(str(self.outNCDir)+"/"+ \ str(var)+"_monthTot.nc",\ var,\ pcr2numpy(self.__getattribute__(var+'MonthTot'),\ vos.MV),timeStamp,currTimeStep.monthIdx-1) # -average if self.outMonthAvgNC[0] != "None": for var in self.outMonthAvgNC: # only if a accumulator variable has not been defined: if var not in self.outMonthTotNC: # introduce accumulator at the beginning of simulation or # reset accumulator at the beginning of the month if currTimeStep.timeStepPCR == 1 or \ currTimeStep.day == 1:\ vars(self)[var+'MonthTot'] = pcr.scalar(0.0) # accumulating vars(self)[var+'MonthTot'] += vars(self)[var] # calculating average & reporting at the end of the month: if currTimeStep.endMonth == True: vars(self)[var+'MonthAvg'] = vars(self)[var+'MonthTot']/\ currTimeStep.day self.netcdfObj.data2NetCDF(str(self.outNCDir)+"/"+ \ str(var)+"_monthAvg.nc",\ var,\ pcr2numpy(self.__getattribute__(var+'MonthAvg'),\ vos.MV),timeStamp,currTimeStep.monthIdx-1) # # -last day of the month if self.outMonthEndNC[0] != "None": for var in self.outMonthEndNC: # reporting at the end of the month: if currTimeStep.endMonth == True: self.netcdfObj.data2NetCDF(str(self.outNCDir)+"/"+ \ str(var)+"_monthEnd.nc",\ var,\ pcr2numpy(self.__getattribute__(var),vos.MV),\ timeStamp,currTimeStep.monthIdx-1) # writing yearly output to netcdf files # -cummulative if self.outAnnuaTotNC[0] != "None": for var in self.outAnnuaTotNC: # introduce variables at the beginning of simulation or # reset variables at the beginning of the month if currTimeStep.timeStepPCR == 1 or \ currTimeStep.doy == 1:\ vars(self)[var+'AnnuaTot'] = pcr.scalar(0.0) # accumulating vars(self)[var+'AnnuaTot'] += vars(self)[var] # reporting at the end of the year: if currTimeStep.endYear == True: self.netcdfObj.data2NetCDF(str(self.outNCDir)+"/"+ \ str(var)+"_annuaTot.nc",\ var,\ pcr2numpy(self.__getattribute__(var+'AnnuaTot'),\ vos.MV),timeStamp,currTimeStep.annuaIdx-1) # -average if self.outAnnuaAvgNC[0] != "None": for var in self.outAnnuaAvgNC: # only if a accumulator variable has not 
been defined: if var not in self.outAnnuaTotNC: # introduce accumulator at the beginning of simulation or # reset accumulator at the beginning of the year if currTimeStep.timeStepPCR == 1 or \ currTimeStep.doy == 1:\ vars(self)[var+'AnnuaTot'] = pcr.scalar(0.0) # accumulating vars(self)[var+'AnnuaTot'] += vars(self)[var] # # calculating average & reporting at the end of the year: if currTimeStep.endYear == True: vars(self)[var+'AnnuaAvg'] = vars(self)[var+'AnnuaTot']/\ currTimeStep.doy self.netcdfObj.data2NetCDF(str(self.outNCDir)+"/"+ \ str(var)+"_annuaAvg.nc",\ var,\ pcr2numpy(self.__getattribute__(var+'AnnuaAvg'),\ vos.MV),timeStamp,currTimeStep.annuaIdx-1) # # -last day of the year if self.outAnnuaEndNC[0] != "None": for var in self.outAnnuaEndNC: # reporting at the end of the year: if currTimeStep.endYear == True: self.netcdfObj.data2NetCDF(str(self.outNCDir)+"/"+ \ str(var)+"_annuaEnd.nc",\ var,\ pcr2numpy(self.__getattribute__(var),vos.MV),\ timeStamp,currTimeStep.annuaIdx-1)
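# ------------------------------------------------------------------
# Illustrative sketch only (not part of the model code above): the RIV package built in
# set_river_package boils down to three numbers per cell -- stage (HRIV), bed elevation
# (RBOT) and bed conductance.  The toy function below restates, for a single cell, the
# Manning-based stage estimate and the resistance-based conductance used above; every
# numeric value in the example call is hypothetical, not a model default.
import numpy as np

def riv_inputs_for_one_cell(discharge, bankfull_width, channel_length, cell_area,
                            gradient, mannings_n, bed_resistance, bed_elevation):
    # Manning's equation for a wide rectangular channel, solved for water depth:
    # h = (n*Q / (w*sqrt(S)))**(3/5), equivalent to the expression in set_river_package
    depth = (mannings_n * discharge / (bankfull_width * np.sqrt(gradient))) ** 0.6
    stage = bed_elevation + depth                       # HRIV (m)
    # conductance = (1/resistance) * wetted fraction * cell area   (m2/day)
    river_fraction = bankfull_width * channel_length / cell_area
    conductance = (1.0 / bed_resistance) * river_fraction * cell_area
    return stage, bed_elevation, conductance

# hypothetical single-cell example
print(riv_inputs_for_one_cell(discharge=800.0, bankfull_width=150.0, channel_length=12000.0,
                              cell_area=1.0e8, gradient=5.0e-4, mannings_n=0.045,
                              bed_resistance=1.0, bed_elevation=210.0))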
SALINE COUNTY, Mo. — An investigation is underway after a body was pulled from the Missouri River Sunday afternoon in Saline County. Saline County Sheriff’s deputies responded to the river near 310 Road at Malta Bend at around 1:45 p.m. after they received a report of a body found in the water. The body was recovered by the Missouri State Water Patrol. The Saline County coroner confirmed the body was a man and that an autopsy will be conducted to determine the cause of death. No further information has been released at this time. FOX4 will continue to update this story as new details become available.
# Mantid Repository : https://github.com/mantidproject/mantid # # Copyright &copy; 2018 ISIS Rutherford Appleton Laboratory UKRI, # NScD Oak Ridge National Laboratory, European Spallation Source, # Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS # SPDX - License - Identifier: GPL - 3.0 + import unittest import testhelpers from mantid.kernel import IntArrayOrderedPairsValidator, FloatArrayOrderedPairsValidator, \ IntArrayProperty, FloatArrayProperty from mantid.api import PythonAlgorithm class ArrayOrderedPairsValidatorTest(unittest.TestCase): def test_fail_odd_entries(self): alg = self._create_alg() int_vals = [5,7,13] float_vals = [2.1] self.assertRaises(ValueError, alg.setProperty, "IntInput", int_vals) self.assertRaises(ValueError, alg.setProperty, "FloatInput", float_vals) def test_fail_unordered_pairs(self): alg = self._create_alg() int_vals = [5, 18, 4, 2] float_vals = [2.1, 5.7, 4.3, 1.5] self.assertRaises(ValueError, alg.setProperty, "IntInput", int_vals) self.assertRaises(ValueError, alg.setProperty, "FloatInput", float_vals) def test_pass_ordered_pairs(self): alg = self._create_alg() int_vals = [5, 18, 4, 9] float_vals = [2.1, 5.7, 4.3, 6.7] testhelpers.assertRaisesNothing(self, alg.setProperty, "IntInput", int_vals) testhelpers.assertRaisesNothing(self, alg.setProperty, "FloatInput", float_vals) def _create_alg(self): """ Creates a test algorithm with a ordered pairs validator """ class TestAlgorithm(PythonAlgorithm): def PyInit(self): int_validator = IntArrayOrderedPairsValidator() self.declareProperty(IntArrayProperty("IntInput", int_validator)) float_validator = FloatArrayOrderedPairsValidator() self.declareProperty(FloatArrayProperty("FloatInput", float_validator)) def PyExec(self): pass alg = TestAlgorithm() alg.initialize() return alg if __name__ == '__main__': unittest.main()
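# ------------------------------------------------------------------
# Added note (illustrative only): the rule the tests above exercise can be restated in
# plain Python as "an even number of entries, with each consecutive pair ordered".  The
# real check lives in Mantid's C++ validators; whether equal entries count as ordered is
# an assumption here.
def _looks_like_ordered_pairs(values):
    if len(values) % 2 != 0:                  # odd number of entries -> invalid
        return False
    return all(values[i] < values[i + 1] for i in range(0, len(values), 2))

assert not _looks_like_ordered_pairs([5, 7, 13])      # odd length fails
assert not _looks_like_ordered_pairs([5, 18, 4, 2])   # second pair (4, 2) is unordered
assert _looks_like_ordered_pairs([5, 18, 4, 9])       # both pairs ordered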
Nov. 29, 2018 9:45 a.m. Conservative MP David Yurdiga says people suffering from disabilities that worsen and ease aren’t treated fairly under Canadian law and he wants to change that. Yurdiga says he wants to see legislative and policy changes that would ensure those living with multiple sclerosis, epilepsy, arthritis, and other “episodic” disabilities are protected, can stay in the workforce and get better access to programs they need. Yurdiga’s wife Kathy was diagnosed with MS in 2004 and she says she still wakes up every day wondering whether she will be disabled. The Fort McMurray-Cold Lake MP says his family had a support system when his wife first had difficulty using her hands, including those who worked with her in their family business and their children. But Yurdiga says he’s heard from thousands of Canadians living with episodic disabilities who have been forced into disability systems that don’t allow them to work. Yurdiga introduced a motion in the House of Commons in early November that calls on the Commons’ human-resources committee to study the issue. The committee discussed the topic for the first time today. Yurdiga says that’s a first step but his goal is stronger laws.
#!/usr/bin/env python # vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai from __future__ import with_statement __license__ = 'GPL v3' __copyright__ = '2009, Kovid Goyal <[email protected]>' __docformat__ = 'restructuredtext en' import sys from xml.sax.saxutils import escape from lxml import etree from calibre import guess_type, strftime from calibre.ebooks.BeautifulSoup import BeautifulSoup from calibre.ebooks.oeb.base import XPath, XHTML_NS, XHTML, xml2text, urldefrag from calibre.library.comments import comments_to_html from calibre.utils.date import is_date_undefined from calibre.ebooks.chardet import strip_encoding_declarations JACKET_XPATH = '//h:meta[@name="calibre-content" and @content="jacket"]' class Jacket(object): ''' Book jacket manipulation. Remove first image and insert comments at start of book. ''' def remove_images(self, item, limit=1): path = XPath('//h:img[@src]') removed = 0 for img in path(item.data): if removed >= limit: break href = item.abshref(img.get('src')) image = self.oeb.manifest.hrefs.get(href, None) if image is not None: self.oeb.manifest.remove(image) img.getparent().remove(img) removed += 1 return removed def remove_first_image(self): deleted_item = None for item in self.oeb.spine: removed = self.remove_images(item) if removed > 0: self.log('Removed first image') body = XPath('//h:body')(item.data) if body: raw = xml2text(body[0]).strip() imgs = XPath('//h:img|//svg:svg')(item.data) if not raw and not imgs: self.log('Removing %s as it has no content'%item.href) self.oeb.manifest.remove(item) deleted_item = item break if deleted_item is not None: for item in list(self.oeb.toc): href = urldefrag(item.href)[0] if href == deleted_item.href: self.oeb.toc.remove(item) def insert_metadata(self, mi): self.log('Inserting metadata into book...') try: tags = map(unicode, self.oeb.metadata.subject) except: tags = [] try: comments = unicode(self.oeb.metadata.description[0]) except: comments = '' try: title = unicode(self.oeb.metadata.title[0]) except: title = _('Unknown') root = render_jacket(mi, self.opts.output_profile, alt_title=title, alt_tags=tags, alt_comments=comments) id, href = self.oeb.manifest.generate('calibre_jacket', 'jacket.xhtml') item = self.oeb.manifest.add(id, href, guess_type(href)[0], data=root) self.oeb.spine.insert(0, item, True) self.oeb.inserted_metadata_jacket = item def remove_existing_jacket(self): for x in self.oeb.spine[:4]: if XPath(JACKET_XPATH)(x.data): self.remove_images(x, limit=sys.maxint) self.oeb.manifest.remove(x) self.log('Removed existing jacket') break def __call__(self, oeb, opts, metadata): ''' Add metadata in jacket.xhtml if specified in opts If not specified, remove previous jacket instance ''' self.oeb, self.opts, self.log = oeb, opts, oeb.log self.remove_existing_jacket() if opts.remove_first_image: self.remove_first_image() if opts.insert_metadata: self.insert_metadata(metadata) # Render Jacket {{{ def get_rating(rating, rchar, e_rchar): ans = '' try: num = float(rating)/2 except: return ans num = max(0, num) num = min(num, 5) if num < 1: return ans ans = ("%s%s") % (rchar * int(num), e_rchar * (5 - int(num))) return ans def render_jacket(mi, output_profile, alt_title=_('Unknown'), alt_tags=[], alt_comments='', alt_publisher=('')): css = P('jacket/stylesheet.css', data=True).decode('utf-8') try: title_str = mi.title if mi.title else alt_title except: title_str = _('Unknown') title = '<span class="title">%s</span>' % (escape(title_str)) series = escape(mi.series if mi.series else '') if mi.series and mi.series_index is 
not None: series += escape(' [%s]'%mi.format_series_index()) if not mi.series: series = '' try: publisher = mi.publisher if mi.publisher else alt_publisher except: publisher = '' try: if is_date_undefined(mi.pubdate): pubdate = '' else: pubdate = strftime(u'%Y', mi.pubdate.timetuple()) except: pubdate = '' rating = get_rating(mi.rating, output_profile.ratings_char, output_profile.empty_ratings_char) tags = mi.tags if mi.tags else alt_tags if tags: tags = output_profile.tags_to_string(tags) else: tags = '' comments = mi.comments if mi.comments else alt_comments comments = comments.strip() orig_comments = comments if comments: comments = comments_to_html(comments) try: author = mi.format_authors() except: author = '' def generate_html(comments): args = dict(xmlns=XHTML_NS, title_str=title_str, css=css, title=title, author=author, publisher=publisher, pubdate_label=_('Published'), pubdate=pubdate, series_label=_('Series'), series=series, rating_label=_('Rating'), rating=rating, tags_label=_('Tags'), tags=tags, comments=comments, footer='' ) for key in mi.custom_field_keys(): try: display_name, val = mi.format_field_extended(key)[:2] key = key.replace('#', '_') args[key] = escape(val) args[key+'_label'] = escape(display_name) except: # if the val (custom column contents) is None, don't add to args pass if False: print("Custom column values available in jacket template:") for key in args.keys(): if key.startswith('_') and not key.endswith('_label'): print(" %s: %s" % ('#' + key[1:], args[key])) # Used in the comment describing use of custom columns in templates # Don't change this unless you also change it in template.xhtml args['_genre_label'] = args.get('_genre_label', '{_genre_label}') args['_genre'] = args.get('_genre', '{_genre}') generated_html = P('jacket/template.xhtml', data=True).decode('utf-8').format(**args) # Post-process the generated html to strip out empty header items soup = BeautifulSoup(generated_html) if not series: series_tag = soup.find(attrs={'class':'cbj_series'}) if series_tag is not None: series_tag.extract() if not rating: rating_tag = soup.find(attrs={'class':'cbj_rating'}) if rating_tag is not None: rating_tag.extract() if not tags: tags_tag = soup.find(attrs={'class':'cbj_tags'}) if tags_tag is not None: tags_tag.extract() if not pubdate: pubdate_tag = soup.find(attrs={'class':'cbj_pubdata'}) if pubdate_tag is not None: pubdate_tag.extract() if output_profile.short_name != 'kindle': hr_tag = soup.find('hr', attrs={'class':'cbj_kindle_banner_hr'}) if hr_tag is not None: hr_tag.extract() return strip_encoding_declarations( soup.renderContents('utf-8').decode('utf-8')) from calibre.ebooks.oeb.base import RECOVER_PARSER try: root = etree.fromstring(generate_html(comments), parser=RECOVER_PARSER) except: try: root = etree.fromstring(generate_html(escape(orig_comments)), parser=RECOVER_PARSER) except: root = etree.fromstring(generate_html(''), parser=RECOVER_PARSER) return root # }}} def linearize_jacket(oeb): for x in oeb.spine[:4]: if XPath(JACKET_XPATH)(x.data): for e in XPath('//h:table|//h:tr|//h:th')(x.data): e.tag = XHTML('div') for e in XPath('//h:td')(x.data): e.tag = XHTML('span') break
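# Added usage note (illustrative only): calibre stores ratings on a 0-10 scale;
# get_rating() above halves the value, truncates it, and renders up to five characters,
# padding with the "empty" rating character.
if __name__ == '__main__':
    print(get_rating(7, '*', '.'))   # '***..'  -- 7/2 = 3.5, truncated to 3 full stars
    print(get_rating(0, '*', '.'))   # ''       -- anything below one star renders nothing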
performed by Dr. Oleh Slupchynskyj. Rhinoplasty is considered one of the most complex Facial Plastic Surgery procedures, requiring a highly skilled and experienced surgeon. After having their Rhinoplasty done, not everyone sees the results they initially desired, for multi-factorial reasons, making Rhinoplasty one of the most frequently revised procedures in Cosmetic Surgery, with national revision rates of roughly 10-15%. The procedure to correct a poor or undesirable Rhinoplasty result is called Revision Rhinoplasty, or Secondary Rhinoplasty. To ensure a patient is pleased with the results, an experienced New York Revision Rhinoplasty Surgeon should be sought. Dr. Oleh Slupchynskyj, a Board Certified Manhattan Facial Plastic Surgeon, has more than 18 years of experience performing Revision Nose Surgery on patients who underwent their Primary Rhinoplasty with another doctor and received less than satisfactory results. If you are unhappy with the results of your first Rhinoplasty and/or have trouble breathing post-operatively, are physically healthy and have realistic expectations, you may be a good candidate for this procedure. Because post-surgical swelling can persist for 6 months and up to 1 year after your Primary Rhinoplasty, it is advised that Revision Surgery be performed after this healing phase. If, however, there are complications impairing function, a consultation is suggested, as the complication may need to be addressed immediately. How long does Revision Nose Surgery take? Procedure times vary with the number and complexity of your specific requirements; generally speaking, Revision Rhinoplasty procedures take anywhere from 2 to 3 hours. What kind of Anesthesia is used for a Revision Rhinoplasty? Typically, as with a Primary Rhinoplasty, Revision Rhinoplasty is performed with Local Anesthesia combined with light sedation. Dr. Oleh Slupchynskyj and his Anesthesiologist(s) will review your medical history and discuss all questions and options with you. Should an “Open” or “Closed” Technique be Used? Revision Rhinoplasty New York Surgeon Dr. Slupchynskyj is experienced with both Open and Closed Rhinoplasty, and will assess each patient as to which technique would best help them achieve the results they desire. Endonasal, also known as “Closed” Rhinoplasty, is a technique where incisions are made only on the inside of the nose, resulting in invisible scars. This Closed approach limits direct visualization of the nasal anatomy, which can compromise the results. The Closed approach is not recommended for nasal tip work and should be reserved for simpler cases such as a dorsal hump reduction. External or “Open” Rhinoplasty is a more advanced technique that is used for complex cases which may involve bridge augmentation, tip alteration, or Revision Surgery. With the open technique, a small incision is made between the nostrils on the outside of the nose. What if my nose looks too small or unnatural? Can this be corrected? This is a very common concern for New York / Manhattan / NYC Revision Rhinoplasty patients. This error is a result of another surgeon removing too much cartilage and excessively reducing the bony part of the nose during the initial Rhinoplasty. Dr. Slupchynskyj has years of experience with patients who come to him from elsewhere and are displeased with a nose that appears scooped out, upturned, too pointy or downturned due to a poor Primary Rhinoplasty. Dr.
Slupchynskyj can rebuild noses with grafting techniques and customized silastic implants, with which he has extensive experience. What kinds of situations require Revision Rhinoplasty? People seek Revision Rhinoplasty when they are dissatisfied with their Primary Rhinoplasty results. This can be either aesthetic (appearance) or functional (breathing). Some patients come to Dr. Slupchynskyj experiencing both. The underlying cause of many poor results in Primary Nose Surgery is surgical error: under-resection, where not enough cartilage is removed, or over-resection, where too much is removed during a Primary Rhinoplasty. Other common issues are asymmetry and abnormal scarring. Dr. Slupchynskyj’s patients are typically ready to go back to work within 7 days after surgery. He recommends that the patient take about 2 weeks off from exercise and other strenuous physical activities in order to avoid accidental injury to the nose. Revision Rhinoplasty is performed as an outpatient procedure, thus requiring no overnight stay. What are the risks of Revision Rhinoplasty Surgery? After performing thousands of Revision Surgeries with no complications, Dr. Oleh Slupchynskyj is well versed in the best practices for avoiding risks. During your initial consultation, Dr. Slupchynskyj reviews any risks associated with Revision Rhinoplasty with you. Complications of Primary and even Revision Rhinoplasty Surgery happen when a surgeon removes too much bone or cartilage, when an implant is improperly placed, or if the wrong type of implant was used. How many revisions are possible? While less surgery is always preferred, Dr. Slupchynskyj has worked with patients who have had one or many more revisions with other doctors, with less than satisfying results. If there are severe deformities or there is trouble breathing, revision may be a recommended option. Dr. Slupchynskyj will consult with you to discuss your individual situation and explain all risks, the likelihood of achieving your desired result, and your next steps. Revision Rhinoplasty costs will vary significantly depending on the number and complexity of changes that need to be made, as well as the technique being used. During a comprehensive consultation, Dr. Slupchynskyj will discuss all aspects of the surgery, including how much it will cost. It is important for patients to consult with their insurance providers to determine if part of the cost may be covered under their plan. How can I be sure I’m choosing the right surgeon? Revision Rhinoplasty is a highly complex procedure that only a few surgeons truly specialize in. First, the surgeon you choose should specialize in Facial Plastic Surgery and specifically Rhinoplasty, as opposed to General Plastic Surgeons who perform Body Plastic Surgery (Breast Augmentation, Liposuction). Second, this surgeon should be Double Board Certified by both The American Academy of Facial Plastic and Reconstructive Surgery and The American Board of Otolaryngology. Your surgeon should not only have the training and expertise to create excellent aesthetic Revision Rhinoplasty results; it is also essential that they be properly trained, skilled and experienced in maintaining the integrity and functionality of the nose. Only a Double Board Certified Facial Plastic Surgeon can do both.
from logistic_regression import LogRegression import random def random_point(): ''' Returns a random 2-dimensional vector of floats between -1 and +1 ''' return [random.uniform(-1., 1.), random.uniform(-1., 1.)] def generate_line(): ''' Randomly generates a line from 2 random points in [-1,1]x[-1,1] and returns the tuple (m, q, inv) for y = mx + q with inv a boolean which decides what side of the line maps to +1 (ignores vertical lines) ''' while (True): pointA = random_point() pointB = random_point() if ((pointB[0] - pointA[0]) != 0): break m = (pointB[1] - pointA[1]) / (pointB[0] - pointA[0]) q = pointA[1] - m*pointA[0] inv = bool(random.getrandbits(1)) return (m, q, inv) def compute_y(line, point): ''' Takes an (m, q, inv) tuple representing a line and takes a point, computes y Returns 1 if the point is over the line, returns -1 if it's under it ''' if (point[1] >= (line[0]*point[0] + line[1])): if (line[2]): return 1 else: return -1 else: if (line[2]): return -1 else: return 1 def generate_dataset(line, n): ''' Takes an (m, q, inv) tuple representing a line and n=total number of datapoints to generate Returns a length n list of tuples (x, y) with x a random vector and y=f(x) ''' data = [] for c in range(n): x = random_point() y = compute_y(line, x) data.append((x, y)) return data def experiment(n): l = LogRegression(2, 0.01) total_Eout = 0.0 total_epochs = 0 for run in range(100): line = generate_line() data = generate_dataset(line, n) l.reset(data) l.gradient_descent(0.01) total_epochs += l.epochs new_data = generate_dataset(line, n*10) total_Eout += l.cross_entropy_error(new_data) avg_Eout = total_Eout / 100 avg_epochs = total_epochs / 100 return (avg_Eout, avg_epochs) print(experiment(100))
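# ------------------------------------------------------------------
# The script above imports LogRegression from a local module that is not shown here.
# The sketch below is one hypothetical implementation consistent with how it is used
# (constructor takes dimension and learning rate; reset(data); gradient_descent(tol)
# stops when the weight vector moves less than tol per epoch; epochs counter;
# cross_entropy_error(data)).  The real module may differ.
import math, random

class LogRegressionSketch:
    def __init__(self, dim, eta):
        self.dim, self.eta = dim, eta
        self.w = [0.0] * (dim + 1)
        self.epochs = 0

    def reset(self, data):
        self.data = data
        self.w = [0.0] * (self.dim + 1)
        self.epochs = 0

    def _grad(self, x, y):
        xv = [1.0] + x                      # prepend bias term
        s = y * sum(wi * xi for wi, xi in zip(self.w, xv))
        return [-y * xi / (1.0 + math.exp(s)) for xi in xv]

    def gradient_descent(self, tol):
        # one stochastic pass over the data per epoch; stop when the weights barely move
        while True:
            old_w = list(self.w)
            for x, y in random.sample(self.data, len(self.data)):
                g = self._grad(x, y)
                self.w = [wi - self.eta * gi for wi, gi in zip(self.w, g)]
            self.epochs += 1
            if math.sqrt(sum((a - b) ** 2 for a, b in zip(self.w, old_w))) < tol:
                break

    def cross_entropy_error(self, data):
        total = 0.0
        for x, y in data:
            s = y * sum(wi * xi for wi, xi in zip(self.w, [1.0] + x))
            total += math.log(1.0 + math.exp(-s))
        return total / len(data)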
PILOT is a recognised brand of premium quality bakery fats. The key to this achievement is quite simply the passion that PILOT has to deliver “Proven Perfection” for our customers. Our range of products is as diverse as our customers’ businesses and their needs. The PILOT range includes bakery margarines and shortenings for all baking applications including biscuits, cakes, pastries, donuts, creams and pan releases. PILOT is present in China, Hong Kong, the Philippines, Singapore & Vietnam.
#!/usr/bin/env python # -*- coding: utf-8 -*- import urllib import chardet from pyquery import PyQuery as pq #urls = [ # 'http://standard.navitime.biz/renoir/AroundMapSearch.act?acode=13&count=127', # 'http://standard.navitime.biz/renoir/AroundMapSearch.act?page=1&count=127&acode=13&cond=0.0.0.0.0.0&isDefaultSearch=false', # 'http://standard.navitime.biz/renoir/AroundMapSearch.act?page=2&count=127&acode=13&cond=0.0.0.0.0.0&isDefaultSearch=false', # 'http://standard.navitime.biz/renoir/AroundMapSearch.act?page=3&count=127&acode=13&cond=0.0.0.0.0.0&isDefaultSearch=false', # 'http://standard.navitime.biz/renoir/AroundMapSearch.act?page=4&count=127&acode=13&cond=0.0.0.0.0.0&isDefaultSearch=false', # 'http://standard.navitime.biz/renoir/AroundMapSearch.act?page=5&count=127&acode=13&cond=0.0.0.0.0.0&isDefaultSearch=false', # 'http://standard.navitime.biz/renoir/AroundMapSearch.act?page=6&count=127&acode=13&cond=0.0.0.0.0.0&isDefaultSearch=false', # 'http://standard.navitime.biz/renoir/AroundMapSearch.act?page=7&count=127&acode=13&cond=0.0.0.0.0.0&isDefaultSearch=false', # 'http://standard.navitime.biz/renoir/AroundMapSearch.act?page=8&count=127&acode=13&cond=0.0.0.0.0.0&isDefaultSearch=false', # 'http://standard.navitime.biz/renoir/AroundMapSearch.act?page=9&count=127&acode=13&cond=0.0.0.0.0.0&isDefaultSearch=false', # 'http://standard.navitime.biz/renoir/AroundMapSearch.act?page=10&count=127&acode=13&cond=0.0.0.0.0.0&isDefaultSearch=false', # 'http://standard.navitime.biz/renoir/AroundMapSearch.act?page=11&count=127&acode=13&cond=0.0.0.0.0.0&isDefaultSearch=false', # 'http://standard.navitime.biz/renoir/AroundMapSearch.act?page=12&count=127&acode=13&cond=0.0.0.0.0.0&isDefaultSearch=false', # 'http://standard.navitime.biz/renoir/AroundMapSearch.act?page=13&count=127&acode=13&cond=0.0.0.0.0.0&isDefaultSearch=false', #] urls = ['http://standard.navitime.biz/renoir/AroundMapSearch.act?acode=13&count=127'] if __name__ == '__main__': detected = [] for url in urls: data = ''.join(urllib.urlopen(url).readlines()) guess = chardet.detect(data) result = dict(url = url, data = data, **guess) detected.append(result) result = [] for p in detected: unicoded = p['data'].decode(p['encoding']) d = pq(unicoded) for item in d.find('.item'): shop_name = pq(item).find('.spotName a').text(); shop_detail_url = pq(item).find('.spotName a').eq(1).attr.href address = pq(item).find('.f12s').eq(0).text() tel = pq(item).find('.f12s').eq(1).text() print shop_name print 'http://standard.navitime.biz/renoir/' + shop_detail_url print address print tel print '----'
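# ------------------------------------------------------------------
# Hypothetical follow-up (not in the original script): instead of printing each shop,
# the parsed fields could be collected into tuples and written to a CSV file.  The
# function name, field names and output path below are illustrative only.
import csv

def write_shops_csv(rows, path='renoir_shops.csv'):
    # rows: list of (name, url, address, tel) unicode tuples as extracted in the loop above
    with open(path, 'wb') as f:
        writer = csv.writer(f)
        writer.writerow(['name', 'url', 'address', 'tel'])
        for name, url, address, tel in rows:
            writer.writerow([v.encode('utf-8') for v in (name, url, address, tel)])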
We are comprised of product development, engineering and technical service experts supporting specialized solutions groups such as strengthening, post-tensioning, cathodic protection, force protection, concrete repair, pipe rehabilitation, waterproofing, and industrial special structures. STRUCTURAL TECHNOLOGIES offers proven expertise in addressing the toughest, most complex construction and maintenance challenges. Using advanced technologies in its products and systems, STRUCTURAL TECHNOLOGIES delivers innovative, highly effective solutions to its customers. The solution choices are second to none in the specialty construction and maintenance industry.
#!/usr/bin/env python import sys import getopt import numpy import time import scipy import logging from scipy import interpolate from scipy import signal from scipy.io.numpyio import fwrite # Available Mass Balance class MassBalance: ( BAD_VAL , ZERO_BALANCE , CONSTANT_ELA , ELA_LOWERING , ELA_TIME_SERIES , EXTERNAL_FUNC , ELA_LOWERING2 , BALANCE_FILE , D180_TIME_SERIES ) = range( 9 ) class BoundaryCond: ( BAD_VAL , ICE_FREE_BOUND , ZERO_FLUX_BOUND , CONST_FLUX_BOUND , SURF_ELEV_BOUND , SURF_SLOPE_BOUND ) = range( 6 ) class Parameters: g = numpy.longdouble(9.81) # gravitional acceleration rhoI = numpy.longdouble(917) # density of ice rhoW = numpy.longdouble(1000) # density of water glensA = numpy.longdouble( (6.8e-15)*3.15e7/(1e9) ) # Patterson, 1994; MacGregor, 2000 day = numpy.longdouble(0.00274) # length of a day in years # Time t = numpy.longdouble(0) # set time to zero tMax = numpy.longdouble(100000) # maximum simulation time in years dtMax = numpy.longdouble(0.4 * 365*day) # maximum timestep in years dtDefault = numpy.longdouble(0.4 * 365*day) # timestep if VARIABLE_DT_TOGGLE==0 # Attractor Sliding -- only if ICESLIDE_TOGGLE==1 (generally used) UsChar = numpy.longdouble(10) taubChar = numpy.longdouble(100000) # Glacier Properties MinGlacThick = numpy.longdouble(1) WEST_BC_TOGGLE = BoundaryCond.ICE_FREE_BOUND EAST_BC_TOGGLE = BoundaryCond.ICE_FREE_BOUND NORTH_BC_TOGGLE = BoundaryCond.ICE_FREE_BOUND SOUTH_BC_TOGGLE = BoundaryCond.ICE_FREE_BOUND MASS_BALANCE_TOGGLE = MassBalance.ELA_LOWERING # select climate scenerio (off|on|select) initELA = numpy.longdouble(3000) gradBz = numpy.longdouble(0.01) maxBz = numpy.longdouble(2) ELAStepSize = numpy.longdouble(-50) ELAStepInterval = numpy.longdouble(500) tmin = numpy.longdouble(200) # Years, spin-up time # Standard Sliding -- used if ICESLIDE_TOGGLE==2 (generally not used) B = numpy.longdouble(0.0012) # m/(Pa*yr) -- MacGregor, 2000 DepthToWaterTable = numpy.longdouble(20) # distance from ice surface to water table MaxFloatFraction = numpy.longdouble(80) # limits water level in ice Hpeff = numpy.longdouble(20) # effective pressure (meters of water) # Avalanching angleOfRepose = numpy.longdouble(30) avalanchFreq = numpy.longdouble(3) # average number per year # Calving seaLevel = numpy.longdouble(-100) # meters calvingCoef = numpy.longdouble(2) # year^-1 # Thermal c = numpy.longdouble(2060) # specific heat capacity (J/(kg*K)) Qg = numpy.longdouble(0.05*3.15e7) # Geothermal heat flux (W/m^2)*seconds/year = (J/year)/(m^2) gradTz = numpy.longdouble(-0.0255) # Geothermal Gradient # Available Boundary Conditions ICE_FREE_BOUND = 1 # Ice Free Boundary ZERO_FLUX_BOUND = 2 # Zero Ice Flux CONST_FLUX_BOUND = 3 # Constant Ice Flux SURF_ELEV_BOUND = 4 # Constant Surface Elevation SURF_SLOPE_BOUND = 5 # Continuous Ice Surface Slope #g = numpy.longdouble(9.81) # gravitional acceleration #rhoI = numpy.longdouble(917) # density of ice #glensA = numpy.longdouble( (6.8e-15)*3.15e7/(1e9) ) # Patterson, 1994; MacGregor, 2000 # Attractor Sliding -- only if ICESLIDE_TOGGLE==1 (generally used) #UsChar = numpy.longdouble(10) #taubChar = numpy.longdouble(100000) # Glacier Properties #MinGlacThick = numpy.longdouble(1) #WEST_BC_TOGGLE = ICE_FREE_BOUND #EAST_BC_TOGGLE = ICE_FREE_BOUND #NORTH_BC_TOGGLE = ICE_FREE_BOUND #SOUTH_BC_TOGGLE = ICE_FREE_BOUND # Available Mass Balance ZERO_BALANCE = 1 # Constant Ice Flux CONSTANT_ELA = 2 # Ice Free Boundary ELA_LOWERING = 3 # Zero Ice Flux ELA_TIME_SERIES = 4 # Continuous Ice Surface Slope EXTERNAL_FUNC = 5 # Constant Surface 
Elevation ELA_LOWERING2 = 6 # Zero Ice Flux BALANCE_FILE = 7 # Zero Ice Flux D18O_TIME_SERIES = 8 # Load d18O record and convert to ELA history #MASS_BALANCE_TOGGLE = ELA_LOWERING # select climate scenerio (off|on|select) #initELA = numpy.longdouble(3000) #gradBz = numpy.longdouble(0.01) #maxBz = numpy.longdouble(2) #ELAStepSize = numpy.longdouble(-50) #ELAStepInterval = numpy.longdouble(500) #tmin = numpy.longdouble(200) # Years, spin-up time def compress_grid( H , Zb , COMPRESS_TOGGLE=False , RESTART_TOGGLE=0 ): # COMPRESS - ONLY SIMULATE SUB-RECTANGLE THAT CONTAINS ICE if COMPRESS_TOGGLE and H.max() > 1 and RESTART_TOGGLE != 2: H_FullSpace = H.copy() Zb_FullSpace = Zb.copy() if THERMAL_TOGGLE: Ts_FullSpace = Ts.copy() Tb_FullSpace = Tb.copy() Tm_FullSpace = Tm.copy() #[indrw,indcl] = find(H ~= 0); indrw,indcl = numpy.where( H!=0 ) mxrw,mxcl = Zb.shape mnrw = max( 0 , min(indrw) - 2 ) mxrw = min( mxrw , max(indrw) + 2 ) mncl = max( 0 , min(indcl) - 2 ) mxcl = min( mxcl , max(indcl) + 2 ) H = H [ mnrw:mxrw , mncl:mxcl ] Zb = Zb[ mnrw:mxrw , mncl:mxcl ] Zi = Zb + numpy.choose( H<0 , (H,0) ) #Zi = Zb + numpy.choose( numpy.less(H,0) , (H,0) ) #Zi = Zb + max( H, 0 ) ; rws,cls = H.shape if THERMAL_TOGGLE: Ts = Ts[ mnrw:mxrw , mncl:mxcl ] Tb = Tb[ mnrw:mxrw , mncl:mxcl ] Tm = Tm[ mnrw:mxrw , mncl:mxcl ] mxrws,mxcls = Zb_FullSpace.shape rws,cls = Zb.shape compression_ratio = (mxcls*mxrws)/(cls*rws) COMPRESSED_FLAG = 1 else: #Zi = Zb + max( H, 0 ) # included for restarts Zi = Zb + numpy.choose( H<0 , (H,0) ) compression_ratio = 1. COMPRESSED_FLAG = 0 return ( Zi , compression_ratio , COMPRESSED_FLAG ) def add_halo( x ): x_ext = numpy.concatenate( ( x[:,0,numpy.newaxis] , x , x[:,-1,numpy.newaxis] ) , axis=1 ) x_ext = numpy.concatenate( ( [x_ext[0,:]] , x_ext , [x_ext[-1,:]] ) ) return x_ext def set_bc( H , Zb , Zi , THERMAL_TOGGLE=False , WEST_BC_TOGGLE=ICE_FREE_BOUND , EAST_BC_TOGGLE=ICE_FREE_BOUND , SOUTH_BC_TOGGLE=ICE_FREE_BOUND , NORTH_BC_TOGGLE=ICE_FREE_BOUND ): ###### ### MODIFY BOUNDARY CELLS TO ENFORCE BOUNDARY CONDITIONS # DEFAULT BOUNDARY CONDITION IS ZERO FLUX H_ext = add_halo( H ) Zb_ext = add_halo( Zb ) Zi_ext = add_halo( Zi ) if THERMAL_TOGGLE: Ts_ext = add_halo( Ts ) Tb_ext = add_halo( Tb ) Tm_ext = add_halo( Tm ) # WESTERN BOUNDARY CONDTION if WEST_BC_TOGGLE == SURF_ELEV_BOUND: # Constant Ice Surface Height ZiBound = mean(Zb[:,0]) + Hbound H_ext[:,0] = ZiBound - Zb_ext[:,0] elif WEST_BC_TOGGLE == CONST_FLUX_BOUND: # Constant Ice Flux B.C. pass elif WEST_BC_TOGGLE == SURF_SLOPE_BOUND: # Constant Ice Surface Slope Zi_ext[:,0] = 2*Zi_ext[:,1] - Zi_ext[:,2] H_ext [:,0] = Zi_ext[:,0] - Zb_ext[:,0] H_ext [:,0] = numpy.choose( H_ext[:,0]<0 , (H_ext[:,0],0) ) elif WEST_BC_TOGGLE == ICE_FREE_BOUND: # Ice Free Boundary H_ext[:,0] = 0 # EASTERN BOUNDARY CONDTION if EAST_BC_TOGGLE == SURF_ELEV_BOUND: # Constant Ice Surface Height ZiBound = mean(Zb[:,-1]) + Hbound H_ext[:,-1] = ZiBound - Zb_ext[:,-1] elif EAST_BC_TOGGLE == CONST_FLUX_BOUND: # Constant Ice Flux B.C. pass elif EAST_BC_TOGGLE == SURF_SLOPE_BOUND: # Constant Ice Surface Slope Zi_ext[:,-1] = 2*Zi_ext[:,-2] - Zi_ext[:,-3] H_ext [:,-1] = Zi_ext[:,-1] - Zb_ext[:,-1] H_ext [:,-1] = numpy.choose( H_ext[:,-1]<0 , (H_ext[:,-1],0) ) elif EAST_BC_TOGGLE == ICE_FREE_BOUND: # Ice Free Boundary H_ext[:,-1] = 0 # SOUTHERN BOUNDARY CONDTION if SOUTH_BC_TOGGLE == SURF_ELEV_BOUND: # Constant Ice Surface Height ZiBound = mean(Zb[0,:]) + Hbound H_ext[0,:] = ZiBound - Zb_ext[0,:] elif SOUTH_BC_TOGGLE == CONST_FLUX_BOUND: # Constant Ice Flux B.C. 
pass elif SOUTH_BC_TOGGLE == SURF_SLOPE_BOUND: # Constant Ice Surface Slope Zi_ext[0,:] = 2*Zi_ext[1,:] - Zi_ext[2,:] H_ext [0,:] = Zi_ext[0,:] - Zb_ext[0,:] H_ext [0,:] = numpy.choose( H_ext[0,:]<0 , (H_ext[0,:],0) ) elif SOUTH_BC_TOGGLE == ICE_FREE_BOUND: # Ice Free Boundary H_ext[0,:] = 0 # NORTHERN BOUNDARY CONDTION if NORTH_BC_TOGGLE == SURF_ELEV_BOUND: # Constant Ice Surface Height ZiBound = mean(Zb[-1,:]) + Hbound H_ext[-1,:] = ZiBound - Zb_ext[-1,:] elif NORTH_BC_TOGGLE == CONST_FLUX_BOUND: # Constant Ice Flux B.C. pass elif NORTH_BC_TOGGLE == SURF_SLOPE_BOUND: # Constant Ice Surface Slope Zi_ext[-1,:] = 2*Zi_ext[-2,:] - Zi_ext[-3,:] H_ext [-1,:] = Zi_ext[-1,:] - Zb_ext[-1,:] H_ext [-1,:] = numpy.choose( H_ext[-1,:]<0 , (H_ext[-1,:],0) ) elif NORTH_BC_TOGGLE == ICE_FREE_BOUND: # Ice Free Boundary H_ext[-1,:] = 0 Zi_ext = Zb_ext + H_ext return ( H_ext , Zb_ext , Zi_ext ) def difference_grid( A , dx , dy ): dAdx_ext = ( A[:,1:] - A[:,:-1] ) / dx dAdy_ext = ( A[1:,:] - A[:-1,:] ) / dy dAdx = dAdx_ext[1:-1,:] dAdy = dAdy_ext[:,1:-1] return ( dAdx , dAdy ) def basal_shear_stress( H_ext , Zi_ext , dx=1. , dy=1. , g=Parameters.g , rhoI=Parameters.rhoI ): ###### ### CALCULATE THE BASAL SHEAR STRESS # forward differences dZidxX_ext = ( Zi_ext[:,1:] - Zi_ext[:,:-1] ) / dx dZidyY_ext = ( Zi_ext[1:,:] - Zi_ext[:-1,:] ) / dy dZidxX = dZidxX_ext[1:-1,:] dZidyY = dZidyY_ext[:,1:-1] HX_ext = ( H_ext[:,1:] + H_ext[:,:-1] ) / 2. HY_ext = ( H_ext[1:,:] + H_ext[:-1,:] ) / 2. HX = HX_ext[1:-1,:] HY = HY_ext[:,1:-1] taubxX_ext = -rhoI * g * HX_ext * dZidxX_ext taubyY_ext = -rhoI * g * HY_ext * dZidyY_ext taubxX = taubxX_ext[1:-1,:] taubyY = taubyY_ext[:,1:-1] taubxY = ( taubxX_ext[:-1,:-1] + taubxX_ext[:-1,1:] + taubxX_ext[1: ,:-1] + taubxX_ext[1: ,1:] ) / 4. taubyX = ( taubyY_ext[:-1,:-1] + taubyY_ext[:-1,1:] + taubyY_ext[1: ,:-1] + taubyY_ext[1: ,1:] ) / 4. taubX = numpy.sqrt( taubxX**2 + taubyX**2 ) taubY = numpy.sqrt( taubxY**2 + taubyY**2 ) taubX = numpy.choose( HX>0 , (0,taubX) ) taubY = numpy.choose( HY>0 , (0,taubY) ) # fill in zero values with 1 for use in division xcmpnt = numpy.choose( numpy.abs(taubX)<1e-5 , ( taubxX / taubX , 0. ) ) ycmpnt = numpy.choose( numpy.abs(taubY)<1e-5 , ( taubyY / taubY , 0. ) ) return ( ( xcmpnt , ycmpnt ) , ( taubX , taubY ) , ( HX , HY ) ) def iceflow( taubX , taubY , HX , HY , xcmpnt , ycmpnt , THERMAL_TOGGLE=False , MinGlacThick=1. , glensA=Parameters.glensA ): ###### ### CALCULATE ICE VELOCITY DUE TO DEFORMATION if THERMAL_TOGGLE: A_ext = numpy.zeros(H_ext.shape , dtype=numpy.longdouble ) ind = nonzero( ravel(H_ext) >= MinGlacThick ) Ts_ext = To + lapseRate*( Zi_ext - Elev0 ) #A_ext(ind) = interp3( eHs, eTs, eTm, eA, H_ext(ind), Ts_ext(ind), Tm_ext(ind) ) ; try: put( A_ext , ind , interpolate.interp3d( eHs , eTs , eTm )( take(H_ext,ind) , take(Ts_ext,ind) , take(Tm_ext,ind) ) ) except: logging.error( "NaN in A, likely H_node exceeds H_glens limits" ) return -1 AX = ( A_ext[1:-1, :-1] + A_ext[1:-1,1: ] ) / 2. AY = ( A_ext[ :-1,1:-1] + A_ext[1: ,1:-1] ) / 2. 
else: AX = glensA AY = glensA # here's the guts of calculating the depth averaged velocity UdxX = numpy.abs( .4 * AX * taubX*taubX*taubX * HX ) * xcmpnt UdyY = numpy.abs( .4 * AY * taubY*taubY*taubY * HY ) * ycmpnt #UdxX = numpy.fix(UdxX*1e6)*1e-6 #UdyY = numpy.fix(UdyY*1e6)*1e-6 return ( UdxX , UdyY ) def ice_sliding( taubX , taubY , xcmpnt , ycmpnt , THERMAL_TOGGLE=False , FREEZEON_TOGGLE=0 , UsChar=Parameters.UsChar , taubChar=Parameters.taubChar ): ###### ### CALCULATE SLIDING VELOCITY # here's the guts of calculating the sliding velocity UsxX = numpy.choose( numpy.abs(taubX)<1e-5 , ( UsChar * numpy.exp(1 - taubChar / taubX) * xcmpnt , UsChar * numpy.exp(1 - taubChar ) * xcmpnt ) ) UsyY = numpy.choose( numpy.abs(taubY)<1e-5 , ( UsChar * numpy.exp(1 - taubChar / taubY) * ycmpnt , UsChar * numpy.exp(1 - taubChar ) * ycmpnt ) ) if THERMAL_TOGGLE and FREEZEON_TOGGLE: notFrozen = Tb_ext > -.5 or Zb_ext < seaLevel notFrozenX = ( notFrozen[1:-1, :-1] + notFrozen[1:-1,1: ] ) / 2. notFrozenY = ( notFrozen[ :-1,1:-1] + notFrozen[1: ,1:-1] ) / 2. UsxX *= notFrozenX UsyY *= notFrozenY return ( UsxX , UsyY ) def sum_ice_motion( UdxX , UdyY , UsxX , UsyY ): UxX = UdxX + UsxX UyY = UdyY + UsyY return ( UxX , UyY ) def mass_conservation( H_ext , UxX , UyY , HX , HY , dZidxX , dZidyY , dx=1. , dy=1. , MinGlacThick=1. , WEST_BC_TOGGLE=ICE_FREE_BOUND , EAST_BC_TOGGLE=ICE_FREE_BOUND , SOUTH_BC_TOGGLE=ICE_FREE_BOUND , NORTH_BC_TOGGLE=ICE_FREE_BOUND ): ###### ### MASS CONSERVATION -- CONTINUITY # ensure that no ice is drawn from the rock #CLASS = H_ext >= MinGlacThick CLASS = numpy.choose( H_ext>=MinGlacThick , (0.,1.) ) DCLASSx = ( CLASS[1:-1,1: ] - CLASS[1:-1, :-1] ) * numpy.sign( dZidxX ) DCLASSy = ( CLASS[1: ,1:-1] - CLASS[ :-1,1:-1] ) * numpy.sign( dZidyY ) UxX = numpy.choose( numpy.abs(DCLASSx+1)<1e-5 , (UxX,0.) ) UyY = numpy.choose( numpy.abs(DCLASSy+1)<1e-5 , (UyY,0.) 
) # calculate both components of the ice flux qxX = UxX * HX qyY = UyY * HY if WEST_BC_TOGGLE == CONST_FLUX_BOUND: qxX[: , 0] = BoundaryFlux if EAST_BC_TOGGLE == CONST_FLUX_BOUND: qxX[: ,-1] = BoundaryFlux if SOUTH_BC_TOGGLE == CONST_FLUX_BOUND: qyY[0 , :] = BoundaryFlux if NORTH_BC_TOGGLE == CONST_FLUX_BOUND: qyY[-1, :] = BoundaryFlux # here's the guts of the continuity equation dqdxX = ( qxX[ :,1:] - qxX[: ,:-1] ) / dx dqdyY = ( qyY[1:, :] - qyY[:-1,: ] ) / dy dHdt = -dqdxX - dqdyY return ( dHdt , ( qxX , qyY ) ) def mass_balance( Zi , t , MASS_BALANCE_TOGGLE=MassBalance.ELA_LOWERING , initELA=Parameters.initELA , tmin=Parameters.tmin , ELAStepSize=Parameters.ELAStepSize , ELAStepInterval=Parameters.ELAStepInterval , gradBz=Parameters.gradBz , maxBz=Parameters.maxBz ): ###### ### CALCULATE MASS BALANCE # the imposed mass balance is the imposed climate # there are many possibilities, here are only a few # all must populate the 2D matrix Bxy of size = size(Zb) # with values of net precip/melt rate in m/yr # define the scalar, ELA (m), for plotting if MASS_BALANCE_TOGGLE == CONSTANT_ELA: # Simple ELA, maxBz, gradBz ELA = initELA #Bxy = min( maxBz , gradBz * ( Zi - ELA ) ) Bxy = gradBz * ( Zi - ELA ) Bxy = numpy.choose( Bxy>maxBz , (Bxy,maxBz) ) elif MASS_BALANCE_TOGGLE == ELA_LOWERING: # ELA changing with time experiment # ELAStepSize = -10 ; # positive/negative values raise/lower ELA # ELAStepInterval = 500 ; ELA = initELA + ELAStepSize * max( 0 , numpy.floor( (t-tmin)/ELAStepInterval ) ) Bxy = gradBz * ( Zi - ELA ) Bxy = numpy.choose( Bxy>maxBz , (Bxy,maxBz) ) elif MASS_BALANCE_TOGGLE == ELA_LOWERING2: # ELA changing with time experiment tau = numpy.longdouble(25) # intrinsic timescale of ice dynamics tmin = numpy.longdouble(0) # time to begin ELA modification initELA = numpy.longdouble(4200) # initial ELA stepSize = numpy.longdouble(-10) # positive/negative values raise/lower ELA dELAdt = numpy.longdouble(-0.1) ELA = initELA + stepSize * max( 0, numpy.floor( (t-tmin) / (8*tau) ) ) Bxy = gradBz * ( Zi - ELA ) Bxy = numpy.choose( Bxy>maxBz , (Bxy,maxBz) ) elif MASS_BALANCE_TOGGLE == EXTERNAL_FUNC: # external mass balance function try: Bxy except NameError: # Mass Balance 2D Must Return Bxy (2d Matrix) Bxy = mass_balance_gc2d( t , cellsize , Zi ) nextGetBxy = t + getBxyInterval else: if t >= nextGetBxy: Bxy = mass_balance_gc2d( t , cellsize , Zi ) nextGetBxy = t + getBxyInterval elif MASS_BALANCE_TOGGLE == ELA_TIME_SERIES or MASS_BALANCE_TOGGLE == D18O_TIME_SERIES: # ELA time series ELA = interpolate.interp1d( trecord , ELArecord )( t ) Bxy = gradBz * ( Zi - ELA ) Bxy = numpy.choose( Bxy>maxBz , (Bxy,maxBz) ) elif MASS_BALANCE_TOGGLE == BALANCE_FILE: # external mass balance file Bxy = load_dem_var( filenameDEM, 'Bxy' ) ind = nonzero( ravel(abs(Bxy)==min(abs(Bxy))) ) ELA = mean( take( ravel(Zi) , ind ) ) elif MASS_BALANCE_TOGGLE == ZERO_BALANCE: ELA = 0 Bxy = numpy.zeros( Zb.shape , dtype=numpy.longdouble ) else: logging.error( "Unrecognized Mass Balance" ) return -1 return ( Bxy , ELA ) def get_timestep( H , Zi_ext , Zi , dHdt , Bxy , dtMax=None , dtDefault=None ): ####### ### CALCULATE TIMESTEP # now that we know the rate of change in ice surface heights due to # ice motion and due to precipitation or melt we need to know over # what period of time we can project forward with these rates and # maintain stability of the ice surface. 
The basic idea here is that # we don't want to take a timestep any longer then it would take to # reverse the ice surface slope between two cells, such that ice # should be flowing in the other direction. In fact, let's make our # timestep much less then that. # this calculation sets the timestep such that the change # in ice surface elevation nowhere exceeds a set fraction # of the local standard deviation in ice surface elevations # include ice changes by precip and melt dHdtTot = dHdt + Bxy adHdt = numpy.abs(dHdtTot) # something like standard deviation of 3x3 cell areas around each cell filt = numpy.ones( (3,3) , dtype=numpy.longdouble ) / 9. ZiMean = filter2d( filt , Zi_ext , 'valid' ) dHmax = numpy.sqrt( filter2d( filt, (ZiMean - Zi)**2 ) ) # only consider cells with ice thickness > 10 m isGlac = H>10. # find limiting timestep for each considered cell ind = ( numpy.logical_and( numpy.logical_and( adHdt!=0 , dHmax!=0 ) , isGlac!=0 ) ).flatten().nonzero() if ind[0].size>0: dtLimits = dHmax.flatten()[ ind ] / adHdt.flatten()[ ind ] dt = dtLimits.min() idt = ( dtLimits==dt ).nonzero() #ind = find( adHdt~=0 & dHmax~=0 & isGlac~=0 ) ; #dtLimits = dHmax(ind)./adHdt(ind) ; #[dt, idt] = min( dtLimits ) ; # locate the x and y position of limiting cell for plotting #[rwDT,clDT] = ind2sub( size(adHdt), ind(idt) ) ; # limit timestep to dtMax or some fraction of the calculated timestep if dtMax is not None : dt = min( dtMax, dt/2. ) else: # catch an error, (e.g. if H<10 in all cells ) #if dt.size==0: dt = dtDefault #dt = numpy.fix(dt*1e6)*1e-6 return dt def update_vars( H , Zb , Zi , Bxy , qxX , qyY , dHdt , t , dt , conserveIce , dx=1. , dy=1. ): t = t + dt # numTimeSteps = numTimeSteps + 1 ; # timeSteps(numTimeSteps) = dt ; # increase in ice thicknesses due to precip Bxy_pos = numpy.choose( Bxy>0 , (0,Bxy) ) H += Bxy_pos * dt # change ice thicknesses due to ice motion H += dHdt*dt # decrease in ice thicknesses due to melt Bxy_neg = numpy.choose( Bxy<0 , (0,Bxy) ) Bxy_neg = - numpy.choose( H<-Bxy_neg , (-Bxy_neg,H) ) H += Bxy_neg * dt # record ice addition or removal by climate snowFall = ( Bxy_neg + Bxy_pos ) * dt conserveIce = conserveIce + snowFall.sum(axis=0).sum() # record ice flux through boundaries qbound = qyY[0,:].sum(axis=0).sum() - qyY[-1,:].sum(axis=0).sum() + qxX[:,0].sum(axis=0).sum() - qxX[:,-1].sum(axis=0).sum() conserveIce = conserveIce + dt * qbound / dx Zi = Zb + numpy.choose( H<0 , (H,0) ) if numpy.isnan(Zi).any(): #save workspacedump logging.error( "NaN in ice thickness" ) return -1 return ( t , H , Zi , conserveIce ) def avalanche( H , angleOfRepose=30. ): ###### ### AVALANCHE SNOW OFF OF STEEP SURFACES # move ice downslope until the ice surface is everywhere # less then or near the angle of repose rws,cls = Zb.shape dHRepose = dx*numpy.tan(angleOfRepose*numpy.pi/180.) 
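# (added note, illustrative) The loop below redistributes ice from any cell whose surface
# slope exceeds the angle of repose to its lower neighbours, moving a third of the excess
# slope per pass and repeating until the steepest remaining gradient is within about 10%
# of the repose gradient dHRepose.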
Ho = numpy.choose( H<0 , (H,0) ) while True: dZidx_down = numpy.zeros( (rws,cls) , dtype=numpy.longdouble ) dZidx_up = numpy.zeros( (rws,cls) , dtype=numpy.longdouble ) dZidx_down[:,1:] = numpy.choose( Zi[:,1:] < Zi[:,:-1] , ( Zi[:,1:] - Zi[:,:-1] , 0 ) ) dZidx_up [:,:-1] = numpy.choose( Zi[:,:-1] < Zi[:,1:] , ( Zi[:,:-1] - Zi[:,1:] , 0 ) ) dZidx = numpy.choose( dZidx_up > dZidx_down , ( dZidx_down , dZidx_up ) ) dZidy_left = numpy.zeros( (rws,cls) , dtype=numpy.longdouble ) dZidy_right = numpy.zeros( (rws,cls) , dtype=numpy.longdouble ) dZidy_left [1:,:] = numpy.choose( Zi[1:,:] < Zi[:-1,:] , ( Zi[1:,:] - Zi[:-1,:] , 0 ) ) dZidy_right[:-1,:] = numpy.choose( Zi[:-1,:] < Zi[1:,:] , ( Zi[:-1,:] - Zi[1:,:] , 0 ) ) dZidy = numpy.choose( dZidy_left > dZidy_right , ( dZidy_right , dZidy_left ) ) grad = numpy.sqrt( dZidx**2 + dZidy**2 ) gradT = dZidy_left + dZidy_right + dZidx_down + dZidx_up gradT = numpy.choose( gradT==0 , (gradT,1) ) grad = numpy.choose( Ho <0.1 , (grad ,0) ) mxGrad = grad.max() if mxGrad <= 1.1*dHRepose: break delH = numpy.choose( grad<dHRepose , ( ( grad-dHRepose)/3. , 0 ) ) Htmp = Ho.copy() Ho = numpy.choose( Htmp<delH , ( Htmp-delH , 0 ) ) delH = Htmp - Ho delHdn = numpy.zeros( (rws,cls) , dtype=numpy.longdouble ) delHup = numpy.zeros( (rws,cls) , dtype=numpy.longdouble ) delHlt = numpy.zeros( (rws,cls) , dtype=numpy.longdouble ) delHrt = numpy.zeros( (rws,cls) , dtype=numpy.longdouble ) delHup[:,1: ] = delH[:, :-1] * dZidx_up [:, :-1] / gradT[:, :-1] delHdn[:, :-1] = delH[:,1: ] * dZidx_down[:,1: ] / gradT[:,1: ] delHrt[1: ,:] = delH[ :-1,:] * dZidy_right[ :-1,:] / gradT[ :-1,:] delHlt[ :-1,:] = delH[1: ,:] * dZidy_left [1: ,:] / gradT[1: ,:] Ho = Ho + delHdn + delHup + delHlt + delHrt Ho = numpy.choose( Ho<0 , (Ho,0) ) Zi = Zb + Ho #H = Ho + (H<0).*H ; H = Ho + numpy.choose( H<0 , (0,H) ) return H def calve( H , dt , CALVING_TOGGLE=True ): ###### ### CALVING GLACIER FRONT if CALVING_TOGGLE: # one reason this is difficult is that the height of ice in the cell # is really just recording the volume of ice, the position of the # margin in the cell not the actual ice height. Here floation # height is assumed (or higher if necessary to account for ice volume) Hold = H.copy() calvedIce = 0 # count time backwards with a sshorted timestep until the whole # timestep used during this itteration has been simulated dtTot = dt while dtTot > 0: # find the calving front, aka the wet glacier margin G = H > 1 W = numpy.logical_and( G==0 , Zb <= seaLevel ) filt = numpy.array( [[0,1,0],[1,1,1],[0,1,0]] , dtype=numpy.longdouble ) Wfilt = filter2d( filt , W ) Wfilt[:,(0,-1)] = Wfilt[:,(2,-3)] Wfilt[(0,-1),:] = Wfilt[(2,-3),:] wetGmargin = Gi * Wfilt > 0 indWGM = wetGmargin.ravel().nonzero() # if calving front exists, find water depth, ensure it's positive if indWGM.size>0: WDmarg = seaLevel - Zb.flatten()[indWGM] WDmarg = numpy.choose( WDmarg<0 , (WDmarg,0) ) ind = (WDmarg!=0).nonzero() indWGM = take( indWGM , ind ) WDmarg = take( WDmarg , ind ) #WDmarg = max( 0, seaLevel - Zb(indWGM) ) ; #ind = find( WDmarg == 0 ) ; #indWGM(ind) = [] ; #WDmarg(ind) = [] ; # if calving front exists, remove some ice if indWGM.size>0: # ice thickness in calving cells Hmarg = H.flatten()[indWGM] Hmarg = numpy.choose( Hmarg<WDmarg/0.917 , (Hmarg,WDmarg/0.917) ) # a new timestep is calculated such that the calving rate times the # timesstep does not exceed the total contents of any calving cell. 
dLinCalvdt = calvingCoef * WDmarg # front migration rate dVolCalvdt = dx * dLinCalvdt * Hmarg # rate of volume calved volAvailMarg = dx * dx * H.flatten()[indWGM] # ice volume available calvDt = min( dtTot, ( volAvailMarg / dVolCalvdt ).min() ) # calving timestep # remove this calving timestep from total time to calve dtTot = dtTot - calvDt # convert the volume calved to ice thickness and remove calve = dVolCalvdt * calvDt / ( dx * dx ) H[indWGM] = H[indWGM] - calve # record total volume calved for posterity calvedIce = calvedIce + calve.sum(asis=0).sum() * dx * dx else: dtTot = 0 # record ice removal by calving for conservation test conserveIce = conserveIce + ( H - Hold ).sum(axis=0).sum() def print_watch_point( fd , x ): y = numpy.double( x ) fwrite( fd , y.size , y ) fd.flush() def filter2d( b , x , shape='same' ): y = scipy.signal.convolve( b ,x , mode=shape ) return y def load_dem( file ): vars = scipy.io.loadmat( file ) cellsize = numpy.longdouble(vars['cellsize']) easting = numpy.longdouble(vars['easting']) northing = numpy.longdouble(vars['northing']) topo = numpy.longdouble(vars['topo']) n_rows , n_cols = topo.shape logging.info( 'Shape of topo is %d by %d' , n_rows , n_cols ) logging.info( 'Shape of easting is %d' , easting.size ) logging.info( 'Shape of northing is %d' , northing.size ) if easting.size != n_cols: sys.exit( 'Easting does not match dimension of topo (%d != %d)' % (easting.size,n_cols) ) if northing.size != n_rows: sys.exit( 'Northing does not match dimension of topo (%d != %d)' % (northing.size,n_rows) ) return ( topo , easting , northing , cellsize ) def load_dem_var( file , val_s ): vars = scipy.io.loadmat( file ) if vars.has_key( val_s ): var = vars[val_s] else: var = None return var def load_input_args( ): CLEAR_FIGURE = 1 CONTOUR_INTERVAL = 50. DEBUG_TOGGLE = 0 DT_LIMIT = 0 ELA_CONTOUR = 1. ICE_CONTOUR = 1. 
NEW_FIGURE = 0 QUIVER_VECS = 0 RECONSTRUCT = 0 SUBFIGURE = 0 THERMAL_CONTOUR = 0 return 1 class Usage( Exception ): def __init__( self , msg ): self.msg = msg def old_gc2d( argv=None , inputFile='Animas_200.mat' ): if argv is None: argv = sys.argv try: try: opts, args = getopt.getopt( argv[1:] , "h" , ["help"] ) except getopt.error, msg: raise Usage(msg) except Usage, err: print >> sys.stderr, err.msg print >> sys.stderr, "for help use --help" return 2 RESTART_TOGGLE = 0 ###### ### Load a saved state if RESTART_TOGGLE == 0 or RESTART_TOGGLE == 3: # LOAD A SAVED STATE # CODE BEHAVIOR TOGGLES # toggles turn on/off segments of the code or select # between multiple possibilities for a given process # values can be reset in INIT_COND segment GUISTART_TOGGLE = 0 # started simulation with the gui (off|on) SAVE_TOGGLE = 1 # saving (off|on) PLOT_TOGGLE = 1 # plotting (off|on) REPORT_TOGGLE = 1 # reporting (off|on) COMPRESS_TOGGLE = 0 # only simulate area with ice (off|on) VARIABLE_DT_TOGGLE = 1 # state dependent time step (off|on) INIT_COND_TOGGLE = 1 # load DEM and climate (synth|valley|sheet) GENERIC_ICE_TOGGLE = 0 # start with generic ice surface (off|on) ICEFLOW_TOGGLE = 1 # ice motion by deformation (off|on) ICESLIDE_TOGGLE = 0 # ice motion by sliding (off|on|select) THERMAL_TOGGLE = 0 # temp dependance of flow (off|on) FREEZEON_TOGGLE = 0 # basal ice freeze to bed (off|on) AVALANCH_TOGGLE = 0 # avalanch off steep surfaces (off|on) ERODE_TOGGLE = 0 # erode the bed (off|on|select) CALVING_TOGGLE = 0 # calving front (off|on) CRN_TOGGLE = 0 # CRN accumulation (off|on) # Available Mass Balance ZERO_BALANCE = 1 # Constant Ice Flux CONSTANT_ELA = 2 # Ice Free Boundary ELA_LOWERING = 3 # Zero Ice Flux ELA_TIME_SERIES = 4 # Continuous Ice Surface Slope EXTERNAL_FUNC = 5 # Constant Surface Elevation ELA_LOWERING2 = 6 # Zero Ice Flux BALANCE_FILE = 7 # Zero Ice Flux D18O_TIME_SERIES = 8 # Load d18O record and convert to ELA history MASS_BALANCE_TOGGLE = ELA_LOWERING # select climate scenerio (off|on|select) # Available Boundary Conditions ICE_FREE_BOUND = 1 # Ice Free Boundary ZERO_FLUX_BOUND = 2 # Zero Ice Flux CONST_FLUX_BOUND = 3 # Constant Ice Flux SURF_ELEV_BOUND = 4 # Constant Surface Elevation SURF_SLOPE_BOUND = 5 # Continuous Ice Surface Slope WEST_BC_TOGGLE = ICE_FREE_BOUND # boundary condition (no ice|reflect|no flow) EAST_BC_TOGGLE = ICE_FREE_BOUND # boundary condition (no ice|reflect|no flow) SOUTH_BC_TOGGLE = ICE_FREE_BOUND # boundary condition (no ice|reflect|no flow) NORTH_BC_TOGGLE = ICE_FREE_BOUND # boundary condition (no ice|reflect|no flow) # OUTPUT BEHAVIOR plotInterval = 60 * 120 # seconds saveInterval = 100 # whole years reportInterval = 30 # seconds nextPlot = 0 # initialize to plot on first timestep nextSave = 0 # initialize to save on first timestep nextReport = 0 # initialize to report on first timestep outputFile = 'savetmp' ###### ### Set numerical and physical constants # Constants g = numpy.longdouble(9.81) # gravitional acceleration rhoI = numpy.longdouble(917) # density of ice rhoW = numpy.longdouble(1000) # density of water day = numpy.longdouble(0.00274) # length of a day in years # Time t = numpy.longdouble(0) # set time to zero tMax = numpy.longdouble(100000) # maximum simulation time in years dtMax = numpy.longdouble(0.4 * 365*day) # maximum timestep in years dtDefault = numpy.longdouble(0.4 * 365*day) # timestep if VARIABLE_DT_TOGGLE==0 # Glacier Properties MinGlacThick = numpy.longdouble(1) # Ice Deformation glensA = numpy.longdouble((6.8e-15)*3.15e7/(1e9)) # 
Patterson, 1994; MacGregor, 2000 # Attractor Sliding -- only if ICESLIDE_TOGGLE==1 (generally used) UsChar = numpy.longdouble(10) taubChar = numpy.longdouble(100000) # Standard Sliding -- used if ICESLIDE_TOGGLE==2 (generally not used) B = numpy.longdouble(0.0012) # m/(Pa*yr) -- MacGregor, 2000 DepthToWaterTable = numpy.longdouble(20) # distance from ice surface to water table MaxFloatFraction = numpy.longdouble(80) # limits water level in ice Hpeff = numpy.longdouble(20) # effective pressure (meters of water) # Avalanching angleOfRepose = numpy.longdouble(30) avalanchFreq = numpy.longdouble(3) # average number per year # Calving seaLevel = numpy.longdouble(-100) # meters calvingCoef = numpy.longdouble(2) # year^-1 # Thermal c = numpy.longdouble(2060) # specific heat capacity (J/(kg*K)) Qg = numpy.longdouble(0.05*3.15e7) # Geothermal heat flux (W/m^2)*seconds/year = (J/year)/(m^2) gradTz = numpy.longdouble(-0.0255) # Geothermal Gradient # Mass Balance initELA = numpy.longdouble(4500) initELA = numpy.longdouble(3000) gradBz = numpy.longdouble(0.01) maxBz = numpy.longdouble(2) ELAStepSize = numpy.longdouble(-50) ELAStepInterval = numpy.longdouble(500) tmin = numpy.longdouble(200) # Years, spin-up time ###### ### RELOAD INPUT ARGUMENTS #load inputArgs inputArgs = load_input_args #if ( GUISTART_TOGGLE & exist('guiSimParams.mat','file') ) # load guiSimParams # delete guiSimParams.mat # clear newInitFile #elseif ( ~GUISTART_TOGGLE & exist( './guiPlotParams.mat', 'file' ) ) # delete guiPlotParams.mat #end ###### ### INITIALIZE COUNTERS # numTimeSteps = 0 ; # timeSteps = zeros(1000000,1) ; ###### ### INITIALIZE BED and ICE TOPOGRAPHY, and CLIMATE VARIABLES # Must define topo, cellsize, dx, and dy if INIT_COND_TOGGLE: ### .mat file contains: 'topo' = matrix of bed elevations and 'cellsize', ### both in meters. 'easting' and 'northing' are included for plotting if INIT_COND_TOGGLE == 1: # Valley glaciers # filenameDEM = 'Yosemite200_rot35_400x650' ; # filenameDEM = 'Nederland100' ; # filenameDEM = 'KingsCanyon200Rot256x256shift' ; # filenameDEM = 'sample200' ; # filenameDEM = 'animas_200' ; # filenameDEM = '4J_newDEM_200' ; # filenameDEM = 'reproj4j_200' ; filenameDEM = inputFile filenameDEM = 'Animas_200.mat' #load( filenameDEM ) ; ( topo , easting , northing , cellsize ) = load_dem( filenameDEM ) dx = numpy.longdouble(200) # set a new dx dy = numpy.longdouble(dx) # AAR and eroded volume watershed mask mask_file = 'watershed_mask' try: #load( mask_file ); watershed_mask = load_mask( mask_file ) except: watershed_mask = numpy.ones( topo.shape , dtype=numpy.longdouble ) # Use the whole grid if no watershed mask is available logging.warning( 'No watershed mask found; using the whole grid for AAR and eroded flux calculations.' ) # Mass Balance try: initELA except NameError: initELA = numpy.longdouble(3350) maxBz = numpy.longdouble(2) gradBz = numpy.longdouble(1./100.) 
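# --- Illustrative aside: ELA-lowering mass balance ---------------------------
# mass_balance() itself is not shown in this excerpt.  Given the constants
# above (initELA, gradBz, maxBz, ELAStepSize, ELAStepInterval, tmin) and
# MASS_BALANCE_TOGGLE = ELA_LOWERING, a common form is a linear balance
# gradient capped at maxBz, with the ELA stepped down after spin-up.  This is
# an assumption about the scenario, not the model's actual routine.
import numpy as np

def ela_lowering_balance(Zi, t, initELA=3350.0, gradBz=0.01, maxBz=2.0,
                         ELAStepSize=-50.0, ELAStepInterval=500.0, tmin=200.0):
    """Annual balance (m ice/yr) and current ELA for an ELA-lowering scenario."""
    steps = np.floor((t - tmin) / ELAStepInterval) if t > tmin else 0.0
    ELA = initELA + ELAStepSize * steps
    Bxy = np.minimum(maxBz, gradBz * (Zi - ELA))   # capped linear gradient
    return Bxy, ELA
# -----------------------------------------------------------------------------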
else: if INIT_COND_TOGGLE == 2: # Ice sheets filenameDEM = 'Baffin200d' filenameDEM = 'ValleyNonFjordTopo' #load( filenameDEM ) ; ( topo , easting , northing ) = load_dem( filenameDEM ) dx = numpy.longdouble(2000) # set a new dx dy = dx UsChar = numpy.longdouble(100) taubChar = numpy.longdouble(50000) #load( filenameDEM, 'Bxy' ) ; Bxy = load_dem_var( filenameDEM , 'Bxy' ) # Mass Balance initELA = numpy.longdouble(3500) maxBz = numpy.longdouble(0) gradBz = numpy.longdouble(1./100) Hbound = numpy.longdouble(2000) Elev0 = numpy.longdouble(0) # reference elevation To = numpy.longdouble(-30) # temperature at Elev0 lapseRate = numpy.longdouble(-0.0065) # degrees per meter COMPRESS_TOGGLE = 0 GENERIC_ICE_TOGGLE = 0 MASS_BALANCE_TOGGLE = ELA_TIME_SERIES CALVING_TOGGLE = 1 ERODE_TOGGLE = 0 THERMAL_TOGGLE = 0 FREEZEON_TOGGLE = 0 HORZTL_ADVECT_TOGGLE = 0 GEOTHERMAL_HEAT_TOGGLE = 0 STRAIN_HEAT_TOGGLE = 0 SLIDING_HEAT_TOGGLE = 0 SURFACE_HEAT_FLUX_TOGGLE= 0 THERMAL_3D_TOGGLE = 0 WEST_BC_TOGGLE = ZERO_FLUX_BOUND EAST_BC_TOGGLE = ZERO_FLUX_BOUND SOUTH_BC_TOGGLE = ZERO_FLUX_BOUND NORTH_BC_TOGGLE = ZERO_FLUX_BOUND elif INIT_COND_TOGGLE == 3: # gui_start #load( filenameDEM ) ; ( topo , easting , northing ) = load_dem( filenameDEM ) dy = dx rws,cls = topo.shape #if !exist('easting') : easting = numpy.arange( cls ) #if !exist('northing'): northing = numpy.arange( rws ) try: easting except NameError: easting = numpy.arange( cls ) try: northing except NameError: northing = numpy.arange( rws ) # resample DEM at new node spacing if cellsize != dx: rws,cls = topo.shape xOld = numpy.arange(cls-1)*cellsize yOld = numpy.arange(rws-1)*cellsize #xOld = (0:cls-1)*cellsize ; #yOld = (0:rws-1)*cellsize ; XOld,YOld = numpy.meshgrid( xOld , yOld ) #if rem(max(xOld),dx) == 0 and rem(max(yOld),dy) == 0: if max(xOld) % dx == 0 and max(yOld) % dy == 0: clsNew = max(xOld)/dx + 1 rwsNew = max(yOld)/dy + 1 else: clsNew = numpy.ceil( xOld[-1] / dx ) rwsNew = numpy.ceil( yOld[-1] / dy ) x = numpy.arange(clsNew)*dx y = numpy.arange(rwsNew)*dy X,Y = numpy.meshgrid( x , y ) topo = interpolate.interp2d( XOld , YOld , topo , kind='linear' )( X , Y ) #topo = interpolate.interp2d( XOld , YOld , topo, X, Y ) ; easting = interpolate.interp1d( xOld , easting , kind='linear' )( x ) northing = interpolate.interp1d( yOld , northing , kind='linear' )( y ) cellsize = dx # Set the bed elevation to 'topo' Zb = topo.copy() initZb = Zb.copy() #if !exist('H'): H = numpy.zeros(Zb.shape) try: H except NameError: H = numpy.zeros( Zb.shape , dtype=numpy.longdouble ) Zi = H + Zb #clear topo rws,cls = Zb.shape x = numpy.arange( cls )*dx y = numpy.arange( rws )*dy X,Y = numpy.meshgrid( x , y ) # Create a generic ice surface if GENERIC_ICE_TOGGLE: # This code segment rotates the topo such that the # ice boundary is on the left side of the simulation # need to check code; better to rotate DEM prior to use ZiBound = numpy.mean(Zb[:,0]) + Hbound taub = 200000 H = numpy.zeros(Zb.shape, dtype=numpy.longdouble ) rws,cls = Zb.shape beta = taub/(rhoI*g) jtermlast = cls-2 icefree = 0 # for each row, find the cell for which the ice surface # height at the left boundary would be ZiBound if the # terminus that starts in that cell #for i =1:rws for i in range(rws): mZb = Zb[i,:] slope = -numpy.diff(mZb)/dx # search starts in front of the terminus # of the adjacent row that was just found jterm = min( jtermlast+1, cls-2 ) while jterm > 0: # backwater calculation mH = numpy.zeros(mZb.shape, dtype=numpy.longdouble ) for j in range(jterm-1,-1,-1): term1 = ( -slope[j]/2. 
- (mH[j+1]/dx) )**2 term2 = -(2./dx) * ( slope[j] * mH[j+1] - beta ) deltaH = -slope[j]*dx/2. - mH[j+1] + dx * numpy.sqrt(term1+term2) mH[j] = mH[j+1] + deltaH # the following ensures that the search for # the terminus was started beyond the terminus mZi = mZb + mH if mZi[0] > ZiBound: icefree = 1 elif icefree and mZi[0] < ZiBound: H[i,:] = mH jtermlast = jterm icefree = 0 break else: jterm = jterm + 2 if jterm >= cls-1: logging.error( "Generic ice overruns boundary" ) return -1 jterm = jterm - 1 Zi = Zb + H rws,cls = Zb.shape filt = numpy.ones( (3,3) , dtype=numpy.longdouble ) / 9 ZiBig = numpy.zeros( (rws+2,cls+2) , dtype=numpy.longdouble ) ZiBig[1:-1,1:-1] = Zi for i in range(10): ZiBig[(0,-1),:] = ZiBig[(1,-2),:] ZiBig[:,(0,-1)] = ZiBig[:,(1,-2)] ZiBig = filter2d( filt , ZiBig ) Zi = ZiBig[1:-2,1:-2] ind = H == 0 Zi[ind] = Zb[ind] conserveIce = H.sum(axis=0).sum() iceVolumeLast = conserveIce*dx*dy else: # SYNTHETIC BEDROCK TOPOGRAPHY logging.error( "Must code synthetic initial condition" ) return -1 ### INIT_COND_TOGGLE ###### ### Load a saved state ###### # Initialize matrices #n_rows = 100 #n_cols = 200 #H = numpy.ones( ( n_rows , n_cols ) )*100 #Zb = numpy.ones( ( n_rows , n_cols ) ) #Tb = numpy.ones( ( n_rows , n_cols ) ) #Tm = numpy.ones( ( n_rows , n_cols ) ) #Ts = numpy.ones( ( n_rows , n_cols ) ) # #COMPRESS_TOGGLE = True #THERMAL_TOGGLE = True # #RESTART_TOGGLE = 1 # Start the time loop fd_watch = {} fd_watch['thick'] = open( 'thickness_py.bin' , 'wb' ) fd_watch['taubxx'] = open( 'taubxX_py.bin' , 'wb' ) fd_watch['taubyy'] = open( 'taubyY_py.bin' , 'wb' ) fd_watch['taubx'] = open( 'taubX_py.bin' , 'wb' ) fd_watch['tauby'] = open( 'taubY_py.bin' , 'wb' ) fd_watch['xcmpnt'] = open( 'xcmpnt_py.bin' , 'wb' ) fd_watch['ycmpnt'] = open( 'ycmpnt_py.bin' , 'wb' ) fd_watch['udxx'] = open( 'UdxX_py.bin' , 'wb' ) fd_watch['udyy'] = open( 'UdyY_py.bin' , 'wb' ) fd_watch['usxx'] = open( 'UsxX_py.bin' , 'wb' ) fd_watch['usyy'] = open( 'UsyY_py.bin' , 'wb' ) fd_csv = open( 'dt.csv' , 'w' ) ( H , Zb , dx , dy ) = load_state( inputFile ) run_for( t , tMax , H , Zb , dx , dy ) return counter = 0 tic = time.time() while t<tMax or RESTART_TOGGLE==2: # COMPRESS - ONLY SIMULATE SUB-RECTANGLE THAT CONTAINS ICE ( Zi , compression_ratio , COMPRESSED_FLAG ) = compress_grid( H , Zb , COMPRESS_TOGGLE=False ) ###### ### MODIFY BOUNDARY CELLS TO ENFORCE BOUNDARY CONDITIONS ( H_ext , Zb_ext , Zi_ext ) = set_bc( H , Zb , Zi ) ###### ### CALCULATE THE BASAL SHEAR STRESS # forward differences #dZidxX_ext = ( Zi_ext[:,1:] - Zi_ext[:,:-1] ) / dx #dZidyY_ext = ( Zi_ext[1:,:] - Zi_ext[:-1,:] ) / dy #dZidxX = dZidxX_ext[1:-1,:] #dZidyY = dZidyY_ext[:,1:-1] ( dZidxX , dZidyY ) = difference_grid( Zi_ext , dx , dy ) ( ( xcmpnt , ycmpnt ) , ( taubX , taubY ) , ( HX , HY ) ) = basal_shear_stress( H_ext , Zi_ext , dx=dx , dy=dy ) ###### ### CALCULATE ICE VELOCITY DUE TO DEFORMATION if ICEFLOW_TOGGLE: ( UdxX , UdyY ) = iceflow( taubX , taubY , HX , HY , xcmpnt , ycmpnt ) else: UdxX = numpy.zeros( xcmpnt.shape , dtype=numpy.longdouble ) UdyY = numpy.zeros( ycmpnt.shape , dtype=numpy.longdouble ) ###### ### CALCULATE SLIDING VELOCITY if ICESLIDE_TOGGLE: ( UsxX , UsyY ) = ice_sliding( taubX , taubY , xcmpnt , ycmpnt ) else: UsxX = numpy.zeros( xcmpnt.shape , dtype=numpy.longdouble ) UsyY = numpy.zeros( ycmpnt.shape , dtype=numpy.longdouble ) # sum all contributions to ice motion ( UxX , UyY ) = sum_ice_motion( UdxX , UdyY , UsxX , UsyY ) ###### ### MASS CONSERVATION -- CONTINUITY ( dHdt , ( qxX , qyY ) ) = mass_conservation( 
H_ext , UxX , UyY , HX , HY , dZidxX , dZidyY , dx=dx , dy=dy ); ###### ### CALCULATE MASS BALANCE ( Bxy , ELA ) = mass_balance( Zi , t ) ####### ### CALCULATE TIMESTEP if VARIABLE_DT_TOGGLE: dt = get_timestep( H , Zi_ext , Zi , dHdt , Bxy , dtMax=dtMax , dtDefault=dtDefault ) else: dt = dtDefault ###### ### UPDATE the TIME and ICE THICKNESS ( t , H , Zi , conserveIce ) = update_vars( H , Zb , Zi , Bxy , qxX , qyY , dHdt , t , dt , conserveIce , dx=dx , dy=dy ) fd_csv.write( '%f\n' % t ) fd_csv.flush() # Calculate AAR # AccumGrid = (Zi > ELA) .* (H > 0); IndGlacier = numpy.choose( H >0 , (0,watershed_mask) ) AccumGrid = numpy.choose( Bxy>0 , (0,IndGlacier ) ) AccumArea = AccumGrid.sum(axis=0).sum() TotArea = IndGlacier.sum(axis=0).sum() AAR = AccumArea / TotArea ###### ### CALCULATION OF ICE TEMPERATURES if THERMAL_TOGGLE == 0: pass elif THERMAL_TOGGLE == 1: Ts = To + lapseRate*( Zi - Elev0 ) Tb = Ts - gradTz * H Tb = numpy.choose( Tb>0 , (Tb,0) ) Tm = Ts.copy() Htemp = Ts / gradTz ind = nonzero( H.flatten() <= Htemp ) put( Tm , ind , ( Ts.flatten()[ind] + Tb.flatten()[ind] ) * .5 ) ind = nonzero( H.flatten() > Htemp ) put( Tm , ind , Ts.flatten()[ind] * (1. - Htemp.flatten()[ind] / ( 2.*H.flatten()[ind] ) ) ) elif THERMAL_TOGGLE == 2: thermal_gc2d ###### ### COMPRESS - ONLY SIMULATE SUB-RECTANGLE THAT CONTAINS ICE #if COMPRESS_TOGGLE and H.max() > 1 and RESTART_TOGGLE != 2: # disp( 'Error!!!' ) # H_FullSpace = H.copy() # Zb_FullSpace = Zb.copy() # if THERMAL_TOGGLE: # Ts_FullSpace = Ts.copy() # Tb_FullSpace = Tb.copy() # Tm_FullSpace = Tm.copy() # indrw,indcl = (H!=0).nonzero() # mxrw ,mxcl = Zb.shape # mnrw = max( 0 , indrw.min() - 2 ) # mxrw = min( mxrw , indrw.max() + 2 ) # mncl = max( 0 , indcl.min() - 2 ) # mxcl = min( mxcl , indcl.max() + 2 ) # H = H [mnrw:mxrw,mncl:mxcl] # Zb = Zb[mnrw:mxrw,mncl:mxcl] # Zi = Zb + numpy.choose( H<0 , (H,0) ) # rws,cls = H.shape # if THERMAL_TOGGLE: # Ts = Ts[mnrw:mxrw,mncl:mxcl] # Tb = Tb[mnrw:mxrw,mncl:mxcl] # Tm = Tm[mnrw:mxrw,mncl:mxcl] # mxrws,mxcls = Zb_FullSpace.shape # rws ,cls = Zb.shape # compression_ratio = (mxcls*mxrws)/(cls*rws) # COMPRESSED_FLAG = 1 #else: # Zi = Zb + numpy.choose( H<0 , (H,0) ) # included for restarts # compression_ratio = 1 # COMPRESSED_FLAG = 0 # THIS IS THE END OF THE CONTINUUM CALCULATIONS # NOW SIMULATE PROCESSES FOR WHICH WE HAVE NO EQUATIONS ###### ### AVALANCHE SNOW OFF OF STEEP SURFACES if AVALANCH_TOGGLE and ( numpy.random.uniform() < dt*avalanchFreq ): avalanche( H ) ###### ### CALVING GLACIER FRONT if CALVING_TOGGLE: calve( H , dt ) if counter%1==0: print_watch_point( fd_watch['thick'] , H ) #print_watch_point( fd_watch['taubxx'] , taubxX[:,1:] ) #print_watch_point( fd_watch['taubyy'] , taubyY[1:,:] ) #print_watch_point( fd_watch['taubx'] , taubX [:,1:] ) #print_watch_point( fd_watch['tauby'] , taubY [1:,:] ) #print_watch_point( fd_watch['xcmpnt'] , xcmpnt[:,1:] ) #print_watch_point( fd_watch['ycmpnt'] , ycmpnt[1:,:] ) #print_watch_point( fd_watch['udxx'] , UdxX [:,1:] ) #print_watch_point( fd_watch['udyy'] , UdyY [1:,:] ) #print_watch_point( fd_watch['usxx'] , UsxX [:,1:] ) #print_watch_point( fd_watch['usyy'] , UsyY [1:,:] ) counter += 1 if counter > 3000: return ###### ### ERODE THE BED and TRACK CRN INVENTORY if CRN_TOGGLE: CRN_gc2d # Call the CRN module ###### ### ERODE THE BED - now handled in CRN module # # if ERODE_TOGGLE: # erode_gc2d # ###### ### REPORT SOME STUFF toc = time.time() if REPORT_TOGGLE and toc >= nextReport: logging.info( 'elapsed time: %1.2f seconds' , (toc-tic) ) logging.info( 
'simulation time: %1.2f yr' , t ) logging.info( 'timestep: %1.2e yr' , dt ) logging.info( 'ELA: %1.0f m' , ELA ) logging.info( 'AAR: %1.2f' , AAR ) # print 'Erosion mass flux: %1.1e kg/yr' % eroded_mass_flux # fractional ice conservation iceVolume = numpy.choose( H<0 , (H,0) ).sum(axis=0).sum()*dx*dy logging.info( 'total ice: %1.2e km^3' , (iceVolume*1e-9) ) logging.info( 'excess ice: %1.2f m^3' , (iceVolume - conserveIce*dx*dy) ) logging.info( 'ice change: %f m^3' , (iceVolume - iceVolumeLast) ) logging.info( 'max ice thickness: %1.2e km' , (H.max()/1000.) ) if iceVolume != 0: logging.info( 'ice conservation (%%): %1.15f' , (100 - 100*( iceVolume - conserveIce*dx*dy ) / iceVolume) ) iceVolumeLast = iceVolume if CALVING_TOGGLE: logging.info( 'calved ice volume: %1.2e m^3' , calvedIce ) if COMPRESS_TOGGLE: logging.info( 'compression ratio = %f' , compression_ratio ) nextReport = toc + reportInterval fd_watch.close() logging.info( "Finished!" ) return 0 def run_for( t , t_max , H , Zb , dx , dy , ICEFLOW_TOGGLE=True , ICESLIDE_TOGGLE=False , VARIABLE_DT_TOGGLE=True , dtDefault=Parameters.dtDefault , dtMax=Parameters.dtMax): fd_watch = {} fd_watch['thick'] = open( 'thickness_py.bin' , 'wb' ) conserveIce = numpy.longdouble(0.) counter = 0 tic = time.time() while t<t_max: # COMPRESS - ONLY SIMULATE SUB-RECTANGLE THAT CONTAINS ICE ( Zi , compression_ratio , COMPRESSED_FLAG ) = compress_grid( H , Zb , COMPRESS_TOGGLE=False ) ###### ### MODIFY BOUNDARY CELLS TO ENFORCE BOUNDARY CONDITIONS ( H_ext , Zb_ext , Zi_ext ) = set_bc( H , Zb , Zi ) ( dZidxX , dZidyY ) = difference_grid( Zi_ext , dx , dy ) ###### ### CALCULATE THE BASAL SHEAR STRESS ( ( xcmpnt , ycmpnt ) , ( taubX , taubY ) , ( HX , HY ) ) = basal_shear_stress( H_ext , Zi_ext , dx=dx , dy=dy ) ###### ### CALCULATE ICE VELOCITY DUE TO DEFORMATION if ICEFLOW_TOGGLE: ( UdxX , UdyY ) = iceflow( taubX , taubY , HX , HY , xcmpnt , ycmpnt ) else: UdxX = numpy.zeros( xcmpnt.shape , dtype=numpy.longdouble ) UdyY = numpy.zeros( ycmpnt.shape , dtype=numpy.longdouble ) ###### ### CALCULATE SLIDING VELOCITY if ICESLIDE_TOGGLE: ( UsxX , UsyY ) = ice_sliding( taubX , taubY , xcmpnt , ycmpnt ) else: UsxX = numpy.zeros( xcmpnt.shape , dtype=numpy.longdouble ) UsyY = numpy.zeros( ycmpnt.shape , dtype=numpy.longdouble ) # sum all contributions to ice motion ( UxX , UyY ) = sum_ice_motion( UdxX , UdyY , UsxX , UsyY ) ###### ### MASS CONSERVATION -- CONTINUITY ( dHdt , ( qxX , qyY ) ) = mass_conservation( H_ext , UxX , UyY , HX , HY , dZidxX , dZidyY , dx=dx , dy=dy ); ###### ### CALCULATE MASS BALANCE ( Bxy , ELA ) = mass_balance( Zi , t ) ####### ### CALCULATE TIMESTEP if VARIABLE_DT_TOGGLE: dt = get_timestep( H , Zi_ext , Zi , dHdt , Bxy , dtMax=dtMax , dtDefault=dtDefault ) else: dt = dtDefault ###### ### UPDATE the TIME and ICE THICKNESS ( t , H , Zi , conserveIce ) = update_vars( H , Zb , Zi , Bxy , qxX , qyY , dHdt , t , dt , conserveIce , dx=dx , dy=dy ) if counter%1==0: print_watch_point( fd_watch['thick'] , H ) counter = counter + 1 class Toggles: # CODE BEHAVIOR TOGGLES # toggles turn on/off segments of the code or select # between multiple possibilities for a given process # values can be reset in INIT_COND segment GUISTART_TOGGLE = 0 # started simulation with the gui (off|on) SAVE_TOGGLE = 1 # saving (off|on) PLOT_TOGGLE = 1 # plotting (off|on) REPORT_TOGGLE = 1 # reporting (off|on) COMPRESS_TOGGLE = 0 # only simulate area with ice (off|on) VARIABLE_DT_TOGGLE = 1 # state dependent time step (off|on) INIT_COND_TOGGLE = 1 # load DEM and climate 
(synth|valley|sheet) GENERIC_ICE_TOGGLE = 0 # start with generic ice surface (off|on) ICEFLOW_TOGGLE = 1 # ice motion by deformation (off|on) ICESLIDE_TOGGLE = 0 # ice motion by sliding (off|on|select) THERMAL_TOGGLE = 0 # temp dependance of flow (off|on) FREEZEON_TOGGLE = 0 # basal ice freeze to bed (off|on) AVALANCH_TOGGLE = 0 # avalanch off steep surfaces (off|on) ERODE_TOGGLE = 0 # erode the bed (off|on|select) CALVING_TOGGLE = 0 # calving front (off|on) CRN_TOGGLE = 0 # CRN accumulation (off|on) MASS_BALANCE_TOGGLE = MassBalance.ELA_LOWERING # select climate scenerio (off|on|select) WEST_BC_TOGGLE = BoundaryCond.ICE_FREE_BOUND # boundary condition (no ice|reflect|no flow) EAST_BC_TOGGLE = BoundaryCond.ICE_FREE_BOUND # boundary condition (no ice|reflect|no flow) SOUTH_BC_TOGGLE = BoundaryCond.ICE_FREE_BOUND # boundary condition (no ice|reflect|no flow) NORTH_BC_TOGGLE = BoundaryCond.ICE_FREE_BOUND # boundary condition (no ice|reflect|no flow) def init_valley_glacier( file='Animas_200.mat' ): # filenameDEM = 'Yosemite200_rot35_400x650' ; # filenameDEM = 'Nederland100' ; # filenameDEM = 'KingsCanyon200Rot256x256shift' ; # filenameDEM = 'sample200' ; # filenameDEM = 'animas_200' ; # filenameDEM = '4J_newDEM_200' ; # filenameDEM = 'reproj4j_200' ; ( topo , easting , northing , cellsize ) = load_dem( file ) dx = numpy.longdouble(200) # set a new dx dy = numpy.longdouble(dx) # AAR and eroded volume watershed mask mask_file = 'watershed_mask' try: #load( mask_file ); watershed_mask = load_mask( mask_file ) except: watershed_mask = numpy.ones( topo.shape , dtype=numpy.longdouble ) # Use the whole grid if no watershed mask is available logging.warning( 'No watershed mask found; using the whole grid for AAR and eroded flux calculations.' ) # Mass Balance try: initELA except NameError: initELA = numpy.longdouble(3350) maxBz = numpy.longdouble(2) gradBz = numpy.longdouble(1./100.) 
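# --- Illustrative aside: Python 3 variants of the .mat helpers ---------------
# load_dem_var() above relies on dict.has_key(), which no longer exists in
# Python 3, and load_mask() is not defined in this excerpt.  A minimal sketch
# of both, assuming the mask file stores a 'watershed_mask' array (that
# variable name is an assumption).
import numpy
import scipy.io

def load_dem_var_py3(filename, var_name):
    """Return the named variable from a .mat file, or None if it is absent."""
    return scipy.io.loadmat(filename).get(var_name)

def load_mask_py3(filename, shape):
    """Watershed mask from file, falling back to an all-ones grid."""
    try:
        mat = scipy.io.loadmat(filename)
        return numpy.asarray(mat['watershed_mask'], dtype=numpy.longdouble)
    except (OSError, IOError, KeyError):
        return numpy.ones(shape, dtype=numpy.longdouble)
# -----------------------------------------------------------------------------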
return ( topo , easting , northing , cellsize ) def init_ice_sheet( file ): file = 'Baffin200d' file = 'ValleyNonFjordTopo' #load( filenameDEM ) ; ( topo , easting , northing ) = load_dem( file ) dx = numpy.longdouble(2000) # set a new dx dy = dx UsChar = numpy.longdouble(100) taubChar = numpy.longdouble(50000) #load( filenameDEM, 'Bxy' ) ; Bxy = load_dem_var( filenameDEM , 'Bxy' ) # Mass Balance initELA = numpy.longdouble(3500) maxBz = numpy.longdouble(0) gradBz = numpy.longdouble(1./100) Hbound = numpy.longdouble(2000) Elev0 = numpy.longdouble(0) # reference elevation To = numpy.longdouble(-30) # temperature at Elev0 lapseRate = numpy.longdouble(-0.0065) # degrees per meter COMPRESS_TOGGLE = 0 GENERIC_ICE_TOGGLE = 0 MASS_BALANCE_TOGGLE = ELA_TIME_SERIES CALVING_TOGGLE = 1 ERODE_TOGGLE = 0 THERMAL_TOGGLE = 0 FREEZEON_TOGGLE = 0 HORZTL_ADVECT_TOGGLE = 0 GEOTHERMAL_HEAT_TOGGLE = 0 STRAIN_HEAT_TOGGLE = 0 SLIDING_HEAT_TOGGLE = 0 SURFACE_HEAT_FLUX_TOGGLE= 0 THERMAL_3D_TOGGLE = 0 WEST_BC_TOGGLE = ZERO_FLUX_BOUND EAST_BC_TOGGLE = ZERO_FLUX_BOUND SOUTH_BC_TOGGLE = ZERO_FLUX_BOUND NORTH_BC_TOGGLE = ZERO_FLUX_BOUND return ( topo , easting , northing ) def load_state( file , RESTART_TOGGLE=0 , INIT_COND_TOGGLE=True , GENERIC_ICE_TOGGLE=False ): ###### ### Load a saved state if RESTART_TOGGLE == 0 or RESTART_TOGGLE == 3: # LOAD A SAVED STATE # CODE BEHAVIOR TOGGLES # toggles turn on/off segments of the code or select # between multiple possibilities for a given process # values can be reset in INIT_COND segment toggles = Toggles # OUTPUT BEHAVIOR plotInterval = 60 * 120 # seconds saveInterval = 100 # whole years reportInterval = 30 # seconds nextPlot = 0 # initialize to plot on first timestep nextSave = 0 # initialize to save on first timestep nextReport = 0 # initialize to report on first timestep outputFile = 'savetmp' ###### ### Set numerical and physical constants params = Parameters # Constants g = numpy.longdouble(9.81) # gravitional acceleration rhoI = numpy.longdouble(917) # density of ice rhoW = numpy.longdouble(1000) # density of water day = numpy.longdouble(0.00274) # length of a day in years # Time t = numpy.longdouble(0) # set time to zero tMax = numpy.longdouble(100000) # maximum simulation time in years dtMax = numpy.longdouble(0.4 * 365*day) # maximum timestep in years dtDefault = numpy.longdouble(0.4 * 365*day) # timestep if VARIABLE_DT_TOGGLE==0 # Glacier Properties MinGlacThick = numpy.longdouble(1) # Ice Deformation glensA = numpy.longdouble((6.8e-15)*3.15e7/(1e9)) # Patterson, 1994; MacGregor, 2000 # Attractor Sliding -- only if ICESLIDE_TOGGLE==1 (generally used) UsChar = numpy.longdouble(10) taubChar = numpy.longdouble(100000) # Standard Sliding -- used if ICESLIDE_TOGGLE==2 (generally not used) B = numpy.longdouble(0.0012) # m/(Pa*yr) -- MacGregor, 2000 DepthToWaterTable = numpy.longdouble(20) # distance from ice surface to water table MaxFloatFraction = numpy.longdouble(80) # limits water level in ice Hpeff = numpy.longdouble(20) # effective pressure (meters of water) # Avalanching angleOfRepose = numpy.longdouble(30) avalanchFreq = numpy.longdouble(3) # average number per year # Calving seaLevel = numpy.longdouble(-100) # meters calvingCoef = numpy.longdouble(2) # year^-1 # Thermal c = numpy.longdouble(2060) # specific heat capacity (J/(kg*K)) Qg = numpy.longdouble(0.05*3.15e7) # Geothermal heat flux (W/m^2)*seconds/year = (J/year)/(m^2) gradTz = numpy.longdouble(-0.0255) # Geothermal Gradient # Mass Balance initELA = numpy.longdouble(4500) initELA = numpy.longdouble(3000) 
gradBz = numpy.longdouble(0.01) maxBz = numpy.longdouble(2) ELAStepSize = numpy.longdouble(-50) ELAStepInterval = numpy.longdouble(500) tmin = numpy.longdouble(200) # Years, spin-up time ###### ### RELOAD INPUT ARGUMENTS #load inputArgs inputArgs = load_input_args #if ( GUISTART_TOGGLE & exist('guiSimParams.mat','file') ) # load guiSimParams # delete guiSimParams.mat # clear newInitFile #elseif ( ~GUISTART_TOGGLE & exist( './guiPlotParams.mat', 'file' ) ) # delete guiPlotParams.mat #end ###### ### INITIALIZE COUNTERS # numTimeSteps = 0 ; # timeSteps = zeros(1000000,1) ; ###### ### INITIALIZE BED and ICE TOPOGRAPHY, and CLIMATE VARIABLES # Must define topo, cellsize, dx, and dy if INIT_COND_TOGGLE: ### .mat file contains: 'topo' = matrix of bed elevations and 'cellsize', ### both in meters. 'easting' and 'northing' are included for plotting if INIT_COND_TOGGLE == 1: # Valley glaciers # filenameDEM = 'Yosemite200_rot35_400x650' ; # filenameDEM = 'Nederland100' ; # filenameDEM = 'KingsCanyon200Rot256x256shift' ; # filenameDEM = 'sample200' ; # filenameDEM = 'animas_200' ; # filenameDEM = '4J_newDEM_200' ; # filenameDEM = 'reproj4j_200' ; filenameDEM = file filenameDEM = 'Animas_200.mat' #load( filenameDEM ) ; ( topo , easting , northing , cellsize ) = load_dem( filenameDEM ) dx = numpy.longdouble(200) # set a new dx dy = numpy.longdouble(dx) # AAR and eroded volume watershed mask mask_file = 'watershed_mask' try: #load( mask_file ); watershed_mask = load_mask( mask_file ) except: watershed_mask = numpy.ones( topo.shape , dtype=numpy.longdouble ) # Use the whole grid if no watershed mask is available logging.warning( 'No watershed mask found; using the whole grid for AAR and eroded flux calculations.' ) # Mass Balance try: initELA except NameError: initELA = numpy.longdouble(3350) maxBz = numpy.longdouble(2) gradBz = numpy.longdouble(1./100.) 
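# --- Illustrative aside: units behind glensA and dtMax -----------------------
# The one-liner for glensA above packs a unit conversion; the factors are
# consistent with the Paterson (1994) / MacGregor (2000) citation in its
# comment if the base value is read as 6.8e-15 kPa^-3 s^-1 (an assumption):
#   seconds -> years : multiply by 3.15e7 s/yr
#   kPa^-3  -> Pa^-3 : divide by (1e3)^3 = 1e9
import numpy
glensA_check = numpy.longdouble(6.8e-15) * 3.15e7 / 1e9   # ~2.14e-16 Pa^-3 yr^-1

# Similarly, with day = 0.00274 yr, the default/maximum timestep works out to
# 0.4 * 365 * day ~= 0.4 yr.
dtMax_check = 0.4 * 365 * numpy.longdouble(0.00274)        # ~0.40 yr
# -----------------------------------------------------------------------------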
elif INIT_COND_TOGGLE==2: # Ice sheets filenameDEM = 'Baffin200d' filenameDEM = 'ValleyNonFjordTopo' #load( filenameDEM ) ; ( topo , easting , northing ) = load_dem( filenameDEM ) dx = numpy.longdouble(2000) # set a new dx dy = dx UsChar = numpy.longdouble(100) taubChar = numpy.longdouble(50000) #load( filenameDEM, 'Bxy' ) ; Bxy = load_dem_var( filenameDEM , 'Bxy' ) # Mass Balance initELA = numpy.longdouble(3500) maxBz = numpy.longdouble(0) gradBz = numpy.longdouble(1./100) Hbound = numpy.longdouble(2000) Elev0 = numpy.longdouble(0) # reference elevation To = numpy.longdouble(-30) # temperature at Elev0 lapseRate = numpy.longdouble(-0.0065) # degrees per meter COMPRESS_TOGGLE = 0 GENERIC_ICE_TOGGLE = 0 MASS_BALANCE_TOGGLE = ELA_TIME_SERIES CALVING_TOGGLE = 1 ERODE_TOGGLE = 0 THERMAL_TOGGLE = 0 FREEZEON_TOGGLE = 0 HORZTL_ADVECT_TOGGLE = 0 GEOTHERMAL_HEAT_TOGGLE = 0 STRAIN_HEAT_TOGGLE = 0 SLIDING_HEAT_TOGGLE = 0 SURFACE_HEAT_FLUX_TOGGLE= 0 THERMAL_3D_TOGGLE = 0 WEST_BC_TOGGLE = ZERO_FLUX_BOUND EAST_BC_TOGGLE = ZERO_FLUX_BOUND SOUTH_BC_TOGGLE = ZERO_FLUX_BOUND NORTH_BC_TOGGLE = ZERO_FLUX_BOUND elif INIT_COND_TOGGLE == 3: # gui_start #load( filenameDEM ) ; ( topo , easting , northing ) = load_dem( filenameDEM ) dy = dx rws,cls = topo.shape #if !exist('easting') : easting = numpy.arange( cls ) #if !exist('northing'): northing = numpy.arange( rws ) try: easting except NameError: easting = numpy.arange( cls ) try: northing except NameError: northing = numpy.arange( rws ) # resample DEM at new node spacing if cellsize != dx: rws,cls = topo.shape xOld = numpy.arange(cls-1)*cellsize yOld = numpy.arange(rws-1)*cellsize #xOld = (0:cls-1)*cellsize ; #yOld = (0:rws-1)*cellsize ; XOld,YOld = numpy.meshgrid( xOld , yOld ) #if rem(max(xOld),dx) == 0 and rem(max(yOld),dy) == 0: if max(xOld) % dx == 0 and max(yOld) % dy == 0: clsNew = max(xOld)/dx + 1 rwsNew = max(yOld)/dy + 1 else: clsNew = numpy.ceil( xOld[-1] / dx ) rwsNew = numpy.ceil( yOld[-1] / dy ) x = numpy.arange(clsNew)*dx y = numpy.arange(rwsNew)*dy X,Y = numpy.meshgrid( x , y ) topo = interpolate.interp2d( XOld , YOld , topo , kind='linear' )( X , Y ) #topo = interpolate.interp2d( XOld , YOld , topo, X, Y ) ; easting = interpolate.interp1d( xOld , easting , kind='linear' )( x ) northing = interpolate.interp1d( yOld , northing , kind='linear' )( y ) cellsize = dx # Set the bed elevation to 'topo' Zb = topo.copy() initZb = Zb.copy() #if !exist('H'): H = numpy.zeros(Zb.shape) try: H except NameError: H = numpy.zeros( Zb.shape , dtype=numpy.longdouble ) Zi = H + Zb #clear topo rws,cls = Zb.shape x = numpy.arange( cls )*dx y = numpy.arange( rws )*dy X,Y = numpy.meshgrid( x , y ) # Create a generic ice surface if GENERIC_ICE_TOGGLE: # This code segment rotates the topo such that the # ice boundary is on the left side of the simulation # need to check code; better to rotate DEM prior to use ZiBound = numpy.mean(Zb[:,0]) + Hbound taub = 200000 H = numpy.zeros(Zb.shape, dtype=numpy.longdouble ) rws,cls = Zb.shape beta = taub/(rhoI*g) jtermlast = cls-2 icefree = 0 # for each row, find the cell for which the ice surface # height at the left boundary would be ZiBound if the # terminus that starts in that cell #for i =1:rws for i in range(rws): mZb = Zb[i,:] slope = -numpy.diff(mZb)/dx # search starts in front of the terminus # of the adjacent row that was just found jterm = min( jtermlast+1, cls-2 ) while jterm > 0: # backwater calculation mH = numpy.zeros(mZb.shape, dtype=numpy.longdouble ) for j in range(jterm-1,-1,-1): term1 = ( -slope[j]/2. 
- (mH[j+1]/dx) )**2 term2 = -(2./dx) * ( slope[j] * mH[j+1] - beta ) deltaH = -slope[j]*dx/2. - mH[j+1] + dx * numpy.sqrt(term1+term2) mH[j] = mH[j+1] + deltaH # the following ensures that the search for # the terminus was started beyond the terminus mZi = mZb + mH if mZi[0] > ZiBound: icefree = 1 elif icefree and mZi[0] < ZiBound: H[i,:] = mH jtermlast = jterm icefree = 0 break else: jterm = jterm + 2 if jterm >= cls-1: logging.error( "Generic ice overruns boundary" ) return -1 jterm = jterm - 1 Zi = Zb + H rws,cls = Zb.shape filt = numpy.ones( (3,3) , dtype=numpy.longdouble ) / 9 ZiBig = numpy.zeros( (rws+2,cls+2) , dtype=numpy.longdouble ) ZiBig[1:-1,1:-1] = Zi for i in range(10): ZiBig[(0,-1),:] = ZiBig[(1,-2),:] ZiBig[:,(0,-1)] = ZiBig[:,(1,-2)] ZiBig = filter2d( filt , ZiBig ) Zi = ZiBig[1:-2,1:-2] ind = H == 0 Zi[ind] = Zb[ind] conserveIce = H.sum(axis=0).sum() iceVolumeLast = conserveIce*dx*dy else: # SYNTHETIC BEDROCK TOPOGRAPHY logging.error( "Must code synthetic initial condition" ) return -1 ### INIT_COND_TOGGLE ###### ### Load a saved state ###### return ( H , Zb , dx , dy ) def gc2d( argv=None , inputFile='Animas_200.mat' ): # if argv is None: # argv = sys.argv # try: # try: # opts, args = getopt.getopt( argv[1:] , "h" , ["help"] ) # except getopt.error, msg: # raise Usage(msg) # except Usage, err: # print >> sys.stderr, err.msg # print >> sys.stderr, "for help use --help" # return 2 ( H , Zb , dx , dy ) = load_state( inputFile ) run_for( Parameters.t , Parameters.tMax , H , Zb , dx , dy ) return 0 from csdms import Component class gc2d( Component ): def __init__( self ): self._name = 'GC2D' self._vars = {} self.set_var( 'H' , None ) self.set_var( 'Zb' , None ) self.set_var( 'dx' , None ) self.set_var( 'dy' , None ) def setup( self , file=None ): Component( self._name ).setup() ( H , Zb , dx , dy ) = load_state( file ) self.set_var( 'H' , H ) self.set_var( 'Zb' , Zb ) self.set_var( 'dx' , dx ) self.set_var( 'dy' , dy ) def run_for( self , duration , start=0. ): #if type( duration ) == unum.Unum: # duration_in_y = duration.convert( YR ).asNumber() #else: duration_in_y = duration #if type( start ) == unum.Unum: # start_in_y = start.convert( YR ).asNumber() #else: start_in_y = start Component( self._name ).run() H = self.get_var( 'H' ) Zb = self.get_var( 'Zb' ) dx = self.get_var( 'dx' ) dy = self.get_var( 'dy' ) run_for( start_in_y , start_in_y+duration_in_y , H , Zb , dx , dy ) def teardown( self ): Component( self._name ).teardown() if __name__ == "__main__": logging.basicConfig( level=logging.INFO , format='%(asctime)s %(levelname)-8s %(message)s' , datefmt='%a, %d %b %Y %H:%M:%S' , filename='gc2d.log' , filemode='w' ) sys.exit( gc2d() )
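# --- Illustrative aside: driving the CSDMS-style component -------------------
# A minimal sketch of the setup / run_for / teardown lifecycle of the
# component defined above.  Note that at this point the name gc2d refers to
# the Component subclass, not the earlier function of the same name.  The
# input file and the 10-year duration are illustrative.
comp = gc2d()
comp.setup('Animas_200.mat')      # load_state() fills H, Zb, dx and dy
comp.run_for(10.0, start=0.0)     # advance the model ten model-years
comp.teardown()
# -----------------------------------------------------------------------------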
Clear glass trumpet with clear glass applied threading. This is an early one, folks; it could even be 18th century, but it definitely goes back to at least 1830. For an interesting article, please read Dale Murchell's story about sleigh rides and glass whimsies in the early 19th century. I just learned that these early glass horns are actually copies of what was called a rifleman's horn, which would have been made from brass or another metal and carried during the American wars in a hunter's bag or woodsman's bag, along with the other items needed by a man out in the wilderness hunting, exploring, etc. Really a neat colonial item! Pictured in George McKearin and other Early American Glass books. The horn is 7" long and in fine condition, no damage. There is a little thread loss at the bottom, but that occurred during making; it is a common problem when laying such a thin string of glass, since the thread was so thin it had no weight to adhere to the body.
import base64 import json from sqlalchemy.exc import IntegrityError, InvalidRequestError import sqlalchemy as sa from alchemyjsonschema import BaseModelWalker, SingleModelWalker, SchemaFactory from errorhandlers import page_not_found from flask import current_app, make_response, request from jsonschema import validate, ValidationError from sqlalchemy.ext.declarative import declarative_base from mrest_client.auth import decode_auth_data from flask_mrest.auth import mrest_authenticate, mrest_404 from hashlib import sha256 SABase = declarative_base() def dictify_item(item, model): columns = [c.name for c in model.__table__.columns] columnitems = dict([(c, getattr(item, c)) for c in columns]) return columnitems def query_to_json(query, model): if isinstance(query, SABase): return json.dumps(dictify_item(query, model)) else: items = [] for item in query: items.append(dictify_item(item, model)) return json.dumps(items) class BaseModel(object): """ An MREST model with no route handlers. A good base to use for private models which will require custom routes. See child SuperModel class for route handler examples. """ def __init__(self, name, model_name, plain_methods=None, id_methods=None, sa_model=None, excludes=None, walker=None): """ :param str name: The display name of the model (typically capitalized) :param str model_name: The model name (lower case, for routing, tables, etc) :param list plain_methods: Methods to use for plain route :param list id_methods: Methods to use for id route :param SABase sa_model: The SQLAlchemy model :param list excludes: a list of excludes to pass to the walker :param BaseModelWalker walker: """ if not excludes: excludes = [] if not id_methods: id_methods = [] if not plain_methods: plain_methods = [] self.name = name self.model_name = model_name self.plain_methods = plain_methods self.id_methods = id_methods self._private_routes = None self._public_routes = None if sa_model is not None: self._sa_model = sa_model else: self._sa_model = None self.excludes = excludes if isinstance(walker, BaseModelWalker): self.walker = walker else: self.walker = SingleModelWalker self._json_schema = None @property def sa_model(self): """ Provide the SQLAlchemy model as a separate object, so that it isn't cluttered with unnecessary attributes. :return: The SQLAlchemy model to use for this super model """ if self._sa_model is None: self._sa_model = SABase # This default is meaningless. Create your own class to inherit from Base. return self._sa_model @property def json_schema(self): """ Provide the SQLAlchemy model as a separate object, so that it isn't cluttered with unnecessary attributes. 
:return: The json schema for this model """ if self._json_schema is None: factory = SchemaFactory(self.walker) self._json_schema = factory.__call__(self.sa_model, excludes=self.excludes) # TODO change to custom route with valid json-reference as per # http://tools.ietf.org/html/draft-zyp-json-schema-04#section-6.2 self._json_schema['$schema'] = "http://json-schema.org/draft-04/schema#" self._json_schema['private_routes'] = self.private_routes self._json_schema['public_routes'] = self.public_routes print self._json_schema return self._json_schema @property def private_routes(self): if self._private_routes is None: self._private_routes = {"/": [], "/:id": []} for method in ('get', 'post', 'put', 'delete'): name = getattr(self, method).__name__ if name == 'authenticated_handler': self._private_routes['/'].append(method.upper()) for method in ('get', 'post', 'put', 'delete'): name = getattr(self, method + "_one").__name__ if name == 'authenticated_handler': self._private_routes['/:id'].append(method.upper()) return self._private_routes @property def public_routes(self): if self._public_routes is None: self._public_routes = {"/": [], "/:id": []} for method in ('get', 'post', 'put', 'delete'): name = getattr(self, method).__name__ if name != 'authenticated_handler' and name != 'pnf_handler': self._public_routes['/'].append(method.upper()) for method in ('get', 'post', 'put', 'delete'): name = getattr(self, method + "_one").__name__ if name != 'authenticated_handler' and name != 'pnf_handler': self._public_routes['/:id'].append(method.upper()) return self._public_routes def route_plain(self): """ Handler for /<model> routes. """ if request.method == 'GET': return self.get() elif request.method == 'POST': return self.post() elif request.method == 'PUT': return self.put() elif request.method == 'DELETE': return self.delete() else: return "this server does not support %s requests" % request.method def route_id(self, itemid): """ Handler for /<model>/<itemid> routes. """ if request.method == 'GET': return self.get_one(itemid) elif request.method == 'POST': return self.post_one(itemid) elif request.method == 'PUT': return self.put_one(itemid) elif request.method == 'DELETE': return self.delete_one(itemid) else: return "this server does not support %s requests" % request.method @mrest_404 def get(self): """ Handler for GET /<model> """ pass @mrest_404 def post(self): """ Handler for POST /<model> """ pass @mrest_404 def put(self): """ Handler for PUT /<model> """ pass @mrest_404 def delete(self): """ Handler for DELETE /<model> """ pass @mrest_404 def get_one(self, itemid): """ Handler for GET /<model>/<id> """ pass @mrest_404 def post_one(self, itemid): """ Handler for POST /<model>/<id> """ pass @mrest_404 def put_one(self, itemid): """ Handler for PUT /<model>/<id> """ pass @mrest_404 def delete_one(self, itemid): """ Handler for DELETE /<model>/<id> """ pass class SuperModel(BaseModel): """ An MREST model with all route handlers defined with default behavior. A good base to use for completely public models which need the generic REST functionality. 
""" def __init__(self, name, model_name, **kwargs): if 'plain_methods' in kwargs: del kwargs['plain_methods'] if 'id_methods' in kwargs: del kwargs['id_methods'] super(SuperModel, self).__init__(name, model_name, plain_methods=['GET', 'POST'], id_methods=['GET', 'PUT', 'DELETE'], **kwargs) def get(self): items = current_app.sa['session'].query(self.sa_model).all() current_app.sa['session'].commit() return make_response(query_to_json(items, self.sa_model), 200) @mrest_authenticate def post(self): args = decode_auth_data(request.data) try: validate(args, current_app.json_schemas[self.model_name]) except ValidationError: return page_not_found() item = self.sa_model(**args) current_app.sa['session'].add(item) current_app.sa['session'].commit() return make_response(query_to_json(item, self.sa_model), 200) @mrest_authenticate def get_one(self, itemid): try: item = current_app.sa['session'].query(self.sa_model).filter(self.sa_model.id == itemid).one() except Exception as e: return page_not_found() return make_response(query_to_json(item, self.sa_model), 200) @mrest_authenticate def put_one(self, itemid): try: item = current_app.sa['session'].query(self.sa_model).filter(self.sa_model.id == itemid).one() except Exception as e: return page_not_found() args = decode_auth_data(request.get_data()) # delete unsafe values if 'id' in args: del args['id'] # override existing values dictitem = dictify_item(item, self.sa_model) for arg in args: if arg in dictitem: dictitem[arg] = args[arg] try: validate(dictitem, current_app.json_schemas[self.model_name]) except ValidationError as ve: current_app.logger.info("ValidationError received %s" % ve) return page_not_found() cid = dictitem['id'] del dictitem['id'] try: current_app.sa['session'].query(self.sa_model).filter(self.sa_model.id == cid).update(dictitem) except Exception as e: return page_not_found() current_app.sa['session'].commit() return make_response(query_to_json(item, self.sa_model), 201) @mrest_authenticate def delete_one(self, itemid): try: item = current_app.sa['session'].query(self.sa_model).filter(self.sa_model.id == itemid).one() except Exception as e: return page_not_found() current_app.sa['session'].delete(item) current_app.sa['session'].commit() return make_response("", 204) class UserSA(SABase): """model for an api user or item user""" __tablename__ = "user" id = sa.Column(sa.String(120), primary_key=True, nullable=False, doc="primary key") pubpem = sa.Column(sa.String(255), nullable=False) def __repr__(self): return "<User(id='%s')>" % self.id class UserModel(BaseModel): """ The ECC auth user object. Override with your user model, if you wish. 
""" def __init__(self): super(UserModel, self).__init__('User', 'user', plain_methods=['POST'], id_methods=['GET'], sa_model=UserSA) def post(self): args = decode_auth_data(request.data) try: validate(args, current_app.json_schemas[self.model_name]) except ValidationError: return page_not_found() pubpem = base64.b64decode(args['pubpem']) pubhash = sha256(pubpem).hexdigest() item = self.sa_model(id=pubhash, pubpem=pubpem) current_app.sa['session'].add(item) try: current_app.sa['session'].commit() except IntegrityError as ie: current_app.logger.info("user already existed %r" % pubhash) current_app.sa['session'].rollback() return make_response(query_to_json(item, self.sa_model), 200) except InvalidRequestError as ire: current_app.logger.info("user already existed %r" % pubhash) current_app.sa['session'].rollback() return make_response(query_to_json(item, self.sa_model), 200) current_app.logger.info("created user %r" % item) return make_response(query_to_json(item, self.sa_model), 200) @mrest_authenticate def get_one(self, itemid): """ Handler for /user/<itemid> routes. """ try: item = current_app.sa['session'].query(self.sa_model).filter(self.sa_model.id == itemid).one() except Exception as e: return page_not_found() return make_response(query_to_json(item, self.sa_model), 200)
The investor wanted rules changed to help CVR Energy. The refiner he controls made bets aligned with that push. They backfired and Icahn resigned as the president’s adviser just ahead of a report detailing these alleged conflicts. Trump now seems to say there was no job to quit. Investor Carl Icahn said on Aug. 18 that he was stepping down as a special adviser to U.S. President Donald Trump. "I chose to end this arrangement (with your blessing) because I did not want partisan bickering about my role to in any way cloud your administration," Icahn said in a letter to the president posted on his website. Icahn, whose company holds an 82 percent stake in refiner CVR Energy, had been working to change federal regulations requiring refiners to blend ethanol into products or purchase credits from competitors. Reuters reported that regulators will probably reject that proposal. Icahn's resignation came ahead of the publication of a New Yorker story about his alleged business conflicts. The story said CVR had been selling so-called renewable identification number (RIN) credits, effectively betting on a price decline because the company ultimately has to buy RINs. Last year, CVR deferred until 2017 about $186 million worth of biofuels credits that it was required to purchase, Reuters reported. This year, the company continued to build that position to more than $275 million, according to a Reuters review of CVR filings. Icahn said he did not believe his role presented conflicts and he had not profited from it.