from google.appengine.ext import db


class ReportSet(db.Model):
    setid = db.StringProperty(required=False)
    uid = db.StringProperty(required=False)
    ostype = db.StringProperty(required=False)
    osversion = db.StringProperty(required=False)
    clrversion = db.StringProperty(required=False)
    appname = db.StringProperty(required=False)
    appversion = db.StringProperty(required=False)
    assembly = db.StringProperty(required=False)


class ReportItem(db.Model):
    reportset = db.ReferenceProperty(required=True)
    timestamp = db.IntegerProperty(required=False)
    eventtype = db.StringProperty(required=False)
    count = db.IntegerProperty(required=False)
    name = db.StringProperty(required=False)
    data = db.TextProperty(required=False)
    counted = db.IntegerProperty(required=False)


class AggregateItem(db.Model):
    # day, month, year or week
    rangetype = db.StringProperty(required=True)
    # eg. 2016-01-01
    # or 2016-01
    # or 2016
    # or 2016-w1
    rangekey = db.StringProperty(required=True)
    timestamp = db.IntegerProperty(required=True)
    value_sum = db.IntegerProperty(required=True)
    entry_count = db.IntegerProperty(required=True)
    ostype = db.StringProperty(required=True)
    name = db.StringProperty(required=True)
    value = db.StringProperty(required=False)
    lastupdated = db.IntegerProperty(required=False)
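For context, a minimal sketch of how these models might be used with the legacy App Engine datastore API; the IDs, timestamps and filter values below are hypothetical.

report_set = ReportSet(setid='set-001', uid='user-42', ostype='windows',
                       appname='MyApp', appversion='1.2.3')
report_set.put()  # persist the parent entity

item = ReportItem(reportset=report_set, timestamp=1451606400,
                  eventtype='exception', count=1, name='NullReferenceException')
item.put()

# Query daily aggregates for one OS type (assumes a matching index exists).
daily = (AggregateItem.all()
         .filter('rangetype =', 'day')
         .filter('rangekey =', '2016-01-01')
         .filter('ostype =', 'windows')
         .fetch(100))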
A pilots’ union has said “more needs to be done” to tackle the growing use of lasers against aircraft after a New York-bound plane had to turn back to Heathrow. The Virgin Atlantic flight returned to the west London airport as a “precautionary measure” after the co-pilot reported feeling unwell following the incident on Sunday. In 2010, a law was passed in the UK which allows offenders to be charged with “shining a light at an aircraft in flight so as to dazzle or distract the pilot”. If the distraction or dazzle is serious, a person may be found guilty of “reckless endangerment” and sent to prison. According to the British Airline Pilots Association (Balpa), a laser can result in temporary vision loss associated with flash blindness (“visual interference that persists after the source of illumination has been removed”), an after-image (“an image left in the visual field after exposure to a bright light”) and glare. Balpa general secretary Jim McAuslan said: “This is not an isolated incident. Aircraft are attacked with lasers at an alarming rate and with lasers with ever-increasing strength.” Between 2009 and June 2015, more than 8,998 laser incidents across the country were reported to the UK Civil Aviation Authority. Topping the list for the number of laser incidents in the first six months of last year was London Heathrow, with a total of 48.
import os import numpy as np from opensfm import io from opensfm.dataset import DataSet def run_dataset(data: DataSet, list_path, bundle_path, undistorted): """Export reconstruction to bundler format. Args: list_path: txt list of images to export bundle_path : output path undistorted : export undistorted reconstruction """ udata = data.undistorted_dataset() default_path = os.path.join(data.data_path, "bundler") list_file_path = list_path if list_path else default_path bundle_file_path = bundle_path if bundle_path else default_path if undistorted: reconstructions = udata.load_undistorted_reconstruction() track_manager = udata.load_undistorted_tracks_manager() images = reconstructions[0].shots.keys() else: reconstructions = data.load_reconstruction() track_manager = data.load_tracks_manager() images = data.images() export_bundler( images, reconstructions, track_manager, bundle_file_path, list_file_path ) def export_bundler( image_list, reconstructions, track_manager, bundle_file_path, list_file_path ): """ Generate a reconstruction file that is consistent with Bundler's format """ io.mkdir_p(bundle_file_path) io.mkdir_p(list_file_path) for j, reconstruction in enumerate(reconstructions): lines = [] lines.append("# Bundle file v0.3") points = reconstruction.points shots = reconstruction.shots num_point = len(points) num_shot = len(image_list) lines.append(" ".join(map(str, [num_shot, num_point]))) shots_order = {key: i for i, key in enumerate(image_list)} # cameras for shot_id in image_list: if shot_id in shots: shot = shots[shot_id] camera = shot.camera if shot.camera.projection_type == "brown": # Will aproximate Brown model, not optimal focal_normalized = camera.focal_x else: focal_normalized = camera.focal scale = max(camera.width, camera.height) focal = focal_normalized * scale k1 = camera.k1 k2 = camera.k2 R = shot.pose.get_rotation_matrix() t = np.array(shot.pose.translation) R[1], R[2] = -R[1], -R[2] # Reverse y and z t[1], t[2] = -t[1], -t[2] lines.append(" ".join(map(str, [focal, k1, k2]))) for i in range(3): lines.append(" ".join(map(str, R[i]))) t = " ".join(map(str, t)) lines.append(t) else: for _ in range(5): lines.append("0 0 0") # tracks for point in points.values(): coord = point.coordinates color = list(map(int, point.color)) view_list = track_manager.get_track_observations(point.id) lines.append(" ".join(map(str, coord))) lines.append(" ".join(map(str, color))) view_line = [] for shot_key, obs in view_list.items(): if shot_key in shots.keys(): v = obs.point shot_index = shots_order[shot_key] camera = shots[shot_key].camera scale = max(camera.width, camera.height) x = v[0] * scale y = -v[1] * scale view_line.append(" ".join(map(str, [shot_index, obs.id, x, y]))) lines.append(str(len(view_line)) + " " + " ".join(view_line)) bundle_file = os.path.join( bundle_file_path, "bundle_r" + str(j).zfill(3) + ".out" ) with io.open_wt(bundle_file) as fout: fout.writelines("\n".join(lines) + "\n") list_file = os.path.join(list_file_path, "list_r" + str(j).zfill(3) + ".out") with io.open_wt(list_file) as fout: fout.writelines("\n".join(map(str, image_list)))
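A short usage sketch of the exporter above, assuming an OpenSfM project directory that already contains a reconstruction; the project path is hypothetical.

from opensfm.dataset import DataSet

# Export the undistorted reconstruction of a hypothetical project to Bundler format,
# letting run_dataset fall back to its default <project>/bundler output directory.
data = DataSet('/data/my_project')
run_dataset(data, list_path=None, bundle_path=None, undistorted=True)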
The Stables is located in Forncett Saint Mary. The property is 12 miles from Norwich. The vacation home has a TV. Guests can relax in the garden at the property. Great Yarmouth is 30 miles from the vacation home, while Lowestoft is 28 miles away. The nearest airport is Norwich International Airport, 19 miles from the property.
from pymonet.utils import identity


class ApplicativeLawTester:

    def __init__(self, applicative, value, mapper1, mapper2, get_fn=identity):
        self.applicative = applicative
        self.value = value
        self.mapper1 = mapper1
        self.mapper2 = mapper2
        self.get_fn = get_fn

    def _assert(self, x, y):
        assert self.get_fn(x) == self.get_fn(y)

    def identity_test(self):
        x = self.applicative(identity).ap(self.applicative(self.value))
        y = self.applicative(self.value)
        self._assert(x, y)

    def composition_test(self):
        def lambda_fn(fn1):
            return lambda fn2: lambda value: fn1(fn2(value))

        x = self.applicative(lambda_fn)\
            .ap(self.applicative(self.mapper1))\
            .ap(self.applicative(self.mapper2))\
            .ap(self.applicative(self.value))
        y = self.applicative(self.mapper1).ap(
            self.applicative(self.mapper2).ap(self.applicative(self.value))
        )
        self._assert(x, y)

    def homomorphism_test(self):
        x = self.applicative(self.mapper1).ap(self.applicative(self.value))
        y = self.applicative(self.mapper1(self.value))
        self._assert(x, y)

    def interchange_test(self):
        x = self.applicative(self.mapper1).ap(self.applicative(self.value))
        y = self.applicative(lambda fn: fn(self.value)).ap(
            self.applicative(self.mapper1)
        )
        self._assert(x, y)

    def test(self):
        self.identity_test()
        self.composition_test()
        self.homomorphism_test()
        self.interchange_test()
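A minimal sketch of how this tester might be driven. Identity here is a hypothetical bare-bones applicative written only for illustration (it is not part of pymonet): it wraps a value, and ap applies a wrapped function to another wrapped value.

class Identity:
    # hypothetical minimal applicative used only to exercise the law tester
    def __init__(self, value):
        self.value = value

    def ap(self, other):
        # self wraps a function; apply it to the value wrapped by `other`
        return Identity(self.value(other.value))


ApplicativeLawTester(
    applicative=Identity,
    value=3,
    mapper1=lambda x: x + 1,
    mapper2=lambda x: x * 2,
    get_fn=lambda box: box.value,
).test()  # raises AssertionError if any applicative law is violated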
This cover for Trojan Family was shot at The Getty, although – after agreeing to let us shoot in one of the main buildings – when we arrived they dragged us to the servant’s entrance…er, I mean…the helipad. This cover was done at the Wrigley Institute for Environmental Studies on the island of Catalina. Dr. Gary Michelson, a major donor to the Dornsife College. A cover produced for the Dornsife College. I had my images on several graduation day covers. This was an alternate to the 2010 chosen cover, which I actually preferred. Rick Simner designed these graduation day brochure covers. Another alternate design, for 2013, also a favorite of mine. This was the cover of a book published for the wife of Dr. Sample, when they retired. My photographs were approximately three-quarters of the edition. Produced for the USC School of Pharmacy. This poster was actually an accident. I took this, a detail of the Doheny Library ceiling, while awaiting the late arrival of a portrait subject. Bibliotech, a magazine published by Susie Wampler, was a great publication. Doheny is open all night, the point of this cover story. Bovard, with downtown Los Angeles in the near distance, a rooftop photograph.
""" Integration test for EC2 cell setup. """ import ast import unittest import importlib import click import click.testing from botocore.exceptions import ClientError import time from treadmill.infra import vpc class CellCLITest(unittest.TestCase): """Tests EC2 cell setup.""" def setUp(self): self.vpc_name = 'IntegrationTest-' + str(time.time()) self.runner = click.testing.CliRunner() self.configure_cli = importlib.import_module( 'treadmill.cli.cloud' ).init() def tearDown(self): if not self.destroy_attempted: self.runner.invoke( self.configure_cli, [ '--domain=treadmill.org', 'delete', 'vpc', '--vpc-name=' + self.vpc_name, ], obj={} ) def test_setup_cell(self): self.destroy_attempted = False result_init = self.runner.invoke( self.configure_cli, [ '--domain=treadmill.org', 'init', 'vpc', '--name=' + self.vpc_name ], obj={} ) cell_info = {} vpc_info = {} try: vpc_info = ast.literal_eval(result_init.output) except Exception as e: if result_init.exception: print(result_init.exception) else: print(e) self.vpc_id = vpc_info['VpcId'] self.assertIsNotNone(vpc_info['VpcId']) self.assertEqual(vpc_info['Name'], self.vpc_name) self.assertEqual(vpc_info['Subnets'], []) result_cell_init = self.runner.invoke( self.configure_cli, [ '--domain=treadmill.org', 'init', 'cell', '--tm-release=0.1.0', '--key=ms_treadmill_dev', '--image=RHEL-7.4', '--vpc-name=' + self.vpc_name, '--cell-cidr-block=172.23.0.0/24', '--ipa-admin-password=Tre@dmill1', '--cell-subnet-name=TreadmillCell', '--ldap-subnet-name=TreadmillLDAP', ], obj={} ) result = {} try: result = ast.literal_eval(result_cell_init.output) except Exception as e: if result_cell_init.exception: print(result_cell_init.exception) else: print(e) cell_info = result['Cell'] ldap_info = result['Ldap'] _vpc = vpc.VPC(id=vpc_info['VpcId']) _vpc_info = _vpc.show() self.assertEqual(cell_info['VpcId'], vpc_info['VpcId']) self.assertEqual(cell_info['VpcId'], ldap_info['VpcId']) self.assertEqual(len(cell_info['Instances']), 6) self.assertEqual(len(ldap_info['Instances']), 1) self.assertCountEqual( [i['Name'] for i in cell_info['Instances']], ['TreadmillMaster1', 'TreadmillMaster2', 'TreadmillMaster3', 'TreadmillZookeeper1', 'TreadmillZookeeper2', 'TreadmillZookeeper3'] ) zk_subnet_ids = set([ i['SubnetId'] for i in cell_info['Instances'] if i['Name'][:-1] in 'TreadmillZookeeper' ]) master_subnet_ids = set([ i['SubnetId'] for i in cell_info['Instances'] if i['Name'][:-1] in 'TreadmillMaster' ]) ldap_subnet_ids = set([ i['SubnetId'] for i in ldap_info['Instances'] if i['Name'][:-1] in 'TreadmillLDAP' ]) self.assertEqual(len(zk_subnet_ids), 1) self.assertEqual(len(ldap_subnet_ids), 1) self.assertEqual(len(master_subnet_ids), 1) self.assertEqual(master_subnet_ids, zk_subnet_ids) self.assertNotEqual(master_subnet_ids, ldap_subnet_ids) self.assertEqual(len(_vpc_info['Subnets']), 2) self.assertCountEqual(_vpc_info['Subnets'], [list(zk_subnet_ids)[0], list(ldap_subnet_ids)[0]]) self.runner.invoke( self.configure_cli, [ '--domain=treadmill.org', 'delete', 'cell', '--vpc-name=' + self.vpc_name, '--subnet-name=TreadmillCell', ], obj={} ) self.runner.invoke( self.configure_cli, [ '--domain=treadmill.org', 'delete', 'ldap', '--vpc-name=' + self.vpc_name, '--subnet-name=TreadmillLDAP', ], obj={} ) _vpc.instances = None _vpc.subnet_ids = [] _vpc_info = _vpc.show() self.assertEqual(len(_vpc_info['Instances']), 0) self.assertEqual(len(_vpc_info['Subnets']), 0) self.runner.invoke( self.configure_cli, [ '--domain=treadmill.org', 'delete', 'vpc', '--vpc-name=' + self.vpc_name ], obj={} ) 
self.destroy_attempted = True with self.assertRaises(ClientError) as error: _vpc.ec2_conn.describe_vpcs( VpcIds=[vpc_info['VpcId']] ) self.assertEqual( error.exception.response['Error']['Code'], 'InvalidVpcID.NotFound' ) if __name__ == '__main__': unittest.main()
This document is targeted at organizations with existing Creative Cloud, Document Cloud, and Acrobat DC licenses through an Enterprise Term License Agreement (ETLA) or Value Incentive Plan (VIP) subscription that are migrating to a different Buying Program or license type. If the license type of your organization changes, then to continue working, your end users will need to sign out of any Adobe product or service and sign back in with the same credentials. For desktop products such as Photoshop, Acrobat, and Illustrator, use the Sign out and Sign in options in the Help menu. On Adobe.com, use the icon in the upper-right corner to sign out and then sign back in. To ensure no lapse in end-user product access, you must assign licenses in the Adobe Admin Console before the existing VIP subscription term ends. For ETLA customers, please allow at least 30 days of product overlap. For VIP customers, please purchase licenses prior to your Anniversary Date, and assign licenses before the renewal window closes on your current VIP subscription term. CLP or TLP customers can migrate from serialized Acrobat or Creative Suite to named-user licenses by following migration instructions here. The customer's renewal window is open (30 days before or after the VIP agreement anniversary date). The enterprise products on the order are new SKUs and are equivalent to the team versions of the products in the current term. The enterprise license order quantity is greater than or equal to the existing team license quantity. The customer's renewal window is open. The enterprise products on the order are new SKUs that are "higher value" products than the team products in the current term. The quantity of enterprise licenses on the order is fewer than the number of existing team licenses. The order is placed for "higher value" enterprise products than the team products in place, but the quantity of ordered enterprise licenses is fewer than the quantity of existing team licenses. If the order is placed with mixed products—team and enterprise—regardless of the quantity. If the customer already purchased team and enterprise products prior to the renewal period. If enterprise renewal SKUs are used for the new enterprise order. If the enterprise product order is for a different VIP agreement number. If the current team products include products that do not have enterprise versions. The email indicates the date by which you must transfer users from team licenses to enterprise licenses in the Admin Console before they lose access. Confirm the number of licenses for assignment. Confirm that the team product licenses that are being un-assigned match the enterprise licenses that are being assigned. You will receive an email confirmation when it’s complete. Download the results report within the Admin Console to validate that all licenses were assigned. Your end users will not experience a lapse in product service if this is completed before the date specified in your confirmation email. Next, schedule your 1:1 onboarding call with an Adobe Onboarding Specialist (if you haven’t already) to learn about everything the Admin Console has to offer, like Administrative Roles and Identity. Quick License Assignment does not migrate users with pending invitations in the Team Admin Console. Once you get access to the Admin Console and your licenses are added, navigate to Users > Users. Click in the upper-right corner of the Users page, and choose Edit User Details by CSV from the drop-down list.
In the Edit Users by CSV dialog box, click Download CSV template and choose Current user list. Add license assignments to the downloaded CSV file. Then, drag the updated CSV file to the Edit Users by CSV dialog box and click Upload. Log into the Admin Console and navigate to the organization to which your VIP users belong. Click in the upper-right corner of the Users page, and choose Export users list to CSV. Navigate to the ETLA organization to which you want to migrate your VIP users. Click in the upper-right corner of the Users page, and choose Add Users by CSV. To the downloaded CSV file, add the VIP users from the CSV file that you downloaded in Step 3, above. Click in the upper-right and choose Add users by CSV, and upload the updated CSV file. Upload the updated CSV file. You will receive an email confirmation when your users are added to the ETLA organization. Download the results report within the Admin Console to validate that all licenses were assigned. Log into the Admin Console and navigate to the organization to which your ETLA users belong. Navigate to the VIP organization to which you want to migrate your ETLA users. To the downloaded CSV file, add the ETLA users from the CSV file that you downloaded in Step 3, above. You will receive an email confirmation when your users are added to the VIP organization.
# Copyright (c) 2013- Takafumi Arakaki

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of the
# License.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Lesser General Public License for more details.

# You should have received a copy of the GNU Lesser General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

from .utils.strutils import remove_prefix, get_lines_at_point
from .filetitle import write_path_and_title


class AccessInfo(object):

    """
    Access information object.
    """

    __slots__ = ['path', 'point', 'recorded', 'type', 'showpath']

    def __init__(self, path, point, recorded, type):
        self.path = self.showpath = path
        self.point = point
        self.recorded = recorded
        self.type = type

    def _set_relative_path(self, absunder):
        """
        Set :attr:`showpath` and return the newly set value.

        :attr:`showpath` is set to the relative path of :attr:`path`
        from one of the paths in `absunder`.
        """
        self.showpath = remove_prefix(absunder, self.path)
        return self.showpath

    def _get_lines_at_point(self, pre_lines, post_lines):
        with open(self.path) as f:
            return get_lines_at_point(
                f.read(), self.point, pre_lines, post_lines)

    def write_paths_and_lines(self, file, pre_lines=0, post_lines=0,
                              newline='\n', separator=':'):
        """
        Write :attr:`showpath` and lines around :attr:`point` to `file`.
        """
        for (lineno, line) in self._get_lines_at_point(pre_lines, post_lines):
            file.write(self.showpath)
            file.write(separator)
            file.write(str(lineno))
            file.write(separator)
            file.write(line)
            file.write(newline)

    def write_path_and_title(self, file, newline='\n', separator=':', **kwds):
        """
        Call :func:`.filetitle.write_path_and_title`.
        """
        write_path_and_title(file, self.path, self.showpath,
                             newline, separator, **kwds)
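A minimal usage sketch under assumed semantics: the file path, character offset and type value below are hypothetical, and `recorded` is treated here as a Unix timestamp.

import sys

info = AccessInfo('/home/user/project/module.py', point=120,
                  recorded=1451606400, type='open')
info.write_paths_and_lines(sys.stdout, pre_lines=1, post_lines=1)
# each emitted line looks like: <showpath>:<lineno>:<source line>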
and the Feisty kernel supports it out of the box. Using umtsmon I can set the PIN and not worry about it anymore. However, I was unable to connect using the tool. # we do not need to authenticate ourselves? # fied explicitly on the command line or in an options file).
# -*- coding: utf-8 -*- from __future__ import absolute_import, unicode_literals from django.db import models from django.conf import settings from django.utils.encoding import python_2_unicode_compatible from django.utils.translation import ugettext_lazy as _ from django.core.urlresolvers import reverse import uuid from imagekit.models import ImageSpecField from imagekit.processors import ResizeToFit, ResizeCanvas, ResizeToFill class TimeStampedModel(models.Model): # Abstract base class model that provides self-updating created and modified fields created = models.DateTimeField(auto_now_add=True) modified = models.DateTimeField(auto_now=True) class Meta: abstract = True @python_2_unicode_compatible class Rsvp(models.Model): user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE) date = models.DateTimeField(auto_now_add=True) will_attend = models.NullBooleanField(verbose_name=_('Attendance'), null=True, blank=True) guest2 = models.CharField(verbose_name=_('Name, Surname'), max_length=100, blank=True) guest3 = models.CharField(verbose_name=_('Name, Surname'), max_length=100, blank=True) guest4 = models.CharField(verbose_name=_('Name, Surname'), max_length=100, blank=True) def __str__(self): return self.user.name + ': ' + ("is coming" if self.will_attend else "not coming") def get_absolute_url(self): return reverse('wedding:rsvp-detail', kwargs={'username': self.user.username}) class Meta: verbose_name = "RSVP" verbose_name_plural = "RSVPs" permissions = ( ("view_list", "Can see the RSVP list"), ) class Gift(TimeStampedModel): id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False) name = models.CharField(verbose_name=_('Name'), max_length=300) description = models.TextField(verbose_name=_('Description'), null=True, blank=True) link = models.TextField(verbose_name=_('Link'), null=True, blank=True) price = models.DecimalField(verbose_name=_('Price'), max_digits=7, decimal_places=2) gift_is_part = models.BooleanField(verbose_name=_('Gift is part'), default=False) max_parts = models.PositiveIntegerField(verbose_name=_('Maximum number of parts')) taken_parts = models.PositiveIntegerField(verbose_name=_('Number of parts taken'), default=0) img = models.ImageField(blank=True, null=True) img_catalog = ImageSpecField(source='img', processors=[ResizeToFit(800, 600), ResizeCanvas(800, 600)], format='JPEG', options={'quality': 60}) img_miniature = ImageSpecField(source='img', processors=[ResizeToFill(60, 60)], format='JPEG', options={'quality': 60}) def is_available(self): if self.taken_parts < self.max_parts: return True else: return False def avail_parts(self): return self.max_parts - self.taken_parts def __str__(self): return self.name def get_absolute_url(self): return reverse("wedding:gift-detail", kwargs={'pk': self.pk}) class Meta: verbose_name = "Gift" verbose_name_plural = "Gifts" permissions = ( ("edit", "Can edit the Gift list"), ) class GiftOrder(TimeStampedModel): id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False) user = models.ForeignKey(settings.AUTH_USER_MODEL) voucher_from = models.CharField(verbose_name=_('Voucher is from'), max_length=300) voucher_greeting = models.TextField(verbose_name=_('Voucher Greeting'), null=True, blank=True) voucher_senddirect = models.BooleanField(verbose_name=_('Send voucher directly'), default=False) payment_received = models.BooleanField(verbose_name=_('Payment received'), default=False) voucher_issued = models.BooleanField(verbose_name=_('Voucher issued'), default=False) total_price = 
models.DecimalField(verbose_name=_('Total price'), max_digits=10, decimal_places=2, default=0.00) def __str__(self): return self.user.name + "/" + "{:%Y/%m/%d}".format(self.created) + "/" + self.total_price.__str__() class GiftOrderItem(TimeStampedModel): id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False) gift = models.ForeignKey(Gift) giftorder = models.ForeignKey(GiftOrder) quantity = models.PositiveIntegerField(verbose_name=_('Item count')) price = models.DecimalField(verbose_name=_('Price'), max_digits=7, decimal_places=2, default=0.00) @property def price_total(self): return self.quantity * self.price def __str__(self): return self.gift.name class Cart(TimeStampedModel): id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False) class CartItem(TimeStampedModel): id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False) user = models.ForeignKey(settings.AUTH_USER_MODEL) gift = models.ForeignKey(Gift) quantity = models.PositiveIntegerField(verbose_name=_('Item count')) def get_absolute_url(self): return reverse("wedding:cart-detail", kwargs={'pk': self.pk}) @property def price_total(self): return self.quantity * self.gift.price def __str__(self): return self.gift.name + " " + self.id.__str__() class GiftOrderStatus(GiftOrder): class Meta: proxy = True def get_absolute_url(self): return reverse("wedding:orderstatus-detail", kwargs={'pk': self.pk})
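A small sketch of how parts of a shared gift might be claimed against these models; claim_gift_parts is a hypothetical helper (not part of the code above) and assumes Django's standard ORM transaction and row-locking support.

from django.db import transaction

def claim_gift_parts(gift_pk, quantity):
    """Hypothetical helper: atomically reserve `quantity` parts of a Gift."""
    with transaction.atomic():
        gift = Gift.objects.select_for_update().get(pk=gift_pk)
        if gift.avail_parts() < quantity:
            raise ValueError("not enough parts of this gift are left")
        gift.taken_parts += quantity
        gift.save(update_fields=['taken_parts'])
        return gift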
600-word article on the bad effects of pollutants. I need you to write some articles. I have many articles written on environmental pollution. Coming up with a 600-word article will be easy, and it will be well done within a very short space of time. I will complete it in 1 day. It's very easy for me to write on this type of topic. I will do my best for your betterment.
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import warnings from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union from google.api_core import gapic_v1 # type: ignore from google.api_core import grpc_helpers_async # type: ignore from google.api_core import operations_v1 # type: ignore from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore import packaging.version import grpc # type: ignore from grpc.experimental import aio # type: ignore from google.cloud.recommendationengine_v1beta1.types import catalog from google.cloud.recommendationengine_v1beta1.types import catalog_service from google.cloud.recommendationengine_v1beta1.types import import_ from google.longrunning import operations_pb2 # type: ignore from google.protobuf import empty_pb2 # type: ignore from .base import CatalogServiceTransport, DEFAULT_CLIENT_INFO from .grpc import CatalogServiceGrpcTransport class CatalogServiceGrpcAsyncIOTransport(CatalogServiceTransport): """gRPC AsyncIO backend transport for CatalogService. Service for ingesting catalog information of the customer's website. This class defines the same methods as the primary client, so the primary client can load the underlying transport implementation and call it. It sends protocol buffers over the wire using gRPC (which is built on top of HTTP/2); the ``grpcio`` package must be installed. """ _grpc_channel: aio.Channel _stubs: Dict[str, Callable] = {} @classmethod def create_channel( cls, host: str = "recommendationengine.googleapis.com", credentials: ga_credentials.Credentials = None, credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, quota_project_id: Optional[str] = None, **kwargs, ) -> aio.Channel: """Create and return a gRPC AsyncIO channel object. Args: host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. credentials_file (Optional[str]): A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if ``channel`` is provided. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. quota_project_id (Optional[str]): An optional project to use for billing and quota. kwargs (Optional[dict]): Keyword arguments, which are passed to the channel creation. Returns: aio.Channel: A gRPC AsyncIO channel object. 
""" return grpc_helpers_async.create_channel( host, credentials=credentials, credentials_file=credentials_file, quota_project_id=quota_project_id, default_scopes=cls.AUTH_SCOPES, scopes=scopes, default_host=cls.DEFAULT_HOST, **kwargs, ) def __init__( self, *, host: str = "recommendationengine.googleapis.com", credentials: ga_credentials.Credentials = None, credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, channel: aio.Channel = None, api_mtls_endpoint: str = None, client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, ssl_channel_credentials: grpc.ChannelCredentials = None, client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, quota_project_id=None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, always_use_jwt_access: Optional[bool] = False, ) -> None: """Instantiate the transport. Args: host (Optional[str]): The hostname to connect to. credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if ``channel`` is provided. credentials_file (Optional[str]): A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if ``channel`` is provided. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. channel (Optional[aio.Channel]): A ``Channel`` instance through which to make calls. api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. If provided, it overrides the ``host`` argument and tries to create a mutual TLS channel with client SSL credentials from ``client_cert_source`` or applicatin default SSL credentials. client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): Deprecated. A callback to provide client SSL certificate bytes and private key bytes, both in PEM format. It is ignored if ``api_mtls_endpoint`` is None. ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials for grpc channel. It is ignored if ``channel`` is provided. client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): A callback to provide client certificate bytes and private key bytes, both in PEM format. It is used to configure mutual TLS channel. It is ignored if ``channel`` or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. client_info (google.api_core.gapic_v1.client_info.ClientInfo): The client info used to send a user-agent string along with API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. always_use_jwt_access (Optional[bool]): Whether self signed JWT should be used for service account credentials. Raises: google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport creation failed for any reason. google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. 
""" self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials self._stubs: Dict[str, Callable] = {} self._operations_client = None if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) if client_cert_source: warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: # Ignore credentials if a channel was passed. credentials = False # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None else: if api_mtls_endpoint: host = api_mtls_endpoint # Create SSL credentials with client_cert_source or application # default SSL credentials. if client_cert_source: cert, key = client_cert_source() self._ssl_channel_credentials = grpc.ssl_channel_credentials( certificate_chain=cert, private_key=key ) else: self._ssl_channel_credentials = SslCredentials().ssl_credentials else: if client_cert_source_for_mtls and not ssl_channel_credentials: cert, key = client_cert_source_for_mtls() self._ssl_channel_credentials = grpc.ssl_channel_credentials( certificate_chain=cert, private_key=key ) # The base transport sets the host, credentials and scopes super().__init__( host=host, credentials=credentials, credentials_file=credentials_file, scopes=scopes, quota_project_id=quota_project_id, client_info=client_info, always_use_jwt_access=always_use_jwt_access, ) if not self._grpc_channel: self._grpc_channel = type(self).create_channel( self._host, credentials=self._credentials, credentials_file=credentials_file, scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) # Wrap messages. This must be done after self._grpc_channel exists self._prep_wrapped_messages(client_info) @property def grpc_channel(self) -> aio.Channel: """Create the channel designed to connect to this service. This property caches on the instance; repeated calls return the same channel. """ # Return the channel from cache. return self._grpc_channel @property def operations_client(self) -> operations_v1.OperationsAsyncClient: """Create the client designed to process long-running operations. This property caches on the instance; repeated calls return the same client. """ # Sanity check: Only create a new client if we do not already have one. if self._operations_client is None: self._operations_client = operations_v1.OperationsAsyncClient( self.grpc_channel ) # Return the client from cache. return self._operations_client @property def create_catalog_item( self, ) -> Callable[ [catalog_service.CreateCatalogItemRequest], Awaitable[catalog.CatalogItem] ]: r"""Return a callable for the create catalog item method over gRPC. Creates a catalog item. Returns: Callable[[~.CreateCatalogItemRequest], Awaitable[~.CatalogItem]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "create_catalog_item" not in self._stubs: self._stubs["create_catalog_item"] = self.grpc_channel.unary_unary( "/google.cloud.recommendationengine.v1beta1.CatalogService/CreateCatalogItem", request_serializer=catalog_service.CreateCatalogItemRequest.serialize, response_deserializer=catalog.CatalogItem.deserialize, ) return self._stubs["create_catalog_item"] @property def get_catalog_item( self, ) -> Callable[ [catalog_service.GetCatalogItemRequest], Awaitable[catalog.CatalogItem] ]: r"""Return a callable for the get catalog item method over gRPC. Gets a specific catalog item. Returns: Callable[[~.GetCatalogItemRequest], Awaitable[~.CatalogItem]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_catalog_item" not in self._stubs: self._stubs["get_catalog_item"] = self.grpc_channel.unary_unary( "/google.cloud.recommendationengine.v1beta1.CatalogService/GetCatalogItem", request_serializer=catalog_service.GetCatalogItemRequest.serialize, response_deserializer=catalog.CatalogItem.deserialize, ) return self._stubs["get_catalog_item"] @property def list_catalog_items( self, ) -> Callable[ [catalog_service.ListCatalogItemsRequest], Awaitable[catalog_service.ListCatalogItemsResponse], ]: r"""Return a callable for the list catalog items method over gRPC. Gets a list of catalog items. Returns: Callable[[~.ListCatalogItemsRequest], Awaitable[~.ListCatalogItemsResponse]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_catalog_items" not in self._stubs: self._stubs["list_catalog_items"] = self.grpc_channel.unary_unary( "/google.cloud.recommendationengine.v1beta1.CatalogService/ListCatalogItems", request_serializer=catalog_service.ListCatalogItemsRequest.serialize, response_deserializer=catalog_service.ListCatalogItemsResponse.deserialize, ) return self._stubs["list_catalog_items"] @property def update_catalog_item( self, ) -> Callable[ [catalog_service.UpdateCatalogItemRequest], Awaitable[catalog.CatalogItem] ]: r"""Return a callable for the update catalog item method over gRPC. Updates a catalog item. Partial updating is supported. Non-existing items will be created. Returns: Callable[[~.UpdateCatalogItemRequest], Awaitable[~.CatalogItem]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "update_catalog_item" not in self._stubs: self._stubs["update_catalog_item"] = self.grpc_channel.unary_unary( "/google.cloud.recommendationengine.v1beta1.CatalogService/UpdateCatalogItem", request_serializer=catalog_service.UpdateCatalogItemRequest.serialize, response_deserializer=catalog.CatalogItem.deserialize, ) return self._stubs["update_catalog_item"] @property def delete_catalog_item( self, ) -> Callable[ [catalog_service.DeleteCatalogItemRequest], Awaitable[empty_pb2.Empty] ]: r"""Return a callable for the delete catalog item method over gRPC. Deletes a catalog item. 
Returns: Callable[[~.DeleteCatalogItemRequest], Awaitable[~.Empty]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "delete_catalog_item" not in self._stubs: self._stubs["delete_catalog_item"] = self.grpc_channel.unary_unary( "/google.cloud.recommendationengine.v1beta1.CatalogService/DeleteCatalogItem", request_serializer=catalog_service.DeleteCatalogItemRequest.serialize, response_deserializer=empty_pb2.Empty.FromString, ) return self._stubs["delete_catalog_item"] @property def import_catalog_items( self, ) -> Callable[ [import_.ImportCatalogItemsRequest], Awaitable[operations_pb2.Operation] ]: r"""Return a callable for the import catalog items method over gRPC. Bulk import of multiple catalog items. Request processing may be synchronous. No partial updating supported. Non-existing items will be created. Operation.response is of type ImportResponse. Note that it is possible for a subset of the items to be successfully updated. Returns: Callable[[~.ImportCatalogItemsRequest], Awaitable[~.Operation]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "import_catalog_items" not in self._stubs: self._stubs["import_catalog_items"] = self.grpc_channel.unary_unary( "/google.cloud.recommendationengine.v1beta1.CatalogService/ImportCatalogItems", request_serializer=import_.ImportCatalogItemsRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, ) return self._stubs["import_catalog_items"] __all__ = ("CatalogServiceGrpcAsyncIOTransport",)
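For orientation, a minimal sketch of using this transport through the generated async client. It assumes CatalogServiceAsyncClient is exported at the package level as in other GAPIC libraries, that credentials come from the environment, and that the parent path is a placeholder.

import asyncio
from google.cloud import recommendationengine_v1beta1 as re_v1beta1

async def list_items():
    # Selecting the gRPC AsyncIO transport implementation defined above by name.
    client = re_v1beta1.CatalogServiceAsyncClient(transport="grpc_asyncio")
    parent = "projects/my-project/locations/global/catalogs/default_catalog"  # placeholder
    pager = await client.list_catalog_items(parent=parent)
    async for item in pager:
        print(item.id)

asyncio.run(list_items())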
Today I’m revealing my project for the Mystery Fabric Ruffle Challenge hosted by See Kate Sew. A month ago I received a package in the mail. I was delighted to see it was from Fashionable Fabrics (thank you Fashionable Fabrics!!!). My mystery fabric is called “pearl bracelet” by Andover Fabrics, and the color is called persimmon. The challenge was to make something with a ruffle using this fabric. After much thought I decided to use this lovely vintage wool for a bag, and use the mystery fabric as a lining. I love the persimmon color next to the gray wool. I don’t know if you can tell, but the wool has flecks of red and orange in it. Once I finished the bag I knew I needed a matching zipper clutch to go with it. (If you need a tutorial for a zipper pouch, there is a great one HERE.) I’m currently using it for Ellie’s Epipen. The zipper pouch has a gorgeous lining as well, because really, why not? This lovely floral came to me via Jessica of Craftiness Is Not Optional after I participated in Vintage May. It looks a lot like my Nana’s wallpaper, which makes my heart happy. The polkadot zipper is by Coats and Clark. Thank you for visiting me today on the blog hop, now go check out these other great projects! Alida, I love, love, love this bag! Can’t wait for the tutorial. Super cute bag – I want one! I love it! I keep telling myself I’ll make myself a bag, and i haven’t (booo, i rarely get in any selfish sewing these days). Great job! I LOVE THAT BAG! Adorable. I am absolutely in LOVE with that bag! Wonderful job all around – that lining is fantastic with the grey! Love it! I literally won’t buy a bag based on the lining. I want a pretty, happy surprise when I open my njetral, black purse. If I could sew I would do the exact same thing. And if you’re wondering why I read your blog if I don’t sew, it’s because I dream of sewing and admire your talent. I love your bag. I want one! Have you posted the tutorial for the bag? I wondered if you were still planning on posting a tutorial? My sister-in-law fell in love with this bag & she has a birthday coming up soon! thanks so much – love the bag! I love your bag. Your really a cool gal. I saw today you showing how to wrap a scarf around short hair. Thanks for being you. Say hi to me sometime. Thanks I’m Ruthann an EVa girl.
# Copyright (c) 2017-2019 Uber Technologies, Inc. # SPDX-License-Identifier: Apache-2.0 import inspect import io import pytest import pickle import torch import pyro.distributions as dist from pyro.distributions.torch_distribution import TorchDistributionMixin from tests.common import xfail_param # Collect distributions. BLACKLIST = [ dist.TorchDistribution, dist.ExponentialFamily, dist.OMTMultivariateNormal, ] XFAIL = { dist.Gumbel: xfail_param(dist.Gumbel, reason='cannot pickle weakref'), } DISTRIBUTIONS = [d for d in dist.__dict__.values() if isinstance(d, type) if issubclass(d, TorchDistributionMixin) if d not in BLACKLIST] DISTRIBUTIONS.sort(key=lambda d: d.__name__) DISTRIBUTIONS = [XFAIL.get(d, d) for d in DISTRIBUTIONS] # Provide default args if Dist(1, 1, ..., 1) is known to fail. ARGS = { dist.AVFMultivariateNormal: [torch.zeros(3), torch.eye(3), torch.rand(2, 4, 3)], dist.Bernoulli: [0.5], dist.Binomial: [2, 0.5], dist.Categorical: [torch.ones(2)], dist.Delta: [torch.tensor(0.)], dist.Dirichlet: [torch.ones(2)], dist.GaussianScaleMixture: [torch.ones(2), torch.ones(3), torch.ones(3)], dist.Geometric: [0.5], dist.Independent: [dist.Normal(torch.zeros(2), torch.ones(2)), 1], dist.LowRankMultivariateNormal: [torch.zeros(2), torch.ones(2, 2), torch.ones(2)], dist.MaskedMixture: [torch.tensor([1, 0]).bool(), dist.Normal(0, 1), dist.Normal(0, 2)], dist.MixtureOfDiagNormals: [torch.ones(2, 3), torch.ones(2, 3), torch.ones(2)], dist.MixtureOfDiagNormalsSharedCovariance: [torch.ones(2, 3), torch.ones(3), torch.ones(2)], dist.Multinomial: [2, torch.ones(2)], dist.MultivariateNormal: [torch.ones(2), torch.eye(2)], dist.OneHotCategorical: [torch.ones(2)], dist.RelaxedBernoulli: [1.0, 0.5], dist.RelaxedBernoulliStraightThrough: [1.0, 0.5], dist.RelaxedOneHotCategorical: [1., torch.ones(2)], dist.RelaxedOneHotCategoricalStraightThrough: [1., torch.ones(2)], dist.TransformedDistribution: [dist.Normal(0, 1), torch.distributions.ExpTransform()], dist.Uniform: [0, 1], dist.VonMises3D: [torch.tensor([1., 0., 0.])], } @pytest.mark.parametrize('Dist', DISTRIBUTIONS) def test_pickle(Dist): if Dist in ARGS: args = ARGS[Dist] else: # Optimistically try to initialize with Dist(1, 1, ..., 1). try: # Python 3.6+ spec = list(inspect.signature(Dist.__init__).parameters.values()) nargs = sum(1 for p in spec if p.default is p.empty) - 1 except AttributeError: # Python 2.6-3.5 spec = inspect.getargspec(Dist.__init__) nargs = len(spec.args) - 1 - (len(spec.defaults) if spec.defaults else 0) args = (1,) * nargs try: dist = Dist(*args) except Exception: pytest.skip(msg='cannot construct distribution') buffer = io.BytesIO() # Note that pickling torch.Size() requires protocol >= 2 torch.save(dist, buffer, pickle_protocol=pickle.HIGHEST_PROTOCOL) buffer.seek(0) deserialized = torch.load(buffer) assert isinstance(deserialized, Dist)
Adam Hartung’s remarkably consistent track record is built on applying his two proprietary frameworks for predicting business success: The Phoenix Principle and the Status Quo Risk Management Playbook. His keynote presentations and workshops make trends clear and help audiences chart a successful course to a more profitable future in our fast-changing world. Adam was formerly an executive with PepsiCo and DuPont and is a Boston Consulting Group alumnus. Adam received his MBA from the Harvard Business School with Distinction. He now travels the globe giving keynotes and leading risk management workshops, sharing his insights into how companies and executives can succeed. Through a rich collection of case studies and stories, Adam keeps his presentation relevant and focused on your industry, your business, your marketplace. He reveals four steps to innovation that have been proven successful over and over again in a wide variety of industries. Your team and your employees will understand what it takes to turn an organization in a new direction and seize the opportunities in a turbulent market in order to launch new growth and innovation. Beating the competition through marketplace disruption. The four steps to innovation management, implementation, growth and success post-2009.
# Mantid Repository : https://github.com/mantidproject/mantid # # Copyright &copy; 2017 ISIS Rutherford Appleton Laboratory UKRI, # NScD Oak Ridge National Laboratory, European Spallation Source # & Institut Laue - Langevin # SPDX - License - Identifier: GPL - 3.0 + # This file is part of the mantidqt package # # from __future__ import (print_function, absolute_import, unicode_literals) import re from qtpy.QtCore import Qt, Signal, Slot from mantid.api import AlgorithmManager from mantid.simpleapi import mtd from mantidqt.utils.qt import import_qt from .interactive_tool import FitInteractiveTool BaseBrowser = import_qt('.._common', 'mantidqt.widgets', 'FitPropertyBrowser') class FitPropertyBrowserBase(BaseBrowser): def __init__(self, parent=None): super(FitPropertyBrowserBase, self).__init__(parent) self.init() class FitPropertyBrowser(FitPropertyBrowserBase): """ A wrapper around C++ FitPropertyBrowser with added graphical peak editing tool. """ closing = Signal() pattern_fittable_curve = re.compile(r'(.+?): spec (\d+)') def __init__(self, canvas, toolbar_state_checker, parent=None): super(FitPropertyBrowser, self).__init__(parent) self.init() self.setFeatures(self.DockWidgetMovable) self.canvas = canvas self.workspace_labels = [] # The toolbar state checker to be passed to the peak editing tool self.toolbar_state_checker = toolbar_state_checker # The peak editing tool self.tool = None # Pyplot lines for the fit result curves self.fit_result_lines = [] # Pyplot line for the guess curve self.guess_line = None # Map the indices of the markers in the peak editing tool to the peak function prefixes (in the form f0.f1...) self.peak_ids = {} self._connect_signals() def _connect_signals(self): self.startXChanged.connect(self.move_start_x) self.endXChanged.connect(self.move_end_x) self.algorithmFinished.connect(self.fitting_done_slot) self.changedParameterOf.connect(self.peak_changed_slot) self.removeFitCurves.connect(self.clear_fit_result_lines_slot, Qt.QueuedConnection) self.plotGuess.connect(self.plot_guess_slot, Qt.QueuedConnection) self.functionChanged.connect(self.function_changed_slot, Qt.QueuedConnection) @classmethod def can_fit_spectra(cls, labels): """ Determine if the spectra referred to by the plot labels can be used in this fit browser. :param labels: A list of curve labels which can identify spectra in a workspace. :return: True or False """ return any(map(lambda s: re.match(cls.pattern_fittable_curve, s), labels)) def closeEvent(self, event): """ Emit self.closing signal used by figure manager to put the menu buttons in correct states """ self.closing.emit() BaseBrowser.closeEvent(self, event) def show(self): """ Override the base class method. Initialise the peak editing tool. 
""" allowed_spectra = {} for label in self.workspace_labels: a_match = re.match(self.pattern_fittable_curve, label) if a_match: name, spec = a_match.group(1), int(a_match.group(2)) spec_list = allowed_spectra.get(name, []) spec_list.append(spec) allowed_spectra[name] = spec_list if len(allowed_spectra) > 0: for name, spec_list in allowed_spectra.items(): self.addAllowedSpectra(name, spec_list) self.tool = FitInteractiveTool(self.canvas, self.toolbar_state_checker, current_peak_type=self.defaultPeakType()) self.tool.fit_start_x_moved.connect(self.setStartX) self.tool.fit_end_x_moved.connect(self.setEndX) self.tool.peak_added.connect(self.peak_added_slot) self.tool.peak_moved.connect(self.peak_moved_slot) self.tool.peak_fwhm_changed.connect(self.peak_fwhm_changed_slot) self.tool.peak_type_changed.connect(self.setDefaultPeakType) self.tool.add_background_requested.connect(self.add_function_slot) self.tool.add_other_requested.connect(self.add_function_slot) self.setXRange(self.tool.fit_start_x.x, self.tool.fit_end_x.x) super(FitPropertyBrowser, self).show() self.setPeakToolOn(True) self.canvas.draw() def hide(self): """ Override the base class method. Hide the peak editing tool. """ if self.tool is not None: self.tool.fit_start_x_moved.disconnect() self.tool.fit_end_x_moved.disconnect() self.tool.disconnect() self.tool = None self.canvas.draw() super(FitPropertyBrowser, self).hide() self.setPeakToolOn(False) def move_start_x(self, xd): """ Let the tool know that StartX has changed. :param xd: New value of StartX """ if self.tool is not None: self.tool.move_start_x(xd) def move_end_x(self, xd): """ Let the tool know that EndX has changed. :param xd: New value of EndX """ if self.tool is not None: self.tool.move_end_x(xd) def clear_fit_result_lines(self): """ Delete the fit curves. """ for lin in self.fit_result_lines: try: lin.remove() except ValueError: # workspace replacement could invalidate these references pass self.fit_result_lines = [] self.update_legend() def get_lines(self): """ Get all lines in the connected plot. """ return self.get_axes().get_lines() def get_axes(self): """ Get the pyplot's Axes object. """ return self.canvas.figure.get_axes()[0] def update_legend(self): """ This needs to be called to update plot's legend after removing lines. """ axes = self.get_axes() if axes.legend_ is not None: axes.legend() def plot_guess(self): """ Plot the guess curve. """ from mantidqt.plotting.functions import plot fun = self.getFittingFunction() ws_name = self.workspaceName() if fun == '' or ws_name == '': return ws_index = self.workspaceIndex() out_ws_name = '{}_guess'.format(ws_name) alg = AlgorithmManager.createUnmanaged('EvaluateFunction') alg.setChild(True) alg.initialize() alg.setProperty('Function', fun) alg.setProperty('InputWorkspace', ws_name) alg.setProperty('WorkspaceIndex', ws_index) alg.setProperty('OutputWorkspace', out_ws_name) alg.execute() out_ws = alg.getProperty('OutputWorkspace').value plot([out_ws], wksp_indices=[1], fig=self.canvas.figure, overplot=True, plot_kwargs={'label': out_ws_name}) for lin in self.get_lines(): if lin.get_label().startswith(out_ws_name): self.guess_line = lin self.setTextPlotGuess('Remove Guess') self.canvas.draw() def remove_guess(self): """ Remove the guess curve from the plot. """ if self.guess_line is None: return self.guess_line.remove() self.guess_line = None self.update_legend() self.setTextPlotGuess('Plot Guess') self.canvas.draw() def update_guess(self): """ Update the guess curve. 
""" if self.guess_line is None: return self.remove_guess() self.plot_guess() def add_to_menu(self, menu): """ Add the relevant actions to a menu :param menu: A menu to hold the actions :return: The menu passed to us """ if self.tool is not None: self.tool.add_to_menu(menu, peak_names=self.registeredPeaks(), current_peak_type=self.defaultPeakType(), background_names=self.registeredBackgrounds(), other_names=self.registeredOthers()) return menu @Slot() def clear_fit_result_lines_slot(self): """ Clear the fit lines. """ self.clear_fit_result_lines() if self.tool is not None: self.canvas.draw() @Slot(str) def fitting_done_slot(self, name): """ This is called after Fit finishes to update the fit curves. :param name: The name of Fit's output workspace. """ from mantidqt.plotting.functions import plot ws = mtd[name] # Keep local copy of the original lines original_lines = self.get_lines() self.clear_fit_result_lines() plot([ws], wksp_indices=[1, 2], fig=self.canvas.figure, overplot=True) name += ':' for lin in self.get_lines(): if lin.get_label().startswith(name): self.fit_result_lines.append(lin) # Add properties back to the lines new_lines = self.get_lines() for new_line, old_line in zip(new_lines, original_lines): new_line.update_from(old_line) # Now update the legend to make sure it changes to the old properties self.get_axes().legend().draggable() @Slot(int, float, float, float) def peak_added_slot(self, peak_id, centre, height, fwhm): """ Respond to a signal from the peak editing tool that a peak is added. Add a peak function to the browser. :param peak_id: An index of a peak marker in the peak editing tool. :param centre: Peak centre :param height: Peak height (peak maximum) :param fwhm: Peak's full width at half maximum """ fun = self.addFunction(self.defaultPeakType()) self.setPeakCentreOf(fun, centre) self.setPeakHeightOf(fun, height) self.setPeakFwhmOf(fun, fwhm) self.peak_ids[peak_id] = fun @Slot(int, float, float) def peak_moved_slot(self, peak_id, centre, height): """ Respond to the peak editing tool moving peak's top to a new position. :param peak_id: Peak's index/id :param centre: New peak centre :param height: New peak height """ fun = self.peak_ids[peak_id] self.setPeakCentreOf(fun, centre) self.setPeakHeightOf(fun, height) self.update_guess() @Slot(int, float) def peak_fwhm_changed_slot(self, peak_id, fwhm): """ Respond to the peak editing tool changing peak's width. :param peak_id: Peak's index/id :param fwhm: New peak full width at half maximum. """ fun = self.peak_ids[peak_id] self.setPeakFwhmOf(fun, fwhm) self.update_guess() @Slot(str) def peak_changed_slot(self, fun): """ Update the peak marker in the peak editing tool after peak's parameters change in the browser. :param fun: A prefix of the function that changed. """ for peak_id, prefix in self.peak_ids.items(): if prefix == fun: self.tool.update_peak(peak_id, self.getPeakCentreOf(prefix), self.getPeakHeightOf(prefix), self.getPeakFwhmOf(prefix)) self.update_guess() @Slot(str) def add_function_slot(self, fun_name): """ Respond to a signal from the peak editing tool to add a new function. :param fun_name: A registered name of a fit function """ self.addFunction(fun_name) @Slot() def plot_guess_slot(self): """ Toggle the guess plot. """ if self.guess_line is None: self.plot_guess() else: self.remove_guess() @Slot() def function_changed_slot(self): """ Update the peak editing tool after function structure has changed in the browser: functions added and/or removed. 
""" peaks_to_add = [] peaks = {v: k for k, v in self.peak_ids.items()} for prefix in self.getPeakPrefixes(): c, h, w = self.getPeakCentreOf(prefix), self.getPeakHeightOf(prefix), self.getPeakFwhmOf(prefix) if prefix in peaks: self.tool.update_peak(peaks[prefix], c, h, w) del peaks[prefix] else: peaks_to_add.append((prefix, c, h, w)) for i in peaks.values(): del self.peak_ids[i] need_update_markers = len(peaks_to_add) > 0 if not need_update_markers: plist = self.tool.get_peak_list() for i, c, h, w in plist: prefix = self.peak_ids.get(i) if prefix is None: need_update_markers = True break if self.getPeakCentreOf(prefix) != c or self.getPeakHeightOf(prefix) != h or\ self.getPeakFwhmOf(prefix) != w: need_update_markers = True break if need_update_markers: peak_ids, peak_updates = self.tool.update_peak_markers(self.peak_ids.keys(), peaks_to_add) self.peak_ids.update(peak_ids) for prefix, c, h, w in peak_updates: self.setPeakCentreOf(prefix, c) self.setPeakHeightOf(prefix, h) self.setPeakFwhmOf(prefix, w) self.update_guess()
These pieces are no shine and all style. ​The little black dress or LBD has been a staple in women's wardrobes for decades. Black has always held a sacred place in fashion—but we can't overlook its parallel role in interior design. From fixtures to floors and furniture, black home design elements add an element of sleek, modern sophistication. Lately we've noticed that the latest iteration of black to sweep the fashion and beauty world—matte black—has influenced interior design as well. DIY enthusiasts have embraced the ability of a dull, shine-less black paint to completely makeover any piece in their home, and designers and brands have adapted, too. The way we see it, any room in your home can be enhanced and upgraded with some matte black elements. You can go big—we found the most gorgeous matte black claw foot tub—or small (think flatware and table clocks), just don't miss out on this trend. Ahead we hand-selected 15 matte black finds for your home. This flatware is so chic, you'd feel decadent using them to eat leftover takeout. Therefore, it should go without saying that this 12-piece set will add some major style points to your next dinner party tablescape. Want to stop your houseguests dead in their tracks? This matte black fridge will instantly become the centerpiece of your kitchen, and has to be one of the boldest uses of matte black in home design... Maybe ever?! There was a time not so long ago when ceiling fans were the most reviled element of any "fixer upper" room. Thanks to Pottery Barn, though, the lowly fan has gotten a sleek and stylish makeover. Staying cool never looked so good. It's one of our favorite "design secrets" that Target is a haven of stylish, affordable home finds. Perhaps more than anything, we love their in house brands' collections of lighting and lamps. Case-in-point, this double table lamp in copper and matte black. While not exactly purposed for travel, this vintage-inspired trunk would make the perfect design accent (and storage container) in any room. Stack it on top of another vintage trunk or decorate the top with found objects and candles. At your breakfast nook or around the dining table, these Eames replica chairs are bound to make an impression. To give each on some individual flair, add a sheepskin throw or printed throw pillow to the seat. A wedding registry, staple, it was a revelation when KitchenAid began making their classic mixer in a rainbow of colors. Naturally, our favorite is this sleek chrome and matte black iteration. Hot hot hot! This has to be the most stylish kettle in existence, and no wonder that it's specially designed for pour-over coffee. Grand and rustic—those are the perfect words to describe this matte black and rope chandelier from Elk Pearce. The exposed bulbs give it a modern feel that could easily translate from cozy cabin to cool compound. Add a little edge to your usual dinner table setting with the 'Marin' collection from Crate & Barrel. Mix these matte black plates, bowls and mugs with your usual white set for a film noir-feeling table with a dash of whimsy. Taking a page out of KitchenAid's book, Betty Crocker's 2-slice toaster in matte black would make an edgy addition to any kitchen counter. If your backsplash or bathroom floor is overdue for an update, try a touch of matte black tile. We love how this mosaic tile looks contrasted with stark white or marble countertops. Dip your toe in matte black trend with this simple and striking table clock from Kaede. 
If nothing else, it'll make hitting 'snooze' a more pleasant experience.
# -*- coding: utf-8 -*- # Taboot - Client utility for performing deployments with Func. # Copyright © 2009,2011, Red Hat, Inc. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. class _FileLikeOutputObject(object): """ A file-like parent class. """ import exceptions import time as _time defaults = None starttime = None def __init__(self, *args, **kwargs): """ Creates an instance of a file-like object. :Parameters: - `args`: all non-keyword arguments. - `kwargs`: all keyword arguments. """ import ConfigParser import os.path if _FileLikeOutputObject.defaults is None: if os.path.expanduser("~/.taboot.conf"): _FileLikeOutputObject.defaults = ConfigParser.ConfigParser() _FileLikeOutputObject.defaults.read( os.path.expanduser("~/.taboot.conf")) # Only set the start time once, not for each logger instance if _FileLikeOutputObject.starttime is None: import datetime _FileLikeOutputObject.starttime = datetime.datetime.today() self._pos = 0L self._closed = False self._setup(*args, **kwargs) def _setup(self, *args, **kwargs): """ Implementation specific setup. :Parameters: - `args`: all non-keyword arguments. - `kwargs`: all keyword arguments. """ pass def flush(self): """ We are not buffering so we always just return None. """ return None def read(self, *args, **kwargs): """ We are an output only file-like object. Raise exception. :Parameters: - `args`: all non-keyword arguments. - `kwargs`: all keyword arguments. """ raise self.exceptions.NotImplementedError('Object for output only.') def tell(self): """ Returns the position of the file-like object. """ return self._pos def truncate(self, size): """ We are an output only file-like object. Raise exception. :Parameters: - `size`: size to truncate to. """ raise self.exceptions.NotImplementedError( 'This does not support truncate.') def writelines(self, sequence): """ Writes a sequence of lines. :Parameters: - `sequence`: iterable sequence of data to write. """ for item in sequence: self.write(item) def write(self, item): """ Writer wrapper (not rapper, beav). Simply calls _write which is implementation specific and updates the position. :Parameters: - `item`: the item to write. """ self._write(item) self._pos += 1 def _write(self, item): """ Implementation of writing data. :Parameters: - `item`: the item to write. """ raise self.exceptions.NotImplementedError( '_write must be overriden.') def close(self): """ Close wrapper (again, not rapper, beav). Simply calls _close which is implementation specific and updates the closed property. """ self._close() self._closed = True def _close(self): """ Implementation of closing the file-like object. By default nothing occurs. 
""" pass # Read aliases readline = read readlines = read xreadlines = read seek = read # Read-only Properties closed = property(lambda self: self._closed) timestamp = property(lambda self: self._time.strftime( "%Y-%m-%d %H:%M:%S", self._time.localtime())) class CLIOutput(_FileLikeOutputObject): """ Output a :class:`taboot.tasks.TaskResult` to the command line with pretty formatting and colors. """ def _setup(self, host, task): """ Implementation specific setup for outputting to the CLI. :Parameters: - `host`: name of the host - `task`: name of the task """ import Colors import sys self._c = Colors.Colors() self._sys = sys self._sys.stdout.write('%s:\n' % ( self._c.format_string(host, 'blue'))) self._sys.stdout.write('%s Starting Task[%s]\n' % ( self.timestamp, self._c.format_string(task, 'white'))) def _write(self, result): """ Implementation of writing to the CLI. :Parameters: - `result`: result object to inspect and write """ import types # Set output color output_color = 'red' if result.success: output_color = 'green' self._sys.stdout.write("%s:\n" % ( self._c.format_string(result.host, 'blue'))) self._sys.stdout.write("%s Finished Task[%s]:\n" % ( self.timestamp, self._c.format_string( result.task, output_color))) if isinstance(result.output, types.ListType): for r in result.output: self._sys.stdout.write("%s\n" % self._c.format_string( r.strip(), output_color)) else: self._sys.stdout.write("%s\n" % self._c.format_string( result.output.strip(), output_color)) class LogOutput(_FileLikeOutputObject): """ Output a :class:`taboot.tasks.TaskResult` to a logfile. """ def _setup(self, host, task, logfile='taboot.log'): """ Implementation specific setup for outputting to a log. :Parameters: - `logfile`: name of the logfile to write to. """ self._logfile = logfile if self._logfile in ('-', 'stdout', '1'): import sys self._log_fd = sys.stdout else: self._log_fd = open(logfile, 'a') self._log_fd.write('%s:\n%s Starting Task[%s]\n\n' % ( host, self.timestamp, task)) def _write(self, result): """ Implementation of writing to a log. :Parameters: - `result`: result object to inspect and write """ import types if result.success: success_str = 'OK' else: success_str = 'FAIL' self._log_fd.write("%s:\n%s Finished Task[%s]: %s\n" % ( result.host, self.timestamp, result.task, success_str)) if isinstance(result.output, types.ListType): for r in result.output: self._log_fd.write("%s\n\n" % r.strip()) else: self._log_fd.write("%s\n\n" % result.output.strip()) class EmailOutput(_FileLikeOutputObject): """ Output a :class:`taboot.tasks.TaskResult` to a logfile. """ def _setup(self, to_addr, from_addr='[email protected]'): """ Implementation specific setup for outputting to a log. :Parameters: - `to_addr`: who to send the email to. - `from_addr`: who the email is from. """ try: import cStringIO as StringIO except ImportError, ie: import StringIO self._to_addr = to_addr self._from_addr = from_addr self._buffer = StringIO.StringIO() def _write(self, result): """ Implementation of writing out to an email. :Parameters: - `result`: result object to inspect and write """ if result.success: success_str = 'OK' else: success_str = 'FAIL' self._buffer.write("%s: %s" % (task_result.task, success_str)) def flush(self): """ Flushing sends the email with the buffer. 
""" import smtplib from email.mime.text import MIMEText self._buffer.flush() msg = self.MIMEText(self._buffer.read()) msg['Subject'] = task_result.host msg['From'] = self._from_addr msg['To'] = self._to_addr smtp = self.smtplib.SMTP() smtp.connect() smtp.sendmail(self._from_addr, [self._to_addr], msg.as_string()) smtp.close() def __del__(self): """ If the buffer is not empty before destroying, flush. """ if self._buffer.pos < self._buffer.len: self.flush() class HTMLOutput(_FileLikeOutputObject): """ Output a :class:`taboot.tasks.TaskResult` to the command line with pretty formatting and colors. """ logfile_path = None def _expand_starttime(self, param): """ Expand any instances of "%s" in `param` """ if '%s' in param: p = param % HTMLOutput.starttime return p.replace(" ", "-") else: return param def _setup(self, host, task, logfile="taboot-%s.html", destdir="."): """ Implementation specific setup for outputting to an HTML file. :Parameters: - `host`: name of the host - `task`: name of the task - `logfile`: name of the file to log to, '%s' is substituted with a datestamp - `destdir`: directory in which to save the log file to """ import Colors import sys import os.path import os _default_logfile = "taboot-%s.html" _default_destdir = "." # Pick if the parameter is changed # Pick if above is false and logfile is set in defaults # Else, use parameter if not logfile == _default_logfile: _logfile = logfile elif HTMLOutput.defaults is not None and \ HTMLOutput.defaults.has_option("HTMLOutput", "logfile"): _logfile = HTMLOutput.defaults.get("HTMLOutput", "logfile") else: _logfile = logfile # Expand %s into a time stamp if necessary _logfile = self._expand_starttime(_logfile) if not destdir == _default_destdir: _destdir = destdir elif HTMLOutput.defaults is not None and \ HTMLOutput.defaults.has_option("HTMLOutput", "destdir"): _destdir = HTMLOutput.defaults.get("HTMLOutput", "destdir") else: _destdir = destdir # Figured it all out, now we join them together! self._logfile_path = os.path.join(_destdir, _logfile) if not os.path.exists(_destdir): os.makedirs(_destdir, 0755) self._c = Colors.HTMLColors() self._log_fd = open(self._logfile_path, 'a') # Lets only print this when it is set or changed if HTMLOutput.logfile_path is None or \ not HTMLOutput.logfile_path == self._logfile_path: sys.stderr.write("Logging HTML Output to %s\n" % \ self._logfile_path) HTMLOutput.logfile_path = self._logfile_path sys.stderr.flush() # Log the start of this task name = self._fmt_anchor(self._fmt_hostname(host)) start_msg = """<p><tt>%s:</tt></p> <p><tt>%s Starting Task[%s]\n</tt>""" % (name, self.timestamp, task) self._log_fd.write(start_msg) self._log_fd.flush() def _fmt_anchor(self, text): """ Format an #anchor and a clickable link to it """ h = hash(self.timestamp) anchor_str = "<a name='%s' href='#%s'>%s</a>" % (h, h, text) return anchor_str def _fmt_hostname(self, n): """ Standardize the hostname formatting """ return "<b>%s</b>" % self._c.format_string(n, 'blue') def _write(self, result): """ DO IT! """ import types import sys name = self._fmt_hostname(result.host) if result.success: success_str = 'OK' else: success_str = 'FAIL' self._log_fd.write("<p><tt>%s:\n</tt></p>\n<p><tt>%s "\ "Finished Task[%s]: %s</tt></p>\n" % (name, self.timestamp, result.task, success_str)) if isinstance(result.output, types.ListType): for r in result.output: self._log_fd.write("<pre>%s</pre>\n<br />\n<br />\n" % r.strip()) else: self._log_fd.write("<pre>%s</pre>\n<br />\n<br />\n" % result.output.strip()) self._log_fd.flush()
Fonesia - Free Font - Dealjumbo.com — Discounted design bundles with extended license! Please take a look at other amazing artworks from Arwan Sutanto here!
# python experimental tests for Husky
import numpy as np
from numpy import sin, cos, pi, zeros


def calibration_cost_function(parameters):
    """Occupancy-grid cost for a candidate (x, y) sensor mounting offset.

    Every range reading is projected into a fixed world-frame grid; the cost is
    the number of occupied cells, which is minimised when repeated scans of the
    same surfaces fall into the same cells (i.e. when the offsets are correct).
    """
    yaw_bound = 1 * pi / 180          # skip poses where the heading is changing quickly
    matrix_limits = 20                # grid covers +/- 10 m in x and y
    matrix_resolution = .005          # 5 mm cells
    number_of_rows = int(matrix_limits / matrix_resolution)
    cost_matrix = zeros((number_of_rows, number_of_rows))
    env_data = np.load('env.npy')[1:]
    x_offset_calibrate, y_offset_calibrate = parameters
    yaw_calibrate = pi / 180 * 0      # yaw offset held fixed at zero here

    for i in range(1, len(env_data) - 1):
        if len(env_data[i]) == 0:
            continue
        x, y, yaw = env_data[i][0], env_data[i][1], env_data[i][2]
        # skip samples next to a gap or where the heading changed more than the bound
        if (len(env_data[i - 1]) == 0 or len(env_data[i + 1]) == 0
                or abs(yaw - env_data[i - 1][2]) > yaw_bound
                or abs(yaw - env_data[i + 1][2]) > yaw_bound):
            continue
        readings = env_data[i][3]
        for j in range(len(readings)):
            # polar range/bearing reading -> sensor frame
            x_temp = readings[j][0] * cos(-readings[j][1])
            y_temp = readings[j][0] * sin(-readings[j][1])
            # sensor frame -> robot frame using the candidate mounting offset
            x_temp2 = x_temp * cos(yaw_calibrate) - y_temp * sin(yaw_calibrate) + x_offset_calibrate
            y_temp2 = y_temp * cos(yaw_calibrate) + x_temp * sin(yaw_calibrate) + y_offset_calibrate
            # robot frame -> world frame using the recorded pose
            readings_x = x_temp2 * cos(yaw) - y_temp2 * sin(yaw) + x
            readings_y = y_temp2 * cos(yaw) + x_temp2 * sin(yaw) + y
            if (-matrix_limits / 2 < readings_x < matrix_limits / 2
                    and -matrix_limits / 2 < readings_y < matrix_limits / 2):
                cost_matrix[int((readings_x + matrix_limits / 2) / matrix_resolution),
                            int((readings_y + matrix_limits / 2) / matrix_resolution)] = 1

    return cost_matrix.sum()
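# A hedged sketch of how the cost above might be minimised to recover the
# sensor mounting offsets. It assumes env.npy is present and uses a
# derivative-free optimiser, since the occupied-cell count is piecewise
# constant and has no useful gradient.
if __name__ == '__main__':
    from scipy.optimize import minimize

    initial_guess = [0.2, -0.064]   # (x_offset, y_offset) in metres; the starting point is a guess
    result = minimize(calibration_cost_function, initial_guess,
                      method='Nelder-Mead', options={'xatol': 1e-3, 'fatol': 0.5})
    print('best offsets:', result.x, 'occupied cells:', result.fun)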
After a slowdown during the Global Financial Crisis and the 2011 Tohoku earthquake and tsunami, it seems that demand for real estate in Niseko is once again booming, with sales nearly doubling in the last twelve months. Michael Davenport from Niseko Consulting has shared with us that it has been an incredibly busy season so far, with lots of inquiries about properties and apartments in the village. However, it’s not just speculative inquiries, they have backed it up with sales, including houses and land in Hirafu village, and big plots just outside of town. If you’ve ever dreamed about owning a little something (or a big something) in Niseko, you should have a look at the thorough Niseko investment guide that Niseko Consulting have put together. Even if you’re not interested in purchasing real estate in Niseko right now, it’s an interesting insight into the area.
"""Support for Ring Doorbell/Chimes.""" import logging from requests.exceptions import ConnectTimeout, HTTPError import voluptuous as vol from homeassistant.const import CONF_PASSWORD, CONF_USERNAME import homeassistant.helpers.config_validation as cv REQUIREMENTS = ['ring_doorbell==0.2.2'] _LOGGER = logging.getLogger(__name__) ATTRIBUTION = "Data provided by Ring.com" NOTIFICATION_ID = 'ring_notification' NOTIFICATION_TITLE = 'Ring Setup' DATA_RING = 'ring' DOMAIN = 'ring' DEFAULT_CACHEDB = '.ring_cache.pickle' DEFAULT_ENTITY_NAMESPACE = 'ring' CONFIG_SCHEMA = vol.Schema({ DOMAIN: vol.Schema({ vol.Required(CONF_USERNAME): cv.string, vol.Required(CONF_PASSWORD): cv.string, }), }, extra=vol.ALLOW_EXTRA) def setup(hass, config): """Set up the Ring component.""" conf = config[DOMAIN] username = conf[CONF_USERNAME] password = conf[CONF_PASSWORD] try: from ring_doorbell import Ring cache = hass.config.path(DEFAULT_CACHEDB) ring = Ring(username=username, password=password, cache_file=cache) if not ring.is_connected: return False hass.data['ring'] = ring except (ConnectTimeout, HTTPError) as ex: _LOGGER.error("Unable to connect to Ring service: %s", str(ex)) hass.components.persistent_notification.create( 'Error: {}<br />' 'You will need to restart hass after fixing.' ''.format(ex), title=NOTIFICATION_TITLE, notification_id=NOTIFICATION_ID) return False return True
Horse Riding Poppy Fields – The Icelandic poppies are currently blooming in Shinrin Park. Each year Shinrin Park holds a horse trekking event during the period that the poppies are in bloom, and it also runs horse trekking events at other times of the year. This year the poppy field horse riding experience is on both Saturday April 20th and Sunday April 21st. During this time you can also experience feeding a horse for just 100 yen. These types of horse riding events are popular, as there are only a handful of places in Saitama where you can casually experience riding a horse for a short period of time. Zoos such as Saitama Children’s Zoo and Chikozan Park Zoo have a daily horse riding event, and Midori no Mura in Ogano has a monthly event. Children over four can ride by themselves; children under four need to ride with an adult. It costs 600 yen per ride. The event takes place a 20-minute walk from both the SOUTH and the CENTRAL gates. You cannot book in advance, so it is advised that you arrive early. The nearest train station is Shinrin Koen Station on the Tobu Tojo line. You can get a bus to the park from the station. You can also get a bus from Kumagaya Station on the Takasaki line. On foot, the nearest park entrance to Shinrin Koen Station is the South gate, which is about 3 kilometres from the station. Parking is available at each of the five entrances to Shinrin Park. It is charged by the day: 600 yen.
# -*- coding: utf-8 -*-
import sys
import traceback

from ermesms.plugins.Sender import Sender
from ermesms.PreferenceManager import PreferenceManager
from ermesms.plugins.UI import UI

class GraphicalUI(UI):
    """Graphical user interface in PyQt."""

    MainFrame = None

    def isAvailable(self):
        """Return True if this interface can be used."""
        # there must be no extra command-line parameters unless -gui is given
        result = (len(sys.argv) == 1) or ("-gui" in sys.argv)
        try:
            # PyQt4 has to be correctly installed
            from PyQt4 import QtGui, QtCore
            from ermesms.plugins.uis.MainFrame import MainFrame
        except ImportError, e:
            result = False
            print e
        return result

    def getPriority(self):
        """Return a priority code. If more than one interface is usable,
        the one with the highest priority is chosen."""
        return 3

    def run(self):
        """Start this interface."""
        from PyQt4 import QtGui, QtCore
        from ermesms.plugins.uis.MainFrame import MainFrame
        import os
        self.QtUIApp = QtGui.QApplication(sys.argv)
        pluginWin = os.path.join(os.getcwd(), 'qt4_plugins',
                                 'imageformats', 'qjpeg.dll')
        pluginMac = os.path.join(os.getcwd(), 'qt4_plugins',
                                 'imageformats', 'libqjpeg.dylib')
        pluginUnix = os.path.join(os.getcwd(), 'qt4_plugins',
                                  'imageformats', 'libqjpeg.so')
        if os.path.isfile(pluginWin) or os.path.isfile(pluginMac) or \
           os.path.isfile(pluginUnix):
            self.QtUIApp.setLibraryPaths(
                QtCore.QStringList(os.path.join(os.getcwd(), 'qt4_plugins')))
        self.QtUIApp.setQuitOnLastWindowClosed(False)
        self.MainFrame = MainFrame()
        self.MainFrame.show()
        sys.exit(self.QtUIApp.exec_())

    def showFatalException(self, message):
        """Called when an unhandled exception is caught in the
        main program."""
        from PyQt4 import QtGui
        QtGui.QMessageBox.critical(self.MainFrame, "Errore", message)
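# A hedged, self-contained sketch of the selection rule that getPriority()
# implies (hypothetical helper; the real plugin loader lives elsewhere in
# ermesms): pick the usable interface with the highest priority.
def pick_ui(candidates):
    available = [ui for ui in candidates if ui.isAvailable()]
    return max(available, key=lambda ui: ui.getPriority()) if available else None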
The laughter and gaiety commonly used to describe the Filipino people take root in the Philippine countryside. Life in the barrio is simple, but Filipinos always manage to find time to celebrate life’s gifts. The annual fiestas to celebrate the patron saints of the barrios symbolize the mixture of Christian and non-Christian beliefs. Fiestas not only honor the patron saint, but give homage to the barrio’s namesake for a good harvest, health, and perseverance. These fiestas are marked with celebrations of holy mass, music, dance and song. Perhaps the best known and closest to the Filipino heart are the dances from the rural Christian lowlands. To the Filipinos, these dances illustrate the fiesta spirit and demonstrate a love of life. The same can be said for the native Philippine folk songs - Bahay Kubo, Magtanim, Leron Leron Sinta, to name a few. They express a joy in work, a love for music, and pleasure in the simplicities of life. The following pieces are only a few selections from KP’s repertoire. Meaning "with the use of drinking glasses", this vibrant dance shows off the balancing skill of the performers. Glasses filled with rice wine are placed on the head and in each hand, then carefully maneuvered with graceful movements. According to the history of this dance, a young woman named Kanang (short for Cayetana) began improvising her steps in the middle of her performance, imitating the movements of an "itik", a duck, as it walks with choppy steps and splashes water on its back. The word pandanggo comes from the Spanish dance "fandango", characterized by lively steps and clapping, while "ilaw" is the Filipino word for "light"; hence, the dance of lights. The girls of the village show off their colorful farm hats.
# Copyright (c) 2016 The Johns Hopkins University/Applied Physics Laboratory # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from castellan import options as castellan_opts from oslo_config import cfg from cinder import keymgr from cinder import test class InitTestCase(test.TestCase): def setUp(self): super(InitTestCase, self).setUp() self.config = cfg.ConfigOpts() castellan_opts.set_defaults(self.config) self.config.set_default('backend', 'cinder.keymgr.conf_key_mgr.ConfKeyManager', group='key_manager') def test_blank_config(self): kmgr = keymgr.API(self.config) self.assertEqual(type(kmgr), keymgr.conf_key_mgr.ConfKeyManager) def test_set_conf_key_manager(self): self.config.set_override( 'backend', 'cinder.keymgr.conf_key_mgr.ConfKeyManager', group='key_manager') kmgr = keymgr.API(self.config) self.assertEqual(type(kmgr), keymgr.conf_key_mgr.ConfKeyManager)
Zaragoza International Airport is located 10kms from the city of Zaragoza, it’s a nice but very small airport. In order to get to the city center (P M Augustin), you can take the bus located at the airport exit. The bus operates every day and departs from the airport each half an hour (except on Sundays, when it departs once a hour). The journey takes 30mins and costs around 1,60€. Zaragoza is the capital of the autonomous community of Aragon and has a lot of history. The city offers many special attractions, all nearby, so no need of public transport to move. Start your visit from Paseo de la Independencia, the main street in the city, full of stores and cafes. Walk along Paseo de la Independencia and you will get to Plaza de Espana, a very nice place where the old center meets the new one and it’s the hub of culture, business and transportation for the city. You’ll also find important architecture, museums, monuments, shopping, restaurants and bars. You will leave the modern city behind you to immerse yourself into the old town. Take Calle Alfonso I or Calle Don Jaime I and you will arrive in Plaza del Pilar, the main square of the old town. The place has important historical buildings in the whole perimeter: Basilica de Nuestra Senora del Pilar, Catedral de la Seo, la Lonja. Nuestra Senora del Pilar basilica is one of the most visited places of pilgrimage in Spain, consecrated to the patroness of all Spanish-speaking countries. The baroque building is flanked by four towers. In its interior the wall-paintings in the cupola, works of Goya and Bayeau, are of particular interest. La Seo cathedral is a great Gothic cathedral, with some neoclassical and some baroque elements . In its interior, which consists of five naves with lateral chapels, the main-altar with splendid reliefs and the Mudejar-style apse are remarkable. La Lonja is the historical Stock Exchange office. It is a remarkable artistic building, used nowadays for exhibitions. Take the street behind the churches and take a walk in the historical street Av. de Cesar Augusto. Along this street you will find the Plaza del Mercado Central. Just next to this place is the Tower of Zuda. Take a walk up this tower and you will enjoy a fantastic view of the city. Now walk towards the coast and enjoy a nice walking along the Ebro river, it is very enjoyable especially with a good weather. Enjoy also the fabulous bridges Zaragoza has, for instance the Piedra Bridge and Pilar Bridge. When you arrive in front of the Pilar bridge, leave the river side and go back to the inner streets, you will arrive at the Roman Town Walls. This is one of the main landmark in the city and is still very well conserved. Just next to the Roman walls, you will see the Magdalena church, a wonderful piece of Mudéjar style. Next to these attractions, you will find also the Dean Arch, a great piece of Gothic and Mudéjar style. After a big cultural tour, it’s time to relax and enjoy the city center. Just have a walk in the small and old streets of the city, enjoy the nice atmosphere and the ancient and historical buildings. This is also a great place for the nightlife, since all these small streets during the night get crowded with people having drinks or eating in the numerous restaurants the city offers. If the first day in the city was dedicated to the discovering of the city center, of its old and modern part, the second day is dedicated to a fantastic historical building a little bit far away from the city center, but manageable on foot with a 20mins walk. 
We are talking about the Palacio de Aljaferia, a simple wonder of the Hispanic-Islamic Architecture recognized as UNESCO World Heritage Site. This masterpiece was built in the second half of the 11th century as residential enjoyment of the Taifas. With the fall of Zaragoza to Alfonso I, the Palace became the home of the christian kings of Aragon. It is a palace of quadrangular plan with rounded towers except for one of them, known as the Troubadour’s Tower, which has a rectangular plan. The rooms are arranged around the courtyard, which is open to the sky. Its roofs, ceilings and plasterwork decoration are some of its greatest charms. This building is entirely a wonder, you cannot miss it if you go to Zaragoza. The Water Tower stands 76 metres high and can be understood as a dual structure uniting the two separate elements that make up the building and give it its unique profile. The transparent body, wrapped in glass with a sculptural form in the shape of a drop of water, is an iconic structure. This design allows the building to offer two faces: an opaque building during the day and a great shiny beacon at night. The Pavilion Bridge (Pabellon Puente) is a great bridge built in steel located along the Ebro river. Its cladding is inspired by shark scales and creates a natural microclimate in the interior. This is a cooling system where air is exchanged through the building’s porous skin. The Aragon Pavilion (Pabellon de Aragon) is a great building made by intertwined panels of glass and micro-concrete with white glass fibre, providing the interior with a large quantity of natural light. This weave is opaque in the base, becoming more transparent as the building gets higher. The building is also designed with energy-saving characteristics, making it an “environmentally responsible” structure. The Spain Pavilion (Pabellon de Espana) is one of the hotspots of the Expo area and is the result of a commitment to blend architecture with the environment and sustained development. It reproduces a wood of pillars that envelops open volumes of glass, conceived under energy-saving criteria, with a large roof that provides the building with shade. The Aragon Congress Center is built with a horizontal frame and a wave-like roof with skylights to allow sunlight to enter illuminate foyers and common spaces. Likewise, the rooms receive natural light from outside by means of translucent facade walls, in alternating stretches of glass and metal latticework. The white glass fiber reinforced concrete roof enhances the thermal insulation properties of the building. Zaragoza Fluvial Aquarium is the largest of its kind in Europe, the third largest in the world and can be found in the heart of the Expo 2008 site. Its purpose is to educate, research and inform throughout a natural journey along the five of the world`s great rivers: Nile, Mekong, Amazon, Darling Murray and Ebro.
"""Manage hdsdiscovery functionalities""" import os import re import logging from compass.hdsdiscovery import utils class HDManager: """Process a request.""" def __init__(self): base_dir = os.path.dirname(os.path.realpath(__file__)) self.vendors_dir = os.path.join(base_dir, 'vendors') self.vendor_plugins_dir = os.path.join(self.vendors_dir, '?/plugins') def learn(self, host, credential, vendor, req_obj, oper="SCAN", **kwargs): """Insert/update record of switch_info. Get expected results from switch according to sepcific operation. :param req_obj: the object of a machine :param host: switch IP address :param credientials: credientials to access switch :param oper: operations of the plugin (SCAN, GETONE, SET) :param kwargs(optional): key-value pairs """ plugin_dir = self.vendor_plugins_dir.replace('?', vendor) if not os.path.exists(plugin_dir): logging.error('No such directory: %s', plugin_dir) return None plugin = utils.load_module(req_obj, plugin_dir, host, credential) if not plugin: # No plugin found! #TODO add more code to catch excpetion or unexpected state logging.error('no plugin %s to load from %s', req_obj, plugin_dir) return None return plugin.process_data(oper) def is_valid_vendor(self, host, credential, vendor): """ Check if vendor is associated with this host and credential :param host: switch ip :param credential: credential to access switch :param vendor: the vendor of switch """ vendor_dir = os.path.join(self.vendors_dir, vendor) if not os.path.exists(vendor_dir): logging.error('no such directory: %s', vendor_dir) return False vendor_instance = utils.load_module(vendor, vendor_dir) #TODO add more code to catch excpetion or unexpected state if not vendor_instance: # Cannot found the vendor in the directory! logging.error('no vendor instance %s load from %s', vendor, vendor_dir) return False return vendor_instance.is_this_vendor(host, credential) def get_vendor(self, host, credential): """ Check and get vendor of the switch. :param host: switch ip: :param credential: credential to access switch """ # List all vendors in vendors directory -- a directory but hidden # under ../vendors all_vendors = sorted(o for o in os.listdir(self.vendors_dir) if os.path.isdir(os.path.join(self.vendors_dir, o)) and re.match(r'^[^\.]', o)) logging.debug("[get_vendor]: %s ", all_vendors) for vname in all_vendors: vpath = os.path.join(self.vendors_dir, vname) instance = utils.load_module(vname, vpath) #TODO add more code to catch excpetion or unexpected state if not instance: logging.error('no instance %s load from %s', vname, vpath) continue if instance.is_this_vendor(host, credential): return vname return None
I want to create a dashlet where content administrator can see all the content created by all others in all the web projects as a list so that administrator can see list of all the documents. Look in the configuration of your dashboard and add the "document list", is it what you want ? I tried to change both "MyDocs.ServiceContext" & "MyDocs.Home" here , tried these to give path of companyhome like "home=companyhome.childByNamePath["Web Projects/creatorprj"]>" or "home=companyhome.nodeByReference["workspace://SpacesStore/dba6e0f6-466d-498a-a277-9cecdf7de655"]" for particular web project or using xpath query like "home=companyhome.childrenByXPath["*[@cm:name='Web Projects']" in ftl file & tried to change"MyDocs.ServiceContext=http://localhost:8080/alfresco/service/sample/avm/stores" to set it to avm stores but it is not showing any content from any of the web projects.What i have observed is that it is not able to get the contents from web projects as web project have a definite structure & sandboxes & these are dynamic.Correct me if i am wrong!!So how can i get the content related to web projects. Is there any other web script we can use or any way to modify the original one to get desired results or do we need to create a new script. I am not sure if you have visited this page. I think this is what you are trying to do. http://wiki.alfresco.com/wiki/Web_Scripts_Examples - See section WCM Store List and WCM Folder Browse Example. Company Home is used only for DM Space and not for Alfresco WCM Stores. The following webscript mentioned in WCM Store List will show you all the stores present. It will list down all existing store. This list will include User Sand box as well so its upto you whether you want to monitor user sand box activity or activity within Staging Sandbox. I think Looking under ROOT ie Staging would suffice your requirement. You need to iterate over each store and build your logic to display items under each store. One such example is given right in WCM Folder Browse Example. If you need any more help just let me know. But I think this should achieve your objectives.
import numpy as np from scipy.spatial.distance import cdist # ======================================================================== # USAGE: [Coeff]=LLC_coding_appr(B,X,knn,lambda) # Approximated Locality-constraint Linear Coding # # Inputs # B -M x d codebook, M entries in a d-dim space # X -N x d matrix, N data points in a d-dim space # knn -number of nearest neighboring # lambda -regulerization to improve condition # # Outputs # Coeff -N x M matrix, each row is a code for corresponding X # # Jinjun Wang, march 19, 2010 # ======================================================================== def llc_coding_approx(B, X, k_nn=5, beta=1e-4): D = cdist(X, B, 'euclidean') N = X.shape[0] I = np.zeros((N, k_nn), 'int32') for i in range(N): d = D[i, :] idx = np.argsort(d) I[i, :] = idx[:k_nn] II = np.eye(k_nn) coeffs = np.zeros((N, B.shape[0])) for i in range(N): idx = I[i, :] z = B[idx, :] - np.tile(X[i, :], (k_nn, 1)) # shift ith point to origin z = z.dot(z.transpose()) z = z + II * beta * np.trace(z) # regularization (K>D) w = np.linalg.solve(z, np.ones((k_nn, 1))) w = w / np.sum(w) # enforce sum(w) = 1 coeffs[i, idx] = w.ravel() return coeffs
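# A hedged usage sketch with synthetic data: 50 descriptors coded against a
# 20-entry codebook in a 10-dimensional space. Each row of the result has
# exactly k_nn non-zero coefficients and sums to one.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    B = rng.rand(20, 10)                          # codebook, M x d
    X = rng.rand(50, 10)                          # data points, N x d
    codes = llc_coding_approx(B, X, k_nn=5)
    print(codes.shape)                            # (50, 20)
    print(np.allclose(codes.sum(axis=1), 1.0))    # True: weights are normalised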
Created exclusively by Georgina Davies, this summer feast is best enjoyed with friends. The first to be shared from the collection, this delicious lamb recipe is full of flavour and a fantastic dish to be finished at the table. Slice the onions and place at the bottom of a large baking tray. Place the lamb shoulder on top. Mix the garlic, paprika, ground cumin, ground coriander, fennel seeds, lemon zest and juice, and olive oil together in a small bowl. With a sharp knife, make small cuts all over the top of the lamb (a couple of centimetres deep) before rubbing the whole of the shoulder with the spice mix – top and bottom. Cover with cling film and put in the fridge to marinate for at least two hours (preferably overnight). Heat the oven to 200°C and put the lamb in. After 10 minutes, turn the temperature down to 120°C and cook for 5 hours (or longer). The slow and low cooking is what makes the lamb really tender and flavoursome. Every hour or so, baste the lamb with all the amazing spiced juices to ensure they fully permeate the meat. Meanwhile, make the sides. For the quick pickled vegetables, combine all of the ingredients in a bowl and let them marinate for half an hour (or longer). For the garlicky yoghurt, combine the yoghurt, lemon zest and garlic in a bowl and season well. In a separate bowl mix together the chopped mint and olive oil. To serve, drizzle the mint oil mixture over the top of the garlic yoghurt. When the lamb is ready, take it out of the oven and shred roughly with two forks. Serve the lamb with the pickled vegetables, yoghurt, flatbreads (GF) and a crisp green salad. Remember to share your #ONEINAMILLION successes with us and #ROBERTWELCH. To see more delicious creations follow Georgina on Instagram or visit her website.
import sys #from PyQt4 import QtGui, QtCore from status_msg import StatusMsg from single_field import SingleField class InformationTab : def __init__(self, tab_no, field, rolelabel, penaltylabel, hardwarelabel, actionlabel, hideallcheck, targetcheck, \ ballcheck, teammatecheck, opponentcheck, undefcheck, activitystateGreen, activitystateRed): """ :param tab_no: the number of the robot tab :param field: the soccer field :param rolelabel: label for the robot role :param penaltylabel: label for the penalty :param hardwarelabel: label for the hardware status :param actionlabel: label for the next action :param hideallcheck: check box to hide everything :param targetcheck: check box to hide the target :param ballcheck: check box to hide the ball :param teammatecheck: check box to hide teammates :param opponentcheck: check box to hide opponents :param undefcheck: check box to hide undefined obstacles :param activitystateGreen: green indicator :param activitystateRed: red indicator """ #tab index [0;3] self.index = tab_no # decodes the integer for the corresponding role self.roleDecoder = {0: 'IDLING', 1: 'OTHER', 2: 'STRIKER', 3: 'SUPPORTER', 4: 'DEFENDER', 5: 'GOALIE'} # decodes the integer for the corresponding action self.actionDecoder = {0: 'Undefinded', 1: 'Positioning', 2: 'Going to ball', 3: 'Trying to score', 4: 'Waiting'} self.stateDecoder = {0: 'CONTROLLABLE', 1: 'FALLING', 2: 'FALLEN', 3: 'GETTING_UP', 4: 'ANIMATION_RUNNING', 5: 'STARTUP', \ 6: 'SHUTDOWN', 7: 'PENALTY', 8: 'PENALTY_ANIMATION', 9: 'RECORD', 10: 'WALKING', 11: 'MOTOR_OFF', \ 12: 'HCM_OFF', 13: 'HARDWARE_PROBLEM', 14: 'PICKED_UP'} # Labels, get msg self.rolelabel = rolelabel self.penaltylabel = penaltylabel self.hardwarelabel = hardwarelabel self.actionlabel = actionlabel # Checkboxes, default checked, but hide all unchecked self.hideallcheck = hideallcheck self.hideallcheck.setChecked(False) # hide all undechecked self.hideallcheck.stateChanged.connect(lambda: self.hideallstate()) self.targetcheck = targetcheck self.targetcheck.setChecked(True) self.targetcheck.stateChanged.connect(lambda: self.targetstate()) self.ballcheck = ballcheck self.ballcheck.setChecked(True) self.ballcheck.stateChanged.connect(lambda: self.ballstate()) self.teammatecheck = teammatecheck self.teammatecheck.setChecked(True) self.teammatecheck.stateChanged.connect(lambda: self.teammatestate()) self.opponentcheck = opponentcheck self.opponentcheck.setChecked(True) self.opponentcheck.stateChanged.connect(lambda: self.opponentstate()) self.undefcheck = undefcheck self.undefcheck.setChecked(True) self.undefcheck.stateChanged.connect(lambda: self.undefinedstate()) self.activitystateGreen = activitystateGreen self.activitystateGreen.hide() self.activitystateRed = activitystateRed self.activitystateRed.show() self.field = field # Labels def actualizePenaltylabel(self, data): """ updates the penalty label :param data: a dictionary with the transmitted information :return: """ if data.has_key(StatusMsg.label_penalty_rest): self.penaltylabel.setText(str(data.get(StatusMsg.label_penalty_rest))) def acutalizeRolelabel (self, data): """ updates the role label :param data: a dictionary with the transmitted information :return: """ if data.has_key(StatusMsg.label_role): self.rolelabel.setText(self.roleDecoder.get(data.get(StatusMsg.label_role))) def acutalizeActionlabel (self, data): """ updates the action label :param data: a dictionary with the transmitted information :return: """ if data.has_key(StatusMsg.label_action): 
self.actionlabel.setText(self.actionDecoder.get(data.get(StatusMsg.label_action))) def actualizeHardwarelabel (self, data): """ updates the hardware label :param data: a dictionary with the transmitted information :return: """ if data.has_key(StatusMsg.label_state): self.hardwarelabel.setText(self.stateDecoder.get(data.get(StatusMsg.label_state))) def actualizeActivitystate(self, data): """ :param data: a dictionary with the transmitted information :return: """ if data.has_key(StatusMsg.label_penalized): self.activitystateRed.raise_() if data.get(StatusMsg.label_penalized) == True: self.activitystateGreen.hide() self.activitystateRed.show() else: self.activitystateGreen.show() self.activitystateRed.hide() def setStatusMsg(self, data): """ :param data: a dictionary with the transmitted information :return: """ self.actualizePenaltylabel(data) self.acutalizeRolelabel(data) self.acutalizeActionlabel(data) self.actualizeActivitystate(data) self.actualizeHardwarelabel(data) # Filters for objects on field def hideallstate(self): """ hide every label on the field :return: """ if self.hideallcheck.isChecked() == True: self.field.setOwnRobotsVisibility(False, self.index) self.field.setPathVisibility(False, self.index) self.field.setBallVisibility(False, self.index) self.field.setTeammateVisibility(False, self.index) #self.field.setPathVisibility(False, self.index) self.field.setOpponentVisibility(False, self.index) self.field.setUndefVisibility(False, self.index) self.ballcheck.setChecked(False) self.teammatecheck.setChecked(False) self.opponentcheck.setChecked(False) self.undefcheck.setChecked(False) self.targetcheck.setChecked(False) else: self.field.setOwnRobotsVisibility(True, self.index) self.field.setPathVisibility(True, self.index) self.field.setBallVisibility(True, self.index) self.field.setTeammateVisibility(True, self.index) #self.field.setPathVisibility(True, self.index) self.field.setOpponentVisibility(True, self.index) self.field.setUndefVisibility(True, self.index) self.ballcheck.setChecked(True) self.teammatecheck.setChecked(True) self.opponentcheck.setChecked(True) self.undefcheck.setChecked(True) self.targetcheck.setChecked(True) def targetstate(self): if self.targetcheck.isChecked() == True: self.field.setPathVisibility(True, self.index) else: self.field.setPathVisibility(False, self.index) def ballstate(self): if self.ballcheck.isChecked() == True: self.field.setBallVisibility(True, self.index) else: self.field.setBallVisibility(False, self.index) def teammatestate(self): if self.teammatecheck.isChecked() == True: self.field.setTeammateVisibility(True, self.index) else: self.field.setTeammateVisibility(False, self.index) def opponentstate(self): if self.opponentcheck.isChecked() == True: self.field.setOpponentVisibility(True, self.index) else: self.field.setOpponentVisibility(False, self.index) def undefinedstate(self): if self.undefcheck.isChecked() == True: self.field.setUndefVisibility(True, self.index) else: self.field.setUndefVisibility(False, self.index)
Someone asked me what I’d like for the Holidays and honestly I couldn’t think of a thing I needed or wanted. Maybe a technology upgrade but not the typical holiday gift items that one would search for while wandering the mall among the frenzied herds. When I was struggling with ME/CFS, and someone asked what I wanted for the holidays, all I could think of was my health. And of course they couldn’t give me that. At the time, I don’t remember having the clear thought process to be able to articulate the kinds of ‘gifts’ that would have been truly helpful in supporting my recovery. With hindsight, now I can think of many that would have been much appreciated. In my younger days when I was short on cash, one of my favorite ‘go to’ gifts was the personal gift certificate. I would think of something I could do for the recipient and create a certificate that could be presented to me when the service was needed. Imagine all the helpful things that people could do for you as you struggle with ME/CFS. Driving to appointment, cooking a meal, mowing, shoveling and walking the dog come to mind. What would you value in a personal gift certificate? What do you wish was in your helping gift? Tell us and then tell those who want to help. Please COMMENT on this blog or Send in your thoughts and I’ll post them with your permission. You can use the Contact Form or send an email to Martha at DefeatCFS dot net. And Guest Blogs are most welcome!
from __future__ import print_function, division, absolute_import import numpy as np import h2o import pandas as pd import warnings from collections import Counter from pkg_resources import parse_version from ..utils import (validate_is_pd, human_bytes, corr_plot, load_breast_cancer_df, load_iris_df, load_boston_df) from .frame import _check_is_1d_frame from .select import _validate_use from .base import check_frame from .fixes import rbind_all from h2o.frame import H2OFrame from sklearn.utils.validation import check_array __all__ = [ 'from_array', 'from_pandas', 'h2o_bincount', 'h2o_col_to_numpy', 'h2o_corr_plot', 'h2o_frame_memory_estimate', 'load_iris_h2o', 'load_boston_h2o', 'load_breast_cancer_h2o', 'reorder_h2o_frame', 'shuffle_h2o_frame' ] def load_iris_h2o(include_tgt=True, tgt_name="Species", shuffle=False): """Load the iris dataset into an H2OFrame Parameters ---------- include_tgt : bool, optional (default=True) Whether or not to include the target tgt_name : str, optional (default="Species") The name of the target column. shuffle : bool, optional (default=False) Whether or not to shuffle the data """ X = from_pandas(load_iris_df(include_tgt, tgt_name, shuffle)) if include_tgt: X[tgt_name] = X[tgt_name].asfactor() return X def load_breast_cancer_h2o(include_tgt=True, tgt_name="target", shuffle=False): """Load the breast cancer dataset into an H2OFrame Parameters ---------- include_tgt : bool, optional (default=True) Whether or not to include the target tgt_name : str, optional (default="target") The name of the target column. shuffle : bool, optional (default=False) Whether or not to shuffle the data """ X = from_pandas(load_breast_cancer_df(include_tgt, tgt_name, shuffle)) if include_tgt: X[tgt_name] = X[tgt_name].asfactor() return X def load_boston_h2o(include_tgt=True, tgt_name="target", shuffle=False): """Load the boston housing dataset into an H2OFrame Parameters ---------- include_tgt : bool, optional (default=True) Whether or not to include the target tgt_name : str, optional (default="target") The name of the target column. shuffle : bool, optional (default=False) Whether or not to shuffle the data """ X = from_pandas(load_boston_df(include_tgt, tgt_name, shuffle)) return X def h2o_col_to_numpy(column): """Return a 1d numpy array from a single H2OFrame column. Parameters ---------- column : H2OFrame column, shape=(n_samples, 1) A column from an H2OFrame Returns ------- np.ndarray, shape=(n_samples,) """ x = _check_is_1d_frame(column) _1d = x[x.columns[0]].as_data_frame(use_pandas=True) return _1d[_1d.columns[0]].values def _unq_vals_col(column): """Get the unique values and column name from a column. Returns ------- str, np.ndarray : tuple (c1_nm, unq) """ unq = column.unique().as_data_frame(use_pandas=True) c1_nm = unq.columns[0] unq = unq[unq.columns[0]].sort_values().reset_index() return c1_nm, unq def h2o_bincount(bins, weights=None, minlength=None): """Given a 1d column of non-negative ints, ``bins``, return a np.ndarray of positional counts of each int. 
Parameters ---------- bins : H2OFrame The values weights : list or H2OFrame, optional (default=None) The weights with which to weight the output minlength : int, optional (default=None) The min length of the output array """ bins = _check_is_1d_frame(bins) _, unq = _unq_vals_col(bins) # ensure all positive unq_arr = unq[_].values if any(unq_arr < 0): raise ValueError('values must be positive') # make sure they're all ints if np.abs((unq_arr.astype(np.int) - unq_arr).sum()) > 0: raise ValueError('values must be ints') # adjust minlength if minlength is None: minlength = 1 elif minlength < 0: raise ValueError('minlength must be positive') # create our output array all_vals = h2o_col_to_numpy(bins) output = np.zeros(np.maximum(minlength, unq_arr.max() + 1)) # check weights if weights is not None: if isinstance(weights, (list, tuple)): weights = np.asarray(weights) elif isinstance(weights, H2OFrame): weights = h2o_col_to_numpy(weights) if weights.shape[0] != all_vals.shape[0]: raise ValueError('dim mismatch in weights and bins') else: weights = np.ones(all_vals.shape[0]) # update our bins for val in unq_arr: mask = all_vals == val array_ones = np.ones(mask.sum()) weight_vals = weights[mask] output[val] = np.dot(array_ones, weight_vals) return output def from_pandas(X): """A simple wrapper for H2OFrame.from_python. This takes a pandas dataframe and returns an H2OFrame with all the default args (generally enough) plus named columns. Parameters ---------- X : pd.DataFrame The dataframe to convert. Returns ------- H2OFrame """ pd, _ = validate_is_pd(X, None) # older version of h2o are super funky with this if parse_version(h2o.__version__) < parse_version('3.10.0.7'): h = 1 else: h = 0 # if h2o hasn't started, we'll let this fail through return H2OFrame.from_python(X, header=h, column_names=X.columns.tolist()) def from_array(X, column_names=None): """A simple wrapper for H2OFrame.from_python. This takes a numpy array (or 2d array) and returns an H2OFrame with all the default args. Parameters ---------- X : ndarray The array to convert. column_names : list, tuple (default=None) the names to use for your columns Returns ------- H2OFrame """ X = check_array(X, force_all_finite=False) return from_pandas(pd.DataFrame.from_records(data=X, columns=column_names)) def h2o_corr_plot(X, plot_type='cor', cmap='Blues_d', n_levels=5, figsize=(11, 9), cmap_a=220, cmap_b=10, vmax=0.3, xticklabels=5, yticklabels=5, linewidths=0.5, cbar_kws={'shrink': 0.5}, use='complete.obs', na_warn=True, na_rm=False): """Create a simple correlation plot given a dataframe. Note that this requires all datatypes to be numeric and finite! 
Parameters ---------- X : H2OFrame, shape=(n_samples, n_features) The H2OFrame plot_type : str, optional (default='cor') The type of plot, one of ('cor', 'kde', 'pair') cmap : str, optional (default='Blues_d') The color to use for the kernel density estimate plot if plot_type == 'kde' n_levels : int, optional (default=5) The number of levels to use for the kde plot if plot_type == 'kde' figsize : tuple (int), optional (default=(11,9)) The size of the image cmap_a : int, optional (default=220) The colormap start point cmap_b : int, optional (default=10) The colormap end point vmax : float, optional (default=0.3) Arg for seaborn heatmap xticklabels : int, optional (default=5) The spacing for X ticks yticklabels : int, optional (default=5) The spacing for Y ticks linewidths : float, optional (default=0.5) The width of the lines cbar_kws : dict, optional Any KWs to pass to seaborn's heatmap when plot_type = 'cor' use : str, optional (default='complete.obs') The "use" to compute the correlation matrix na_warn : bool, optional (default=True) Whether to warn in the presence of NA values na_rm : bool, optional (default=False) Whether to remove NAs """ X = check_frame(X, copy=False) corr = None if plot_type == 'cor': use = _validate_use(X, use, na_warn) cols = [str(u) for u in X.columns] X = X.cor(use=use, na_rm=na_rm).as_data_frame(use_pandas=True) X.columns = cols # set the cols to the same names X.index = cols corr = 'precomputed' else: # WARNING! This pulls everything into memory... X = X.as_data_frame(use_pandas=True) corr_plot(X, plot_type=plot_type, cmap=cmap, n_levels=n_levels, figsize=figsize, cmap_a=cmap_a, cmap_b=cmap_b, vmax=vmax, xticklabels=xticklabels, corr=corr, yticklabels=yticklabels, linewidths=linewidths, cbar_kws=cbar_kws) def h2o_frame_memory_estimate(X, bit_est=32, unit='MB'): """We estimate the memory footprint of an H2OFrame to determine, possibly, whether it's capable of being held in memory or not. Parameters ---------- X : H2OFrame The H2OFrame in question bit_est : int, optional (default=32) The estimated bit-size of each cell. The default assumes each cell is a signed 32-bit float unit : str, optional (default='MB') The units to report. One of ('MB', 'KB', 'GB', 'TB') Returns ------- mb : str The estimated number of UNIT held in the frame """ X = check_frame(X, copy=False) n_samples, n_features = X.shape n_bits = (n_samples * n_features) * bit_est n_bytes = n_bits // 8 return human_bytes(n_bytes, unit) def _gen_optimized_chunks(idcs): """Given the list of indices, create more efficient chunks to minimize the number of rbind operations required for the H2OFrame ExprNode cache. """ idcs = sorted(idcs) counter = Counter(idcs) counts = counter.most_common() # order desc # the first index is the number of chunks we'll need to create. n_chunks = counts[0][1] chunks = [[] for _ in range(n_chunks)] # gen the number of chunks we'll need # 1. populate the chunks each with their first idx (the most common) # 2. pop from the counter # 3. re-generate the most_common(), repeat while counts: val, n_iter = counts[0] # the one at the head of the list is the most common for i in range(n_iter): chunks[i].append(val) counts.pop(0) # pop out the first idx... # sort them return [sorted(chunk) for chunk in chunks] def reorder_h2o_frame(X, idcs, from_chunks=False): """Currently, H2O does not allow us to reorder frames. This is a hack to rbind rows together in the order prescribed. 
Parameters ---------- X : H2OFrame The H2OFrame to reorder idcs : iterable The order of the H2OFrame rows to be returned. from_chunks : bool, optional (default=False) Whether the elements in ``idcs`` are optimized chunks generated by ``_gen_optimized_chunks``. Returns ------- new_frame : H2OFrame The reordered H2OFrame """ # hack... slow but functional X = check_frame(X, copy=False) # we're rbinding. no need to copy # to prevent rbinding rows over, and over, and over # create chunks. Rbind chunks that are progressively increasing. # once we hit an index that decreases, rbind, and then start the next chunk last_index = np.inf chunks = [] # all of the chunks chunk = [] # the current chunk being built for i in idcs: # if it's a chunk from balancer: if from_chunks: # probably a list of indices chunks.append(X[i, :]) # otherwise chunks have not been computed else: # while the indices increase adjacently if i < last_index: last_index = i chunk.append(i) # otherwise, they are no longer increasing else: # if a chunk exists if chunk: # there should ALWAYS be a chunk rows = X[chunk, :] else: rows = X[i, :] # append the chunk and reset the list chunks.append(rows) chunk = [] last_index = np.inf # print([type(c) for c in chunks]) # couldn't figure out an issue for a while... return rbind_all(*chunks) def shuffle_h2o_frame(X): """Currently, H2O does not allow us to shuffle frames. This is a hack to rbind rows together in the order prescribed. Parameters ---------- X : H2OFrame The H2OFrame to reorder Returns ------- shuf : H2OFrame The shuffled H2OFrame """ warnings.warn('Shuffling H2O frames will eventually be deprecated, as H2O ' 'does not allow re-ordering of frames by row. The current work-around ' '(rbinding the rows) is known to cause issues in the H2O ExprNode ' 'cache for very large frames.', DeprecationWarning) X = check_frame(X, copy=False) idcs = np.random.permutation(np.arange(X.shape[0])) shuf = reorder_h2o_frame(X, idcs) # do not generate optimized chunks here... return shuf
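# A small, self-contained illustration of the chunking idea behind
# _gen_optimized_chunks (a private helper, shown here only to make the
# behaviour concrete): duplicated indices, as produced by oversampling,
# are spread over the minimum number of sorted, duplicate-free chunks so
# that each chunk can be sliced and rbound in one go.
if __name__ == '__main__':
    print(_gen_optimized_chunks([3, 1, 3, 2, 3, 1]))   # -> [[1, 2, 3], [1, 3], [3]]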
We are so grateful for the kindness and professionalism we experienced with Bill Richard and Richard Realty Groups! Moving, selling and buying a home are all very stressful things. Having a realty team that is watching out for your family makes all the difference! Thank you so much for everything, Bill! Terry knows exactly how to meet expectations: professional, thorough, knowledgeable and proactive. After taking our listing, she canvassed the neighborhood and found an immediate opportunity. The next day, we had a full-price offer. Smooth and seamless, she made the transaction one of the best we've ever known. Thank you, Terry, we couldn't have asked for more!
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utilities related to layer/model functionality.""" # TODO(b/110718070): Move these functions back to tensorflow/python/keras/utils # once __init__ files no longer require all of tf.keras to be imported together. from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import functools import weakref from tensorflow.python.util import object_identity try: # typing module is only used for comment type annotations. import typing # pylint: disable=g-import-not-at-top, unused-import except ImportError: pass def is_layer(obj): """Implicit check for Layer-like objects.""" # TODO(b/110718070): Replace with isinstance(obj, base_layer.Layer). return hasattr(obj, "_is_layer") and not isinstance(obj, type) def has_weights(obj): """Implicit check for Layer-like objects.""" # TODO(b/110718070): Replace with isinstance(obj, base_layer.Layer). has_weight = (hasattr(type(obj), "trainable_weights") and hasattr(type(obj), "non_trainable_weights")) return has_weight and not isinstance(obj, type) def cache_recursive_attribute(key): """Decorator to cache Layer properties which recursively depend on sub-layers. A number of attributes in Keras Layers take the form: ``` @property def thing(self): return self._thing or any(layer.thing for layer in self.layers) ``` This means that checking these properties (e.g. dynamic, stateful, etc) must traverse the entire graph of layers to determine whether any descent has changed its state. This decorator adds a mechanism for Layers and trackable data structures to broadcast mutations (including the addition or deletion of layers) and allows the top level layer to safely cache results. In general, if computing an attribute triggers a depth first search it is a good candidate for this caching mechanism. The architecture is optimized for safety and correctness rather than absolute optimality. This manifests in two ways: 1) Parents are never removed. It is possible for layer A to depend on layer B but subsequently remove that dependency. In that case, layer B will continue to broadcast its mutations to layer A until either A or B is deleted. However because the only effect is to invalidate a cache this does not affect correctness. (And robustly removing dependencies is difficult and error prone.) 2) Layers aggressively invalidate their caches when there is any ambiguity of whether or not it is necessary. For instance, consider the following: ``` class MyLayer(tf.keras.layers.Layer): def __init__(self): super(MyLayer, self).__init__() sub_layer = tf.keras.layers.Dense(1) self.sub_layers = [ sub_layer # This will be picked up, converted to a ListWrapper, # and added to self._layers ] # Include the layer twice. self.sub_layers.append(sub_layer) # Remove one copy, but one copy remains. 
self.sub_layers.pop() ``` In the example layer above, the set of tracked layers actually doesn't change; however to know that in the general case the Layer needs significant machinery to reason about what, if anything, has changed. By invalidating on every mutation we don't need to concern ourselves with the many types of mutations (append, pop, in-place replacement) and their specific semantics. Because mutations to layers are expected to be infrequent, this very conservative approach captures the vast majority of the performance gains from caching recursive properties while still remaining quite lightweight and easy to reason about. `tracking.cached_per_instance` provides a more detailed performance analysis of the WeakKeyDictionary cache pattern. Args: key: A string indicating which field is being cached. While not strictly necessary (since it could be obtained from f.__name__), it forces deliberate behavior when caching an attribute. Returns: A caching decorater specialized to `key`. """ cache = weakref.WeakKeyDictionary() def outer(f): """Attribute cache which has been specialized.""" @functools.wraps(f) def wrapped(self): """Cache aware version of `f`.""" # Sentinels are unique per Layer/Trackable, but can be hashed. (Unlike # some trackable data structures.) Consequently it makes sense to use the # sentinel as a cache key rather than `self`. sentinel = getattr(self, "_attribute_sentinel") # type: AttributeSentinel if not sentinel.get(key) or sentinel not in cache: cache[sentinel] = f(self) sentinel.mark_cached(key) output = cache[sentinel] return output return wrapped return outer def invalidate_recursive_cache(key): """Convenience decorator to invalidate the cache when setting attributes.""" def outer(f): @functools.wraps(f) def wrapped(self, value): sentinel = getattr(self, "_attribute_sentinel") # type: AttributeSentinel sentinel.invalidate(key) return f(self, value) return wrapped return outer class MutationSentinel(object): """Container for tracking whether a property is in a cached state.""" _in_cached_state = False def mark_as(self, value): # type: (MutationSentinel, bool) -> bool may_affect_upstream = (value != self._in_cached_state) self._in_cached_state = value return may_affect_upstream @property def in_cached_state(self): return self._in_cached_state class AttributeSentinel(object): """Container for managing attribute cache state within a Layer. The cache can be invalidated either on an individual basis (for instance when an attribute is mutated) or a layer-wide basis (such as when a new dependency is added). """ def __init__(self, always_propagate=False): self._parents = weakref.WeakSet() self.attributes = collections.defaultdict(MutationSentinel) # The trackable data structure containers are simple pass throughs. They # don't know or care about particular attributes. As a result, they will # consider themselves to be in a cached state, so it's up to the Layer # which contains them to terminate propagation. self.always_propagate = always_propagate def __repr__(self): return "{}\n {}".format( super(AttributeSentinel, self).__repr__(), {k: v.in_cached_state for k, v in self.attributes.items()}) def add_parent(self, node): # type: (AttributeSentinel, AttributeSentinel) -> None # Properly tracking removal is quite challenging; however since this is only # used to invalidate a cache it's alright to be overly conservative. 
We need # to invalidate the cache of `node` (since it has implicitly gained a child) # but we don't need to invalidate self since attributes should not depend on # parent Layers. self._parents.add(node) node.invalidate_all() def get(self, key): # type: (AttributeSentinel, str) -> bool return self.attributes[key].in_cached_state def _set(self, key, value): # type: (AttributeSentinel, str, bool) -> None may_affect_upstream = self.attributes[key].mark_as(value) if may_affect_upstream or self.always_propagate: for node in self._parents: # type: AttributeSentinel node.invalidate(key) def mark_cached(self, key): # type: (AttributeSentinel, str) -> None self._set(key, True) def invalidate(self, key): # type: (AttributeSentinel, str) -> None self._set(key, False) def invalidate_all(self): # Parents may have different keys than their children, so we locally # invalidate but use the `invalidate_all` method of parents. for key in self.attributes.keys(): self.attributes[key].mark_as(False) for node in self._parents: node.invalidate_all() def filter_empty_layer_containers(layer_list): """Filter out empty Layer-like containers and uniquify.""" # TODO(b/130381733): Make this an attribute in base_layer.Layer. existing = object_identity.ObjectIdentitySet() to_visit = layer_list[::-1] while to_visit: obj = to_visit.pop() if obj in existing: continue existing.add(obj) if is_layer(obj): yield obj else: sub_layers = getattr(obj, "layers", None) or [] # Trackable data structures will not show up in ".layers" lists, but # the layers they contain will. to_visit.extend(sub_layers[::-1]) def gather_trainable_weights(trainable, sub_layers, extra_variables): """Lists the trainable weights for an object with sub-layers. Args: trainable: Whether the object collecting the variables is trainable. sub_layers: A flat list of Layer objects owned by this object, to collect variables from. extra_variables: Any extra variables to include. Their `.trainable` property is used to categorize them. Returns: A list of collected trainable weights/variables. """ if not trainable: return [] weights = [] for layer in sub_layers: weights += layer.trainable_weights trainable_extra_variables = [ v for v in extra_variables if v.trainable] return weights + trainable_extra_variables def gather_non_trainable_weights(trainable, sub_layers, extra_variables): """Lists the non-trainable weights for an object with sub-layers. Args: trainable: Whether the object collecting the variables is trainable. sub_layers: A flat list of Layer objects owned by this object, to collect variables from. extra_variables: Any extra variables to include. Their `.trainable` property is used to categorize them. Returns: A list of collected non-trainable weights/variables. """ trainable_extra_variables = [] non_trainable_extra_variables = [] for v in extra_variables: if v.trainable: trainable_extra_variables.append(v) else: non_trainable_extra_variables.append(v) weights = [] for layer in sub_layers: weights += layer.non_trainable_weights if not trainable: trainable_weights = [] for layer in sub_layers: trainable_weights += layer.trainable_weights return (trainable_weights + trainable_extra_variables + weights + non_trainable_extra_variables) return weights + non_trainable_extra_variables
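As a minimal sketch of how the two decorators above pair up, here is a hypothetical layer-like class (MiniLayer is not part of TensorFlow; it only stands in for base_layer.Layer, which owns a real _attribute_sentinel).

class MiniLayer(object):
  """Hypothetical stand-in for a Layer that owns an AttributeSentinel."""

  def __init__(self):
    self._attribute_sentinel = AttributeSentinel()
    self._stateful = False

  @property
  @cache_recursive_attribute("stateful")
  def stateful(self):
    # In a real Layer this would also recurse into sub-layers; the decorator
    # caches the result keyed on the layer's sentinel.
    return self._stateful

  @stateful.setter
  @invalidate_recursive_cache("stateful")
  def stateful(self, value):
    self._stateful = value


layer = MiniLayer()
assert layer.stateful is False  # computed once, then served from the cache
layer.stateful = True           # the setter invalidates the cached entry
assert layer.stateful is True   # recomputed on the next access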
This next section will cover 5 of the best Google Keyword Tool alternatives. All of them are FREE, and I have used each of them very recently, so I will tell you the pros and cons of each so you can make an informed choice as to which tool you use in the future. WordPot has to be the fastest keyword research tool I have ever used. The speed with which your results are returned is awesome, and the slick interface only improves the experience. We collect our data from a combination of the real-time searches done on popular meta search engines (like Metacrawler and Dogpile) and results published by Google (zeitgeist), Yahoo and Msn. We are doing this 24/7 and have been collecting data and building our database for a number of years. We then aggregate the results together to come up with the numbers that you see on the site. Another really great element of WordPot is that they return both the volume of ‘Exact Daily' searches and ‘Total Searches' for each keyword, ‘Exact Searches' being, as we all know it, the exact keywords typed into the search engine. Only an initial seed keyword is required to submit your search. Once your results are returned, that's when the fun starts. As you can see, the results are clear, with just 4 columns displayed: Suggestions, Exact Daily, Total Daily and Add to Project. This last column allows you to create projects and select the keywords you wish to save for further investigation and analysis. When you add keywords to a project, you can then view them in the project screen, where you have export and filter options. Back to the search results, and we have a sidebar that allows us to modify our search settings. By changing the search results source to a specific search engine, you can see how the volumes change. The one thing I think would be a great addition to this tool would be an ‘Exact Monthly' search column. Obviously, you can just take the daily figure and multiply it by an average of 30 or 31 days; however, to make it even more user-friendly, it wouldn't take WordPot much to include this feature and make a great tool even better. I really love this tool, which is why it's first on my list. With such a simple interface it's easy to think that this tool won't deliver…but it really does. The beauty of UberSuggest lies in the search results. The results are actually displayed vertically, but I have copied them into an image so you can see what it's doing. It takes our seed keyword, in this case "Window Blinds", and appends a character or digit to the end of your search string. So if we enter "Window Blinds", UberSuggest adds the letter "a" to the end and the search string becomes "Window Blinds + a", returning keywords that follow the term window blinds starting with the letter "a". It's very clever. I don't know about you, but I could certainly use that info if I was considering building an Amazon Affiliates site based around the window blind niche. The downside to this tool is that there are no keyword search volumes provided, which is unfortunate as I think this could be an even better resource with that feature. Once you register for your free account, you can then access the SEOBook keyword tool. SEOBook provides a free keyword tool that gives daily search volumes for your keyword and 100 other related keyword terms. This tool is fast and easy to use, and I even managed to find some nice related keywords to help with content ideas for my niche sites. One other interesting section of the results is what I can only describe as the awesome link section.
The search results are not so easy on the eyes and it takes a while to adjust to the full-width display. I found the best way for me was to export my results to a CSV file and work with it from there, and whilst the large link section is good, it is very messy and could do with some TLC from SEOBook. This allows you to have various angles to your research and I have even used it for content ideas in the past, on an existing site that needed some fresh content. The search count for Wordtracker data is the number of times each keyword appears in our database of real searches, made by real people over the past 30 days. It's updated every week. The data is from a major search engine advertising network which passes us, on average, 3.5 billion worldwide searches per month. It delivers traffic from hundreds of niche web properties, search engines and portals. So they have quite a substantial data source and the search volume (or count as they call it) is as fresh as you can get, being the last 30 days of searches. There is also a paid option to WordTracker which allows you to use the SemRush data, which does provide far more results and in-depth data. Along with the search volume, they also provide a ‘competition' score ranging from 1 to 100, In Anchor and Title, which is the total number of sites that contain the keyword in the Title tag and in the anchor text in a backlink from an external web page. KEI – Based on the relationship between competitive elements and search volume. The higher the KEI figure, the more potential a keyword is liable to have – that is to say, there is little competition in relation to the search volume. Only 100 results are returned, if you want more you have to upgrade to the paid version. It can also take a while to understand exactly what it is you are looking at on the results page, however, there is a detailed help file should you need it…I did! A simple interface with a lot of power, SpyFu is very easy to use and the information it returns is very easy to understand. Whilst it does return ‘some' related keywords the main feature of SpyFu which goes hand in hand with the keyword research tool, is the Adwords profitability info. Each keyword you search brings back the monthly search volume (provided by Google public domain data), the cost per click (CPC) for AdWords, and the estimated daily cost of the traffic should you choose to pay for clicks rather than SEO. As you can see from the search result above, we have 8,100 UK EXACT MATCH searches for our term ‘Window Blinds'. Each click would cost an average of £1.93 and the total daily cost to use AdWords would come in at £130. What I like about SpyFu is how easy it is to read and pick out the key info you need immediately. The next section of the results shows us the Profitable Related Keywords, these are variations of our keyword search term and it shows us the ad statistics for those sites who are paying for traffic via AdWords. There is a lot of other data around the best performing ads in Google AdWords further down in the search results, so if you want to delve into that you can do. A little restrictive on countries with just the US & UK allowed, and you will have to subscribe in order to export the other profitable keywords that it suggests. Apart from that, it's definitely worth a go to see what nuggets it brings back. Of the 5 free keyword tools above, I would have to say the WordPot is my favorite, followed by UberSuggest, WordTracker, and SpyFu. 
SEOBook will have to be my least favorite due to the look and feel of the search results. I hope this helps you in choosing an alternative tool for your keyword research. If you have any favorite tools that I have not discussed here, please share in the comments.
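For what it's worth, the two small tricks described above (UberSuggest's letter-by-letter expansion and converting WordPot's daily counts into a rough monthly figure) boil down to a few lines of Python; the seed term and numbers here are just examples.

import string

seed = "Window Blinds"

# UberSuggest-style expansion: append each letter/digit to the seed term
suggestion_seeds = ["%s %s" % (seed, c) for c in string.ascii_lowercase + string.digits]
# -> ['Window Blinds a', 'Window Blinds b', ..., 'Window Blinds 9']

# WordPot-style 'Exact Daily' count turned into a rough monthly estimate
exact_daily = 250
exact_monthly_estimate = exact_daily * 30.4   # average days per month
print(int(round(exact_monthly_estimate)))     # ~7600 exact searches a month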
#!/usr/bin/env python """ rawdata_clean_relevant_gaiadr2_data.py Example: rawdata_clean_relevant_gaiadr2_data.py --help rawdata_clean_relevant_gaiadr2_data.py --inputFile gaiadr2_new_rawdata_rawdata.csv --outputFile gaiadr2_new_y2a1_rawdata.u.csv.tmp --verbose 2 """ ################################## def main(): import argparse import time """Create command line arguments""" parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument('--inputFile', help='name of the input CSV file', default='input.csv') parser.add_argument('--outputFile', help='name of the output CSV file', default='output.csv') parser.add_argument('--verbose', help='verbosity level of output to screen (0,1,2,...)', default=0, type=int) args = parser.parse_args() if args.verbose > 0: print args status = clean_relevant_gaiadr2_data(args) return status ################################## # clean_relevant_gaiadr2_data # def clean_relevant_gaiadr2_data(args): import numpy as np import os import sys import datetime import fitsio import pandas as pd if args.verbose>0: print print '* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *' print 'clean_relevant_gaiadr2_data' print '* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *' print inputFile = args.inputFile outputFile = args.outputFile # Read selected columns from inputFile... columns = ['RA_WRAP','RA','DEC', 'PHOT_G_MEAN_MAG','PHOT_G_MEAN_FLUX_OVER_ERROR', 'PHOT_BP_MEAN_MAG','PHOT_BP_MEAN_FLUX_OVER_ERROR', 'PHOT_RP_MEAN_MAG','PHOT_RP_MEAN_FLUX_OVER_ERROR', 'BP_RP','BP_G','G_RP','PHOT_BP_RP_EXCESS_FACTOR'] print datetime.datetime.now() print """Reading in selected columns from %s...""" % (inputFile) df = pd.read_csv(inputFile, usecols=columns) print datetime.datetime.now() # Includes masks for mag, magerr, color, main stellar locus outliers, # and BP_RP photometric excess... mask = ( (1.086/df.PHOT_G_MEAN_FLUX_OVER_ERROR < 0.3) & (1.086/df.PHOT_BP_MEAN_FLUX_OVER_ERROR < 0.3) & (1.086/df.PHOT_RP_MEAN_FLUX_OVER_ERROR < 0.3) & (df.PHOT_G_MEAN_MAG < 19.0) & (df.BP_G > 0.2) & (df.BP_G < 1.6) & (np.abs(df.G_RP - 0.45*(df.BP_RP + 0.2)) < 0.2) & (df.PHOT_BP_RP_EXCESS_FACTOR > (1.0 + 0.015*df.BP_RP*df.BP_RP)) & (df.PHOT_BP_RP_EXCESS_FACTOR < (1.3 + 0.060*df.BP_RP*df.BP_RP)) ) # Steve Kent's Gaia DR2 -> DES transformations, of the format: # des_mag = Gaia_G + intercept + slope*( (Gaia_BP-Gaia_G) - color0 ), # one relation for (Gaia_BP-Gaia_G) < color0 [blue], # and another for (Gaia_BP-Gaia_G) > color0 [red]. # # See S Kent's e-mail from 31 August 2018... 
skent1 = {} skent1['g.color0'] = 0.899 skent1['g.intercept'] = 1.339 skent1['g.blue.slope'] = 1.682 skent1['g.red.slope'] = 1.015 skent1['r.color0'] = 0.78 skent1['r.intercept'] = -0.124 skent1['r.blue.slope'] = -0.174 skent1['r.red.slope'] = 0.767 skent1['i.color0'] = 0.90 skent1['i.intercept'] = -0.674 skent1['i.blue.slope'] = -0.879 skent1['i.red.slope'] = -0.437 skent1['z.color0'] = 1.12 skent1['z.intercept'] = -1.216 skent1['z.blue.slope'] = -1.247 skent1['z.red.slope'] = -0.706 skent1['Y.color0'] = 0.91 skent1['Y.intercept'] = -1.052 skent1['Y.blue.slope'] = -1.441 skent1['Y.red.slope'] = -1.028 skent2 = {} skent2['g.color0'] = 0.899 skent2['g.intercept'] = 1.349 skent2['g.blue.slope'] = 1.702 skent2['g.red.slope'] = 0.907 skent2['r.color0'] = 0.78 skent2['r.intercept'] = -0.116 skent2['r.blue.slope'] = -0.151 skent2['r.red.slope'] = 0.747 skent2['i.color0'] = 0.90 skent2['i.intercept'] = -0.691 skent2['i.blue.slope'] = -0.925 skent2['i.red.slope'] = -0.410 skent2['z.color0'] = 1.12 skent2['z.intercept'] = -1.217 skent2['z.blue.slope'] = -1.282 skent2['z.red.slope'] = -0.637 skent2['Y.color0'] = 0.91 skent2['Y.intercept'] = -1.055 skent2['Y.blue.slope'] = -1.514 skent2['Y.red.slope'] = -0.992 skent3 = {} skent3['g.color0'] = 0.899 skent3['g.intercept'] = 1.306 skent3['g.blue.slope'] = 1.634 skent3['g.red.slope'] = 0.939 skent3['r.color0'] = 0.78 skent3['r.intercept'] = -0.136 skent3['r.blue.slope'] = -0.179 skent3['r.red.slope'] = 0.747 skent3['i.color0'] = 0.90 skent3['i.intercept'] = -0.678 skent3['i.blue.slope'] = -0.905 skent3['i.red.slope'] = -0.444 skent3['z.color0'] = 1.12 skent3['z.intercept'] = -1.193 skent3['z.blue.slope'] = -1.256 skent3['z.red.slope'] = -0.873 skent3['Y.color0'] = 0.91 skent3['Y.intercept'] = -1.034 skent3['Y.blue.slope'] = -1.464 skent3['Y.red.slope'] = -1.094 for band in ['g', 'r', 'i', 'z', 'Y']: # S Kent #1: desMagColName1 = """%sMAG_DES_1""" % (band.upper()) color0 = """%s.color0""" % (band) intercept = """%s.intercept""" % (band) blue_slope = """%s.blue.slope""" % (band) red_slope = """%s.red.slope""" % (band) df.loc[:,desMagColName1] = -9999. blueMask = (mask & (df.BP_G <= skent1[color0])) redMask = (mask & (df.BP_G > skent1[color0])) df.loc[blueMask,desMagColName1] = df.loc[blueMask,'PHOT_G_MEAN_MAG'] + \ + skent1[intercept] + skent1[blue_slope]*(df.loc[blueMask,'BP_G'] - skent1[color0]) df.loc[redMask,desMagColName1] = df.loc[redMask,'PHOT_G_MEAN_MAG'] + \ + skent1[intercept] + skent1[red_slope]*(df.loc[redMask,'BP_G'] - skent1[color0]) # S Kent #2: desMagColName2 = """%sMAG_DES_2""" % (band.upper()) color0 = """%s.color0""" % (band) intercept = """%s.intercept""" % (band) blue_slope = """%s.blue.slope""" % (band) red_slope = """%s.red.slope""" % (band) df.loc[:,desMagColName2] = -9999. blueMask = (mask & (df.BP_G <= skent2[color0])) redMask = (mask & (df.BP_G > skent2[color0])) df.loc[blueMask,desMagColName2] = df.loc[blueMask,'PHOT_G_MEAN_MAG'] + \ + skent2[intercept] + skent2[blue_slope]*(df.loc[blueMask,'BP_G'] - skent1[color0]) df.loc[redMask,desMagColName2] = df.loc[redMask,'PHOT_G_MEAN_MAG'] + \ + skent2[intercept] + skent2[red_slope]*(df.loc[redMask,'BP_G'] - skent1[color0]) # S Kent #3: desMagColName3 = """%sMAG_DES_3""" % (band.upper()) color0 = """%s.color0""" % (band) intercept = """%s.intercept""" % (band) blue_slope = """%s.blue.slope""" % (band) red_slope = """%s.red.slope""" % (band) df.loc[:,desMagColName3] = -9999. 
        blueMask = (mask & (df.BP_G <= skent3[color0]))
        redMask = (mask & (df.BP_G > skent3[color0]))
        df.loc[blueMask,desMagColName3] = df.loc[blueMask,'PHOT_G_MEAN_MAG'] \
            + skent3[intercept] + skent3[blue_slope]*(df.loc[blueMask,'BP_G'] - skent3[color0])
        df.loc[redMask,desMagColName3] = df.loc[redMask,'PHOT_G_MEAN_MAG'] \
            + skent3[intercept] + skent3[red_slope]*(df.loc[redMask,'BP_G'] - skent3[color0])

        # S Kent average...
        desMagColName = """%sMAG_DES""" % (band.upper())
        df.loc[:,desMagColName] = ( df.loc[:,desMagColName1] +
                                    df.loc[:,desMagColName2] +
                                    df.loc[:,desMagColName3] ) / 3.

    # Output results...
    outcolumns = columns + ['GMAG_DES','RMAG_DES','IMAG_DES','ZMAG_DES','YMAG_DES']
    df.to_csv(outputFile, columns=outcolumns, index=False, float_format='%.6f')

    return 0


##################################

if __name__ == "__main__":
    main()

##################################
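For reference, the piecewise relation applied in the loop above can be written as a small standalone helper; this is only a sketch (not part of the original script), using the skent1 g-band coefficients from the dictionaries above.

def gaia_to_des_mag(g_mag, bp_g, color0, intercept, blue_slope, red_slope):
    """des_mag = Gaia_G + intercept + slope*((Gaia_BP - Gaia_G) - color0),
    with the blue slope used at or below color0 and the red slope above it."""
    slope = blue_slope if bp_g <= color0 else red_slope
    return g_mag + intercept + slope * (bp_g - color0)

# g-band, S Kent set #1 coefficients:
print(gaia_to_des_mag(17.0, 0.70, color0=0.899, intercept=1.339,
                      blue_slope=1.682, red_slope=1.015))
# 17.0 + 1.339 + 1.682*(0.70 - 0.899) = 18.004...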
USDCAD: Quick drop yet to add a profit. GBPJPY: Who will last Yen or Pound?- Bullish. USDCHF: Correction Breakout Propelling towards Down. XAUUSD: Flying Far Away !!! Buyers Dominance.
#!/usr/bin/env python # encoding: utf-8 # redis for saving binlog file and position # please reverse the db + 1 for saving mysql changed data redis_url = "redis://127.0.0.1/0" cache_url = "redis://127.0.0.1/1" # mysql server id server_id = 1 # mysql connection setting mysql_settings = {'host': '192.168.1.34', 'port': 3306, 'user': 'mediawise', 'passwd': '123'} # watch databases setting # it can be set None or a tuple. # Watch all databases if set None # value format: None or database tuple schemas = None # watch tables setting like databases setting if with primary_key # value format: None or table tuple #tables = ("task",) tables = None # please set the unique key if without primary_key # value format: # {} or {"table_name": fields tuple, ....} tables_without_primary_key = {"db_test.task_test": ("uuid", )} # Read on binlog stream is blocking, default is True # If set to False, the cdc problem will exit when reading binlog over blocking = True # watch event setting events = ["insert", "update", "delete"] # turn off dumping trigger if set to 0 cache_max_rows = 2000000 dump_command = "python dump2csv.py -c dump.conf" log_level = "INFO" binlog_max_latency = 60000
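The config above does not name the binlog client that consumes it; here is a minimal sketch assuming the python-mysql-replication package (pymysqlreplication) and that the settings above are importable in the current scope.

from pymysqlreplication import BinLogStreamReader
from pymysqlreplication.row_event import (
    WriteRowsEvent, UpdateRowsEvent, DeleteRowsEvent)

# map the "events" names above onto the library's row-event classes
event_map = {"insert": WriteRowsEvent,
             "update": UpdateRowsEvent,
             "delete": DeleteRowsEvent}

stream = BinLogStreamReader(
    connection_settings=mysql_settings,
    server_id=server_id,
    blocking=blocking,                    # block and wait for new binlog events
    only_schemas=schemas,                 # None means all databases
    only_tables=tables,
    only_events=[event_map[e] for e in events],
)

for binlog_event in stream:
    for row in binlog_event.rows:
        print(binlog_event.table, row)    # hand off to redis / dump2csv in real use

stream.close()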
Here are some other recordings that I have played on. I have done 3 recordings with Green Hill Productions. Here they are. Featuring Jeff Hall, Christina Watson, Sandra Dudley and Mark Stephens. Including Lori Mechem on piano, Roger Spencer on bass, Chris Brown on drums, Don Aliquo on sax. Special guests- George Tidwell and James Hollihan. Fly to Forever- Great CD featuring Donna and Lori’s original material.
# Copyright (C) 2010-2014 GRNET S.A. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from xfeatures import XFeatures from groups import Groups from public import Public from node import Node from collections import defaultdict READ = 0 WRITE = 1 class Permissions(XFeatures, Groups, Public, Node): def __init__(self, **params): XFeatures.__init__(self, **params) Groups.__init__(self, **params) Public.__init__(self, **params) Node.__init__(self, **params) def access_grant(self, path, access, members=()): """Grant members with access to path. Members can also be '*' (all), or some group specified as 'owner:group'.""" if not members: return feature = self.xfeature_create(path) self.feature_setmany(feature, access, members) def access_set(self, path, permissions): """Set permissions for path. The permissions dict maps 'read', 'write' keys to member lists.""" r = permissions.get('read', []) w = permissions.get('write', []) if not r and not w: self.xfeature_destroy(path) return feature = self.xfeature_create(path) self.feature_clear(feature, READ) self.feature_clear(feature, WRITE) if r: self.feature_setmany(feature, READ, r) if w: self.feature_setmany(feature, WRITE, w) def access_get_for_bulk(self, perms): """Get permissions for paths.""" allowed = None d = defaultdict(list) for value, feature_id, key in perms: d[key].append(value) permissions = d if READ in permissions: allowed = 0 permissions['read'] = permissions[READ] del(permissions[READ]) if WRITE in permissions: allowed = 1 permissions['write'] = permissions[WRITE] del(permissions[WRITE]) return (permissions, allowed) def access_get(self, path): """Get permissions for path.""" feature = self.xfeature_get(path) if not feature: return {} permissions = self.feature_dict(feature) if READ in permissions: permissions['read'] = permissions[READ] del(permissions[READ]) if WRITE in permissions: permissions['write'] = permissions[WRITE] del(permissions[WRITE]) return permissions def access_members(self, path): feature = self.xfeature_get(path) if not feature: return [] permissions = self.feature_dict(feature) members = set() members.update(permissions.get(READ, [])) members.update(permissions.get(WRITE, [])) for m in set(members): parts = m.split(':', 1) if len(parts) != 2: continue user, group = parts members.remove(m) members.update(self.group_members(user, group)) return members def access_clear(self, path): """Revoke access to path (both permissions and public).""" self.xfeature_destroy(path) self.public_unset(path) def access_clear_bulk(self, paths): """Revoke access to path (both permissions and public).""" self.xfeature_destroy_bulk(paths) self.public_unset_bulk(paths) def access_check(self, path, access, member): """Return true if the member has this access to the path.""" feature = self.xfeature_get(path) if not feature: return False members = self.feature_get(feature, access) if member in members or '*' in members: return True for owner, group in 
self.group_parents(member): if owner + ':' + group in members: return True return False def access_check_bulk(self, paths, member): rows = None q = ("select x.path, xvals.value, xvals.feature_id, xvals.key " "from xfeaturevals xvals join xfeatures x " "on xvals.feature_id = x.feature_id " "where x.path in (%s)") % ','.join('?' for _ in paths) self.execute(q, paths) rows = self.fetchall() if rows: access_check_paths = {} for path, value, feature_id, key in rows: try: access_check_paths[path].append((value, feature_id, key)) except KeyError: access_check_paths[path] = [(value, feature_id, key)] return access_check_paths return None def access_inherit(self, path): """Return the paths influencing the access for path.""" # r = self.xfeature_inherit(path) # if not r: # return [] # # Compute valid. # return [x[0] for x in r if x[0] in valid] # Only keep path components. parts = path.rstrip('/').split('/') valid = [] for i in range(1, len(parts)): subp = '/'.join(parts[:i + 1]) valid.append(subp) if subp != path: valid.append(subp + '/') return [x for x in valid if self.xfeature_get(x)] def access_inherit_bulk(self, paths): """Return the paths influencing the access for paths.""" # Only keep path components. valid = [] for path in paths: parts = path.rstrip('/').split('/') for i in range(1, len(parts)): subp = '/'.join(parts[:i + 1]) valid.append(subp) if subp != path: valid.append(subp + '/') valid = self.xfeature_get_bulk(valid) return [x[1] for x in valid] def access_list_paths(self, member, prefix=None, include_owned=False, include_containers=True): """Return the list of paths granted to member. Keyword arguments: prefix -- return only paths starting with prefix (default None) include_owned -- return also paths owned by member (default False) include_containers -- return also container paths owned by member (default True) """ q = ("select distinct path from xfeatures inner join " " (select distinct feature_id, key from xfeaturevals inner join " " (select owner || ':' || name as value from groups " " where member = ? union select ? union select '*') " " using (value)) " "using (feature_id)") p = (member, member) if prefix: q += " where " paths = self.access_inherit(prefix) or [prefix] q += ' or '.join("path like ? escape '\\'" for _ in paths) p += tuple(self.escape_like(path) + '%' for path in paths) self.execute(q, p) l = [r[0] for r in self.fetchall()] if include_owned: node = self.node_lookup(member) select_containers = "select node from nodes where parent = ? " q = ("select path from nodes where parent in (%s) " % select_containers) args = [node] if include_containers: q += ("or node in (%s)" % select_containers) args += [node] self.execute(q, args) l += [r[0] for r in self.fetchall() if r[0] not in l] return l def access_list_shared(self, prefix=''): """Return the list of shared paths.""" q = "select path from xfeatures where " paths = self.access_inherit(prefix) or [prefix] q += ' or '.join("path like ? escape '\\'" for _ in paths) p = tuple(self.escape_like(path) + '%' for path in paths) self.execute(q, p) return [r[0] for r in self.fetchall()]
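To make the prefix computation in access_inherit easier to follow, here is the same loop as a standalone snippet (pure Python, no database); the example path is made up.

def candidate_prefixes(path):
    # every ancestor of the path, with and without a trailing slash,
    # mirroring the loop in access_inherit above
    parts = path.rstrip('/').split('/')
    valid = []
    for i in range(1, len(parts)):
        subp = '/'.join(parts[:i + 1])
        valid.append(subp)
        if subp != path:
            valid.append(subp + '/')
    return valid

print(candidate_prefixes('account/container/photos/2014/img.jpg'))
# ['account/container', 'account/container/',
#  'account/container/photos', 'account/container/photos/',
#  'account/container/photos/2014', 'account/container/photos/2014/',
#  'account/container/photos/2014/img.jpg']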
Memar architects home page is just one of the many collections of pictures or photos that are on this website. Memar architects home page is posted on the category Design Your Home in the awanshop.co website. This post of "memar architects home page" was published on 01-08-2018 by jonshon and has been viewed 180,445 times. We hope you can find what you need here. We always effort to show a picture with HD resolution or at least with perfect images. Memar architects home page can be beneficial inspiration for those who seek an image according specific categories, you can find it in this site. Finally all pictures we have been displayed in this site will inspire you all. Thank you for visiting. Home design firms architects magazine kerala plans homes plan is posted in the category New Home Plans and published on 01-08-2018 by dave. Image source: pixgateway.com. Click on the title to see more.. Memar architects home page is posted in the category Design Your Home and published on 01-08-2018 by dave. Image source: memararchitects.com. Click on the title to see more.. Home design curtain wars architects decorators and the twentieth is posted in the category Design Your Home and published on 01-08-2018 by paul. Image source: benrolfe.com. Click on the title to see more.. Page 23 u203au203a album home designs ideas hilalpost com is posted in the category Design Your Home and published on 01-08-2018 by dave. Image source: hilalpost.com. Click on the title to see more.. Custom home plans florida orlando florida architects fl house is posted in the category Custom Home Plans and published on 01-08-2018 by dave. Image source: ubiken.com. Click on the title to see more.. Zen dream home with japanese influences by metropole architects is posted in the category Dream Home Design and published on 01-08-2018 by paul. Image source: trendir.com. Click on the title to see more.. Dream home enhanced by vegetation rattan house by guz architects in is posted in the category Dream Home Design and published on 01-08-2018 by admin. Image source: homesthetics.net. Click on the title to see more.. Zen dream home with japanese influences by metropole architects is posted in the category Dream Home Design and published on 01-08-2018 by admin. Image source: mayapo.co. Click on the title to see more.. Zen dream home with japanese influences by metropole architects is posted in the category Dream Home Design and published on 01-08-2018 by admin. Image source: trendir.com. Click on the title to see more..
## A script for extracting info about the patients used in the analysis ## Load necessary modules from rpy2 import robjects as ro import numpy as np import os ro.r('library(survival)') ##This call will only work if you are running python from the command line. ##If you are not running from the command line manually type in your paths. BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) f=open(os.path.join(BASE_DIR,'tcga_data','KIRC','clinical','nationwidechildrens.org_clinical_follow_up_v1.0_kirc.txt')) f.readline() f.readline() f.readline() data=[i.split('\t') for i in f] ## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent ## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data. ## This required an empty value in the list initialization. ## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...] clinical=[['','','']] for i in data: try: if clinical[-1][0]==i[0]: if i[8]=='Alive': clinical[-1]=[i[0],int(i[9]),'Alive'] elif i[8]=='Dead': clinical[-1]=[i[0],int(i[10]),'Dead'] else: pass else: if i[8]=='Alive': clinical.append([i[0],int(i[9]),'Alive']) elif i[8]=='Dead': clinical.append([i[0],int(i[10]),'Dead']) else: pass except: pass ## Removing the empty value. clinical=clinical[1:] ## Grade, sex, and age information were taken from the "clinical_patient" file. A dictionary was created for sex and grade. more_clinical={} grade_dict={} grade_dict['G1']=1 grade_dict['G2']=2 grade_dict['G3']=3 grade_dict['G4']=4 sex_dict={} sex_dict['MALE']=0 sex_dict['FEMALE']=1 ## The "clinical_patient" file can also contain patients not listed in the follow_up files. ## In these cases the clinical data for these patients gets appended to a new clinical list. f=open(os.path.join(BASE_DIR,'tcga_data','KIRC','clinical','nationwidechildrens.org_clinical_patient_kirc.txt')) f.readline() f.readline() f.readline() clinical4=[] data=[i.split('\t') for i in f] for i in data: try: more_clinical[i[0]]=[grade_dict[i[4]],sex_dict[i[8]],int(i[-16])] if i[24]=='Alive': clinical4.append([i[0],int(i[25]),'Alive']) elif i[24]=='Dead': clinical4.append([i[0],int(i[26]),'Dead']) else: pass except: pass new_clinical=[] ##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files ##All the clinical data is merged checking which data is the most up to date for i in clinical4: if i[0] not in [j[0] for j in clinical]: new_clinical.append(i) else: if i[1]<=clinical[[j[0] for j in clinical].index(i[0])][1]: new_clinical.append(clinical[[j[0] for j in clinical].index(i[0])]) else: new_clinical.append(i) ##also do the reverse since clinical can contain patients not included in clinical4 for i in clinical: if i[0] not in [j[0] for j in new_clinical]: new_clinical.append(i) ## only patients who had a follow up time greater than 0 days are included in the analysis clinical=[i for i in new_clinical if i[1]>0] final_clinical=[] ## A new list containing both follow up times and grade, sex, and age is constructed. ## Only patients with grade, sex, and age information are included. ## Data is [[Patient ID, time (days), vital status, grade, sex, age at diagnosis],...] 
for i in clinical: if i[0] in more_clinical: final_clinical.append(i+more_clinical[i[0]]) ## Need to map the mRNA files to the correct patients ## The necessary information is included in the FILE_SAMPLE_MAP.txt file f=open(os.path.join(BASE_DIR,'tcga_data','KIRC','FILE_SAMPLE_MAP.txt')) f.readline() data=[i.strip().split() for i in f if i!='\n'] ## 01 indicates a primary tumor, and only primary tumors are included in this analysis TCGA_to_mrna={} for i in data: ## The normalized data files are used if 'genes.normalized_results' in i[0]: if i[1].split('-')[3][:-1]=='01': x=''.join([k+j for k,j in zip(['','-','-'],i[1].split('-')[:3])]) TCGA_to_mrna[x]=TCGA_to_mrna.get(x,[])+[i[0]] clinical_and_files=[] ## We only care about patients that contained complete clinical information for i in final_clinical: if TCGA_to_mrna.has_key(i[0]): ## The mRNA files are added to the clinical list ## Data structure: [[Patient ID, time (days), vital status, grade, sex, age at diagnosis,[mRNA files]],...] clinical_and_files.append(i+[TCGA_to_mrna[i[0]]]) else: pass ##print average age at diagnosis age=np.mean([i[5] for i in clinical_and_files]) ##print number of males males=len([i for i in clinical_and_files if i[4]==0]) ##print number of females females=len([i for i in clinical_and_files if i[4]==1]) ##to get the median survival we need to call survfit from r ##prepare variables for R ro.globalenv['times']=ro.IntVector([i[1] for i in clinical_and_files]) ##need to create a dummy variable group ro.globalenv['group']=ro.IntVector([0 for i in clinical_and_files]) ##need a vector for deaths death_dic={} death_dic['Alive']=0 death_dic['Dead']=1 ro.globalenv['died']=ro.IntVector([death_dic[i[2]] for i in clinical_and_files]) res=ro.r('survfit(Surv(times,died) ~ as.factor(group))') #the number of events(deaths) is the fourth column of the output deaths=str(res).split('\n')[-2].strip().split()[3] #the median survival time is the fifth column of the output median=str(res).split('\n')[-2].strip().split()[4] ##write data to a file f=open('patient_info.txt','w') f.write('Average Age') f.write('\t') f.write('Males') f.write('\t') f.write('Females') f.write('\t') f.write('Deaths') f.write('\t') f.write('Median Survival') f.write('\n') f.write(str(age)) f.write('\t') f.write(str(males)) f.write('\t') f.write(str(females)) f.write('\t') f.write(deaths) f.write('\t') f.write(median)
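The R round-trip above is only used to read off the number of deaths and the median survival time; for comparison, the same two numbers can be computed natively in Python with the lifelines package (a sketch, not part of the original pipeline, and it assumes a reasonably recent lifelines version).

from lifelines import KaplanMeierFitter

death_dic = {'Alive': 0, 'Dead': 1}
times = [i[1] for i in clinical_and_files]
died = [death_dic[i[2]] for i in clinical_and_files]

kmf = KaplanMeierFitter()
kmf.fit(times, event_observed=died)

deaths = sum(died)                      # number of observed events
median = kmf.median_survival_time_      # median survival time in days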
Spirit Energy and the Nova licence partnership have awarded Seadrill Operations Ltd a rig contract for the Nova development in Norway. Seadrill will operate the West Mira rig on behalf of Northern Drilling. The West Mira is contracted to drill six subsea wells on the Nova field starting in the first half of 2020. Operations with the semi-submersible drilling rig are planned to last approximately one year, with first oil scheduled for 2021. Nova will be developed as a subsea tie-back connecting two templates to the nearby Gjøa platform for processing and export. Gjøa will also provide gas lift to the field and water injection for pressure support in the reservoir. Power on the Nova field comes via the Gjøa platform from shore. Nova was discovered in 2012 and is located in the northeastern part of the Norwegian North Sea, about 17 kilometers southwest of the Gjøa platform and about 120 kilometers northwest of Bergen. Spirit Energy has a 20% interest in Nova. Partners are Wintershall Norge AS (35%, operator), Capricorn Norge AS (20%), Edison Norge AS (15%) and DEA Norge AS (10%).
from __future__ import absolute_import, unicode_literals from voeventdb.server.database.models import Voevent, Cite, Coord from voeventdb.server.database.query import coord_cone_search_clause import voeventdb.server.restapi.v1.apierror as apierror from voeventdb.server.restapi.v1.filter_base import ( add_to_filter_registry, QueryFilter) import iso8601 from sqlalchemy import (or_, and_, exists, ) from sqlalchemy.orm import aliased import six if six.PY3: from urllib.parse import quote_plus else: from urllib import quote_plus from flask import json @add_to_filter_registry class AuthoredSince(QueryFilter): """ Return only VOEvents with a ``Who.Date`` entry dated after the given time. (Time-range is inclusive, i.e. ``>=``) Date-time strings passed should be in a format parseable by the `iso8601.parse_date() <https://bitbucket.org/micktwomey/pyiso8601/#rst-header-parsed-formats>`_ function (see :py:attr:`example_values`). """ querystring_key = 'authored_since' example_values = ['2015-10-09T21:34:19', '2015-10-09', '2015-10', ] def filter(self, filter_value): bound_dt = iso8601.parse_date(filter_value) return Voevent.author_datetime >= bound_dt @add_to_filter_registry class AuthoredUntil(QueryFilter): """ Return only VOEvents with a ``Who.Date`` entry dated before the given time. (Time-range is inclusive, i.e. ``<=``) Date-time strings passed should be in a format parseable by the `iso8601.parse_date() <https://bitbucket.org/micktwomey/pyiso8601/#rst-header-parsed-formats>`_ function (see :py:attr:`example_values`). """ querystring_key = 'authored_until' example_values = ['2015-10-09T21:34:19', '2015-10-09', '2015-10', ] def filter(self, filter_value): bound_dt = iso8601.parse_date(filter_value) return Voevent.author_datetime <= bound_dt @add_to_filter_registry class CitedByAny(QueryFilter): """ Return only VOEvents which are cited by another VOEvent in the database. Applied via query-strings ``cited=true`` or ``cited=false`` """ querystring_key = 'cited' example_values = ['true', 'false' ] def filter(self, filter_value): cite2 = aliased(Cite) filter_q = exists().where(Voevent.ivorn == cite2.ref_ivorn) if filter_value.lower() == 'true': return filter_q elif filter_value.lower() == 'false': return ~filter_q else: raise apierror.InvalidQueryString(self.querystring_key, filter_value) @add_to_filter_registry class ConeSearch(QueryFilter): """ Return only VOEvents with co-ords in the given cone. Cone specified as a 3-element list in JSON format:: [ra,dec,radius] (values in decimal degrees). """ querystring_key = 'cone' example_values = [ '[10,20,5]', '[359.9,-30,5]' ] simplejoin_tables = [Coord, ] def filter(self, filter_value): try: ra, dec, radius = json.loads(filter_value) except: raise apierror.InvalidQueryString(self.querystring_key, filter_value) if dec < -90.0 or dec > 90.0: raise apierror.InvalidQueryString(self.querystring_key, filter_value, reason="invalid declination value") return coord_cone_search_clause(ra, dec, radius) @add_to_filter_registry class CoordsAny(QueryFilter): """ Return only VOEvents which have / do not have associated co-ord positions. 
Applied via query-strings ``coord=true`` or ``coord=false`` """ querystring_key = 'coord' example_values = ['true', 'false' ] def filter(self, filter_value): filter_q = Voevent.coords.any() if filter_value.lower() == 'true': return filter_q elif filter_value.lower() == 'false': return ~filter_q else: raise apierror.InvalidQueryString(self.querystring_key, filter_value) @add_to_filter_registry class DecGreaterThan(QueryFilter): """ Return VOEvents with position with Dec greater than given value. Dec should be specified in decimal degrees. """ querystring_key = 'dec_gt' example_values = ['0', '-45.123' ] simplejoin_tables = [Coord, ] def filter(self, filter_value): try: min_dec = float(filter_value) if min_dec < -90.0 or min_dec > 90.0: raise ValueError except: raise apierror.InvalidQueryString(self.querystring_key, filter_value, reason="invalid declination value") return Coord.dec > min_dec @add_to_filter_registry class DecLessThan(QueryFilter): """ Return VOEvents with position with Dec less than given value. Dec should be specified in decimal degrees. """ querystring_key = 'dec_lt' example_values = ['0', '-45.123' ] simplejoin_tables = [Coord, ] def filter(self, filter_value): try: max_dec = float(filter_value) if max_dec < -90.0 or max_dec > 90.0: raise ValueError except: raise apierror.InvalidQueryString(self.querystring_key, filter_value, reason="invalid declination value") return Coord.dec < max_dec @add_to_filter_registry class IvornContains(QueryFilter): """ Return only VOEvents which have the given substring in their IVORN. """ querystring_key = 'ivorn_contains' example_values = ['BAT_GRB_Pos', 'XRT'] def filter(self, filter_value): return Voevent.ivorn.like('%{}%'.format(filter_value)) def combinator(self, filters): """AND""" return and_(filters) @add_to_filter_registry class IvornPrefix(QueryFilter): """ Return only VOEvents where the IVORN begins with the given value. Note that the value passed should be URL-encoded if it contains the ``#`` character e.g.:: quote_plus('ivo://nvo.caltech/voeventnet/catot#1404') """ querystring_key = 'ivorn_prefix' example_values = [ 'ivo://nasa.gsfc.gcn', quote_plus('ivo://nvo.caltech/voeventnet/catot#1404') ] def filter(self, filter_value): return Voevent.ivorn.like('{}%'.format(filter_value)) def combinator(self, filters): """OR""" return or_(filters) @add_to_filter_registry class RefAny(QueryFilter): """ Return only VOEvents which make / don't make reference to any other VOEvents. Applied via query-strings ``ref_any=true`` or ``ref_any=false``. NB 'true'/'false' string-values are case-insensitive, so e.g. 'true', 'True', 'TRUE', 'tRUe' are all valid. """ querystring_key = 'ref_any' example_values = ['true', 'True', 'false' ] def filter(self, filter_value): filter_q = Voevent.cites.any() if filter_value.lower() == 'true': return filter_q elif filter_value.lower() == 'false': return ~filter_q else: raise apierror.InvalidQueryString(self.querystring_key, filter_value) @add_to_filter_registry class RefContains(QueryFilter): """ Return VOEvents which reference an IVORN containing the given substring. """ querystring_key = 'ref_contains' example_values = [ quote_plus('BAT_GRB_Pos'), quote_plus('GBM_Alert'), ] def filter(self, filter_value): return Voevent.cites.any( Cite.ref_ivorn.like('%{}%'.format(filter_value)) ) def combinator(self, filters): """OR""" return or_(filters) @add_to_filter_registry class RefExact(QueryFilter): """ Return only VOEvents which contain a ref to the given (url-encoded) IVORN. 
""" querystring_key = 'ref_exact' example_values = [ quote_plus('ivo://nasa.gsfc.gcn/SWIFT#BAT_GRB_Pos_649113-680'), quote_plus( 'ivo://nasa.gsfc.gcn/Fermi#GBM_Alert_2015-08-10T14:49:38.83_460910982_1-814'), ] def filter(self, filter_value): return Voevent.cites.any(Cite.ref_ivorn == filter_value) def combinator(self, filters): """OR""" return or_(filters) @add_to_filter_registry class RoleEquals(QueryFilter): querystring_key = 'role' example_values = [ 'observation', 'utility', 'test' ] def filter(self, filter_value): if filter_value not in self.example_values: raise apierror.InvalidQueryString( self.querystring_key, filter_value) return Voevent.role == filter_value def combinator(self, filters): """OR""" return or_(filters) @add_to_filter_registry class StreamEquals(QueryFilter): querystring_key = 'stream' example_values = [ 'nasa.gsfc.gcn#SWIFT', 'nvo.caltech/voeventnet/catot' ] def filter(self, filter_value): return Voevent.stream == filter_value def combinator(self, filters): """OR""" return or_(filters)
Wall Street had been looking for NetApp to report fiscal third quarter earnings of 54 cents a share on revenue of $1.62 billion. After Wall Street upped its expectations from previous estimates, NetApp answered the call with much higher results in its fiscal third quarter earnings statement. The data storage business reported a net income of $158 million, or 43 cents a share (statement). Non-GAAP earnings were 67 cents a share on revenue of $1.630 billion. Adjusted slightly from the forecast at the end of the second quarter, Wall Street was looking for NetApp to report fiscal third quarter earnings of 56 cents a share on revenue of $1.62 billion. Following the positive announcement, NetApp shares were up by approximately 2.2 percent in after-hours trading. NetApp delivered solid financial results again this quarter. The value proposition of Data ONTAP for shared storage infrastructures and the price performance characteristics of the E-series for dedicated storage infrastructures helped drive strong growth in our branded business. The robust adoption of clustered Data ONTAP, our industry-leading flash portfolio, and our best-of-breed partnerships position us well for continued growth. While fourth quarter guidance expectations line up relatively well with Wall Street's, NetApp still looked wary, offering the caveat that its projections account for "continued uncertainty in the macroeconomic environment." Still, NetApp is estimating fiscal fourth quarter revenue to be in the range of $1.700 billion to $1.800 billion with non-GAAP earnings of 65 to 70 cents per share. Analysts are expecting NetApp to report fourth quarter earnings of 65 cents a share on revenue of $1.75 billion.
""" Provides fixtures to initialize the web driver. """ from behave import fixture, use_fixture from behave_webdriver.utils import _from_string, _from_env from behave_webdriver.driver import BehaveDriverMixin from functools import partial from behave_webdriver import transformers import six _env_webdriver_name = 'env' class DriverNotSet: pass @fixture def fixture_browser(context, *args, **kwargs): """ webdriver setup fixture for behave context; sets ``context.behave_driver``. Will destroy the driver at the end of this fixture usage. :param webdriver: the webdriver to use -- can be a string (e.g. ``"Chrome"``) or a webdriver class. If omitted, will attempt to use the BEHAVE_WEBDRIVER environment variable :param default_driver: a fallback driver if webdriver keyword is not provided AND the BEHAVE_WEBDRIVER environment variable is not set. Defaults to 'Chrome.headless' :param args: arguments that will be passed as is to the webdriver. :param kwargs: keywords arguments that will be passed as is to the webdriver. Basic usage: >>> from behave import use_fixture >>> from behave_webdriver.fixtures import fixture_browser >>> def before_all(context): ... use_fixture(fixture_browser, context, webdriver='firefox') You may also provide webdriver class. Just be sure it inherits (or otherwise has method from) BehaveDriverMixin >>> from behave import use_fixture >>> from behave_webdriver.fixtures import fixture_browser >>> from behave_webdriver.driver import BehaveDriverMixin >>> from selenium.webdriver import Firefox >>> class FirefoxDriver(BehaveDriverMixin, Firefox): ... pass >>> def before_all(context): ... use_fixture(fixture_browser, context, webdriver=FirefoxDriver) positional arguments and additional keyword arguments are passed to the webdriver init: >>> from behave import use_fixture >>> from behave_webdriver.fixtures import fixture_browser >>> from behave_webdriver.driver import ChromeOptions >>> def before_all(context): ... options = ChromeOptions() ... options.add_argument('--ignore-gpu-blacklist') ... use_fixture(fixture_browser, context, webdriver='chrome', options=options) If the ``webdriver`` keyword is omitted, will attampt to get the driver from BEHAVE_WEBDRIVER or will use headless chrome as a final fallback if environment is not set and there is no ``default_driver`` specified >>> from behave import use_fixture >>> from behave_webdriver.fixtures import fixture_browser >>> def before_all(context): ... # try to use driver from BEHAVE_WEBDRIVER environment variable; use firefox as a fallback when env not set ... 
use_fixture(fixture_browser, context, default_driver='firefox') """ webdriver = kwargs.pop('webdriver', None) default_driver = kwargs.pop('default_driver', 'Chrome.headless') if isinstance(webdriver, six.string_types): webdriver = _from_string(webdriver) if webdriver is None: webdriver = _from_env(default_driver=default_driver) old_driver_class = context.BehaveDriver if 'BehaveDriver' in context else DriverNotSet old_driver = context.behave_driver if 'behave_driver' in context else DriverNotSet context.behave_driver = webdriver(*args, **kwargs) def cleanup_driver(ctx, old_driver, old_driver_class): try: ctx.behave_driver.quit() finally: if old_driver_class is DriverNotSet and 'BehaveDriver' in ctx: del ctx.BehaveDriver else: ctx.BehaveDriver = old_driver_class if old_driver is DriverNotSet and 'behave_driver' in ctx: del ctx.behave_driver else: ctx.behave_driver = old_driver cleanup = partial(cleanup_driver, context, old_driver, old_driver_class) context.add_cleanup(cleanup) def before_all_factory(*args, **kwargs): """ Create and return a ``before_all`` function that use the ``fixture_browser`` fixture with the corresponding arguments :param args: positional arguments of ``fixture_browser`` :param kwargs: keywords arguments of ``fixture_browser`` >>> from behave_webdriver.fixtures import before_all_factory >>> before_all = before_all_factory(webdriver='firefox') """ def before_all(context): use_fixture(fixture_browser, context, *args, **kwargs) return before_all def before_feature_factory(*args, **kwargs): """ Create and return a ``before_feature` function that use the ``fixture_browser`` fixture with the corresponding arguments :param args: positional arguments of ``fixture_browser`` :param kwargs: keywords arguments of ``fixture_browser`` >>> from behave_webdriver.fixtures import before_feature_factory >>> before_feature = before_feature_factory(webdriver='firefox') """ def before_feature(context, feature): use_fixture(fixture_browser, context, *args, **kwargs) return before_feature def before_scenario_factory(*args, **kwargs): """ Create and return a ``before_scenario`` function that use the ``fixture_browser`` fixture with the corresponding arguments :param args: positional arguments of ``fixture_browser`` :param kwargs: keywords arguments of ``fixture_browser`` >>> from behave_webdriver.fixtures import before_scenario_factory >>> before_scenario = before_scenario_factory(webdriver='firefox') """ def before_scenario(context, scenario): use_fixture(fixture_browser, context, *args, **kwargs) return before_scenario class TransformerNotSet: pass @fixture def transformation_fixture(context, transformer_class, *args, **kwargs): old_transformer = context.transformer_class if 'transformer_class' in context else TransformerNotSet transformer_class = partial(transformer_class, *args, **kwargs) context.transformer_class = transformer_class def cleanup(context, old): if old is TransformerNotSet: del context.transformer_class else: context.transformer_class = old cleanup_transformer = partial(cleanup, context, old_transformer) context.add_cleanup(cleanup_transformer) def use_fixture_tag(context, tag, *args, **kwargs): if not tag.startswith('fixture'): return if tag.startswith('fixture.webdriver'): browser_name = '.'.join(tag.split('.')[2:]) if browser_name == 'browser': browser_name = 'Chrome.headless' use_fixture(fixture_browser, context, *args, **kwargs) elif tag.startswith('fixture.transformer'): transformer_name = tag.split('.')[-1] transformer_class = getattr(transformers, transformer_name) 
use_fixture(transformation_fixture, context, transformer_class, **kwargs)
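A minimal sketch of wiring use_fixture_tag into a behave environment.py through behave's standard before_tag hook; the hook name comes from behave itself, and everything else here is illustrative rather than required:

# environment.py -- illustrative sketch only
from behave_webdriver.fixtures import use_fixture_tag

def before_tag(context, tag):
    # Delegate any @fixture.* tag (e.g. @fixture.webdriver.firefox) to the
    # dispatcher defined above; tags that don't start with "fixture" are ignored.
    use_fixture_tag(context, tag)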
Support that holds up when you want to get down. This moisture-wicking bralette was designed to feel as good as it looks, with T-strap detailing and flatlock construction. A high neckline gives you plenty of coverage for long holds in Sirsasana or burpees in the park. Moisture-wicking polyester, recycled polyester and spandex performance stretch jersey allows for a full range of motion. High neck with back-joining T-strap detail gives an eye-catching look. Mesh bra liner has moulded cups and brushed elastic that wicks sweat and feels nice against skin. Flatlock construction reduces chafing and provides durability.
#!/usr/bin/env python # -*- coding: utf-8 -*- #coding: utf-8 # #__all__=['devsuit', 'android', 'image', 'base', 'patch', 'ios', 'device'] __version__ = '0.7.2' ANDROID = 'android' IOS = 'ios' WINDOWS='windows' EV_DOWN = 'down' EV_UP = 'up' EV_DOWN_AND_UP = 'down_and_up' import os import json import subprocess import signal, sys # just import import monitor def _sig_handler(signum, frame): print >>sys.stderr, 'Signal INT catched !!!' sys.exit(1) signal.signal(signal.SIGINT, _sig_handler) from airtest import devsuit defaultConfigFile = 'air.json' defaultDevice = 'android' def _safe_load_config(cfg_file): if os.path.exists(cfg_file): return json.load(open(cfg_file)) return {} # ## ========================================================== # def _android_start(serialno, params): package = params.get('package') activity = params.get('activity') subprocess.call(['adb', '-s', serialno, 'shell', 'am', 'start', '-n', '/'.join([package, activity])]) def _android_stop(serialno, params): package = params.get('package') subprocess.call(['adb', '-s', serialno, 'shell', 'am', 'force-stop', package]) def _windows_start(basename, params={}): dir_ = params.get('dir') or '.' os.system('cd /d %s && start %s' %(dir_, basename)) def _windows_stop(basename, params={}): basename = basename.lower() if not basename.endswith('.exe'): basename += '.exe' os.system('taskkill /t /f /im %s' %(basename)) def _run_control(devno, device=None, action='start'): device = device or defaultDevice cfg = _safe_load_config(defaultConfigFile) func = '_%s_%s'%(device, action) if func not in globals(): raise RuntimeError('device(%s) %s method not exists' % (device, action)) return globals()[func](devno, cfg.get(device, {})) def start(devno, device=None): _run_control(devno, device, 'start') def stop(devno, device=None): _run_control(devno, device, 'stop') # ## ---------------------------------------------------------- # def connect(phoneno=None, appname=None, device=None, monitor=True, logfile='log/airtest.log'): ''' Connect device @param phoneno: If phoneno is None, then get device serialno from `adb devices` @param device: can be one of <android|windows|ios> @param monitor: wether to enable CPU monitor ''' if not phoneno: devs = getDevices() if not devs: sys.exit('adb: No devices found') if len(devs) != 1: sys.exit('adb: Too many devices, need to specify phone serialno') phoneno = devs[0][0] device = device or defaultDevice if device == ANDROID: from airtest.device import android subprocess.call(['adb', 'start-server']) if not phoneno: phoneno = [d for d, t in getDevices() if t == 'device'][0] devClass = android.Device elif device == IOS: from airtest.device import ios devClass = ios.Device elif device == WINDOWS: from airtest.device import windows devClass = windows.Device elif device == 'dummy': # this class is only for test from airtest.device import dummy devClass = dummy.Device else: raise RuntimeError('device type not recognize') return devsuit.DeviceSuit(device, devClass, phoneno, appname=appname, logfile=logfile, monitor=monitor) def getDevices(device='android'): ''' @return devices list ''' subprocess.call(['adb', 'start-server']) output = subprocess.check_output(['adb', 'devices']) result = [] for line in str(output).splitlines()[1:]: ss = line.strip().split() if len(ss) == 2: (phoneno, state) = ss result.append((phoneno, state)) return result
e. Review the titles (if you use them). If you don’t know a woman’s marital status, err on the side of addressing her as Ms.
2) Addressee/Salutation – Once you’ve checked the name fields, you need to check the Addressee/Salutation tab to be sure that you have the correct formulas.
a. Make sure that you know your organization’s preferred formatting (if you don’t have one, now is the perfect time to set one!).
b. Don’t forget to check the additional Addressee/Salutations if you use them!
3) Address & Contact info – You can’t raise money without contacting people!
a. If you have Address Finder or Address Accelerator, check to see if the address has been standardized, and review the Address Attribute if it has not. Fix it if you can (using Address Accelerator).
b. Check the email address for obvious errors. You won’t be able to send an email to [email protected].
c. Make sure the phone number has the correct number of digits.
d. Remove any non-address information from the address box and put it where it belongs.
4) Mailing/Contact status – Constituent preference and consent are becoming even more important and, for those of you in Europe, they may even be the law.
a. If the address box is blank, make sure you set the record so that it will be suppressed when you pull a direct mail list.
b. Review notes, annotations, etc. and make sure that any indication of communication preferences has been entered in the correct location.
5) Required fields for your org – If you have certain fields that are specific to your organization, make sure you check them so that your reports, queries and lists all pull accurately.
What would you add to this list? What things do you regularly check in your database to help keep it in tip-top shape?
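If you can export the relevant fields to a CSV file, a quick script can flag the obvious email and phone problems from 3b and 3c before you start fixing records by hand. This is only a rough sketch in plain Python; the export file and its column names (Name, Email, Phone) are assumptions about your own data, not anything your database vendor provides.

import csv
import re

EMAIL_RE = re.compile(r'^[^@\s]+@[^@\s]+\.[^@\s]+$')  # rough sanity check, not full validation

def flag_contact_problems(csv_path):
    # Print rows whose email or phone fails a basic format check.
    with open(csv_path, newline='') as f:
        for row in csv.DictReader(f):
            name = row.get('Name', '')
            email = (row.get('Email') or '').strip()
            digits = re.sub(r'\D', '', row.get('Phone') or '')
            if email and not EMAIL_RE.match(email):
                print('Check email for %s: %s' % (name, email))
            if digits and len(digits) not in (10, 11):  # North American lengths; adjust for your region
                print('Check phone for %s: %s' % (name, row.get('Phone')))

# flag_contact_problems('constituent_export.csv')  # hypothetical export file name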
#!/usr/bin/env python """ Synchronise the location of files in the Mendeley database using a relative base path by storing the locations in a text database that can by synchronised. Currently ignores files outside of the base path. It will also only add new files, it won't clean deleted files. Designed to be used with something like Unison or DropBox to synchronise the PDF files. """ from argparse import ArgumentParser import os import sys import urllib from itertools import ifilter try: import sqlite3 except: from pysqlite2 import dbapi2 as sqlite3 def main(): # Parse command line arguments parser = ArgumentParser( prog='mendeleyfilesync.py', description="Synchronise the location of files in the Mendeley " "database using a relative base path.") parser.add_argument('mendeley_database', help='Path to the Mendeley sqlite database, eg. ' '"~/.local/share/data/Mendeley Ltd./Mendeley Desktop/' '[email protected]@www.mendeley.com.sqlite"') parser.add_argument('text_database', help="Path to the text datbase used to store file locations, " "eg. ~/.mendeley_files.dat") parser.add_argument('file_path', help="Directory used to store PDF files") parser.add_argument('-d', '--dry-run', action='store_const', dest='dry_run', const=True, default=False, help="Display changes that would be made but don't actually " "modify the database") parser.add_argument('-f', '--force-update', action='store_const', dest='force_update', const=True, default=False, help="Replace file path in Mendeley with path from the text " "database when there is a conflict") args = parser.parse_args() # Check path to Mendeley database file if not os.path.isfile(args.mendeley_database): sys.stderr.write('File "%s" does not exist\n' % args.mendeley_database) exit(1) # Check path to directory where PDFs are stored if not os.path.isdir(args.file_path): sys.stderr.write('"%s" is not a directory\n' % args.file_path) exit(1) with MendeleyDB( args.mendeley_database, args.file_path, args.dry_run) as mendeley_db: run_synchronisation( mendeley_db, args.text_database, args.dry_run, args.force_update) class MendeleyDB(object): """ An interface to the Mendeley database """ def __init__(self, path, file_path, dry_run=False): self.path = path self.base_url = directory_to_url(file_path) self.dry_run = dry_run def __enter__(self): """ Open the database connection """ self.connection = sqlite3.connect(self.path) self.cursor = self.connection.cursor() return self def __exit__(self, exc_type, exc_value, traceback): """ Close the database connection """ self.connection.commit() self.cursor.close() def execute_unsafe(self, statement, values=()): """ Execute an SQL statement that may alter data If dry_run is set, print the statement and don't execute anything. This is useful for debugging or just for peace of mind. 
""" if self.dry_run: s = statement for v in values: s = s.replace('?', '"%s"' % str(v), 1) print("Executing: %s" % s) else: return self.cursor.execute(statement, values) def get_document(self, id): """ Get a document using the document id """ self.cursor.execute( "SELECT uuid, citationKey FROM Documents WHERE id = ?", (id, )) result = self.cursor.fetchone() if result: uuid, citation_key = result if citation_key is None: citation_key = "" else: raise KeyError("Could not find document with id %s" % id) return (uuid, citation_key) def document_id(self, uuid): """ Get the db primary key for a document from the uuid """ self.cursor.execute( "SELECT id FROM Documents WHERE uuid = ?", (uuid, )) result = self.cursor.fetchone() if result: return result[0] else: raise KeyError("Couldn't find document with uuid %s" % uuid) def get_file_name(self, hash): """ Find the file name from the file hash """ self.cursor.execute( "SELECT localUrl FROM Files WHERE hash = ?", (hash, )) result = self.cursor.fetchone() if result: full_path = result[0] return full_path.replace(self.base_url + u'/', '') else: raise KeyError("Couldn't find file with hash %s" % hash) def document_files(self): """ Return all files associated with documents """ self.cursor.execute("SELECT documentId, hash FROM DocumentFiles") for document_id, file_hash in self.cursor.fetchall(): doc_uuid, doc_citation_key = self.get_document(document_id) file_name = self.get_file_name(file_hash) # Some files are not stored locally, so the file name is not set if file_name: yield DocumentFile( doc_uuid, doc_citation_key, file_hash, file_name) def add_file(self, document_file): """ Add the file to the database and attach it to the document """ # Check document exists in Mendeley database try: document_id = self.document_id(document_file.uuid) except KeyError: sys.stderr.write( "Warning: No Mendeley document for file %s.\n" "Perhaps you need to synchronise your Mendeley " "desktop client first.\n" % document_file.name) return # Check file doesn't already exist self.cursor.execute( "SELECT hash FROM Files WHERE hash = ?", (document_file.hash, )) result = self.cursor.fetchone() if result: sys.stderr.write("Warning: File hash already exists " "for file %s.\n" % document_file.name) return # Insert file file_url = u'/'.join((self.base_url, document_file.name)) self.execute_unsafe( "INSERT INTO Files (hash, localUrl) VALUES (?, ?)", (document_file.hash, file_url)) # Link file to document self.execute_unsafe( "INSERT INTO DocumentFiles " "(documentId, hash, remoteUrl, unlinked, downloadRestricted) " "VALUES (?, ?, '', 'false', 'false')", (document_id, document_file.hash)) def update_file(self, document_file): """ Update the file path for an existing file """ file_url = u'/'.join((self.base_url, document_file.name)) self.execute_unsafe( "UPDATE Files SET localUrl=? 
WHERE hash=?", (file_url, document_file.hash)) class DocumentFile(object): """ A file associated with a reference document for storing in the text database """ # Separator used in the text database sep = u':::' def __init__(self, uuid, key, hash, name): # uuid and key represent document # there may be multiple files with the same document self.uuid = uuid self.key = key # hash and name represent file self.hash = hash self.name = name @classmethod def from_text(cls, line): """ Initialise a new entry from the text representation """ try: (uuid, key, hash, name) = line.strip().split(cls.sep) except ValueError: raise ValueError("Invalid database line: %s" % line) return cls(uuid, key, hash, name) def text_entry(self): """ Return a string representing the entry in the format used by text database """ return self.sep.join((self.uuid, self.key, self.hash, self.name)) def sort_key(self): """ Key used to sort document files in the text database """ if self.key: return self.key.lower() else: return self.name.lower() def directory_to_url(path): """ Convert a directory path to a URL format """ path = os.path.abspath(path) # Remove leading slash so Linux and Windows paths both # don't have a slash, which can then be added if path.startswith('/'): path = path[1:] # Make sure separators are forward slashes path = path.replace(os.sep, '/') if path.endswith('/'): path = path[:-1] # Url encode special characters url = u'file:///' + urllib.quote(path, safe='/:').decode('ascii') return url def relative_file(file): """ Check that a file is within the PDF storage directory """ # If it is, the base path will have been removed return file.name.find(u'file://') < 0 def get_new_files(afiles, bfiles): """ Compare a list of files and return a list of the new ones """ afile_hashes = set(afile.hash for afile in afiles) # Check that the file doesn't exist in the other set and make sure it # also isn't outside the base path, in which case it's ignored new_files = (file for file in bfiles if file.hash not in afile_hashes) return ifilter(relative_file, new_files) def get_different_files(afiles, bfiles): """ Check if any file names have changed """ a_file_names = dict((file.hash, file.name) for file in afiles) # Find files with same hash but named differently different_files = ( (file, a_file_names[file.hash]) for file in bfiles if file.hash in a_file_names and file.name != a_file_names[file.hash]) return different_files def run_synchronisation(mendeley_db, text_database_path, dry_run=False, force_update=False): """ Synchronise updates between the Mendeley database and text file database """ mendeley_entries = set(mendeley_db.document_files()) if os.path.isfile(text_database_path): with open(text_database_path, 'r') as text_db_file: text_db_entries = set( DocumentFile.from_text(line.decode('utf-8')) for line in text_db_file) else: # Assume this is the first run and the text database # hast not yet been created print("Creating new text database file.") text_db_entries = set() # Add new files from Mendeley to the text database new_files = set(get_new_files(text_db_entries, mendeley_entries)) if new_files: print("New files from Mendeley:") for f in new_files: print(f.name) text_db_entries.add(f) else: print("No new files from Mendeley.") # Update Mendeley database with new files from the text database new_files = set(get_new_files(mendeley_entries, text_db_entries)) if new_files: print("New files from the text database:") for f in new_files: print(f.name) mendeley_db.add_file(f) else: print("No new files from the text 
database.") # Write out any conflicts where files exist in both but have # different locations, so that conflicts can be manually resolved, # or override the file path in Mendeley if force_update is set different_files = get_different_files(mendeley_entries, text_db_entries) for different_file, conflicting_name in different_files: if force_update: sys.stderr.write( "Forcing update: %s to %s\n" % (conflicting_name, different_file.name)) mendeley_db.update_file(different_file) else: sys.stderr.write( "Conflict: %s, %s\n" % (conflicting_name, different_file.name)) # Write updated text database file text_db_lines = ((file.text_entry() + u'\n').encode('utf-8') for file in sorted(text_db_entries, key=lambda f: f.sort_key())) if not dry_run: with open(text_database_path, 'w') as text_db_file: for line in text_db_lines: text_db_file.write(line) else: print("Text file:") for line in text_db_lines: sys.stdout.write(line) if __name__ == '__main__': main()
I’ve been poking at this WordPress Theme of mine for a few days and all the little nitpicky stuff is getting me down. There are a lot of major and minor things to clean up. And I’m totally not happy with how FireFox is handling my old layout. In fact, I’m disgusted with my old layout. Guess it’s time to really do what I want to do. While working on my CSS design experiments a few years ago, I also put together some serious state-of-the-art web page layouts and designs. Unfortunately, most browsers couldn’t keep up with it. Too graphic heavy and too many bells and whistles. It’s been about three years now, so I figured that if people haven’t upgraded their browsers to at least something that is two years old, they never will and I can only do so much for them. It’s time for me to get more serious about my own web page design and design for ME and not to make it simple simple simple for the rest of the crowd. I decided that I wanted to try to create a fixed background image of our trailer meandering down the Talimena Skyline Drive in Arkansas for the main and category pages. Over that, I wanted a translucent filter which would allow the content to sit on a semi-opaque white background when it moved over the content. This way, when the viewer scrolls down the page, the content moves but the image doesn’t. Eric Meyers has the most wonderful example of how this works on his site. I did all kinds of things, trying this and that, changing the colors, opacity, and nothing was good enough. And it would look great in Internet Explorer and crap in Firefox, and then crappy in IE and wonderful in Firefox. It was all close, but it just didn’t have the punch I wanted. I also really loved having nothing but black text on a white background for the long, wordy articles I tend to write. Clean and simple while still having a kick. I wanted to get back to our roots, life on the road and photography, so I wanted to look more “photog” than “educational”. The opaque background for the text didn’t work. It was messy and hard to read. Brent, my hero, finally convinced me to make the background image fade into the background so the header area would feature the photograph fading to white and the majority of the background would be white, allowing the text to be against a white background. Brilliant. Totally brilliant. He knows nothing about how all this works, but he can take one look and come up with the solution. One more reason why I keep him around and adore him desperately! So I slammed the photograph into PaintShopPro and fought for ages to get the fade right, creating graduated filter after filter after filter to drop the photograph down to white without leaving a line or making a mess of it. And then I had to test each one to see how far down it would go before it interfered with the text. Then back to the drawing board, literally. Finally I got it right. Not long after, someone emailed me that they are using a super wide screen resolution and the photograph hangs off into white background when the screen width reaches the end. After a lot of new research, I set the image to be 1024 pixels wide based upon the widest average screen width in use. I thought that would be plenty. Because I set the font of the header information to white, to contrast against the photograph in the background, when the screen stretches all the way across beyond 1024 pixels, the floated text floats all the way over to the right with white text on white background. Not a good combination. This brought a great deal of angst. 
I hate websites which decide the width for me, leaving blank edges all around. I like fluidity and elasticity in web page design, using up the whole screen real estate, or not, dependent upon the user not my design. Screens that now reach over 1024 pixels wide is a lot of wide screen real estate to cover. So, after years of working to tweak my lovely layout to be fluid with the screen widths, I’m back to debating about how to deal with screen width issues again. After consulting with some pro designer friends of mine, I come up with a temporary fix. I set the background image to repeat horizontally for ever and ever. It isn’t good, it isn’t pretty, but it works. If I decide to leave it this way, I will probably soften the right edge to blend better with the left edge as it repeats. The challenges a web designer is faced with. Or I set it to 1024 pixels wide, let the user make it narrower if needed, and the wide screen user can suffer with empty space on the right side of the page….what to do? I’ll debate before I decide. The majority of Internet users are still stuck on 800×600 though use of larger screens are rising quickly. After dealing with the background, I started to work on each individual post to make sure all of the HTML imported correctly, fix any bugs, and make sure that the links to the graphics still worked. This was a little more of a challenge than I thought. Years ago, I’d set up my photographs and images in their own folders. From inside an article, the links to the graphics were set with relative links. This means that instead of having them linked via an absolute link like "http://www.examplesite.com/images/ball.gif", if the article was in the “Learn” folder and the subfolder “Biz” for articles about the business of nature photography, the relative link for the graphic would be "../../images/ball.gif". The dots and slashes instruct the browser to look for the graphic two directories up to find the images directory and the graphic would be in there. With the move to WordPress, the issues of relative links gets kinda thrown out the window. My Theme template files are in a sub-sub-subdirectory of the root. So would the relative link have three sets of ../ in order to get the image? Nope. Figuring this out took some detective work, though. Let’s work this out then. The root index.php leads to the Theme index.php and the actual “web page” information is pulled from the database to generate the page, so what should the link be? From the Theme or from the root? From the root. Even though your information gets pulled from different sources, the root file that controls all the action is still based in the root directory. So the relative link for images has no use for the dots and slashes since it will look for the images directory from the root. This forces the browser to look at the root directory first for images. This led to my first lesson in doing a search and replace in the database to get rid of all those dots and dashes in the image links. Looks simple, but it took me a couple hours to figure this little SQL query statement out. I’m totally new at this MySQL database stuff, though I have years of experience with MSAccess and other databases, this is different. The query basically instructs the database to look in the wp_posts table in the post_content field for any occurrence of the first line of <img src="../ and replace it with the second line of <img src=" to fix my little problem. 
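In rough form, the query looked something like this (the exact wording is a reconstruction from the description above, so back up your wp_posts table before trying anything similar):

UPDATE wp_posts
SET post_content = REPLACE(post_content, '<img src="../', '<img src="');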
Little did I realize that this piece of code and I would become very intimately acquainted. I did a search for one set of ../ and then for another, to make sure I got all the ../../../ sets. Cleaned it up in seconds. Wow! Powerful little query, but it can easily search and replace the wrong thing and screw things up. I started making frequent backups of my database to make sure that I had something to go back to if I did mess it up. And I did, but everything was all fixable. As I looked through each post, I realized that my bullets weren’t showing up. I had copied over my list CSS directly from my old site and the relative/absolute link thing hit me here again. The style.css is called directly from within the header to the style sheet in my Theme. This means that the style sheet looks for its links from the folder the Theme resides in, because that is where it also sits. Any link from within the style sheet has to be directed from that folder to the image folder or the spot within your site that the images sit…messy. A command decision had to be made. I recommend that if you are using any graphics in your CSS, like backgrounds, replacement graphics for the lists or headings, put them in the same folder as your style sheet to ensure easy loading. Or in a subfolder of your Theme folder. Big lesson learned. This is much easier. While working on the test site, I had a terrible time dealing with all of these relative links for images, before I figured out about the search and replace query in the database. So if you aren’t ready, like I was, to do a massive search and replace in your database, then you can use the following tip. Now that the background was finally set, and I could see the images on my site, it was time to look at the header title, article heading titles, text, and links since they lay on top of the background. The colors in the photograph of the road winding down Talimena Drive is wonderful, fall colors, and it blended down wonderfully into the white. But the text that laid over it before it reached the pure white included the title of the article or page title (category, archives, etc.), the top text of the sidebar, and the beginning of the article. I knew the colors needed to be earthy, since that was represented in the background, but which colors to choose? I tried everything. White was great across the top for the header, giving enough contrast for the letters to show up, but white wouldn’t work for the mid to total white range. I tried earth tones like browns, and greens, but they blended in with the photograph. Not enough contrast. I tried a dark green on the heading titles and could see them fine in the content that went into the white background, but I couldn’t see them against the photograph. I tried a light green, but it looked awful against the white, though it did stand out against the photograph. I didn’t want to push the whole content down to where there was more contrast in the photograph, and I didn’t want to bring the white gradient up any further. ARGHH, the compromises we have to make in design. Through the use of the following color scheme sites, I was able to finally pick a good color scheme to set the links and heading colors. We found a good red and I set the styles, so now I needed a good link hover color. 
Brent recommended that I turn “red into gold” which worked wonderfully for the links in my sidebar and throughout the document against the white background, but the h3 heading for the title of the article sat in the background photograph and the gold color on the hover made the title disappear into the photograph. Not enough contrast. GRRRR. We hit the color schemes again and finally found a lighter red color that worked wonderfully as a slight contrast shift for the link hover. The rest of the links will feature the gold hover color. We decided to keep the “earthy” tones and set the sidebar titles and “box” lines in a rich green color, which adds to the fall, natural colors throughout. I love it. Step-by-step, this WordPress Theme and site is coming together. Well, I can finally “see” my new WordPress test site. It isn’t as pretty as I want, but it is getting there. Now that the core stuff is there, it’s time to attack the HTML, PHP, and structure of the site again and really pull all of this together to be the site look and feel that I want.
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for templates module.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import gast from tensorflow.contrib.py2tf.pyct import compiler from tensorflow.contrib.py2tf.pyct import templates from tensorflow.python.platform import test class TemplatesTest(test.TestCase): def test_replace_variable(self): def template(a): # pylint:disable=unused-argument def test_fn(a): # pylint:disable=unused-variable a += 1 a = 2 * a + 1 return b # pylint:disable=undefined-variable node = templates.replace( template, a=gast.Name('b', gast.Load(), None))[0] result = compiler.ast_to_object(node) self.assertEquals(7, result.test_fn(2)) def test_replace_function_name(self): def template(fname): # pylint:disable=unused-argument def fname(a): # pylint:disable=function-redefined a += 1 a = 2 * a + 1 return a node = templates.replace( template, fname=gast.Name('test_fn', gast.Load(), None))[0] result = compiler.ast_to_object(node) self.assertEquals(7, result.test_fn(2)) def test_code_block(self): def template(block): # pylint:disable=unused-argument def test_fn(a): # pylint:disable=unused-variable block # pylint:disable=pointless-statement return a node = templates.replace( template, block=[ gast.Assign( [ gast.Name('a', gast.Store(), None) ], gast.BinOp( gast.Name('a', gast.Load(), None), gast.Add(), gast.Num(1))), ] * 2)[0] result = compiler.ast_to_object(node) self.assertEquals(3, result.test_fn(1)) if __name__ == '__main__': test.main()
We carry an extensive line of Royal Canin Veterinary Diets for DOGS and CATS. Check out a list of our regularly stocked items. If these items are not in stock when you come by, get them for 25% off when they come in. We don’t want to just sell you pet food. We like to prescribe the appropriate nutrition for your pet. Please don’t hesitate to ask questions about your pet’s diet. If you have specific questions about your pet’s food for Dr. Chu, we will be pleased to set up a brief appointment to address your concerns!
""" Shamelessly copied from django's setup.py and edited to fit """ from distutils.core import setup from distutils.command.install_data import install_data from distutils.command.install import INSTALL_SCHEMES import os import sys class OsxInstallData(install_data): # On MacOS, the platform-specific lib dir is /System/Library/Framework/Python/.../ # which is wrong. Python 2.5 supplied with MacOS 10.5 has an Apple-specific fix # for this in distutils.command.install_data#306. It fixes install_lib but not # install_data, which is why we roll our own install_data class. def finalize_options(self): # By the time finalize_options is called, install.install_lib is set to the # fixed directory, so we set the installdir to install_lib. The # install_data class uses ('install_data', 'install_dir') instead. self.set_undefined_options('install', ('install_lib', 'install_dir')) install_data.finalize_options(self) if sys.platform == "darwin": cmdclasses = {'install_data': OsxInstallData} else: cmdclasses = {'install_data': install_data} def fullsplit(path, result=None): """ Split a pathname into components (the opposite of os.path.join) in a platform-neutral way. """ if result is None: result = [] head, tail = os.path.split(path) if head == '': return [tail] + result if head == path: return result return fullsplit(head, [tail] + result) # Tell distutils to put the data_files in platform-specific installation # locations. See here for an explanation: # http://groups.google.com/group/comp.lang.python/browse_thread/thread/35ec7b2fed36eaec/2105ee4d9e8042cb for scheme in INSTALL_SCHEMES.values(): scheme['data'] = scheme['purelib'] # Compile the list of packages available, because distutils doesn't have # an easy way to do this. packages, data_files = [], [] root_dir = os.path.dirname(__file__) if root_dir != '': os.chdir(root_dir) impostor_dir = 'impostor' for dirpath, dirnames, filenames in os.walk(impostor_dir): # Ignore dirnames that start with '.' for i, dirname in enumerate(dirnames): if dirname.startswith('.'): del dirnames[i] if '__init__.py' in filenames: packages.append('.'.join(fullsplit(dirpath))) elif filenames: data_files.append([dirpath, [os.path.join(dirpath, f) for f in filenames]]) # Small hack for working with bdist_wininst. # See http://mail.python.org/pipermail/distutils-sig/2004-August/004134.html if len(sys.argv) > 1 and sys.argv[1] == 'bdist_wininst': for file_info in data_files: file_info[0] = '\\PURELIB\\%s' % file_info[0] setup( name="Impostor", version="1.0.2", url='https://github.com/samastur/Impostor/', author='Marko Samastur', author_email='[email protected]', description='Staff can login as a different user.', long_description='Django app allowing staff with their credentials to login as other users.', license='MIT License', platforms=['any'], packages=packages, cmdclass=cmdclasses, data_files=data_files, classifiers=['Development Status :: 5 - Production/Stable', 'Environment :: Web Environment', 'Framework :: Django', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Topic :: Utilities' ], )
Shopping on LEDsupermall is safe, fast and convenient. To make life easier, we accept a number of secure payment methods designed to be completely safe and friendly. When you shop at LEDsupermall, your privacy and online security are always guaranteed. LEDsupermall primarily uses PayPal to process secure payments. If you have questions about the LEDsupermall.com payment process, kindly contact us for assistance.
# Copyright 2014 Andrea Micheli and Marco Gario # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys import re def check_version(module): try: if module == "z3": import z3 (major, minor, ver, _) = z3.get_version() version = "%d.%d.%d" % (major, minor, ver) elif module == "msat": import mathsat version_str = mathsat.msat_get_version() m = re.match(r"^MathSAT5 version (\d+\.\d+\.\d+) .*$", version_str) if m is not None: version = m.group(1) elif module == "cudd": import repycudd doc = repycudd.DOCSTRING m = re.match(r"^PyCUDD (\d+\.\d+\.\d+).*", doc) if m is not None: version = m.group(1) elif module == "btor": import pyboolector version = "OK" # Just checking if import succeeds elif module == "cvc4": import CVC4 version = CVC4.Configuration_getVersionString() elif module == "picosat": import picosat version = picosat.picosat_version() elif module == "yices": import yicespy v = yicespy.__dict__['__YICES_VERSION'] m = yicespy.__dict__['__YICES_VERSION_MAJOR'] p = yicespy.__dict__['__YICES_VERSION_PATCHLEVEL'] version = "%d.%d.%d" % (v, m, p) else: print("Invalid argument '%s'" % module) exit(-2) except ImportError: version = None return version if __name__ == "__main__": if len(sys.argv) != 2: print("Usage: python %s <solver_name>" % sys.argv[0]) exit(-1) module = sys.argv[1] version = check_version(module) if version is None: print("NOT INSTALLED") else: print(version)
We are a not-for-profit organisation based in Malawi and sometimes in Botswana. Our main goal is to make healthcare information easily accessible through the use of layman's language. We focus mainly on rehabilitation medicine. We also offer free physiotherapy quizzes to students and professionals to help them stay up to date with trends in the profession. How much will it cost me? It depends on the kind of service you are looking for. Some services are completely free of charge, while others need you to contribute something. Who qualifies for free services? Children under the age of 16 and the elderly qualify for free rehab services. The elderly will just be required to contribute a reasonable amount for the help they receive so that T-Malawi can keep running. Where can I find T-Malawi? As Therapeutics Malawi, our major service is the dissemination of healthcare information. We strive to bring you high-quality, up-to-date health information that is research-backed.
# Copyright 2016 Quora, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from sys import stderr, stdout import time import threading from qcore import utime import qcore.events as core_events from . import debug from . import futures from . import batching from . import _debug from .async_task import AsyncTask assert str(__name__).endswith('asynq.scheduler') \ or str(__name__).endswith('asynq.lib64.scheduler'), \ "Are you importing asynq from the wrong directory?" _debug_options = _debug.options _futures_none = futures._none class AsyncTaskError(Exception): pass class TaskScheduler(object): """Schedules and runs AsyncTask objects flushing batches when needed.""" def __init__(self): self._last_dump_time = time.time() self.on_before_batch_flush = core_events.EventHook() self.on_after_batch_flush = core_events.EventHook() thread = threading.current_thread() thread_name = thread.name if thread.name else str(thread.ident) if '_state' in globals(): _state.last_id += 1 _id = _state.last_id else: _id = 0 self.name = '%s / %d' % (thread_name, _id) self.reset() def reset(self): self._batches = set() self._tasks = [] self.active_task = None def wait_for(self, task): """ Executes a task and ensures it's complete when this method returns. :param tasks: task to wait for :return: ``None`` """ while not task.is_computed(): self._execute(task) if task.is_computed(): break self._continue_with_batch() def _execute(self, root_task): """Implements task execution loop. The postcondition of this method is that all tasks in the dependency tree of root_task that aren't blocked on batch items waiting to be flushed should be executed until they are (or until they're computed). This is done by running a depth-first search on the dependency tree. :param root_task: root of the dependency tree :return: ``None`` """ init_num_tasks = len(self._tasks) self._tasks.append(root_task) # Run the execution loop until the root_task is complete (it's either blocked on batch # items waiting to be flushed, or computed). while len(self._tasks) > init_num_tasks: if len(self._tasks) > _debug_options.MAX_TASK_STACK_SIZE: self.reset() debug.dump(self) raise RuntimeError('Number of scheduled tasks exceeded maximum threshold.') # _tasks is a stack, so take the last one. task = self._tasks[-1] if _debug_options.DUMP_SCHEDULER_STATE: self.try_time_based_dump() if task.is_computed(): self._tasks.pop() elif isinstance(task, AsyncTask): self._handle_async_task(task) elif isinstance(task, batching.BatchItemBase): # This can happen multiple times per batch item (if we run _execute and this batch # item doesn't get flushed), but that's ok because self._batches is a set. 
self._schedule_batch(task.batch) self._tasks.pop() else: task._compute() self._tasks.pop() def _schedule_batch(self, batch): if batch.is_flushed(): if _debug_options.DUMP_SCHEDULE_BATCH: debug.write("@async: can't schedule flushed batch %s" % debug.str(batch)) return False if _debug_options.DUMP_SCHEDULE_BATCH and batch not in self._batches: debug.write('@async: scheduling batch %s' % debug.str(batch)) self._batches.add(batch) return True def _flush_batch(self, batch): self.on_before_batch_flush(batch) try: batch.flush() finally: self.on_after_batch_flush(batch) return 0 def _handle_async_task(self, task): # is_blocked indicates that one of the tasks dependencies isn't computed yet, # so we can't run _continue until they are. if task.is_blocked(): # _dependencies_scheduled indicates if we've already added the task's # dependencies to the task stack. If the task is blocked and we've already # scheduled and run its dependencies, it's blocked on batch items waiting # to be flushed so we're done with this task. if task._dependencies_scheduled: # Set _dependencies_scheduled to false so on future runs of _execute, # we add the dependencies to the task stack (since some of the batch items # in the subtree might have been flushed) if _debug_options.DUMP_CONTINUE_TASK: debug.write('@async: skipping %s' % debug.str(task)) task._dependencies_scheduled = False task._pause_contexts() self._tasks.pop() # If the task is blocked and we haven't scheduled its dependencies, we # should do so now. else: task._dependencies_scheduled = True task._resume_contexts() for dependency in task._dependencies: if not dependency.is_computed(): if _debug_options.DUMP_SCHEDULE_TASK: debug.write('@async: scheduling task %s' % debug.str(dependency)) if _debug_options.DUMP_DEPENDENCIES: debug.write('@async: +dependency: %s needs %s' % (debug.str(task), debug.str(dependency))) self._tasks.append(dependency) else: self._continue_with_task(task) def _continue_with_task(self, task): task._resume_contexts() self.active_task = task if _debug_options.DUMP_CONTINUE_TASK: debug.write('@async: -> continuing %s' % debug.str(task)) if _debug_options.COLLECT_PERF_STATS: start = utime() task._continue() task._total_time += utime() - start if task.is_computed() and isinstance(task, AsyncTask): task.dump_perf_stats() else: task._continue() if _debug_options.DUMP_CONTINUE_TASK: debug.write('@async: <- continued %s' % debug.str(task)) self.active_task = None # We get a new set of dependencies when we run _continue, so these haven't # been scheduled. task._dependencies_scheduled = False def _continue_with_batch(self): """ Flushes one of batches (the longest one by default). :param assert_no_batch: indicates whether exception must be raised if there is no batch to flush :return: the batch that was flushed, if there was a flush; otherwise, ``None``. """ batch = self._select_batch_to_flush() if batch is None: if _debug_options.DUMP_FLUSH_BATCH: debug.write('@async: no batch to flush') else: return None self._batches.remove(batch) self._flush_batch(batch) return batch def _select_batch_to_flush(self): """Returns the batch having highest priority, or ``None``, if there are no batches. This method uses ``BatchBase.get_priority()`` to determine the priority. Side effect: this method removed flushed batches. :return: selected batch or None. 
""" best_batch = None best_priority = None batches_to_remove = None for batch in self._batches: if not batch.items or batch.is_flushed(): if batches_to_remove is None: batches_to_remove = [batch] else: batches_to_remove.append(batch) continue priority = batch.get_priority() if best_batch is None or best_priority < priority: best_batch = batch best_priority = priority if batches_to_remove: for batch in batches_to_remove: self._batches.remove(batch) return best_batch def __str__(self): return '%s %s (%d tasks, %d batches; active task: %s)' % \ (type(self), repr(self.name), len(self._tasks), len(self._batches), str(self.active_task)) def __repr__(self): return self.__str__() def dump(self, indent=0): debug.write(debug.str(self), indent) if self._tasks: debug.write('Task queue:', indent + 1) for task in self._tasks: task.dump(indent + 2) else: debug.write('No tasks in task queue.', indent + 1) if self._batches: debug.write('Batches:', indent + 1) for batch in self._batches: batch.dump(indent + 2) def try_time_based_dump(self, last_task=None): current_time = time.time() if (current_time - self._last_dump_time) < _debug_options.SCHEDULER_STATE_DUMP_INTERVAL: return self._last_dump_time = current_time debug.write('\n--- Scheduler state dump: --------------------------------------------') try: self.dump() if last_task is not None: debug.write('Last task: %s' % debug.str(last_task), 1) finally: debug.write('----------------------------------------------------------------------\n') stdout.flush() stderr.flush() class LocalTaskSchedulerState(threading.local): def __init__(self): self.last_id = 0 self.reset() def reset(self): self.current = TaskScheduler() _state = LocalTaskSchedulerState() globals()['_state'] = _state def get_scheduler(): global _state return _state.current def reset(): _state.reset() def get_active_task(): global _state s = _state.current return None if s is None else s.active_task
Hey little brother did you hear I made it back to town? I'm getting sober, there's some things I've got to figure out. I saw the station and the light we used to run around. I could've sworn that there were things I used to care about. "There's a gap the size of a hundred empty gin bottles between August 2012 and September 2013," I tell them. "There are seasons you can't get back." It's a ginger beer evening for me at George's Majestic Lounge; Jesse and John sip Shiner. "I don't suppose I know what to do with that," I say, "but I'm ready to crawl out." I spill the confession across the small round table, and an uncomfortable silence settles in. These days, I'm prone to this kind of lumbering conversation with friends. There is no delicate way to seek validation in a bar. Follow me over to A Deeper Story for the rest of The Great Despiser.
# Copyright (c) Mathias Kaerlev 2012. # This file is part of Anaconda. # Anaconda is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # Anaconda is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with Anaconda. If not, see <http://www.gnu.org/licenses/>. from mmfparser.player.shader import Shader MONOCHROME_SHADER = [ Shader([""" varying vec2 texture_coordinate; void main() { gl_Position = gl_ModelViewProjectionMatrix * gl_Vertex; gl_FrontColor = gl_Color; texture_coordinate = vec2(gl_MultiTexCoord0); } """], [""" varying vec2 texture_coordinate; uniform sampler2D Tex0; void main() { vec4 col = texture2D(Tex0, texture_coordinate); col *= vec4(0.299,0.587,0.114,1.0); col.rgb = vec3(col.r+col.g+col.b); gl_FragColor = col * gl_Color; } """]) ] __all__ = ['MONOCHROME_SHADER']
Here at ecofective we’re passionate about creating beautiful gardens with minimal impact on the environment around us. We aim to provide you with the tools you need to create a garden that you want, using the best and most environmentally sensitive feed, weed, clean and control products available in the UK. Whether you use your garden to grow your own produce, or as an area to relax in with your friends, family and pets or even as a safe haven to welcome the UK’s most beloved wildlife, ecofective gives gardeners very effective alternatives to conventional chemical garden care products. Made in the UK and derived from natural solutions, our promise is to provide greener, safer home and garden care.
#!/usr/bin/python import sys import zmq import random import time import os import datetime import json import getopt import socket import happybase import hashlib import struct import traceback import re # adjust to match your $PREFIX if you specified one # default PREFIX = /usr/local sys.path.append('/usr/local/lib/cif-protocol/pb-python/gen-py') import msg_pb2 import feed_pb2 import control_pb2 import RFC5070_IODEF_v1_pb2 import MAEC_v2_pb2 import cifsupport from DB.Registry import Registry def HBConnection(hbhost): pool = happybase.ConnectionPool(size=25, host=hbhost) return pool def usage(): print "cif-registry.py [-D #] [-h] [-d key] [-k key] [-t int|float|double|long|str] [-v value]\n\n" def cast(t, v): if t in ["int"]: return int(v) if t in ["long"]: return long(v) if t in ["double", "float"]: return float(v) return str(v) try: opts, args = getopt.getopt(sys.argv[1:], 't:v:k:d:D:H:h') debug = 0 key_type = None key_value = None key_name = None del_name = None hbhost = "localhost" for o, a in opts: if o == "-t": key_type = a elif o == "-H": hbhost = a elif o == "-v": key_value = a elif o == "-k": key_name = a elif o == "-d": del_name = a elif o == "-h": usage() sys.exit(2) elif o == "-D": debug = a connectionPool = HBConnection(hbhost) reg = Registry(connectionPool, debug) if del_name != None: reg.delete(del_name) kv = reg.get(del_name) if kv != None: print "Failed to delete the key: it seems to still be in the database." elif key_name != None: if key_type != None and key_value != None: key_value = cast(key_type, key_value) reg.set(key_name, key_value) kv = reg.get(key_name) if kv == key_value: print key_name + " has been set to " + str(key_value) else: print "Failed? you gave me: " + str(key_value) + " but the database has " + str(kv) else: kv = reg.get(key_name) print key_name + " = " + str(kv) + " " + str(type(kv)) else: kl = reg.get() print "Available keys: ", kl except getopt.GetoptError as err: print str(err) usage() sys.exit(2) except Exception as e: print e traceback.print_tb(sys.exc_info()[2]) usage() sys.exit(2)
Manages all aspects of franchised, managed or corporate projects for new construction, conversion or renovation, and the interior design process, for adherence to hotel brand standards. Focuses on the brand. May function as project manager, guiding the work of the team, or manage a program. Serves as content leader on the project. Participates in establishing the direction of brand design. Selects and manages interior design consultants. Develops and maintains liaison with all appropriate divisions, departments and owners. Responsible for the interior design project budget and for coordinating staff requirements with other business groups. Provides project updates using tools to track and report status. Presents and sells designs to senior-level executives and owners. Reviews and/or develops interior design concepts. May provide field input to the product development group. May manage project workload in support of brand or business unit growth goals. Position is based in Mexico City. Minimum 6 years of experience in the design field, preferably in the hospitality design industry.
__author__ = 'tschlein'

#Checks for existence of dependencies (executable files, Python modules).
#Returns (True, message) if all dependencies are present; otherwise returns (False, message) listing missing files.
#TODO add Python modules

import sys
from os import path
import platform     #https://docs.python.org/2/library/platform.html
import configparser #https://docs.python.org/2/library/configparser.html
import argparse     #http://docs.python.org/3.4/library/argparse.html

#Parse the ini file to check whether dependencies are present.
def parse(file, verbose):
    if verbose >= 1:
        print('Entering parse:')
    if verbose >= 2:
        print('\tConfig file passed in: ' + str(file))

    #Collect missing executables and/or modules.
    missing = []

    #Determine the platform: platform.platform() returns e.g. 'Windows-10-...' or 'Linux-...'.
    system = platform.platform()

    #Determine 32-bit vs. 64-bit architecture (currently informational only).
    if platform.architecture()[0] == '64bit':
        architecture = 64
    elif platform.architecture()[0] == '32bit':
        architecture = 32

    #Read the config file for parsing.
    config = configparser.ConfigParser()
    config.read(file)

    #Pick the config section that matches the current platform.
    if 'Windows' in system:
        section = 'Windows'
    elif 'Linux' in system:
        section = 'Linux'
    else:
        return False, '|[-] Unsupported platform: ' + system

    for key in config[section]:
        value = config.get(section, key)
        if path.isfile(value):
            if verbose >= 2:
                print('\t| [+] ', value, '|')
        else:
            print('\t| [-] ', value, '|')
            missing.append(value)

    #Return True if all dependencies are present; otherwise, return False plus the missing files.
    if missing:
        return False, '|[-] Missing the following:\n' + '\n'.join('| [-]: ' + m for m in missing)
    else:
        return True, '|[+] All dependencies are present.'

#Parse the command line arguments.
def main(argv):
    verbose = 0
    file = './paths.ini'

    parser = argparse.ArgumentParser(description="Check whether required programs and modules exist.", add_help=True)
    parser.add_argument('-f', '--file', help='The file that contains paths for the required programs and modules.', required=False)
    parser.add_argument('-v', '--verbose', help='The level of debugging.', type=int, required=False)
    parser.add_argument('--version', action='version', version='%(prog)s 0.5')
    args = parser.parse_args(argv)

    if args.file:
        file = args.file
    if args.verbose:
        verbose = args.verbose

    if verbose >= 1:
        print('Entering Main:')

    if not path.isfile(file):
        sys.exit('Error: File ' + str(file) + ' does not exist.')

    value, error = parse(file, verbose)
    return value, error

main(sys.argv[1:])
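For reference, here is one way to generate a matching paths.ini. The [Windows] and [Linux] section names are the ones the parser above reads; the keys and tool paths are purely illustrative placeholders.

# Hypothetical example: write a paths.ini with the two sections the checker expects.
import configparser

config = configparser.ConfigParser()
config['Windows'] = {
    'python': r'C:\Python34\python.exe',        # placeholder path
    'exiftool': r'C:\Tools\exiftool\exiftool.exe',  # placeholder path
}
config['Linux'] = {
    'python': '/usr/bin/python3',               # placeholder path
    'exiftool': '/usr/bin/exiftool',            # placeholder path
}
with open('paths.ini', 'w') as f:
    config.write(f)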
The trumpet-style wooden dining table will give your dining area a classy, refined look. This all-wood table with a designer trumpet leg is a regal piece with a modern texture. A perfectly designed wooden table with an extraordinary look, it will be the best choice for you.
#!/usr/bin/env python # Shows several JPG camera snapshots tiled into a bigger image. # # requires python2, ppython-sdl2 ! # import os import time import sys import ctypes import urllib2 import json from sdl2 import * from sdl2.sdlimage import * from sdl2.sdlttf import * class Camera(object): def __init__(self, x, y, scale, label, url): self.x = x self.y = y self.scale = scale self.label = label self.url = url def __repr__(self): return json.dumps(self.__dict__, sort_keys=True, indent=4) # reads one image from the source def readframe(url): try: response = urllib2.urlopen(url) return response.read() except Exception: return None # returns a surface with the text rendered in white with a black outline in the specified size def renderText(text, size): font = TTF_OpenFont("VCR_OSD_MONO_1.001.ttf", size) TTF_SetFontOutline(font, 2) outline = TTF_RenderText_Blended(font, text, SDL_Color(0, 0, 0)) TTF_SetFontOutline(font, 0) surface = TTF_RenderText_Blended(font, text, SDL_Color(255, 255, 255)) TTF_CloseFont(font) SDL_BlitSurface(surface, None, outline, SDL_Rect(2, 2, 0, 0)) return outline # renders one camera onto the window def renderCamera(window, camera): # get window properties cw = ctypes.c_int() ch = ctypes.c_int() SDL_GetWindowSize(window, ctypes.byref(cw), ctypes.byref(ch)) w, h = cw.value, ch.value # get JPG jpeg = readframe(camera.url) if jpeg is None: return rwops = SDL_RWFromMem(jpeg, sys.getsizeof(jpeg)) image = IMG_LoadTyped_RW(rwops, True, "JPG") # blit scaled JPG x = w * camera.x / 4 y = h * camera.y / 3 rect = SDL_Rect(x, y, w * camera.scale / 4, h * camera.scale / 3) SDL_BlitScaled(image, None, SDL_GetWindowSurface(window), rect) SDL_FreeSurface(image) # draw text over it SDL_BlitSurface(camera.osd, None, SDL_GetWindowSurface(window), rect) SDL_UpdateWindowSurface(window) def getDefaultLayout(): cameras = list() cameras.append(Camera(0, 0, 1, "ACHTERDEUR", "http://localhost:8000/cgi-bin/nph-mjgrab?7")) cameras.append(Camera(0, 1, 1, "WERKPLAATS", "http://localhost:8000/cgi-bin/nph-mjgrab?6")) cameras.append(Camera(0, 2, 1, "KEUKEN", "http://localhost:8000/cgi-bin/nph-mjgrab?2")) cameras.append(Camera(1, 0, 2, "SPACE", "http://localhost:8000/cgi-bin/nph-mjgrab?4")) cameras.append(Camera(1, 2, 1, "SPACE", "http://localhost:8000/cgi-bin/nph-mjgrab?3")) cameras.append(Camera(2, 2, 1, "3D-PRINTER", "http://localhost:8000/cgi-bin/nph-mjgrab?1")) cameras.append(Camera(3, 0, 1, "PARKEER", "http://localhost:8000/cgi-bin/nph-mjgrab?9")) cameras.append(Camera(3, 1, 1, "PARKEER", "http://localhost:8000/cgi-bin/nph-mjgrab?8")) cameras.append(Camera(3, 2, 1, "INRIT", "http://localhost:8000/cgi-bin/nph-mjgrab?10")) return cameras # try to read layout file, or create new default if it does not exist def readLayout(filename): cameras = list() try: with open(filename, "r") as file: dicts = json.loads(file.read()) for d in dicts: camera = Camera(d["x"], d["y"], d["scale"], d["label"], d["url"]) cameras.append(camera) except Exception as e: print("Failed to read, using defaults") cameras = getDefaultLayout() if not os.path.exists(filename): with open(filename, "w") as file: file.write(repr(cameras)) return cameras def main(): SDL_Init(SDL_INIT_VIDEO) TTF_Init() window = SDL_CreateWindow(b"Panopticon", SDL_WINDOWPOS_CENTERED, SDL_WINDOWPOS_CENTERED, 1024, 768, SDL_WINDOW_SHOWN) SDL_SetWindowFullscreen(window, SDL_WINDOW_FULLSCREEN_DESKTOP) cameras = readLayout("layout.txt") event = SDL_Event() iterations = 0 starttime = time.time() lasttime = starttime # prerender OSD for camera in cameras: 
camera.osd = renderText(camera.label, 30) running = True while running: for camera in cameras: if not running: break # draw one cam renderCamera(window, camera) # check for quit button while SDL_PollEvent(ctypes.byref(event)): if event.type == SDL_QUIT: running = False # calculate fps iterations = iterations + 1 delta_t = (time.time() - starttime) if delta_t > 0: fps = iterations / delta_t print(fps) SDL_DestroyWindow(window) SDL_Quit() return 0 if __name__ == "__main__": sys.exit(main())
n. 1. A love of the fine arts; a taste for curiosities. 2. An object of art or antiquity; a curiosity, such as those found in museums or private collections. To be shown to my friends as a piece of virtù.
'''Utility methods ''' import logging import os import numpy as np import torch __author__ = 'R Devon Hjelm' __author_email__ = '[email protected]' logger = logging.getLogger('cortex.util') try: _, _columns = os.popen('stty size', 'r').read().split() _columns = int(_columns) except ValueError: _columns = 1 def print_section(s): '''For printing sections to scripts nicely. Args: s (str): string of section ''' h = s + ('-' * (_columns - len(s))) print(h) def update_dict_of_lists(d_to_update, **d): '''Updates a dict of list with kwargs. Args: d_to_update (dict): dictionary of lists. **d: keyword arguments to append. ''' for k, v in d.items(): if isinstance(v, dict): if k not in d_to_update.keys(): d_to_update[k] = {} update_dict_of_lists(d_to_update[k], **v) elif k in d_to_update.keys(): d_to_update[k].append(v) else: d_to_update[k] = [v] def bad_values(d): failed = {} for k, v in d.items(): if isinstance(v, dict): v_ = bad_values(v) if v_: failed[k] = v_ else: if isinstance(v, (list, tuple)): v_ = [] for v__ in v: if isinstance(v__, torch.Tensor): v_.append(v__.item()) else: v_.append(v__) v_ = np.array(v_).sum() elif isinstance(v, torch.Tensor): v_ = v.item() else: v_ = v if np.isnan(v_) or np.isinf(v_): failed[k] = v_ if len(failed) == 0: return False return failed def convert_to_numpy(o): if isinstance(o, torch.Tensor): o = o.data.cpu().numpy() if len(o.shape) == 1 and o.shape[0] == 1: o = o[0] elif isinstance(o, (torch.cuda.FloatTensor, torch.cuda.LongTensor)): o = o.cpu().numpy() elif isinstance(o, list): for i in range(len(o)): o[i] = convert_to_numpy(o[i]) elif isinstance(o, tuple): o_ = tuple() for i in range(len(o)): o_ = o_ + (convert_to_numpy(o[i]),) o = o_ elif isinstance(o, dict): for k in o.keys(): o[k] = convert_to_numpy(o[k]) return o def compute_tsne(X, perplexity=40, n_iter=300, init='pca'): from sklearn.manifold import TSNE tsne = TSNE(2, perplexity=perplexity, n_iter=n_iter, init=init) points = X.tolist() return tsne.fit_transform(points)
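For illustration, here is a minimal usage sketch of update_dict_of_lists from the utilities above; the variable name and sample values are invented for this example, and the expected result follows directly from the append/recurse logic in the function.

# Illustrative only -- exercises update_dict_of_lists as defined above.
history = {}
update_dict_of_lists(history, loss=0.5, metrics={'acc': 0.7})
update_dict_of_lists(history, loss=0.4, metrics={'acc': 0.8})
print(history)   # {'loss': [0.5, 0.4], 'metrics': {'acc': [0.7, 0.8]}}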
Using a plastic opening tool, carefully pry the device apart. Make your way around the entire perimeter of the device and carefully place the two panels next to each other. After the panels are separated, watch out for the speaker on the rear panel, as its wire is rather short. When separating the panels, keep an eye out for the power button and volume rocker, as they can be dislodged. Grip the speaker from the sides with your fingers or a pair of tweezers and slowly begin to remove it from the rear panel. When removing the speaker, make sure not to disconnect the wires in any way. After the speaker has been removed, place the rear panel aside. Carefully separate the speaker wire from the tape and proceed to peel the tape off in an upwards direction. The tape we are removing covers a number of connections on the motherboard. Be extra careful not to sever any of these. Using a pair of tweezers, begin separating the tape downwards from the back panel. Pull the tape away from the wire bundle beneath. Isolate the wire running from the speaker to the motherboard. Desolder the speaker wires from the motherboard. Using a nylon spudger, slowly work your way underneath the battery, separating it from the panel beneath. After the battery is separated from the panel, make sure to place it face down above the tablet itself. Throughout the removal process, keep an eye on the battery's wires and make sure they do not break. Desolder the wires that connect the battery to the motherboard. Using a pair of tweezers, begin to peel the tape covering the ribbon cable on the motherboard. Using a pair of tweezers, begin to peel the antenna from the metal panel. Work your way down until the antenna is completely removed. Be careful not to deform the antenna or sever its connection. Using a pair of tweezers, remove the tape securing the cameras to the back panel. Using a plastic opening tool, lift up the flap on the ribbon cable receptacle. Using a pair of tweezers, remove the camera assembly from its housing and its receptacles. Use a plastic opening tool to pull out the black knobs on both sides of the ribbon cable receptacle. Using a pair of tweezers, remove the ribbon cable from its receptacle. Now, on the left ribbon cable, use a plastic opening tool to pull out the black knobs on both sides of the ribbon cable receptacle. Desolder the wires that connect the backlight to the motherboard. Using a PH0 screw bit, unscrew the three 3 mm screws securing the motherboard to the back panel. Carefully lift the motherboard out of its housing. Do not bend the touch sensor too much, as it may shatter.
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Written by Lucas Sinclair. MIT Licensed. Contact at www.sinclair.bio """ # Built-in modules # import os # Internal modules # from plumbing.scraping.headers import make_headers # First party modules # import autopaths from autopaths import Path # Third party modules # import requests from retry import retry ############################################################################### @retry(requests.exceptions.HTTPError, tries=8, delay=1, backoff=2) def request(url, header = None, text = False, content = False, response = False, **kwargs): # Get # resp = requests.get(url, headers=header, **kwargs) # This will be caught by the decorator # resp.raise_for_status() # Pick what to return # if text: return resp.text if content: return resp.content if response: return resp ############################################################################### def retrieve_from_url(url, user_agent=None, **kwargs): """ Return the text content of a resource (e.g. the HTML). By default we will retry if an HTTP error arises. """ # Custom user agent if needed # if user_agent is not None: header = make_headers(user_agent) else: header = "" # Download # content = request(url, header, text=True, **kwargs) # Return # return content ############################################################################### def stream_from_url(*args, **kwargs): """ Save the resource as a file on disk iteratively by first asking for the 'content-length' header entry and downloading in chunks. By default we will retry if an HTTP error arises. By default we will uncompress a downloaded file if it is zipped. """ # Just redirect to download_from_url # kwargs.update({'stream': True}) return download_from_url(*args, **kwargs) def download_from_url(url, destination = None, uncompress = False, user_agent = 1, stream = False, progress = False, desc = None, **kwargs): """ Save the resource as a file on disk. 
""" # Custom user agent if needed # if user_agent is not None: header = make_headers(user_agent) else: header = "" # Download # if stream: response = request(url, header, response=True, stream=True, **kwargs) else: content = request(url, header, content=True, **kwargs) # Get total size # if stream: total_size = int(response.headers.get('content-length', -1)) num_blocks = 1500 block_size = int(total_size/num_blocks) # Sometimes we don't get content-length # if stream and total_size < 0: return download_from_url(url, destination, uncompress, user_agent, False, False, **kwargs) # Choose the right option for destination # destination = handle_destination(url, destination) # How the progress bar should look like # bar = '{l_bar}{bar}| {n_fmt}/{total_fmt} [{remaining}, ' \ '{rate_fmt}{postfix}]' # Delete the destination if there is any exception or a ctrl-c # try: # With streaming # if stream: generator = response.iter_content(chunk_size=block_size) if progress: # In the future replace with `from tqdm.rich import tqdm` from tqdm import tqdm bar = tqdm(bar_format = bar, desc = desc, total = total_size, unit = 'iB', unit_scale = True) with open(destination, "wb") as handle: for data in generator: handle.write(data) bar.update(len(data)) bar.close() else: with open(destination, "wb") as handle: for data in generator: handle.write(data) # Without streaming # if not stream: with open(destination, "wb") as handle: handle.write(content) except: if os.path.exists(destination): os.remove(destination) raise # Uncompress the result # if uncompress: # To detect tar.gz we rely on the extension # if destination.endswith('.tar.gz'): return destination.untargz_to() # Otherwise read the magic number # with open(destination, 'rb') as f: header = f.read(4) # If it's a zip file # if header == b"PK\x03\x04": return destination.unzip_to(inplace=True) # If it's a gzip file # elif header[:3] == b"\x1f\x8b\x08": return destination.ungzip_to() # Return # return destination ############################################################################### def handle_destination(url, destination): """ The destination can be either unspecified or can contain either a file path or a directory path. """ # Choose a default for destination # if destination is None: destination = autopaths.tmp_path.new_temp_file() # Directory case - choose a filename # elif destination.endswith('/'): filename = url.split("/")[-1].split("?")[0] destination = Path(destination + filename) destination.directory.create_if_not_exists() # Normal case # else: destination = Path(destination) destination.directory.create_if_not_exists() # Return # return destination
"Child Of Night" Poetry.net. STANDS4 LLC, 2019. Web. 25 Apr. 2019. <https://www.poetry.net/poem/45705/child-of-night>.
from django.db.models import Q, Sum, Count from store_db.models import SellerFeedback, Inventory, SellerRating def rank_seller_from_inventory(inventory): # call this method if inventory is added/updated or new feedback is received # based on seller_points, return_accepted, local_pickup, free_domestic_shipping rank_seller(inventory.seller) total_points = inventory.seller.seller_rank.points if inventory.return_accepted: total_points += 10 if inventory.local_pick_up_accepted: total_points += 10 if inventory.free_domestic_shipping: total_points += 100 inventory.rating = total_points print(total_points) inventory.save() def rank_seller(seller): # call this method if feedback is created/edited or if new sales are made. # maximum review points is 5 total_sales_made = Inventory.objects.filter(seller=seller).aggregate(tsm=Sum('total_sold')) total_sales_made = total_sales_made['tsm'] sfd = SellerFeedback.objects.filter(seller=seller).aggregate(total_review_points=Sum('review_points'), number_of_reviews=Count('review_points')) if sfd['total_review_points'] is None: sfd['total_review_points'] = 0 if total_sales_made == 0: seller_rating_points = 100 else: seller_rating_points = (total_sales_made - sfd['number_of_reviews'])*5 + sfd['total_review_points']/total_sales_made SellerRating.objects.update_or_create(seller=seller, defaults={'points': seller_rating_points}) def get_ranked_inventory_list(item): inventories = item.item_inventory.all() return inventories.order_by('-rating')
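To make the scoring rule above easier to follow, here is a framework-free restatement of the per-inventory arithmetic (a sketch only: the helper name and sample numbers are invented, and the real code reads these flags from the Django models):

# Illustrative sketch of the inventory scoring used in rank_seller_from_inventory.
def inventory_rating(seller_rank_points, return_accepted, local_pickup, free_shipping):
    points = seller_rank_points          # base: the seller's current rank points
    if return_accepted:
        points += 10                     # returns accepted
    if local_pickup:
        points += 10                     # local pick-up offered
    if free_shipping:
        points += 100                    # free domestic shipping weighs the most
    return points

# e.g. a seller ranked at 95 points, accepting returns and offering free shipping:
print(inventory_rating(95, True, False, True))   # -> 205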
...because your choice of a lawyer matters! Enter your name and email address to receive newsletters, alerts, and useful information. PDF files require a version of Adobe PDF Reader be installed. If you do not have Adobe Reader, click the link below and install it before downloading PDF files. If you have just received a Notice of Adverse Action from VSP following an SIU audit of your office, here is some information that you may find helpful. I cannot urge you strongly enough to promptly seek the counsel of a qualified attorney before you do anything and before you speak with VSP. 1. You have probably been ordered to pay restitution, and you may or may not have been given notice that your VSP member contract is being terminated. Read the Notice of Adverse Action carefully so you understand it. If your VSP member contract is being terminated, you should know that VSP will probably report to the NPDB (National Practitioner Data Bank) that you have been terminated for false or misleading billing. That NPDB report will be on your "record" forever, and all other 3rd party payers will be notified or can discover this information. VSP may also report the termination to your State Board, and the Board may bring disciplinary action against your license for insurance fraud/unprofessional conduct. In many cases, by appealing the audit, you can obtain a reversal of the decision terminating your VSP membership, and in so doing, avoid the reporting to the NPDB and to your State Board. In some cases the VSP auditor will "offer" to reverse the termination and put you on "conditional status" (e.g. probation) IF you agree NOT to appeal and to pay the restitution in full. If this "offer" is made to you, record the date and time and make notes of what the auditor says to you. I consider this unethical, even a form of extortion, and you can advise the VSP hearing panel of the auditors "offer" during a hearing. 2. Your Notice of Adverse Action will probably indicate that you should speak with the auditor about the audit results, and that if you don't, after 10 days the audit becomes final. Understand that IF you speak with the auditor, anything you say can be brought up by the auditor if you later have a VSP hearing. The auditor will take notes of your discussion and use your comments against you if he/she can do so. Because of this, I typically do not advise speaking with the auditor. It has been my experience that the main reason the auditor wants to speak with you is to offer you a small discount in the restitution in exchange for you agreeing not to appeal and to pay the restitution. The auditor does not want you to appeal and bring scrutiny to his/her audit. The only leverage you have in negotiating a resolution is the "threat" of an appeal. I don't recommend giving that right up. You do NOT have to speak with the auditor. It is up to you. If there are easy issues, you may want to speak with the auditor and provide additional information in the hope of getting your restitution reduced. But don't set your expectations very high. The auditors are not working FOR YOU. Chances are they chose to audit you because they already decided you are probably overbilling or defrauding VSP. From that point on, it appears to me that their only goal is to prove what they already believe to be the case. 3. You have probably been ordered to pay the "audit fees." This typically ranges from $6,000 to $9,000. Though VSP will try and collect them from you, you do not necessarily have to pay all these fees. 
Prior to the 2015 Network Doctor Agreement, VSP's Network Doctor Agreement (NDA) provided that you must pay VSP's reasonable out-of-pocket expenses incurred in the audit. But VSP charges you for the auditors time, even though it is not an out-of-pocket expense. I do not believe this is recoverable, and the VSP hearing panel has generally agreed. You should ask to see the breakdown of the audit fees and agree to pay only the legitimate travel expenses. If you ask for a hearing there is a good chance you will not have to pay the additional audit fees for the auditors or the optometry director's time. If you have signed a new Network Doctor Agreement since January 2015 your contract probably says that VSP can charge you the fair market value of the auditors time. Nonetheless, there is room to challenge this in most cases. 4. You have the right to have a hearing to contest the audit. In many cases you will be able to obtain a reduction of your restitution and/or a reversal of the decision terminating you from the panel through the hearing process. The VSP hearing is held before three practicing optometrists. While they are typically members of the VSP Board of Directors, and are therefore not "neutral" in the ordinary sense, they are practicing optometrists and often disagree with the conclusions of the auditor. So do not assume you will lose if you appeal. It is all about putting together your evidence and demonstrating, in an organized coherent way, that your billing was correct. In addition, there are often legal arguments that can apply to reduce your restitution. While every case is different, I've obtained many reductions in restitution, often of 50% or more. 5. VSP will start withholding your VSP checks until after a hearing and final determination of the matter. You need to plan accordingly. Though I do not believe it is legal for them to do so, even before you get a hearing to contest the auditor's findings, VSP will begin withholding your VSP checks and holding the money to apply it to your restitution amount. There is little you can do about this, other than submit a written complaint to the California Department of Managed Health Care and ask them to intervene. So, you should expect your office cash flow to be impacted -- you will stop receiving VSP checks. You should plan accordingly. 6. If the audit results are correct, and you have been overbilling or falsely billing VSP, you should NOT concede anything and you SHOULD obtain professional help immediately. As mentioned above, VSP will turn some cases over to the State Board for possible license discipline. With good legal assistance, this may be avoided, and, even if it can't be avoided, the ability of the Board to seek discipline can be significantly reduced by proper handling of your case. Do NOT just agree to pay VSP in the hope the matter will go away. That may be used against you by the State Board as a tacit admission of wrongdoing. VSP is most interested in getting paid, and with legal assistance an agreement can often be reached to mitigate or minimize your state board discipline exposure. I have been successful in many cases in negotiating settlement agreements with VSP, and in negotiating with the State Board, to avoid license discipline. 7. What are the most common findings and how do I respond to them? VSP typically audits a "line of business." For instance, it may be VNCL (visually necessary CL), or elective CL, or primary eye care, or low-power spectacle prescriptions. 
Lately (in 2014-2015) most audits have been focused on contact lens practices. VSP will challenge the clinical basis for a VNCL charge, and they will look closely for proof of a CL fitting, and that your fitting and material fees are correct. VSP does not permit you to charge more for a VNCL fitting than you'd charge for an elective fitting of the same lens type. And they do not permit you to charge more for materials simply because the patient qualifies for VNCL. They will assert they have been overbilled if you do that in either case. They also look to see if your records reflect that a fitting was actually performed if you billed for a CL fitting. And they look for proof that contacts were ordered AND were dispensed (e.g. there is a date dispensed describing WHAT was dispensed and when). Your records are the key. They will save you, or they will bury you. But remember that financial records, invoices, credit card receipts, notes, and other items like these are part of the patient record, and they can be used to prove that lenses were ordered and dispensed. But, if you don't document the CL fitting/evaluation, it is hard to defend and you are likely to have to repay VSP a lot of money. Write MORE, not less. Hopefully some of this information will help you if you've been audited. Please feel free to email or call me if you have questions, and certainly if you've received a Notice of Adverse Action from VSP. Law Offices of Craig S Steinberg, O.D., a P.C. Copyright (c) 2010-2019. All rights reserved.
import numpy as np import amnet import amnet.util import z3 import sys import unittest class TestUtil(unittest.TestCase): @classmethod def setUpClass(cls): pass def test_maxN_z3(self): x = z3.Real('x') y = z3.Real('y') z = z3.Real('z') w1 = amnet.util.maxN_z3([x]) w2 = amnet.util.maxN_z3([x, y]) w3 = amnet.util.maxN_z3([x, y, z]) s = z3.Solver() s.push() s.add(x == -3) s.add(y == 2) s.add(z == 12) self.assertTrue(s.check() == z3.sat) # extract the output model = s.model() self.assertTrue(amnet.util.mfp(model, x) == -3) self.assertTrue(amnet.util.mfp(model, y) == 2) self.assertTrue(amnet.util.mfp(model, z) == 12) self.assertTrue(amnet.util.mfp(model, w1) == -3) self.assertTrue(amnet.util.mfp(model, w2) == 2) self.assertTrue(amnet.util.mfp(model, w3) == 12) s.pop() def test_minN_z3(self): x = z3.Real('x') y = z3.Real('y') z = z3.Real('z') w1 = amnet.util.minN_z3([x]) w2 = amnet.util.minN_z3([x, y]) w3 = amnet.util.minN_z3([x, y, z]) s = z3.Solver() s.push() s.add(x == -3) s.add(y == 2) s.add(z == 12) self.assertTrue(s.check() == z3.sat) # extract the output model = s.model() self.assertTrue(amnet.util.mfp(model, x) == -3) self.assertTrue(amnet.util.mfp(model, y) == 2) self.assertTrue(amnet.util.mfp(model, z) == 12) self.assertTrue(amnet.util.mfp(model, w1) == -3) self.assertTrue(amnet.util.mfp(model, w2) == -3) self.assertTrue(amnet.util.mfp(model, w3) == -3) s.pop() def test_abs_z3(self): x = z3.Real('x') y = z3.Real('y') s = z3.Solver() s.push() s.add(x == -3) s.add(y == amnet.util.abs_z3(x)) self.assertTrue(s.check() == z3.sat) # extract the output model = s.model() self.assertTrue(amnet.util.mfp(model, x) == -3) self.assertTrue(amnet.util.mfp(model, y) == 3) s.pop() s.push() s.add(x == 4) s.add(y == amnet.util.abs_z3(x)) self.assertTrue(s.check() == z3.sat) # extract the output model = s.model() self.assertTrue(amnet.util.mfp(model, x) == 4) self.assertTrue(amnet.util.mfp(model, y) == 4) s.pop() def test_norm1_z3(self): x = z3.RealVector(prefix='x', sz=3) y = z3.Real('y') s = z3.Solver() s.add(y == amnet.util.norm1_z3(x)) s.push() s.add([x[0] == 1, x[1] == 12, x[2] == -2]) self.assertTrue(s.check() == z3.sat) model = s.model() self.assertTrue(amnet.util.mfp(model, y) == abs(1) + abs(12) + abs(-2)) s.pop() s.push() s.add([x[0] == -1, x[1] == 0, x[2] == 0]) self.assertTrue(s.check() == z3.sat) model = s.model() self.assertTrue(amnet.util.mfp(model, y) == abs(-1) + abs(0) + abs(0)) s.pop() def test_norminf_z3(self): x = z3.RealVector(prefix='x', sz=3) y = z3.Real('y') s = z3.Solver() s.add(y == amnet.util.norminf_z3(x)) s.push() s.add([x[0] == 1, x[1] == 12, x[2] == -2]) self.assertTrue(s.check() == z3.sat) model = s.model() self.assertTrue(amnet.util.mfp(model, y) == 12) s.pop() s.push() s.add([x[0] == -1, x[1] == 0, x[2] == 0]) self.assertTrue(s.check() == z3.sat) model = s.model() self.assertTrue(amnet.util.mfp(model, y) == 1) s.pop() s.push() s.add([x[0] == -1, x[1] == -11, x[2] == 0]) self.assertTrue(s.check() == z3.sat) model = s.model() self.assertTrue(amnet.util.mfp(model, y) == 11) s.pop() def test_gaxpy_z3(self): m = 2 n = 3 A = np.array([[1, 2, -3], [4, -5, 6]]) x = z3.RealVector(prefix='x', sz=n) y = np.array([7, -8]) w0 = z3.RealVector(prefix='w0', sz=m) w1 = z3.RealVector(prefix='w1', sz=m) w0v = amnet.util.gaxpy_z3(A, x) w1v = amnet.util.gaxpy_z3(A, x, y) self.assertEqual(len(w0), m) self.assertEqual(len(w1), m) self.assertEqual(len(w0v), m) self.assertEqual(len(w1v), m) s = z3.Solver() s.add([w0[i] == w0v[i] for i in range(m)]) s.add([w1[i] == w1v[i] for i in range(m)]) 
s.push() xc = np.array([1, 2, 3]) s.add([x[i] == xc[i] for i in range(n)]) w0_true = np.dot(A, xc) w1_true = np.dot(A, xc) + y self.assertTrue(s.check() == z3.sat) model = s.model() for i in range(m): self.assertEqual(amnet.util.mfp(model, w0[i]), w0_true[i]) self.assertEqual(amnet.util.mfp(model, w1[i]), w1_true[i]) s.pop() s.push() xc = np.array([1, 0, -3]) s.add([x[i] == xc[i] for i in range(n)]) w0_true = np.dot(A, xc) w1_true = np.dot(A, xc) + y self.assertTrue(s.check() == z3.sat) model = s.model() for i in range(m): self.assertEqual(amnet.util.mfp(model, w0[i]), w0_true[i]) self.assertEqual(amnet.util.mfp(model, w1[i]), w1_true[i]) s.pop() if __name__ == '__main__': suite = unittest.TestLoader().loadTestsFromTestCase(TestUtil) result = unittest.TextTestRunner(verbosity=2).run(suite) sys.exit(not result.wasSuccessful())
Note that some of these links go off-site and will open in a new window. When we use the same terms but have different meanings in mind, we tend to misunderstand each other. We trust that our list of definitions will foster understanding. Words Can Never Hurt Me? Or can they? We think that the words we use are important. They may have eternal consequences. Included is the official “Position Statement” as well as related statements from the Church Manual. Transcript of live, unscripted responses from Pastor Jan Paulsen, who has been president of the General Conference of Seventh-day Adventists since 1999. Is the Adventist Church’s Opposition to Gay Marriage Consistent with Religious Liberty? You may contribute to this page by sending your questions (and answers, if you have them) to us through the link at the bottom of the left column. If you have comments/suggestions regarding the answers already given, we would appreciate your feedback as well.
# -*- coding: utf-8 -*- import os.path import cherrypy from ws4py.server.cherrypyserver import WebSocketPlugin, WebSocketTool from ws4py.websocket import WebSocket class BroadcastWebSocketHandler(WebSocket): def received_message(self, m): cherrypy.engine.publish('websocket-broadcast', str(m)) class Root(object): @cherrypy.expose def display(self): return """<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8" /> <title>WebSocket example displaying Android device sensors</title> <link rel="stylesheet" href="/css/style.css" type="text/css" /> <script type='application/javascript' src='http://code.jquery.com/jquery-1.9.1.min.js'></script> <script type="application/javascript" src="http://calebevans.me/projects/jcanvas/resources/jcanvas/jcanvas.min.js"> </script> <script type="application/javascript" src="/js/droidsensor.js"> </script> <script type="application/javascript"> $(document).ready(function() { initWebSocket(); drawAll(); }); </script> </head> <body> <section id="content" class="body"> <canvas id="canvas" width="900" height="620"></canvas> </section> </body> </html> """ @cherrypy.expose def ws(self): cherrypy.log("Handler created: %s" % repr(cherrypy.request.ws_handler)) @cherrypy.expose def index(self): return """<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8" /> <title>WebSocket example displaying Android device sensors</title> <script type='application/javascript' src='http://code.jquery.com/jquery-1.9.1.min.js'></script> <script type="application/javascript" src="/js/droidsensor.js"> </script> <script type="application/javascript"> $(document).ready(function() { initWebSocketAndSensors(); }); </script> </head> <body> </body> </html> """ if __name__ == '__main__': cherrypy.config.update({ 'server.socket_host': '0.0.0.0', 'server.socket_port': 9000, 'tools.staticdir.root': os.path.abspath(os.path.join(os.path.dirname(__file__), 'static')) } ) print os.path.abspath(os.path.join(__file__, 'static')) WebSocketPlugin(cherrypy.engine).subscribe() cherrypy.tools.websocket = WebSocketTool() cherrypy.quickstart(Root(), '', config={ '/js': { 'tools.staticdir.on': True, 'tools.staticdir.dir': 'js' }, '/css': { 'tools.staticdir.on': True, 'tools.staticdir.dir': 'css' }, '/images': { 'tools.staticdir.on': True, 'tools.staticdir.dir': 'images' }, '/ws': { 'tools.websocket.on': True, 'tools.websocket.handler_cls': BroadcastWebSocketHandler } } )
Been having some issues with WordPress on IIS7 / Windows 2003 for a while now, and I think I finally got it solved, so I thought I'd post the info here for others to see, as I had a hard time finding this info myself anywhere else, not even on the WordPress website. This is what I found after a lot of playing around; if you see any issues with this or have any feedback, please let me know below. On the wp-content folder I applied the following permissions. I'm not making anything in the root writable, like wp-config etc.; I change that manually or as a one-off. I asked around on Facebook the other day for any other Direct Debit service providers and solutions, and the following is part of the conversation that followed. I'm posting this to give you some idea of what's out there, and I will also have a follow-up on the EzyPay experience soon. If you are looking for Alan Swanwick from MW Law, please let me first tell you about my experience with this character (it might change your mind). My experience is the absolute worst I have ever had in regards to promptness, courtesy and business manners. This guy really tops it all in my books. We'll buy another one to use at home now as well.
from api.libs.base import CoreView from cmdb.models import DataCenter from django.contrib.auth.models import User from account.models import UserProfile from django.db.utils import IntegrityError class DataCenterView(CoreView): """ 数据中心视图类 """ login_required_action = ["get_list", "post_create", "post_delete", "post_change"] superuser_required_action = ["post_create", "post_delete", "post_change"] def get_list(self): per_page = self.parameters("per_page") if per_page: datacenter_objs = self.page_split(DataCenter.objects.all()) else: datacenter_objs = DataCenter.objects.all() datacenter_list = [] for datacenter_obj in datacenter_objs: datacenter_list.append(datacenter_obj.get_info()) self.response_data['data'] = datacenter_list def post_create(self): try: name = self.parameters("name") contact = self.parameters("contact") memo = self.parameters("memo") address = self.parameters("address") admin_id = int(self.parameters("admin")) admin_obj = UserProfile.objects.filter(id=admin_id).first() if admin_obj and admin_obj.user: new_datacenter_obj = DataCenter(name=name, contact=contact, memo=memo, admin=admin_obj.user, address=address) else: new_datacenter_obj = DataCenter(name=name, contact=contact, memo=memo, address=address) new_datacenter_obj.save() self.response_data['data'] = new_datacenter_obj.get_info() except IntegrityError: self.response_data['status'] = False self.status_code = 416 except Exception: self.response_data['status'] = False self.status_code = 500 def post_delete(self): datacenter_id = self.parameters("id") try: datacenter_obj = DataCenter.objects.filter(id=datacenter_id).first() if datacenter_obj: datacenter_obj.delete() else: self.response_data['status'] = False self.status_code = 404 except Exception as e: self.response_data['status'] = False self.status_code = 500 def post_change(self): datacenter_id = self.parameters("id") name = self.parameters("name") admin_id = self.parameters("admin_id") contact = self.parameters("contact") memo = self.parameters("memo") address = self.parameters("address") try: datacenter_obj = DataCenter.objects.filter(id=datacenter_id).first() if datacenter_obj: datacenter_obj.name = name admin_obj = UserProfile.objects.filter(id=admin_id).first() datacenter_obj.admin = admin_obj.user if admin_obj and hasattr(admin_obj, "user") else None datacenter_obj.contact = contact datacenter_obj.memo = memo datacenter_obj.address = address datacenter_obj.save() self.response_data['data'] = datacenter_obj.get_info() else: self.response_data['status'] = False self.status_code = 404 except IntegrityError: self.response_data['status'] = False self.status_code = 416 except Exception as e: self.response_data['status'] = False self.status_code = 500
Luxury Modern Condominium In A Gated Waterfront Community SUNSET COVE. Spacious Two Bedroom/ One Bath Condo With All Upgrades, Lighting, Kitchen Cabinets, Hardwood Floors, And Ceramic Tile. Two Car Tandem Private Parking Spot, Storage Unit, Boat Slip, Gym, Guest Parking, And Community Room For Private Parties Included. Listing courtesy of Larry Weinstein of Re/Max Distinguished Hms.&Prop.
""" pith tool A simple command-line tool to execute Python while taking care of the PYTHONPATH. See https://github.com/weegreenblobbie/pith-tool """ from setuptools import setup, Extension # pypi.rst processing with open("pypi.rst") as fd: readme_rst = fd.read() keywords = ''' development command-line script '''.split() classifiers = [ "Development Status :: 4 - Beta", "Intended Audience :: Developers", "Environment :: Console", "License :: OSI Approved :: MIT License", "Natural Language :: English", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Topic :: Software Development", "Topic :: Utilities", ] description = """\ A simple command-line tool to execute Python while taking care of the PYTHONPATH. """ from pith import __version__ as pith_version setup( author = "Nick Hilton et al", author_email = "[email protected]", classifiers = classifiers, description = description, keywords = keywords, long_description = readme_rst, name = "pith", #~ py_modules = [], scripts = ['pith'], url = "https://github.com/weegreenblobbie/pith-tool", version = pith_version, )
I work closely with both private and publicly listed companies on a range of corporate and commercial transactions, providing astute and commercial legal advice. My aim when working with any client is not only to be a legal adviser who provides answers to the legal questions, but also to be a key member of their team who provides solutions that ultimately create value for their businesses. I have extensive experience in complex corporate transactions such as public and private mergers and acquisitions and capital raisings. These transactions include the takeover bid by Bega Cheese for Warrnambool Cheese & Butter, HNA Group’s acquisition of the Allco Aviation business, Countplus’ initial public offering and listing on the ASX, Sportingbet’s acquisition of Centrebet by way of schemes of arrangement, and the fundraising by biNu, a tech startup, from local and overseas venture capital firms and private investors. I also frequently provide advice on corporate governance, and general contract and commercial law matters. I was admitted as a solicitor in Australia in 2006 and worked at Sparke Helmore before joining Addisons in 2007. This week Addisons announced that a leading Maltese gambling solicitor, Samuel Gauci, has joined the firm’s Gambling Law practice area. I was on the team that acted for fintech company Class Limited on its successful listing on the ASX, with a market capitalisation at listing of $117 million. Class is a leading provider of cloud-based administration software solutions for SMSF administrators in Australia. Class’ IPO involved the issue of new shares and a partial sell-down of existing shares by the company’s shareholders. I acted for accounting firm Moore Stephens Sydney on all legal aspects of its merger with Pitcher Partners Sydney. The merger creates a firm of greater size and strength that provides a strong platform for growth. Before to the merger, we have acted for Moore Stephens Sydney for a number of years. I was on the team that acted for the Moxey family on the sale of Moxey Farms, one of Australia’s largest single-site dairy operations, to a consortium comprising Leppington Pastoral Company, New Hope Dairy and Freedom Foods. We advised the Board of ASX-listed Sims Metal Management, the world’s largest metals and electronics recycler, on the company’s engagement of its CEO. We also advised Sims Metal Management on the preparation of its notice of annual general meeting, at which approval from its shareholders for the grant of termination benefits was obtained. I was on the team that acted for TriAusMin Limited, a company that was listed on the ASX and the Toronto Stock Exchange, on its scheme of arrangement with its shareholders. The scheme resulted in Heron Resources Limited acquiring all of the shares in TriAusMin. The merger provided the merged company with the resources to advance the Woodlawn Underground and Woodlawn Tailings Retreatment projects south of Goulburn. We advised TriAusMin on the scheme booklet as well as the scheme court process. We advised diversified financial services company Bluestone Group on all of the Australian legal aspects of the sale of a significant stake in the company to private equity company LDC, which is part of the Lloyds Banking Group in the UK. This transaction was the latest handled by Addisons who have acted for Bluestone Group since 2007. Our team also advised on Bluestone Group’s move of its headquarters to the UK. 
I worked closely with NRMA on an extensive review and redrafting of its corporate constitution, which was approved by its members. NRMA was seeking to modernise its constitution to reflect a more contemporary and relevant governance structure. I was a key member of the Addisons team that acted for Bega Cheese on its off-market takeover bid for Warrnambool Cheese & Butter. This was a long running matter that involved a three way bidding war and Takeovers Panel proceedings. Based on the Bega Cheese bid, the value of this transaction was in the vicinity of $400 million. Addisons has also advised Bega Cheese on a number of other transactions. I was part of the team acting for Countplus Limited on its IPO and listing on ASX. The transaction also involved staged acquisitions by Countplus of 18 accounting and financial planning businesses in the lead-up to its IPO. We continue to advise Countplus on a range of transactions, including acquisitions of accounting and financial planning businesses by its subsidiaries. HNA Group is a diversified transportation, logistics and hospitality group, which includes Hainan Airlines, the largest private airline in China. We acted for HNA Group on its acquisition of aviation assets with a portfolio value of US$3 billion from the receivers of Allco Finance Group. Risk can erode your hard-won purchase price. Take these steps to manage risk. Companies preparing for an initial public offering (IPO) that have directors who do not understand English or are not familiar with the Australian regulatory requirements for a prospectus should take note of the Court's findings in ASIC v Sino Australia Oil and Gas Limited (in liq) (Sino case). Most companies have a Constitution setting out the rules governing its activities and the rights and obligations of its shareholders and directors. It is not uncommon for shareholders in a closely held or joint venture company to also decide to adopt a Shareholders Agreement to regulate their relationship.
# Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import distutils import os import pytest from datetime import datetime, timedelta, timezone from google import showcase def test_lro(echo): future = echo.wait({ 'end_time': datetime.now(tz=timezone.utc) + timedelta(seconds=1), 'success': { 'content': 'The hail in Wales falls mainly on the snails...eventually.' }} ) response = future.result() assert isinstance(response, showcase.WaitResponse) assert response.content.endswith('the snails...eventually.') if distutils.util.strtobool(os.environ.get("GAPIC_PYTHON_ASYNC", "true")): @pytest.mark.asyncio async def test_lro_async(async_echo): future = await async_echo.wait({ 'end_time': datetime.now(tz=timezone.utc) + timedelta(seconds=1), 'success': { 'content': 'The hail in Wales falls mainly on the snails...eventually.' }} ) response = await future.result() assert isinstance(response, showcase.WaitResponse) assert response.content.endswith('the snails...eventually.')
Schumli's history began in 2003, when the first coffee houses were opened in St. Petersburg. Our company Anta, Ltd. opened the first coffee shop in partnership with the Russian representative office of the German company Melitta, manufacturer of professional coffee machines and grain coffee. Here one could try coffee brewed by automated coffee machines Cafina from original grain mixes. These first-class machines have been functioning to this day. Using them, we have brewed over 1,000,000 cups of coffee. From the very beginning, the German coffee was served together with snacks and desserts popular in Western Europe, cooked at the coffee house with the use of authentic recipes and traditional technologies. The most famous of them are Belgian waffles, cherry strudel and Vienna schnitzel. We used a German-Austrian word "Schumli" as a name for our coffee house. This is the name of a special coffee drink brewed under very high pressure. It requires a special mastery to brew it. It comes out slightly whipped, with a creamy texture. If it is brewed properly one can see a caramel-colour skin on the surface of the hot and moderately strong coffee, which is also called "Schumli". Presently "Schumli" is rather a typical European-city café with a wide selection of meals, desserts and drinks. It has three halls on two floors. We have envisaged special meals and a play-room for children. We continually try to diversify the menu with new seasonal offers and interesting meals from around the world. The set design of the cafe changes, as timed to certain holidays, and with change of seasons. According to our guests' opinions, this is always a pleasant surprise raising their mood and inspiring them with new ideas. The café is located in the heart of St. Petersburg, with lots of business centres around; the City Duma, the Committees of the City Government and St. Isaac's Square are nearby. The café's peak demand is on weekdays from 12:00 to 16:00. It is better to place an order and book a table in advance for this time. We are often selected for banquets, weddings, birthday parties and other events. The halls are leased out for seminars, lectures, business meetings. Sometimes the café is closed for outsiders, to hold private events. If you are coming to us from afar, it is better to call in advance. - and production of modern metal supports for garden plants and garden decorations Patio-Spb.ru patio-spb.ru We love our city and are always pleased to meet its residents and guests! Welcome!
import logging import os import types from io import BytesIO, IOBase import pickle import string from collections import defaultdict import archinfo from archinfo.arch_soot import SootAddressDescriptor, ArchSoot import cle from .misc.ux import deprecated l = logging.getLogger(name=__name__) def load_shellcode(shellcode, arch, start_offset=0, load_address=0): """ Load a new project based on a string of raw bytecode. :param shellcode: The data to load :param arch: The name of the arch to use, or an archinfo class :param start_offset: The offset into the data to start analysis (default 0) :param load_address: The address to place the data in memory (default 0) """ return Project( BytesIO(shellcode), main_opts={ 'backend': 'blob', 'arch': arch, 'entry_point': start_offset, 'base_addr': load_address, } ) class Project: """ This is the main class of the angr module. It is meant to contain a set of binaries and the relationships between them, and perform analyses on them. :param thing: The path to the main executable object to analyze, or a CLE Loader object. The following parameters are optional. :param default_analysis_mode: The mode of analysis to use by default. Defaults to 'symbolic'. :param ignore_functions: A list of function names that, when imported from shared libraries, should never be stepped into in analysis (calls will return an unconstrained value). :param use_sim_procedures: Whether to replace resolved dependencies for which simprocedures are available with said simprocedures. :param exclude_sim_procedures_func: A function that, when passed a function name, returns whether or not to wrap it with a simprocedure. :param exclude_sim_procedures_list: A list of functions to *not* wrap with simprocedures. :param arch: The target architecture (auto-detected otherwise). :param simos: a SimOS class to use for this project. :param bool translation_cache: If True, cache translated basic blocks rather than re-translating them. :param support_selfmodifying_code: Whether we aggressively support self-modifying code. When enabled, emulation will try to read code from the current state instead of the original memory, regardless of the current memory protections. :type support_selfmodifying_code: bool :param store_function: A function that defines how the Project should be stored. Default to pickling. :param load_function: A function that defines how the Project should be loaded. Default to unpickling. :param analyses_preset: The plugin preset for the analyses provider (i.e. Analyses instance). :type analyses_preset: angr.misc.PluginPreset :param engines_preset: The plugin preset for the engines provider (i.e. EngineHub instance). :type engines_preset: angr.misc.PluginPreset Any additional keyword arguments passed will be passed onto ``cle.Loader``. :ivar analyses: The available analyses. :type analyses: angr.analysis.Analyses :ivar entry: The program entrypoint. :ivar factory: Provides access to important analysis elements such as path groups and symbolic execution results. :type factory: AngrObjectFactory :ivar filename: The filename of the executable. :ivar loader: The program loader. :type loader: cle.Loader :ivar storage: Dictionary of things that should be loaded/stored with the Project. 
:type storage: defaultdict(list) """ def __init__(self, thing, default_analysis_mode=None, ignore_functions=None, use_sim_procedures=True, exclude_sim_procedures_func=None, exclude_sim_procedures_list=(), arch=None, simos=None, load_options=None, translation_cache=True, support_selfmodifying_code=False, store_function=None, load_function=None, analyses_preset=None, concrete_target=None, engines_preset=None, **kwargs): # Step 1: Load the binary if load_options is None: load_options = {} load_options.update(kwargs) if arch is not None: load_options.update({'arch': arch}) if isinstance(thing, cle.Loader): if load_options: l.warning("You provided CLE options to angr but you also provided a completed cle.Loader object!") self.loader = thing self.filename = self.loader.main_object.binary elif hasattr(thing, 'read') and hasattr(thing, 'seek'): l.info("Loading binary from stream") self.filename = None self.loader = cle.Loader(thing, **load_options) elif not isinstance(thing, str) or not os.path.exists(thing) or not os.path.isfile(thing): raise Exception("Not a valid binary file: %s" % repr(thing)) else: # use angr's loader, provided by cle l.info("Loading binary %s", thing) self.filename = thing self.loader = cle.Loader(self.filename, concrete_target=concrete_target, **load_options) # Step 2: determine its CPU architecture, ideally falling back to CLE's guess if isinstance(arch, str): self.arch = archinfo.arch_from_id(arch) # may raise ArchError, let the user see this elif isinstance(arch, archinfo.Arch): self.arch = arch elif arch is None: self.arch = self.loader.main_object.arch else: raise ValueError("Invalid arch specification.") # Step 3: Set some defaults and set the public and private properties if not default_analysis_mode: default_analysis_mode = 'symbolic' if not ignore_functions: ignore_functions = [] if isinstance(exclude_sim_procedures_func, types.LambdaType): l.warning("Passing a lambda type as the exclude_sim_procedures_func argument to " "Project causes the resulting object to be un-serializable.") self._sim_procedures = {} self.concrete_target = concrete_target # It doesn't make any sense to have auto_load_libs # if you have the concrete target, let's warn the user about this. if self.concrete_target and load_options.get('auto_load_libs', None): l.critical("Incompatible options selected for this project, please disable auto_load_libs if " "you want to use a concrete target.") raise Exception("Incompatible options for the project") if self.concrete_target and self.arch.name not in ['X86', 'AMD64', 'ARMHF']: l.critical("Concrete execution does not support yet the selected architecture. 
Aborting.") raise Exception("Incompatible options for the project") self._default_analysis_mode = default_analysis_mode self._exclude_sim_procedures_func = exclude_sim_procedures_func self._exclude_sim_procedures_list = exclude_sim_procedures_list self._should_use_sim_procedures = use_sim_procedures self._ignore_functions = ignore_functions self._support_selfmodifying_code = support_selfmodifying_code self._translation_cache = translation_cache self._executing = False # this is a flag for the convenience API, exec() and terminate_execution() below self._is_java_project = None self._is_java_jni_project = None if self._support_selfmodifying_code: if self._translation_cache is True: self._translation_cache = False l.warning("Disabling IRSB translation cache because support for self-modifying code is enabled.") self.entry = self.loader.main_object.entry self.storage = defaultdict(list) self.store_function = store_function or self._store self.load_function = load_function or self._load # Step 4: Set up the project's plugin hubs # Step 4.1: Engines. Get the preset from the loader, from the arch, or use the default. engines = EngineHub(self) if engines_preset is not None: engines.use_plugin_preset(engines_preset) elif self.loader.main_object.engine_preset is not None: try: engines.use_plugin_preset(self.loader.main_object.engine_preset) except AngrNoPluginError: raise ValueError("The CLE loader asked to use a engine preset: %s" % \ self.loader.main_object.engine_preset) else: try: engines.use_plugin_preset(self.arch.name) except AngrNoPluginError: engines.use_plugin_preset('default') self.engines = engines self.factory = AngrObjectFactory(self) # Step 4.2: Analyses self.analyses = AnalysesHub(self) self.analyses.use_plugin_preset(analyses_preset if analyses_preset is not None else 'default') # Step 4.3: ...etc self.kb = KnowledgeBase(self) # Step 5: determine the guest OS if isinstance(simos, type) and issubclass(simos, SimOS): self.simos = simos(self) #pylint:disable=invalid-name elif isinstance(simos, str): self.simos = os_mapping[simos](self) elif simos is None: self.simos = os_mapping[self.loader.main_object.os](self) else: raise ValueError("Invalid OS specification or non-matching architecture.") # Step 6: Register simprocedures as appropriate for library functions if isinstance(self.arch, ArchSoot) and self.simos.is_javavm_with_jni_support: # If we execute a Java archive that includes native JNI libraries, # we need to use the arch of the native simos for all (native) sim # procedures. sim_proc_arch = self.simos.native_arch else: sim_proc_arch = self.arch for obj in self.loader.initial_load_objects: self._register_object(obj, sim_proc_arch) # Step 7: Run OS-specific configuration self.simos.configure_project() def _register_object(self, obj, sim_proc_arch): """ This scans through an objects imports and hooks them with simprocedures from our library whenever possible """ # Step 1: get the set of libraries we are allowed to use to resolve unresolved symbols missing_libs = [] for lib_name in self.loader.missing_dependencies: try: missing_libs.append(SIM_LIBRARIES[lib_name]) except KeyError: l.info("There are no simprocedures for missing library %s :(", lib_name) # additionally provide libraries we _have_ loaded as a fallback fallback # this helps in the case that e.g. 
CLE picked up a linux arm libc to satisfy an android arm binary for lib in self.loader.all_objects: if lib.provides in SIM_LIBRARIES: simlib = SIM_LIBRARIES[lib.provides] if simlib not in missing_libs: missing_libs.append(simlib) # Step 2: Categorize every "import" symbol in each object. # If it's IGNORED, mark it for stubbing # If it's blacklisted, don't process it # If it matches a simprocedure we have, replace it for reloc in obj.imports.values(): # Step 2.1: Quick filter on symbols we really don't care about func = reloc.symbol if func is None: continue if not func.is_function and func.type != cle.backends.symbol.SymbolType.TYPE_NONE: continue if not reloc.resolved: # This is a hack, effectively to support Binary Ninja, which doesn't provide access to dependency # library names. The backend creates the Relocation objects, but leaves them unresolved so that # we can try to guess them here. Once the Binary Ninja API starts supplying the dependencies, # The if/else, along with Project._guess_simprocedure() can be removed if it has no other utility, # just leave behind the 'unresolved' debug statement from the else clause. if reloc.owner.guess_simprocs: l.debug("Looking for matching SimProcedure for unresolved %s from %s with hint %s", func.name, reloc.owner, reloc.owner.guess_simprocs_hint) self._guess_simprocedure(func, reloc.owner.guess_simprocs_hint) else: l.debug("Ignoring unresolved import '%s' from %s ...?", func.name, reloc.owner) continue export = reloc.resolvedby if self.is_hooked(export.rebased_addr): l.debug("Already hooked %s (%s)", export.name, export.owner) continue # Step 2.2: If this function has been resolved by a static dependency, # check if we actually can and want to replace it with a SimProcedure. # We opt out of this step if it is blacklisted by ignore_functions, which # will cause it to be replaced by ReturnUnconstrained later. if export.owner is not self.loader._extern_object and \ export.name not in self._ignore_functions: if self._check_user_blacklists(export.name): continue owner_name = export.owner.provides if isinstance(self.loader.main_object, cle.backends.pe.PE): owner_name = owner_name.lower() if owner_name not in SIM_LIBRARIES: continue sim_lib = SIM_LIBRARIES[owner_name] if not sim_lib.has_implementation(export.name): continue l.info("Using builtin SimProcedure for %s from %s", export.name, sim_lib.name) self.hook_symbol(export.rebased_addr, sim_lib.get(export.name, sim_proc_arch)) # Step 2.3: If 2.2 didn't work, check if the symbol wants to be resolved # by a library we already know something about. Resolve it appropriately. # Note that _check_user_blacklists also includes _ignore_functions. # An important consideration is that even if we're stubbing a function out, # we still want to try as hard as we can to figure out where it comes from # so we can get the calling convention as close to right as possible. 
elif reloc.resolvewith is not None and reloc.resolvewith in SIM_LIBRARIES: sim_lib = SIM_LIBRARIES[reloc.resolvewith] if self._check_user_blacklists(export.name): if not func.is_weak: l.info("Using stub SimProcedure for unresolved %s from %s", func.name, sim_lib.name) self.hook_symbol(export.rebased_addr, sim_lib.get_stub(export.name, sim_proc_arch)) else: l.info("Using builtin SimProcedure for unresolved %s from %s", export.name, sim_lib.name) self.hook_symbol(export.rebased_addr, sim_lib.get(export.name, sim_proc_arch)) # Step 2.4: If 2.3 didn't work (the symbol didn't request a provider we know of), try # looking through each of the SimLibraries we're using to resolve unresolved # functions. If any of them know anything specifically about this function, # resolve it with that. As a final fallback, just ask any old SimLibrary # to resolve it. elif missing_libs: for sim_lib in missing_libs: if sim_lib.has_metadata(export.name): if self._check_user_blacklists(export.name): if not func.is_weak: l.info("Using stub SimProcedure for unresolved %s from %s", export.name, sim_lib.name) self.hook_symbol(export.rebased_addr, sim_lib.get_stub(export.name, sim_proc_arch)) else: l.info("Using builtin SimProcedure for unresolved %s from %s", export.name, sim_lib.name) self.hook_symbol(export.rebased_addr, sim_lib.get(export.name, sim_proc_arch)) break else: if not func.is_weak: l.info("Using stub SimProcedure for unresolved %s", export.name) self.hook_symbol(export.rebased_addr, missing_libs[0].get(export.name, sim_proc_arch)) # Step 2.5: If 2.4 didn't work (we have NO SimLibraries to work with), just # use the vanilla ReturnUnconstrained, assuming that this isn't a weak func elif not func.is_weak: l.info("Using stub SimProcedure for unresolved %s", export.name) self.hook_symbol(export.rebased_addr, SIM_PROCEDURES['stubs']['ReturnUnconstrained'](display_name=export.name, is_stub=True)) def _guess_simprocedure(self, f, hint): """ Does symbol name `f` exist as a SIM_PROCEDURE? If so, return it, else return None. Narrows down the set of libraries to search based on hint. Part of the hack to enable Binary Ninja support. Remove if _register_objects() stops using it. """ # First, filter the SIM_LIBRARIES to a reasonable subset based on the hint hinted_libs = [] if hint == "win": hinted_libs = filter(lambda lib: lib if lib.endswith(".dll") else None, SIM_LIBRARIES) else: hinted_libs = filter(lambda lib: lib if ".so" in lib else None, SIM_LIBRARIES) for lib in hinted_libs: if SIM_LIBRARIES[lib].has_implementation(f.name): l.debug("Found implementation for %s in %s", f, lib) self.hook_symbol(f.relative_addr, (SIM_LIBRARIES[lib].get(f.name, self.arch))) break else: l.debug("Could not find matching SimProcedure for %s, ignoring.", f.name) def _check_user_blacklists(self, f): """ Has symbol name `f` been marked for exclusion by any of the user parameters? """ return not self._should_use_sim_procedures or \ f in self._exclude_sim_procedures_list or \ f in self._ignore_functions or \ (self._exclude_sim_procedures_func is not None and self._exclude_sim_procedures_func(f)) @staticmethod def _addr_to_str(addr): return "%s" % repr(addr) if isinstance(addr, SootAddressDescriptor) else "%#x" % addr # # Public methods # They're all related to hooking! # # pylint: disable=inconsistent-return-statements def hook(self, addr, hook=None, length=0, kwargs=None, replace=False): """ Hook a section of code with a custom function. 
This is used internally to provide symbolic summaries of library functions, and can be used to instrument execution or to modify control flow. When hook is not specified, it returns a function decorator that allows easy hooking. Usage:: # Assuming proj is an instance of angr.Project, we will add a custom hook at the entry # point of the project. @proj.hook(proj.entry) def my_hook(state): print("Welcome to execution!") :param addr: The address to hook. :param hook: A :class:`angr.project.Hook` describing a procedure to run at the given address. You may also pass in a SimProcedure class or a function directly and it will be wrapped in a Hook object for you. :param length: If you provide a function for the hook, this is the number of bytes that will be skipped by executing the hook by default. :param kwargs: If you provide a SimProcedure for the hook, these are the keyword arguments that will be passed to the procedure's `run` method eventually. :param replace: Control the behavior on finding that the address is already hooked. If true, silently replace the hook. If false (default), warn and do not replace the hook. If none, warn and replace the hook. """ if hook is None: # if we haven't been passed a thing to hook with, assume we're being used as a decorator return self._hook_decorator(addr, length=length, kwargs=kwargs) if kwargs is None: kwargs = {} l.debug('hooking %s with %s', self._addr_to_str(addr), str(hook)) if self.is_hooked(addr): if replace is True: pass elif replace is False: l.warning("Address is already hooked, during hook(%s, %s). Not re-hooking.", self._addr_to_str(addr), hook) return else: l.warning("Address is already hooked, during hook(%s, %s). Re-hooking.", self._addr_to_str(addr), hook) if isinstance(hook, type): raise TypeError("Please instanciate your SimProcedure before hooking with it") if callable(hook): hook = SIM_PROCEDURES['stubs']['UserHook'](user_func=hook, length=length, **kwargs) self._sim_procedures[addr] = hook def is_hooked(self, addr): """ Returns True if `addr` is hooked. :param addr: An address. :returns: True if addr is hooked, False otherwise. """ return addr in self._sim_procedures def hooked_by(self, addr): """ Returns the current hook for `addr`. :param addr: An address. :returns: None if the address is not hooked. """ if not self.is_hooked(addr): l.warning("Address %s is not hooked", self._addr_to_str(addr)) return None return self._sim_procedures[addr] def unhook(self, addr): """ Remove a hook. :param addr: The address of the hook. """ if not self.is_hooked(addr): l.warning("Address %s not hooked", self._addr_to_str(addr)) return del self._sim_procedures[addr] def hook_symbol(self, symbol_name, simproc, kwargs=None, replace=None): """ Resolve a dependency in a binary. Looks up the address of the given symbol, and then hooks that address. If the symbol was not available in the loaded libraries, this address may be provided by the CLE externs object. Additionally, if instead of a symbol name you provide an address, some secret functionality will kick in and you will probably just hook that address, UNLESS you're on powerpc64 ABIv1 or some yet-unknown scary ABI that has its function pointers point to something other than the actual functions, in which case it'll do the right thing. :param symbol_name: The name of the dependency to resolve. 
:param simproc: The SimProcedure instance (or function) with which to hook the symbol :param kwargs: If you provide a SimProcedure for the hook, these are the keyword arguments that will be passed to the procedure's `run` method eventually. :param replace: Control the behavior on finding that the address is already hooked. If true, silently replace the hook. If false, warn and do not replace the hook. If none (default), warn and replace the hook. :returns: The address of the new symbol. :rtype: int """ if type(symbol_name) is not int: sym = self.loader.find_symbol(symbol_name) if sym is None: # it could be a previously unresolved weak symbol..? new_sym = None for reloc in self.loader.find_relevant_relocations(symbol_name): if not reloc.symbol.is_weak: raise Exception("Symbol is strong but we couldn't find its resolution? Report to @rhelmot.") if new_sym is None: new_sym = self.loader.extern_object.make_extern(symbol_name) reloc.resolve(new_sym) reloc.relocate([]) if new_sym is None: l.error("Could not find symbol %s", symbol_name) return None sym = new_sym basic_addr = sym.rebased_addr else: basic_addr = symbol_name symbol_name = None hook_addr, _ = self.simos.prepare_function_symbol(symbol_name, basic_addr=basic_addr) self.hook(hook_addr, simproc, kwargs=kwargs, replace=replace) return hook_addr def is_symbol_hooked(self, symbol_name): """ Check if a symbol is already hooked. :param str symbol_name: Name of the symbol. :return: True if the symbol can be resolved and is hooked, False otherwise. :rtype: bool """ sym = self.loader.find_symbol(symbol_name) if sym is None: l.warning("Could not find symbol %s", symbol_name) return False hook_addr, _ = self.simos.prepare_function_symbol(symbol_name, basic_addr=sym.rebased_addr) return self.is_hooked(hook_addr) def unhook_symbol(self, symbol_name): """ Remove the hook on a symbol. This function will fail if the symbol is provided by the extern object, as that would result in a state where analysis would be unable to cope with a call to this symbol. """ sym = self.loader.find_symbol(symbol_name) if sym is None: l.warning("Could not find symbol %s", symbol_name) return False if sym.owner is self.loader._extern_object: l.warning("Refusing to unhook external symbol %s, replace it with another hook if you want to change it", symbol_name) return False hook_addr, _ = self.simos.prepare_function_symbol(symbol_name, basic_addr=sym.rebased_addr) self.unhook(hook_addr) return True def rehook_symbol(self, new_address, symbol_name): """ Move the hook for a symbol to a specific address :param new_address: the new address that will trigger the SimProc execution :param symbol_name: the name of the symbol (f.i. strcmp ) :return: None """ new_sim_procedures = {} for key_address, simproc_obj in self._sim_procedures.items(): if simproc_obj.display_name == symbol_name: new_sim_procedures[new_address] = simproc_obj else: new_sim_procedures[key_address] = simproc_obj self._sim_procedures = new_sim_procedures # # A convenience API (in the style of triton and manticore) for symbolic execution. # def execute(self, *args, **kwargs): """ This function is a symbolic execution helper in the simple style supported by triton and manticore. It designed to be run after setting up hooks (see Project.hook), in which the symbolic state can be checked. This function can be run in three different ways: - When run with no parameters, this function begins symbolic execution from the entrypoint. 
- It can also be run with a "state" parameter specifying a SimState to begin symbolic execution from. - Finally, it can accept any arbitrary keyword arguments, which are all passed to project.factory.full_init_state. If symbolic execution finishes, this function returns the resulting simulation manager. """ if args: state = args[0] else: state = self.factory.full_init_state(**kwargs) pg = self.factory.simulation_manager(state) self._executing = True return pg.run(until=lambda lpg: not self._executing) def terminate_execution(self): """ Terminates a symbolic execution that was started with Project.execute(). """ self._executing = False # # Private methods related to hooking # def _hook_decorator(self, addr, length=0, kwargs=None): """ Return a function decorator that allows easy hooking. Please refer to hook() for its usage. :return: The function decorator. """ def hook_decorator(func): self.hook(addr, func, length=length, kwargs=kwargs) return func return hook_decorator # # Pickling # def __getstate__(self): try: store_func, load_func = self.store_function, self.load_function self.store_function, self.load_function = None, None return dict(self.__dict__) finally: self.store_function, self.load_function = store_func, load_func def __setstate__(self, s): self.__dict__.update(s) def _store(self, container): # If container is a filename. if isinstance(container, str): with open(container, 'wb') as f: try: pickle.dump(self, f, pickle.HIGHEST_PROTOCOL) except RuntimeError as e: # maximum recursion depth can be reached here l.error("Unable to store Project: '%s' during pickling", e) # If container is an open file. elif isinstance(container, IOBase): try: pickle.dump(self, container, pickle.HIGHEST_PROTOCOL) except RuntimeError as e: # maximum recursion depth can be reached here l.error("Unable to store Project: '%s' during pickling", e) # If container is just a variable. else: try: container = pickle.dumps(self, pickle.HIGHEST_PROTOCOL) except RuntimeError as e: # maximum recursion depth can be reached here l.error("Unable to store Project: '%s' during pickling", e) @staticmethod def _load(container): if isinstance(container, str): # If container is a filename. if all(c in string.printable for c in container) and os.path.exists(container): with open(container, 'rb') as f: return pickle.load(f) # If container is a pickle string. else: return pickle.loads(container) # If container is an open file elif isinstance(container, IOBase): return pickle.load(container) # What else could it be? else: l.error("Cannot unpickle container of type %s", type(container)) return None def __repr__(self): return '<Project %s>' % (self.filename if self.filename is not None else 'loaded from stream') # # Properties # @property def use_sim_procedures(self): return self._should_use_sim_procedures @property def is_java_project(self): """ Indicates if the project's main binary is a Java Archive. """ if self._is_java_project is None: self._is_java_project = isinstance(self.arch, ArchSoot) return self._is_java_project @property def is_java_jni_project(self): """ Indicates if the project's main binary is a Java Archive, which interacts during its execution with native libraries (via JNI). 
""" if self._is_java_jni_project is None: self._is_java_jni_project = isinstance(self.arch, ArchSoot) and self.simos.is_javavm_with_jni_support return self._is_java_jni_project # # Compatibility # @property @deprecated(replacement='simos') def _simos(self): return self.simos from .errors import AngrNoPluginError from .factory import AngrObjectFactory from angr.simos import SimOS, os_mapping from .analyses.analysis import AnalysesHub from .knowledge_base import KnowledgeBase from .engines import EngineHub from .procedures import SIM_PROCEDURES, SIM_LIBRARIES
There are acting awards on an actor's resume that merit the resume being trashed. There are, of course, acting awards of merit that matter on an actor's resume. Which awards are of merit and which are trash-bin bound?

Recently, there was a heated debate on social media over whether BroadwayWorld awards should or should not be included on an actor's resume. Unless you're a vain, insecure, attention-seeking-at-any-demerit actor: BroadwayWorld awards do not, (repeat) do not belong on an actor's resume. They hold no merit. Why? BroadwayWorld awards are beg-for-votes awards driven by actors on social media begging friends and family to vote for performances the majority of voters have possibly never seen. The awards are predominantly for regional theater, and most of the voters haven't seen the productions. Many of the voters are not industry peers who have been vetted for profession-related experience. Casting and talent agents know this, as do directors. The inclusion of a beg-for-votes award carries no credibility other than telling us that an actor creatively manipulates votes on social media to his or her benefit. And the priority purpose of online voting for actors? Money. The clicks on BroadwayWorld or similar voting platforms generate revenue for the website.

Awards of merit are ones in which the voters are screened professional peers and/or the award is accredited by a review panel. As an example, with the TONY awards: TONY voters are working Broadway professionals chosen through a stringent vetting process. The same is true of the acting awards noted earlier as appropriate to include on an actor's resume. That professional peer review is why such award recognitions are respected over the "Vote for me, anybody" BroadwayWorld-type awards.

Awards that include, or predominantly recognize, community theater companies (hello Ostrander and Perry awards) do not belong on a professional actor's resume. High school acting awards are just as offending. I've been horrified at seeing high school "Best Actor" awards on the resumes of 30-something 'professional' actors. Next.

Placing the award elsewhere on the resume (i.e. in a Special Skills or Awards category) may prompt the viewer of your resume to overlook your achievement. With the award placed directly under the credit, the achievement is prominent to the viewer. Marking a credit with an asterisk and listing the award elsewhere forces the viewer to search for where that * corresponds on the resume; don't do this. An actor's resume is not a game of hide-and-seek.

Just as awards of respectability are handed out judiciously, actors are to be judicious in the level of awards with which they honor their work on their resume. Better to be viewed as honored by peers than honored by polls.

Paul Russell's career as a casting director, director, acting teacher and former actor has spanned over thirty years. He has worked on projects for major film studios, television networks, and Broadway. Paul has taught the business of acting and audition technique at NYU and has taught master classes at dozens of acting programs at universities including Hofstra, Elon, Wright State University, and Rutgers. He is the author of ACTING: Make It Your Business – How to Avoid Mistakes and Achieve Success as a Working Actor. For more information on Paul's projects, visit www.PaulRussell.net.
# This file is part of Wolnelektury, licensed under GNU Affero GPLv3 or later. # Copyright © Fundacja Nowoczesna Polska. See NOTICE for more information. # from django.core.files.base import ContentFile from catalogue.test_utils import * from catalogue.models import Book class DictionaryTests(WLTestCase): def setUp(self): WLTestCase.setUp(self) self.book_info = BookInfoStub( author=PersonStub(("Jim",), "Lazy"), kind="X-Kind", genre="X-Genre", epoch="X-Epoch", **info_args("Default Book") ) def test_book_with_footnote(self): book_text = b"""<utwor> <opowiadanie> <akap><pe><slowo_obce>rose</slowo_obce> --- kind of a flower.</pe></akap> <akap><pe><slowo_obce>rose</slowo_obce> --- kind of a flower.</pe></akap> <akap><pe><slowo_obce>rose</slowo_obce> (techn.) --- #FF007F.</pe></akap> </opowiadanie></utwor> """ book = Book.from_text_and_meta(ContentFile(book_text), self.book_info) self.assertEqual( len(self.client.get('/przypisy/').context['object_list']), 2, 'There should be two notes on the note list.') self.assertEqual( len(self.client.get('/przypisy/?ltr=a').context['object_list']), 0, 'There should not be a note for the letter A.') self.assertEqual( len(self.client.get('/przypisy/?ltr=r').context['object_list']), 2, 'Both notes start with the letter R.') self.assertEqual( len(self.client.get('/przypisy/?qual=techn.').context['object_list']), 1, 'There should be a note qualified with \'techn.\' qualifier.')
He is Group Chairman of the Fung Group, a Hong Kong-based multinational group which comprises major operating groups engaging in sourcing, logistics, distribution and retailing. They include publicly-listed Li & Fung Limited, Global Brands Group Holding Limited, Convenience Retail Asia Limited and other privately held entities. Dr. Fung played a leading role in driving the development of the supply chain infrastructure at Li & Fung Limited, which has enabled it to become the leading consumer goods design, development, sourcing, and logistics company for major retailers and brands around the world. On the Chinese Mainland, he is a member of the International Business Leaders Advisory Council for the Mayor of Beijing, member of the Advisory Board of the School of Economics and Management of Tsinghua University, an Honorary Trustee of Peking University and an Honorary Professor of Renmin University. He was a member of the Chinese People’s Political Consultative Conference from 2003 to February 2018, Vice-Chairman of the China Centre for International Economic Exchanges (2009-2014). Internationally, Dr. Fung played a leadership role from 2007 in the Paris-based International Chamber of Commerce, of which he was Chairman from 2008 to 2010. From 2012 to 2013, he was a member of the World Trade Organisation’s Panel On Defining The Future of Trade. Dr. Fung was made a Commander of the Order of the British Empire (CBE) in 1993 in recognition for his commitment to public service. In 1995, he was voted Businessman of the Year under the Hong Kong Business Awards Scheme for his success as an entrepreneur and for his contribution to Hong Kong’s economic development. He also was chosen Hong Kong Leader of the Year in 1998 and was named by BusinessWeek as one of the 50 Asian leaders leading the region out of its economic crisis. In 2001, he was awarded the Harvard Medal for outstanding service to Harvard University. In 2003, the Hong Kong Government awarded Dr. Fung the Gold Bauhinia Star and, in 2010, the Grand Bauhinia Medal for his distinguished service to the Hong Kong community. In 2011, he was awarded the MIT Sloan Dean’s Award for Excellence in Leadership.
# -*- coding: utf-8 -*- ############################################################################## # For copyright and license notices, see __openerp__.py file in module root # directory ############################################################################## from openerp import models, api import logging _logger = logging.getLogger(__name__) class wizard_multi_charts_accounts(models.TransientModel): _inherit = 'wizard.multi.charts.accounts' # @api.model # def generate_journals( # self, chart_template_id, acc_template_ref, company_id): # """ # Overwrite this function so that no journal is created on chart # installation # """ # return True @api.model def _prepare_all_journals( self, chart_template_id, acc_template_ref, company_id): """ Inherit this function so that we dont create sale and purchase journals """ journal_data = super( wizard_multi_charts_accounts, self)._prepare_all_journals( chart_template_id, acc_template_ref, company_id) # remove sale and purchase journals data new_journal_data = [ journal for journal in journal_data if journal['type'] not in [ 'sale', 'purchase', 'sale_refund', 'purchase_refund']] return new_journal_data @api.model def _create_bank_journals_from_o2m( self, obj_wizard, company_id, acc_template_ref): """ Overwrite this function so that no journal is created on chart installation """ return True # @api.model # def _prepare_all_journals( # self, chart_template_id, acc_template_ref, company_id): # """ # Inherit this function in order to add use document and other # configuration if company use argentinian localization # """ # journal_data = super( # wizard_multi_charts_accounts, self)._prepare_all_journals( # chart_template_id, acc_template_ref, company_id) # # if argentinian chart, we set use_argentinian_localization for company # company = self.env['res.company'].browse(company_id) # if company.use_argentinian_localization: # point_of_sale = self.env['afip.point_of_sale'].search([ # ('number', '=', 1), # ('company_id', '=', company_id), # ], limit=1) # if not point_of_sale: # point_of_sale = point_of_sale.create({ # 'name': 'Punto de venta 1', # 'number': 1, # 'company_id': company_id, # }) # for vals_journal in journal_data: # if vals_journal['type'] in [ # 'sale', 'sale_refund', 'purchase', 'purchase_refund']: # vals_journal['use_documents'] = True # vals_journal['point_of_sale_id'] = point_of_sale.id # return journal_data @api.model def configure_chart( self, company_id, currency_id, chart_template_id, sale_tax_id, purchase_tax_id): # return True if self.env['account.account'].search( [('company_id', '=', company_id)]): _logger.warning( 'There is already a chart of account for company_id %i' % ( company_id)) return True _logger.info( 'Configuring chart %i for company %i' % ( chart_template_id, company_id)) wizard = self.with_context(company_id=company_id).create({ 'company_id': company_id, 'currency_id': currency_id, 'only_one_chart_template': True, 'chart_template_id': chart_template_id, 'code_digits': 7, "sale_tax": sale_tax_id, "purchase_tax": purchase_tax_id, # 'sale_tax_rate': , # 'purchase_tax_rate': , # 'complete_tax_set': fie }) wizard.execute() # add default tax to current products _logger.info('Updating products taxes') tax_vals = {} sale_tax_template = self.env['account.tax.template'].browse( sale_tax_id) sale_tax = self.env['account.tax'].search([ ('company_id', '=', company_id), ('name', '=', sale_tax_template.name)], limit=1) if sale_tax: tax_vals['taxes_id'] = [(4, sale_tax.id)] purchase_tax_template = 
self.env['account.tax.template'].browse( purchase_tax_id) purchase_tax = self.env['account.tax'].search([ ('company_id', '=', company_id), ('name', '=', purchase_tax_template.name)], limit=1) if purchase_tax: tax_vals['supplier_taxes_id'] = [(4, purchase_tax.id)] for product in self.env['product.product'].search([]): product.write(tax_vals) return True
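For context, a hedged sketch of how the configure_chart helper above might be driven from other server-side code; the record lookups below are illustrative assumptions, not part of the original module.

# Illustrative only: picks the first chart template and matching tax templates.
# Field names used (type_tax_use, chart_template_id) exist on account.tax.template
# in standard OpenERP 8; adjust the domains for a concrete localization.
def setup_company_chart(env, company):
    chart_template = env['account.chart.template'].search([], limit=1)
    sale_tax = env['account.tax.template'].search(
        [('chart_template_id', '=', chart_template.id),
         ('type_tax_use', 'in', ['sale', 'all'])], limit=1)
    purchase_tax = env['account.tax.template'].search(
        [('chart_template_id', '=', chart_template.id),
         ('type_tax_use', 'in', ['purchase', 'all'])], limit=1)
    env['wizard.multi.charts.accounts'].configure_chart(
        company.id, company.currency_id.id, chart_template.id,
        sale_tax.id, purchase_tax.id)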
In August 2012, I was asked if I would like to be the subject of a campaign to promote Pavilions Shopping Centre in Birmingham. Interestingly, instead of using models, Pavilions used “real” women to promote the centre. Having worked as a lawyer prior to becoming a stylist and being a Mum of three children, it was felt that my story would inspire other women. As part of the campaign I styled myself outfits in which I was photographed. For me it was important to show a whole range of looks – from work outfits to smart/casual outfits, to something to wear on the school run and also for an evening out. I used brands with different price points and didn’t choose anything that I wouldn’t have been happy to wear myself. These images have since appeared in the media to promote the campaign, and interviews and a style event followed. As part of the campaign, I was also asked to film a style vlog – and here you can see the first of its three parts. I hope that you enjoy it, that you find it useful and that you take some styling tips away from it. I feel that in the media, there is a lack of accessible looks for ladies to wear on a daily basis – which is partly why I started my “Style Guile” blog. So, it was only right that I included one very practical, but stylish, look which included some mainstays from our Wardrobes – jeans, a trench coat, brogues, a denim shirt, a knit and a biker jacket. It’s a layered look with texture and interest. I wanted to demonstrate how easy it is to wear a print which can, to start with, look so scary! Provided the rest of your outfit is plain and just one piece takes centre stage, everything falls into place. Wearing prints head to toe is very popular this season – but probably not for the faint hearted! This look featured a dress from NW3 at Hobbs, reflecting the Heritage trend which is so big this season. I loved the print on this dress and also the Peter-Pan collar – which taps into another big trend. The shoes were from M&S.
# -*- coding: utf-8 -*- """ *************************************************************************** OTBHelper.py --------------------- Copyright : (C) 2013 by CS Systemes d'information (CS SI) Email : otb at c-s dot fr (CS SI) Contributors : Julien Malik (CS SI) - File creation Oscar Picas (CS SI) - Alexia Mondot (CS SI) - Add particular case in xml creation *************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * *************************************************************************** """ __author__ = 'Julien Malik, Oscar Picas, Alexia Mondot' __copyright__ = '(C) 2013, CS Systemes d\'information (CS SI)' # This will get replaced with a git SHA1 when you do a git archive __revision__ = '$Format:%H$' __version__ = "3.8" import os import copy import xml.etree.ElementTree as ET import traceback from contextlib import contextmanager import shutil @contextmanager def tag(name, c): c.append("<%s>" % name) yield if ' ' in name: c.append("</%s>" % name.split(' ')[0]) else: c.append("</%s>" % name) @contextmanager def opentag(name, c): c.append("<%s>" % name) yield def get_group(appInstance): tags = appInstance.GetDocTags() sectionTags = ["Image Manipulation", "Vector Data Manipulation", "Calibration", "Geometry", "Image Filtering", "Feature Extraction", "Stereo", "Learning", "Segmentation"] for sectionTag in sectionTags: for tag in tags: if tag == sectionTag: return sectionTag return "Miscellaneous" def set_OTB_log(): import logging logger = logging.getLogger('OTBGenerator') hdlr = logging.FileHandler('OTBGenerator.log') hdlr.setLevel(logging.DEBUG) cons = logging.StreamHandler() cons.setLevel(logging.CRITICAL) formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s') hdlr.setFormatter(formatter) logger.addHandler(hdlr) logger.addHandler(cons) logger.setLevel(logging.DEBUG) def get_OTB_log(): import logging logger = logging.getLogger('OTBGenerator') if not logger.handlers: set_OTB_log() logger = logging.getLogger('OTBGenerator') return logger def indent(elem, level=0): i = "\n" + level * " " if len(elem): if not elem.text or not elem.text.strip(): elem.text = i + " " if not elem.tail or not elem.tail.strip(): elem.tail = i for elem in elem: indent(elem, level + 1) if not elem.tail or not elem.tail.strip(): elem.tail = i else: if level and (not elem.tail or not elem.tail.strip()): elem.tail = i set_OTB_log() def get_parameters(): parameters = {getattr(otbApplication, each): each for each in dir(otbApplication) if 'ParameterType_' in each} return parameters def get_inverted_parameters(): """ This function allows mapping otb parameters with processing parameters. 
""" parameters = {getattr(otbApplication, each): each for each in dir(otbApplication) if 'ParameterType_' in each} inverted_parameters = {key: value for value, key in parameters.items()} inverted_parameters['ParameterType_Radius'] = 1 inverted_parameters['ParameterType_RAM'] = 1 inverted_parameters['ParameterType_ComplexInputImage'] = 9 inverted_parameters['ParameterType_ComplexOutputImage'] = 13 inverted_parameters_clone = copy.deepcopy(inverted_parameters) inverted_parameters_clone['ParameterType_Empty'] = 'ParameterBoolean' inverted_parameters_clone['ParameterType_Int'] = 'ParameterNumber' inverted_parameters_clone['ParameterType_Float'] = 'ParameterNumber' inverted_parameters_clone['ParameterType_String'] = 'ParameterString' inverted_parameters_clone['ParameterType_StringList'] = 'ParameterString' inverted_parameters_clone['ParameterType_InputFilename'] = 'ParameterFile' inverted_parameters_clone['ParameterType_OutputFilename'] = 'OutputFile' inverted_parameters_clone['ParameterType_Directory'] = 'ParameterFile' inverted_parameters_clone['ParameterType_Choice'] = 'ParameterSelection' inverted_parameters_clone['ParameterType_InputImage'] = 'ParameterRaster' inverted_parameters_clone['ParameterType_InputImageList'] = 'ParameterMultipleInput' inverted_parameters_clone['ParameterType_InputVectorData'] = 'ParameterVector' inverted_parameters_clone['ParameterType_InputVectorDataList'] = 'ParameterMultipleInput' inverted_parameters_clone['ParameterType_OutputImage'] = 'OutputRaster' inverted_parameters_clone['ParameterType_OutputVectorData'] = 'OutputVector' inverted_parameters_clone['ParameterType_Radius'] = 'ParameterNumber' inverted_parameters_clone['ParameterType_Group'] = None inverted_parameters_clone['ParameterType_ListView'] = 'ParameterSelection' inverted_parameters_clone['ParameterType_ComplexInputImage'] = 'ParameterRaster' inverted_parameters_clone['ParameterType_ComplexOutputImage'] = 'OutputRaster' inverted_parameters_clone['ParameterType_RAM'] = 'ParameterNumber' inverted_parameters_clone['ParameterType_InputProcessXML'] = 'ParameterFile' inverted_parameters_clone['ParameterType_OutputProcessXML'] = 'ParameterFile' inverted_parameters_clone['ParameterType_InputFilenameList'] = 'ParameterMultipleInput' # 'ParameterString' return inverted_parameters_clone def retrieve_module_name(param): """ returns the file parameter of the given processing parameter """ if param: try: import processing.core dir_p = os.path.dirname(processing.core.__file__) if 'Parameter' in param: exec("from processing.core.parameters import %s" % param) return os.path.join(dir_p, "parameters.py") if 'Output' in param: exec("from processing.core.outputs import %s" % param) return os.path.join(dir_p, "outputs.py") except ImportError as e: print "Error parsing ", param return None def get_constructor_parameters_from_filename(py_file, param=""): """ Get all parameters from the constructor of the class param in the given py_file """ import ast asto = ast.parse(open(py_file).read()) # get all class definitions corresponding to param given len(e1) should be 1 e1 = [each for each in asto.body if isinstance(each, ast.ClassDef) and each.name == param] # e1[0].body lists all functions from the class e1[0] # e2 is a list of __init__ functions of class e1[0] e2 = [each for each in e1[0].body if hasattr(each, "name") and each.name == "__init__"] if len(e2) > 0: e4 = e2[0].args.args else: e4 = [] e5 = [each.id for each in e4] return e5 def get_customize_app_functions(): """ Get all parameters from the constructor of the 
class param in the given py_file """ import ast py_file = os.path.join(os.path.dirname(__file__), "OTBSpecific_XMLcreation.py") asto = ast.parse(open(py_file).read()) # get all class definitions corresponding to param given len(e1) should be 1 e1 = [each.name for each in asto.body if isinstance(each, ast.FunctionDef) and each.name.startswith("get")] return e1 def get_xml_description_from_application_name(our_app, criteria=None): """ creates an xml containing information about the given our_app """ # creates the application to get the description # header app_instance = otbApplication.Registry.CreateApplication(our_app) root = ET.Element('root') app = ET.SubElement(root, 'key') app.text = our_app executable = ET.SubElement(root, 'exec') executable.text = "otbcli_" + our_app longname = ET.SubElement(root, 'longname') longname.text = app_instance.GetDocName() group = ET.SubElement(root, 'group') group.text = get_group(app_instance) desc = ET.SubElement(root, 'description') desc.text = app_instance.GetDescription() if not criteria: real_criteria = lambda x: True else: if not callable(criteria): raise Exception("criteria parameter must be a valid python callable") real_criteria = criteria if len(our_app) == 0: raise Exception("App name is empty !") # get parameters param_keys = [param_key for param_key in app_instance.GetParametersKeys()] param_keys = filter(real_criteria, param_keys) for param_key in param_keys: if not param_key == "inxml" and not param_key == "outxml": get_param_descriptor(app.text, app_instance, param_key, root) indent(root) return root def get_the_choices(app_instance, our_descriptor, root): choices = ET.SubElement(root, 'choices') for choice in app_instance.GetChoiceKeys(our_descriptor): choice_node = ET.SubElement(choices, 'choice') choice_node.text = choice def get_param_descriptor(appkey, app_instance, our_descriptor, root): """ update the root xml with the data of the parameter given by "our_descriptor" """ logger = get_OTB_log() parameters = get_parameters() our_type = parameters[app_instance.GetParameterType(our_descriptor)] #get the list of mapped parameters (otb/processing) inverted_parameters = get_inverted_parameters() mapped_parameter = inverted_parameters[our_type] file_parameter = retrieve_module_name(mapped_parameter) if not file_parameter: logger.info("Type %s is not handled yet. 
(%s, %s)" % (our_type, appkey, our_descriptor)) return the_params = get_constructor_parameters_from_filename(file_parameter, mapped_parameter) # special for default values of OpticalCalibration if appkey == "OpticalCalibration": if "default" in the_params: try: app_instance.GetParameterAsString(our_descriptor) except RuntimeError as e: return param = ET.SubElement(root, 'parameter') attrs = {'source_parameter_type': parameters[app_instance.GetParameterType(our_descriptor)]} if appkey == "Segmentation": if parameters[app_instance.GetParameterType(our_descriptor)] == "ParameterType_OutputFilename": attrs = {'source_parameter_type': 'ParameterType_OutputVectorData'} if appkey == "LSMSVectorization": if parameters[app_instance.GetParameterType(our_descriptor)] == "ParameterType_OutputFilename": attrs = {'source_parameter_type': 'ParameterType_OutputVectorData'} if appkey == "SplitImage": if parameters[app_instance.GetParameterType(our_descriptor)] == "ParameterType_OutputImage": attrs = {'source_parameter_type': 'ParameterType_OutputFilename'} if parameters[app_instance.GetParameterType(our_descriptor)] == "ParameterType_ListView": if not appkey == "RadiometricIndices": attrs = {'source_parameter_type': 'ParameterType_StringList'} param_type = ET.SubElement(param, 'parameter_type', attrib=attrs) param_type.text = inverted_parameters[parameters[app_instance.GetParameterType(our_descriptor)]] if appkey == "Segmentation": if parameters[app_instance.GetParameterType(our_descriptor)] == "ParameterType_OutputFilename": param_type.text = "OutputVector" if appkey == "LSMSVectorization": if parameters[app_instance.GetParameterType(our_descriptor)] == "ParameterType_OutputFilename": param_type.text = "OutputVector" if appkey == "SplitImage": if parameters[app_instance.GetParameterType(our_descriptor)] == "ParameterType_OutputImage": param_type.text = "OutputFile" if parameters[app_instance.GetParameterType(our_descriptor)] == "ParameterType_ListView": if not appkey == "RadiometricIndices": param_type.text = "ParameterString" # {the_params = get_constructor_parameters_from_filename(file_parameter, mapped_parameter) if len(the_params) == 0: # if 'Output' in file_parameter: if 'output' in file_parameter: file_path = os.path.join(os.path.dirname(file_parameter), 'outputs.py') the_params = get_constructor_parameters_from_filename(file_path, "Output") if 'parameter' in file_parameter: file_path = os.path.join(os.path.dirname(file_parameter), 'parameters.py') the_params = (file_path) the_params = get_constructor_parameters_from_filename(file_path, "Parameter") if "self" in the_params: #remove self the_params.remove("self") # the_params[1:] # to be identical as before ! 
if "isSource" in the_params: the_params.remove("isSource") if "showSublayersDialog" in the_params: the_params.remove("showSublayersDialog") if "ext" in the_params: the_params.remove("ext") else: raise Exception("Unexpected constructor parameters") key = ET.SubElement(param, 'key') key.text = our_descriptor is_choice_type = False for each in the_params: if each == "name": name = ET.SubElement(param, 'name') nametext = app_instance.GetParameterName(our_descriptor) if "angle" in nametext: name.text = nametext.replace("\xc2\xb0", "deg") else: name.text = app_instance.GetParameterName(our_descriptor) if our_descriptor == "acqui.fluxnormcoeff": pass elif each == "description": desc = ET.SubElement(param, 'description') desc.text = app_instance.GetParameterDescription(our_descriptor) elif each == "optional": optional = ET.SubElement(param, 'optional') optional.text = str(not app_instance.IsMandatory(our_descriptor)) elif each == "default": done = False reason = [] try: default_value = str(app_instance.GetParameterAsString(our_descriptor)) done = True except: reason.append(traceback.format_exc()) if not done: try: default_value = str(app_instance.GetParameterFloat(our_descriptor)) done = True except: reason.append(traceback.format_exc()) if not done: try: default_value = str(app_instance.GetParameterInt(our_descriptor)) done = True except: reason.append(traceback.format_exc()) if done: default = ET.SubElement(param, 'default') default.text = default_value if is_choice_type: the_keys = [a_key for a_key in app_instance.GetChoiceKeys(our_descriptor)] if default_value in the_keys: default.text = str(the_keys.index(default_value)) else: default.text = '' else: logger.debug("A parameter transformation failed, trying default values : for %s, %s, type %s!, conversion message: %s" % (appkey, our_descriptor, parameters[app_instance.GetParameterType(our_descriptor)], str(reason))) the_type = parameters[app_instance.GetParameterType(our_descriptor)] if the_type == "ParameterType_Int": default_value = "0" elif the_type == "ParameterType_Float": default_value = "0.0" elif the_type == "ParameterType_Empty": default_value = "True" else: raise Exception("Unable to adapt %s, %s, %s, conversion message: %s" % (appkey, our_descriptor, parameters[app_instance.GetParameterType(our_descriptor)], str(reason))) default = ET.SubElement(param, 'default') default.text = default_value else: is_choice_type = 'Selection' in param_type.text node = ET.SubElement(param, each) if is_choice_type: get_the_choices(app_instance, our_descriptor, node) def get_default_parameter_value(app_instance, param): parameters = get_parameters() try: return app_instance.GetParameterAsString(param) except: the_type = parameters[app_instance.GetParameterType(param)] default_value = "0" if the_type == "ParameterType_Int": default_value = "0" elif the_type == "ParameterType_Float": default_value = "0.0" elif the_type == "ParameterType_Empty": default_value = "True" return default_value def escape_html(par): if 'Int' in par: return '&lt;int32&gt;' if 'Float' in par: return '&lt;float&gt;' if 'Empty' in par: return '&lt;boolean&gt;' if 'Radius' in par: return '&lt;int32&gt;' if 'RAM' in par: return '&lt;int32&gt;' return '&lt;string&gt;' def is_a_parameter(app_instance, param): if app_instance.GetName() == "HaralickTextureExtraction": if param.startswith("parameters."): return True if '.' 
in param: return False try: app_instance.GetChoiceKeys(param) return False except: return True def describe_app(app_instance): parameters = get_parameters() result = [] with tag('html', result): with tag('head', result): how = """ <style type="text/css"> dl { border: 3px double #ccc; padding: 0.5em; } dt { float: left; clear: left; text-align: left; font-weight: bold; color: green; } dt:after { content: ":"; } dd { margin: 0 0 0 220px; padding: 0 0 0.5em 0; } </style> """ result.append(how) with tag('body', result): with tag('h1', result): result.append(app_instance.GetName()) with tag('h2', result): result.append('Brief Description') result.append(app_instance.GetDescription()) with tag('h2', result): result.append('Tags') result.append(','.join(app_instance.GetDocTags())) with tag('h2', result): result.append('Long Description') result.append(app_instance.GetDocLongDescription()) with tag('h2', result): result.append('Parameters') params = app_instance.GetParametersKeys() with tag('ul', result): for param in params: if is_a_parameter(app_instance, param): with tag('li', result): result.append('<b>%s -%s</b> %s ' % ('[param]', param, escape_html(parameters[app_instance.GetParameterType(param)]))) result.append('%s. Mandatory: %s. Default Value: &quot;%s&quot;' % (app_instance.GetParameterDescription(param), str(app_instance.IsMandatory(param)), get_default_parameter_value(app_instance, param))) choices_tags = [each for each in params if (not is_a_parameter(app_instance, each)) and '.' not in each] for choice in choices_tags: result.append('<b>%s -%s</b> %s %s. Mandatory: %s. Default Value: &quot;%s&quot;' % ('[choice]', choice, app_instance.GetParameterDescription(choice), ','.join(app_instance.GetChoiceKeys(choice)), str(app_instance.IsMandatory(choice)), get_default_parameter_value(app_instance, choice))) choices = app_instance.GetChoiceKeys(choice) with tag('ul', result): for subchoice in choices: with tag('li', result): result.append('<b>%s -%s</b>' % ('[group]', subchoice)) with tag('ul', result): param_tags = [each for each in params if '.%s' % subchoice in each] for param_tag in param_tags: with tag('li', result): result.append('<b>%s -%s</b> ' % ('[param]', param_tag)) result.append("%s %s. Mandatory: %s. 
Default Value: &quot;%s&quot;" % (escape_html(parameters[app_instance.GetParameterType(param_tag)]), app_instance.GetParameterDescription(param_tag), str(app_instance.IsMandatory(param_tag)), get_default_parameter_value(app_instance, param_tag))) with tag('h2', result): result.append('Limitations') result.append(app_instance.GetDocLimitations()) with tag('h2', result): result.append('Authors') result.append(app_instance.GetDocAuthors()) with tag('h2', result): result.append('See Also') result.append(app_instance.GetDocSeeAlso()) with tag('h2', result): result.append('Example of use') result.append(app_instance.GetHtmlExample()) if app_instance.GetName() == "HaralickTextureExtraction": index = result.index("<b>[param] -parameters</b> &lt;string&gt; ") del result[index + 2] del result[index + 1] del result[index] del result[index - 1] return "".join(result) def get_list_from_node(myet, available_app): all_params = [] for parameter in myet.iter('parameter'): rebuild = [] par_type = parameter.find('parameter_type').text key = parameter.find('key').text name = parameter.find('name').text source_par_type = parameter.find('parameter_type').attrib['source_parameter_type'] rebuild.append(source_par_type) rebuild.append(par_type) rebuild.append(key) rebuild.append(name) for each in parameter[4:]: if each.tag not in ["hidden"]: if len(each.getchildren()) == 0: if each.tag in ["default"]: if "-" in available_app: available_app = available_app.split("-")[0] app_instance = otbApplication.Registry.CreateApplication(available_app) rebuild.append(get_default_parameter_value(app_instance, key)) else: rebuild.append(each.text) else: rebuild.append([item.text for item in each.iter('choice')]) all_params.append(rebuild) return all_params def adapt_list_to_string(c_list): a_list = c_list[1:] if a_list[0] in ["ParameterVector", "ParameterMultipleInput"]: if c_list[0] == "ParameterType_InputImageList": a_list[3] = 3 else: a_list[3] = -1 if a_list[0] in ["ParameterRaster", "ParameterFile", "ParameterMultipleInput", "OutputRaster", "OutputFile"]: if "Output" in a_list[0]: a_list.append("/tmp/processing/output.tif") else: import os a_list.append(os.path.join(os.path.abspath(os.curdir), "helper/QB_Toulouse_Ortho_PAN.tif")) if a_list[0] in ["ParameterSelection"]: pass a_list[1] = "-%s" % a_list[1] def mystr(par): if isinstance(par, list): return ";".join(par) return str(par) if a_list[-1] is None: return "" b_list = map(mystr, a_list) b_list = [b_list[1], b_list[-1]] res = " ".join(b_list) return res def get_automatic_ut_from_xml_description(the_root): dom_model = the_root try: appkey = dom_model.find('key').text cliName = dom_model.find('exec').text if not cliName.startswith("otbcli_"): raise Exception('Wrong client executable') rebu = get_list_from_node(dom_model, appkey) the_result = map(adapt_list_to_string, rebu) ut_command = cliName + " " + " ".join(the_result) return ut_command except Exception as e: ET.dump(dom_model) raise def list_reader(file_name, version): tree = ET.parse(file_name) root = tree.getroot() nodes = [each.text for each in root.findall("./version[@id='%s']/app_name" % version)] return nodes def get_otb_version(): #TODO Find a way to retrieve installed otb version, force exception and parse otb-X.XX.X ? 
# return "3.18" return "5.0" def get_white_list(): nodes = list_reader("white_list.xml", get_otb_version()) return nodes def get_black_list(): nodes = list_reader("black_list.xml", get_otb_version()) return nodes def create_xml_descriptors(): import os if not os.path.exists("description"): os.mkdir("description") if not os.path.exists("html"): os.mkdir("html") logger = get_OTB_log() white_list = get_white_list() black_list = get_black_list() custom_apps_available = get_customize_app_functions() for available_app in otbApplication.Registry.GetAvailableApplications(): # try: if 'get%s' % available_app in custom_apps_available: if available_app in white_list and available_app not in black_list: the_list = [] the_root = get_xml_description_from_application_name(available_app) function_to_call = "the_list = OTBSpecific_XMLcreation.get%s(available_app,the_root)" % available_app exec(function_to_call) # the_list = locals()['get%s' % available_app](available_app, the_root) if the_list: for each_dom in the_list: try: ut_command = get_automatic_ut_from_xml_description(each_dom) except: logger.error("Unit test for command %s must be fixed: %s" % (available_app, traceback.format_exc())) else: logger.warning("%s is not in white list." % available_app) else: if available_app in white_list and available_app not in black_list: logger.warning("There is no adaptor for %s, check white list and versions" % available_app) # TODO Remove this default code when all apps are tested... fh = open("description/%s.xml" % available_app, "w") the_root = get_xml_description_from_application_name(available_app) ET.ElementTree(the_root).write(fh) fh.close() try: ut_command = get_automatic_ut_from_xml_description(the_root) except: logger.error("Unit test for command %s must be fixed: %s" % (available_app, traceback.format_exc())) # except Exception, e: # logger.error(traceback.format_exc()) def create_html_description(): logger = get_OTB_log() if not os.path.exists("description/doc"): os.mkdir("description/doc") for available_app in otbApplication.Registry.GetAvailableApplications(): try: fh = open("description/doc/%s.html" % available_app, "w") app_instance = otbApplication.Registry.CreateApplication(available_app) app_instance.UpdateParameters() ct = describe_app(app_instance) fh.write(ct) fh.close() except Exception as e: logger.error(traceback.format_exc()) sub_algo = [each for each in os.listdir("description") if "-" in each and ".xml" in each] for key in sub_algo: shutil.copy("description/doc/%s" % key.split("-")[0] + ".html", "description/doc/%s" % key.split(".")[0] + ".html") if __name__ == "__main__": # Prepare the environment import sys import os from qgis.core import QgsApplication from PyQt4.QtGui import QApplication app = QApplication([]) QgsApplication.setPrefixPath("/usr", True) QgsApplication.initQgis() # Prepare processing framework from processing.core.Processing import Processing Processing.initialize() import OTBSpecific_XMLcreation # try: # import processing # except ImportError, e: # raise Exception("Processing must be installed and available in PYTHONPATH") try: import otbApplication except ImportError as e: raise Exception("OTB python plugins must be installed and available in PYTHONPATH") create_xml_descriptors() create_html_description() # Exit applications QgsApplication.exitQgis() QApplication.exit()
CHERRY POINT - More than 40 years ago, Jack Metrock took his last ride in an EA-6A Intruder as he headed back from Vietnam. The 69-year-old retired Marine got another flight, of sorts, recently when he took a ride in an EA-6B Prowler simulator at his old home base at Cherry Point air station. Metrock was an electronic countermeasures officer with Marine Composite Reconnaissance Squadron 1 and had been trained at Cherry Point at VMCJ-2. His training then and the training today's pilots receive are worlds apart, he said.

Metrock said technicians would remove units from the cockpit and teach pilots the operations. "They would take the boxes out and put them on a workbench and teach you how to use it," he said. Part of the job of an ECMO was to detect and identify the unique sound of a particular type of radar. While today's simulators create the various radar signatures for the ECMO, in the 1960s Metrock's training included listening for the real thing.

The EA-6A had a pilot and an ECMO, but the EA-6B has a pilot and three ECMOs. Metrock had a chance to sit in the back seat where the 3 and 4 positions are in the EA-6B training simulator but wasn't able to see the unit turned on because it is classified. "I wish I could see what the ECMOs are seeing, but I understand," he said. He was able to "fly" the EA-6B in the simulator though, taking off from Cherry Point, heading out over the Atlantic and experiencing conditions of a thunderstorm. He then returned to the base and was able to land the jet on his third approach.

Ricky Johnson, a former EA-6B pilot who is now a CUBIC Corp. contractor who instructs Marine pilots on the EA-6B, said the simulator can create the environment for airfields around the world. "You name it, we can take them there," Johnson said. He said any type of emergency can be created in the simulator, with the pilots and ECMOs benefitting from the pressure they receive in training. "They come out sweating like you wouldn't believe. It's just like the real thing," Johnson said. All of it impressed Metrock.

Metrock is a member of the Marine Corps Aviation Reconnaissance Association, an organization of active-duty and retired Marines from the electronic warfare community. For more information on the Marine Corps Aviation Reconnaissance Association, go online to the organization's website at www.mcara.us.
"""The WaveBlocks Project This file contains the class for representing the hyperbolic cut basis shape which is a special type of sparse basis set. @author: R. Bourquin @copyright: Copyright (C) 2012, 2013, 2014 R. Bourquin @license: Modified BSD License """ from numpy import eye, vstack, integer from WaveBlocksND.BasisShape import BasisShape from WaveBlocksND.HyperbolicCutShape import HyperbolicCutShape from functools import reduce __all__ = ["LimitedHyperbolicCutShape"] class LimitedHyperbolicCutShape(BasisShape): r"""This class implements the hyperbolic cut basis shape which is a special type of sparse basis set. A basis shape is essentially all information and operations related to the set :math:`\mathfrak{K}` of multi-indices :math:`k`. The hyperbolic cut shape in :math:`D` dimensions with `sparsity` :math:`S` and limits :math:`K = (K_0,\ldots,K_{D-1})` is defined as the set .. math:: \mathfrak{K}(D, S, K) := \{ (k_0, \ldots, k_{D-1}) | 0 \leq k_d < K_d \forall d \in [0,\ldots,D-1] \land \prod_{d=0}^{D-1}(1+k_d) \leq S \} """ def __init__(self, D, K, limits): r""" :param D: The dimension :math:`D` :param K: The sparsity parameter :math:`S` :param limits: The list of all limits :math:`\{K_d\}_{d=0}^{D-1}` """ # The dimension of K self._dimension = D # The sparsity parameter self._sparsity = K # The limits limits = tuple(limits) if all([int(l) > 0 for l in limits]): self._limits = limits else: raise ValueError("All limits have to be positive.") # The linear mapping k -> index for the basis iil = self._get_index_iterator_lex() self._lima = {k: index for index, k in enumerate(iil)} # And the inverse mapping self._lima_inv = {v: k for k, v in self._lima.items()} # The basis size self._basissize = len(self._lima) def __str__(self): r""":return: A string describing the basis shape :math:`\mathfrak{K}`. """ s = ("Hyperbolic cut basis shape of dimension "+str(self._dimension)+" and sparsity "+str(self._sparsity)+" limited at "+str(self._limits)+".") return s def __hash__(self): r"""Compute a unique hash for the basis shape. In the case of hyperbolic cut basis shapes :math:`\mathfrak{K}` the basis is fully specified by its dimension :math:`D` and the sparsity parameter :math:`K`. """ return hash(("LimitedHyperbolicCutShape", self._dimension, self._sparsity, self._limits)) def __getitem__(self, k): r"""Make map look ups. """ if type(k) is tuple or type(k) is list: k = tuple(k) assert len(k) == self._dimension if k in self._lima: return self._lima[k] elif type(k) is int: if k in self._lima_inv: return self._lima_inv[k] else: raise IndexError("Wrong index type") def __contains__(self, k): r""" Checks if a given multi-index :math:`k` is part of the basis set :math:`\mathfrak{K}`. :param k: The multi-index :math:`k` we want to test. :type k: tuple """ assert len(tuple(k)) == self._dimension return tuple(k) in self._lima def __iter__(self): r"""Implements iteration over the multi-indices :math:`k` of the basis set :math:`\mathfrak{K}`. Note: The order of iteration is NOT fixed. If you need a special iteration scheme, use :py:meth:`get_node_iterator`. """ # TODO: Better remove this as it may cause unexpected behaviour? return iter(self._lima) def contains(self, k): r""" Checks if a given multi-index :math:`k` is part of the basis set :math:`\mathfrak{K}`. :param k: The multi-index :math:`k` we want to test. :type k: tuple """ return tuple(k) in self._lima def get_description(self): r"""Return a description of this basis shape object. 
A description is a ``dict`` containing all key-value pairs necessary to reconstruct the current basis shape. A description never contains any data. """ d = {} d["type"] = "LimitedHyperbolicCutShape" d["dimension"] = self._dimension d["K"] = self._sparsity d["limits"] = self._limits return d def extend(self, tight=True): r"""Extend the basis shape such that (at least) all neighbours of all boundary nodes are included in the extended basis shape. :param tight: Whether to cut off the long tails. :type tight: Boolean, default is ``False`` """ D = self._dimension K = self._sparsity if D > 1: # This formula is more narrow than: K = 2**(D-1) * (K+1) # but works only for D >= 2 new_sparsity = 2**(D - 1) * K else: # Special casing K = 2**(D-1) * (K+1) for D = 1 new_sparsity = K + 1 if tight is True: new_limits = tuple([l + 1 for l in self._limits]) return LimitedHyperbolicCutShape(D, new_sparsity, new_limits) else: return HyperbolicCutShape(D, new_sparsity) def _get_index_iterator_lex(self): r""" """ # The hyperbolic cut parameter sparsity = self._sparsity # Upper bounds in each dimension bounds = self._limits[::-1] def index_iterator_lex(S, bounds): # Initialize a counter z = [0 for i in range(self._dimension + 1)] while z[self._dimension] == 0: # Yield the current index vector yield tuple(reversed(z[:-1])) # Increment fastest varying bit z[0] += 1 # Reset overflows for d in range(self._dimension): K = reduce(lambda x, y: x * (y + 1), z[:-1], 1) if z[d] >= bounds[d] or K > S: z[d] = 0 z[d + 1] += 1 return index_iterator_lex(sparsity, bounds) def _get_index_iterator_chain(self, direction=0): r""" """ # The hyperbolic cut parameter sparsity = self._sparsity # Upper bounds in each dimension bounds = self._limits[::-1] def index_iterator_chain(S, bounds, d): D = self._dimension # The counter z = [0 for i in range(D + 1)] # Iterate over all valid stencil points while z[D] == 0: yield tuple(reversed(z[:-1])) # Increase index in the dimension we build the chain z[D - d - 1] += 1 # Check if we are done with the current base point # If yes, move base point and start a new chain # Reset overflows for i in range(D - d - 1, D): K = reduce(lambda x, y: x * (y + 1), z[(D - d - 1):-1], 1) if z[i] > bounds[i] - 1 or K > S: z[i] = 0 z[i + 1] += 1 return index_iterator_chain(sparsity, bounds, direction) def _get_index_iterator_mag(self): r""" """ # Nodes sorted by l_1 magnitude nodes = sorted(self._lima.keys(), key=sum) def index_iterator_mag(nodes): for node in nodes: yield node return index_iterator_mag(nodes) def get_node_iterator(self, mode="lex", direction=None): r""" Returns an iterator to iterate over all basis elements :math:`k \in \mathfrak{K}`. :param mode: The mode by which we iterate over the indices. Default is ``lex`` for lexicographical order. Supported is also ``chain``, for the chain-like mode, details see the manual. :type mode: string :param direction: If iterating in `chainmode` this specifies the direction the chains go. :type direction: integer. """ if mode == "lex": return self._get_index_iterator_lex() elif mode == "chain": if direction < self._dimension: return self._get_index_iterator_chain(direction=direction) else: raise ValueError("Can not build iterator for this direction.") elif mode == "mag": return self._get_index_iterator_mag() # TODO: Consider boundary node only iterator else: raise ValueError("Unknown iterator mode: {}.".format(mode)) def get_limits(self): r"""Returns the upper limit :math:`K_d` for all directions :math:`d`. 
:return: A tuple of the maximum of the multi-index in each direction. """ return tuple(self._limits) def get_neighbours(self, k, selection=None, direction=None): r""" Returns a list of all multi-indices that are neighbours of a given multi-index :math:`k`. A direct neighbour is defined as :math:`(k_0, \ldots, k_d \pm 1, \ldots, k_{D-1}) \forall d \in [0 \ldots D-1]`. :param k: The multi-index of which we want to get the neighbours. :type k: tuple :param selection: :type selection: string with fixed values ``forward``, ``backward`` or ``all``. The values ``all`` is equivalent to the value ``None`` (default). :param direction: The direction :math:`0 \leq d < D` in which we want to find the neighbours :math:`k \pm e_d`. :type direction: int :return: A list containing the pairs :math:`(d, k^\prime)`. """ assert len(tuple(k)) == self._dimension # First build a list of potential neighbours I = eye(self._dimension, dtype=integer) ki = vstack(k) # Forward and backward direct neighbours nbfw = ki + I nbbw = ki - I # Keep only the valid ones nbh = [] if direction is not None: directions = [direction] else: directions = range(self._dimension) for d in directions: nfw = tuple(nbfw[:, d]) nbw = tuple(nbbw[:, d]) # TODO: Try to simplify these nested if blocks if selection in ("backward", "all", None): if nbw in self: nbh.append((d, nbw)) if selection in ("forward", "all", None): if nfw in self: nbh.append((d, nfw)) return nbh
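A brief usage sketch of the shape defined above; the dimension, sparsity and limit values are arbitrary example numbers, not taken from the original file.

# Example only: a 2-dimensional limited hyperbolic cut with sparsity 8 and limits (4, 4).
if __name__ == "__main__":
    shape = LimitedHyperbolicCutShape(2, 8, (4, 4))
    print(shape)                 # human-readable description
    print(shape.get_limits())    # (4, 4)
    # Multi-indices in lexicographical order and their linear indices.
    for k in shape.get_node_iterator(mode="lex"):
        print(k, shape[k])
    # Forward neighbours of the origin that lie inside the shape.
    print(shape.get_neighbours((0, 0), selection="forward"))
    # Extended shape that also covers neighbours of boundary nodes.
    print(shape.extend(tight=True))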
It is important to ensure that you are always ready for difficult times like car accidents, flooding, home invasion, and death. Allstate Insurance began in 1931 and has become a national leader in all varieties of insurance policies. People know to trust Allstate through the countless intelligent, dependable, and dedicated Allstate agents all over the country. Allstate agents always place customers first and help them select the affordable insurance policy that is the best fit for them.

Renters Insurance Jersey Village. Let's get your family covered. To look over all your choices for personal coverage, contact your neighborhood State Farm® agency now. State Farm® Insurance insures more houses than any other insurance agency. Our customer service network can be reached via telephone or online and is available 24 hours a day, 365 days a year. From everyday safety items like fire and burglar alarms to multiple-line policies, the experts at State Farm® will help you save money on your homeowners insurance through several unique discounts. Current State Farm® customers have several reasons for picking us over our competition: 24-hour client service, a highly rated online customer account management system, and advanced tools are just a few of the features State Farm® will provide for your family. With all these features, we can keep all of our clients satisfied with their insurance protection. By working with State Farm® Insurance, our clients will always feel protected by their insurance provider.

Are you in need of a way to give a new look to your home or business? Landscaping can be a fun and exciting way to give any area a unique appearance. Landscapers are experienced with projects, both big and small, to update any yard, neighborhood, or business complex. Landscaping includes everything from adjusting the form of the terrain and creating a colorful environment with plants and trees to adding lawns, fences, and other features. Landscapers can also aid in maintaining your home through gardening, lawn maintenance, trimming, residential snow removal, and many other jobs. Having a landscaper carry out the task of landscaping and maintenance will ensure that any lawn project gets done the right way.

Insurance is one thing you just can't pass over. When you crunch the numbers, there is no possible way to personally assume all of the risks associated with your household, automobile, or home - the things you love most. To safeguard the things you love with premier service at a competitive rate, go to State Farm®. We also offer a variety of investment products to assist you in meeting your financial goals. State Farm® is your financial services company. Insurance is not all State Farm® can offer its 40 million clients. Think about our various banking products for planning your financial future. We offer IRAs, education savings plans, and simple banking. Talk to us about our various LifePath® options for actively managed mutual funds. When you choose us as your chief financial provider, you set yourself up for long-term financial success. Find out more by talking to one of our professional agents. When you think about your financial future, think State Farm®. We are the unrivaled providers of life insurance in Fremont, NE to millions of clients. Call for a free quote today.

Have an unfinished basement that you want to turn into something new but aren't sure how to do it?
Completing a basement is less expensive than a regular home addition, which means you can focus on fixtures and amenities to decorate your basement. A few possibilities for a finished basement are guest bedrooms, a home theater, an entertainment room, or anything else you might want. A finished basement will tie a bow around your home and can even raise the resale value of your house. Hiring a professional crew, such as companies that restore fast after fires in Rogers, AR, can save you time, money, and unfortunate errors on this big project. Our extensive remodeling experience will make sure your basement, kitchen, or any other room gets completed at the highest level of professionalism. Give us a call today, so we can make sure the job is done right.
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

import logging

from contextlib import contextmanager

from blinker import signal
from flask import current_app
from flask_mail import Mail, Message

from udata import theme, i18n

log = logging.getLogger(__name__)

mail = Mail()

mail_sent = signal('mail-sent')


class FakeMailer(object):
    '''Display sent mail in logging output'''
    def send(self, msg):
        log.debug(msg.body)
        log.debug(msg.html)
        mail_sent.send(msg)


@contextmanager
def dummyconnection(*args, **kw):
    """Allow testing email template rendering without actually sending emails."""
    yield FakeMailer()


def init_app(app):
    mail.init_app(app)


def send(subject, recipients, template_base, **kwargs):
    '''
    Send a given email to multiple recipients.

    The recipient's preferred language is taken into account.
    To translate the subject into the right language, use ugettext_lazy.
    '''
    sender = kwargs.pop('sender', None)
    if not isinstance(recipients, (list, tuple)):
        recipients = [recipients]

    debug = current_app.config.get('DEBUG', False)
    send_mail = current_app.config.get('SEND_MAIL', not debug)
    # Use a real mail connection only when sending is enabled;
    # otherwise fall back to the logging-only FakeMailer.
    connection = mail.connect if send_mail else dummyconnection

    with connection() as conn:
        for recipient in recipients:
            lang = i18n._default_lang(recipient)
            with i18n.language(lang):
                log.debug(
                    'Sending mail "%s" to recipient "%s"', subject, recipient)
                msg = Message(subject, sender=sender,
                              recipients=[recipient.email])
                msg.body = theme.render(
                    'mail/{0}.txt'.format(template_base), subject=subject,
                    sender=sender, recipient=recipient, **kwargs)
                msg.html = theme.render(
                    'mail/{0}.html'.format(template_base), subject=subject,
                    sender=sender, recipient=recipient, **kwargs)
                conn.send(msg)
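For context, a minimal sketch of how the send() helper above might be called. The calling function, template name, and objects are assumptions for illustration (any recipient with an email attribute that i18n._default_lang understands would do), and the import assumes the module above is importable as udata.mail.

# Hypothetical calling code; 'new_dataset' templates and the user/dataset
# objects are illustrative only.
from udata import mail


def notify_new_dataset(user, dataset):
    # Renders mail/new_dataset.txt and mail/new_dataset.html in the
    # recipient's language, then sends one message per recipient
    # (or just logs it when SEND_MAIL is disabled).
    mail.send(
        'New dataset published',   # subject, may be a lazy translatable string
        user,                      # a single recipient or a list of recipients
        'new_dataset',             # template base name under mail/
        dataset=dataset,           # extra context forwarded to the templates
    )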
Some profound books and spiritual teachings just don’t fit into a neat category. Over the last ten years, we’ve highlighted a number of such teachings that are presented here for your consideration. “The parts that we think are less honorable we treat with special honor.” 1 Corinthians 12:23. When people deal with color, class or culture in a negative way, that’s racism. But the answer is not to ignore these as if they don’t matter. Instead, we can look at color, class and culture in a positive way. That’s gracism. Pastor David Anderson responds to prejudice and injustice with the principle of gracism—radical inclusion for the marginalized and excluded. From the foundation of this transformative practice and its entry point through you, to its daily expression in your life, this book features a thoughtful collection of anecdotes, stories, and practical tools to inspire kindness for any life situation. The Kindness Handbook is your personal companion to explore the rich landscape of kindness—and help you see just how it might shift and open up your life to greater joy, meaning, and compassion. Two of the leading experts in innovation, design, and creativity on the planet show us that each and every one of us is creative. In an incredibly entertaining and inspiring narrative that draws on countless stories from their work, David and Tom Kelley identify the principles and strategies that will allow us to tap into our creative potential in our work lives, and in our personal lives, and allow us to innovate in terms of how we approach and solve problems. Seth Adam Smith shares how living for others can enrich every aspect of your life. With a mix of humor, candor, and compassion, he reveals how, years before his marriage, his self-obsession led to a downward spiral of addiction and depression, culminating in a suicide attempt at the age of twenty. He shares his deep conviction that the only way you can find your life is to give it away to others. Don’t face reality. Create reality! E-Squared could best be described as a lab manual with simple experiments to prove once and for all that reality is malleable, that consciousness trumps matter, and that you shape your life with your mind. Rather than take it on faith, you are invited to conduct nine 48-hour experiments to prove there really is a positive, loving, totally hip force in the universe. Seligman demonstrates how optimism enhances the quality of life, and how anyone can learn to practice it. He explains how to break an “I—give-up” habit, develop a more constructive explanatory style for interpreting your behavior, and experience the benefits of a more positive interior dialogue. These skills can help break up depression, boost your immune system, better develop your potential, and make you happier. The book’s emphasis is on how to use applied Buddhism in daily life. It shows us how to eliminate stress, adopt new models of leadership, doing business and mindful consumption, and tells us how we can move in the direction of achieving sanity and fulfilment in our daily routines. Practical, insightful and easy to follow, Work offers us new ways in which to live and earn our livelihood. From many personal stories and the authors’ reflections, we can find strength, clarity, and wisdom for those times when we are called on to care for one another. How Can I Help? reminds us just how much we have to give and how doing so can lead to some of the most joyous moments of our lives. 
Lessons in Truth is a clear, concise representation of New Thought philosophy and metaphysical Christianity. The spiritual concepts presented in these 12 lessons show us how to increase our personal empowerment and enhance our spiritual growth. Dr. Cady teaches that our lives can be transformed by the power of our thoughts, words and beliefs. She encourages us to find our truth as it is written in our own hearts and then apply these truths in every area of our lives. Drawing on experience as an interreligious monk, Brother Wayne Teasdale reveals the power of spirituality and its practical elements. He combines a profound Christian faith with an intimate understanding of ancient religious traditions. Robert Emmons examines what it means to think and feel gratefully in Thanks! and invites readers to learn how to put this powerful emotion into practice. Scientifically speaking, regular grateful thinking can increase happiness by as much as 25 percent, while keeping a gratitude journal for as little as three weeks results in better sleep and more energy.
import csv import logging import os from models import * import config from google.appengine.ext import ndb from google.appengine.api import search def clearAllData(sample_data=True): """ Clear all CSAs and Sites from the datastore. Clear all documents from the search index. """ logging.info('Clearing datastore') csa_keys = CSA.query().fetch(keys_only=True) result = ndb.delete_multi(csa_keys) site_keys = Site.query().fetch(keys_only=True) ndb.delete_multi(site_keys) logging.info('Clearing search index') doc_index = search.Index(name=config.SITE_INDEX_NAME) try: while True: # until no more documents, get a list of documents, # constraining the returned objects to contain only the doc ids, # extract the doc ids, and delete the docs. document_ids = [document.doc_id for document in doc_index.get_range(ids_only=True)] if not document_ids: break doc_index.delete(document_ids) except search.Error: logging.exception("Error removing documents:") def loadFromCSV(): logging.info('Loading CSA data') datafile = os.path.join('data', config.CSA_DATA) reader = csv.DictReader( open(datafile, 'rU'), ['id', 'name', 'description']) for row in reader: csa = CSA(id = row['id'], name = row['name'], description = row['description'], url = 'foo') csa.put() logging.info('Loading Site data') datafile = os.path.join('data', config.SITE_DATA) reader = csv.DictReader( open(datafile, 'rU'), ['csa', 'name', 'address', 'lat', 'lng']) for row in reader: csa_key = ndb.Key(CSA, row['csa']) site = Site(csa = csa_key, name=row['name'], address = row['address'], lat = float(row['lat']), lng = float(row['lng'])) site.put()
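A minimal sketch of how these helpers might be wired into an admin endpoint. Only clearAllData() and loadFromCSV() come from the module above; webapp2, the handler, and the route are assumptions, and the handler is assumed to live in the same module as the helpers.

# Hypothetical admin handler; webapp2 and the URL route are assumptions.
import webapp2


class ReloadDataHandler(webapp2.RequestHandler):
    def post(self):
        # Wipe CSAs, Sites, and the search index, then re-import the
        # CSV files referenced in config.
        clearAllData()
        loadFromCSV()
        self.response.write('Data reloaded.')


app = webapp2.WSGIApplication([('/admin/reload_data', ReloadDataHandler)])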
We now know that human tumors are immunogenic and have identified a variety of proteins that act as tumor antigens, i.e. proteins that stimulate an immune response. We have a more detailed understanding of T-cell antigen recognition and the character of peptide fragments presented in MHC molecules. Finally, mechanisms of tumor immune escape are much better understood, such as the role tolerance plays in dampening the tumor specific immune response and the importance of appropriate antigen presenting cells, such as dendritic cells, in initial immune stimulation. Reported trials of cancer immunotherapy, including vaccines, are demonstrating the ability to elicit detectable tumor specific immunity in cancer patients. However, there are no standard immunologic monitoring methods that allow comparison of immune based clinical strategies between labs, or even accurate assessment of the immunogenicity of a particular approach. Standardization and development of reproducible, clinical grade immunologic assays to determine the magnitude of tumor specific immune responses generated in clinical trials of cancer immunotherapy is an area of research in the CVI.

A goal of the CVI is to develop immunologic correlates of cancer vaccine efficacy, i.e. to measure the generation of cancer antigen specific immunity after active immunization and correlate that measurement with clinical outcome. The precedent for identifying well accepted surrogates of vaccine efficacy has been set in infectious disease models, where many vaccines are associated with laboratory measures that correlate with protection from disease. In addition, novel, highly quantitative methods for the enumeration of T cells and the assessment of their function are being developed. Likewise, the highly interactive role of T and B cells in initiating an immune response has resulted in techniques that may allow the measurement of antibody immunity to reflect the development of antigen specific T cells.

Clinical development of assays such as ELISPOT, flow cytometry for intracellular cytokine staining, MHC tetramers, and class and isotype quantitative antibody assays requires different experimental tactics than the development of a laboratory based tool. Accuracy, precision, sensitivity, specificity, and reliability of the technology must be determined. Whereas assay validation is straightforward, in many respects, for serologic studies, validation of T cell based techniques requires the expertise of both molecular and cellular immunologists in generating standards for analysis and in the novel design of validation approaches. Clinical development of techniques and troubleshooting of technical issues must take place in well defined antigen systems, and the principles demonstrated there applied to cancer antigen models.

The CVI houses an Immunologic Core Monitoring Laboratory that executes both clinically validated assays and research on novel methods of measuring immunity. The CVI has conducted many clinical trials of a variety of immune based therapies and demonstrated that T cell and antibody immunity specific for tumor antigens can be measured reproducibly. We have developed clinical grade assays for both tumor specific T cell and antibody evaluation and have collaborated with investigators around the world to analyze samples collected in a variety of clinical trials. We have defined the accuracy and precision of the most common T cell assays and demonstrated how those measurements correlate with each other.
We have developed novel methods by which T cells can be frozen for analysis at a later date and, once thawed, still retain full function. Our core laboratory serves as a training resource for scientists who would like to develop similar clinical immunologic monitoring cores.

Quantitative and qualitative assessment of tumor specific immunity is an area of active research in the CVI. We are developing novel methods to both enumerate and phenotype the T-cell response by evaluating the substances that T cells secrete in response to antigen. These studies are being performed not only in the context of interventional trials, but also in defining the endogenous tumor specific immune response and the associated defects in tumor specific immunity in cancer patients. We are also developing assays that utilize genetic material derived from PBMC to allow a broader evaluation of how the immune response is initiated and sustained. We have an active research program in methods to assess the tumor microenvironment via serologic and peripheral blood surrogates. Finally, we continue to evaluate the performance characteristics of common immune based assays.