from CommonServerPython import *
def main():
params = {k: v for k, v in demisto.params().items() if v is not None}
params['indicator_type'] = FeedIndicatorType.CIDR
params['url'] = 'https://www.dshield.org/block.txt'
params['ignore_regex'] = "[#S].*"
params['indicator'] = json.dumps({
"regex": r"^(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})\t[\d.]*\t(\d{1,2})",
"transform": "\\1/\\2"
})
fields = json.dumps({
"numberofattacks": {
"regex": "^.*\\t.*\\t[0-9]+\\t([0-9]+)",
"transform": "\\1"
},
"networkname": {
"regex": "^.*\\t.*\\t[0-9]+\\t[0-9]+\\t([^\\t]+)",
"transform": "\\1"
},
"geocountry": {
"regex": "^.*\\t.*\\t[0-9]+\\t[0-9]+\\t[^\\t]+\\t([A-Z]+)",
"transform": "\\1"
},
"registrarabuseemail": {
"regex": "^.*\\t.*\\t[0-9]+\\t[0-9]+\\t[^\\t]+\\t[A-Z]+\\t(\\S+)",
"transform": "\\1"
}
})
params['fields'] = fields
params['custom_fields_mapping'] = {
"geocountry": "geocountry",
"registrarabuseemail": "registrarabuseemail"
}
# Call the main execution of the HTTP API module.
feed_main('Dshield Feed', params, 'dshield-')
from HTTPFeedApiModule import * # noqa: E402
if __name__ == '__builtin__' or __name__ == 'builtins':
main()
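# Illustrative walk-through (not part of the integration code): block.txt rows are
# tab-separated, e.g. a row along the lines of
#     192.0.2.0<TAB>192.0.2.255<TAB>24<TAB>5482<TAB>Example-Net<TAB>US<TAB>abuse@example.com
# (the values here are made up for illustration). The 'indicator' regex captures the
# starting IP and the netmask width, and the "\1/\2" transform combines them into the
# CIDR indicator '192.0.2.0/24'; the 'fields' regexes then pull the attack count,
# network name, country and abuse e-mail from the remaining columns.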
|
What do you think about Mid Century Modern Decorating Ideas?
Is it interesting, and does it give you ideas for designing your room? Get inspiration from the pictures in Mid Century Modern Decorating Ideas, and if they inspire you, don't forget to email or contact us. Mid Century Modern Decorating Ideas was posted on January 12, 2018 at 11:38 am and has been viewed by 348 users. Browse all of Home Designs. You may find other, better Mid Century Modern Decorating Ideas design ideas.
|
import os
from PyQt5 import QtWidgets, QtGui, QtCore
class StatusBar(QtWidgets.QStatusBar):
def __init__(self, parent=None):
super(StatusBar, self).__init__(parent)
file_dir = os.path.dirname(__file__)
resource_dir = os.path.join(file_dir, 'resources', 'buttons')
red_button = os.path.join(resource_dir, 'red_button.png')
green_button = os.path.join(resource_dir, 'green_button.png')
self._red_icon = QtGui.QIcon(red_button)
self._green_icon = QtGui.QIcon(green_button)
self.time_label = QtWidgets.QLabel()
self.time_label.setStyleSheet('color: white;')
self.addPermanentWidget(self.time_label)
# set up the status widgets
self._status_widgets = {}
def set_up_helper(self, platform_name):
button = QtWidgets.QPushButton(self._red_icon,
' ' + platform_name)
button.setFlat(True)
button.setAutoFillBackground(True)
button.setStyleSheet('color: white;')
self.addPermanentWidget(button)
self._status_widgets[platform_name.lower()] = button
@QtCore.pyqtSlot(bool, str)
    def set_widget_status(self, status, platform_name):
        # get the appropriate status widget; keys are stored lower-cased by set_up_helper
        if platform_name:
            button = self._status_widgets[platform_name.lower()]
        else:
            return
        if status:
button.setIcon(self._green_icon)
else:
button.setIcon(self._red_icon)
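# Minimal usage sketch (illustrative only; the main window and the platform name
# used here are hypothetical, not part of this module):
#
#     app = QtWidgets.QApplication([])
#     window = QtWidgets.QMainWindow()
#     status_bar = StatusBar(window)
#     window.setStatusBar(status_bar)
#     status_bar.set_up_helper('Twitter')             # starts out with the red icon
#     status_bar.set_widget_status(True, 'twitter')   # flips the button to green
#     window.show()
#     app.exec_()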
|
St Andrew’s strives to provide every child with an opportunity to explore and extend their abilities, helping each to grow into a happy and confident individual, preparing them for the pressures of modern day life.
We provide the best possible education through outstanding pastoral care and highly qualified staff, and we seek to instill in every child a desire to reach for higher achievements in academics, the arts, drama, music and sport.
The St Andrew’s Walk will bring together our community. Your family name will become part of the school history, where St Andrew’s has grown from a small acorn to a mighty oak.
St Andrew’s School is committed to providing the highest quality of education and pastoral care and to maintain and enhance the school’s facilities. The magnificent Sports Centre, with its state of the art facilities, is our current commitment and we are delighted to announce the chance to be part of school history by having your name, or your year group written in stone in The St Andrew’s Walk.
The St Andrew’s Walk will offer any family of the St Andrew’s community, past or present, the chance to have their name inscribed on a paving stone, forming part of the pavement leading around the new Sports Centre.
This walkway will link the playing fields, artificial pitch and school to our new Sports Centre and will be another wonderful area for our children to play.
We would like to invite any St Andrew’s family to be part of The St Andrew’s Walk, whether you have left the school, are due to leave, or wish to play hopscotch from one friend’s name to another! There are three sizes of paving stone to choose from and this campaign is open now.
We hope you will agree that this is a wonderful chance for your family, or year group to be a permanent part of St Andrew’s and we look forward to seeing your name surrounding our first class Sports facility when it opens in the Autumn.
If you are trying to donate from overseas or you would rather donate by cheque please contact the school office and speak to Sharon Willis on 0118 974 4276.
If you decide to make a donation, you can choose any of the formats shown. It can be your family name, your year group or maybe someone you wish to thank or remember.
|
from flask import Flask, jsonify, request, send_from_directory
import os
import db
app = Flask(__name__)
@app.route("/", methods=["GET"])
def main():
path = os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', 'frontend'))
return send_from_directory(path, "index.html")
@app.route("/css/<file>", methods=["GET"])
def get_css(file):
path = os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', 'frontend', "css"))
return send_from_directory(path, file)
@app.route("/js/<file>", methods=["GET"])
def get_js(file):
path = os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', 'frontend', "js"))
return send_from_directory(path, file)
@app.route("/tracenow")
def tracenow():
path = os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', 'frontend'))
return send_from_directory(path, "tracenow.html")
@app.route("/gettrace", methods=['GET'])
def gettrace():
data = {}
return jsonify(data)
@app.route("/monthly")
def monthly():
path = os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', 'frontend'))
return send_from_directory(path, "monthly.html")
@app.route("/daily")
def daily():
path = os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', 'frontend'))
return send_from_directory(path, "daily.html")
@app.route("/monthlystats", methods=['GET'])
def getmonthlystats():
    srcip = request.args.get('srcip')
    dstip = request.args.get('dstip')
    mac = request.args.get('mac')
    if dstip is not None:
        data = db.getmonthlystats(srcip, dstip)
    elif srcip is not None:
        data = db.getmonthlystatsforsrcip(srcip)
    elif mac is not None:
        data = db.getmonthlystatsformac(mac)
    else:
        # No filter supplied; return an empty result instead of hitting an
        # UnboundLocalError on 'data'.
        data = {}
    result = jsonify(data)
    return result
# Return a deduplicated set of src ips
@app.route("/monthlysrc")
def getmonthlysrc():
data = db.getmonthlysrc()
result = jsonify(data)
return result
# Return a deduplicated set of src ips (daily)
@app.route("/dailysrc")
def getdailysrc():
data = db.getdailysrc()
result = jsonify(data)
return result
@app.route("/dailystats", methods=['GET'])
def getdailystats():
data = db.getdailystatsforsrcip()
result = jsonify(data)
return result
if __name__ == "__main__":
app.run(debug=True, host='0.0.0.0')
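# Example requests (illustrative; the query parameters simply select which db helper
# getmonthlystats() calls, and the IPs below are placeholders):
#
#     curl 'http://localhost:5000/monthlystats?srcip=192.0.2.10'
#     curl 'http://localhost:5000/monthlystats?srcip=192.0.2.10&dstip=198.51.100.5'
#     curl 'http://localhost:5000/monthlysrc'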
|
These are absolutely adorable !!!! just precious !
Oh Wow! Love them! They are just so so cute.
Greetings everyone from the Amish community of Lebanon, Pa. Richard from Amish Stories.
These are so very cute, congrats on such lovely ornaments.
|
##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2014 Torsten Duwe <[email protected]>
## Copyright (C) 2014 Sebastien Bourdelin <[email protected]>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
import sigrokdecode as srd
class SamplerateError(Exception):
pass
class Decoder(srd.Decoder):
api_version = 3
id = 'pwm'
name = 'PWM'
longname = 'Pulse-width modulation'
desc = 'Analog level encoded in duty cycle percentage.'
license = 'gplv2+'
inputs = ['logic']
outputs = []
tags = ['Encoding']
channels = (
{'id': 'data', 'name': 'Data', 'desc': 'Data line'},
)
options = (
{'id': 'polarity', 'desc': 'Polarity', 'default': 'active-high',
'values': ('active-low', 'active-high')},
)
annotations = (
('duty-cycle', 'Duty cycle'),
('period', 'Period'),
)
annotation_rows = (
('duty-cycle', 'Duty cycle', (0,)),
('period', 'Period', (1,)),
)
binary = (
('raw', 'RAW file'),
)
def __init__(self):
self.reset()
def reset(self):
self.samplerate = None
self.ss_block = self.es_block = None
def metadata(self, key, value):
if key == srd.SRD_CONF_SAMPLERATE:
self.samplerate = value
def start(self):
self.out_ann = self.register(srd.OUTPUT_ANN)
self.out_binary = self.register(srd.OUTPUT_BINARY)
self.out_average = \
self.register(srd.OUTPUT_META,
meta=(float, 'Average', 'PWM base (cycle) frequency'))
def putx(self, data):
self.put(self.ss_block, self.es_block, self.out_ann, data)
def putp(self, period_t):
# Adjust granularity.
if period_t == 0 or period_t >= 1:
period_s = '%.1f s' % (period_t)
elif period_t <= 1e-12:
period_s = '%.1f fs' % (period_t * 1e15)
elif period_t <= 1e-9:
period_s = '%.1f ps' % (period_t * 1e12)
elif period_t <= 1e-6:
period_s = '%.1f ns' % (period_t * 1e9)
elif period_t <= 1e-3:
period_s = '%.1f μs' % (period_t * 1e6)
else:
period_s = '%.1f ms' % (period_t * 1e3)
self.put(self.ss_block, self.es_block, self.out_ann, [1, [period_s]])
def putb(self, data):
self.put(self.ss_block, self.es_block, self.out_binary, data)
def decode(self):
if not self.samplerate:
raise SamplerateError('Cannot decode without samplerate.')
num_cycles = 0
average = 0
# Wait for an "active" edge (depends on config). This starts
# the first full period of the inspected signal waveform.
self.wait({0: 'f' if self.options['polarity'] == 'active-low' else 'r'})
self.first_samplenum = self.samplenum
# Keep getting samples for the period's middle and terminal edges.
# At the same time that last sample starts the next period.
while True:
# Get the next two edges. Setup some variables that get
# referenced in the calculation and in put() routines.
start_samplenum = self.samplenum
self.wait({0: 'e'})
end_samplenum = self.samplenum
self.wait({0: 'e'})
self.ss_block = start_samplenum
self.es_block = self.samplenum
# Calculate the period, the duty cycle, and its ratio.
period = self.samplenum - start_samplenum
duty = end_samplenum - start_samplenum
ratio = float(duty / period)
# Report the duty cycle in percent.
percent = float(ratio * 100)
self.putx([0, ['%f%%' % percent]])
# Report the duty cycle in the binary output.
self.putb([0, bytes([int(ratio * 256)])])
# Report the period in units of time.
period_t = float(period / self.samplerate)
self.putp(period_t)
# Update and report the new duty cycle average.
num_cycles += 1
average += percent
self.put(self.first_samplenum, self.es_block, self.out_average,
float(average / num_cycles))
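# Worked example (illustrative): at a samplerate of 1 MHz, a 2000-sample period whose
# active portion spans 500 samples gives ratio = 0.25, so the annotation reads
# '25.000000%', the binary output byte is int(0.25 * 256) = 64, and
# period_t = 2000 / 1e6 = 0.002 s, which putp() renders as '2.0 ms'.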
|
These Turkey all inclusive resorts should set you up nicely for your next vacation. Here are our hand-picked best resort options.
If you will be by the water and intend to do a lot of swimming, summer is not too hot in Turkey; otherwise, hold off on heading there in midsummer. You may also want to avoid midwinter for resort vacations, as it gets quite cold with snowfall in many parts of the country.
We searched out what we think are the best for you to enjoy. While many of these resorts do not set their all inclusive offerings in stone, this page does represent the best on offer (at the time of writing). We will continue to make updates. But please do check the package details at the time of booking. Things can change at any time!
the best All Inclusive Resorts in Europe.
Enjoy the crystal clear natural waters at the best hotel in Fethiye and one of the best all inclusive resorts in Europe.
The Cornelia Resorts have been offering packages with their 5 à la carte restaurants and bars, which really appeal to discerning guests' tastes (although children are restricted from many of these options).
There is also a selection of activities around the resort, "special evenings", and room service. This is in addition to the big main buffet that operates at mealtimes throughout your stay.
This resort gets outstanding reviews and is a great option for a Turkish all inclusive vacation resort.
This five star resort by Lara in Antalya is getting great reviews from guests who are really enjoying the quality of the "5 star Turkey All Inclusive experience".
Making a resort in the shape of a Concorde airplane may seem a little cheesy, but that is a little bit of the point in this part of Turkey. Facilities really are 5 star, however, with tennis courts, swimming pools, Jacuzzis, waterslides, six à la carte restaurants and a good variety of entertainment through the day and night.
Click for more info on Concorde De Luxe Resort.
5 star Lara Beach Hotel with a poolside bar. Located in Antalya. This resort has a great private beach. Four quality restaurants provide variety at mealtime. The quality of the rooms is quite good with additions like bathrobes, sofa beds, coffee makers, WiFi and balconies included.
The two Club Med hotels in Turkey rate at the top of the TripAdvisor all inclusive list for Turkey. Even some of the experienced Club Med regulars rate this as one of their best all inclusive experiences.
Recently rated the best all inclusive resort in Turkey. See the reviews and more.
Barut Hotels Labada, with so many great amenities and facilities, is a good choice for families. There's a poolside bar, a private beach, an outdoor pool, an indoor pool, a children's pool and a health club, and guest parking is complimentary. Breakfast in this 179-guestroom resort is also included.
If you are looking for a Turkey all inclusive resort because you want a place where kids can enjoy the splash park, then the Nashira Resort Hotel & Aqua is an excellent choice.
This amazing Turkey resort with spa is located on the beach in Sarigerme. Packages include meals and beverages at the on-site dining establishments, taxes, and gratuities (at the time of writing).
Six (yes, 6) restaurants, a poolside bar and recreational amenities including a water park make this 5.0-star, 410-room resort a top choice, and it consistently gets excellent reviews.
On the beach in Belek with complimentary breakfast, this 5 star resort features a golf course, an outdoor pool, an indoor pool, a sauna, and spa amenities.
The 5 star Gloria Golf Resort in Belek sits on a private beach, with a restaurant, a coffee shop and a poolside bar, and serves a complimentary breakfast.
Recreation includes a golf course, an indoor pool, a children's pool, a sauna and spa amenities.
See also the top Europe All Inclusive resorts.
|
from django.db import models
from wagtail.contrib.settings.models import BaseSetting, register_setting
@register_setting(icon='doc-empty')
class StickyNote(BaseSetting):
start = models.DateTimeField(null=True, help_text="Set the start date to override the content of the Give Sticky. Set the header and body below to change.")
expires = models.DateTimeField(null=True, help_text="Set the date to expire overriding the content of the Give Sticky.")
show_popup = models.BooleanField(default=False, help_text="Replaces the top banner with a popup, start and expire dates still control timing.")
header = models.TextField(max_length=255)
body = models.TextField()
link_text = models.CharField(max_length=255)
link = models.URLField()
emergency_expires = models.DateTimeField(null=True, blank=True, help_text="When active, the Sticky Note will not be displayed until the emergency expires.")
emergency_content = models.CharField(max_length=255)
class Meta:
verbose_name = 'Sticky Note'
@register_setting(icon='collapse-down')
class Footer(BaseSetting):
supporters = models.TextField()
copyright = models.TextField()
ap_statement = models.TextField()
    facebook_link = models.URLField()
twitter_link = models.URLField()
linkedin_link = models.URLField()
class Meta:
verbose_name = 'Footer'
@register_setting(icon='cogs')
class CloudfrontDistribution(BaseSetting):
distribution_id = models.CharField(max_length=255, null=True, blank=True)
class Meta:
verbose_name = 'CloudFront Distribution'
@register_setting(icon='date')
class GiveToday(BaseSetting):
give_link_text = models.CharField(max_length=255)
give_link = models.URLField("Give link", blank=True, help_text="URL to Rice Give page or something similar")
start = models.DateTimeField(null=True,
help_text="Set the start date for Give Today to display")
expires = models.DateTimeField(null=True,
help_text="Set the date to expire displaying Give Today")
menu_start = models.DateTimeField(null=True,
help_text="Set the start date for Give Today to display in the menu")
menu_expires = models.DateTimeField(null=True,
help_text="Set the date to expire displaying Give Today in the menu")
class Meta:
verbose_name = 'Give Today'
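# Illustrative usage sketch (assumptions: the wagtail.contrib.settings BaseSetting
# 'for_site()' API, and a 'request.site' attribute provided by Wagtail's site
# middleware; neither is defined in this file):
#
#     from django.utils import timezone
#
#     def sticky_note_is_active(request):
#         note = StickyNote.for_site(request.site)
#         now = timezone.now()
#         return bool(note.start and note.expires and note.start <= now <= note.expires)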
|
Two resolutions were passed at the last meeting: the Fees Must Fall Resolution and the Resolution on Recognition of the Lenape. The #FeesMustFall campaign and resolution recognize that many benefits come to those who access higher education; however, the price of that education has consistently increased. The Resolution on the Recognition of the Lenape (/ləˈnɑːpi/) expresses our support for recognition of the Lenape people who used to live where Columbia now resides.
There were 6 bills passed in the last meeting. The first bill was the SAS Qualitative Research Workshop, which was seeking funds for a qualitative research funding workshop, and was approved. The next bill passed was the C&T Holiday Potluck Party, a social gathering predominantly attended by students of the C&T department. Nonetheless, all are welcome. The next bill that was approved was the O&L & Clinical Psych: Psych Out! Game Night, which will be on December 3rd in the Everett Lounge from 7:30 PM to 9:30 PM. The next bill that was approved was the OneTC – One Vision (Poster) Bill. This poster would be used to give students the opportunity to publicly write things they want to accomplish before they have left TC. The next bill that was approved was the ICE/SIE Fall Internship Fair, which will be in the Everett Lounge on Thursday November 12th from 6:00 PM to 8:00 PM. The purpose of the event is to help TC students learn about and prepare for some of the most competitive internships in their respective fields. The EPSA Indoor Recess Night was the next bill to be approved in the last meeting. The aim of the bill is to provide a time for students, staff, and faculty to network and take a break from studies. It will be on Thursday, November 12 from 6:00 PM to 9:00 PM. There will be a wide range of games and activities at the event. There will likely be an outing to a local food and drink establishment afterward. Last, but not least, the Senate approved support for the November Comedy Night which will be held in the Whittier Café on November 17th from 9:00 PM to 11:00 PM.
International education week is November 16th through the 20th. On Thursday, November 19th from 7 – 9 p.m. TC will have a social trivia night featuring a panel of 2 domestic and 2 international students.
Jolene Lane and Janice S. Robinson from the Office for Diversity and Community Affairs host and promote many events that are informative and engaging. One I went to recently was entitled “Demystifying the IRB.” The presenters covered what an institutional review board is, what our IRB does, and how to navigate the online Mentor IRB system. These are all important things for every doctoral student to know, and also for master's students who would like to collect data for publication.
The video update closes with a tutorial on how to add a picture of yourself to your tc.columbia.edu email profile. Sarah van den Berg, an Ed.D. student in Curriculum and Teaching, showed this to me, and I thought it would be great to pass it along!
As the department is looking to strengthen the online presence of all of its students, this is just one step that will help!
As always, I’m wishing you a very productive week and rest of the semester!
Drew X Coles is the Arts & Humanities Senator for the Teachers College Senate at Teachers College, Columbia University.
|
#-------------------------------------------------------------------------------
#
# Copyright (c) 2007, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
#
# Author: David C. Morrill
# Date: 03/05/2007
#
#-------------------------------------------------------------------------------
""" Defines classes used to implement and manage various trait listener
patterns.
"""
#-------------------------------------------------------------------------------
# Imports:
#-------------------------------------------------------------------------------
from __future__ import absolute_import
import re
import string
import weakref
from weakref import WeakKeyDictionary
from string import whitespace
from types import MethodType
from .has_traits import HasPrivateTraits
from .trait_base import Undefined, Uninitialized
from .traits import Property
from .trait_types import Str, Int, Bool, Instance, List, Enum, Any
from .trait_errors import TraitError
from .trait_notifiers import TraitChangeNotifyWrapper
#---------------------------------------------------------------------------
# Constants:
#---------------------------------------------------------------------------
# The name of the dictionary used to store active listeners
TraitsListener = '__traits_listener__'
# End of String marker
EOS = '\0'
# Types of traits that can be listened to
ANYTRAIT_LISTENER = '_register_anytrait'
SIMPLE_LISTENER = '_register_simple'
LIST_LISTENER = '_register_list'
DICT_LISTENER = '_register_dict'
SET_LISTENER = '_register_set'
# Mapping from trait default value types to listener types
type_map = {
5: LIST_LISTENER,
6: DICT_LISTENER,
9: SET_LISTENER
}
# Listener types:
ANY_LISTENER = 0
SRC_LISTENER = 1
DST_LISTENER = 2
ListenerType = {
0: ANY_LISTENER,
1: DST_LISTENER,
2: DST_LISTENER,
3: SRC_LISTENER,
4: SRC_LISTENER
}
# Invalid destination ( object, name ) reference marker (i.e. ambiguous):
INVALID_DESTINATION = ( None, None )
# Regular expressions used by the parser:
simple_pat = re.compile( r'^([a-zA-Z_]\w*)(\.|:)([a-zA-Z_]\w*)$' )
name_pat = re.compile( r'([a-zA-Z_]\w*)\s*(.*)' )
# Characters valid in a traits name:
name_chars = string.ascii_letters + string.digits + '_'
#-------------------------------------------------------------------------------
# Utility functions:
#-------------------------------------------------------------------------------
def indent ( text, first_line = True, n = 1, width = 4 ):
""" Indent lines of text.
Parameters
----------
text : str
The text to indent.
first_line : bool, optional
If False, then the first line will not be indented (default: True).
n : int, optional
The level of indentation (default: 1).
width : int, optional
The number of spaces in each level of indentation (default: 4).
Returns
-------
indented : str
"""
lines = text.split( '\n' )
if not first_line:
first = lines[0]
lines = lines[1:]
spaces = ' ' * (width * n)
lines2 = [ spaces + x for x in lines ]
if not first_line:
lines2.insert( 0, first )
indented = '\n'.join( lines2 )
return indented
#-------------------------------------------------------------------------------
# Metadata filters:
#-------------------------------------------------------------------------------
def is_not_none ( value ): return (value is not None)
def is_none ( value ): return (value is None)
def not_event ( value ): return (value != 'event')
#-------------------------------------------------------------------------------
# 'ListenerBase' class:
#-------------------------------------------------------------------------------
class ListenerBase ( HasPrivateTraits ):
#---------------------------------------------------------------------------
# Trait definitions:
#---------------------------------------------------------------------------
# The handler to be called when any listened to trait is changed:
#handler = Any
# The dispatch mechanism to use when invoking the handler:
#dispatch = Str
# Does the handler go at the beginning (True) or end (False) of the
# notification handlers list?
#priority = Bool( False )
# The next level (if any) of ListenerBase object to be called when any of
# our listened to traits is changed:
#next = Instance( ListenerBase )
# The type of handler being used:
#type = Enum( ANY_LISTENER, SRC_LISTENER, DST_LISTENER )
# Should changes to this item generate a notification to the handler?
# notify = Bool
# Should registering listeners for items reachable from this listener item
# be deferred until the associated trait is first read or set?
# deferred = Bool
#---------------------------------------------------------------------------
# Registers new listeners:
#---------------------------------------------------------------------------
def register ( self, new ):
""" Registers new listeners.
"""
raise NotImplementedError
#---------------------------------------------------------------------------
# Unregisters any existing listeners:
#---------------------------------------------------------------------------
def unregister ( self, old ):
""" Unregisters any existing listeners.
"""
raise NotImplementedError
#---------------------------------------------------------------------------
# Handles a trait change for a simple trait:
#---------------------------------------------------------------------------
def handle ( self, object, name, old, new ):
""" Handles a trait change for a simple trait.
"""
raise NotImplementedError
#---------------------------------------------------------------------------
# Handles a trait change for a list trait:
#---------------------------------------------------------------------------
def handle_list ( self, object, name, old, new ):
""" Handles a trait change for a list trait.
"""
raise NotImplementedError
#---------------------------------------------------------------------------
# Handles a trait change for a list traits items:
#---------------------------------------------------------------------------
def handle_list_items ( self, object, name, old, new ):
""" Handles a trait change for a list traits items.
"""
raise NotImplementedError
#---------------------------------------------------------------------------
# Handles a trait change for a dictionary trait:
#---------------------------------------------------------------------------
def handle_dict ( self, object, name, old, new ):
""" Handles a trait change for a dictionary trait.
"""
raise NotImplementedError
#---------------------------------------------------------------------------
# Handles a trait change for a dictionary traits items:
#---------------------------------------------------------------------------
def handle_dict_items ( self, object, name, old, new ):
""" Handles a trait change for a dictionary traits items.
"""
raise NotImplementedError
#-------------------------------------------------------------------------------
# 'ListenerItem' class:
#-------------------------------------------------------------------------------
class ListenerItem ( ListenerBase ):
#---------------------------------------------------------------------------
# Trait definitions:
#---------------------------------------------------------------------------
#: The name of the trait to listen to:
name = Str
#: The name of any metadata that must be present (or not present):
metadata_name = Str
#: Does the specified metadata need to be defined (True) or not defined
#: (False)?
metadata_defined = Bool( True )
#: The handler to be called when any listened-to trait is changed:
handler = Any
#: A weakref 'wrapped' version of 'handler':
wrapped_handler_ref = Any
#: The dispatch mechanism to use when invoking the handler:
dispatch = Str
#: Does the handler go at the beginning (True) or end (False) of the
#: notification handlers list?
priority = Bool( False )
#: The next level (if any) of ListenerBase object to be called when any of
#: this object's listened-to traits is changed:
next = Instance( ListenerBase )
#: The type of handler being used:
type = Enum( ANY_LISTENER, SRC_LISTENER, DST_LISTENER )
#: Should changes to this item generate a notification to the handler?
notify = Bool( True )
#: Should registering listeners for items reachable from this listener item
#: be deferred until the associated trait is first read or set?
deferred = Bool( False )
#: Is this an 'any_trait' change listener, or does it create explicit
#: listeners for each individual trait?
is_any_trait = Bool( False )
#: Is the associated handler a special list handler that handles both
#: 'foo' and 'foo_items' events by receiving a list of 'deleted' and 'added'
#: items as the 'old' and 'new' arguments?
is_list_handler = Bool( False )
#: A dictionary mapping objects to a list of all current active
#: (*name*, *type*) listener pairs, where *type* defines the type of
#: listener, one of: (SIMPLE_LISTENER, LIST_LISTENER, DICT_LISTENER).
active = Instance( WeakKeyDictionary, () )
#-- 'ListenerBase' Class Method Implementations ----------------------------
#---------------------------------------------------------------------------
# String representation:
#---------------------------------------------------------------------------
def __repr__ ( self, seen = None ):
"""Returns a string representation of the object.
Since the object graph may have cycles, we extend the basic __repr__ API
to include a set of objects we've already seen while constructing
a string representation. When this method tries to get the repr of
a ListenerItem or ListenerGroup, we will use the extended API and build
up the set of seen objects. The repr of a seen object will just be
'<cycle>'.
"""
if seen is None:
seen = set()
seen.add( self )
next_repr = 'None'
next = self.next
if next is not None:
if next in seen:
next_repr = '<cycle>'
else:
next_repr = next.__repr__( seen )
return """%s(
name = %r,
metadata_name = %r,
metadata_defined = %r,
is_any_trait = %r,
dispatch = %r,
notify = %r,
is_list_handler = %r,
type = %r,
next = %s,
)""" % ( self.__class__.__name__, self.name, self.metadata_name,
self.metadata_defined, self.is_any_trait, self.dispatch, self.notify,
self.is_list_handler, self.type, indent( next_repr, False ) )
#---------------------------------------------------------------------------
# Registers new listeners:
#---------------------------------------------------------------------------
def register ( self, new ):
""" Registers new listeners.
"""
# Make sure we actually have an object to set listeners on and that it
# has not already been registered (cycle breaking):
if (new is None) or (new is Undefined) or (new in self.active):
return INVALID_DESTINATION
# Create a dictionary of {name: trait_values} that match the object's
# definition for the 'new' object:
name = self.name
last = name[-1:]
if last == '*':
# Handle the special case of an 'anytrait' change listener:
if self.is_any_trait:
try:
self.active[ new ] = [ ( '', ANYTRAIT_LISTENER ) ]
return self._register_anytrait( new, '', False )
except TypeError:
# This error can occur if 'new' is a list or other object
# for which a weakref cannot be created as the dictionary
# key for 'self.active':
return INVALID_DESTINATION
# Handle trait matching based on a common name prefix and/or
# matching trait metadata:
metadata = self._metadata
if metadata is None:
self._metadata = metadata = { 'type': not_event }
if self.metadata_name != '':
if self.metadata_defined:
metadata[ self.metadata_name ] = is_not_none
else:
metadata[ self.metadata_name ] = is_none
# Get all object traits with matching metadata:
names = new.trait_names( **metadata )
# If a name prefix was specified, filter out only the names that
# start with the specified prefix:
name = name[:-1]
if name != '':
n = len( name )
names = [ aname for aname in names if name == aname[ : n ] ]
# Create the dictionary of selected traits:
bt = new.base_trait
traits = dict( [ ( name, bt( name ) ) for name in names ] )
# Handle any new traits added dynamically to the object:
new.on_trait_change( self._new_trait_added, 'trait_added' )
else:
# Determine if the trait is optional or not:
optional = (last == '?')
if optional:
name = name[:-1]
# Else, no wildcard matching, just get the specified trait:
trait = new.base_trait( name )
# Try to get the object trait:
if trait is None:
# Raise an error if trait is not defined and not optional:
# fixme: Properties which are lists don't implement the
# '..._items' sub-trait, which can cause a failure here when
# used with an editor that sets up listeners on the items...
if not optional:
raise TraitError( "'%s' object has no '%s' trait" % (
new.__class__.__name__, name ) )
# Otherwise, just skip it:
traits = {}
else:
# Create a result dictionary containing just the single trait:
traits = { name: trait }
# For each item, determine its type (simple, list, dict):
self.active[ new ] = active = []
for name, trait in traits.items():
# Determine whether the trait type is simple, list, set or
# dictionary:
type = SIMPLE_LISTENER
handler = trait.handler
if handler is not None:
type = type_map.get( handler.default_value_type,
SIMPLE_LISTENER )
# Add the name and type to the list of traits being registered:
active.append( ( name, type ) )
# Set up the appropriate trait listeners on the object for the
# current trait:
value = getattr( self, type )( new, name, False )
if len( traits ) == 1:
return value
return INVALID_DESTINATION
#---------------------------------------------------------------------------
# Unregisters any existing listeners:
#---------------------------------------------------------------------------
def unregister ( self, old ):
""" Unregisters any existing listeners.
"""
if old is not None and old is not Uninitialized:
try:
active = self.active.pop( old, None )
if active is not None:
for name, type in active:
getattr( self, type )( old, name, True )
except TypeError:
# An error can occur if 'old' is a list or other object for
            # which a weakref cannot be created and used as a key for
# 'self.active':
pass
#---------------------------------------------------------------------------
# Handles a trait change for an intermediate link trait:
#---------------------------------------------------------------------------
def handle_simple ( self, object, name, old, new ):
""" Handles a trait change for an intermediate link trait.
"""
self.next.unregister( old )
self.next.register( new )
def handle_dst ( self, object, name, old, new ):
""" Handles a trait change for an intermediate link trait when the
notification is for the final destination trait.
"""
self.next.unregister( old )
object, name = self.next.register( new )
if old is not Uninitialized:
if object is None:
raise TraitError( "on_trait_change handler signature is "
"incompatible with a change to an intermediate trait" )
wh = self.wrapped_handler_ref()
if wh is not None:
wh( object, name, old,
getattr( object, name, Undefined ) )
#---------------------------------------------------------------------------
# Handles a trait change for a list (or set) trait:
#---------------------------------------------------------------------------
def handle_list ( self, object, name, old, new ):
""" Handles a trait change for a list (or set) trait.
"""
if old is not None and old is not Uninitialized:
unregister = self.next.unregister
for obj in old:
unregister( obj )
register = self.next.register
for obj in new:
register( obj )
#---------------------------------------------------------------------------
# Handles a trait change for a list (or set) traits items:
#---------------------------------------------------------------------------
def handle_list_items ( self, object, name, old, new ):
""" Handles a trait change for items of a list (or set) trait.
"""
self.handle_list( object, name, new.removed, new.added )
def handle_list_items_special ( self, object, name, old, new ):
""" Handles a trait change for items of a list (or set) trait with
notification.
"""
wh = self.wrapped_handler_ref()
if wh is not None:
wh( object, name, new.removed, new.added )
#---------------------------------------------------------------------------
# Handles a trait change for a dictionary trait:
#---------------------------------------------------------------------------
def handle_dict ( self, object, name, old, new ):
""" Handles a trait change for a dictionary trait.
"""
if old is not Uninitialized:
unregister = self.next.unregister
for obj in old.values():
unregister( obj )
register = self.next.register
for obj in new.values():
register( obj )
#---------------------------------------------------------------------------
# Handles a trait change for a dictionary traits items:
#---------------------------------------------------------------------------
def handle_dict_items ( self, object, name, old, new ):
""" Handles a trait change for items of a dictionary trait.
"""
self.handle_dict( object, name, new.removed, new.added )
if len( new.changed ) > 0:
# If 'name' refers to the '_items' trait, then remove the '_items'
# suffix to get the actual dictionary trait.
#
# fixme: Is there ever a case where 'name' *won't* refer to the
# '_items' trait?
if name.endswith('_items'):
name = name[:-len('_items')]
dict = getattr( object, name )
unregister = self.next.unregister
register = self.next.register
for key, obj in new.changed.items():
unregister( obj )
register( dict[ key ] )
#---------------------------------------------------------------------------
# Handles an invalid intermediate trait change to a handler that must be
# applied to the final destination object.trait:
#---------------------------------------------------------------------------
def handle_error ( self, obj, name, old, new ):
""" Handles an invalid intermediate trait change to a handler that must
be applied to the final destination object.trait.
"""
if old is not None and old is not Uninitialized:
raise TraitError( "on_trait_change handler signature is "
"incompatible with a change to an intermediate trait" )
#-- Event Handlers ---------------------------------------------------------
#---------------------------------------------------------------------------
# Handles the 'handler' trait being changed:
#---------------------------------------------------------------------------
def _handler_changed ( self, handler ):
""" Handles the **handler** trait being changed.
"""
if self.next is not None:
self.next.handler = handler
#---------------------------------------------------------------------------
# Handles the 'wrapped_handler_ref' trait being changed:
#---------------------------------------------------------------------------
def _wrapped_handler_ref_changed ( self, wrapped_handler_ref ):
""" Handles the 'wrapped_handler_ref' trait being changed.
"""
if self.next is not None:
self.next.wrapped_handler_ref = wrapped_handler_ref
#---------------------------------------------------------------------------
# Handles the 'dispatch' trait being changed:
#---------------------------------------------------------------------------
def _dispatch_changed ( self, dispatch ):
""" Handles the **dispatch** trait being changed.
"""
if self.next is not None:
self.next.dispatch = dispatch
#---------------------------------------------------------------------------
# Handles the 'priority' trait being changed:
#---------------------------------------------------------------------------
def _priority_changed ( self, priority ):
""" Handles the **priority** trait being changed.
"""
if self.next is not None:
self.next.priority = priority
#-- Private Methods --------------------------------------------------------
#---------------------------------------------------------------------------
# Registers any 'anytrait' listener:
#---------------------------------------------------------------------------
def _register_anytrait ( self, object, name, remove ):
""" Registers any 'anytrait' listener.
"""
handler = self.handler()
if handler is not Undefined:
object._on_trait_change( handler, remove = remove,
dispatch = self.dispatch,
priority = self.priority )
return ( object, name )
#---------------------------------------------------------------------------
# Registers a handler for a simple trait:
#---------------------------------------------------------------------------
def _register_simple ( self, object, name, remove ):
""" Registers a handler for a simple trait.
"""
next = self.next
if next is None:
handler = self.handler()
if handler is not Undefined:
object._on_trait_change( handler, name,
remove = remove,
dispatch = self.dispatch,
priority = self.priority )
return ( object, name )
tl_handler = self.handle_simple
if self.notify:
if self.type == DST_LISTENER:
if self.dispatch != 'same':
raise TraitError( "Trait notification dispatch type '%s' "
"is not compatible with handler signature and "
"extended trait name notification style" % self.dispatch )
tl_handler = self.handle_dst
else:
handler = self.handler()
if handler is not Undefined:
object._on_trait_change( handler, name,
remove = remove,
dispatch = self.dispatch,
priority = self.priority )
object._on_trait_change( tl_handler, name,
remove = remove,
dispatch = 'extended',
priority = self.priority )
if remove:
return next.unregister( getattr( object, name ) )
if not self.deferred:
return next.register( getattr( object, name ) )
return ( object, name )
#---------------------------------------------------------------------------
# Registers a handler for a list trait:
#---------------------------------------------------------------------------
def _register_list ( self, object, name, remove ):
""" Registers a handler for a list trait.
"""
next = self.next
if next is None:
handler = self.handler()
if handler is not Undefined:
object._on_trait_change( handler, name,
remove = remove,
dispatch = self.dispatch,
priority = self.priority )
if self.is_list_handler:
object._on_trait_change( self.handle_list_items_special,
name + '_items',
remove = remove,
dispatch = self.dispatch,
priority = self.priority )
elif self.type == ANY_LISTENER:
object._on_trait_change( handler, name + '_items',
remove = remove,
dispatch = self.dispatch,
priority = self.priority )
return ( object, name )
tl_handler = self.handle_list
tl_handler_items = self.handle_list_items
if self.notify:
if self.type == DST_LISTENER:
tl_handler = tl_handler_items = self.handle_error
else:
handler = self.handler()
if handler is not Undefined:
object._on_trait_change( handler, name,
remove = remove,
dispatch = self.dispatch,
priority = self.priority )
if self.is_list_handler:
object._on_trait_change( self.handle_list_items_special,
name + '_items',
remove = remove,
dispatch = self.dispatch,
priority = self.priority )
elif self.type == ANY_LISTENER:
object._on_trait_change( handler, name + '_items',
remove = remove,
dispatch = self.dispatch,
priority = self.priority )
object._on_trait_change( tl_handler, name,
remove = remove,
dispatch = 'extended',
priority = self.priority )
object._on_trait_change( tl_handler_items, name + '_items',
remove = remove,
dispatch = 'extended',
priority = self.priority )
if remove:
handler = next.unregister
elif self.deferred:
return INVALID_DESTINATION
else:
handler = next.register
for obj in getattr( object, name ):
handler( obj )
return INVALID_DESTINATION
# Handle 'sets' the same as 'lists':
# Note: Currently the behavior of sets is almost identical to that of lists,
# so we are able to share the same code for both. This includes some 'duck
# typing' that occurs with the TraitListEvent and TraitSetEvent, that define
# 'removed' and 'added' attributes that behave similarly enough (from the
# point of view of this module) that they can be treated as equivalent. If
# the behavior of sets ever diverges from that of lists, then this code may
# need to be changed.
_register_set = _register_list
#---------------------------------------------------------------------------
# Registers a handler for a dictionary trait:
#---------------------------------------------------------------------------
def _register_dict ( self, object, name, remove ):
""" Registers a handler for a dictionary trait.
"""
next = self.next
if next is None:
handler = self.handler()
if handler is not Undefined:
object._on_trait_change( handler, name,
remove = remove,
dispatch = self.dispatch,
priority = self.priority )
if self.type == ANY_LISTENER:
object._on_trait_change( handler, name + '_items',
remove = remove,
dispatch = self.dispatch,
priority = self.priority )
return ( object, name )
tl_handler = self.handle_dict
tl_handler_items = self.handle_dict_items
if self.notify:
if self.type == DST_LISTENER:
tl_handler = tl_handler_items = self.handle_error
else:
handler = self.handler()
if handler is not Undefined:
object._on_trait_change( handler, name,
remove = remove,
dispatch = self.dispatch,
priority = self.priority )
if self.type == ANY_LISTENER:
object._on_trait_change( handler, name + '_items',
remove = remove,
dispatch = self.dispatch,
priority = self.priority )
object._on_trait_change( tl_handler, name,
remove = remove,
dispatch = self.dispatch,
priority = self.priority )
object._on_trait_change( tl_handler_items, name + '_items',
remove = remove,
dispatch = self.dispatch,
priority = self.priority )
if remove:
handler = next.unregister
elif self.deferred:
return INVALID_DESTINATION
else:
handler = next.register
for obj in getattr( object, name ).values():
handler( obj )
return INVALID_DESTINATION
#---------------------------------------------------------------------------
# Handles new traits being added to an object being monitored:
#---------------------------------------------------------------------------
def _new_trait_added ( self, object, name, new_trait ):
""" Handles new traits being added to an object being monitored.
"""
        # See if the new trait matches our prefix and metadata:
if new_trait.startswith( self.name[:-1] ):
trait = object.base_trait( new_trait )
for meta_name, meta_eval in self._metadata.items():
if not meta_eval( getattr( trait, meta_name ) ):
return
# Determine whether the trait type is simple, list, set or
# dictionary:
type = SIMPLE_LISTENER
handler = trait.handler
if handler is not None:
                type = type_map.get( handler.default_value_type,
SIMPLE_LISTENER )
# Add the name and type to the list of traits being registered:
self.active[ object ].append( ( new_trait, type ) )
# Set up the appropriate trait listeners on the object for the
# new trait:
getattr( self, type )( object, new_trait, False )
#-------------------------------------------------------------------------------
# 'ListenerGroup' class:
#-------------------------------------------------------------------------------
def _set_value ( self, name, value ):
for item in self.items:
setattr( item, name, value )
def _get_value ( self, name ):
# Use the attribute on the first item. If there are no items, return None.
if self.items:
return getattr( self.items[0], name )
else:
return None
ListProperty = Property( fget = _get_value, fset = _set_value )
class ListenerGroup ( ListenerBase ):
#---------------------------------------------------------------------------
# Trait definitions:
#---------------------------------------------------------------------------
#: The handler to be called when any listened-to trait is changed
handler = Property
#: A weakref 'wrapped' version of 'handler':
wrapped_handler_ref = Property
#: The dispatch mechanism to use when invoking the handler:
dispatch = Property
#: Does the handler go at the beginning (True) or end (False) of the
#: notification handlers list?
priority = ListProperty
#: The next level (if any) of ListenerBase object to be called when any of
#: this object's listened-to traits is changed
next = ListProperty
#: The type of handler being used:
type = ListProperty
#: Should changes to this item generate a notification to the handler?
notify = ListProperty
#: Should registering listeners for items reachable from this listener item
#: be deferred until the associated trait is first read or set?
deferred = ListProperty
# The list of ListenerBase objects in the group
items = List( ListenerBase )
#-- Property Implementations -----------------------------------------------
def _set_handler ( self, handler ):
if self._handler is None:
self._handler = handler
for item in self.items:
item.handler = handler
def _set_wrapped_handler_ref ( self, wrapped_handler_ref ):
if self._wrapped_handler_ref is None:
self._wrapped_handler_ref = wrapped_handler_ref
for item in self.items:
item.wrapped_handler_ref = wrapped_handler_ref
def _set_dispatch ( self, dispatch ):
if self._dispatch is None:
self._dispatch = dispatch
for item in self.items:
item.dispatch = dispatch
#-- 'ListenerBase' Class Method Implementations ----------------------------
#---------------------------------------------------------------------------
# String representation:
#---------------------------------------------------------------------------
def __repr__ ( self, seen = None ):
"""Returns a string representation of the object.
Since the object graph may have cycles, we extend the basic __repr__ API
to include a set of objects we've already seen while constructing
a string representation. When this method tries to get the repr of
a ListenerItem or ListenerGroup, we will use the extended API and build
up the set of seen objects. The repr of a seen object will just be
'<cycle>'.
"""
if seen is None:
seen = set()
seen.add( self )
lines = [ '%s(items = [' % self.__class__.__name__ ]
for item in self.items:
lines.extend( indent( item.__repr__( seen ), True ).split( '\n' ) )
lines[-1] += ','
lines.append( '])' )
return '\n'.join( lines )
#---------------------------------------------------------------------------
# Registers new listeners:
#---------------------------------------------------------------------------
def register ( self, new ):
""" Registers new listeners.
"""
for item in self.items:
item.register( new )
return INVALID_DESTINATION
#---------------------------------------------------------------------------
# Unregisters any existing listeners:
#---------------------------------------------------------------------------
def unregister ( self, old ):
""" Unregisters any existing listeners.
"""
for item in self.items:
item.unregister( old )
#-------------------------------------------------------------------------------
# 'ListenerParser' class:
#-------------------------------------------------------------------------------
class ListenerParser ( HasPrivateTraits ):
#-------------------------------------------------------------------------------
# Trait definitions:
#-------------------------------------------------------------------------------
#: The string being parsed
text = Str
#: The length of the string being parsed.
len_text = Int
#: The current parse index within the string
index = Int
#: The next character from the string being parsed
next = Property
#: The next Python attribute name within the string:
name = Property
#: The next non-whitespace character
skip_ws = Property
#: Backspaces to the last character processed
backspace = Property
#: The ListenerBase object resulting from parsing **text**
listener = Instance( ListenerBase )
#-- Property Implementations -----------------------------------------------
def _get_next ( self ):
index = self.index
self.index += 1
if index >= self.len_text:
return EOS
return self.text[ index ]
def _get_backspace ( self ):
self.index = max( 0, self.index - 1 )
def _get_skip_ws ( self ):
while True:
c = self.next
if c not in whitespace:
return c
def _get_name ( self ):
match = name_pat.match( self.text, self.index - 1 )
if match is None:
return ''
self.index = match.start( 2 )
return match.group( 1 )
#-- object Method Overrides ------------------------------------------------
def __init__ ( self, text = '', **traits ):
self.text = text
super( ListenerParser, self ).__init__( **traits )
#-- Private Methods --------------------------------------------------------
#---------------------------------------------------------------------------
# Parses the text and returns the appropriate collection of ListenerBase
# objects described by the text:
#---------------------------------------------------------------------------
def parse ( self ):
""" Parses the text and returns the appropriate collection of
ListenerBase objects described by the text.
"""
# Try a simple case of 'name1.name2'. The simplest case of a single
# Python name never triggers this parser, so we don't try to make that
# a shortcut too. Whitespace should already have been stripped from the
# start and end.
# TODO: The use of regexes should be used throughout all of the parsing
# functions to speed up all aspects of parsing.
match = simple_pat.match( self.text )
if match is not None:
return ListenerItem(
name = match.group( 1 ),
notify = match.group(2) == '.',
next = ListenerItem( name = match.group( 3 ) ) )
return self.parse_group( EOS )
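    # For example, the shortcut above turns 'child.value' into
    #     ListenerItem(name='child', notify=True, next=ListenerItem(name='value'))
    # while 'child:value' yields the same chain with notify=False (i.e. no
    # notification for changes to the intermediate 'child' trait itself).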
#---------------------------------------------------------------------------
# Parses the contents of a group:
#---------------------------------------------------------------------------
def parse_group ( self, terminator = ']' ):
""" Parses the contents of a group.
"""
items = []
while True:
items.append( self.parse_item( terminator ) )
c = self.skip_ws
if c is terminator:
break
if c != ',':
if terminator == EOS:
self.error( "Expected ',' or end of string" )
else:
self.error( "Expected ',' or '%s'" % terminator )
if len( items ) == 1:
return items[0]
return ListenerGroup( items = items )
#---------------------------------------------------------------------------
# Parses a single, complete listener item/group string:
#---------------------------------------------------------------------------
def parse_item ( self, terminator ):
""" Parses a single, complete listener item or group string.
"""
c = self.skip_ws
if c == '[':
result = self.parse_group()
c = self.skip_ws
else:
name = self.name
if name != '':
c = self.next
result = ListenerItem( name = name )
if c in '+-':
result.name += '*'
result.metadata_defined = (c == '+')
cn = self.skip_ws
result.metadata_name = metadata = self.name
if metadata != '':
cn = self.skip_ws
result.is_any_trait = ((c == '-') and (name == '') and
(metadata == ''))
c = cn
if result.is_any_trait and (not ((c == terminator) or
((c == ',') and (terminator == ']')))):
self.error( "Expected end of name" )
elif c == '?':
if len( name ) == 0:
self.error( "Expected non-empty name preceding '?'" )
result.name += '?'
c = self.skip_ws
cycle = (c == '*')
if cycle:
c = self.skip_ws
if c in '.:':
result.notify = (c == '.')
next = self.parse_item( terminator )
if cycle:
last = result
while last.next is not None:
last = last.next
last.next = lg = ListenerGroup( items = [ next, result ] )
result = lg
else:
result.next = next
return result
if c == '[':
if (self.skip_ws == ']') and (self.skip_ws == terminator):
self.backspace
result.is_list_handler = True
else:
self.error( "Expected '[]' at the end of an item" )
else:
self.backspace
if cycle:
result.next = result
return result
#---------------------------------------------------------------------------
# Parses the metadata portion of a listener item:
#---------------------------------------------------------------------------
def parse_metadata ( self, item ):
""" Parses the metadata portion of a listener item.
"""
self.skip_ws
item.metadata_name = name = self.name
if name == '':
self.backspace
#---------------------------------------------------------------------------
# Raises a syntax error:
#---------------------------------------------------------------------------
def error ( self, msg ):
""" Raises a syntax error.
"""
raise TraitError( "%s at column %d of '%s'" %
( msg, self.index, self.text ) )
#-- Event Handlers ---------------------------------------------------------
#---------------------------------------------------------------------------
# Handles the 'text' trait being changed:
#---------------------------------------------------------------------------
def _text_changed ( self ):
self.index = 0
self.len_text = len( self.text )
self.listener = self.parse()
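#---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original source). Assuming the
# enclosing parser class is Traits' ListenerParser with the public 'text'
# trait used above, parsing a dotted pattern yields a chain of ListenerItem
# objects. Wrapped in a function so nothing runs at import time.
#---------------------------------------------------------------------------
def _example_parse_listener ( ):
    parser = ListenerParser( text = 'child.value' )
    item = parser.listener                # ListenerItem( name = 'child' )
    return item.name, item.next.name      # ( 'child', 'value' )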
#-------------------------------------------------------------------------------
# 'ListenerNotifyWrapper' class:
#-------------------------------------------------------------------------------
class ListenerNotifyWrapper ( TraitChangeNotifyWrapper ):
#-- TraitChangeNotifyWrapper Method Overrides ------------------------------
def __init__ ( self, handler, owner, id, listener, target=None):
self.type = ListenerType.get( self.init( handler,
weakref.ref( owner, self.owner_deleted ), target ) )
self.id = id
self.listener = listener
def listener_deleted ( self, ref ):
owner = self.owner()
if owner is not None:
dict = owner.__dict__.get( TraitsListener )
listeners = dict.get( self.id )
listeners.remove( self )
if len( listeners ) == 0:
del dict[ self.id ]
if len( dict ) == 0:
del owner.__dict__[ TraitsListener ]
# fixme: Is the following line necessary, since all registered
# notifiers should be getting the same 'listener_deleted' call:
self.listener.unregister( owner )
self.object = self.owner = self.listener = None
def owner_deleted ( self, ref ):
self.object = self.owner = None
#-------------------------------------------------------------------------------
# 'ListenerHandler' class:
#-------------------------------------------------------------------------------
class ListenerHandler ( object ):
def __init__ ( self, handler ):
if type( handler ) is MethodType:
object = handler.im_self
if object is not None:
self.object = weakref.ref( object, self.listener_deleted )
self.name = handler.__name__
return
self.handler = handler
def __call__ ( self ):
result = getattr( self, 'handler', None )
if result is not None:
return result
return getattr( self.object(), self.name )
def listener_deleted ( self, ref ):
self.handler = Undefined
|
Looking for fun activities and sightseeing things to do on your next holiday to Alicante province? Our selection of local expert articles shows you the best things to see, places to visit, helpful travel tips and Alicante province attractions for all budgets and ages.
Choose from world-famous tourist attractions and landmarks, top free activities and fun things to do in Alicante province. Discover famous cities, beach resorts, islands and countryside with recommendations on must-see places and sightseeing tips.
|
# Techniques for outlier detection of speeds. Each of these returns a speed threshold that
# can be used with outlier detection techniques.
# Standard imports
import logging
logging.basicConfig(level=logging.DEBUG)
class BoxplotOutlier(object):
MINOR = 1.5
MAJOR = 3
def __init__(self, multiplier = MAJOR, ignore_zeros = False):
self.multiplier = multiplier
self.ignore_zeros = ignore_zeros
def get_threshold(self, with_speeds_df):
if self.ignore_zeros:
df_to_use = with_speeds_df[with_speeds_df.speed > 0]
else:
df_to_use = with_speeds_df
quartile_vals = df_to_use.quantile([0.25, 0.75]).speed
logging.debug("quartile values are %s" % quartile_vals)
iqr = quartile_vals.iloc[1] - quartile_vals.iloc[0]
logging.debug("iqr %s" % iqr)
return quartile_vals.iloc[1] + self.multiplier * iqr
class SimpleQuartileOutlier(object):
def __init__(self, quantile = 0.99, ignore_zeros = False):
self.quantile = quantile
self.ignore_zeros = ignore_zeros
def get_threshold(self, with_speeds_df):
if self.ignore_zeros:
df_to_use = with_speeds_df[with_speeds_df.speed > 0]
else:
df_to_use = with_speeds_df
return df_to_use.speed.quantile(self.quantile)
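# Hedged usage sketch (not part of the original module): both classes expect a
# pandas DataFrame with a 'speed' column, so a tiny made-up frame is used here.
if __name__ == "__main__":
    import pandas as pd
    toy_df = pd.DataFrame({"speed": [0.0, 1.2, 1.5, 1.8, 2.1, 45.0]})
    logging.info("boxplot threshold = %s" %
                 BoxplotOutlier(ignore_zeros=True).get_threshold(toy_df))
    logging.info("p99 threshold = %s" %
                 SimpleQuartileOutlier(quantile=0.99).get_threshold(toy_df))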
|
Louise Ann Fernandez, Chairperson for JMBM’s Labor & Employment Department, was included in the Daily Journal’s list of the Top 75 Labor and Employment Attorneys.
Based in Los Angeles, Louise Ann has more than 25 years of experience in labor law defense, specializing in labor relations, employment counseling and litigation, wrongful termination, wage and hour and other class actions, and unfair competition laws for companies and their owners.
Louise Ann’s recent victories include two significant wins for client Northrop Grumman Corporation in a sexual harassment lawsuit and the subsequent retaliation arbitration; in both cases, the employee’s claims were dismissed. She also recently brought a confidentiality breach lawsuit against a former employee and counsel for Diskeeper Corp, winning $500,000 in damages and fees for the client.
|
#!/usr/bin/python
# Copyright (c) 2018 Cisco and/or its affiliates.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: ftd_file_download
short_description: Downloads files from Cisco FTD devices over HTTP(S)
description:
- Downloads files from Cisco FTD devices including pending changes, disk files, certificates,
troubleshoot reports, and backups.
version_added: "2.7"
author: "Cisco Systems, Inc. (@annikulin)"
options:
operation:
description:
- The name of the operation to execute.
- Only operations that return a file can be used in this module.
required: true
type: str
path_params:
description:
- Key-value pairs that should be sent as path parameters in a REST API call.
type: dict
destination:
description:
- Absolute path of where to download the file to.
- If destination is a directory, the module uses a filename from 'Content-Disposition' header specified by the server.
required: true
type: path
"""
EXAMPLES = """
- name: Download pending changes
ftd_file_download:
operation: 'getdownload'
path_params:
objId: 'default'
destination: /tmp/
"""
RETURN = """
msg:
description: The error message describing why the module failed.
returned: error
type: string
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.ftd.common import FtdServerError, HTTPMethod
from ansible.module_utils.network.ftd.fdm_swagger_client import OperationField, ValidationError, FILE_MODEL_NAME
def is_download_operation(op_spec):
return op_spec[OperationField.METHOD] == HTTPMethod.GET and op_spec[OperationField.MODEL_NAME] == FILE_MODEL_NAME
def validate_params(connection, op_name, path_params):
field_name = 'Invalid path_params provided'
try:
is_valid, validation_report = connection.validate_path_params(op_name, path_params)
if not is_valid:
raise ValidationError({
field_name: validation_report
})
except Exception as e:
raise ValidationError({
field_name: str(e)
})
def main():
fields = dict(
operation=dict(type='str', required=True),
path_params=dict(type='dict'),
destination=dict(type='path', required=True)
)
module = AnsibleModule(argument_spec=fields,
supports_check_mode=True)
params = module.params
connection = Connection(module._socket_path)
op_name = params['operation']
op_spec = connection.get_operation_spec(op_name)
if op_spec is None:
module.fail_json(msg='Operation with specified name is not found: %s' % op_name)
if not is_download_operation(op_spec):
module.fail_json(
msg='Invalid download operation: %s. The operation must make GET request and return a file.' %
op_name)
try:
path_params = params['path_params']
validate_params(connection, op_name, path_params)
if module.check_mode:
module.exit_json(changed=False)
connection.download_file(op_spec[OperationField.URL], params['destination'], path_params)
module.exit_json(changed=False)
except FtdServerError as e:
module.fail_json(msg='Download request for %s operation failed. Status code: %s. '
'Server response: %s' % (op_name, e.code, e.response))
except ValidationError as e:
module.fail_json(msg=e.args[0])
if __name__ == '__main__':
main()
|
The Pavlova is one of those bits of food magic that everyone should make once. While it may look intimidating, at its heart it is as simple as strawberry shortcake. You're not afraid to make strawberry shortcake, are you? I didn't think so.
May I just say, I love my new box of light! Following the Strobist's excellent instructions for photography on the cheap, in this case the DIY $10 Macro Photo Studio, I transformed a cardboard box into, if not a thing of beauty itself, a thing that will give that "thing of beauty" quality to other things.
When I was a young'un, I moved from "Baja Oregon" to a very small coastal town in southwest Washington. A town where the locals joked, in some cases bragged, that, upon arriving, you should turn back your clock 20 years - to the '50s. I, being a child of the coolest artistic little beach towns in Baja Oregon, thought this was mildly amusing...for about 15 minutes.
I arrived in late-spring and my first summer there was, to put it mildly, not my best year. Two things saved me that wet, foggy summer. The first was a job at the local pizzeria, where Gina, a wise-cracking New Jersey girl — everyone swore we were sisters — taught me to toss rounds of dough high in the air and, much harder, catch them again. She also let me play with the brick oven. I loved Gina.
The second bit of salvation arrived one night when I met Becky and we instantly became BFF, before there even were BFF. This bread, made in loaves, was Becky's favorite. I baked some every week or so for years and years. Then Becky and I lost touch. I also mostly stopped baking this bread. Both sad things.
When I was a kid, I was not exactly enamored with lamb. It wasn't common fare around my place and, when it was served, it was one of the few meats that got the utterly unimaginative preparation of salt/pepper/garlic, grilled and with mint jelly on the side. It wasn't bad, but it certainly was not the usual, creative dinner I was spoiled enough to expect. Fast forward a few years, errr, decades and lamb is rapidly becoming one of my favorite meat choices.
What is the difference between English muffins and crumpets?
Well folks, after a day of research and experimentation, I have the answer to one of those. No, not the first three - I took the tough one: English muffins v. crumpets.
The first week of January is the time of year for lofty goals and noble aspirations and the food world is no exception. Food-related resolutions seem to fall into two categories: Proscriptive and prescriptive. There are far more of the former and they mostly take the form of "I will change my diet by not eating sugar, fat, HFCS, bread, chocolate, and so on." There are, to be fair, some positive "I will eat..." resolutions out there (mine is to grow some vegetables along with the herbs in my garden) but most of them are framed in the negative. What a way to start the year, with a list of things you are going to deny yourself!
Far more fun are the prescriptive resolutions. I particularly like "I will make ____ for the first time." and the ones that start "I will learn to..." If I was forced to make New Year's resolutions - and thankfully, I am not - I would fall into this camp so I have a soft spot for them.
If I had to pick one word for my life the last while, it would have to be scattered. Just as one crazy thing is brought under control, the next careens into view. Like garlic butter in your cake pan. Or a teetering stack of biscuits.
One of my surest cures for scattered is bread. As I gather the bits of ragged dough and knead them together into a cohesive whole, I am, likewise, remade just a bit, my loose edges reintegrated and all that. It's one of my favorite meditative states.
Some people insist on doing things the hard way, the complicated way, and I will gladly admit to being one of them - especially when it comes to bread. Not all the time, mind you, there are days when I need bread today and throw together a quick batch of baguettes, but on the other hand...well, lets just say that when I had to make fresh sourdough starter - after doing unmentionable things to my old one (the pretty pink stuff growing on it was cute but unappetizing) - I insisted on doing it by capturing wild yeast.
Worse, I made three kinds of starter: rye, white whole wheat, and white. This met with varying degrees of success, let's just say that if you plan on doing this at home, you can skip the plain white flour version. After ten days of nurturing three starters along, however, my kitchen is but a Bunsen burner away from qualifying as a mad scientist's lab. And I still haven't made any bread from the wild yeast starter, two jars of which are bubbling along in the refrigerator.
I think every baker needs a few never-fail recipes in their back pocket. Recipes that they can play with endlessly with a fair degree of certainty of success. This recipe is a variation of one of my standby recipes: a poolish baguette from Peter Reinhart's Bread Baker's Apprentice. If I had to pick just a few breads to bake all the time, this would be one of them. In its original form, it makes wonderful baguettes and is well suited to being shaped for breads like epis and I have been able to corrupt... err, vary it pretty endlessly over the years.
In fact — confession time — I once made a double batch of this bread. Except I didn't double the yeast. And I tripled the oil. (don't ask, it was late, I was rushed and had no business driving a KitchenAid...) As I kneaded the dough, stumbling my way through a series of "this feels all wrong" corrections, I slowly figured out how badly I had screwed up. Ever the good food writer, I trudged on, determined to take photos for an article titled "How to waste two pounds of flour" that I would write someday. Except for one problem: the bread was fine. It wasn't great, but it was good. This recipe earned its place in my back pocket that day.
|
# *-* coding:utf-8 *-*
'''
@author: ioiogoo
@date: 17-1-7 下午1:31
'''
from bs4 import BeautifulSoup
import requests
from base import Base
from models import Jobbole_news
from peewee import IntegrityError
class Jobbole_new(Base):
def __init__(self):
super(Jobbole_new, self).__init__()
self.name = 'Jobbole_news'
self.url = 'http://blog.jobbole.com/all-posts/'
def parse(self):
try:
print '%s is parsing......' % self.name
html = requests.get(url=self.url, headers=self.headers).content
soup = BeautifulSoup(html, 'lxml')
news = []
for new in soup.find_all(class_="post floated-thumb"):
title = new.find('p').a['title']
url = new.find('p').a['href']
time = new.find('p').get_text('////').split('////')[-3].replace(u'·', '').strip()
intro = new.find(class_="excerpt").get_text().strip()
news.append(dict(title=title, url=url, time=time, intro=intro))
return 0, news
except Exception as e:
return 1, e
def handle(self):
status, news = self.parse()
if not status:
for new in news[::-1]:
try:
Jobbole_news(title=new['title'],
url=new['url'],
time=new['time'],
intro=new['intro']).save()
except IntegrityError:
pass
print '%s is done...' % self.name
else:
print news
if __name__ == '__main__':
j = Jobbole_new()
j.handle()
|
A bouquet of aromas and flavours.
An ode to aromatic richness.
Thirty years of maturing underscore the unwavering style and quality of the Perrier-Jouët vineyards.
The House has perpetuated the art of the vintage ever since 1811. It first selects choice wines from remarkable years and then consigns these rare vintages to the silence of its cellars. While each in turn possesses the particular character of its year, they all illustrate the outstanding quality of the Perrier-Jouët vineyards. 65 hectares, ranked at 99.2% on the scale of grands crus. The vineyards are situated in the "magic Champagne triangle", a perfect location which gives the chardonnays great maturity. A character magnified by the expertise of the House, developed over more than two centuries, by no more than seven Cellar Masters. Each was trained by the previous Master, like craftsmen jealously handing down their manufacturing secrets from father to son and thus ensuring the longevity of the distinctive style that characterises Perrier-Jouët. Hervé Deschamps, the current Cellar Master and guardian of this precious expertise since 1993, spent ten years working alongside his predecessor before mastering the subtleties of the House style and its unique floral tonality. Viewing each cuvée as a unique work of art, Deschamps fashions and sculpts, with a craftsman's attention to detail, each of the wines that make up the composition of his creations, in order to give full expression to the elegance and the finesse of Perrier-Jouët.
In 1982, Mother Nature offered up a harvest of rare generosity. Time and a precise and intuitive composition transformed this natural offering into a marvel of balance, characterised by the remarkable quality of the chardonnays. Thirty years later, this exceptional vintage still testifies to the unwavering style of Perrier-Jouët.
The 1982 vintage is characterised by its incredibly sugary chardonnays which give the wines finesse and elegance, offering the promise of a fine balance and great longevity. The final composition kept the promises of the grand crus that define the Belle Epoque style: 50% chardonnays (Cramant, Avize, Chouilly and Mesnil/Oger); 45% pinot noir (Aÿ, Ambonnay, Bouzy, Mailly, Verzy and Vernezay); 5% pinot meunier (Dizy, Hautvillers, Venteuil and Vincelles).
A prestigious wooden presentation case, engraved with the iconic anemone and the vintage, is the perfect setting for this rare wine.
Penetrate the secrets of two centuries of Perrier-Jouët heritage.
A multi-faceted wine and a model of sophistication that underscores the constancy of the Perrier-Jouët style.
The House of Perrier-Jouët has opened its doors to reveal rare vintages, previously kept under a veil of near secrecy in its cellars. Hervé Deschamps has selected a few bottles from the Belle Époque cuvée that are now almost impossible to find: 1982, 1985, 1996. They have lost none of their authenticity down the years and are a testament to a unique and unwavering style, renowned for its floral elegance and diamond-cut aromas.
A golden hue, with amber glints and a lingering effervescence.
An aromatic palette of exceptional opulence gradually reveals its secrets to the nose with extreme subtlety: flavours of dried fruits, mature and exotic fruits, candied fruits and cooked pears mingle with hints of cinnamon, heather honey, nougat and walnuts, topped with buttery notes and the aromas of pale tobacco, leather and roasted cocoa beans – an extraordinary bouquet that gradually reveals its fullness.
The palate is refined and complex, achieving a perfect harmony of minerality, acidity and freshness.
"A wine of great subtlety with the elegance and energy of a thoroughbred Arab Stallion."
A fragrance whose great subtlety calls for the finesse of veal or a fillet of lamb.
In 1902, Emile Gallé designed a delicate spray of anemones to be engraved on the prestigious cuvées of Perrier-Jouët Champagnes. The House has remained loyal to this artistic heritage ever since. The same tradition gave rise to Beauting: a vision of gastronomy which embraces Beauty and Good Food, turning each dish into a work of art, in perfect harmony with each cuvée. A unique sensory experience and a feast for the eye and the palate alike.
The subtlety and vivacity of this rare vintage would be a fitting accompaniment for a medallion of veal, set off with a fondue of rocket gently browned with parmesan, but the match would be no less perfect with a fillet of lamb garnished with a celery mousse.
|
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
""" Plugin to correct and validate dates """
from datetime import datetime
try:
from dateutil import parser
HAS_DATEUTIL = True
except ImportError:
HAS_DATEUTIL = False
def check_record(record, fields, dayfirst=True, yearfirst=False,
date_format="%Y-%m-%d", allow_future=True,
minimum_date=datetime(1800,1,1)):
"""
Corrects and validates date fields
For detailed explanation of how dayfirst and yearfirst works, visit
http://labix.org/python-dateutil#head-b95ce2094d189a89f80f5ae52a05b4ab7b41af47
For detailed explanation of the date_format placeholders, visit
http://docs.python.org/2/library/datetime.html#strftime-strptime-behavior
This plugin needs the python-dateutil library to work.
@param dayfirst Consider the day first if ambiguous
@type dayfirst boolean
@param yearfirst Consider year first if ambiguous
@type yearfirst boolean
@param date_format normalized date format
@type date_format string
@param allow_future If False, dates in the future will be marked as invalid
@type allow_future boolean
@param minimum_date dates older than this will be rejected. Default Jan 1 1800
@type minimum_date datetime.datetime
"""
if not HAS_DATEUTIL:
return
for position, value in record.iterfields(fields):
try:
new_date = parser.parse(value, dayfirst=dayfirst, yearfirst=yearfirst)
except (ValueError, TypeError):
record.set_invalid("Non-parseable date format in field %s" % position[0])
continue
if not allow_future and new_date > datetime.now():
record.set_invalid("Date in the future in field %s" % position[0])
if new_date < minimum_date:
record.set_invalid("Date too old (less than minimum_date) in field %s" % position[0])
if new_date < datetime(1900, 1, 1):
continue # strftime doesn't accept older dates
new_date_str = new_date.strftime(date_format)
record.amend_field(position, new_date_str)
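# Hedged usage sketch (not part of the original plugin). The real 'record'
# object comes from Invenio's BibCheck framework; the stand-in below only
# mimics the three methods the plugin actually calls (iterfields, set_invalid,
# amend_field) so the normalisation logic can be exercised in isolation.
class _FakeRecord(object):
    def __init__(self, values):
        self.values = values          # e.g. {'260__c': '3 May 2005'}
        self.errors = []
    def iterfields(self, fields):
        for field in fields:
            if field in self.values:
                yield (field,), self.values[field]
    def set_invalid(self, msg):
        self.errors.append(msg)
    def amend_field(self, position, new_value):
        self.values[position[0]] = new_value

if __name__ == '__main__':
    record = _FakeRecord({'260__c': '3 May 2005'})
    check_record(record, ['260__c'])
    print(record.values)   # {'260__c': '2005-05-03'} when python-dateutil is installed
    print(record.errors)   # [] -- the date parsed cleanly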
|
World Powered Paragliding Association WPPGA New President!
The WPPGA Has a NEW President & Vice President!
My Wife and I have done some really exciting things in our lives up to this point. The constant in everything we have done is that we are always involved in activities which change people’s lives. We have run some of the most successful Kenpo Karate schools in Northern California, produced several registered service dogs (including Search & Rescue), managed several fitness clubs and more! There is NOTHING more important to us than service to others, and NOTHING more exhilarating than being a part of changing someone’s life. It simply comes down to what we call “the house rules.” If you have a gift that can benefit others, there is a DUTY to share that knowledge.
Conquering the sky and touching clouds has always been a mutual dream for my Wife and I. When the shift in our country’s economic state began, we scaled down our karate school chains and began our next great adventure. This of course was Powered Paragliding! The first step involved researching the sport and trying to find the best training possible. This was a much greater challenge than we anticipated. In fact, it reminded us of what it was like to find a good karate school years ago. Everyone offering instruction claimed to be the best, but at the same time, they spoke so negatively about everyone else out there teaching. From personal experience in the martial arts world, the only people who talked negatively about others were extremely insecure about their own capabilities. My wife and I NEVER had to bring down another school in order to convince a student to join ours. We simply let our actions speak for themselves, and let potential students make their decision based on what THEY personally felt was good for them.
We set out traveling the States in search of a Powered Paragliding school that possessed the same values we would look for in choosing a karate school. The common denominator in Powered Paragliding and karate schools is that your life could depend on choosing the right one. We attended many of the Powered Paragliding fly-in events, and read just about everything we could find online. We found countless self-taught instructors out there, and so-called “crash courses” claiming that they could train students in 2 to 3 days. Relating back to our karate experience, there are countless karate instructors out there who learned from books or videos, and claim that their students can achieve a black belt in 1-2 years. Think about this… If you were looking for a good doctor, would you choose one who learned from a video and is offering 10 min. surgery, or would you choose one that went to Harvard and takes their time performing their work? It’s devastating that so many people out there are choosing these “fast-food” schools because of today’s “gotta get it now” mentality. Again, if your life depends on choosing the proper instruction, shortcuts are just not an option. We finally made the well-informed choice to go with the WPPGA National Training Center on the Pacific coast. All of the CERTIFIED WPPGA Master Instructors take their time to teach and develop each student until they reach their full potential. The WPPGA was also the only school in the world to provide equipment insurance while training. This alone made a huge statement. The WPPGA is so confident in their instruction that they do not need to worry about a student breaking their equipment. I can’t imagine how expensive training would be if I broke a machine while in training (especially if it was poor training) and had to pay for it. $12,000 for lessons is a little steep!
As time went by, we decided to become more & more involved with the WPPGA. We wanted to support the WPPGA’s quest to provide people with the safest and most thorough training in the sport. My Wife and I never thought that one day we would be heading up this amazing organization. It has become just as rewarding for us as getting a student up to the level of Black Belt!
All that being said, we will continue to grow, learn, and change lives… One flight at a time!
|
from django.core.management.base import BaseCommand
from django.contrib.auth.models import User
from rest_framework.authtoken.models import Token
from shop.tests.factories import ProductFactory
from shop.models import Product
class Command(BaseCommand):
args = ''
help = ('Initialize an empty DB creating a User, setting a specific token, creating two '
'products (a Widget and a Gizmo).')
def handle(self, *args, **options):
# Create the default User
if User.objects.count() == 0:
user = User.objects.create_user(username='andrea',
email='[email protected]', password='andreatest')
user.save()
else:
user = User.objects.get(id=1)
if Token.objects.count() == 0:
# Generate the token for the created user
Token.objects.create(user=user)
# Change the Token to a known one
Token.objects.filter(user_id=user.id).update(key='b60868c38b813ea43b36036503e3f5de025dde31')
if Product.objects.count() == 0:
# Create a Widget and a Gizmo products on DB
ProductFactory.create(name='Widget', collect_stamp=True)
ProductFactory.create(name='Gizmo', collect_stamp=False)
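# Hedged usage note (not part of the original command): Django exposes the
# command under the name of this file inside management/commands/, e.g. if it
# were saved as init_test_db.py it would be run with:
#
#   python manage.py init_test_db
#
# Each block above only creates records when the corresponding table is still
# empty, so re-running the command is harmless.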
|
Features crisp, sweet, sophisticated cymbals with a modern sound. Zildjian A Custom cymbals are made using radical rotary hammering techniques, thin weights, and a brilliant finish. Great for a variety of drumming styles. This pack features 14" A Custom HiHats, 16" A Custom Crash, 18" A Custom Crash, and 20" A Custom Medium Ride.
|
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2016 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import json
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import env_fallback, return_values
from ansible.module_utils.network.common.utils import to_list
from ansible.module_utils.connection import Connection
_DEVICE_CONFIGS = {}
def get_connection(module):
if hasattr(module, '_exos_connection'):
return module._exos_connection
capabilities = get_capabilities(module)
network_api = capabilities.get('network_api')
if network_api == 'cliconf':
module._exos_connection = Connection(module._socket_path)
else:
module.fail_json(msg='Invalid connection type %s' % network_api)
return module._exos_connection
def get_capabilities(module):
if hasattr(module, '_exos_capabilities'):
return module._exos_capabilities
capabilities = Connection(module._socket_path).get_capabilities()
module._exos_capabilities = json.loads(capabilities)
return module._exos_capabilities
def get_config(module, flags=None):
global _DEVICE_CONFIGS
if _DEVICE_CONFIGS != {}:
return _DEVICE_CONFIGS
else:
connection = get_connection(module)
out = connection.get_config()
cfg = to_text(out, errors='surrogate_then_replace').strip()
_DEVICE_CONFIGS = cfg
return cfg
def run_commands(module, commands, check_rc=True):
responses = list()
connection = get_connection(module)
for cmd in to_list(commands):
if isinstance(cmd, dict):
command = cmd['command']
prompt = cmd['prompt']
answer = cmd['answer']
else:
command = cmd
prompt = None
answer = None
out = connection.get(command, prompt, answer)
try:
out = to_text(out, errors='surrogate_or_strict')
except UnicodeError:
module.fail_json(msg=u'Failed to decode output from %s: %s' % (cmd, to_text(out)))
responses.append(out)
return responses
def load_config(module, commands):
connection = get_connection(module)
out = connection.edit_config(commands)
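# Hedged usage sketch (not part of the original module_utils file): inside an
# EXOS module these helpers are typically driven from main(), roughly like the
# trimmed-down example below (argument_spec reduced to a single made-up option).
def _example_show_command():
    from ansible.module_utils.basic import AnsibleModule
    module = AnsibleModule(
        argument_spec=dict(command=dict(type='str', required=True)),
        supports_check_mode=True)
    # run_commands() handles opening the persistent cliconf connection
    output = run_commands(module, [module.params['command']])
    module.exit_json(changed=False, stdout=output)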
|
Step by Step Instructions - Fun and easy to follow with Bruno the Bear as your guide.
Dorset Horn Wool - Cream in colour with a crisp, robust texture, used to make Roly’s body.
Merino Wool - Beautifully soft and in different colours. Used to make the outer layer of your Roly including his eyes, arms and legs.
2 Felting Needles - The needles included in our kits are multi purpose and can be used for all levels of needle felting. They are extremely sharp and should always be handled with care.
Foam Block - This provides a safe surface to needle felt onto. We call it ‘Awesome Foam’; it's the best we have ever used!
2 Black Beads - The beads give your Roly’s pupils an extra bit of POP!
(Using the beads is entirely optional. You may prefer to needle felt the pupils directly onto your Roly's eyes using a wisp of the black merino wool provided in the kit; we always add in a little extra.)
Sewing Needle and Thread - For attaching the beads.
Needle Storage Tube - A safe place to keep all your needles.
|
# coding: utf-8
# <h1>signinlca</h1>
#
# script to signin for volunteers at lca2015!
#
# The script asks for input of firstname, lastname, tshirt size, amount of coffee volc and comments.
# It creates a python dict with this data along with the current date and hour.
# It gets the username of user and saves the data in the users home dir under the folder signinlca.
# It saves the data as a json object.
# Currently saves the file as firstname + lastname.
#
# How could this be improved?
#
# Signup/Signin System.
#
# For signup - username, firstname, lastname, password(x2) are collected with input. Password is salted and hashed.
# Username, firstname, lastname, and password (salt/hash) is added to a dict. Dict is converted to a json object.
# json object is saved as a json file in the folder signinlca / USERNAME-FOLDER / .signup.json
#
# For signin. Username is collected with input.
# Looks for folder of username. Opens .signup.json file - parsing data.
# Save the value of 'password' as a varible.
#
# Asks for password (getpass.getpass('Password please: ')
# salt/hash this password.
# save password attempt if error, otherwise true complete signin.
#
#
#
# TODO
#
# add option to choose to login or logout. Y/N option for each one.
#
# add logout script that appends to the login data. saves, time/date/comment. anything else?
#
# Asign to jobs/room?
#
# Graph up total hour worked in day/week
#
# scp/rsync data to server/web page.
#
# Make new account, use existing account.
#
# Database of existing accounts... static page of files.
#
# Add password to account
#
# If you signin, doesn't ask if want to sign out.
#
# If you signout, doesn't ask if you want to sign in.
#
# Hash passwords
# When creating account asked for username (which could be firstname + lastname), and password. Passwords are hashed and when user tries to login the password inputed is compared to the hashed password.
#
# Save that hash as a varible that is then complared with the saved hash password.
# I have their signin data. Now what to do with it? Save it as a json object to be then used when they signin later?
#
# More security on it? Hash their usernames, firstnames, 2nd password?
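# Hedged sketch (not part of the original notebook) of the salt/hash flow
# described above, kept self-contained so it does not depend on the cells below.
def _example_hash_flow():
    from passlib.hash import pbkdf2_sha256
    stored = pbkdf2_sha256.encrypt('secret', rounds=200000, salt_size=16)  # at signup
    assert pbkdf2_sha256.verify('secret', stored)           # signin with the right password
    assert not pbkdf2_sha256.verify('wrong guess', stored)  # signin with a wrong one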
# In[52]:
import os
#import time
import json
import getpass
import arrow
import hashlib
from passlib.hash import pbkdf2_sha256
from walkdir import filtered_walk, dir_paths, all_paths, file_paths
# In[53]:
gmtz = arrow.utcnow()
# In[54]:
yrmt = gmtz.strftime("%Y")
mthza = gmtz.strftime("%m")
dthaq = gmtz.strftime("%d")
gmtz.strftime("%Y")
#yearz = strftime("%y", gmtime())
#monthz = strftime("%m", gmtime())
#dayz = strftime("%d", gmtime())
# In[55]:
yrmt
# In[56]:
mthza
# In[57]:
dthaq
# In[58]:
def returndate():
return (dthaq + '-' + mthza + '-' + yrmt)
def returntime():
return gmtz.strftime('%H:%M:%S')
puser = ('wcmckee')
yrnum = gmtz.strftime("%Y")
mnthnum = gmtz.strftime("%m")
dayzum = gmtz.strftime("%d")
signpath = ('/home/' + puser + '/signinlca')
yrpath = (signpath + '/' + yrnum)
mnthpath = (yrpath + '/' + mnthnum)
dayzpath = (mnthpath + '/' + dayzum)
# In[59]:
if os.path.isdir(signpath) == True:
print 'Path is there'
else:
print 'Path not there'
os.mkdir(signpath)
# In[60]:
if os.path.isdir(yrpath) == True:
print 'Year Path is there'
else:
print 'Year Path not there'
os.mkdir(yrpath)
if os.path.isdir(mnthpath) == True:
print 'Month Path is there'
else:
print 'Month Path not there'
os.mkdir(mnthpath)
if os.path.isdir(dayzpath) == True:
print 'Day Path is there'
else:
print 'Day Path not there'
os.mkdir(dayzpath)
# In[61]:
dayzpath
# In[62]:
os.chdir(dayzpath)
# In[63]:
opsign = open('/home/wcmckee/signinlca/index.json', 'w')
# In[77]:
signup = raw_input('signup y/n ')
signupd = dict()
numchez = 0
if 'y' in signup:
print('Welcome to signup!')
firnam = raw_input('firstname: ')
signupd.update({"firstname":firnam, })
lasnam = raw_input('last name: ')
usenam = raw_input('username: ')
emnam = raw_input('email: ')
os.mkdir('/home/wcmckee/signinlca/usernames/' + usenam)
#passworz = passwd()
pastest = getpass.getpass('password: ')
pasnde = getpass.getpass('enter password again: ')
signupd.update({"firstname":firnam, "lastname":lasnam,
"username":usenam})
hashez = pbkdf2_sha256.encrypt(pastest, rounds=200000, salt_size=16)
emhash = pbkdf2_sha256.encrypt(emnam, rounds=200000, salt_size=16)
signupd.update({"password":hashez, "email":emhash})
savjsn = open('/home/wcmckee/signinlca/usernames/' + usenam + '/.signups.json', 'a')
jsncov = json.dumps(signupd)
savjsn.write(jsncov)
savjsn.close()
usdir = ('useradd -p ' + pastest + ' ' + usenam)
os.system(usdir)
print('Signup Complete. You can now signin with the username and password')
for logy in range(12):
ferzr = (numchez)
numchez = (numchez + 10)
#usfaz = ('/home/wcmckee/signinlca/usernames/' + str(numchez) + usenam + '/index.json', 'w')
os.mkdir('/home/wcmckee/signinlca/usernames/' + str(usenam) + '/' + str(logy))
# In[65]:
#hashez = pbkdf2_sha256.encrypt(pastest, rounds=200000, salt_size=16)
#signupd.update({"password":hashez})
#signin. need to open
print ('signin!')
loginam = raw_input('Username: ')
#Open logins.json, find the username json object
loginpas = getpass.getpass('Password: ')
vercryp = pbkdf2_sha256.verify(loginpas, hashez)
if vercryp == True:
print 'passwords correct - Logged in!'
else:
print 'passwords wrong - Could not log!'
#exit
# In[66]:
type(signupd)
# In[66]:
# In[67]:
#savjsn.write(jsncov)
# In[17]:
#savjsn.close()
# In[19]:
dicsigni = dict()
# In[21]:
signin = raw_input('signin? y/n')
if 'y' in signin:
#uzname = raw_input('firstname: ')
#lzname = raw_input('lastname: ')
uzernam = raw_input('username: ')
dicsigni.update({'username': uzernam})
opsignin = open('/home/wcmckee/signinlca/usernames/' + str(uzernam) + ('/') + ('.signin.json'), 'w')
logtest = getpass.getpass('login password: ')
loghash = pbkdf2_sha256.encrypt(logtest, rounds=200000, salt_size=16)
vercryp = pbkdf2_sha256.verify(logtest, hashez)
dicsigni.update({'password':loghash})
dicjsn = json.dumps(dicsigni)
opsignin.write(dicjsn)
opsignin.close()
#opsignin.write
if pastest == True:
print 'passwords correct'
# In[24]:
ersignin = open('/home/wcmckee/signinlca/usernames/' + str(uzernam) + ('/') + ('.signin.json'), 'r')
paswz = ersignin.read()
# In[28]:
dicvert = json.loads(paswz)
# In[49]:
dicloin = dicvert['password']
# In[39]:
tresignin = open('/home/wcmckee/signinlca/usernames/' + str(uzernam) + ('/') + ('.signups.json'), 'r')
# In[40]:
convea = tresignin.read()
# In[43]:
jsnver = json.loads(convea)
# In[47]:
jpas = jsnver['password']
# In[50]:
jpas
# In[51]:
dicloin
# In[118]:
loginz = raw_input('signin y/n ')
if 'y' in loginz:
print('You signed in')
#logoutz = None
else:
logoutz = raw_input('signouts y/n ')
# In[119]:
if 'y' in loginz:
firnam = raw_input('first name: ')
lasnam = raw_input('last name: ')
tshir = raw_input('tshirt size: ')
cofvol = raw_input('coffee volc: ')
comen = raw_input('comments: ')
betdict = dict()
betdict.update({'first-name' : firnam, 'last-name' : lasnam, 'signin-date' : returndate()})
betdict.update({'signin-hrmin' : returntime()})
betdict.update({'tshirt-size' : tshir})
betdict.update({'coffees' : int(cofvol)})
betdict.update({'comments:' : comen})
convj = json.dumps(betdict)
puser = getpass.getuser()
opday = open((dayzpath + '/' + firnam + lasnam) + '.json', 'w')
opday.write(str(convj))
opday.close()
else:
print ('not signing in')
# In[480]:
if 'y' in logoutz:
comout = raw_input('out comments: ')
outdic = dict()
firnaz = raw_input('first name: ' )
lasnaz = raw_input('last name: ')
outdic.update({'signout-date': returndate()})
outdic.update({'signout-time': returntime()})
outdic.update({'signout-comment': comout})
conout = json.dumps(outdic)
signoutz = open((dayzpath + '/' + firnaz + lasnaz) + '.json', 'a')
signoutz.write(str(conout))
signoutz.close()
else:
print ('not signing out')
# In[481]:
os.listdir(dayzpath)
# In[481]:
# In[68]:
files = file_paths(filtered_walk('/home/wcmckee/signinlca/', depth=100, included_files=['*.json']))
# In[69]:
for fie in files:
#print fie
print fie
# In[72]:
uslis = os.listdir('/home/wcmckee/signinlca/usernames/')
# In[74]:
print ('User List: ')
for usl in uslis:
print usl
# In[ ]:
|
I have serious NYC lust right now! Everyone is going or has been, I need to get over there!
I would be terrified of the 30 day shred. At least you managed the one day shred. That's probably more than most.
Christmas has been proper stealthy this year. I'm not really prepared, need to do some last minute present buying - eep! Hope you have a lovely time with your mum!
i cannot wait for xmas dinner! Yule log is deffo the way foward!!
Do some shops seriously not allow prams in? That's so mean! Our shop is tiny but we'd never stop anyone bringing one in.
Love the outfit by the way, the striped top looks great on you!
Get those decorations up, you'll feel so much more Christmassy :) I really hate the Christmas crowds too.
I know how you feel about shorts, I really love wearing them all year round, and they look great with tights or socks.
Sausages in bacon are just delicious and it's the only time we eat them here. Oh and I hate Christmas pudding too.
I love this outfit! Also, rather random, but you have fantabulous eyebrows!
You look lovely Caroline, hope your shopping expedition was fruitful! And yule log is most definitely where it's at - mmmmmm roll on the 25th!
I like the detailing on the shoulders and those lashes look awesome! Ahh I love bleach blonde hair XD you pull it off really well. I'm NEARLY NEARLY on xmas break. one. more. day.
Gorgeous outfit & make up! I seriously need to put some more decorations up too, my house is pretty bare!
Wow! I cannot believe you've not long had a baby, you look fantastic!
Great outfit, I love that belt with the pale denim it looks gorgeous. And your hair is so nice!!
You look gorgeous. Your outfit is the bees knees! And I completely understand about shopping on Saturdays, it's like everyone has nothing else to do but shop on those days!
|
"""
desisim.spec_qa.s2n
=========================
Module to examine S/N in object spectra
"""
from __future__ import print_function, absolute_import, division
import matplotlib
# matplotlib.use('Agg')
import numpy as np
import sys, os, glob
from matplotlib import pyplot as plt
import matplotlib.gridspec as gridspec
from astropy.io import fits
from astropy.table import Table, vstack, hstack, MaskedColumn, join
from desiutil.log import get_logger, DEBUG
from desispec.io import get_exposures, findfile, read_fibermap, read_frame
from desisim.spec_qa.utils import get_sty_otype
log = get_logger()
def load_all_s2n_values(nights, channel, sub_exposures=None):
"""
Calculate S/N values for a set of spectra from an input list of nights
Args:
nights: list
channel: str ('b','r','z')
sub_exposures:
Returns:
fdict: dict
Contains all the S/N info for all nights in the given channel
"""
fdict = dict(waves=[], s2n=[], fluxes=[], exptime=[], OII=[], objtype=[])
for night in nights:
if sub_exposures is not None:
exposures = sub_exposures
else:
exposures = get_exposures(night)#, raw=True)
for exposure in exposures:
fibermap_path = findfile(filetype='fibermap', night=night, expid=exposure)
fibermap_data = read_fibermap(fibermap_path)
flavor = fibermap_data.meta['FLAVOR']
if flavor.lower() in ('arc', 'flat', 'bias'):
log.debug('Skipping calibration {} exposure {:08d}'.format(flavor, exposure))
continue
# Load simspec
simspec_file = fibermap_path.replace('fibermap', 'simspec')
log.debug('Getting truth from {}'.format(simspec_file))
sps_hdu = fits.open(simspec_file)
sps_tab = Table(sps_hdu['TRUTH'].data,masked=True)
#- Get OIIFLUX from separate HDU and join
if ('OIIFLUX' not in sps_tab.colnames) and ('TRUTH_ELG' in sps_hdu):
elg_truth = Table(sps_hdu['TRUTH_ELG'].data)
sps_tab = join(sps_tab, elg_truth['TARGETID', 'OIIFLUX'],
keys='TARGETID', join_type='left')
else:
sps_tab['OIIFLUX'] = 0.0
sps_hdu.close()
#objs = sps_tab['TEMPLATETYPE'] == objtype
#if np.sum(objs) == 0:
# continue
# Load spectra (flux or not fluxed; should not matter)
for ii in range(10):
camera = channel+str(ii)
cframe_path = findfile(filetype='cframe', night=night, expid=exposure, camera=camera)
try:
log.debug('Reading from {}'.format(cframe_path))
cframe = read_frame(cframe_path)
except (IOError, OSError):
log.warn("Cannot find file: {:s}".format(cframe_path))
continue
# Calculate S/N per Ang
dwave = cframe.wave - np.roll(cframe.wave,1)
dwave[0] = dwave[1]
# Calculate
s2n = cframe.flux * np.sqrt(cframe.ivar) / np.sqrt(dwave)
#s2n = cframe.flux[iobjs,:] * np.sqrt(cframe.ivar[iobjs,:]) / np.sqrt(dwave)
# Save
fdict['objtype'].append(sps_tab['TEMPLATETYPE'].data[cframe.fibers])
fdict['waves'].append(cframe.wave)
fdict['s2n'].append(s2n)
fdict['fluxes'].append(sps_tab['MAG'].data[cframe.fibers])
fdict['OII'].append(sps_tab['OIIFLUX'].data[cframe.fibers])
fdict['exptime'].append(cframe.meta['EXPTIME'])
# Return
return fdict
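# Hedged illustration (not part of the original module): the per-Angstrom S/N
# used throughout this file is flux * sqrt(ivar) / sqrt(wavelength bin width);
# the toy helper below spells that out for a single 1D spectrum.
def _example_s2n_per_ang(flux, ivar, wave):
    dwave = wave - np.roll(wave, 1)
    dwave[0] = dwave[1]   # first bin has no left neighbour; reuse the second width
    return flux * np.sqrt(ivar) / np.sqrt(dwave)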
def parse_s2n_values(objtype, fdict):
"""
Parse the input set of S/N measurements on objtype
Args:
objtype: str
fdict: dict
Contains all the S/N info for all nights in a given channel
Returns:
pdict: dict
Contains all the S/N info for the given objtype
"""
pdict = dict(waves=[], s2n=[], fluxes=[], exptime=[], OII=[], objtype=[])
# Loop on all the entries
for ss, wave in enumerate(fdict['waves']):
objs = fdict['objtype'][ss] == objtype
if np.sum(objs) == 0:
continue
iobjs = np.where(objs)[0]
# Parse/Save
pdict['waves'].append(wave)
pdict['s2n'].append(fdict['s2n'][ss][iobjs,:])
pdict['fluxes'].append(fdict['fluxes'][ss][iobjs])
if objtype == 'ELG':
pdict['OII'].append(fdict['OII'][ss][iobjs])
pdict['exptime'].append(fdict['exptime'][ss])
# Return
return pdict
def load_s2n_values(objtype, nights, channel, sub_exposures=None):
"""
DEPRECATED
Calculate S/N values for a set of spectra
Args:
objtype: str
nights: list
channel: str
sub_exposures:
Returns:
fdict: dict
Contains S/N info
"""
fdict = dict(waves=[], s2n=[], fluxes=[], exptime=[], OII=[])
for night in nights:
if sub_exposures is not None:
exposures = sub_exposures
else:
exposures = get_exposures(night)#, raw=True)
for exposure in exposures:
fibermap_path = findfile(filetype='fibermap', night=night, expid=exposure)
fibermap_data = read_fibermap(fibermap_path)
flavor = fibermap_data.meta['FLAVOR']
if flavor.lower() in ('arc', 'flat', 'bias'):
log.debug('Skipping calibration {} exposure {:08d}'.format(flavor, exposure))
continue
# Load simspec
simspec_file = fibermap_path.replace('fibermap', 'simspec')
log.debug('Getting {} truth from {}'.format(objtype, simspec_file))
sps_hdu = fits.open(simspec_file)
sps_tab = Table(sps_hdu['TRUTH'].data,masked=True)
#- Get OIIFLUX from separate HDU and join
if ('OIIFLUX' not in sps_tab.colnames) and ('TRUTH_ELG' in sps_hdu):
elg_truth = Table(sps_hdu['TRUTH_ELG'].data)
sps_tab = join(sps_tab, elg_truth['TARGETID', 'OIIFLUX'],
keys='TARGETID', join_type='left')
else:
sps_tab['OIIFLUX'] = 0.0
sps_hdu.close()
objs = sps_tab['TEMPLATETYPE'] == objtype
if np.sum(objs) == 0:
continue
# Load spectra (flux or not fluxed; should not matter)
for ii in range(10):
camera = channel+str(ii)
cframe_path = findfile(filetype='cframe', night=night, expid=exposure, camera=camera)
try:
log.debug('Reading {} from {}'.format(objtype, cframe_path))
cframe = read_frame(cframe_path)
except (IOError, OSError):
log.warn("Cannot find file: {:s}".format(cframe_path))
continue
# Calculate S/N per Ang
dwave = cframe.wave - np.roll(cframe.wave,1)
dwave[0] = dwave[1]
#
iobjs = objs[cframe.fibers]
if np.sum(iobjs) == 0:
continue
s2n = cframe.flux[iobjs,:] * np.sqrt(cframe.ivar[iobjs,:]) / np.sqrt(dwave)
# Save
fdict['waves'].append(cframe.wave)
fdict['s2n'].append(s2n)
fdict['fluxes'].append(sps_tab['MAG'][cframe.fibers[iobjs]])
if objtype == 'ELG':
fdict['OII'].append(sps_tab['OIIFLUX'][cframe.fibers[iobjs]])
fdict['exptime'].append(cframe.meta['EXPTIME'])
# Return
return fdict
def obj_s2n_wave(s2n_dict, wv_bins, flux_bins, otype, outfile=None, ax=None):
"""Generate QA of S/N for a given object type
"""
logs = get_logger()
nwv = wv_bins.size
nfx = flux_bins.size
s2n_sum = np.zeros((nwv-1,nfx-1))
s2n_N = np.zeros((nwv-1,nfx-1)).astype(int)
# Loop on exposures+wedges (can do just once if these are identical for each)
for jj, wave in enumerate(s2n_dict['waves']):
w_i = np.digitize(wave, wv_bins) - 1
m_i = np.digitize(s2n_dict['fluxes'][jj], flux_bins) - 1
mmm = []
for ll in range(nfx-1): # Only need to do once
mmm.append(m_i == ll)
#
for kk in range(nwv-1):
all_s2n = s2n_dict['s2n'][jj][:,w_i==kk]
for ll in range(nfx-1):
if np.any(mmm[ll]):
s2n_sum[kk, ll] += np.sum(all_s2n[mmm[ll],:])
s2n_N[kk, ll] += np.sum(mmm[ll]) * all_s2n.shape[1]
sty_otype = get_sty_otype()
# Plot
if ax is None:
fig = plt.figure(figsize=(6, 6.0))
ax= plt.gca()
# Title
fig.suptitle('{:s}: Summary'.format(sty_otype[otype]['lbl']),
fontsize='large')
# Plot em up
wv_cen = (wv_bins + np.roll(wv_bins,-1))/2.
lstys = ['-', '--', '-.', ':', (0, (3, 1, 1, 1))]
mxy = 1e-9
for ss in range(nfx-1):
if np.sum(s2n_N[:,ss]) == 0:
continue
lbl = 'MAG = [{:0.1f},{:0.1f}]'.format(flux_bins[ss], flux_bins[ss+1])
ax.plot(wv_cen[:-1], s2n_sum[:,ss]/s2n_N[:,ss], linestyle=lstys[ss],
label=lbl, color=sty_otype[otype]['color'])
mxy = max(mxy, np.max(s2n_sum[:,ss]/s2n_N[:,ss]))
ax.set_xlabel('Wavelength (Ang)')
#ax.set_xlim(-ylim, ylim)
ax.set_ylabel('Mean S/N per Ang in bins of 20A')
ax.set_yscale("log", nonposy='clip')
ax.set_ylim(0.1, mxy*1.1)
legend = plt.legend(loc='upper left', scatterpoints=1, borderpad=0.3,
handletextpad=0.3, fontsize='medium', numpoints=1)
# Finish
plt.tight_layout(pad=0.2,h_pad=0.2,w_pad=0.3)
plt.subplots_adjust(top=0.92)
if outfile is not None:
plt.savefig(outfile, dpi=600)
print("Wrote: {:s}".format(outfile))
def obj_s2n_z(s2n_dict, z_bins, flux_bins, otype, outfile=None, ax=None):
"""Generate QA of S/N for a given object type vs. z (mainly for ELG)
"""
logs = get_logger()
nz = z_bins.size
nfx = flux_bins.size
s2n_sum = np.zeros((nz-1,nfx-1))
s2n_N = np.zeros((nz-1,nfx-1)).astype(int)
# Loop on exposures+wedges (can do just once if these are identical for each)
for jj, wave in enumerate(s2n_dict['waves']):
# Turn wave into z
zELG = wave / 3728. - 1.
z_i = np.digitize(zELG, z_bins) - 1
m_i = np.digitize(s2n_dict['OII'][jj]*1e17, flux_bins) - 1
mmm = []
for ll in range(nfx-1): # Only need to do once
mmm.append(m_i == ll)
#
for kk in range(nz-1):
all_s2n = s2n_dict['s2n'][jj][:,z_i==kk]
for ll in range(nfx-1):
if np.any(mmm[ll]):
s2n_sum[kk, ll] += np.sum(all_s2n[mmm[ll],:])
s2n_N[kk, ll] += np.sum(mmm[ll]) * all_s2n.shape[1]
sty_otype = get_sty_otype()
# Plot
if ax is None:
fig = plt.figure(figsize=(6, 6.0))
ax= plt.gca()
# Title
fig.suptitle('{:s}: Redshift Summary'.format(sty_otype[otype]['lbl']),
fontsize='large')
# Plot em up
z_cen = (z_bins + np.roll(z_bins,-1))/2.
lstys = ['-', '--', '-.', ':', (0, (3, 1, 1, 1))]
mxy = 1e-9
for ss in range(nfx-1):
if np.sum(s2n_N[:,ss]) == 0:
continue
lbl = 'OII(1e-17) = [{:0.1f},{:0.1f}]'.format(flux_bins[ss], flux_bins[ss+1])
ax.plot(z_cen[:-1], s2n_sum[:,ss]/s2n_N[:,ss], linestyle=lstys[ss],
label=lbl, color=sty_otype[otype]['color'])
mxy = max(mxy, np.max(s2n_sum[:,ss]/s2n_N[:,ss]))
ax.set_xlabel('Redshift')
ax.set_xlim(z_bins[0], z_bins[-1])
ax.set_ylabel('Mean S/N per Ang in dz bins')
ax.set_yscale("log", nonposy='clip')
ax.set_ylim(0.1, mxy*1.1)
legend = plt.legend(loc='lower right', scatterpoints=1, borderpad=0.3,
handletextpad=0.3, fontsize='medium', numpoints=1)
# Finish
plt.tight_layout(pad=0.2,h_pad=0.2,w_pad=0.3)
plt.subplots_adjust(top=0.92)
if outfile is not None:
plt.savefig(outfile, dpi=600)
print("Wrote: {:s}".format(outfile))
# Command line execution
if __name__ == '__main__':
import desispec.io
from astropy.table import Table
from astropy.io import fits
# Test obj_s2n method
if False:
nights = ['20190901']
exposures = [65+i for i in range(6)]
s2n_values = load_s2n_values('ELG', nights, 'b', sub_exposures=exposures)
wv_bins = np.arange(3570., 5950., 20.)
obj_s2n_wave(s2n_values, wv_bins, np.arange(19., 25., 1.0), 'ELG', outfile='tst.pdf')
# Test obj_s2n_z
if True:
nights = ['20190901']
exposures = [65+i for i in range(6)]
s2n_values = load_s2n_values('ELG', nights, 'z', sub_exposures=exposures)
z_bins = np.linspace(1.0, 1.6, 100) # z camera
oii_bins = np.array([1., 6., 10., 30., 100., 1000.])
obj_s2n_z(s2n_values, z_bins, oii_bins, 'ELG', outfile='tstz.pdf')
|
He's handsome, Alicia. :) He blends right in with those leaves! What kind of bunny is he?
Tedd sure is one handsome bun bun maybe a natural model too lol.
I sometimes get scared when I take Meadow out because there are so many plants that I'm scared could make him sick. Does Tedd ever try to eat leaves, flowers, grass etc.?
Tedd you are a beauitful bunny and your coat looks so great with the leaves.
I sometimes get scared when I take Meadow out because there are so many plants that I'm scared could make him sick. Does Tedd ever try to eat leaves, flowers, grass etc.? Grass and leaves are ok, and dandelions and those small white flowers (weeds); clover is ok too. I don't let him eat any flower that is like a garden flower or any weed I don't know about. I don't think he minds sticking to the safe stuff.
|
"""Models to be used when accessing app specific datastore usage statistics.
These entities cannot be created by users, but are populated in the
application's datastore by offline processes run by the Google App Engine team.
"""
# NOTE: All constant strings in this file should be kept in sync with
# those in google/appengine/ext/db/stats.py.
from . import model
__all__ = ['BaseKindStatistic',
'BaseStatistic',
'GlobalStat',
'KindCompositeIndexStat',
'KindNonRootEntityStat',
'KindPropertyNamePropertyTypeStat',
'KindPropertyNameStat',
'KindPropertyTypeStat',
'KindRootEntityStat',
'KindStat',
'NamespaceGlobalStat',
'NamespaceKindCompositeIndexStat',
'NamespaceKindNonRootEntityStat',
'NamespaceKindPropertyNamePropertyTypeStat',
'NamespaceKindPropertyNameStat',
'NamespaceKindPropertyTypeStat',
'NamespaceKindRootEntityStat',
'NamespaceKindStat',
'NamespacePropertyTypeStat',
'NamespaceStat',
'PropertyTypeStat',
]
class BaseStatistic(model.Model):
"""Base Statistic Model class.
Attributes:
bytes: the total number of bytes taken up in the datastore for the
statistic instance.
count: attribute is the total number of occurrences of the statistic
in the datastore.
timestamp: the time the statistic instance was written to the datastore.
"""
# This is necessary for the _get_kind() classmethod override.
STORED_KIND_NAME = '__BaseStatistic__'
# The number of bytes that is taken up.
bytes = model.IntegerProperty()
# The number of entity records.
count = model.IntegerProperty()
# When this statistic was inserted into the datastore.
timestamp = model.DateTimeProperty()
@classmethod
def _get_kind(cls):
"""Kind name override."""
return cls.STORED_KIND_NAME
class BaseKindStatistic(BaseStatistic):
"""Base Statistic Model class for stats associated with kinds.
Attributes:
kind_name: the name of the kind associated with the statistic instance.
entity_bytes: the number of bytes taken up to store the statistic
in the datastore minus the cost of storing indices.
"""
# This is necessary for the _get_kind() classmethod override.
STORED_KIND_NAME = '__BaseKindStatistic__'
# The name of the kind.
kind_name = model.StringProperty()
# The number of bytes that is taken up in entity table. entity_bytes does not
# reflect the storage allocated for indexes, either built-in or composite
# indexes.
entity_bytes = model.IntegerProperty(default=0L)
class GlobalStat(BaseStatistic):
"""An aggregate of all entities across the entire application.
This statistic only has a single instance in the datastore that contains the
total number of entities stored and the total number of bytes they take up.
Attributes:
entity_bytes: the number of bytes taken up to store the statistic
in the datastore minus the cost of storing indices.
builtin_index_bytes: the number of bytes taken up to store builtin-in
index entries
builtin_index_count: the number of built-in index entries.
composite_index_bytes: the number of bytes taken up to store composite
index entries
composite_index_count: the number of composite index entries.
"""
STORED_KIND_NAME = '__Stat_Total__'
# The number of bytes that is taken up in entity storage.
entity_bytes = model.IntegerProperty(default=0L)
# The number of bytes taken up for built-in index entries.
builtin_index_bytes = model.IntegerProperty(default=0L)
# The number of built-in index entries.
builtin_index_count = model.IntegerProperty(default=0L)
# The number of bytes taken up for composite index entries.
composite_index_bytes = model.IntegerProperty(default=0L)
# The number of composite index entries.
composite_index_count = model.IntegerProperty(default=0L)
class NamespaceStat(BaseStatistic):
"""An aggregate of all entities across an entire namespace.
This statistic has one instance per namespace. The key_name is the
represented namespace. NamespaceStat entities will only be found
in the namespace "" (empty string). It contains the total
number of entities stored and the total number of bytes they take up.
Attributes:
subject_namespace: the namespace associated with the statistic instance.
entity_bytes: the number of bytes taken up to store the statistic
in the datastore minus the cost of storing indices.
builtin_index_bytes: the number of bytes taken up to store built-in
index entries
builtin_index_count: the number of built-in index entries.
composite_index_bytes: the number of bytes taken up to store composite
index entries
composite_index_count: the number of composite index entries.
"""
STORED_KIND_NAME = '__Stat_Namespace__'
# The namespace name this NamespaceStat refers to.
subject_namespace = model.StringProperty()
# The number of bytes that is taken up in entity storage.
entity_bytes = model.IntegerProperty(default=0L)
# The number of bytes taken up for built-in index entries.
builtin_index_bytes = model.IntegerProperty(default=0L)
# The number of built-in index entries.
builtin_index_count = model.IntegerProperty(default=0L)
# The number of bytes taken up for composite index entries.
composite_index_bytes = model.IntegerProperty(default=0L)
# The number of composite index entries.
composite_index_count = model.IntegerProperty(default=0L)
class KindStat(BaseKindStatistic):
"""An aggregate of all entities at the granularity of their Kind.
There is an instance of the KindStat for every Kind that is in the
application's datastore. This stat contains per-Kind statistics.
Attributes:
builtin_index_bytes: the number of bytes taken up to store built-in
index entries
builtin_index_count: the number of built-in index entries.
composite_index_bytes: the number of bytes taken up to store composite
index entries
composite_index_count: the number of composite index entries.
"""
STORED_KIND_NAME = '__Stat_Kind__'
# The number of bytes taken up for built-in index entries.
builtin_index_bytes = model.IntegerProperty(default=0L)
# The number of built-in index entries.
builtin_index_count = model.IntegerProperty(default=0L)
# The number of bytes taken up for composite index entries.
composite_index_bytes = model.IntegerProperty(default=0L)
# The number of composite index entries.
composite_index_count = model.IntegerProperty(default=0L)
class KindRootEntityStat(BaseKindStatistic):
"""Statistics of the number of root entities in the datastore by Kind.
There is an instance of the KindRootEntityStat for every Kind that is in the
application's datastore and has an instance that is a root entity. This stat
contains statistics regarding these root entity instances.
"""
STORED_KIND_NAME = '__Stat_Kind_IsRootEntity__'
class KindNonRootEntityStat(BaseKindStatistic):
"""Statistics of the number of non root entities in the datastore by Kind.
There is an instance of the KindNonRootEntityStat for every Kind that is in
the application's datastore that is not a root entity. This stat contains
statistics regarding these non root entity instances.
"""
STORED_KIND_NAME = '__Stat_Kind_NotRootEntity__'
class PropertyTypeStat(BaseStatistic):
"""An aggregate of all properties across the entire application by type.
There is an instance of the PropertyTypeStat for every property type
(google.appengine.api.datastore_types._PROPERTY_TYPES) in use by the
application in its datastore.
Attributes:
property_type: the property type associated with the statistic instance.
entity_bytes: the number of bytes taken up to store the statistic
in the datastore minus the cost of storing indices.
builtin_index_bytes: the number of bytes taken up to store built-in
index entries
builtin_index_count: the number of built-in index entries.
"""
STORED_KIND_NAME = '__Stat_PropertyType__'
# The name of the property_type.
property_type = model.StringProperty()
# The number of bytes that is taken up in entity storage.
entity_bytes = model.IntegerProperty(default=0L)
# The number of bytes taken up for built-in index entries.
builtin_index_bytes = model.IntegerProperty(default=0L)
# The number of built-in index entries.
builtin_index_count = model.IntegerProperty(default=0L)
class KindPropertyTypeStat(BaseKindStatistic):
"""Statistics on (kind, property_type) tuples in the app's datastore.
There is an instance of the KindPropertyTypeStat for every
(kind, property_type) tuple in the application's datastore.
Attributes:
property_type: the property type associated with the statistic instance.
builtin_index_bytes: the number of bytes taken up to store built-in
index entries
builtin_index_count: the number of built-in index entries.
"""
STORED_KIND_NAME = '__Stat_PropertyType_Kind__'
# The name of the property_type.
property_type = model.StringProperty()
# The number of bytes taken up for built-in index entries.
builtin_index_bytes = model.IntegerProperty(default=0L)
# The number of built-in index entries.
builtin_index_count = model.IntegerProperty(default=0L)
class KindPropertyNameStat(BaseKindStatistic):
"""Statistics on (kind, property_name) tuples in the app's datastore.
There is an instance of the KindPropertyNameStat for every
(kind, property_name) tuple in the application's datastore.
Attributes:
property_name: the name of the property associated with the statistic
instance.
builtin_index_bytes: the number of bytes taken up to store built-in
index entries
builtin_index_count: the number of built-in index entries.
"""
STORED_KIND_NAME = '__Stat_PropertyName_Kind__'
# The name of the property.
property_name = model.StringProperty()
# The number of bytes taken up for built-in index entries.
builtin_index_bytes = model.IntegerProperty(default=0L)
# The number of built-in index entries.
builtin_index_count = model.IntegerProperty(default=0L)
class KindPropertyNamePropertyTypeStat(BaseKindStatistic):
"""Statistic on (kind, property_name, property_type) tuples in the datastore.
There is an instance of the KindPropertyNamePropertyTypeStat for every
(kind, property_name, property_type) tuple in the application's datastore.
Attributes:
property_type: the property type associated with the statistic instance.
property_name: the name of the property associated with the statistic
instance.
builtin_index_bytes: the number of bytes taken up to store built-in
index entries
builtin_index_count: the number of built-in index entries.
"""
STORED_KIND_NAME = '__Stat_PropertyType_PropertyName_Kind__'
# The name of the property type.
property_type = model.StringProperty()
# The name of the property.
property_name = model.StringProperty()
# The number of bytes taken up for built-in index entries.
builtin_index_bytes = model.IntegerProperty(default=0L)
# The number of built-in index entries.
builtin_index_count = model.IntegerProperty(default=0L)
class KindCompositeIndexStat(BaseStatistic):
"""Statistic on (kind, composite_index_id) tuples in the datastore.
There is an instance of the KindCompositeIndexStat for every unique
(kind, composite_index_id) tuple in the application's datastore indexes.
Attributes:
index_id: the id of the composite index associated with the statistic
instance.
kind_name: the name of the kind associated with the statistic instance.
"""
STORED_KIND_NAME = '__Stat_Kind_CompositeIndex__'
# The id of the composite index
index_id = model.IntegerProperty()
# The name of the kind.
kind_name = model.StringProperty()
# The following specify namespace-specific stats.
# These types are specific to the datastore namespace they are located
# within. These will only be produced if datastore entities exist
# in a namespace other than the empty namespace (i.e. namespace="").
class NamespaceGlobalStat(GlobalStat):
"""GlobalStat equivalent for a specific namespace.
These may be found in each specific namespace and represent stats for
that particular namespace.
"""
STORED_KIND_NAME = '__Stat_Ns_Total__'
class NamespaceKindStat(KindStat):
"""KindStat equivalent for a specific namespace.
These may be found in each specific namespace and represent stats for
that particular namespace.
"""
STORED_KIND_NAME = '__Stat_Ns_Kind__'
class NamespaceKindRootEntityStat(KindRootEntityStat):
"""KindRootEntityStat equivalent for a specific namespace.
These may be found in each specific namespace and represent stats for
that particular namespace.
"""
STORED_KIND_NAME = '__Stat_Ns_Kind_IsRootEntity__'
class NamespaceKindNonRootEntityStat(KindNonRootEntityStat):
"""KindNonRootEntityStat equivalent for a specific namespace.
These may be found in each specific namespace and represent stats for
that particular namespace.
"""
STORED_KIND_NAME = '__Stat_Ns_Kind_NotRootEntity__'
class NamespacePropertyTypeStat(PropertyTypeStat):
"""PropertyTypeStat equivalent for a specific namespace.
These may be found in each specific namespace and represent stats for
that particular namespace.
"""
STORED_KIND_NAME = '__Stat_Ns_PropertyType__'
class NamespaceKindPropertyTypeStat(KindPropertyTypeStat):
"""KindPropertyTypeStat equivalent for a specific namespace.
These may be found in each specific namespace and represent stats for
that particular namespace.
"""
STORED_KIND_NAME = '__Stat_Ns_PropertyType_Kind__'
class NamespaceKindPropertyNameStat(KindPropertyNameStat):
"""KindPropertyNameStat equivalent for a specific namespace.
These may be found in each specific namespace and represent stats for
that particular namespace.
"""
STORED_KIND_NAME = '__Stat_Ns_PropertyName_Kind__'
class NamespaceKindPropertyNamePropertyTypeStat(
KindPropertyNamePropertyTypeStat):
"""KindPropertyNamePropertyTypeStat equivalent for a specific namespace.
These may be found in each specific namespace and represent stats for
that particular namespace.
"""
STORED_KIND_NAME = '__Stat_Ns_PropertyType_PropertyName_Kind__'
class NamespaceKindCompositeIndexStat(KindCompositeIndexStat):
"""KindCompositeIndexStat equivalent for a specific namespace.
These may be found in each specific namespace and represent stats for
that particular namespace.
"""
STORED_KIND_NAME = '__Stat_Ns_Kind_CompositeIndex__'
# Maps a datastore stat entity kind name to its respective model class.
# NOTE: Any new stats added to this module should also be added here.
_DATASTORE_STATS_CLASSES_BY_KIND = {
GlobalStat.STORED_KIND_NAME: GlobalStat,
NamespaceStat.STORED_KIND_NAME: NamespaceStat,
KindStat.STORED_KIND_NAME: KindStat,
KindRootEntityStat.STORED_KIND_NAME: KindRootEntityStat,
KindNonRootEntityStat.STORED_KIND_NAME: KindNonRootEntityStat,
PropertyTypeStat.STORED_KIND_NAME: PropertyTypeStat,
KindPropertyTypeStat.STORED_KIND_NAME: KindPropertyTypeStat,
KindPropertyNameStat.STORED_KIND_NAME: KindPropertyNameStat,
KindPropertyNamePropertyTypeStat.STORED_KIND_NAME:
KindPropertyNamePropertyTypeStat,
KindCompositeIndexStat.STORED_KIND_NAME: KindCompositeIndexStat,
NamespaceGlobalStat.STORED_KIND_NAME: NamespaceGlobalStat,
NamespaceKindStat.STORED_KIND_NAME: NamespaceKindStat,
NamespaceKindRootEntityStat.STORED_KIND_NAME: NamespaceKindRootEntityStat,
NamespaceKindNonRootEntityStat.STORED_KIND_NAME:
NamespaceKindNonRootEntityStat,
NamespacePropertyTypeStat.STORED_KIND_NAME: NamespacePropertyTypeStat,
NamespaceKindPropertyTypeStat.STORED_KIND_NAME:
NamespaceKindPropertyTypeStat,
NamespaceKindPropertyNameStat.STORED_KIND_NAME:
NamespaceKindPropertyNameStat,
NamespaceKindPropertyNamePropertyTypeStat.STORED_KIND_NAME:
NamespaceKindPropertyNamePropertyTypeStat,
NamespaceKindCompositeIndexStat.STORED_KIND_NAME:
NamespaceKindCompositeIndexStat,
}
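# Usage sketch: a minimal, hedged example of how these stat models are
# typically read back with the NDB query API once the App Engine team has
# populated them. The helper name below is hypothetical, not part of this
# module.
#
# def summarize_datastore_usage():
#     """Return (total bytes, entity count) for the application, or None."""
#     stat = GlobalStat.query().get()
#     if stat is None:
#         return None
#     return stat.bytes, stat.count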
|
Tights worth worshipping, fit for a goddess like you. Our patent-pending Goddess Ribbed Legging features ultimate performance fabric that slims inner and outer thighs and seriously lifts your booty. New waistband engineering is tummy flattening and super flattering.
Inseam: XXS 32.5"; XS 32.5"; S 33"; M 33.5"; L 33.5"
|
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 3 15:18:38 2014
@author: Dan Denman and Josh Siegle
Loads .continuous, .events, and .spikes files saved from the Open Ephys GUI
Usage:
import OpenEphys
data = OpenEphys.load(pathToFile) # returns a dict with data, timestamps, etc.
"""
import os
import numpy as np
import scipy.signal
import scipy.io
import time
import struct
from copy import deepcopy
# constants
NUM_HEADER_BYTES = 1024
SAMPLES_PER_RECORD = 1024
BYTES_PER_SAMPLE = 2
RECORD_SIZE = 4 + 8 + SAMPLES_PER_RECORD * BYTES_PER_SAMPLE + 10 # size of each continuous record in bytes
RECORD_MARKER = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 255])
# constants for pre-allocating matrices:
MAX_NUMBER_OF_SPIKES = int(1e6)
MAX_NUMBER_OF_RECORDS = int(1e6)
MAX_NUMBER_OF_EVENTS = int(1e6)
def load(filepath):
# redirects to code for individual file types
if 'continuous' in filepath:
data = loadContinuous(filepath)
elif 'spikes' in filepath:
data = loadSpikes(filepath)
elif 'events' in filepath:
data = loadEvents(filepath)
else:
raise Exception("Not a recognized file type. Please input a .continuous, .spikes, or .events file")
return data
def loadFolder(folderpath, dtype=float, **kwargs):
# load all continuous files in a folder
data = {}
# load all continuous files in a folder
if 'channels' in kwargs.keys():
filelist = ['100_CH' + x + '.continuous' for x in map(str, kwargs['channels'])]
else:
filelist = os.listdir(folderpath)
t0 = time.time()
numFiles = 0
for i, f in enumerate(filelist):
if '.continuous' in f:
data[f.replace('.continuous', '')] = loadContinuous(os.path.join(folderpath, f), dtype=dtype)
numFiles += 1
print(''.join(('Avg. Load Time: ', str((time.time() - t0) / numFiles), ' sec')))
print(''.join(('Total Load Time: ', str((time.time() - t0)), ' sec')))
return data
def loadFolderToArray(folderpath, channels='all', chprefix='CH',
dtype=float, session='0', source='100'):
'''Load continuous files in specified folder to a single numpy array. By default all
CH continuous files are loaded in numerical order; ordering can be specified with the
optional channels argument which should be a list of channel numbers.'''
if channels == 'all':
channels = _get_sorted_channels(folderpath, chprefix, session, source)
if session == '0':
filelist = [source + '_' + chprefix + x + '.continuous' for x in map(str, channels)]
else:
filelist = [source + '_' + chprefix + x + '_' + session + '.continuous' for x in map(str, channels)]
t0 = time.time()
numFiles = 1
channel_1_data = loadContinuous(os.path.join(folderpath, filelist[0]), dtype)['data']
n_samples = len(channel_1_data)
n_channels = len(filelist)
data_array = np.zeros([n_samples, n_channels], dtype)
data_array[:, 0] = channel_1_data
for i, f in enumerate(filelist[1:]):
data_array[:, i + 1] = loadContinuous(os.path.join(folderpath, f), dtype)['data']
numFiles += 1
print(''.join(('Avg. Load Time: ', str((time.time() - t0) / numFiles), ' sec')))
print(''.join(('Total Load Time: ', str((time.time() - t0)), ' sec')))
return data_array
def loadContinuous(filepath, dtype=float):
assert dtype in (float, np.int16), \
'Invalid data type specified for loadContinuous, valid types are float and np.int16'
# print("Loading continuous data...")
ch = {}
# read in the data
f = open(filepath, 'rb')
fileLength = os.fstat(f.fileno()).st_size
# calculate number of samples
recordBytes = fileLength - NUM_HEADER_BYTES
if recordBytes % RECORD_SIZE != 0:
raise Exception("File size is not consistent with a continuous file: may be corrupt")
nrec = recordBytes // RECORD_SIZE
nsamp = nrec * SAMPLES_PER_RECORD
# pre-allocate samples
samples = np.zeros(nsamp, dtype)
timestamps = np.zeros(nrec)
recordingNumbers = np.zeros(nrec)
indices = np.arange(0, nsamp + 1, SAMPLES_PER_RECORD, np.dtype(np.int64))
header = readHeader(f)
recIndices = np.arange(0, nrec)
for recordNumber in recIndices:
timestamps[recordNumber] = np.fromfile(f, np.dtype('<i8'), 1) # little-endian 64-bit signed integer
N = np.fromfile(f, np.dtype('<u2'), 1)[0] # little-endian 16-bit unsigned integer
# print index
if N != SAMPLES_PER_RECORD:
raise Exception('Found corrupted record in block ' + str(recordNumber))
recordingNumbers[recordNumber] = (np.fromfile(f, np.dtype('>u2'), 1)) # big-endian 16-bit unsigned integer
if dtype == float: # Convert data to float array and convert bits to voltage.
data = np.fromfile(f, np.dtype('>i2'), N) * float(
header['bitVolts']) # big-endian 16-bit signed integer, multiplied by bitVolts
else: # Keep data in signed 16 bit integer format.
data = np.fromfile(f, np.dtype('>i2'), N) # big-endian 16-bit signed integer
samples[indices[recordNumber]:indices[recordNumber + 1]] = data
marker = f.read(10) # dump
# print recordNumber
# print index
ch['header'] = header
ch['timestamps'] = timestamps
ch['data'] = samples # OR use downsample(samples,1), to save space
ch['recordingNumber'] = recordingNumbers
f.close()
return ch
def loadSpikes(filepath):
'''
Loads spike waveforms and timestamps from filepath (should be .spikes file)
'''
data = {}
# print('loading spikes...')
f = open(filepath, 'rb')
header = readHeader(f)
if float(header[' version']) < 0.4:
raise Exception('Loader is only compatible with .spikes files with version 0.4 or higher')
data['header'] = header
numChannels = int(header['num_channels'])
numSamples = 40 # **NOT CURRENTLY WRITTEN TO HEADER**
spikes = np.zeros((MAX_NUMBER_OF_SPIKES, numSamples, numChannels))
timestamps = np.zeros(MAX_NUMBER_OF_SPIKES)
source = np.zeros(MAX_NUMBER_OF_SPIKES)
gain = np.zeros((MAX_NUMBER_OF_SPIKES, numChannels))
thresh = np.zeros((MAX_NUMBER_OF_SPIKES, numChannels))
sortedId = np.zeros((MAX_NUMBER_OF_SPIKES, numChannels))
recNum = np.zeros(MAX_NUMBER_OF_SPIKES)
currentSpike = 0
while f.tell() < os.fstat(f.fileno()).st_size:
eventType = np.fromfile(f, np.dtype('<u1'), 1) # always equal to 4, discard
timestamps[currentSpike] = np.fromfile(f, np.dtype('<i8'), 1)
software_timestamp = np.fromfile(f, np.dtype('<i8'), 1)
source[currentSpike] = np.fromfile(f, np.dtype('<u2'), 1)
numChannels = int(np.fromfile(f, np.dtype('<u2'), 1))
numSamples = int(np.fromfile(f, np.dtype('<u2'), 1))
sortedId[currentSpike] = np.fromfile(f, np.dtype('<u2'), 1)
electrodeId = np.fromfile(f, np.dtype('<u2'), 1)
channel = np.fromfile(f, np.dtype('<u2'), 1)
color = np.fromfile(f, np.dtype('<u1'), 3)
pcProj = np.fromfile(f, np.float32, 2)
sampleFreq = np.fromfile(f, np.dtype('<u2'), 1)
waveforms = np.fromfile(f, np.dtype('<u2'), numChannels * numSamples)
gain[currentSpike, :] = np.fromfile(f, np.float32, numChannels)
thresh[currentSpike, :] = np.fromfile(f, np.dtype('<u2'), numChannels)
recNum[currentSpike] = np.fromfile(f, np.dtype('<u2'), 1)
waveforms_reshaped = np.reshape(waveforms, (numChannels, numSamples))
waveforms_reshaped = waveforms_reshaped.astype(float)
waveforms_uv = waveforms_reshaped
for ch in range(numChannels):
waveforms_uv[ch, :] -= 32768
waveforms_uv[ch, :] /= gain[currentSpike, ch] * 1000
spikes[currentSpike] = waveforms_uv.T
currentSpike += 1
data['spikes'] = spikes[:currentSpike, :, :]
data['timestamps'] = timestamps[:currentSpike]
data['source'] = source[:currentSpike]
data['gain'] = gain[:currentSpike, :]
data['thresh'] = thresh[:currentSpike, :]
data['recordingNumber'] = recNum[:currentSpike]
data['sortedId'] = sortedId[:currentSpike]
return data
def loadEvents(filepath):
data = {}
# print('loading events...')
f = open(filepath, 'rb')
header = readHeader(f)
if float(header[' version']) < 0.4:
raise Exception('Loader is only compatible with .events files with version 0.4 or higher')
data['header'] = header
index = -1
channel = np.zeros(MAX_NUMBER_OF_EVENTS)
timestamps = np.zeros(MAX_NUMBER_OF_EVENTS)
sampleNum = np.zeros(MAX_NUMBER_OF_EVENTS)
nodeId = np.zeros(MAX_NUMBER_OF_EVENTS)
eventType = np.zeros(MAX_NUMBER_OF_EVENTS)
eventId = np.zeros(MAX_NUMBER_OF_EVENTS)
recordingNumber = np.zeros(MAX_NUMBER_OF_EVENTS)
while f.tell() < os.fstat(f.fileno()).st_size:
index += 1
timestamps[index] = np.fromfile(f, np.dtype('<i8'), 1)
sampleNum[index] = np.fromfile(f, np.dtype('<i2'), 1)
eventType[index] = np.fromfile(f, np.dtype('<u1'), 1)
nodeId[index] = np.fromfile(f, np.dtype('<u1'), 1)
eventId[index] = np.fromfile(f, np.dtype('<u1'), 1)
channel[index] = np.fromfile(f, np.dtype('<u1'), 1)
recordingNumber[index] = np.fromfile(f, np.dtype('<u2'), 1)
data['channel'] = channel[:index]
data['timestamps'] = timestamps[:index]
data['eventType'] = eventType[:index]
data['nodeId'] = nodeId[:index]
data['eventId'] = eventId[:index]
data['recordingNumber'] = recordingNumber[:index]
data['sampleNum'] = sampleNum[:index]
return data
def readHeader(f):
header = {}
h = f.read(1024).decode().replace('\n', '').replace('header.', '')
for i, item in enumerate(h.split(';')):
if '=' in item:
header[item.split(' = ')[0]] = item.split(' = ')[1]
return header
def downsample(trace, down):
downsampled = scipy.signal.resample(trace, int(np.shape(trace)[0] / down))
return downsampled
def pack(folderpath, source='100', **kwargs):
# convert single channel open ephys channels to a .dat file for compatibility with the KlustaSuite, Neuroscope and Klusters
# should not be necessary for versions of open ephys which write data into HDF5 format.
# loads .continuous files in the specified folder and saves a .DAT in that folder
# optional arguments:
# source: string name of the source that openephys uses as the prefix. is usually 100, if the headstage is the first source added, but can specify something different
#
# data: pre-loaded data to be packed into a .DAT
# dref: int specifying a channel # to use as a digital reference. is subtracted from all channels.
# order: the order in which the .continuous files are packed into the .DAT. should be a list of .continuous channel numbers. length must equal total channels.
# suffix: appended to .DAT filename, which is openephys.DAT if no suffix provided.
# load the openephys data into memory
if 'data' not in kwargs.keys():
if 'channels' not in kwargs.keys():
data = loadFolder(folderpath, dtype=np.int16)
else:
data = loadFolder(folderpath, dtype=np.int16, channels=kwargs['channels'])
else:
data = kwargs['data']
# if specified, do the digital referencing
if 'dref' in kwargs.keys():
ref = load(os.path.join(folderpath, ''.join((source, '_CH', str(kwargs['dref']), '.continuous'))))
for i, channel in enumerate(data.keys()):
data[channel]['data'] = data[channel]['data'] - ref['data']
# specify the order the channels are written in
if 'order' in kwargs.keys():
order = kwargs['order']
else:
order = list(data)
# add a suffix, if one was specified
if 'suffix' in kwargs.keys():
suffix = kwargs['suffix']
else:
suffix = ''
# make a file to write the data back out into .dat format
outpath = os.path.join(folderpath, ''.join(('openephys', suffix, '.dat')))
out = open(outpath, 'wb')
# go through the data and write it out in the .dat format
# .dat format specified here: http://neuroscope.sourceforge.net/UserManual/data-files.html
channelOrder = []
print(''.join(('...saving .dat to ', outpath, '...')))
random_datakey = next(iter(data))
bar = ProgressBar(len(data[random_datakey]['data']))
for i in range(len(data[random_datakey]['data'])):
for j in range(len(order)):
if source in random_datakey:
ch = data[order[j]]['data']
else:
ch = data[''.join(('CH', str(order[j]).replace('CH', '')))]['data']
out.write(struct.pack('h', ch[i])) # signed 16-bit integer
# figure out which order this thing packed the channels in. only do this once.
if i == 0:
channelOrder.append(order[j])
# update the progress bar periodically
if i % max(1, len(data[random_datakey]['data']) // 100) == 0:
bar.animate(i)
out.close()
print(''.join(('order: ', str(channelOrder))))
print(''.join(('.dat saved to ', outpath)))
# **********************************************************
# progress bar class used to show progress of pack()
# stolen from some post on stack overflow
import sys
try:
from IPython.display import clear_output
have_ipython = True
except ImportError:
have_ipython = False
class ProgressBar:
def __init__(self, iterations):
self.iterations = iterations
self.prog_bar = '[]'
self.fill_char = '*'
self.width = 40
self.__update_amount(0)
if have_ipython:
self.animate = self.animate_ipython
else:
self.animate = self.animate_noipython
def animate_ipython(self, iter):
print('\r', self, )
sys.stdout.flush()
self.update_iteration(iter + 1)
def update_iteration(self, elapsed_iter):
self.__update_amount((elapsed_iter / float(self.iterations)) * 100.0)
self.prog_bar += ' %d of %s complete' % (elapsed_iter, self.iterations)
def __update_amount(self, new_amount):
percent_done = int(round((new_amount / 100.0) * 100.0))
all_full = self.width - 2
num_hashes = int(round((percent_done / 100.0) * all_full))
self.prog_bar = '[' + self.fill_char * num_hashes + ' ' * (all_full - num_hashes) + ']'
pct_place = (len(self.prog_bar) // 2) - len(str(percent_done))
pct_string = '%d%%' % percent_done
self.prog_bar = self.prog_bar[0:pct_place] + \
(pct_string + self.prog_bar[pct_place + len(pct_string):])
def __str__(self):
return str(self.prog_bar)
# *************************************************************
def pack_2(folderpath, filename='', channels='all', chprefix='CH',
dref=None, session='0', source='100'):
'''Alternative version of pack which uses numpy's tofile function to write data.
pack_2 is much faster than pack and avoids quantization noise incurred in pack due
to conversion of data to float voltages during loadContinuous followed by rounding
back to integers for packing.
filename: Name of the output file. By default, it follows the same layout of continuous files,
but without the channel number, for example, '100_CHs_3.dat' or '100_ADCs.dat'.
channels: List of channel numbers specifying order in which channels are packed. By default
all CH continuous files are packed in numerical order.
chprefix: String name that defines if channels from headstage, auxiliary or ADC inputs
will be loaded.
dref: Digital referencing - either supply a channel number or 'ave' to reference to the
average of packed channels.
source: String name of the source that openephys uses as the prefix. It is usually 100,
if the headstage is the first source added, but can specify something different.
'''
data_array = loadFolderToArray(folderpath, channels, chprefix, np.int16, session, source)
if dref:
if dref == 'ave':
print('Digital referencing to average of all channels.')
reference = np.mean(data_array, 1)
else:
print('Digital referencing to channel ' + str(dref))
if channels == 'all':
channels = _get_sorted_channels(folderpath, chprefix, session, source)
reference = deepcopy(data_array[:, channels.index(dref)])
for i in range(data_array.shape[1]):
data_array[:, i] = data_array[:, i] - reference
if session == '0':
session = ''
else:
session = '_' + session
if not filename: filename = source + '_' + chprefix + 's' + session + '.dat'
print('Packing data to file: ' + filename)
data_array.tofile(os.path.join(folderpath, filename))
def _get_sorted_channels(folderpath, chprefix='CH', session='0', source='100'):
Files = [f for f in os.listdir(folderpath) if '.continuous' in f
and '_' + chprefix in f
and source in f]
if session == '0':
Files = [f for f in Files if len(f.split('_')) == 2]
Chs = sorted([int(f.split('_' + chprefix)[1].split('.')[0]) for f in Files])
else:
Files = [f for f in Files if len(f.split('_')) == 3
and f.split('.')[0].split('_')[2] == session]
Chs = sorted([int(f.split('_' + chprefix)[1].split('_')[0]) for f in Files])
return (Chs)
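# Usage sketch: a hedged example of the typical entry points defined above;
# the file and folder paths are hypothetical, not taken from any real
# recording.
#
# cont = loadContinuous('100_CH1.continuous')          # dict with 'header', 'timestamps', 'data'
# events = loadEvents('all_channels.events')           # dict of event channels, timestamps, ids
# array = loadFolderToArray('/path/to/recording')      # samples x channels numpy array
# pack_2('/path/to/recording', dref='ave')             # write a Neuroscope-compatible .dat file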
|
We can provide results quickly and efficiently; by partnering with us, you can take the pain out of the recruitment process. Use the form below, get in touch, or even drop by the office. Our friendly team are always happy to help.
Our experienced consultants are on hand to discuss your requirements.
|
#!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2005 (ita)
"""Environment representation
There is one gotcha: getitem returns [] if the contents evals to False
This means env['foo'] = {}; print env['foo'] will print [] not {}
"""
import os, copy, re
import Logs, Options
from Constants import *
re_imp = re.compile('^(#)*?([^#=]*?)\ =\ (.*?)$', re.M)
class Environment(object):
"""A safe-to-use dictionary, but do not attach functions to it please (break cPickle)
An environment instance can be stored into a file and loaded easily
"""
__slots__ = ("table", "parent")
def __init__(self, filename=None):
self.table={}
#self.parent = None <- set only if necessary
if Options.commands['configure']:
# set the prefix once and for everybody on creation (configuration)
self.table['PREFIX'] = os.path.abspath(os.path.expanduser(Options.options.prefix))
if filename:
self.load(filename)
def __contains__(self, key):
if key in self.table: return True
try: return self.parent.__contains__(key)
except AttributeError: return False # parent may not exist
def __str__(self):
keys = set()
cur = self
while cur:
keys.update(cur.table.keys())
cur = getattr(cur, 'parent', None)
keys = list(keys)
keys.sort()
return "\n".join(["%r %r" % (x, self.__getitem__(x)) for x in keys])
def set_variant(self, name):
self.table[VARIANT] = name
def variant(self):
env = self
while 1:
try:
return env.table[VARIANT]
except KeyError:
try: env = env.parent
except AttributeError: return DEFAULT
def copy(self):
newenv = Environment()
if Options.commands['configure']:
if self['PREFIX']: del newenv.table['PREFIX']
newenv.parent = self
return newenv
def __getitem__(self, key):
x = self.table.get(key, None)
if not x is None: return x
try:
u = self.parent
except AttributeError:
return []
else:
return u[key]
def __setitem__(self, key, value):
self.table[key] = value
def get_flat(self, key):
s = self[key]
if not s: return ''
elif isinstance(s, list): return ' '.join(s)
else: return s
def _get_list_value_for_modification(self, key):
"""Gets a value that must be a list for further modification. The
list may be modified inplace and there is no need to
"self.table[var] = value" afterwards.
"""
try:
value = self.table[key]
except KeyError:
try: value = self.parent[key]
except AttributeError: value = []
if isinstance(value, list):
value = copy.copy(value)
else:
value = [value]
else:
if not isinstance(value, list):
value = [value]
self.table[key] = value
return value
def append_value(self, var, value):
current_value = self._get_list_value_for_modification(var)
if isinstance(value, list):
current_value.extend(value)
else:
current_value.append(value)
def prepend_value(self, var, value):
current_value = self._get_list_value_for_modification(var)
if isinstance(value, list):
current_value = value + current_value
# a new list: update the dictionary entry
self.table[var] = current_value
else:
current_value.insert(0, value)
# prepend unique would be ambiguous
def append_unique(self, var, value):
current_value = self._get_list_value_for_modification(var)
if isinstance(value, list):
for value_item in value:
if value_item not in current_value:
current_value.append(value_item)
else:
if value not in current_value:
current_value.append(value)
def store(self, filename):
"Write the variables into a file"
file = open(filename, 'w')
# compute a merged table
table_list = []
env = self
while 1:
table_list.insert(0, env.table)
try: env = env.parent
except AttributeError: break
merged_table = {}
for table in table_list:
merged_table.update(table)
keys = merged_table.keys()
keys.sort()
for k in keys: file.write('%s = %r\n' % (k, merged_table[k]))
file.close()
def load(self, filename):
"Retrieve the variables from a file"
tbl = self.table
file = open(filename, 'r')
code = file.read()
file.close()
for m in re_imp.finditer(code):
g = m.group
tbl[g(2)] = eval(g(3))
Logs.debug('env: %s' % str(self.table))
def get_destdir(self):
"return the destdir, useful for installing"
if self.__getitem__('NOINSTALL'): return ''
return Options.options.destdir
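# Usage sketch: a hedged example of how an Environment is typically used from
# within a waf configure/build context (the Options module must already be
# initialised by waf); the cache file path below is hypothetical.
#
# env = Environment()
# env['CXXFLAGS'] = ['-O2']
# env.append_unique('CXXFLAGS', '-g')
# env.store('/tmp/cache.py')     # written out as "KEY = repr(value)" lines
# env2 = Environment()
# env2.load('/tmp/cache.py')     # parsed back with re_imp and eval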
|
Over the last five years, the price of ivory has rocketed with reports of Asian dealers paying in excess of US$1,000 per kilo for the tip of the tusk alone.
Elephant poaching for ivory and bushmeat in the Democratic Republic of Congo remains extreme, and the illegal smuggling of ivory is coming through Uganda headed for China and Thailand. Elephants are once again in danger of being wiped out.
The dramatic rise in the illegal trade in African elephant ivory has been marked by a tripling in the volume of ivory seized by law enforcement agencies (Traffic / INTERPOL). Containers with false compartments have been seized with tonnes of ivory concealed inside. But how do we know where it has come from – where was the poaching?
The percentage of remaining elephants being killed now may, in fact, be at its highest in history, representing 8% of the 470,000 elephants remaining in Africa (IUCN 2006), a percentage even higher than that leading up to the 1989 ivory ban when the population was 2-3 times its present size. Poaching in Kenya has risen alarmingly over the past two years.
Little actionable information has been available on the supply side, even though this may be the most effective place to contain this illegal trade.
It is the place where the elephants are being killed that needs to be identified, before the ivory enters a complex web of criminal activity. Globalization of free trade, coupled with the large and increasing number of containers shipped around the world and the limited capacity of authorities to search them, is increasing the difficulties of policing this trade. China, where most of the ivory ends up, also has a growing capacity to extract ivory from the continent as it aggressively expands its mineral exploitation rights across Africa.
Wherever ivory is seized or on sale, its DNA genetic profile provides the ‘finger print’ of its true origin. By applying DNA analysis technology, Professor Sam Wasser and his team at the University of Washington Center for Conservation Biology have developed the first method to track the origin of these large ivory seizures, evidence which can be used in court.
This project is a ‘one-off’ and UCF is assisting the University of Washington by collecting samples from elephant populations across Uganda. In time our reach will also cover regions of Southern Sudan and Eastern Democratic Republic of Congo.
Working with the Uganda Wildlife Authority, UCF is attempting to visit every elephant habitat across Uganda and take samples of elephant dung to help provide a DNA profile of elephants from this region of Africa. Once completed the profile provides an evidence base to identify the origin of ivory and indeed, the possible smuggling routes. Equipped with this knowledge, law enforcement agencies can focus on activities to combat the trade at its original source.
Identify countries that are major sources of poached ivory; these countries are often different from where the ivory is shipped and thus untraceable by most other methods.
Direct law enforcement authorities to these poaching ‘hot spots’, as well as provide the bases to guide and pressure these countries to police more effectively their illegal trade.
Educate the international public and wildlife authorities about the need to increase support for combating illegal trade in hugely under resourced regions.
It will be possible – internationally – for convictions and political pressure to be made, based on evidence.
We will understand the genetic profile of elephants across Uganda for the first time.
The work is part of a collaborative, aggressive campaign to thwart the illegal ivory trade over the next five years and is aimed at maximizing the amount and flow of information available to national and international law enforcement authorities. UCF believes that this approach offers the greatest hope of stopping the illegal ivory trade at its source, before the elephants are actually killed.
Phases 1 and 2 of this project have been completed and focused on the easily accessible and large elephant populations in the major national parks of Uganda.
Further financial support is sought to complete the last and third phase, which will involve collecting samples from the more remote and transboundary populations (Democratic Republic of Congo and Sudan). These populations are mostly unknown and are in largely inaccessible areas, so this work will also provide critical updates about the elephants’ welfare for the African Elephant Database. Furthermore the DNA will provide a genetic profile that can be used to consider the biogeography of the region and potential hybridization of the Savanna and Forest elephants.
This project is extremely important, especially as the vast majority of ivory is thought to come from the DRC and smuggled through Uganda. Alarmingly, across this region of Africa many of the elephant populations receive no international support at all, in terms of policing or research, leaving all wildlife completely exposed to poaching. In carrying out this project, UCF will also be determining whether elephants and other wildlife still remain in the least accessible Protected Areas and forest reserves.
Uganda remains highly vulnerable to poaching and ivory smuggling, and it is critical to help increase the capacity to govern the illegal trade for the Forest and Savanna elephants.
Grants towards this work have been received from Seaworld Busch, Born Free, Disney and Rufford. UCF is proud to be associated with such a high profile initiative.
|
from django.conf import settings
from django.contrib.sites.models import Site
from django.core.mail import send_mail
from django.template.loader import get_template
from accounts.email import unsubscribe_token
class NotificationEmail(object):
plain_template = ''
html_template = ''
def __init__(self,notification):
self.notification = notification
def get_context_data(self):
site = Site.objects.get_current()
context = {
'notification': self.notification,
'site': site,
'domain': 'http://{}'.format(site.domain),
'unsubscribe_token': unsubscribe_token(self.notification.recipient)
}
return context
def can_email(self):
if not getattr(settings,'EMAIL_ABOUT_NOTIFICATIONS',False):
return False
recipient = self.notification.recipient
return not recipient.userprofile.never_email
def send(self):
if not self.can_email():
return
subject = self.get_subject()
context = self.get_context_data()
plain_content = get_template(self.plain_template).render(context)
html_content = get_template(self.html_template).render(context)
from_email = '{title} <{email}>'.format(title=settings.SITE_TITLE, email=settings.DEFAULT_FROM_EMAIL)
recipient = self.notification.recipient
recipient_email = '{name} <{email}>'.format(name=recipient.get_full_name(), email=recipient.email)
send_mail(subject, plain_content, html_message=html_content, from_email=from_email, recipient_list=(recipient_email,))
class EditorItemNotificationEmail(NotificationEmail):
def __init__(self, *args, **kwargs):
super().__init__(*args,**kwargs)
self.editoritem = self.notification.target
self.project = self.editoritem.project
def get_subject(self):
return "[{project}] {user} {verb} \"{item}\"".format(project=self.project.name, user=self.notification.actor.get_full_name(), verb=self.notification.verb, item=self.editoritem.name)
def get_context_data(self):
context = super().get_context_data()
context.update({
'editoritem': self.editoritem,
'project': self.project,
})
return context
class StampNotificationEmail(EditorItemNotificationEmail):
plain_template = 'notifications/email/stamp.txt'
html_template = 'notifications/email/stamp.html'
def get_context_data(self):
stamp = self.notification.action_object
context = super().get_context_data()
context.update({
'stamp': stamp,
})
return context
def can_email(self):
recipient = self.notification.recipient
if not recipient.userprofile.email_about_stamps:
return False
return super().can_email()
class CommentNotificationEmailMixin:
def get_context_data(self):
comment = self.notification.action_object
context = super().get_context_data()
context.update({
'comment': comment,
})
return context
def can_email(self):
recipient = self.notification.recipient
if not recipient.userprofile.email_about_comments:
return False
return super().can_email()
class EditorItemCommentNotificationEmail(CommentNotificationEmailMixin,EditorItemNotificationEmail):
plain_template = 'notifications/email/editoritem_comment.txt'
html_template = 'notifications/email/editoritem_comment.html'
class ProjectCommentNotificationEmail(CommentNotificationEmailMixin,NotificationEmail):
plain_template = 'notifications/email/project_comment.txt'
html_template = 'notifications/email/project_comment.html'
def __init__(self,*args,**kwargs):
super().__init__(*args,**kwargs)
self.project = self.notification.target
def get_subject(self):
return "[{project}] Comment by {user}".format(project=self.project.name, user=self.notification.actor.get_full_name())
def get_context_data(self):
context = super().get_context_data()
context.update({
'project': self.project,
})
return context
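# Usage sketch: a hedged example of how a new notification type might plug
# into the classes above; the subclass name, template paths and subject line
# are hypothetical, not part of the original app.
#
# class ProjectInviteNotificationEmail(NotificationEmail):
#     plain_template = 'notifications/email/invite.txt'
#     html_template = 'notifications/email/invite.html'
#
#     def get_subject(self):
#         return "You have been invited to {}".format(self.notification.target.name)
#
# ProjectInviteNotificationEmail(notification).send()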
|
When you enable the rich text experience, server-side synchronization and appointment activities support rich text. With the rich text editor, appointment descriptions can contain rich text.
Create and synchronize appointments with rich text content in the description for an improved experience in both web and the Unified Interface.
Include content from an HTML web page right into the description field or create your own custom markup using the appointment editor. Appointments tracked from Outlook will also render rich text content in Dynamics 365 for Customer Engagement apps.
Server-side synchronization synchronizes the rich-text HTML content of appointment descriptions into Dynamics 365 for Customer Engagement apps.
To enable rich text, your Dynamics 365 for Customer Engagement apps version must be Dynamics 365 for Customer Engagement apps version 9.0, or a later version.
After enabling, if you choose to disable the setting, the appointment editor description field will reset to the plain-text field. Previously synchronized appointments’ descriptions will still contain rich-text HTML markup.
Although the rich text editor can be used with appointment activities, it can’t be used with recurring appointments. When an appointment that contains rich text is converted to a recurring appointment, the description field for the activity is converted to a plain-text field containing rich text content.
To enable the rich text editor on appointments, you need to configure the AppointmentRichEditorExperience organization setting for your Dynamics 365 for Customer Engagement apps instance by running the PowerShell sample below.
The PowerShell cmdlets require the Dynamics 365 for Customer Engagement apps Microsoft.Xrm.Data.PowerShell module. The sample below includes the cmdlet to install the module.
|
"""
Django settings for art_app project.
Generated by 'django-admin startproject' using Django 1.9.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ov4p%2ls7+tmi&@qt@=3_n+px*oxqk#+%jeza93j!1p!-cr$n9'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
CORS_ORIGIN_WHITELIST = (
'localhost:3000',
)
SITE_ID = 1
AUTH_USER_MODEL = 'account.User'
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'corsheaders',
'rest_framework',
'rest_framework_jwt',
'djoser',
'graphene_django',
'apps.account',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'corsheaders.middleware.CorsMiddleware',
'apps.account.middleware.JWTAuthenticationMiddleware',
]
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.csrf',
'django.template.context_processors.static',
],
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
]
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.getenv('DB_NAME'),
'HOST': os.getenv('DB_HOST'),
'USER': os.getenv('DB_USER'),
'PASSWORD': os.getenv('DB_PASSWORD'),
'PORT': os.getenv('DB_PORT'),
},
}
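# Example environment for the DATABASES block above (hypothetical values,
# not part of the original settings):
#   export DB_NAME=art_app
#   export DB_HOST=localhost
#   export DB_USER=art_app
#   export DB_PASSWORD=change-me
#   export DB_PORT=5432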
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'de'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'collected_static')
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
MEDIA_ROOT = os.path.join(PROJECT_ROOT, "media")
MEDIA_URL = "/media/"
LANGUAGES = [
('de', 'German'),
]
GRAPHENE = {
'SCHEMA': 'config.schema.schema'
}
DJOSER = {
'DOMAIN': os.environ.get('DJANGO_DJOSER_DOMAIN', 'localhost:3000'),
'SITE_NAME': os.environ.get('DJANGO_DJOSER_SITE_NAME', 'my site'),
'PASSWORD_RESET_CONFIRM_URL': '?action=set-new-password&uid={uid}&token={token}',
'ACTIVATION_URL': 'activate?uid={uid}&token={token}',
'SEND_ACTIVATION_EMAIL': True,
}
JWT_AUTH = {
'JWT_ALLOW_REFRESH': True,
}
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
|
The Wilson 859902 Lightning Surge Protector protects your 50 Ohm amplifiers (Wilson Pro Commercial Signal Boosters and weBoost Connect-X series) from power surges caused by lightning strikes. This device comes with N-connectors and a replaceable fuse cartridge. It is best installed between the amplifier and the outside antenna. Proper grounding is also recommended.
|
from ctypes import *
from functools import partial
import sys
_libchewing = None
if sys.platform == "win32": # Windows
import os.path
# find in current dir first
dll_path = os.path.join(os.path.dirname(__file__), "chewing.dll")
if not os.path.exists(dll_path):
dll_path = "chewing.dll" # search in system path
_libchewing = CDLL(dll_path)
else: # UNIX-like systems
_libchewing = CDLL('libchewing.so.3')
_libchewing.chewing_commit_String.restype = c_char_p
_libchewing.chewing_buffer_String.restype = c_char_p
_libchewing.chewing_cand_String.restype = c_char_p
_libchewing.chewing_zuin_String.restype = c_char_p
_libchewing.chewing_aux_String.restype = c_char_p
_libchewing.chewing_get_KBString.restype = c_char_p
def Init(datadir, userdir):
return _libchewing.chewing_Init(datadir, userdir)
class ChewingContext:
def __init__(self, **kwargs):
if not kwargs:
self.ctx = _libchewing.chewing_new()
else:
syspath = kwargs.get("syspath", None)
userpath = kwargs.get("userpath", None)
self.ctx = _libchewing.chewing_new2(
syspath,
userpath,
None,
None)
def __del__(self):
_libchewing.chewing_delete(self.ctx)
def __getattr__(self, name):
func = 'chewing_' + name
if hasattr(_libchewing, func):
wrap = partial(getattr(_libchewing, func), self.ctx)
setattr(self, name, wrap)
return wrap
else:
raise AttributeError(name)
def Configure(self, cpp, maxlen, direction, space, kbtype):
self.set_candPerPage(cpp)
self.set_maxChiSymbolLen(maxlen)
self.set_addPhraseDirection(direction)
self.set_spaceAsSelection(space)
self.set_KBType(kbtype)
|
I love scuba, period! I love the sound of bubbles underwater, interaction with marine life, the travel to exotic places and meeting people of different cultures. What I love even more is teaching you my passion! From the first time you take a breath underwater, to the look on your face when Aqua Hut takes you to a foreign land, scuba will show you a whole new world, and travel is great for the soul. Hope to see you in class.
|
#!/usr/bin/python
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
import sys
import time
import argparse
import ConfigParser
from vnc_api.vnc_api import *
from vnc_admin_api import VncApiAdmin
from cfgm_common.exceptions import *
class AnalyticsNodeProvisioner(object):
def __init__(self, args_str=None):
self._args = None
if not args_str:
args_str = ' '.join(sys.argv[1:])
self._parse_args(args_str)
connected = False
tries = 0
while not connected:
try:
self._vnc_lib = VncApiAdmin(
self._args.use_admin_api,
self._args.admin_user, self._args.admin_password,
self._args.admin_tenant_name,
self._args.api_server_ip,
self._args.api_server_port, '/',
auth_host=self._args.openstack_ip,
api_server_use_ssl=self._args.api_server_use_ssl)
connected = True
except ResourceExhaustionError: # haproxy throws 503
if tries < 10:
tries += 1
time.sleep(3)
else:
raise
gsc_obj = self._vnc_lib.global_system_config_read(
fq_name=['default-global-system-config'])
self._global_system_config_obj = gsc_obj
if self._args.oper == 'add':
self.add_analytics_node()
elif self._args.oper == 'del':
self.del_analytics_node()
else:
print "Unknown operation %s. Only 'add' and 'del' supported"\
% (self._args.oper)
# end __init__
def _parse_args(self, args_str):
'''
Eg. python provision_analytics_node.py --host_name a3s30.contrail.juniper.net
--host_ip 10.1.1.1
--api_server_ip 127.0.0.1
--api_server_port 8082
--api_server_use_ssl False
--oper <add | del>
'''
# Source any specified config/ini file
# Turn off help, so we print all options in response to -h
conf_parser = argparse.ArgumentParser(add_help=False)
conf_parser.add_argument("-c", "--conf_file",
help="Specify config file", metavar="FILE")
args, remaining_argv = conf_parser.parse_known_args(args_str.split())
defaults = {
'api_server_ip': '127.0.0.1',
'api_server_port': '8082',
'api_server_use_ssl': False,
'oper': 'add',
}
ksopts = {
'admin_user': 'user1',
'admin_password': 'password1',
'admin_tenant_name': 'default-domain'
}
if args.conf_file:
config = ConfigParser.SafeConfigParser()
config.read([args.conf_file])
defaults.update(dict(config.items("DEFAULTS")))
if 'KEYSTONE' in config.sections():
ksopts.update(dict(config.items("KEYSTONE")))
# Override with CLI options
# Don't suppress add_help here so it will handle -h
parser = argparse.ArgumentParser(
# Inherit options from config_parser
parents=[conf_parser],
# print script description with -h/--help
description=__doc__,
# Don't mess with format of description
formatter_class=argparse.RawDescriptionHelpFormatter,
)
defaults.update(ksopts)
parser.set_defaults(**defaults)
parser.add_argument(
"--host_name", help="hostname name of analytics node", required=True)
parser.add_argument("--host_ip", help="IP address of analytics node", required=True)
parser.add_argument("--api_server_port", help="Port of api server")
parser.add_argument("--api_server_use_ssl",
help="Use SSL to connect with API server")
parser.add_argument(
"--oper", default='add',
help="Provision operation to be done(add or del)")
parser.add_argument(
"--admin_user", help="Name of keystone admin user")
parser.add_argument(
"--admin_password", help="Password of keystone admin user")
parser.add_argument(
"--admin_tenant_name", help="Tenamt name for keystone admin user")
parser.add_argument(
"--openstack_ip", help="IP address of openstack node")
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument(
"--api_server_ip", help="IP address of api server",
nargs='+', type=str)
group.add_argument("--use_admin_api",
default=False,
help = "Connect to local api-server on admin port",
action="store_true")
self._args = parser.parse_args(remaining_argv)
# end _parse_args
def add_analytics_node(self):
gsc_obj = self._global_system_config_obj
analytics_node_obj = AnalyticsNode(
self._args.host_name, gsc_obj,
analytics_node_ip_address=self._args.host_ip)
analytics_node_exists = True
try:
analytics_node_obj = self._vnc_lib.analytics_node_read(
fq_name=analytics_node_obj.get_fq_name())
except NoIdError:
analytics_node_exists = False
if analytics_node_exists:
self._vnc_lib.analytics_node_update(analytics_node_obj)
else:
try:
self._vnc_lib.analytics_node_create(analytics_node_obj)
except RefsExistError:
print "Already created!"
# end add_analytics_node
def del_analytics_node(self):
gsc_obj = self._global_system_config_obj
analytics_node_obj = AnalyticsNode(self._args.host_name, gsc_obj)
self._vnc_lib.analytics_node_delete(
fq_name=analytics_node_obj.get_fq_name())
# end del_analytics_node
# end class AnalyticsNodeProvisioner
def main(args_str=None):
AnalyticsNodeProvisioner(args_str)
# end main
if __name__ == "__main__":
main()
|
El Niño – A little boy causing a lot of problems!
My name is Esteban and I live in Ecuador. Barely a week ago, it was a hot, heavy day. It felt as though I was walking through steam or moving through wet cotton. My friends and I tried to play ball, but we just could not. When my friend Marco kicked the ball out of the playground, no one ran after it. It was just too much of an effort. Even standing still, we were too hot.
So I stayed home with my parents, and I learnt about this phenomenon called El Niño. In my mother tongue, which is Spanish, it means little boy. The early settlers on the western coast of South America called it this because it hits hardest around Christmas. My mom explained that El Niño happens when the temperature of the normally cool surface water of the ocean increases during a few months. It can cause big storms like the one we had last week.
My mom said she would buy me a book about meteorology so that I can find out more. Maybe later I will become a meteorologist, and I’ll be the one warning my dad about El Niño!
|
import xdrlib
import struct
from collections import namedtuple
class TypeBase(object):
def pack(self, stream, value):
raise NotImplementedError()
def unpack(self, stream):
raise NotImplementedError()
class Type(TypeBase):
def __init__(self, packer, unpacker):
self.pack = packer
self.unpack = unpacker
class TypeFactoryMeta(type):
pass
class TypeFactory(TypeBase):
__metaclass__ = TypeFactoryMeta
class CustomSimpleType(TypeBase):
def __init__(self, fmt):
self.fmt = fmt
self.length = struct.calcsize(fmt)
def pack(self, stream, value):
stream.get_buffer().write(struct.pack(self.fmt, value))
def unpack(self, stream):
i = stream.get_position()
j = i + self.length
stream.set_position(j)
data = stream.get_buffer()[i:j]
if len(data) < self.length:
raise EOFError
return struct.unpack(self.fmt, data)[0]
def make_xdr_type(name):
packer = getattr(xdrlib.Packer, 'pack_{}'.format(name))
unpacker = getattr(xdrlib.Unpacker, 'unpack_{}'.format(name))
return Type(packer, unpacker)
class FixedLengthString(TypeFactory):
def __init__(self, length):
self.length = length
def pack(self, stream, s):
stream.pack_fstring(self.length, s)
def unpack(self, stream):
return stream.unpack_fstring(self.length)
class FixedLengthData(TypeFactory):
def __init__(self, length):
self.length = length
def pack(self, stream, s):
stream.pack_fopaque(self.length, s)
def unpack(self, stream):
return stream.unpack_fopaque(self.length)
class ComplexType(TypeFactory):
def __init__(self, name, fields):
self.name = name
self.fields = fields
self.model = namedtuple(name, [f[0] for f in fields])
def __repr__(self):
return '{}({})'.format(self.name, ', '.join(f[0] for f in self.fields))
def __str__(self):
return self.name
def unpack(self, stream):
values = (type.unpack(stream) for _, type in self.fields)
return self.model(*values)
def pack(self, stream, value):
for name, type in self.fields:
type.pack(stream, getattr(value, name))
class FixedLengthArray(TypeFactory):
def __init__(self, items_type, length):
self.items_type = items_type
self.length = length
def pack(self, stream, items):
packer = lambda item: self.items_type.pack(stream, item)
stream.pack_farray(self.length, items, packer)
def unpack(self, stream):
unpacker = lambda: self.items_type.unpack(stream)
return stream.unpack_farray(self.length, unpacker)
class VariableLengthArray(TypeFactory):
def __init__(self, items_type, maxlength):
self.maxlength = maxlength
self.items_type = items_type
def pack(self, stream, items):
packer = lambda item: self.items_type.pack(stream, item)
stream.pack_array(items, packer)
def unpack(self, stream):
unpacker = lambda: self.items_type.unpack(stream)
return stream.unpack_array(unpacker)
class Optional(TypeFactory):
def __init__(self, type):
self.type = type
def pack(self, stream, v):
if v:
stream.pack_bool(True)
self.type.pack(stream, v)
else:
stream.pack_bool(False)
def unpack(self, stream):
if stream.unpack_bool():
return self.type.unpack(stream)
else:
return None
class Enum(TypeFactory):
def __init__(self, name, values):
self.name = name
self.values = values
self.ids = set([v[1] for v in values])
self.keys = set([v[0] for v in values])
for k, v in self.values:
setattr(self, k, v)
self._id_to_key = {v: k for k, v in values}
self._key_to_id = {k: v for k, v in values}
def __str__(self):
return self.name
def key(self, id):
return self._id_to_key[id]
def id(self, key):
return self._key_to_id[key]
def pack(self, stream, v):
if isinstance(v, int):
assert v in self.ids
else:
v = self.id(v)
return stream.pack_enum(v)
def unpack(self, stream):
v = stream.unpack_enum()
assert v in self.ids
return v
def __iter__(self):
return iter(self.values)
int = make_xdr_type('int')
uint = make_xdr_type('uint')
hyper = make_xdr_type('hyper')
uhyper = make_xdr_type('uhyper')
char = CustomSimpleType('>b')
uchar = CustomSimpleType('>B')
short = CustomSimpleType('>h')
ushort = CustomSimpleType('>H')
string = make_xdr_type('string')
opaque = make_xdr_type('opaque')
fstring = FixedLengthString
fopaque = FixedLengthData
farray = FixedLengthArray
array = VariableLengthArray
not_implemented = TypeBase
compound = ComplexType
enum = Enum
optional = Optional
def istype(k, v):
return k.islower() and isinstance(v, (TypeBase, TypeFactoryMeta))
__all__ = [k for k, v in locals().items() if istype(k, v)]
TYPES = {k: v for k, v in locals().items() if istype(k, v)}
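# Example usage (an illustrative sketch, not part of the module; the 'Point'
# compound type below is hypothetical):
#
# import xdrlib
# point = compound('Point', [('x', int), ('y', int)]) # 'int' is the XDR type above
# packer = xdrlib.Packer()
# point.pack(packer, point.model(x=1, y=2))
# unpacker = xdrlib.Unpacker(packer.get_buffer())
# assert point.unpack(unpacker) == point.model(x=1, y=2)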
|
Pore Cleaning Lotion helps remove sebum without irritating the skin, softly melting it away instead of squeezing it out.
With this pack actually used in Korean spas and dermatology clinics, you can enjoy professional skin care at home.
This product is suitable for all facial parts: oily/greasy area, area with blackheads or whiteheads, cheek, chin, forehead, etc.
Its natural green ingredients (EWG green grade) and patented elements are safe even for pregnant women to apply.
As the nose strip packs on the market rip sebum off when the pack is removed, they have dramatic visual effects. However, these nose packs irritate and expand pores, resulting in more sebum that forms more blackheads.
Pore Cleaning Lotion makes your nose smooth without widening pores or irritating the skin.
Soak a cotton pad in Pore Cleaning Lotion and place it on the T-zone or greasy areas for 15-20 minutes, wetting the pad often with Pore Cleaning Lotion. After the set time, gently press the sebum excreted from the pores with a cotton bud and remove it. Rinse the area with warm water.
A single application is not enough to eliminate blackheads completely.
Continual care will reward you with clear pores.
Use it at most once or twice a week after washing your face, before starting your usual skincare regime.
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from itertools import groupby
from vilya.models.utils.decorators import cached_property
from vilya.models.git.diff.patch import Patch
from vilya.models.git.diff.delta import Delta
class Diff(object):
def __init__(self, repo, diff, linecomments=[]):
self.repo = repo
self.raw_diff = diff
self.old_ref = None
self.new_ref = None
self.old_sha = diff['old_sha']
self.new_sha = diff['new_sha']
self._additions = 0
self._deletions = 0
self._length = 0
# instantiate Patches
# line comments groupby path
keyfunc_path = lambda x: x.old_path
linecomments_by_path = {}
if linecomments:
linecomments.sort(key=keyfunc_path)
linecomments_by_path = dict(
(k, list(v)) for k, v in groupby(linecomments,
key=keyfunc_path))
self._linecomments_by_path = linecomments_by_path
# TODO: MAX_DIFF_PATCHES
@property
def additions(self):
if self._additions:
return self._additions
for p in self.patches:
self._additions += p.additions
return self._additions
@property
def deletions(self):
if self._deletions:
return self._deletions
for p in self.patches:
self._deletions += p.deletions
return self._deletions
@property
def length(self):
if self._length:
return self._length
self._length = len(self.patches)
return self._length
@cached_property
def deltas(self):
repo = self.repo
diff = self.raw_diff
return [Delta(repo, self, p)
for p in diff['patches']]
@cached_property
def patches(self):
repo = self.repo
diff = self.raw_diff
linecomments_by_path = self._linecomments_by_path
# TODO: use generator
return [Patch(repo, self, p, linecomments_by_path.get(p['old_file_path'], []))
for p in diff['patches']]
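# Example usage (an illustrative sketch; 'repo' and 'raw_diff' are assumed to be
# a vilya repository object and a raw diff dict with 'old_sha', 'new_sha' and
# 'patches' keys):
#
# diff = Diff(repo, raw_diff)
# print(diff.additions, diff.deletions, diff.length)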
|
This blog lists the difficulties and drawbacks faced when using the Application Cache. Solutions to most of these issues are described in another article called Tips for using Application Cache?
Double Refresh Issue - This is perhaps the biggest problem with manifest files. If the content for the page has been updated, the user will view the old content that was in the Application Cache until it has been updated. However, the Application Cache is not updated until the user views the page, as shown in the diagram below. This ultimately means that when the manifest file is updated (triggering the Application Cache to be updated) the user will not see the new content until they view the page for the second time.
After content has been updated, the first time a user views the page they will view the old content loaded from the Application Cache. In the background the Application Cache will be updated and will receive the updated content. The second time a user views the page they will see the updated content.
This is particularly problematic if not all the page resources are in the Application Cache. For example if a manifest file is "open" (as it has a * in the NETWORK section), any resource not listed in the CACHE (or FALLBACK) section will not be stored in the Application Cache. These resources will be downloaded freshly every time the page is viewed (however normal caching rules apply for Browser Cache, Proxy Caches, etc). The user may, therefore, get fresh copies of any resources not listed in the manifest file and stale copies of those resources listed in the manifest file until they refresh the page for a second time.
Atomic - Once a file is cached, the browser will continue to show the cached version, even if you change the file on the server. If you want to update a single file in the Application Cache, all files must be updated at the same time. Any change made to the manifest file will cause the entire set of files to be downloaded again.
|
#
# Copyright 2012 SAS Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from bigitr import errhandler
from bigitr import util
class Merger(object):
def __init__(self, ctx):
self.ctx = ctx
self.err = errhandler.Errors(ctx)
def mergeBranches(self, repository, Git, requestedBranch=None):
onerror = self.ctx.getMergeError()
try:
for gitbranch in sorted(self.ctx.getMergeBranchMaps(repository).keys()):
if requestedBranch is None or gitbranch == requestedBranch:
self.mergeBranch(repository, Git, gitbranch)
except Exception as e:
self.err(repository, onerror)
@util.saveDir
def mergeBranch(self, repository, Git, gitbranch):
Git.initializeGitRepository(create=False)
self.mergeFrom(repository, Git, gitbranch)
def mergeFrom(self, repository, Git, gitbranch):
success = True
# try to merge downstream branches even if there was nothing to
# commit, because a merge conflict might have been resolved
if not self.merge(repository, Git, gitbranch):
success = False
# Status can report clean with .gitignored files existing
# Remove any .gitignored files added by the "cvs export"
Git.pristine()
if not success:
raise RuntimeError('merge failed for branch %s: see %s' %(
gitbranch, Git.log.thiserr))
def merge(self, repository, Git, gitbranch):
success = True
Git.pristine()
for target in self.ctx.getMergeBranchMaps(repository
).get(gitbranch, set()):
Git.checkout(target)
Git.mergeFastForward('origin/' + target)
mergeMsg = "Automated merge '%s' into '%s'" %(gitbranch, target)
rc = Git.mergeDefault(gitbranch, mergeMsg)
if rc != 0:
Git.log.mailLastOutput(mergeMsg)
success = False
else:
Git.push('origin', target, target)
Git.runImpPostHooks(target)
rc = self.merge(repository, Git, target)
if not rc:
success = False
return success
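# Example usage (an illustrative sketch; 'ctx', 'repository' and 'gitRepo' are
# assumed to be a bigitr context, a repository name, and a Git wrapper object):
#
# merger = Merger(ctx)
# merger.mergeBranches(repository, gitRepo) # merge every configured branch
# merger.mergeBranches(repository, gitRepo, requestedBranch='master') # one branch only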
|
"Mega cap" status is a very exclusive club.
In order to be considered a "mega cap" (as opposed to a "large cap"), a company needs to have a total market cap of $200 billion or more.
These three companies that I listed above are the 800 pound gorillas of the US equity markets. Ask people to name the largest publicly traded companies in the US, and there is a good chance that they will name all three. These companies are just HUGE.
Apple, believe it or not, is on the verge of joining this very exclusive club.
Apple closed on Friday at $218.95, which means that it currently sports a market cap of $198.54 billion, just shy of "mega cap" status.
When people think of "big" companies, they normally don't think of Apple. Apple has done a good job of presenting itself as a smaller company that is less "corporate".
However, there is nothing small about the $198.54 billion market cap that Apple currently sports.
Berkshire Hathaway ($194.25 billion) and Google ($179.41 billion) are both working hard at regaining "mega cap" status after losing it during the market downturn of 2008-09.
$198.5 billion in market cap is a lot of iPods and iPhones...
|
import subprocess
import re
import os
import tempfile
import toc
def run(command, cwd=None):
proc = subprocess.Popen(
command,
shell=True,
universal_newlines=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
cwd=cwd
)
stdout, stderr = proc.communicate()
return (proc.returncode, stdout.strip(' \n'), stderr.strip(' \n'))
def write_and_close(contents):
(fd, path) = tempfile.mkstemp()
with os.fdopen(fd, 'w') as fp:
fp.write(contents)
return path
with open('SPEC.md') as fp:
contents = fp.read()
toc.modify_and_write("SPEC.md")
source_regex = re.compile(r'```wdl(.*?)```', re.DOTALL)
count = 0
wdl_lines = 0
def lines(string, index=None):
string = string[:index] if index else string
return sum([1 for c in string if c == '\n']) + 1
for match in source_regex.finditer(contents):
count += 1
wdl_source = match.group(1)
wdl_lines += lines(wdl_source)
line = lines(contents, match.start(1))
wdl_file = write_and_close(wdl_source)
cmd = 'java -jar ../cromwell/target/scala-2.11/cromwell-0.9.jar parse ' + wdl_file
(rc, stdout, stderr) = run(cmd)
if rc != 0:
print("Line {}: Failure".format(line))
print(" rc: " + str(rc))
print(" stdout: " + write_and_close(stdout))
print(" stderr: " + write_and_close(stderr))
print(" WDL: " + wdl_file)
print(" Command: " + cmd)
else:
print("Line {}: Success".format(line))
os.unlink(wdl_file)
print('Total: {}'.format(wdl_lines))
|
In accordance with my wishes, my father, Syed Farzand Husain, took me with him for the first time in a Tonga to admit me to the arts college in Lucknow. Halfway through our journey, we were engulfed in heavy rainfall. And as soon as the Tonga came onto the Dali Gunj bridge to cross the Gomti river, I could see the magical view of the rising minarets and the domes of historical buildings in the pouring rain. I was mesmerized.
I was unaware when we crossed the bridge and the narrow strip of the road, with its cover of trees. The Tonga came to a halt with a sudden jerk, and it was then that I came to my senses. The gate of the arts college was right in front of us. My father’s black sherwani, his black cap worn with finesse and my attire, which I had put on with a lot of care, had all been sacrificed to the intense rain. Ignoring my father’s concerns, my inquisitive eyes went straight through the gate as I took in the magnificent life-size sculptures displayed on the lawns.
|
'''
FromScratchMult.py
Initialize params of HModel with multinomial observations from scratch.
'''
import numpy as np
from scipy.special import digamma
from scipy.cluster import vq
hasRexAvailable = True
try:
import KMeansRex
except ImportError:
hasRexAvailable = False
def init_global_params(hmodel, Data, initname='randexamples',
seed=0, K=0, initarg=None, **kwargs):
''' Initialize hmodel's global parameters in-place.
Returns
-------
Nothing. hmodel is updated in place.
Global Parameters are:
lamA, lamB = K x K stochastic block matrix
theta = N x K matrix of community membership probabilities
'''
PRNG = np.random.RandomState(seed)
N = Data.nNodeTotal
if initname == 'randexamples':
# Generate a sparse matrix given observed positive edges
#Data.to_sparse_matrix()
# Create assortative stochastic block matrix
lamA = np.zeros( K ) + (Data.nPosEdges / K) # assortative ( K x 1 ) vs. (K x K)
lamB = np.zeros( K ) + (Data.nAbsEdges / (K*K)) # assortative
# Create theta used for
theta = np.zeros( (N,K) )
alpha = np.ones(K) / K
for ii in xrange(N):
theta[ii, :] = PRNG.dirichlet(alpha)
# Initialize global stick-breaking weights beta to be 1/K (uniform)
beta = np.ones(K) / K
# Set the global parameters for the hmodel
hmodel.set_global_params(K=K, beta=beta, lamA=lamA, lamB=lamB, theta=theta)
return
else:
raise NotImplementedError('Unrecognized initname ' + initname)
|
The Central Suction System FZ VARIO is an intelligent system with vacuum funnels. To ensure maximum productivity with this unit, the system has a very powerful motor and filtering system. All the handpiece accessories work well in removing dust from dental laboratories, irrespective of size. The many vacuum positions add to the success of its suction functionality.
|
'''
Pinger class in nplib library for the np (Network Ping)
Copyright (C) 2015
Joseba Martos <[email protected]>
This file is part of np (Network Ping)
Web site: http://otzarri.net/np
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import netaddr
import subprocess
import threading
try:
import queue
except ImportError:
import Queue as queue # lint:ok
class Pinger:
'''Pinger object'''
hosts_up = list()
hosts_down = list()
def __init__(self, localnetaddr):
self.localnet = netaddr.IPNetwork(localnetaddr)
self.pingqueue = queue.Queue()
self.count = '1'
self.hosts_up = list()
self.hosts_down = list()
def pinger(self,):
'''Sends ping'''
while True:
ip = str(self.pingqueue.get())
retcode = subprocess.call("ping -c %s %s" % (self.count, ip),
shell=True,
stdout=open('/dev/null', 'w'),
stderr=subprocess.STDOUT)
if retcode == 0:
self.hosts_up.append(netaddr.IPAddress(ip))
Pinger.hosts_up.append(netaddr.IPAddress(ip))
else:
self.hosts_down.append(netaddr.IPAddress(ip))
Pinger.hosts_down.append(netaddr.IPAddress(ip))
self.pingqueue.task_done()
def run(self):
thread_num = self.localnet.size - 2
for i in range(thread_num):
worker = threading.Thread(target=self.pinger)
worker.daemon = True
worker.start()
for ip in self.localnet.iter_hosts():
self.pingqueue.put(ip)
self.pingqueue.join()
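# Example usage (an illustrative sketch, not part of the library):
#
# pinger = Pinger('192.168.1.0/24')
# pinger.run()
# print(pinger.hosts_up) # IPAddress objects that answered a single ping
# print(pinger.hosts_down) # IPAddress objects that did not answer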
|
This prime, level rear block of 486m² is the perfect block of land on which to build your future family home. With wide 3.5-metre side access, this is the best-value block in Dianella. Located on Camden Street, Dianella, this sought-after block of land is in the perfect location.
At this price, in this sought-after location, this block of land will not last long. Contact Nigel Ross before you miss out!
|
from django.contrib import admin
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from django_flickr_gallery.admin.forms import PhotosetAdminForm
from django_flickr_gallery.utils.date import parse_unix_datetime
def display_attr(func, short_description=None, boolean=False):
def wrap(*args, **kwargs):
return func(*args, **kwargs)
wrap.short_description = short_description
wrap.boolean = boolean
return wrap
class PhotosetAdmin(admin.ModelAdmin):
list_display = ['primary', 'title', 'description', 'count', 'last_update']
form = PhotosetAdminForm
primary = display_attr(
lambda self, x: mark_safe('<img src="%s" width="48px" height="48px" />' % x.primary.small_square_url),
short_description=_('cover'))
title = display_attr(lambda self, x: x.title, short_description=_('title'))
description = display_attr(lambda self, x: x.description, short_description=_('description'))
count = display_attr(lambda self, x: x.count, short_description=_('photos'))
last_update = display_attr(lambda self, x: parse_unix_datetime(x.date_update), short_description=_('last update'))
|
Hi Dale, We aren't a supplier of tyres unfortunately, we are a comparison website. If you need any specifics about tyres, you would need to contact a supplier. Hope that helps.
|
from model.contact import Contact
from selenium.webdriver.common.by import By
import re
class ContactHelper:
def __init__(self, app):
self.app = app
def create(self, contact):
wd = self.app.wd
# go to create contact page
wd.find_element_by_link_text("add new").click()
self.fill_contact_form(contact)
# submit contact creation
wd.find_element_by_xpath("//div[@id='content']/form/input[21]").click()
self.contact_cache = None
def fill_contact_form(self, contact):
wd = self.app.wd
self.change_field_value("firstname", contact.firstname)
#self.change_field_value("middlename", contact.middlename)
self.change_field_value("lastname", contact.lastname)
#self.change_field_value("nickname", contact.nickname)
#self.change_field_value("title", contact.title)
#self.change_field_value("company", contact.company)
#self.change_field_value("address", contact.address)
#self.change_field_value("home", contact.homephone)
#self.change_field_value("mobile", contact.mobile)
#self.change_field_value("work", contact.workphone)
#self.change_field_value("fax", contact.fax)
#self.change_field_value("email", contact.email)
#self.change_field_value("email2", contact.email2)
#self.change_field_value("email3", contact.email3)
#self.change_field_value("homepage", contact.homepage)
#Select(wd.find_element_by_name('bday')).select_by_visible_text(contact.bday)
#Select(wd.find_element_by_name('bmonth')).select_by_visible_text(contact.bmonth)
#self.change_field_value("byear", contact.byear)
#Select(wd.find_element_by_name('aday')).select_by_visible_text(contact.aday)
#Select(wd.find_element_by_name('amonth')).select_by_visible_text(contact.amonth)
#self.change_field_value("ayear", contact.ayear)
#self.change_field_value("address2", contact.address2)
#self.change_field_value("phone2", contact.phone2)
#self.change_field_value("notes", contact.notes)
def change_field_value(self, field_name, text):
wd = self.app.wd
if text is not None:
wd.find_element_by_name(field_name).click()
wd.find_element_by_name(field_name).clear()
wd.find_element_by_name(field_name).send_keys(text)
def open_contact_to_edit_by_index(self, index):
wd = self.app.wd
self.open_home_page()
wd.find_elements_by_name("entry")[index].find_elements(By.TAG_NAME, "td")[7].click()
def open_contact_to_view_by_index(self, index):
wd = self.app.wd
self.open_home_page()
wd.find_elements_by_name("entry")[index].find_elements(By.TAG_NAME, "td")[6].click()
def edit_contact_by_index(self, index, new_contact_data):
wd = self.app.wd
self.open_contact_to_edit_by_index(index)
self.fill_contact_form(new_contact_data)
wd.find_element_by_name("update").click()
self.contact_cache = None
def edit_contact_by_id(self, id, new_contact_data):
wd = self.app.wd
#self.select_contact_by_id(id)
wd.find_element_by_xpath("//a[@href='edit.php?id=%s']" % id).click()
self.fill_contact_form(new_contact_data)
wd.find_element_by_name("update").click()
self.contact_cache = None
def delete_first_contact(self):
self.delete_contact_by_index(0)
def delete_contact_by_index(self, index):
wd = self.app.wd
self.open_home_page()
# check index contact
wd.find_elements_by_name("selected[]")[index].click()
# init deletion
wd.find_element_by_xpath("//div[@id='content']/form[2]/div[2]/input").click()
wd.switch_to_alert().accept()
self.contact_cache = None
def delete_contact_by_id(self, id):
wd = self.app.wd
self.open_home_page()
# check index contact
self.select_contact_by_id(id)
# init deletion
wd.find_element_by_xpath("//div[@id='content']/form[2]/div[2]/input").click()
wd.switch_to_alert().accept()
self.contact_cache = None
def select_contact_by_id(self, id):
wd = self.app.wd
wd.find_element_by_css_selector("input[value='%s']" % id).click()
def open_home_page(self):
wd = self.app.wd
if not wd.current_url.endswith("/index.php"):
wd.find_element_by_link_text("home").click()
def count(self):
wd = self.app.wd
self.open_home_page()
return len(wd.find_elements_by_name("selected[]"))
contact_cache = None
def get_contact_list(self):
if self.contact_cache is None:
wd = self.app.wd
self.open_home_page()
self.contact_cache = []
for row in wd.find_elements_by_name("entry"):
cells = row.find_elements_by_tag_name("td")
lastname = cells[1].text
firstname = cells[2].text
id = cells[0].find_element_by_tag_name("input").get_attribute("value")
all_phones = cells[5].text
all_emails = cells[4].text
self.contact_cache.append(Contact(id = id, firstname = firstname, lastname = lastname, all_phones_from_home_page = all_phones, all_emails_from_home_page = all_emails))
return list(self.contact_cache)
def get_contact_info_from_edit_page(self, index):
wd = self.app.wd
self.open_contact_to_edit_by_index(index)
id = wd.find_element_by_name("id").get_attribute("value")
firstname = wd.find_element_by_name("firstname").get_attribute("value")
lastname = wd.find_element_by_name("lastname").get_attribute("value")
homephone = wd.find_element_by_name("home").get_attribute("value")
workphone = wd.find_element_by_name("work").get_attribute("value")
mobile = wd.find_element_by_name("mobile").get_attribute("value")
phone2 = wd.find_element_by_name("phone2").get_attribute("value")
email = wd.find_element_by_name("email").get_attribute("value")
email2 = wd.find_element_by_name("email2").get_attribute("value")
email3 = wd.find_element_by_name("email3").get_attribute("value")
return Contact(id=id, firstname=firstname, lastname=lastname, homephone=homephone, mobile=mobile, workphone=workphone, phone2=phone2, email=email, email2=email2, email3=email3)
def get_contact_from_view_page(self, index):
wd = self.app.wd
self.open_contact_to_view_by_index(index)
text = wd.find_element_by_id("content").text
homephone = re.search("H: (.*)", text).group(1)
mobile = re.search("M: (.*)", text).group(1)
workphone = re.search("W: (.*)", text).group(1)
phone2 = re.search("P: (.*)", text).group(1)
return Contact(homephone=homephone, mobile=mobile, workphone=workphone, phone2=phone2)
|
Rachel Armstrong is Professor of Experimental Architecture at the Department of Architecture, Planning and Landscape, Newcastle University. She is also a 2010 Senior TED Fellow who is establishing an alternative approach to sustainability that draws on the computational properties of the natural world to develop a 21st-century production platform for the built environment, which she calls ‘living’ architecture. Rachel has frequently been recognized as a pioneer. She has recently been interviewed for PORTER magazine, added to the 2014 Citizens of the Next Century List by Future-ish, and listed on the Wired 2014 Smart List.
She is one of the 2013 ICON 50, and in 2012 Director Magazine described her as one of the ten people in the UK who may shape the UK’s recovery. In the same year she was nominated as one of the nine most inspiring women by Chick Chip magazine, and in 2011 she was featured in BBC Focus Magazine’s ‘ideas that could change the world’. Rachel Armstrong leads Metabolism research in developing artificial biology systems that show the qualities of near-living systems. Her research into protocells is a pioneering effort that contributed to her earlier collaboration with Philip Beesley.
|
# Copyright 2019,2020,2021 Sony Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nnabla as nn
import nnabla.functions as F
from nnabla.parameter import get_parameter_or_create, get_parameter
from nnabla.initializer import (
calc_uniform_lim_glorot,
ConstantInitializer, NormalInitializer, UniformInitializer)
from .module import Module
class Convolution(Module):
"""N-D Convolution with a bias term.
For Dilated Convolution (a.k.a. Atrous Convolution), refer to:
- Chen et al., DeepLab: Semantic Image Segmentation with Deep Convolutional Nets, Atrous Convolution, and Fully Connected CRFs. https://arxiv.org/abs/1606.00915
- Yu et al., Multi-Scale Context Aggregation by Dilated Convolutions. https://arxiv.org/abs/1511.07122
Note:
Convolution is a computationally intensive operation that
should preferably be run with the `cudnn` backend. NNabla
then uses CuDNN library functions to determine and cache the
fastest algorithm for the given set of convolution parameters,
which results in additional memory consumption which may pose
a problem for GPUs with insufficient memory size. In that
case, the `NNABLA_CUDNN_WORKSPACE_LIMIT` environment variable
can be used to restrict the choice of algorithms to those that
fit the given workspace memory limit, expressed in bytes. In
some cases it may also be desired to restrict the automatic
search to algorithms that produce deterministic (reproducible)
results. This can be requested by setting the environment
variable `NNABLA_CUDNN_DETERMINISTIC` to a non-zero value.
Args:
inp (~nnabla.Variable): N-D array.
outmaps (int): Number of convolution kernels (which is equal to the number of output channels). For example, to apply convolution on an input with 16 types of filters, specify 16.
kernel (:obj:`tuple` of :obj:`int`): Convolution kernel size. For example, to apply convolution on an image with a 3 (height) by 5 (width) two-dimensional kernel, specify (3,5).
pad (:obj:`tuple` of :obj:`int`): Padding sizes for dimensions.
stride (:obj:`tuple` of :obj:`int`): Stride sizes for dimensions.
dilation (:obj:`tuple` of :obj:`int`): Dilation sizes for dimensions.
group (int): Number of groups of channels. This makes connections across channels more sparse by grouping connections along map direction.
w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`.
b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for bias. By default, it is initialized with zeros if `with_bias` is `True`.
base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions.
fix_parameters (bool): When set to `True`, the weights and biases will not be updated.
rng (numpy.random.RandomState): Random generator for Initializer.
with_bias (bool): Specify whether to include the bias term.
Returns:
:class:`~nnabla.Variable`: N-D array. See :obj:`~nnabla.functions.convolution` for the output shape.
"""
def __init__(self, inmaps, outmaps, kernel,
pad=None, stride=None, dilation=None, group=1,
w_init=None, b_init=None,
base_axis=1, fix_parameters=False, rng=None, with_bias=True):
if w_init is None:
w_init = UniformInitializer(
calc_uniform_lim_glorot(inmaps, outmaps, tuple(kernel)), rng=rng)
if with_bias and b_init is None:
b_init = ConstantInitializer()
w_shape = (outmaps, inmaps // group) + tuple(kernel)
w = nn.Variable.from_numpy_array(
w_init(w_shape)).apply(need_grad=not fix_parameters)
b = None
if with_bias:
b_shape = (outmaps, )
b = nn.Variable.from_numpy_array(
b_init(b_shape)).apply(need_grad=not fix_parameters)
self.W = w
self.b = b
self.base_axis = base_axis
self.pad = pad
self.stride = stride
self.dilation = dilation
self.group = group
def __call__(self, inp):
return F.convolution(inp, self.W, self.b, self.base_axis,
self.pad, self.stride, self.dilation, self.group)
Conv1d = Convolution
Conv2d = Convolution
Conv3d = Convolution
ConvNd = Convolution
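# Example usage (an illustrative sketch, not part of the library):
#
# conv = Conv2d(inmaps=3, outmaps=16, kernel=(3, 3), pad=(1, 1))
# x = nn.Variable((8, 3, 32, 32))
# y = conv(x) # -> Variable of shape (8, 16, 32, 32)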
|
If you have trusted Jesus as your Lord and Savior or if you have questions or concerns you would like help with, please let us know. We want to rejoice in what God has done in your life and help you to grow spiritually. We’re here to help you understand the love that Jesus is offering you for free, no matter who or where you are.
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Provides interfaces to various commands provided by FreeSurfer
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data'))
>>> os.chdir(datadir)
"""
__docformat__ = 'restructuredtext'
import os
import os.path as op
from glob import glob
#import itertools
import numpy as np
from nibabel import load
from nipype.utils.filemanip import fname_presuffix
from nipype.interfaces.io import FreeSurferSource
from nipype.interfaces.freesurfer.base import FSCommand, FSTraitedSpec
from nipype.interfaces.base import (TraitedSpec, File, traits,
Directory, InputMultiPath,
OutputMultiPath, CommandLine,
CommandLineInputSpec, isdefined)
from ... import logging
iflogger = logging.getLogger('interface')
class ParseDICOMDirInputSpec(FSTraitedSpec):
dicom_dir = Directory(exists=True, argstr='--d %s', mandatory=True,
desc='path to siemens dicom directory')
dicom_info_file = File('dicominfo.txt', argstr='--o %s', usedefault=True,
desc='file to which results are written')
sortbyrun = traits.Bool(argstr='--sortbyrun', desc='assign run numbers')
summarize = traits.Bool(argstr='--summarize',
desc='only print out info for run leaders')
class ParseDICOMDirOutputSpec(TraitedSpec):
dicom_info_file = File(exists=True,
desc='text file containing dicom information')
class ParseDICOMDir(FSCommand):
"""Uses mri_parse_sdcmdir to get information from dicom directories
Examples
--------
>>> from nipype.interfaces.freesurfer import ParseDICOMDir
>>> dcminfo = ParseDICOMDir()
>>> dcminfo.inputs.dicom_dir = '.'
>>> dcminfo.inputs.sortbyrun = True
>>> dcminfo.inputs.summarize = True
>>> dcminfo.cmdline
'mri_parse_sdcmdir --d . --o dicominfo.txt --sortbyrun --summarize'
"""
_cmd = 'mri_parse_sdcmdir'
input_spec = ParseDICOMDirInputSpec
output_spec = ParseDICOMDirOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
if isdefined(self.inputs.dicom_info_file):
outputs['dicom_info_file'] = os.path.join(os.getcwd(), self.inputs.dicom_info_file)
return outputs
class UnpackSDICOMDirInputSpec(FSTraitedSpec):
source_dir = Directory(exists=True, argstr='-src %s',
mandatory=True,
desc='directory with the DICOM files')
output_dir = Directory(argstr='-targ %s',
desc='top directory into which the files will be unpacked')
run_info = traits.Tuple(traits.Int, traits.Str, traits.Str, traits.Str,
mandatory=True,
argstr='-run %d %s %s %s',
xor=('run_info', 'config', 'seq_config'),
desc='runno subdir format name : spec unpacking rules on cmdline')
config = File(exists=True, argstr='-cfg %s',
mandatory=True,
xor=('run_info', 'config', 'seq_config'),
desc='specify unpacking rules in file')
seq_config = File(exists=True, argstr='-seqcfg %s',
mandatory=True,
xor=('run_info', 'config', 'seq_config'),
desc='specify unpacking rules based on sequence')
dir_structure = traits.Enum('fsfast', 'generic', argstr='-%s',
desc='unpack to specified directory structures')
no_info_dump = traits.Bool(argstr='-noinfodump',
desc='do not create infodump file')
scan_only = File(exists=True, argstr='-scanonly %s',
desc='only scan the directory and put result in file')
log_file = File(exists=True, argstr='-log %s',
desc='explicitly set log file')
spm_zeropad = traits.Int(argstr='-nspmzeropad %d',
desc='set frame number zero padding width for SPM')
no_unpack_err = traits.Bool(argstr='-no-unpackerr',
desc='do not try to unpack runs with errors')
class UnpackSDICOMDir(FSCommand):
"""Use unpacksdcmdir to convert dicom files
Call unpacksdcmdir -help from the command line to see more information on
using this command.
Examples
--------
>>> from nipype.interfaces.freesurfer import UnpackSDICOMDir
>>> unpack = UnpackSDICOMDir()
>>> unpack.inputs.source_dir = '.'
>>> unpack.inputs.output_dir = '.'
>>> unpack.inputs.run_info = (5, 'mprage', 'nii', 'struct')
>>> unpack.inputs.dir_structure = 'generic'
>>> unpack.cmdline
'unpacksdcmdir -generic -targ . -run 5 mprage nii struct -src .'
"""
_cmd = 'unpacksdcmdir'
input_spec = UnpackSDICOMDirInputSpec
class MRIConvertInputSpec(FSTraitedSpec):
read_only = traits.Bool(argstr='--read_only',
desc='read the input volume')
no_write = traits.Bool(argstr='--no_write',
desc='do not write output')
in_info = traits.Bool(argstr='--in_info',
desc='display input info')
out_info = traits.Bool(argstr='--out_info',
desc='display output info')
in_stats = traits.Bool(argstr='--in_stats',
desc='display input stats')
out_stats = traits.Bool(argstr='--out_stats',
desc='display output stats')
in_matrix = traits.Bool(argstr='--in_matrix',
desc='display input matrix')
out_matrix = traits.Bool(argstr='--out_matrix',
desc='display output matrix')
in_i_size = traits.Int(argstr='--in_i_size %d',
desc='input i size')
in_j_size = traits.Int(argstr='--in_j_size %d',
desc='input j size')
in_k_size = traits.Int(argstr='--in_k_size %d',
desc='input k size')
force_ras = traits.Bool(argstr='--force_ras_good',
desc='use default when orientation info absent')
in_i_dir = traits.Tuple(traits.Float, traits.Float, traits.Float,
argstr='--in_i_direction %f %f %f',
desc='<R direction> <A direction> <S direction>')
in_j_dir = traits.Tuple(traits.Float, traits.Float, traits.Float,
argstr='--in_j_direction %f %f %f',
desc='<R direction> <A direction> <S direction>')
in_k_dir = traits.Tuple(traits.Float, traits.Float, traits.Float,
argstr='--in_k_direction %f %f %f',
desc='<R direction> <A direction> <S direction>')
_orientations = ['LAI', 'LIA', 'ALI', 'AIL', 'ILA', 'IAL', 'LAS', 'LSA', 'ALS', 'ASL', 'SLA', 'SAL', 'LPI', 'LIP', 'PLI', 'PIL', 'ILP', 'IPL', 'LPS', 'LSP', 'PLS', 'PSL', 'SLP', 'SPL', 'RAI', 'RIA', 'ARI', 'AIR', 'IRA', 'IAR', 'RAS', 'RSA', 'ARS', 'ASR', 'SRA', 'SAR', 'RPI', 'RIP', 'PRI', 'PIR', 'IRP', 'IPR', 'RPS', 'RSP', 'PRS', 'PSR', 'SRP', 'SPR']
#_orientations = [comb for comb in itertools.chain(*[[''.join(c) for c in itertools.permutations(s)] for s in [a+b+c for a in 'LR' for b in 'AP' for c in 'IS']])]
in_orientation = traits.Enum(_orientations,
argstr='--in_orientation %s',
desc='specify the input orientation')
in_center = traits.List(traits.Float, maxlen=3,
argstr='--in_center %s',
desc='<R coordinate> <A coordinate> <S coordinate>')
sphinx = traits.Bool(argstr='--sphinx',
desc='change orientation info to sphinx')
out_i_count = traits.Int(argstr='--out_i_count %d',
desc='some count ?? in i direction')
out_j_count = traits.Int(argstr='--out_j_count %d',
desc='some count ?? in j direction')
out_k_count = traits.Int(argstr='--out_k_count %d',
desc='some count ?? in k direction')
vox_size = traits.Tuple(traits.Float, traits.Float, traits.Float,
argstr='-voxsize %f %f %f',
desc='<size_x> <size_y> <size_z> specify the size (mm) - useful for upsampling or downsampling')
out_i_size = traits.Int(argstr='--out_i_size %d',
desc='output i size')
out_j_size = traits.Int(argstr='--out_j_size %d',
desc='output j size')
out_k_size = traits.Int(argstr='--out_k_size %d',
desc='output k size')
out_i_dir = traits.Tuple(traits.Float, traits.Float, traits.Float,
argstr='--out_i_direction %f %f %f',
desc='<R direction> <A direction> <S direction>')
out_j_dir = traits.Tuple(traits.Float, traits.Float, traits.Float,
argstr='--out_j_direction %f %f %f',
desc='<R direction> <A direction> <S direction>')
out_k_dir = traits.Tuple(traits.Float, traits.Float, traits.Float,
argstr='--out_k_direction %f %f %f',
desc='<R direction> <A direction> <S direction>')
out_orientation = traits.Enum(_orientations,
argstr='--out_orientation %s',
desc='specify the output orientation')
out_center = traits.Tuple(traits.Float, traits.Float, traits.Float,
argstr='--out_center %f %f %f',
desc='<R coordinate> <A coordinate> <S coordinate>')
out_datatype = traits.Enum('uchar', 'short', 'int', 'float',
argstr='--out_data_type %s',
desc='output data type <uchar|short|int|float>')
resample_type = traits.Enum('interpolate', 'weighted', 'nearest', 'sinc', 'cubic',
argstr='--resample_type %s',
desc='<interpolate|weighted|nearest|sinc|cubic> (default is interpolate)')
no_scale = traits.Bool(argstr='--no_scale 1',
desc='dont rescale values for COR')
no_change = traits.Bool(argstr='--nochange',
desc="don't change type of input to that of template")
autoalign_matrix = File(exists=True, argstr='--autoalign %s',
desc='text file with autoalign matrix')
unwarp_gradient = traits.Bool(argstr='--unwarp_gradient_nonlinearity',
desc='unwarp gradient nonlinearity')
apply_transform = File(exists=True, argstr='--apply_transform %s',
desc='apply xfm file')
apply_inv_transform = File(exists=True, argstr='--apply_inverse_transform %s',
desc='apply inverse transformation xfm file')
devolve_transform = traits.Str(argstr='--devolvexfm %s',
desc='subject id')
crop_center = traits.Tuple(traits.Int, traits.Int, traits.Int,
argstr='--crop %d %d %d',
desc='<x> <y> <z> crop to 256 around center (x, y, z)')
crop_size = traits.Tuple(traits.Int, traits.Int, traits.Int,
argstr='--cropsize %d %d %d',
desc='<dx> <dy> <dz> crop to size <dx, dy, dz>')
cut_ends = traits.Int(argstr='--cutends %d',
desc='remove ncut slices from the ends')
slice_crop = traits.Tuple(traits.Int, traits.Int,
argstr='--slice-crop %d %d',
desc='s_start s_end : keep slices s_start to s_end')
slice_reverse = traits.Bool(argstr='--slice-reverse',
desc='reverse order of slices, update vox2ras')
slice_bias = traits.Float(argstr='--slice-bias %f',
desc='apply half-cosine bias field')
fwhm = traits.Float(argstr='--fwhm %f',
desc='smooth input volume by fwhm mm')
_filetypes = ['cor', 'mgh', 'mgz', 'minc', 'analyze',
'analyze4d', 'spm', 'afni', 'brik', 'bshort',
'bfloat', 'sdt', 'outline', 'otl', 'gdf',
'nifti1', 'nii', 'niigz']
_infiletypes = ['ge', 'gelx', 'lx', 'ximg', 'siemens', 'dicom', 'siemens_dicom']
in_type = traits.Enum(_filetypes + _infiletypes, argstr='--in_type %s',
desc='input file type')
out_type = traits.Enum(_filetypes, argstr='--out_type %s',
desc='output file type')
ascii = traits.Bool(argstr='--ascii',
desc='save output as ascii col>row>slice>frame')
reorder = traits.Tuple(traits.Int, traits.Int, traits.Int,
argstr='--reorder %d %d %d',
desc='olddim1 olddim2 olddim3')
invert_contrast = traits.Float(argstr='--invert_contrast %f',
desc='threshold for inverting contrast')
in_file = File(exists=True, mandatory=True,
position=-2,
argstr='--input_volume %s',
desc='File to read/convert')
out_file = File(argstr='--output_volume %s',
position=-1, genfile=True,
desc='output filename or True to generate one')
conform = traits.Bool(argstr='--conform',
desc='conform to 256^3')
conform_min = traits.Bool(argstr='--conform_min',
desc='conform to smallest size')
conform_size = traits.Float(argstr='--conform_size %s',
desc='conform to size_in_mm')
parse_only = traits.Bool(argstr='--parse_only',
desc='parse input only')
subject_name = traits.Str(argstr='--subject_name %s',
desc='subject name ???')
reslice_like = File(exists=True, argstr='--reslice_like %s',
desc='reslice output to match file')
template_type = traits.Enum(_filetypes + _infiletypes,
argstr='--template_type %s',
desc='template file type')
split = traits.Bool(argstr='--split',
desc='split output frames into separate output files.')
frame = traits.Int(argstr='--frame %d',
desc='keep only 0-based frame number')
midframe = traits.Bool(argstr='--mid-frame',
desc='keep only the middle frame')
skip_n = traits.Int(argstr='--nskip %d',
desc='skip the first n frames')
drop_n = traits.Int(argstr='--ndrop %d',
desc='drop the last n frames')
frame_subsample = traits.Tuple(traits.Int, traits.Int, traits.Int,
argstr='--fsubsample %d %d %d',
desc='start delta end : frame subsampling (end = -1 for end)')
in_scale = traits.Float(argstr='--scale %f',
desc='input intensity scale factor')
out_scale = traits.Float(argstr='--out-scale %d',
desc='output intensity scale factor')
in_like = File(exists=True, argstr='--in_like %s',
desc='input looks like')
fill_parcellation = traits.Bool(argstr='--fill_parcellation',
desc='fill parcellation')
smooth_parcellation = traits.Bool(argstr='--smooth_parcellation',
desc='smooth parcellation')
zero_outlines = traits.Bool(argstr='--zero_outlines',
desc='zero outlines')
color_file = File(exists=True, argstr='--color_file %s',
desc='color file')
no_translate = traits.Bool(argstr='--no_translate',
desc='???')
status_file = File(argstr='--status %s',
desc='status file for DICOM conversion')
sdcm_list = File(exists=True, argstr='--sdcmlist %s',
desc='list of DICOM files for conversion')
template_info = traits.Bool(argstr='--template_info',
desc='dump info about template')
crop_gdf = traits.Bool(argstr='--crop_gdf',
desc='apply GDF cropping')
zero_ge_z_offset = traits.Bool(argstr='--zero_ge_z_offset',
desc='zero ge z offset ???')
class MRIConvertOutputSpec(TraitedSpec):
out_file = OutputMultiPath(File(exists=True), desc='converted output file')
class MRIConvert(FSCommand):
"""use fs mri_convert to manipulate files
.. note::
Adds niigz as an output type option
Examples
--------
>>> mc = MRIConvert()
>>> mc.inputs.in_file = 'structural.nii'
>>> mc.inputs.out_file = 'outfile.mgz'
>>> mc.inputs.out_type = 'mgz'
>>> mc.cmdline
'mri_convert --out_type mgz --input_volume structural.nii --output_volume outfile.mgz'
"""
_cmd = 'mri_convert'
input_spec = MRIConvertInputSpec
output_spec = MRIConvertOutputSpec
filemap = dict(cor='cor', mgh='mgh', mgz='mgz', minc='mnc',
afni='brik', brik='brik', bshort='bshort',
spm='img', analyze='img', analyze4d='img',
bfloat='bfloat', nifti1='img', nii='nii',
niigz='nii.gz')
def _format_arg(self, name, spec, value):
if name in ['in_type', 'out_type', 'template_type']:
if value == 'niigz':
return spec.argstr % 'nii'
return super(MRIConvert, self)._format_arg(name, spec, value)
def _get_outfilename(self):
outfile = self.inputs.out_file
if not isdefined(outfile):
if isdefined(self.inputs.out_type):
suffix = '_out.' + self.filemap[self.inputs.out_type]
else:
suffix = '_out.nii.gz'
outfile = fname_presuffix(self.inputs.in_file,
newpath=os.getcwd(),
suffix=suffix,
use_ext=False)
return os.path.abspath(outfile)
def _list_outputs(self):
outputs = self.output_spec().get()
outfile = self._get_outfilename()
if isdefined(self.inputs.split) and self.inputs.split:
size = load(self.inputs.in_file).get_shape()
if len(size) == 3:
tp = 1
else:
tp = size[-1]
if outfile.endswith('.mgz'):
stem = outfile.split('.mgz')[0]
ext = '.mgz'
elif outfile.endswith('.nii.gz'):
stem = outfile.split('.nii.gz')[0]
ext = '.nii.gz'
else:
stem = '.'.join(outfile.split('.')[:-1])
ext = '.' + outfile.split('.')[-1]
outfile = []
for idx in range(0, tp):
outfile.append(stem + '%04d' % idx + ext)
if isdefined(self.inputs.out_type):
if self.inputs.out_type in ['spm', 'analyze']:
# generate all outputs
size = load(self.inputs.in_file).get_shape()
if len(size) == 3:
tp = 1
else:
tp = size[-1]
# have to take care of all the frame manipulations
raise Exception('Not taking frame manipulations into account- please warn the developers')
outfiles = []
outfile = self._get_outfilename()
for i in range(tp):
outfiles.append(fname_presuffix(outfile,
suffix='%03d' % (i + 1)))
outfile = outfiles
outputs['out_file'] = outfile
return outputs
def _gen_filename(self, name):
if name == 'out_file':
return self._get_outfilename()
return None
class DICOMConvertInputSpec(FSTraitedSpec):
dicom_dir = Directory(exists=True, mandatory=True,
desc='dicom directory from which to convert dicom files')
base_output_dir = Directory(mandatory=True,
desc='directory in which subject directories are created')
subject_dir_template = traits.Str('S.%04d', usedefault=True,
desc='template for subject directory name')
subject_id = traits.Any(desc='subject identifier to insert into template')
file_mapping = traits.List(traits.Tuple(traits.Str, traits.Str),
desc='defines the output fields of interface')
out_type = traits.Enum('niigz', MRIConvertInputSpec._filetypes,
usedefault=True,
desc='defines the type of output file produced')
dicom_info = File(exists=True,
desc='File containing summary information from mri_parse_sdcmdir')
seq_list = traits.List(traits.Str,
requires=['dicom_info'],
desc='list of pulse sequence names to be converted.')
ignore_single_slice = traits.Bool(requires=['dicom_info'],
desc='ignore volumes containing a single slice')
class DICOMConvert(FSCommand):
"""use fs mri_convert to convert dicom files
Examples
--------
>>> from nipype.interfaces.freesurfer import DICOMConvert
>>> cvt = DICOMConvert()
>>> cvt.inputs.dicom_dir = 'dicomdir'
>>> cvt.inputs.file_mapping = [('nifti', '*.nii'), ('info', 'dicom*.txt'), ('dti', '*dti.bv*')]
"""
_cmd = 'mri_convert'
input_spec = DICOMConvertInputSpec
def _get_dicomfiles(self):
"""validate fsl bet options
if set to None ignore
"""
return glob(os.path.abspath(os.path.join(self.inputs.dicom_dir,
'*-1.dcm')))
def _get_outdir(self):
"""returns output directory"""
subjid = self.inputs.subject_id
if not isdefined(subjid):
path, fname = os.path.split(self._get_dicomfiles()[0])
subjid = int(fname.split('-')[0])
if isdefined(self.inputs.subject_dir_template):
subjid = self.inputs.subject_dir_template % subjid
basedir = self.inputs.base_output_dir
if not isdefined(basedir):
basedir = os.path.abspath('.')
outdir = os.path.abspath(os.path.join(basedir, subjid))
return outdir
def _get_runs(self):
"""Returns list of dicom series that should be converted.
Requires a dicom info summary file generated by ``DicomDirInfo``
"""
seq = np.genfromtxt(self.inputs.dicom_info, dtype=object)
runs = []
for s in seq:
if self.inputs.seq_list:
if self.inputs.ignore_single_slice:
if (int(s[8]) > 1) and any([s[12].startswith(sn) for sn in self.inputs.seq_list]):
runs.append(int(s[2]))
else:
if any([s[12].startswith(sn) for sn in self.inputs.seq_list]):
runs.append(int(s[2]))
else:
runs.append(int(s[2]))
return runs
def _get_filelist(self, outdir):
"""Returns list of files to be converted"""
filemap = {}
for f in self._get_dicomfiles():
head, fname = os.path.split(f)
fname, ext = os.path.splitext(fname)
fileparts = fname.split('-')
runno = int(fileparts[1])
out_type = MRIConvert.filemap[self.inputs.out_type]
outfile = os.path.join(outdir, '.'.join(('%s-%02d' % (fileparts[0],
runno),
out_type)))
filemap[runno] = (f, outfile)
if self.inputs.dicom_info:
files = [filemap[r] for r in self._get_runs()]
else:
files = [filemap[r] for r in filemap.keys()]
return files
@property
def cmdline(self):
""" `command` plus any arguments (args)
validates arguments and generates command line"""
self._check_mandatory_inputs()
outdir = self._get_outdir()
cmd = []
if not os.path.exists(outdir):
cmdstr = 'python -c "import os; os.makedirs(\'%s\')"' % outdir
cmd.extend([cmdstr])
infofile = os.path.join(outdir, 'shortinfo.txt')
if not os.path.exists(infofile):
cmdstr = 'dcmdir-info-mgh %s > %s' % (self.inputs.dicom_dir,
infofile)
cmd.extend([cmdstr])
files = self._get_filelist(outdir)
for infile, outfile in files:
if not os.path.exists(outfile):
single_cmd = '%s %s %s' % (self.cmd, infile,
os.path.join(outdir, outfile))
cmd.extend([single_cmd])
return '; '.join(cmd)
class ResampleInputSpec(FSTraitedSpec):
in_file = File(exists=True, argstr='-i %s', mandatory=True,
desc='file to resample', position=-2)
resampled_file = File(argstr='-o %s', desc='output filename', genfile=True,
position=-1)
voxel_size = traits.Tuple(traits.Float, traits.Float, traits.Float,
argstr='-vs %.2f %.2f %.2f', desc='triplet of output voxel sizes',
mandatory=True)
class ResampleOutputSpec(TraitedSpec):
resampled_file = File(exists=True,
desc='output filename')
class Resample(FSCommand):
"""Use FreeSurfer mri_convert to up or down-sample image files
Examples
--------
>>> from nipype.interfaces import freesurfer
>>> resampler = freesurfer.Resample()
>>> resampler.inputs.in_file = 'structural.nii'
>>> resampler.inputs.resampled_file = 'resampled.nii'
>>> resampler.inputs.voxel_size = (2.1, 2.1, 2.1)
>>> resampler.cmdline
'mri_convert -vs 2.10 2.10 2.10 -i structural.nii -o resampled.nii'
"""
_cmd = 'mri_convert'
input_spec = ResampleInputSpec
output_spec = ResampleOutputSpec
def _get_outfilename(self):
if isdefined(self.inputs.resampled_file):
outfile = self.inputs.resampled_file
else:
outfile = fname_presuffix(self.inputs.in_file,
newpath=os.getcwd(),
suffix='_resample')
return outfile
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['resampled_file'] = self._get_outfilename()
return outputs
def _gen_filename(self, name):
if name == 'resampled_file':
return self._get_outfilename()
return None
class ReconAllInputSpec(CommandLineInputSpec):
subject_id = traits.Str("recon_all", argstr='-subjid %s',
desc='subject name', usedefault=True)
directive = traits.Enum('all', 'autorecon1', 'autorecon2', 'autorecon2-cp',
'autorecon2-wm', 'autorecon2-inflate1',
'autorecon2-perhemi', 'autorecon3', 'localGI',
'qcache', argstr='-%s', desc='process directive',
usedefault=True, position=0)
hemi = traits.Enum('lh', 'rh', desc='hemisphere to process',
argstr="-hemi %s")
T1_files = InputMultiPath(File(exists=True), argstr='-i %s...',
desc='name of T1 file to process')
T2_file = File(exists=True, argstr="-T2 %s", min_ver='5.3.0',
desc='Use a T2 image to refine the cortical surface')
openmp = traits.Int(argstr="-openmp %d",
desc="Number of processors to use in parallel")
subjects_dir = Directory(exists=True, argstr='-sd %s', hash_files=False,
desc='path to subjects directory', genfile=True)
flags = traits.Str(argstr='%s', desc='additional parameters')
class ReconAllIOutputSpec(FreeSurferSource.output_spec):
subjects_dir = Directory(exists=True, desc='Freesurfer subjects directory.')
subject_id = traits.Str(desc='Subject name for whom to retrieve data')
class ReconAll(CommandLine):
"""Uses recon-all to generate surfaces and parcellations of structural data
from anatomical images of a subject.
Examples
--------
>>> from nipype.interfaces.freesurfer import ReconAll
>>> reconall = ReconAll()
>>> reconall.inputs.subject_id = 'foo'
>>> reconall.inputs.directive = 'all'
>>> reconall.inputs.subjects_dir = '.'
>>> reconall.inputs.T1_files = 'structural.nii'
>>> reconall.cmdline
'recon-all -all -i structural.nii -subjid foo -sd .'
"""
_cmd = 'recon-all'
_additional_metadata = ['loc', 'altkey']
input_spec = ReconAllInputSpec
output_spec = ReconAllIOutputSpec
_can_resume = True
_steps = [
#autorecon1
('motioncor', ['mri/rawavg.mgz', 'mri/orig.mgz']),
('talairach', ['mri/transforms/talairach.auto.xfm',
'mri/transforms/talairach.xfm']),
('nuintensitycor', ['mri/nu.mgz']),
('normalization', ['mri/T1.mgz']),
('skullstrip',
['mri/brainmask.auto.mgz',
'mri/brainmask.mgz']),
#autorecon2
('gcareg', ['mri/transforms/talairach.lta']),
('canorm', ['mri/norm.mgz']),
('careg', ['mri/transforms/talairach.m3z']),
('careginv', ['mri/transforms/talairach.m3z.inv.x.mgz',
'mri/transforms/talairach.m3z.inv.y.mgz',
'mri/transforms/talairach.m3z.inv.z.mgz']),
('rmneck', ['mri/nu_noneck.mgz']),
('skull-lta', ['mri/transforms/talairach_with_skull_2.lta']),
('calabel',
['mri/aseg.auto_noCCseg.mgz', 'mri/aseg.auto.mgz', 'mri/aseg.mgz']),
('normalization2', ['mri/brain.mgz']),
('maskbfs', ['mri/brain.finalsurfs.mgz']),
('segmentation', ['mri/wm.asegedit.mgz', 'mri/wm.mgz']),
('fill', ['mri/filled.mgz']),
('tessellate', ['surf/lh.orig.nofix', 'surf/rh.orig.nofix']),
('smooth1', ['surf/lh.smoothwm.nofix', 'surf/rh.smoothwm.nofix']),
('inflate1', ['surf/lh.inflated.nofix', 'surf/rh.inflated.nofix']),
('qsphere', ['surf/lh.qsphere.nofix', 'surf/rh.qsphere.nofix']),
('fix', ['surf/lh.orig', 'surf/rh.orig']),
('white',
['surf/lh.white',
'surf/rh.white',
'surf/lh.curv',
'surf/rh.curv',
'surf/lh.area',
'surf/rh.area',
'label/lh.cortex.label',
'label/rh.cortex.label']),
('smooth2', ['surf/lh.smoothwm', 'surf/rh.smoothwm']),
('inflate2',
['surf/lh.inflated',
'surf/rh.inflated',
'surf/lh.sulc',
'surf/rh.sulc',
'surf/lh.inflated.H',
'surf/rh.inflated.H',
'surf/lh.inflated.K',
'surf/rh.inflated.K']),
#autorecon3
('sphere', ['surf/lh.sphere', 'surf/rh.sphere']),
('surfreg', ['surf/lh.sphere.reg', 'surf/rh.sphere.reg']),
('jacobian_white', ['surf/lh.jacobian_white',
'surf/rh.jacobian_white']),
('avgcurv', ['surf/lh.avg_curv', 'surf/rh.avg_curv']),
('cortparc', ['label/lh.aparc.annot', 'label/rh.aparc.annot']),
('pial',
['surf/lh.pial',
'surf/rh.pial',
'surf/lh.curv.pial',
'surf/rh.curv.pial',
'surf/lh.area.pial',
'surf/rh.area.pial',
'surf/lh.thickness',
'surf/rh.thickness']),
('cortparc2', ['label/lh.aparc.a2009s.annot',
'label/rh.aparc.a2009s.annot']),
('parcstats2',
['stats/lh.aparc.a2009s.stats',
'stats/rh.aparc.a2009s.stats',
'stats/aparc.annot.a2009s.ctab']),
('cortribbon', ['mri/lh.ribbon.mgz', 'mri/rh.ribbon.mgz',
'mri/ribbon.mgz']),
('segstats', ['stats/aseg.stats']),
('aparc2aseg', ['mri/aparc+aseg.mgz', 'mri/aparc.a2009s+aseg.mgz']),
('wmparc', ['mri/wmparc.mgz', 'stats/wmparc.stats']),
('balabels', ['BA.ctab', 'BA.thresh.ctab']),
('label-exvivo-ec', ['label/lh.entorhinal_exvivo.label',
'label/rh.entorhinal_exvivo.label'])]
def _gen_subjects_dir(self):
return os.getcwd()
def _gen_filename(self, name):
if name == 'subjects_dir':
return self._gen_subjects_dir()
return None
def _list_outputs(self):
"""
See io.FreeSurferSource.outputs for the list of outputs returned
"""
if isdefined(self.inputs.subjects_dir):
subjects_dir = self.inputs.subjects_dir
else:
subjects_dir = self._gen_subjects_dir()
if isdefined(self.inputs.hemi):
hemi = self.inputs.hemi
else:
hemi = 'both'
outputs = self._outputs().get()
outputs.update(FreeSurferSource(subject_id=self.inputs.subject_id,
subjects_dir=subjects_dir,
hemi=hemi)._list_outputs())
outputs['subject_id'] = self.inputs.subject_id
outputs['subjects_dir'] = subjects_dir
return outputs
def _is_resuming(self):
subjects_dir = self.inputs.subjects_dir
if not isdefined(subjects_dir):
subjects_dir = self._gen_subjects_dir()
if os.path.isdir(os.path.join(subjects_dir, self.inputs.subject_id,
'mri')):
return True
return False
def _format_arg(self, name, trait_spec, value):
if name == 'T1_files':
if self._is_resuming():
return ''
return super(ReconAll, self)._format_arg(name, trait_spec, value)
@property
def cmdline(self):
cmd = super(ReconAll, self).cmdline
if not self._is_resuming():
return cmd
subjects_dir = self.inputs.subjects_dir
if not isdefined(subjects_dir):
subjects_dir = self._gen_subjects_dir()
#cmd = cmd.replace(' -all ', ' -make all ')
iflogger.info('Overriding recon-all directive')
flags = []
directive = 'all'
for idx, step in enumerate(self._steps):
step, outfiles = step
if all([os.path.exists(os.path.join(subjects_dir,
self.inputs.subject_id,f)) for
f in outfiles]):
flags.append('-no%s'%step)
                # Check the later stages first; otherwise the 'autorecon3'
                # branch below could never be reached.
                if idx > 23:
                    directive = 'autorecon3'
                elif idx > 4:
                    directive = 'autorecon2'
else:
flags.append('-%s'%step)
cmd = cmd.replace(' -%s ' % self.inputs.directive, ' -%s ' % directive)
cmd += ' ' + ' '.join(flags)
iflogger.info('resume recon-all : %s' % cmd)
return cmd
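# Illustrative usage sketch (hedged): the subject id, directory and T1 file
# name below are invented, and this assumes the ReconAll input spec defined
# earlier in this module (subject_id, directive, subjects_dir, T1_files).
#
# >>> from nipype.interfaces.freesurfer import ReconAll
# >>> reconall = ReconAll(subject_id='subj01', directive='all',
# ...                     subjects_dir='/data/subjects',
# ...                     T1_files='subj01_T1.nii')
# >>> reconall.cmdline # doctest: +SKIP
#
# On a fresh subject this yields the plain 'recon-all ... -all' command; if
# /data/subjects/subj01/mri already exists, the cmdline property above adds a
# -no<step> flag for every step in _steps whose output files are present (and
# an explicit -<step> flag for the rest), downgrading the directive to
# autorecon2/autorecon3 as appropriate.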
class BBRegisterInputSpec(FSTraitedSpec):
subject_id = traits.Str(argstr='--s %s',
desc='freesurfer subject id',
mandatory=True)
source_file = File(argstr='--mov %s',
desc='source file to be registered',
mandatory=True, copyfile=False)
init = traits.Enum('spm', 'fsl', 'header', argstr='--init-%s',
mandatory=True, xor=['init_reg_file'],
desc='initialize registration spm, fsl, header')
init_reg_file = File(exists=True, argstr='--init-reg %s',
desc='existing registration file',
xor=['init'], mandatory=True)
contrast_type = traits.Enum('t1', 't2', argstr='--%s',
desc='contrast type of image',
mandatory=True)
intermediate_file = File(exists=True, argstr="--int %s",
desc="Intermediate image, e.g. in case of partial FOV")
reg_frame = traits.Int(argstr="--frame %d", xor=["reg_middle_frame"],
desc="0-based frame index for 4D source file")
reg_middle_frame = traits.Bool(argstr="--mid-frame", xor=["reg_frame"],
desc="Register middle frame of 4D source file")
out_reg_file = File(argstr='--reg %s',
desc='output registration file',
genfile=True)
spm_nifti = traits.Bool(argstr="--spm-nii",
desc="force use of nifti rather than analyze with SPM")
epi_mask = traits.Bool(argstr="--epi-mask",
desc="mask out B0 regions in stages 1 and 2")
out_fsl_file = traits.Either(traits.Bool, File, argstr="--fslmat %s",
desc="write the transformation matrix in FSL FLIRT format")
registered_file = traits.Either(traits.Bool, File, argstr='--o %s',
desc='output warped sourcefile either True or filename')
class BBRegisterOutputSpec(TraitedSpec):
out_reg_file = File(exists=True, desc='Output registration file')
out_fsl_file = File(desc='Output FLIRT-style registration file')
min_cost_file = File(exists=True, desc='Output registration minimum cost file')
registered_file = File(desc='Registered and resampled source file')
class BBRegister(FSCommand):
"""Use FreeSurfer bbregister to register a volume to the Freesurfer anatomical.
This program performs within-subject, cross-modal registration using a
boundary-based cost function. The registration is constrained to be 6
DOF (rigid). It is required that you have an anatomical scan of the
subject that has already been recon-all-ed using freesurfer.
Examples
--------
>>> from nipype.interfaces.freesurfer import BBRegister
>>> bbreg = BBRegister(subject_id='me', source_file='structural.nii', init='header', contrast_type='t2')
>>> bbreg.cmdline
'bbregister --t2 --init-header --reg structural_bbreg_me.dat --mov structural.nii --s me'
"""
_cmd = 'bbregister'
input_spec = BBRegisterInputSpec
output_spec = BBRegisterOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
_in = self.inputs
if isdefined(_in.out_reg_file):
outputs['out_reg_file'] = op.abspath(_in.out_reg_file)
elif _in.source_file:
suffix = '_bbreg_%s.dat' % _in.subject_id
outputs['out_reg_file'] = fname_presuffix(_in.source_file,
suffix=suffix,
use_ext=False)
if isdefined(_in.registered_file):
if isinstance(_in.registered_file, bool):
outputs['registered_file'] = fname_presuffix(_in.source_file,
suffix='_bbreg')
else:
outputs['registered_file'] = op.abspath(_in.registered_file)
if isdefined(_in.out_fsl_file):
if isinstance(_in.out_fsl_file, bool):
suffix='_bbreg_%s.mat' % _in.subject_id
out_fsl_file = fname_presuffix(_in.source_file,
suffix=suffix,
use_ext=False)
outputs['out_fsl_file'] = out_fsl_file
else:
outputs['out_fsl_file'] = op.abspath(_in.out_fsl_file)
outputs['min_cost_file'] = outputs['out_reg_file'] + '.mincost'
return outputs
def _format_arg(self, name, spec, value):
if name in ['registered_file', 'out_fsl_file']:
if isinstance(value, bool):
fname = self._list_outputs()[name]
else:
fname = value
return spec.argstr % fname
return super(BBRegister, self)._format_arg(name, spec, value)
def _gen_filename(self, name):
if name == 'out_reg_file':
return self._list_outputs()[name]
return None
class ApplyVolTransformInputSpec(FSTraitedSpec):
source_file = File(exists=True, argstr='--mov %s',
copyfile=False, mandatory=True,
desc='Input volume you wish to transform')
transformed_file = File(desc='Output volume', argstr='--o %s', genfile=True)
_targ_xor = ('target_file', 'tal', 'fs_target')
target_file = File(exists=True, argstr='--targ %s', xor=_targ_xor,
desc='Output template volume', mandatory=True)
tal = traits.Bool(argstr='--tal', xor=_targ_xor, mandatory=True,
desc='map to a sub FOV of MNI305 (with --reg only)')
tal_resolution = traits.Float(argstr="--talres %.10f",
desc="Resolution to sample when using tal")
fs_target = traits.Bool(argstr='--fstarg', xor=_targ_xor, mandatory=True,
requires=['reg_file'],
desc='use orig.mgz from subject in regfile as target')
_reg_xor = ('reg_file', 'fsl_reg_file', 'xfm_reg_file', 'reg_header', 'subject')
reg_file = File(exists=True, xor=_reg_xor, argstr='--reg %s',
mandatory=True,
desc='tkRAS-to-tkRAS matrix (tkregister2 format)')
fsl_reg_file = File(exists=True, xor=_reg_xor, argstr='--fsl %s',
mandatory=True,
desc='fslRAS-to-fslRAS matrix (FSL format)')
xfm_reg_file = File(exists=True, xor=_reg_xor, argstr='--xfm %s',
mandatory=True,
desc='ScannerRAS-to-ScannerRAS matrix (MNI format)')
reg_header = traits.Bool(xor=_reg_xor, argstr='--regheader',
mandatory=True,
desc='ScannerRAS-to-ScannerRAS matrix = identity')
subject = traits.Str(xor=_reg_xor, argstr='--s %s',
mandatory=True,
desc='set matrix = identity and use subject for any templates')
inverse = traits.Bool(desc='sample from target to source',
argstr='--inv')
interp = traits.Enum('trilin', 'nearest', 'cubic', argstr='--interp %s',
desc='Interpolation method (<trilin> or nearest)')
no_resample = traits.Bool(desc='Do not resample; just change vox2ras matrix',
argstr='--no-resample')
m3z_file = File(argstr="--m3z %s",
desc=('This is the morph to be applied to the volume. '
'Unless the morph is in mri/transforms (eg.: for '
'talairach.m3z computed by reconall), you will need '
'to specify the full path to this morph and use the '
'--noDefM3zPath flag.'))
no_ded_m3z_path = traits.Bool(argstr="--noDefM3zPath",
requires=['m3z_file'],
desc=('To be used with the m3z flag. '
'Instructs the code not to look for the'
'm3z morph in the default location '
'(SUBJECTS_DIR/subj/mri/transforms), '
'but instead just use the path '
'indicated in --m3z.'))
invert_morph = traits.Bool(argstr="--inv-morph",
requires=['m3z_file'],
desc=('Compute and use the inverse of the '
'non-linear morph to resample the input '
'volume. To be used by --m3z.'))
class ApplyVolTransformOutputSpec(TraitedSpec):
transformed_file = File(exists=True, desc='Path to output file if used normally')
class ApplyVolTransform(FSCommand):
"""Use FreeSurfer mri_vol2vol to apply a transform.
Examples
--------
>>> from nipype.interfaces.freesurfer import ApplyVolTransform
>>> applyreg = ApplyVolTransform()
>>> applyreg.inputs.source_file = 'structural.nii'
>>> applyreg.inputs.reg_file = 'register.dat'
>>> applyreg.inputs.transformed_file = 'struct_warped.nii'
>>> applyreg.inputs.fs_target = True
>>> applyreg.cmdline
'mri_vol2vol --fstarg --reg register.dat --mov structural.nii --o struct_warped.nii'
"""
_cmd = 'mri_vol2vol'
input_spec = ApplyVolTransformInputSpec
output_spec = ApplyVolTransformOutputSpec
def _get_outfile(self):
outfile = self.inputs.transformed_file
if not isdefined(outfile):
if self.inputs.inverse == True:
if self.inputs.fs_target == True:
src = 'orig.mgz'
else:
src = self.inputs.target_file
else:
src = self.inputs.source_file
outfile = fname_presuffix(src,
newpath=os.getcwd(),
suffix='_warped')
return outfile
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['transformed_file'] = os.path.abspath(self._get_outfile())
return outputs
def _gen_filename(self, name):
if name == 'transformed_file':
return self._get_outfile()
return None
class SmoothInputSpec(FSTraitedSpec):
in_file = File(exists=True, desc='source volume',
argstr='--i %s', mandatory=True)
reg_file = File(desc='registers volume to surface anatomical ',
argstr='--reg %s', mandatory=True,
exists=True)
smoothed_file = File(desc='output volume', argstr='--o %s', genfile=True)
proj_frac_avg = traits.Tuple(traits.Float, traits.Float, traits.Float,
xor=['proj_frac'],
                                 desc='average along normal min max delta',
argstr='--projfrac-avg %.2f %.2f %.2f')
    proj_frac = traits.Float(desc='project frac of thickness along surface normal',
xor=['proj_frac_avg'],
argstr='--projfrac %s')
surface_fwhm = traits.Range(low=0.0, requires=['reg_file'],
mandatory=True, xor=['num_iters'],
desc='surface FWHM in mm', argstr='--fwhm %f')
num_iters = traits.Range(low=1, xor=['surface_fwhm'],
mandatory=True, argstr='--niters %d',
desc='number of iterations instead of fwhm')
vol_fwhm = traits.Range(low=0.0, argstr='--vol-fwhm %f',
desc='volume smoothing outside of surface')
class SmoothOutputSpec(TraitedSpec):
smoothed_file = File(exists=True, desc='smoothed input volume')
class Smooth(FSCommand):
"""Use FreeSurfer mris_volsmooth to smooth a volume
This function smoothes cortical regions on a surface and non-cortical
regions in volume.
.. note::
Cortical voxels are mapped to the surface (3D->2D) and then the
smoothed values from the surface are put back into the volume to fill
the cortical ribbon. If data is smoothed with this algorithm, one has to
be careful about how further processing is interpreted.
Examples
--------
>>> from nipype.interfaces.freesurfer import Smooth
>>> smoothvol = Smooth(in_file='functional.nii', smoothed_file = 'foo_out.nii', reg_file='register.dat', surface_fwhm=10, vol_fwhm=6)
>>> smoothvol.cmdline
'mris_volsmooth --i functional.nii --reg register.dat --o foo_out.nii --fwhm 10.000000 --vol-fwhm 6.000000'
"""
_cmd = 'mris_volsmooth'
input_spec = SmoothInputSpec
output_spec = SmoothOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outfile = self.inputs.smoothed_file
if not isdefined(outfile):
outfile = self._gen_fname(self.inputs.in_file,
suffix='_smooth')
outputs['smoothed_file'] = outfile
return outputs
def _gen_filename(self, name):
if name == 'smoothed_file':
return self._list_outputs()[name]
return None
class RobustRegisterInputSpec(FSTraitedSpec):
source_file = File(mandatory=True, argstr='--mov %s',
desc='volume to be registered')
target_file = File(mandatory=True, argstr='--dst %s',
desc='target volume for the registration')
out_reg_file = File(genfile=True, argstr='--lta %s',
desc='registration file to write')
registered_file = traits.Either(traits.Bool, File, argstr='--warp %s',
desc='registered image; either True or filename')
weights_file = traits.Either(traits.Bool, File, argstr='--weights %s',
desc='weights image to write; either True or filename')
est_int_scale = traits.Bool(argstr='--iscale',
desc='estimate intensity scale (recommended for unnormalized images)')
trans_only = traits.Bool(argstr='--transonly',
desc='find 3 parameter translation only')
in_xfm_file = File(exists=True, argstr='--transform',
desc='use initial transform on source')
half_source = traits.Either(traits.Bool, File, argstr='--halfmov %s',
desc="write source volume mapped to halfway space")
half_targ = traits.Either(traits.Bool, File, argstr="--halfdst %s",
desc="write target volume mapped to halfway space")
half_weights = traits.Either(traits.Bool, File, argstr="--halfweights %s",
desc="write weights volume mapped to halfway space")
half_source_xfm = traits.Either(traits.Bool, File, argstr="--halfmovlta %s",
desc="write transform from source to halfway space")
half_targ_xfm = traits.Either(traits.Bool, File, argstr="--halfdstlta %s",
desc="write transform from target to halfway space")
auto_sens = traits.Bool(argstr='--satit', xor=['outlier_sens'], mandatory=True,
desc='auto-detect good sensitivity')
outlier_sens = traits.Float(argstr='--sat %.4f', xor=['auto_sens'], mandatory=True,
desc='set outlier sensitivity explicitly')
least_squares = traits.Bool(argstr='--leastsquares',
desc='use least squares instead of robust estimator')
no_init = traits.Bool(argstr='--noinit', desc='skip transform init')
init_orient = traits.Bool(argstr='--initorient',
desc='use moments for initial orient (recommended for stripped brains)')
max_iterations = traits.Int(argstr='--maxit %d',
desc='maximum # of times on each resolution')
high_iterations = traits.Int(argstr='--highit %d',
desc='max # of times on highest resolution')
iteration_thresh = traits.Float(argstr='--epsit %.3f',
desc='stop iterations when below threshold')
subsample_thresh = traits.Int(argstr='--subsample %d',
desc='subsample if dimension is above threshold size')
outlier_limit = traits.Float(argstr='--wlimit %.3f',
desc='set maximal outlier limit in satit')
write_vo2vox = traits.Bool(argstr='--vox2vox',
desc='output vox2vox matrix (default is RAS2RAS)')
no_multi = traits.Bool(argstr='--nomulti', desc='work on highest resolution')
mask_source = File(exists=True, argstr='--maskmov %s',
desc='image to mask source volume with')
mask_target = File(exists=True, argstr='--maskdst %s',
desc='image to mask target volume with')
force_double = traits.Bool(argstr='--doubleprec', desc='use double-precision intensities')
force_float = traits.Bool(argstr='--floattype', desc='use float intensities')
class RobustRegisterOutputSpec(TraitedSpec):
out_reg_file = File(exists=True, desc="output registration file")
registered_file = File(desc="output image with registration applied")
weights_file = File(desc="image of weights used")
half_source = File(desc="source image mapped to halfway space")
half_targ = File(desc="target image mapped to halfway space")
half_weights = File(desc="weights image mapped to halfway space")
half_source_xfm = File(desc="transform file to map source image to halfway space")
half_targ_xfm = File(desc="transform file to map target image to halfway space")
class RobustRegister(FSCommand):
"""Perform intramodal linear registration (translation and rotation) using robust statistics.
Examples
--------
>>> from nipype.interfaces.freesurfer import RobustRegister
>>> reg = RobustRegister()
>>> reg.inputs.source_file = 'structural.nii'
>>> reg.inputs.target_file = 'T1.nii'
>>> reg.inputs.auto_sens = True
>>> reg.inputs.init_orient = True
>>> reg.cmdline
'mri_robust_register --satit --initorient --lta structural_robustreg.lta --mov structural.nii --dst T1.nii'
References
----------
Reuter, M, Rosas, HD, and Fischl, B, (2010). Highly Accurate Inverse Consistent Registration:
A Robust Approach. Neuroimage 53(4) 1181-96.
"""
_cmd = 'mri_robust_register'
input_spec = RobustRegisterInputSpec
output_spec = RobustRegisterOutputSpec
def _format_arg(self, name, spec, value):
for option in ["registered_file", "weights_file", "half_source", "half_targ",
"half_weights", "half_source_xfm", "half_targ_xfm"]:
if name == option:
if isinstance(value, bool):
fname = self._list_outputs()[name]
else:
fname = value
return spec.argstr % fname
return super(RobustRegister, self)._format_arg(name, spec, value)
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['out_reg_file'] = self.inputs.out_reg_file
if not isdefined(self.inputs.out_reg_file) and self.inputs.source_file:
outputs['out_reg_file'] = fname_presuffix(self.inputs.source_file,
suffix='_robustreg.lta', use_ext=False)
prefices = dict(src=self.inputs.source_file, trg=self.inputs.target_file)
suffices = dict(registered_file=("src", "_robustreg", True),
weights_file=("src", "_robustweights", True),
half_source=("src", "_halfway", True),
half_targ=("trg", "_halfway", True),
half_weights=("src", "_halfweights", True),
half_source_xfm=("src", "_robustxfm.lta", False),
half_targ_xfm=("trg", "_robustxfm.lta", False))
for name, sufftup in suffices.items():
value = getattr(self.inputs, name)
if isdefined(value):
if isinstance(value, bool):
outputs[name] = fname_presuffix(prefices[sufftup[0]],
suffix=sufftup[1],
newpath=os.getcwd(),
use_ext=sufftup[2])
else:
outputs[name] = value
return outputs
def _gen_filename(self, name):
if name == 'out_reg_file':
return self._list_outputs()[name]
return None
class FitMSParamsInputSpec(FSTraitedSpec):
in_files = traits.List(File(exists=True), argstr="%s", position=-2, mandatory=True,
desc="list of FLASH images (must be in mgh format)")
tr_list = traits.List(traits.Int, desc="list of TRs of the input files (in msec)")
te_list = traits.List(traits.Float, desc="list of TEs of the input files (in msec)")
flip_list = traits.List(traits.Int, desc="list of flip angles of the input files")
xfm_list = traits.List(File(exists=True),
desc="list of transform files to apply to each FLASH image")
out_dir = Directory(argstr="%s", position=-1, genfile=True,
desc="directory to store output in")
class FitMSParamsOutputSpec(TraitedSpec):
t1_image = File(exists=True, desc="image of estimated T1 relaxation values")
pd_image = File(exists=True, desc="image of estimated proton density values")
t2star_image = File(exists=True, desc="image of estimated T2* values")
class FitMSParams(FSCommand):
"""Estimate tissue paramaters from a set of FLASH images.
Examples
--------
>>> from nipype.interfaces.freesurfer import FitMSParams
>>> msfit = FitMSParams()
>>> msfit.inputs.in_files = ['flash_05.mgz', 'flash_30.mgz']
>>> msfit.inputs.out_dir = 'flash_parameters'
>>> msfit.cmdline
'mri_ms_fitparms flash_05.mgz flash_30.mgz flash_parameters'
"""
_cmd = "mri_ms_fitparms"
input_spec = FitMSParamsInputSpec
output_spec = FitMSParamsOutputSpec
def _format_arg(self, name, spec, value):
if name == "in_files":
cmd = ""
for i, file in enumerate(value):
if isdefined(self.inputs.tr_list):
cmd = " ".join((cmd, "-tr %.1f" % self.inputs.tr_list[i]))
if isdefined(self.inputs.te_list):
cmd = " ".join((cmd, "-te %.3f" % self.inputs.te_list[i]))
if isdefined(self.inputs.flip_list):
cmd = " ".join((cmd, "-fa %.1f" % self.inputs.flip_list[i]))
if isdefined(self.inputs.xfm_list):
cmd = " ".join((cmd, "-at %s" % self.inputs.xfm_list[i]))
cmd = " ".join((cmd, file))
return cmd
return super(FitMSParams, self)._format_arg(name, spec, value)
def _list_outputs(self):
outputs = self.output_spec().get()
if not isdefined(self.inputs.out_dir):
out_dir = self._gen_filename("out_dir")
else:
out_dir = self.inputs.out_dir
outputs["t1_image"] = os.path.join(out_dir, "T1.mgz")
outputs["pd_image"] = os.path.join(out_dir, "PD.mgz")
outputs["t2star_image"] = os.path.join(out_dir, "T2star.mgz")
return outputs
def _gen_filename(self, name):
if name == "out_dir":
return os.getcwd()
return None
class SynthesizeFLASHInputSpec(FSTraitedSpec):
fixed_weighting = traits.Bool(position=1, argstr="-w",
desc="use a fixed weighting to generate optimal gray/white contrast")
tr = traits.Float(mandatory=True, position=2, argstr="%.2f",
desc="repetition time (in msec)")
flip_angle = traits.Float(mandatory=True, position=3, argstr="%.2f",
desc="flip angle (in degrees)")
te = traits.Float(mandatory=True, position=4, argstr="%.3f",
desc="echo time (in msec)")
t1_image = File(exists=True, mandatory=True, position=5, argstr="%s",
desc="image of T1 values")
pd_image = File(exists=True, mandatory=True, position=6, argstr="%s",
desc="image of proton density values")
out_file = File(genfile=True, argstr="%s", desc="image to write")
class SynthesizeFLASHOutputSpec(TraitedSpec):
out_file = File(exists=True, desc="synthesized FLASH acquisition")
class SynthesizeFLASH(FSCommand):
"""Synthesize a FLASH acquisition from T1 and proton density maps.
Examples
--------
>>> from nipype.interfaces.freesurfer import SynthesizeFLASH
>>> syn = SynthesizeFLASH(tr=20, te=3, flip_angle=30)
>>> syn.inputs.t1_image = 'T1.mgz'
>>> syn.inputs.pd_image = 'PD.mgz'
>>> syn.inputs.out_file = 'flash_30syn.mgz'
>>> syn.cmdline
'mri_synthesize 20.00 30.00 3.000 T1.mgz PD.mgz flash_30syn.mgz'
"""
_cmd = "mri_synthesize"
input_spec = SynthesizeFLASHInputSpec
output_spec = SynthesizeFLASHOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
if isdefined(self.inputs.out_file):
outputs["out_file"] = self.inputs.out_file
else:
outputs["out_file"] = self._gen_fname("synth-flash_%02d.mgz" % self.inputs.flip_angle,
suffix="")
return outputs
def _gen_filename(self, name):
if name == "out_file":
return self._list_outputs()["out_file"]
return None
|
“Working in the automotive industry, from a car manufacturing plant to repair and tuning shops of all kinds, I (and my crew members with me) have the longest practical experience, far exceeding all the competition in the local area. With such a vast range of practical knowledge and experience under our team's belt, you can be sure that your car will be well looked after! This whole business is actually amazing to me, in the sense of how big it has grown, considering that I started out as just another young boy with a love of sound and speed. But luckily I was also surrounded by all things automotive and mechanical from an early age, as my father Michael was an auto mechanic in Georgia!
My father was also an auto body and heavy collision recovery specialist. Helping him fix transmissions by night at the garage and check engines by day on the weekends is one of my brightest childhood memories! Although I loved it, he wanted different for me; he said “Work Smart, Not Hard,” but I didn't listen. I sincerely love doing the job I do, and I think that is what makes most of the difference from any other repair shop! I have had training from most major car manufacturers and outside facility training, plus many years of hands-on experience. My roots stem from New York, where I worked for several car dealerships.
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-11-07 11:03
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('timetables', '0002_auto_20171005_2209'),
]
operations = [
migrations.AlterField(
model_name='course',
name='name',
field=models.CharField(help_text='Example: appetizer, main course, dessert', max_length=150, verbose_name='Course Name'),
),
migrations.AlterField(
model_name='dish',
name='name',
field=models.CharField(max_length=255, verbose_name='Dish Name'),
),
migrations.AlterField(
model_name='meal',
name='name',
field=models.CharField(max_length=60, verbose_name='Meal Name'),
),
migrations.AlterField(
model_name='timetable',
name='name',
field=models.CharField(max_length=255, verbose_name='Timetable Name'),
),
migrations.AlterField(
model_name='vendor',
name='name',
field=models.CharField(max_length=255, verbose_name='Vendor Name'),
),
]
|
If you want melt-in-your-mouth, succulent, juicy char siu pork, this is the only way to do it... in a pressure cooker. It will fall apart like pulled pork.
Order Pressure Cooker - This easy-to-use, ultra-modern pressure cooker is not the pressure cooker that your mother or grandmother used. It cooks amazing meals, without the fear factor!
Set pressure cooker to sauté. Add soy sauce, sherry, stock and half the char siu sauce to the bowl of a 6-litre pressure cooker. Cook for 5 minutes or until slightly thickened. Add pork. Cook for 30 minutes on medium, following the manufacturer's instructions. Remove pork from pressure cooker. Cool. Reserve 1 cup cooking liquid. Cut pork into 18 even-sized pieces.
Combine honey, sesame oil and remaining char siu in a bowl. Heat peanut oil in a large frying pan over medium-high heat. Add pork. Cook, brushing with the char siu mixture, for 5 to 10 minutes, until browned and coated.
Meanwhile, place reserved cooking liquid in a small saucepan over medium-high heat. Bring to the boil. Reduce heat to low. Simmer 3 minutes or until slightly thickened.
Serve char siu pork with rice, Asian greens and sauce.
|
import math
from pyramid.response import Response
from .pagenumber import PageNumberPagination
from .utilities import replace_query_param
__all__ = ['LinkHeaderPagination']
class LinkHeaderPagination(PageNumberPagination):
"""
Add a header field to responses called Link. The value of the Link header contains information about
traversing the paginated resource. For more information about link header pagination checkout
githhub's great explanation: https://developer.github.com/v3/guides/traversing-with-pagination/
"""
def get_paginated_response(self, data):
next_url = self.get_next_link()
previous_url = self.get_previous_link()
first_url = self.get_first_link()
last_url = self.get_last_link()
link = ''
if next_url is not None and previous_url is not None:
link = '<{next_url}>; rel="next", <{previous_url}>; rel="prev"'
elif next_url is not None:
link = '<{next_url}>; rel="next"'
elif previous_url is not None:
link = '<{previous_url}>; rel="prev"'
if link:
link += ', <{first_url}>; rel="first", <{last_url}>; rel="last"'
response = Response(json=data) # todo, support renderer, should not hard code json
link = link.format(next_url=next_url, previous_url=previous_url, first_url=first_url, last_url=last_url)
if link:
response.headers['Link'] = link
response.headers['X-Total-Count'] = str(self.page.paginator.count)
return response
def get_first_link(self):
url = self.get_url_root()
return replace_query_param(url, self.page_query_param, 1)
def get_last_link(self):
url = self.get_url_root()
count = self.page.paginator.count
page_size = self.get_page_size(self.request)
total_pages = int(math.ceil(count / float(page_size)))
return replace_query_param(url, self.page_query_param, total_pages)
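# Illustration only (hedged): for a hypothetical middle page of a collection
# at /api/items, with next/prev/first/last pages 6, 4, 1 and 10, the headers
# assembled by get_paginated_response() above would read:
#
#   Link: </api/items?page=6>; rel="next", </api/items?page=4>; rel="prev",
#         </api/items?page=1>; rel="first", </api/items?page=10>; rel="last"
#   X-Total-Count: 100
#
# where X-Total-Count carries page.paginator.count (100 items assumed here),
# and the prev/next parts are dropped on the first and last pages.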
|
By Treve Ring – Treve is a wine writer and editor, judge and speaker, and perpetual traveler. Her work appears in publications around the globe. A certified sommelier, WSET diploma holder, French wine scholar and instructor, and Sherry instructor, she is based on Vancouver Island, Canada, though is most often found on a plane or in a vineyard.
You definitely do not need a reason to drink fizz, though the holidays certainly gives you ample opportunity and availability to fill your glass with bubbles. We’re fortunate that sparkling wine production has skyrocketed across British Columbia in the past few years, with producers creating sparkling stars across all styles and grapes.
What makes a great sparkling wine? It’s almost too simple to be true, but it starts with a great base wine, one that achieves appropriate ripeness of fruit while holding onto brisk acidity. This provides the wine with the needed intensity and structure to transform into fizz. We’re fortunate in BC to have the conditions for just that: concentrated warmth for a ripe core, and cool conditions and a sizable diurnal shift to preserve acidity.
Rooted in cool, Canada has emerged an indie darling of sparkling wine (we do share the 49th parallel with Champagne, after all). Within Canada, sparkling wine sales outpace total wine sales at +7.1% vs +4.7%. That said, Canadian-made sparkling wines account for 3-4% share of all Canadian wine, a clear sign (to me at least) that the potential future for sparkling wine is sparkling.
BC leads the country with nearly 75 wineries producing sparkling wines, and approximately 40 of those are using the traditional method, the same painstaking method used in Champagne production.
Chardonnay, Chenin Blanc, Gamay Noir, Gewurztraminer, Ortega, Pinot Auxerrois, Pinot Blanc, Pinot Gris, Pinot Meunier, Pinot Noir, Riesling and Shiraz are the most common grapes, illustrating that when it comes to bubbles, there is no limit. From fun and frizzante (lightly sparkling) through to ageworthy and serious, there are wines to suit all tastes, occasions and budgets.
In this year’s Best of BC Wine Country Awards, Summerhill Pyramid Winery Cipes Brut sparkled the greatest, selected from nearly 2000 voters as their favourite sparkling wine overall, and their top from the Okanagan Valley. Kelowna’s family-founded and operated Summerhill has become almost as known for their sparkling wines as for their iconic pyramid on the property, with Cipes being their hallmark fizz. A BC classic, and original, this traditional method fizz is made from organic Riesling, Chardonnay and Pinot Blanc, and is full of fresh orchard fruit and light toast on a lively, dry palate.
Illustrating the category’s diversity, Chardonnay was the only shared factor between the Okanagan’s top bubble and the Similkameen’s. Corcelette Estate Winery’s Santé is an off-dry fizz of Viognier, Pinot Gris and Chardonnay, fresh and fun, and in a crown-cap ready for your immediate enjoyment.
Backyard Vineyards Blanc de Noir Brut took the top spot for the Fraser Valley, showing that the traditional method and Pinot Noir grape work very well together here in BC. Full of light red berries and ample toasty spice, this is a great match for local salmon. It’s also great to see that the Fraser Valley is paying serious attention to serious sparkling production, and hopefully they will inspire more to the same.
From one traditional Champagne grape (Pinot Noir) to another (Chardonnay) and the Emerging Regions top bubble, Harper’s Trail Sparkling Chardonnay. Mirroring the striking freshness that Kamloops can achieve, this sparkling wine was given no dosage (additional sugar at bottling) to preserve the region’s bright fruitiness. Dry and refreshing, this carries a lot of citrus and green apple notes.
Vancouver Island certainly knows a lot about freshness – sometimes too much freshness. Averill Creek plays off the Island’s maritime-influenced acidity with ample time on the lees and in wood for complexity in their Averill Creek Vineyard 2010 Brut. Pinot Noir and Pinot Gris were barrel fermented and aged on the lees in French oak for 1 year prior to secondary fermentation and 3.5 years in the bottle before disgorging and bottling.
Numerous other sparkling wines were recognized with honourable mentions: Noble Ridge The One, Blue Mountain Estate Winery Brut, Intrigue Wines I Do, Orofino Winery Muscat Frizzante and Enrico Winery Celebration all garnered multiple votes. It really shows the diversity and potential for all sparkling styles, grapes and regions in this province. We should all toast to that! And enjoy your holiday season.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2013:
# Sébastien Pasche, [email protected]
# Benoit Chalut, [email protected]
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
author = "Sebastien Pasche"
maintainer = "Sebastien Pasche"
version = "0.0.1"
import sys
import optparse
import os
import traceback
import json
from pprint import pprint
#TODO : Move to asyncio_mongo
try:
import paramiko
except ImportError:
print("ERROR : this plugin needs the python-paramiko module. Please install it")
sys.exit(2)
#Ok try to load our directory to load the plugin utils.
my_dir = os.path.dirname(__file__)
sys.path.insert(0, my_dir)
try:
from openshift_checks import MongoDBHelper, OutputFormatHelpers, SSHHelper
except ImportError:
print("ERROR : this plugin needs the local openshift_checks lib. Please install it")
sys.exit(2)
#DEFAULT LIMITS
#--------------
DEFAULT_WARNING = 2
DEFAULT_CRITICAL = 3
def is_node_mco_ping(
client,
        node_identity,
        debug=False
):
    """
    :param client: connected paramiko SSH client on the broker
    :param node_identity: mcollective identity of the node to ping
    :param debug: print the command and its JSON output when True
    :return: True if the node answers the mco ping with an OK status
    """
    cmd = "oo-mco rpc rpcutil ping -j -I {i}".format(
        i=node_identity
    )
if debug:
print("Command to execute")
print(cmd)
stdin, stdout, stderr = client.exec_command(
cmd,
get_pty=True
)
lines = [line for line in stdout]
json_raw = ''.join(lines)
json_array = json.loads(json_raw)
if debug:
print("JSON mco ping output")
pprint(json_array)
if len(json_array) == 1:
mco_ping_status = json.loads(json_raw)[0]
if mco_ping_status:
if mco_ping_status['statusmsg'] == 'OK':
return True
return False
def nodes_mco_ping_status(
client,
mongo_district_dict,
debug=False
):
"""
:param client:
:param mongo_district_dict:
:return:
"""
servers_ping = {
server['name']: is_node_mco_ping(
client,
server['name'],
debug
)
for server in mongo_district_dict['servers']
}
servers_status = {
server_name: {
'unresponsive': not mco_ping,
'active': mco_ping
} for server_name, mco_ping in servers_ping.items()
}
return servers_status
def openshift_district(
mongodb_db_connection,
district_name,
debug=False
):
"""
:param mongodb_db_connection:
:param district_name:
:return:
"""
collection = mongodb_db_connection['districts']
if debug:
print("The db connection")
pprint(mongodb_db_connection)
print("The collection")
pprint(collection)
district = collection.find_one(
{
'name': district_name
},
{
'servers': 1
}
)
if debug:
print('The district')
pprint(district)
return district
def servers_status(
mongo_district_dict
):
"""
:param mongo_district_dict:
:return:
"""
servers_status = {
server['name']: {
'active': server['active'],
'unresponsive': server['unresponsive']
} for server in mongo_district_dict['servers']
}
return servers_status
def nb_unresponsive_servers(
servers_status_dict
):
"""
:param servers_status_dict:
:return:
"""
return sum (
[
status['unresponsive'] for server, status in servers_status_dict.items()
]
)
def nb_active_servers(
servers_status_dict
):
"""
:param servers_status_dict:
:return:
"""
return sum (
[
status['active'] for server, status in servers_status_dict.items()
]
)
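# Shape illustration (hedged: the node names are invented). Both counting
# helpers above consume the same mapping of node name -> status flags, e.g.:
#
#   example_status = {
#       'node01.example.com': {'active': True, 'unresponsive': False},
#       'node02.example.com': {'active': False, 'unresponsive': True},
#   }
#   nb_active_servers(example_status)       # -> 1
#   nb_unresponsive_servers(example_status) # -> 1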
# OPT parsing
# -----------
parser = optparse.OptionParser(
"%prog [options]", version="%prog " + version)
#broker ssh param
parser.add_option('--broker-hostname', default='',
dest="broker_hostname", help='Broker to connect to')
parser.add_option('--broker-ssh-port',
dest="broker_ssh_port", type="int", default=22,
help='SSH port to connect to the broker. Default : 22')
parser.add_option('--broker-ssh-key', default=os.path.expanduser('~/.ssh/id_rsa'),
dest="broker_ssh_key_file", help='SSH key file to use. By default will take ~/.ssh/id_rsa.')
parser.add_option('--broker-ssh-user', default='shinken',
dest="broker_ssh_user", help='remote use to use. By default shinken.')
parser.add_option('--broker-passphrase', default='',
dest="broker_ssh_passphrase", help='SSH key passphrase. By default will use void')
#mongodb connection
parser.add_option('--mongo-hostname',
dest="mongo_hostnames",
help='space separated mongodb hostnames:port list to connect to. '
'Example : "server1:27017 server2:27017" ')
parser.add_option('--mongo-user',
dest="mongo_user", default="shinken",
help='remote use to use. By default shinken.')
parser.add_option('--mongo-password',
dest="mongo_password",
help='Password. By default will use void')
parser.add_option('--mongo-source-longon',
dest="mongo_source", default='admin',
help='Source where to log on. Default: admin')
parser.add_option('--mongo-replicaset',
dest="mongo_replicaset",
help='openshift current mongodb replicaset')
parser.add_option('--mongo-openshift-database-name',
dest="mongo_openshift_database",
help='openshift current database')
#openshift relative
parser.add_option('--openshift-district-name',
dest="openshift_district",
help='openshift district to query')
parser.add_option('-w', '--warning',
dest="warning", type="int",default=None,
help='Warning value for number of unresponsive nodes. Default : 2')
parser.add_option('-c', '--critical',
dest="critical", type="int",default=None,
help='Critical value for number of unresponsive nodes. Default : 3')
#generic
parser.add_option('--debug',
dest="debug", default=False, action="store_true",
help='Enable debug')
if __name__ == '__main__':
# Ok first job : parse args
opts, args = parser.parse_args()
if args:
parser.error("Does not accept any argument.")
#Broker ssh args
#---------------
# get broker server list
if opts.broker_hostname is None:
raise Exception("You must specify a broker server")
# get broker ssh user
if opts.broker_ssh_user is None:
raise Exception("You must specify a broker ssh user")
broker_ssh_host = opts.broker_hostname
broker_ssh_port = opts.broker_ssh_port
broker_ssh_user = opts.broker_ssh_user
broker_ssh_key_path = opts.broker_ssh_key_file
broker_ssh_passphrase = opts.broker_ssh_passphrase
#MongpDB args
#------------
# get mongodb server list
if opts.mongo_hostnames is None:
raise Exception("You must specify a mongodb servers list")
# get mongodb user
if opts.mongo_user is None:
raise Exception("You must specify a mongodb user")
# get mongodb user password
if opts.mongo_password is None:
raise Exception("You must specify a mongodb user password")
# get mongodb source logon
if opts.mongo_source is None:
raise Exception("You must specify a mongodb source longon")
# get mongodb openshift database name
if opts.mongo_openshift_database is None:
raise Exception("You must specify a mongodb openshift database name")
# get mongodb database replicaset
if opts.mongo_replicaset is None:
raise Exception("You must specify a mongodb database replicaset name")
mongodb_hostnames_array = opts.mongo_hostnames.split(' ')
mongodb_user = opts.mongo_user
mongodb_password = opts.mongo_password
mongodb_logon_source = opts.mongo_source
mongodb_openshift_db = opts.mongo_openshift_database
mongodb_replicaset = opts.mongo_replicaset
#Openshift related args
#----------------------
#Get district name
if opts.openshift_district is None:
raise Exception("You must specify a openshift district name")
openshift_district_name = opts.openshift_district
    # Try to get numeric warning/critical values
s_warning = opts.warning or DEFAULT_WARNING
s_critical = opts.critical or DEFAULT_CRITICAL
debug = opts.debug
    # Initialise up-front so the finally block below cannot raise a NameError
    # if an exception occurs before the MongoDB client is created.
    mongodb_client = None
    try:
# Ok now got an object that link to our destination
client = SSHHelper.connect(
hostname=broker_ssh_host,
user=broker_ssh_user,
ssh_key_file=broker_ssh_key_path,
passphrase=broker_ssh_passphrase,
port=broker_ssh_port
)
#Connecto to MongoDB
#-------------------
mongodb_client = MongoDBHelper.get_mongodb_connection_to_db(
mongodb_servers=mongodb_hostnames_array,
replicaset=mongodb_replicaset
)
mongodb_db = MongoDBHelper.get_mongodb_auth_db(
mongodb_client=mongodb_client,
database_name=mongodb_openshift_db,
username=mongodb_user,
password=mongodb_password,
source=mongodb_logon_source
)
#get district
#------------
district = openshift_district(
mongodb_db_connection=mongodb_db,
district_name=openshift_district_name,
debug=debug
)
if debug:
pprint(district)
#get server db status
#--------------------
servers_db_status = servers_status(district)
if debug:
print("mongodb servers status")
pprint(servers_db_status)
#get unresponsive/active count from the db
db_nb_unresponsive_servers = nb_unresponsive_servers(servers_db_status)
db_nb_active_servers = nb_active_servers(servers_db_status)
#get mco ping responce
#---------------------
ssh_mco_servers_status = nodes_mco_ping_status(
client,
district,
debug
)
if debug:
print("mco servers status")
pprint(ssh_mco_servers_status)
#get unresponsive/active count from remote mco ping
nb_mco_ping_active_servers = nb_active_servers(ssh_mco_servers_status)
nb_mco_ping_unresponsive_servers = nb_unresponsive_servers(ssh_mco_servers_status)
#format perf data
db_active_servers_data_string = OutputFormatHelpers.perf_data_string(
label="{d}_mongodb_active_nodes".format(d=openshift_district_name),
value=db_nb_active_servers,
)
db_unresponsive_servers_data_string = OutputFormatHelpers.perf_data_string(
label="{d}_mongodb_unresponsive_servers".format(d=openshift_district_name),
value=db_nb_unresponsive_servers,
warn=s_warning,
crit=s_critical
)
mco_active_servers_data_string = OutputFormatHelpers.perf_data_string(
label="{d}_mco_active_nodes".format(d=openshift_district_name),
value=nb_mco_ping_active_servers,
)
mco_unresponsive_servers_data_string = OutputFormatHelpers.perf_data_string(
label="{d}_mco_unresponsive_servers".format(d=openshift_district_name),
value=nb_mco_ping_unresponsive_servers,
warn=s_warning,
crit=s_critical
)
#check
        # Take the worst of the two views (mongodb state vs. live mco ping);
        # use new names so the counting helper functions are not shadowed.
        total_unresponsive = max(db_nb_unresponsive_servers, nb_mco_ping_unresponsive_servers)
        total_active = max(db_nb_active_servers, nb_mco_ping_active_servers)
        status = "OK"
        state = "active"
        nb = total_active
        if total_unresponsive >= s_warning:
            status = "Warning"
            state = "unresponsive"
            nb = total_unresponsive
        if total_unresponsive >= s_critical:
            status = "Critical"
            state = "unresponsive"
            nb = total_unresponsive
#Format and print check result
message = "{nb} {state} openshift nodes".format(
nb=nb,
state=state
)
output = OutputFormatHelpers.check_output_string(
status,
message,
[
db_active_servers_data_string,
db_unresponsive_servers_data_string,
mco_active_servers_data_string,
mco_unresponsive_servers_data_string
]
)
print(output)
except Exception as e:
if debug:
print(e)
the_type, value, tb = sys.exc_info()
traceback.print_tb(tb)
print("Error: {m}".format(m=e))
sys.exit(2)
finally:
if mongodb_client is not None:
MongoDBHelper.close_mongodb_connection(mongodb_client)
if status == "Critical":
sys.exit(2)
if status == "Warning":
sys.exit(1)
sys.exit(0)
|
The region was once ruled by the thriving medieval Kingdom of Lanna, or “kingdom of one million rice fields” (13th to 18th centuries CE), which existed long before the rise of the kingdoms of Sukhothai and Ayutthaya.
The resulting Lanna culture is a colourful combination of various elements brought together from both historic and multi-ethnic cohabitation. At its center lies the ancient Kingdoms capital; Chiang Mai which can trace its history back more than 700 years, making it one of the most historic cities in Thailand.
Among the least known inhabitants of the mountainous area are the Tai Khuen, a small tribal sub-group of the Shan people, whose origins lie in the neighbouring Shan state of Myanmar. Unlike their new neighbours, the Tai Khuen didn’t gradually migrate into the area; they were brought to the region as war captives by King Kawila of Chiang Mai in 1796, after he had taken control of the then Shan state of Kengtung. This mass forced repatriation (common during the period) was part of the Lanna King’s efforts to repopulate the city and the areas around Chiang Mai, which had been left deserted after years of war with the Burmese. The former ruling family of Kengtung State belonged to the Tai Khuen.
The Tai Khuen are also known as the Khuen, Kuan, Kween, Khween, Khouen and Kuanhua. The actual number of Tai Khuen in Thailand is not easy to judge: some reports state there are 100,000 in the Kingdom, yet there are only a few Tai Khuen communities in 4 districts in Chiang Mai, estimated at 5,000 people, while even smaller family units can be found in Chiang Rai, Lamphun, Phayao, Nan and Mae Hong Sorn.
The authoritative Ethnologue, which lists every known language in the world, doubts there are, indeed, any Tai Khuen in Thailand! Meanwhile, the Christian religious site Joshua Project states there are only 14,000 in the world, with 2,500 in the USA (resettled after the Vietnam War) and the remainder living in Laos.
Khuen family names are taken from the names of sacred animals or plants. For their entire lives, the people are not to touch the particular animal or plant that bears their name.
While the people are predominately Theravada Buddhists, they also believe in spirit and ancestral worship. Worshiping ghosts and spirits is based on a hierarchy system, with the most important spirit being the spirit of the land, who is appeased daily with offerings of food and drink left at spirit houses.
On the full moon of June each year, the people will worship the spirits of their village and ancestors with offerings of meat, fruit, flowers and rice at a special altar inside their houses.
A Khuen wat is quite distinct from a Shan or a Burmese temple. Gold stenciling on the inner walls and teakwood pillars are frequent in Tai Khuen viharas. Other noticeable features in Tai Khuen visual culture are the banners (tong) hanging from the ceiling in the monastery and the temple drums.
In Thai, lacquer-ware is called ‘Khrueang Khuen’ (Khrueang in this context means ‘works’, according to Antiques, Crafts, Collectibles by Tanistha Dansilp). It is thought that the art was brought to the region by the Tai Khuen people even prior to their mass resettlement.
Also known as “Kreung Khuen”, lacquer ware has two principal functions. First, the lacquer is used for coating bamboo house ware in order to make it water resistant. Second, lacquer is used to decorate objects with expressions of traditional beliefs, from a variety of small objects carried on a person to much larger objects such as furniture and even coffins. Before lacquering, the surface is sometimes painted with pictures, inlaid with shell and other materials, or carved. The lacquer can be dusted with gold or silver and given further decorative treatments.
The Tai Khuen are said to have a ‘deep and strong-rooted culture’, clinging to their traditions as a way of preserving their cultural identity. Even today, the people proudly wear their traditional costume and speak their own dialect, which is a Tai-Kadai language spoken in Eastern Shan State, Myanmar.
|
from __future__ import unicode_literals
from django import forms
from django.utils.translation import ugettext_lazy as _
# While this couples the geographic forms to the GEOS library,
# it decouples from database (by not importing SpatialBackend).
from django.contrib.gis.geos import GEOSException, GEOSGeometry
from .widgets import OpenLayersWidget
class GeometryField(forms.Field):
"""
This is the basic form field for a Geometry. Any textual input that is
accepted by GEOSGeometry is accepted by this form. By default,
this includes WKT, HEXEWKB, WKB (in a buffer), and GeoJSON.
"""
widget = OpenLayersWidget
geom_type = 'GEOMETRY'
default_error_messages = {
'required': _('No geometry value provided.'),
'invalid_geom': _('Invalid geometry value.'),
'invalid_geom_type': _('Invalid geometry type.'),
'transform_error': _('An error occurred when transforming the geometry '
'to the SRID of the geometry form field.'),
}
def __init__(self, **kwargs):
# Pop out attributes from the database field, or use sensible
# defaults (e.g., allow None).
self.srid = kwargs.pop('srid', None)
self.geom_type = kwargs.pop('geom_type', self.geom_type)
super(GeometryField, self).__init__(**kwargs)
self.widget.attrs['geom_type'] = self.geom_type
def to_python(self, value):
"""
Transforms the value to a Geometry object.
"""
if value in self.empty_values:
return None
if not isinstance(value, GEOSGeometry):
try:
value = GEOSGeometry(value)
except (GEOSException, ValueError, TypeError):
raise forms.ValidationError(self.error_messages['invalid_geom'], code='invalid_geom')
# Try to set the srid
if not value.srid:
try:
value.srid = self.widget.map_srid
except AttributeError:
if self.srid:
value.srid = self.srid
return value
def clean(self, value):
"""
Validates that the input value can be converted to a Geometry
object (which is returned). A ValidationError is raised if
the value cannot be instantiated as a Geometry.
"""
geom = super(GeometryField, self).clean(value)
if geom is None:
return geom
# Ensuring that the geometry is of the correct type (indicated
# using the OGC string label).
if str(geom.geom_type).upper() != self.geom_type and not self.geom_type == 'GEOMETRY':
raise forms.ValidationError(self.error_messages['invalid_geom_type'], code='invalid_geom_type')
# Transforming the geometry if the SRID was set.
if self.srid and self.srid != -1 and self.srid != geom.srid:
try:
geom.transform(self.srid)
except GEOSException:
raise forms.ValidationError(
self.error_messages['transform_error'], code='transform_error')
return geom
def _has_changed(self, initial, data):
""" Compare geographic value of data with its initial value. """
try:
data = self.to_python(data)
initial = self.to_python(initial)
except forms.ValidationError:
return True
# Only do a geographic comparison if both values are available
if initial and data:
data.transform(initial.srid)
# If the initial value was not added by the browser, the geometry
# provided may be slightly different, the first time it is saved.
# The comparison is done with a very low tolerance.
return not initial.equals_exact(data, tolerance=0.000001)
else:
# Check for change of state of existence
return bool(initial) != bool(data)
class GeometryCollectionField(GeometryField):
geom_type = 'GEOMETRYCOLLECTION'
class PointField(GeometryField):
geom_type = 'POINT'
class MultiPointField(GeometryField):
geom_type = 'MULTIPOINT'
class LineStringField(GeometryField):
geom_type = 'LINESTRING'
class MultiLineStringField(GeometryField):
geom_type = 'MULTILINESTRING'
class PolygonField(GeometryField):
geom_type = 'POLYGON'
class MultiPolygonField(GeometryField):
geom_type = 'MULTIPOLYGON'
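# Usage sketch (hedged): the WKT value and SRID below are invented to show
# what these fields accept and return; it assumes GEOS is available, and a
# plain TextInput is used to avoid the map-based default widget:
#
#   field = PointField(srid=4326, widget=forms.TextInput)
#   geom = field.clean('SRID=4326;POINT(5 23)')
#   geom.geom_type, geom.srid   # ('Point', 4326)
#
# Any WKT/EWKT, HEXEWKB, WKB or GeoJSON string accepted by GEOSGeometry
# validates the same way; clean() also enforces the field's geom_type and
# transforms to the field's SRID when they differ.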
|
5 months ago, I announced that Stonemaier Games would no longer be using crowdfunding or pre-order campaigns. This was a notable announcement because I have Kickstarter to thank for my company’s inception and growth from 2012-2015, plus, well, this blog is about crowdfunding.
Today Richard Bliss released a new 20-minute podcast chat with me about life after Kickstarter, so I thought I’d take the opportunity to share the trials, tribulations, and successes of the last few months for Stonemaier Games. I’ll focus on the top 3 reasons I moved away from crowdfunding and the 5 pillars of Kickstarter.
Fulfillment Risk: Because we no longer rely on fulfillment centers to send our products to thousands of backers, we’ve completely eliminated any risk involved in this process. In a way, we’ve shifted some of that risk to partners who specialize in shipping specific products, like Top Shelf Gamer and Meeplesource. Also, there are great fulfillment companies I trust 100% if I ever need to ship something to a number of people.
Time: I work the same 80-hour workweek I did when I was planning, running, and fulfilling our Kickstarter campaigns. I still have deadlines and responsibilities, and I'm still just as averse to vacations as I have been since I went full time in late 2013. But I much prefer how my most precious resource, time, feels now: more fluid and often more relaxed.
Human Nature: I still get to see the darker side of human nature among a small number of people on social media. The difference now is that I have administrative control. Though even that comes with new challenges, as I don’t want to remove someone from a Facebook group just because they had a bad day. Instead, I’ve trained myself to look for a pattern of negativity before taking action.
The following are what I consider to be the top 5 purposes of crowdfunding. In this post I described my plan of how to accomplish these things without Kickstarter. Did these plans work out?
While there are many channels for people to know about Stonemaier Games and to interact with me (this blog, e-newsletter, e-mail, Twitter, BoardGameGeek, YouTube, etc), by far the most impactful platform has been Facebook. Specifically, product-specific Facebook groups. Combined, our groups for Viticulture, Euphoria, Between Two Cities, Scythe, Charterstone, and our treasure chests have over 10,000 members.
I’m frequently active on social media, but I also like that the Facebook groups are entirely self-sufficient without me. Also, unlike on crowdfunding platforms, I really like that anyone can join the Facebook groups or chat on Twitter, BoardGameGeek, etc. Those platforms allow you to love something or be interested in something without spending money to interact with others who are passionate about it.
I’ve shifted early information about our products from backers over to our 2,000+ ambassadors, allowing them to provide insights and suggestions while the product is still pliable. Most of this comes in the form of blind playtesting, but I’ve also just tried to ask ambassadors specific questions and simply leave my metaphorical door open for them to offer input about the information I share with them.
I’ve also used Facebook for this. Throughout the Charterstone design process, every now and then I’d pose a question in the Charterstone FB group to help me get outside perspectives about legacy games, worker-placement games, etc. I really liked having a forum to discuss games while they’re still in development.
Possibly the biggest change in methodology has been shifting my entire sales focus to distributors. Retailers and individual people are my customers, but distributors are my clients. In practical terms, it means that I frequently communicate with distributors to coordinate direct orders, product releases, reprints, and marketing efforts.
I look to distributors to help me gauge demand–it’s mutually beneficial for us to talk about those numbers. Back in December I went to distributors and asked if they wanted to commit to quantities for the 6th printing of Scythe. If they committed by late December, I would guarantee that quantity for them. Those games are now arriving at their destinations.
We did something similar with Charterstone. I used our future printing request form to gauge demand from our e-newsletter subscribers, then I shared that total with retailers to get a feel for their interest. With 450 retailers on our mailing list, I wish more than 50 had replied, though I think many of them communicated directly with their distributors. We took all of that data and shared it with distributors, which has contributed to determining the size of the first print run of Charterstone (51,000 units).
I learned an unexpected lesson while generating awareness for Charterstone: If you tell retailers a product’s SKU and MSRP, they will list it for pre-order.
Now, this isn’t inherently a bad thing. Pre-orders are how stores gauge demand, and they’re directly competing with other retailers.
The problem is that when I shared the SKU and MSRP for Charterstone with distributors and retailers, it wasn't with the intent that they would accept pre-orders already. If that were the intent, my communication with them would have been different: I would have informed all stores that they could open pre-orders, creating a level playing field. I would also have been very precise in noting that the release date was in flux. And ideally I would have provided links to early reviews so customers could make an informed decision–that's why I wanted to wait until much later in the process to initiate pre-orders.
So I’ve learned to either (a) not release the SKU and/or MSRP until I’m ready for retailers to accept pre-orders or (b) tailor my communication to retailers so they all have the same pre-order information at the same time.
Beyond that lesson, I’ve continued to use the same methods of marketing I used while I was running Kickstarters: send out review copies, support play-and-win, post banner ads on BGG, and send out the monthly e-newsletter.
Cash flow in the business of board games is an odd thing. You may invest hundreds of thousands of dollars in a game that takes 3-4 months to manufacture, 1 month to ship, and then 1-2 months after that you’ll get payments from distributors. So there are extended periods of time when cash flow is a struggle, then all of a sudden (if all goes well) you’re flush with cash.
The X-factor we experienced a few months ago is taxes. I’ll discuss this in detail in an upcoming post, but the short story is that our cash flow was very healthy, then suddenly the US Treasury had almost all of our cash.
Fortunately, a major asset has been our partnerships with international publishers. When we localize a game as part of a bigger print run, the partner pays for half of their costs (manufacturing plus royalties) before printing begins and then the other half right before the games ship to them. This is a much tighter cycle than distributor payments, and it really helps with cash flow.
We also have some distributors who buy directly from us. Depending on how well I know them, I’ve let some pay me when they receive the product at their final destination instead of when their shipping company picks up the product in Shenzhen. But I think I’ll need to switch to a system where all distributors use the latter method. Without it, there may be times when we want to make a product but simply need to wait until we’ve received distributor payments, which doesn’t benefit anyone.
Overall, as much as I enjoyed and am grateful for my crowdfunding experience–and while I continue to believe it’s an amazing platform for creators–I’m personally much happier now that I no longer use Kickstarter as a creator. And when I’m happy, it’s a lot easier for me to create joyful experiences and interactions for our fans.
But that’s just me. Has Stonemaier Games suffered as a result of not using Kickstarter? Like, all other factors remaining equal, would Stonemaier Games be better off if we ran a Kickstarter campaign for Charterstone?
Honestly, I think the only metric by which we’d be better off is current cash flow. Otherwise, in every other way (including total revenue), the company is more successful because we grew out of crowdfunding instead of continuing to use it.
I’m happy to answer any of your questions and to read your comments below.
That’s how I feel about KS. The natural way is to start publishing games the traditional way. Still, a lot of companies treat KS as a pre-order platform, and that’s ok; I’m not blaming them.
But KS can be so exhausting for small businesses (1-2 people) when running the whole campaign alone, especially when dealing with haters! And I know that all publishers struggle with them, even the biggest (not only on KS but in traditional publishing as well)!
Mateusz: I would wager that even companies perceived to use Kickstarter as a pre-order platform are motivated by several of those pillars mentioned above.
Some of the plusses you list are plusses for you but not necessarily for your customers. For example, releasing the game through distributors has left some people unsure of where they can secure a copy.
Do you think the drawbacks of Kickstarter are worth the hassle if they mean more plusses for your customers but not for you?
It is not a criticism of your choice, I am just curious.
Josiah: That’s a great question: By shifting away from Kickstarter, are we neglecting our customers?
I would say the answer is not at all. Our customers still have the same opportunity to buy our products from any store. For example, we announced Charterstone well in advance of even starting production, so anyone who wants a copy can go to any store (local or online) and reserve/pre-order a copy. Stores communicate those quantities to distributors, and those distributors inform our production quantity.
Because of our relationships with distributors, we’re actually able to make more games in the first print run than when we were on Kickstarter, serving a much broader customer base than before and increasing the chances that everyone who wants a first-run copy will get one (even if they don’t decide until the day of release that they want one).
I would say it is even better for customers because the customer will get the full product straight from the store. There are no KS exclusives or stretch goals. You get the best version of the game as standard. And that’s what I like about traditional publishing!
Personally I’d say that it removes, or reduces, risk to the consumer as well.
You wait until there’s an actual product rather than funding the development of one. While Stonemaier had an excellent track record, often managing the unheard-of feat of getting games to consumers ahead of schedule, there is always some risk involved in a Kickstarter project. The US has been fairly consistent in treating Kickstarter as a pre-order service legally speaking, so there is some consumer protection there, but let’s face it: people with pre-orders are at the bottom of the totem pole of creditors if a company goes bust. (Not that I expect Stonemaier is in any danger of that happening, but it’s something that has happened with Kickstarters before.)
Even beyond the disaster situation of a scammer or a bankruptcy, you get to see more reviews, hear more player impressions, and generally just have more information available, before putting your money down on the game. Less risk of accidentally buying a game you don’t like, which is a bonus.
Mateusz: That’s a great point!
Stephen: I’m glad you mentioned that aspect of risk mitigation as well. I agree.
Thank you for writing this blog because I love the topics and questions that come up from your writings. I think your happiness is all the reason you need to move away from Kickstarter. However, I am interested to see the long term cash flow effects it will have.
“Because of our relationships with distributors, we’re actually able to make more games in the first print run than when we were on Kickstarter.” We’ve seen situations where publishers didn’t print anywhere near demand and had to allocate product to distributors. Many retailer and consumer pre-orders don’t get fulfilled even though they were placed months in advance.
It’s not perfect but I feel like the best reasons for Kickstarter are the improved data on consumer interest, improved cash flow and reduction of financial risk on publishing. Anyways, thanks for the insights.
James: Thanks for your comment. My thoughts are as follows: Really, there’s no question in my mind that long-term revenue will be substantially higher using the distribution model.
As for the situation you mentioned, the key is communication. If retailers, distributors, and publishers are in contact with each other before production begins, they can anticipate demand and prepare to meet it with the proper supply. A Kickstarter campaign only addresses the very beginning of demand, the first early adopters. It’s true that it gives creators some data to work with, but I can get that same data simply by asking consumers, retailers, and distributors how many copies they want.
As for short-term cash flow, absolutely, a Kickstarter is great for that.
I guess I’ve been very lucky in my Kickstarter so far, in that I haven’t had any haters (fingers crossed that it stays that way for the rest of the campaign).
I think the other benefit of KS is that it’s a great way to learn all the ins and outs of manufacturing and shipping. I’m not averse to publishing in a more traditional way, nor to submitting my designs to established publishers, but it’s a per-game decision, and assuming I’m sufficiently skeptical of my own designs (i.e. I don’t just self-publish because I personally think it’s a neat game), I think I can make a good judgment call on a given game. Plus, I don’t really think traditional self-publishing is an option for me until I have become somewhat established as a reliable brand.
Paul: I’m glad to hear you’ve had a great group of backers so far! From my experience, the vast majority of backers are wonderful.
You’re absolutely right that Kickstarter is a great way to learn pretty much everything you need to know to run a business. I hadn’t thought about it as a way to establish your brand as a designer, but I like that approach!
I like the 5 reasons for using Kickstarter, but I think it forgets a segment of consumers that is under-served: direct-to-global consumers. In many parts of the world, new games are often served by poor or inefficient distribution networks. Here in Australia, we have a large continent with a small population, and distribution of niche products like modern tabletop games is often inefficient and expensive. Kickstarter means we often get games 12 months or more before they are available in retail, or it avoids having to order from overseas when the product simply is not distributed here. While my examples are from being in Australia, I know there are many parts of the world with similar pent-up demand that existing retail channels don’t serve well.
Richard: That’s true, certain markets can be underserved due to varying factors. That hasn’t been our experience, as we have some great partners in pretty much every location where games are sold (including Australia), many of whom ship the games directly from China. This allows games to arrive quickly (even faster than the US) and more cost-effectively for consumers than if they were purchased via Kickstarter.
Looking forward to seeing that pan out. At the moment the only Stonemaier product I can find in my 3 local game stores is Invaders from Afar, and while I and others at our gaming club know to tell our local stores about games we are looking forward to, the retailers often cite difficulty getting accurate information on product releases and difficulty procuring inventory.
Interesting. We have a monthly e-newsletter that goes out to hundreds of retailers, so either they’re not reading it or they haven’t subscribed. :) I think that’s one of the challenges–it takes both parties to have effective communication. Only 3 stores out of the 44 on our mailing list responded to our Charterstone poll.
That said, regardless of what retailers know, both VR Distribution and Let’s Play Games have large shipments of our games and accessories arriving soon (they may have already arrived). I appreciate you letting your store know that you want our products!
Jamey, this is a great post regarding your experience in retrospect. We are on the other side of the coin, looking at using kickstarter to launch our first game and thus our company. Would you say then that kickstarter is a good way for startup companies and or projects to begin? As you point out, it appears that there is a “growing up” phase and now you no longer need the platform, but is it good in your opinion for “beginners”? After reviewing your post and observing many examples, this seems to be the case.
Austin: I would definitely say that Kickstarter is a great way for companies to begin and grow. Stonemaier wouldn’t exist or be in our current position without it, and I highly recommend it.
Always such interesting and helpful information. I’m curious how and when taxation happens when you use Kickstarter. When the funds come in, is that when the income is taxable? The reason I’m wondering is that it may take several months, in some cases a year, to actually deliver products, and you do not have an expense ratio established until the product is delivered.
Also, how long after the campaign ends does it take to receive funds?
You’ll get the funds about 2 weeks after the campaign ends.
|
# Copyright (c) 2016 Sebastian Weber, Henri Menke. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from scipy import sparse
import json
class BinaryLoader:
def __init__(self):
# types
self.typeIds = {1008: 'int8', 1016: 'int16', 1032: 'int32',
1064: 'int64', 1108: 'uint8', 1116: 'uint16',
                        1132: 'uint32', 1164: 'uint64', 2032: 'float32',
2064: 'float64'}
self.type_t = 'uint16'
# bit masks
self.csr_not_csc = 0x01 # xxx0: csc, xxx1: csr
self.complex_not_real = 0x02 # xx0x: real, xx1x: complex
def readNumber(self, f, sz=None):
datatype = self.typeIds[np.fromfile(
f, dtype=np.dtype(self.type_t), count=1)[0]]
if sz is None:
return np.fromfile(f, dtype=np.dtype(datatype), count=1)[0]
else:
return np.fromfile(f, dtype=np.dtype(datatype), count=sz)
def readVector(self, f):
size = self.readNumber(f)
return self.readNumber(f, size)
def readMatrix(self, f):
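        # Serialized layout: flags, rows, cols, data (real part, then imaginary part
        # if the complex flag is set), indices, indptr (stored without its final entry).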
flags = self.readNumber(f)
rows = self.readNumber(f)
cols = self.readNumber(f)
if flags & self.complex_not_real:
data = self.readVector(f) + self.readVector(f) * 1j
else:
data = self.readVector(f)
indices = self.readVector(f)
indptr = np.append(self.readVector(f), len(data))
if flags & self.csr_not_csc:
return sparse.csr_matrix((data, indices, indptr), shape=(rows, cols))
else:
return sparse.csc_matrix((data, indices, indptr), shape=(rows, cols))
class Eigensystem(BinaryLoader):
def __init__(self, filename):
super().__init__()
self._filename = filename
self._shift = 0
self._params = None
self._energies = None
self._basis = None
@property
def params(self):
if self._params is None:
with open(self._filename + '.json', 'r') as f:
self._params = json.load(f)
return self._params
@property
def energies(self):
if self._energies is None:
with open(self._filename + '.mat', 'rb') as f:
self._energies = np.real(self.readMatrix(f).diagonal())
self._shift = f.tell()
return self._energies
@property
def basis(self):
if self._basis is None:
with open(self._filename + '.mat', 'rb') as f:
if self._shift > 0:
f.seek(self._shift, 0)
else:
self._energies = np.real(self.readMatrix(f).diagonal())
self._basis = self.readMatrix(f)
return self._basis
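# Minimal usage sketch: the file stem below is an assumed example, not something
# shipped with this module. An Eigensystem expects '<stem>.json' (calculation
# parameters) and '<stem>.mat' (energies followed by the basis matrix) side by side.
if __name__ == '__main__':
    es = Eigensystem('output/system')
    print(es.params)            # parameter dictionary from the JSON sidecar
    print(es.energies[:10])     # first few eigenenergies (diagonal of the first matrix)
    print(es.basis.shape)       # basis as a scipy sparse matrix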
|
If you are after a dream location for your ski holidays, the Alps and the Dolomites are your best choice. With a variety of snow-guaranteed, perfectly groomed ski pistes for beginners, intermediates and pros, your ski holidays in the Alps will be the event of the year.
Choosing a ski hotel near the slopes is your best course if you want your ski holiday in the Alps to be as hassle-free as possible. The two main ski resorts in South Tyrol, namely the Dolomiti Superski and the Ortler Skiarena are maintained to perfection and provided with cutting-edge technologies, from snowmakers to top-notch lift facilities that take the captivated guests to incredible panoramic viewpoints and several challenging descents.
Those who prefer to enjoy the South Tyrolean winter magic on even ground can have their pick of the numerous cross-country skiing trails South Tyrol has to offer – plenty to explore for beginners and experienced skiers alike.
|
# Notifry - Google App Engine backend
#
# Copyright 2011 Daniel Foote
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from google.appengine.ext import db
from model.UserMessage import UserMessage
class UserMessages(db.Model):
messages = db.ListProperty(int)
owner = db.UserProperty()
def dict(self):
result = {
'type' : 'messages',
'owner': self.owner,
'messages': self.get_messages()
}
try:
result['key'] = self.key().name()
except db.NotSavedError, ex:
# Not saved yet, so it has no ID.
pass
return result
def get_messages(self):
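		# Fetch only the most recent 200 messages, looked up as children of this collection.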
return UserMessage.get_by_id(self.messages[-200:], self)
def get_messages_for_source(self, source):
final_messages = []
for message in self.get_messages():
if message.source.externalKey == source.externalKey:
final_messages.append(message)
return final_messages
def add_message(self, message):
id = message.key().id()
if self.messages:
if not id in self.messages:
self.messages.append(id)
else:
self.messages = []
self.messages.append(id)
# And cull off old messages.
if len(self.messages) > 500:
self.messages = self.messages[-500:]
def remove_message(self, message):
if self.messages:
try:
self.messages.remove(message.key().id())
except ValueError, ex:
# We don't have that device in the list.
pass
def delete_for_source(self, source):
messages = self.get_messages_for_source(source)
def transaction(collection, messages):
db.delete(messages)
for message in messages:
collection.remove_message(message)
collection.put()
db.run_in_transaction(transaction, self, messages)
@staticmethod
def key_for(owner):
return "messages:%s" % owner.nickname()
@staticmethod
def get_user_message_collection(owner):
return UserMessages.get_or_insert(UserMessages.key_for(owner), owner = owner)
@staticmethod
def get_user_message_collection_static(owner):
return UserMessages.get_by_key_name(UserMessages.key_for(owner))
@staticmethod
def get_user_messages(owner):
collection = UserMessages.get_user_message_collection(owner)
return collection.get_messages()
@staticmethod
def get_user_messages_for_source(source):
collection = UserMessages.get_user_message_collection(source.owner)
return collection.get_messages_for_source(source)
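# Usage sketch (handler-side; the request context is assumed, not part of this
# module): record a freshly stored UserMessage against the current user's
# collection. The message must already have been put() so that key().id() exists.
def example_record_message(message):
	from google.appengine.api import users
	collection = UserMessages.get_user_message_collection(users.get_current_user())
	collection.add_message(message)
	collection.put()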
|
On the nose there are notes of roses, earth, kirsch, and cocoa. In the mouth, the wine is concentrated and viscous, mirroring the nose, with big but balanced tannins and hints of black pepper.
This wine is an intense experience! The extended barrel time helped to develop robust aromas of black pepper, blackberry, currant, fig jam, molasses, and smoky oak. These notes follow through on the palate and combine with dried herbs, smoked meat, and cocoa. Enjoy big tannins, an imposing mouth-feel, and an unexpected brightness on the finish. Petit Verdot fans will want to keep this wine around for many years to enjoy how the complex flavors age.
Rarely seen bottled alone, Petit Verdot traditionally lends support to its more famous stablemate Cabernet Sauvignon. However, Huge Bear decided that we should put Petit Verdot in the bottle and enjoy its massive explosion of flavor! The low crop in 2015 made a monumental Petit Verdot with indelible purple-black color combined with scintillating flavors of chocolate, cassis, and mulberry.
This robust Cabernet Sauvignon is selected from a specific block on the Knights Bridge vineyard, and bolstered by just a touch of Petit Verdot. Aromas of red currant, blackberry, plum, and cedar abound in the glass. These notes follow through on the palate, accompanied by licorice, baking spices, and dark berry compote flavors. The dusty, yet gentle tannins lead to a sweet, powerful finish. This rich Cabernet is drinkable now, but will age for many years.
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains helper functions and classes for handling metadata.
"""
from functools import wraps
import warnings
from collections import OrderedDict
from collections.abc import Mapping
from copy import deepcopy
import numpy as np
from astropy.utils.exceptions import AstropyWarning
from astropy.utils.misc import dtype_bytes_or_chars
__all__ = ['MergeConflictError', 'MergeConflictWarning', 'MERGE_STRATEGIES',
'common_dtype', 'MergePlus', 'MergeNpConcatenate', 'MergeStrategy',
'MergeStrategyMeta', 'enable_merge_strategies', 'merge', 'MetaData',
'MetaAttribute']
class MergeConflictError(TypeError):
pass
class MergeConflictWarning(AstropyWarning):
pass
MERGE_STRATEGIES = []
def common_dtype(arrs):
"""
Use numpy to find the common dtype for a list of ndarrays.
Only allow arrays within the following fundamental numpy data types:
``np.bool_``, ``np.object_``, ``np.number``, ``np.character``, ``np.void``
Parameters
----------
arrs : list of ndarray
Arrays for which to find the common dtype
Returns
-------
dtype_str : str
        String representation of dtype (dtype ``str`` attribute)
"""
def dtype(arr):
return getattr(arr, 'dtype', np.dtype('O'))
np_types = (np.bool_, np.object_, np.number, np.character, np.void)
uniq_types = set(tuple(issubclass(dtype(arr).type, np_type) for np_type in np_types)
for arr in arrs)
if len(uniq_types) > 1:
# Embed into the exception the actual list of incompatible types.
incompat_types = [dtype(arr).name for arr in arrs]
tme = MergeConflictError(f'Arrays have incompatible types {incompat_types}')
tme._incompat_types = incompat_types
raise tme
arrs = [np.empty(1, dtype=dtype(arr)) for arr in arrs]
# For string-type arrays need to explicitly fill in non-zero
# values or the final arr_common = .. step is unpredictable.
for i, arr in enumerate(arrs):
if arr.dtype.kind in ('S', 'U'):
arrs[i] = [('0' if arr.dtype.kind == 'U' else b'0') *
dtype_bytes_or_chars(arr.dtype)]
arr_common = np.array([arr[0] for arr in arrs])
return arr_common.dtype.str
class MergeStrategyMeta(type):
"""
Metaclass that registers MergeStrategy subclasses into the
MERGE_STRATEGIES registry.
"""
def __new__(mcls, name, bases, members):
cls = super().__new__(mcls, name, bases, members)
# Wrap ``merge`` classmethod to catch any exception and re-raise as
# MergeConflictError.
if 'merge' in members and isinstance(members['merge'], classmethod):
orig_merge = members['merge'].__func__
@wraps(orig_merge)
def merge(cls, left, right):
try:
return orig_merge(cls, left, right)
except Exception as err:
raise MergeConflictError(err)
cls.merge = classmethod(merge)
# Register merging class (except for base MergeStrategy class)
if 'types' in members:
types = members['types']
if isinstance(types, tuple):
types = [types]
for left, right in reversed(types):
MERGE_STRATEGIES.insert(0, (left, right, cls))
return cls
class MergeStrategy(metaclass=MergeStrategyMeta):
"""
Base class for defining a strategy for merging metadata from two
sources, left and right, into a single output.
The primary functionality for the class is the ``merge(cls, left, right)``
class method. This takes ``left`` and ``right`` side arguments and
returns a single merged output.
The first class attribute is ``types``. This is defined as a list of
(left_types, right_types) tuples that indicate for which input types the
merge strategy applies. In determining whether to apply this merge
strategy to a pair of (left, right) objects, a test is done:
``isinstance(left, left_types) and isinstance(right, right_types)``. For
example::
types = [(np.ndarray, np.ndarray), # Two ndarrays
(np.ndarray, (list, tuple)), # ndarray and (list or tuple)
((list, tuple), np.ndarray)] # (list or tuple) and ndarray
As a convenience, ``types`` can be defined as a single two-tuple instead of
a list of two-tuples, e.g. ``types = (np.ndarray, np.ndarray)``.
The other class attribute is ``enabled``, which defaults to ``False`` in
the base class. By defining a subclass of ``MergeStrategy`` the new merge
strategy is automatically registered to be available for use in
merging. However, by default the new merge strategy is *not enabled*. This
prevents inadvertently changing the behavior of unrelated code that is
performing metadata merge operations.
In most cases (particularly in library code that others might use) it is
recommended to leave custom strategies disabled and use the
`~astropy.utils.metadata.enable_merge_strategies` context manager to locally
enable the desired strategies. However, if one is confident that the
new strategy will not produce unexpected behavior, then one can globally
enable it by setting the ``enabled`` class attribute to ``True``.
Examples
--------
Here we define a custom merge strategy that takes an int or float on
the left and right sides and returns a list with the two values.
>>> from astropy.utils.metadata import MergeStrategy
>>> class MergeNumbersAsList(MergeStrategy):
... types = ((int, float), (int, float)) # (left_types, right_types)
...
... @classmethod
... def merge(cls, left, right):
... return [left, right]
"""
# Set ``enabled = True`` to globally enable applying this merge strategy.
# This is not generally recommended.
enabled = False
# types = [(left_types, right_types), ...]
class MergePlus(MergeStrategy):
"""
Merge ``left`` and ``right`` objects using the plus operator. This
merge strategy is globally enabled by default.
"""
types = [(list, list), (tuple, tuple)]
enabled = True
@classmethod
def merge(cls, left, right):
return left + right
class MergeNpConcatenate(MergeStrategy):
"""
Merge ``left`` and ``right`` objects using np.concatenate. This
merge strategy is globally enabled by default.
This will upcast a list or tuple to np.ndarray and the output is
always ndarray.
"""
types = [(np.ndarray, np.ndarray),
(np.ndarray, (list, tuple)),
((list, tuple), np.ndarray)]
enabled = True
@classmethod
def merge(cls, left, right):
left, right = np.asanyarray(left), np.asanyarray(right)
common_dtype([left, right]) # Ensure left and right have compatible dtype
return np.concatenate([left, right])
def _both_isinstance(left, right, cls):
return isinstance(left, cls) and isinstance(right, cls)
def _not_equal(left, right):
try:
return bool(left != right)
except Exception:
return True
class _EnableMergeStrategies:
def __init__(self, *merge_strategies):
self.merge_strategies = merge_strategies
self.orig_enabled = {}
for left_type, right_type, merge_strategy in MERGE_STRATEGIES:
if issubclass(merge_strategy, merge_strategies):
self.orig_enabled[merge_strategy] = merge_strategy.enabled
merge_strategy.enabled = True
def __enter__(self):
pass
def __exit__(self, type, value, tb):
for merge_strategy, enabled in self.orig_enabled.items():
merge_strategy.enabled = enabled
def enable_merge_strategies(*merge_strategies):
"""
Context manager to temporarily enable one or more custom metadata merge
strategies.
Examples
--------
Here we define a custom merge strategy that takes an int or float on
the left and right sides and returns a list with the two values.
>>> from astropy.utils.metadata import MergeStrategy
>>> class MergeNumbersAsList(MergeStrategy):
... types = ((int, float), # left side types
... (int, float)) # right side types
... @classmethod
... def merge(cls, left, right):
... return [left, right]
By defining this class the merge strategy is automatically registered to be
available for use in merging. However, by default new merge strategies are
*not enabled*. This prevents inadvertently changing the behavior of
unrelated code that is performing metadata merge operations.
In order to use the new merge strategy, use this context manager as in the
following example::
>>> from astropy.table import Table, vstack
>>> from astropy.utils.metadata import enable_merge_strategies
>>> t1 = Table([[1]], names=['a'])
>>> t2 = Table([[2]], names=['a'])
>>> t1.meta = {'m': 1}
>>> t2.meta = {'m': 2}
>>> with enable_merge_strategies(MergeNumbersAsList):
... t12 = vstack([t1, t2])
>>> t12.meta['m']
[1, 2]
One can supply further merge strategies as additional arguments to the
context manager.
As a convenience, the enabling operation is actually done by checking
whether the registered strategies are subclasses of the context manager
arguments. This means one can define a related set of merge strategies and
then enable them all at once by enabling the base class. As a trivial
example, *all* registered merge strategies can be enabled with::
>>> with enable_merge_strategies(MergeStrategy):
... t12 = vstack([t1, t2])
Parameters
----------
*merge_strategies : `~astropy.utils.metadata.MergeStrategy`
Merge strategies that will be enabled.
"""
return _EnableMergeStrategies(*merge_strategies)
def _warn_str_func(key, left, right):
out = ('Cannot merge meta key {0!r} types {1!r}'
' and {2!r}, choosing {0}={3!r}'
.format(key, type(left), type(right), right))
return out
def _error_str_func(key, left, right):
out = f'Cannot merge meta key {key!r} types {type(left)!r} and {type(right)!r}'
return out
def merge(left, right, merge_func=None, metadata_conflicts='warn',
warn_str_func=_warn_str_func,
error_str_func=_error_str_func):
"""
Merge the ``left`` and ``right`` metadata objects.
This is a simplistic and limited implementation at this point.
"""
if not _both_isinstance(left, right, dict):
raise MergeConflictError('Can only merge two dict-based objects')
out = deepcopy(left)
for key, val in right.items():
# If no conflict then insert val into out dict and continue
if key not in out:
out[key] = deepcopy(val)
continue
# There is a conflict that must be resolved
if _both_isinstance(left[key], right[key], dict):
out[key] = merge(left[key], right[key], merge_func,
metadata_conflicts=metadata_conflicts)
else:
try:
if merge_func is None:
for left_type, right_type, merge_cls in MERGE_STRATEGIES:
if not merge_cls.enabled:
continue
if (isinstance(left[key], left_type) and
isinstance(right[key], right_type)):
out[key] = merge_cls.merge(left[key], right[key])
break
else:
raise MergeConflictError
else:
out[key] = merge_func(left[key], right[key])
except MergeConflictError:
# Pick the metadata item that is not None, or they are both not
# None, then if they are equal, there is no conflict, and if
# they are different, there is a conflict and we pick the one
# on the right (or raise an error).
if left[key] is None:
# This may not seem necessary since out[key] gets set to
# right[key], but not all objects support != which is
# needed for one of the if clauses.
out[key] = right[key]
elif right[key] is None:
out[key] = left[key]
elif _not_equal(left[key], right[key]):
if metadata_conflicts == 'warn':
warnings.warn(warn_str_func(key, left[key], right[key]),
MergeConflictWarning)
elif metadata_conflicts == 'error':
raise MergeConflictError(error_str_func(key, left[key], right[key]))
elif metadata_conflicts != 'silent':
raise ValueError('metadata_conflicts argument must be one '
'of "silent", "warn", or "error"')
out[key] = right[key]
else:
out[key] = right[key]
return out
class MetaData:
"""
A descriptor for classes that have a ``meta`` property.
This can be set to any valid `~collections.abc.Mapping`.
Parameters
----------
doc : `str`, optional
Documentation for the attribute of the class.
Default is ``""``.
.. versionadded:: 1.2
copy : `bool`, optional
        If ``True`` the value is deepcopied before setting, otherwise it
        is saved as a reference.
Default is ``True``.
.. versionadded:: 1.2
"""
def __init__(self, doc="", copy=True):
self.__doc__ = doc
self.copy = copy
def __get__(self, instance, owner):
if instance is None:
return self
if not hasattr(instance, '_meta'):
instance._meta = OrderedDict()
return instance._meta
def __set__(self, instance, value):
if value is None:
instance._meta = OrderedDict()
else:
if isinstance(value, Mapping):
if self.copy:
instance._meta = deepcopy(value)
else:
instance._meta = value
else:
raise TypeError("meta attribute must be dict-like")
class MetaAttribute:
"""
Descriptor to define custom attribute which gets stored in the object
``meta`` dict and can have a defined default.
This descriptor is intended to provide a convenient way to add attributes
to a subclass of a complex class such as ``Table`` or ``NDData``.
This requires that the object has an attribute ``meta`` which is a
dict-like object. The value of the MetaAttribute will be stored in a
new dict meta['__attributes__'] that is created when required.
Classes that define MetaAttributes are encouraged to support initializing
the attributes via the class ``__init__``. For example::
for attr in list(kwargs):
descr = getattr(self.__class__, attr, None)
if isinstance(descr, MetaAttribute):
setattr(self, attr, kwargs.pop(attr))
The name of a ``MetaAttribute`` cannot be the same as any of the following:
- Keyword argument in the owner class ``__init__``
- Method or attribute of the "parent class", where the parent class is
taken to be ``owner.__mro__[1]``.
:param default: default value
"""
def __init__(self, default=None):
self.default = default
def __get__(self, instance, owner):
# When called without an instance, return self to allow access
# to descriptor attributes.
if instance is None:
return self
# If default is None and value has not been set already then return None
        # without touching meta['__attributes__'] at all. This helps e.g.
# with the Table._hidden_columns attribute so it doesn't auto-create
# meta['__attributes__'] always.
if (self.default is None
and self.name not in instance.meta.get('__attributes__', {})):
return None
# Get the __attributes__ dict and create if not there already.
attributes = instance.meta.setdefault('__attributes__', {})
try:
value = attributes[self.name]
except KeyError:
if self.default is not None:
attributes[self.name] = deepcopy(self.default)
# Return either specified default or None
value = attributes.get(self.name)
return value
def __set__(self, instance, value):
# Get the __attributes__ dict and create if not there already.
attributes = instance.meta.setdefault('__attributes__', {})
attributes[self.name] = value
def __delete__(self, instance):
# Remove this attribute from meta['__attributes__'] if it exists.
if '__attributes__' in instance.meta:
attrs = instance.meta['__attributes__']
if self.name in attrs:
del attrs[self.name]
# If this was the last attribute then remove the meta key as well
if not attrs:
del instance.meta['__attributes__']
def __set_name__(self, owner, name):
import inspect
params = [param.name for param in inspect.signature(owner).parameters.values()
if param.kind not in (inspect.Parameter.VAR_KEYWORD,
inspect.Parameter.VAR_POSITIONAL)]
# Reject names from existing params or best guess at parent class
if name in params or hasattr(owner.__mro__[1], name):
raise ValueError(f'{name} not allowed as {self.__class__.__name__}')
self.name = name
def __repr__(self):
return f'<{self.__class__.__name__} name={self.name} default={self.default}>'
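# Usage sketch (the subclass and attribute names below are illustrative): attach a
# custom, meta-backed attribute to a Table subclass. The value lives under
# meta['__attributes__'] and therefore travels with the table's metadata.
#
#     >>> from astropy.table import Table
#     >>> class DataTable(Table):
#     ...     observer = MetaAttribute(default='unknown')
#     >>> t = DataTable([[1, 2]], names=['a'])
#     >>> t.observer
#     'unknown'
#     >>> t.observer = 'M. Example'
#     >>> t.meta['__attributes__']['observer']
#     'M. Example'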
|
Join us for our Annual Membership BBQ on Thursday, September 13 at Community Business Bank! This BBQ is to celebrate all of our members, and if you are interested in having a booth space to promote your business, contact Marina Narvarte at [email protected]. Each Chamber Member will receive 2 complimentary tickets to this event, but additional tickets can be purchased for $5 each by clicking the link above. We look forward to spending the evening BBQing with all of our wonderful members.
|
import random
import sys
someconst = 3
def inc(x):
return x + 1
def very_understandable_function(x=5):
def get_eleet():
return x
import platform
print 'Hello, %s (%s)' % (platform.platform(), platform.architecture()[0])
r = 10
print 'I like doing stuff with number: %r' % (r % 42)
for i in range(r):
print i + get_eleet(), get_eleet()
if (r % 10):
print 'wUuUUt'
else:
print 'dont care!'
with open('success', 'w') as f:
f.write('yoooo seems to work bra!')
return 0xdeadbeef
#print 'aaa'
class NewStyleClass(object):
#print 'newstyle'
def __init__(self):
super(NewStyleClass, self).__init__()
def doit(self):
print 'i am new'
class NewStyleClassCustomInit(object):
#print 'newstyle'
def __init__(self):
pass
def doit(self):
print 'i am new'
#print 'between'
class OldStyleClass:
#print 'oldstyle'
def doit(self):
print 'i am old'
#print 'bbb'
def generate_random_strings():
"""Generate a random string"""
print 'ucucuga'
charset = map(chr, range(0, 0x100))
print 'ucucuga1'
return ''.join(random.choice(charset) for i in range(random.randint(10, 100)))
if __name__ == '__main__':
very_understandable_function(293)
NewStyleClass().doit()
OldStyleClass().doit()
for i in xrange(10):
print inc(i)
generate_random_strings()
print someconst
|
fetchfile.net is a free online application that lets you download videos from NPO (npo.nl and ntr.nl) quickly and for free. It is a simple web service for downloading your favorite video in different formats (mp4, webm, audio, 3gpp, x-flv) and quality levels (480p, HD, FullHD, UltraHD). You can also save the file to Dropbox.
|
from phyltr import build_pipeline
from phyltr.commands.clades import Clades
def test_init_from_args():
clades = Clades.init_from_args("")
assert clades.opts.frequency == 0.0
assert clades.opts.ages == False
clades = Clades.init_from_args("--ages")
assert clades.opts.ages == True
clades = Clades.init_from_args("-f 0.42")
assert clades.opts.frequency == 0.42
def test_clades(basictrees):
clades = Clades(ages=True)
# Spin through all trees
list(clades.consume(basictrees))
# Check that the computed probabilities agree
# with hand calculated equivalents
assert clades.cp.clade_probs["A B"] == 4.0 / 6.0
assert clades.cp.clade_probs["A C"] == 2.0 / 6.0
assert clades.cp.clade_probs["A B C"] == 5.0 / 6.0
assert clades.cp.clade_probs["E F"] == 3.0 / 6.0
assert clades.cp.clade_probs["A C"] == 2.0 / 6.0
assert clades.cp.clade_probs["D F"] == 1.0 / 6.0
assert clades.cp.clade_probs["D E"] == 1.0 / 6.0
assert clades.cp.clade_probs["C E"] == 1.0 / 6.0
assert clades.cp.clade_probs["D E F"] == 5.0 / 6.0
assert clades.cp.clade_probs["A B C D E F"] == 6.0 / 6.0
def test_degenerate_clades(treefilenewick):
clades = Clades(ages=True)
list(clades.consume(treefilenewick('single_taxon.trees')))
def test_categorical_annotation(treefilenewick):
    # This is just to make sure the clade probability calculator doesn't
# erroneously try to calculate means etc. of categorical annotations
list(build_pipeline(
"annotate -f tests/argfiles/categorical_annotation.csv -k taxon | clades",
treefilenewick('basic.trees')))
|
been seeing the results they want.
— a ton more…just watch the video!
Now they know. They REALLY know.
And their bank accounts prove it.
for themselves on their own schedule.
I will give you a valuable bonus!
What is the Cross Channel Mojo Bonus?
for 4 episodes starting in January, 2012.
are created from the show each week.
How do you claim your Cross Channel Mojo Bonus?
a commission for your purchase).
The Tampa Bay Rays did something that people never thought they could pull off.
They were down 7 – 0 to the New York Yankees and went almost hitless into the 8th inning. Then with a flurry of activity managed to score 6 runs in the 8th inning and one in the ninth to tie the game. Then they managed to keep the Yankees scoreless through three more innings until they scored in the 12th to get into the playoffs. To say this game was a must win was an understatement.
But their manager was repeating the dream to them over and over again. He had built a power team of players who don’t have a record breaking salary… but they work well together to achieve the victories that they need!
Deb Spicer, the President and CEO of Quantum Level Success, is launching a new book today called Power Teams: The New SQUARE ROOT MODEL That Changes Everything! (Volume 1).
This book is designed to help you build and support teams through the victories and challenges that occur day to day. Discover how to make the biggest impact and drive your teams to success with Power Teams!
I just watched a 3 minute video that really made me think… is your dream a one shot dream?
Are you willing to stake your life on your dream?
Dreams are obviously very powerful. They can drive you to work harder than anyone else, lose sleep to get more done and stay upbeat in the face of struggles that would make mere mortals cry like a baby.
Enter Kiwi. A bird who has dreams of flying, and his vision is driving him to work hard. Spoiler alert: Watch the video now, because I am about to reveal what happens.
Ok. Kiwi is working hard; at first you wonder what the heck he is doing. Then as you see him put the finishing touches on his handiwork, you start to see the bigger picture. He is nailing trees to the side of a cliff. And this is one humdinger of a cliff, too.
When he looks down, you can’t see the bottom, because of the fog, but it has trees dotted all along the cliff as far as the eye can see.
When he is finally finished looking down, he takes a running start and dives off the cliff, and imagines himself flying past all the trees, his dream finally realized.
And isn’t that what we are in danger of doing when we build dreams that are not realistic?
Having a mentor is the best way to get the best possible advice when you are setting your sights on a dream.
A Mentor can ask you the right questions. They can give you some options. They might even save your life.
When speaking about dreaming, I am not using the standard definition of what you see while you are sleeping. A dream is something that you want so bad that you are willing to put forth the effort to achieve it.
I once had someone comment that dreams are not healthy, because he was 45 and dreamed of being a pro football running back, but that was not likely to happen. I countered that he was not dreaming of being a running back, but fantasizing about being one. The difference is that he was not willing to put forth the extreme amount of effort it would take to become a pro football running back at his age. Many running backs who are already in the NFL aren't willing to put forth that much effort.
Because of his circumstances (his age), he would need to work harder than any running back currently in the NFL to condition his body. It is not an impossibility, just not probable that he had what it took to make his dream come true. Hence, he fantasizes about being a running back, because the effort is not going to happen.
Life gets in the way of everyone’s plans. We make plans, set goals and then life says, “Wait a minute, buddy! Your car needs a transmission – NOW!” These things are not planned; they take us off our game long enough to slow down our pursuit. The problem with life getting in the way is that people take it to mean that their plans or goals were not meant to be. They usually give up and then fall back into their comfort zone.
A dream, however, is more than just a goal or a plan. In fact, a dream is much bigger. A dream is what you use to set your goals. Your dream should be big enough that you have to set tough goals to get to the next step. Making $1000 a day is not a dream. That could be a goal, but not the dream in itself.
Having a beautiful 2 story house with 4 bedrooms, 3 baths and a 4 car garage is closer to being a dream. In fact, if it is something you are truly interested in working for, then it is a dream worth dreaming. Fantasizing about a house is much easier, since the work is not necessary.
Once you have the dream firmly planted in your head and your heart, it is time to work out the plan to help you get your dream. You first go and see houses similar to the one you are dreaming about owning. Find out the prices, what it costs to maintain, how much the taxes are where you are planning on living. This helps you cement the details of your dream and make decisions about each individual detail, so that the house of your dreams is really going to solidify for you.
Now that you have the dream, it is time to use that dream as the blueprint for your goals. You will set goals for your income, then set goals for how to achieve that income. Next you might set a goal for finding the perfect community for that house. These are goals that you know will be a stretch for you, yet with enough applied activity (some call this work) you will be able to achieve them.
Without the dream, the motivation to work through the struggles will not accompany the goals. When that transmission falls out on the freeway, you might be tempted to cancel all your appointments for the rest of the week. Having a dream that instills a sense of urgency in you will cause you to reschedule the one that is directly impacted by the transmission problem, then find a way to make the rest of them by getting a ride, taking a taxi, renting a car or changing the location of your appointment. The dream motivates you to keep moving toward your goals, so that you can keep moving toward your dream.
Obviously, we are simplifying everything, because nothing happens in a vacuum. When you start to dream, you have to realize that once you get close to achieving the dream, it is time to dream bigger. Once you have broken ground on your dream house, you should then start looking at something bigger.
I am using material dreams for illustrative purposes only. By no means does a dream have to be something material. In fact, having an altruistic dream is also very motivational. Maybe your dream is to stop world hunger. Your overall vision can be global, but you are going to want to segment your dream to first impact one subset of the overall vision. So eliminating hunger in Citrus County, Florida is probably your first dream. Work toward that dream and as you get close to achieving that vision, zoom out and add the rest of Florida to your dream, so that you don’t stop once your dream has been attained.
Having a larger overall dream is great, because you have the ability to plan your local campaigns with the idea that they can be expanded into more global campaigns once you are ready to take the next step.
In the house example, you might start thinking about vacationing in various exotic locations throughout the year. You might start reading up on those locations, find out when the best times of the year to visit would be, how much it normally costs, what you would need to bring along and any other details that accompany these travel plans. These are going to be your details to start planning new goals once your first dream has been achieved.
The problem with achieving dreams is the fact that many stop dreaming after reaching the first one. I had a friend who had a dream to marry a beautiful woman. He was driven to find this beautiful girl and when he found her and married her, that was it. He figured he would be happy, since his dream was achieved.
Unfortunately, he became complacent, and did not work on his marriage. He eventually began drinking and his beautiful wife left him. He was devastated and went further into alcohol and drugs. I really do not know what happened to him, because he dropped off the radar, and I moved away. I hope he found another dream.
While that is an extreme case, your dreams are what prevent you from falling into decline. As you continue to work to build a business, a career or a retirement, you need something to motivate you past all of the roadblocks. If all you see are the obstacles, that means your dream is too small. Your dream should rise above the transmission problems, broken legs and sick loved ones. I am not saying that you should ignore those obstacles, but you should handle them with the thought that you are still working toward your goals in spite of the obstacles.
Micheal Savoie writes for Resolve To Succeed as well as many other blogs. Leave a comment if you have an opinion one way or the other.
Admit it: you took one look at the headline of my blog and you thought, “Battlefield?”
I didn’t quite understand that, but it sounded good, and I began looking for what I wanted. Not just things, though, because a dream does not have to be for tangible things. Sometimes, it is for a state of mind, or a better you. Those were harder to put onto a vision board, and the fact that I knew what they were but could not picture them troubled me.
I didn’t realize that I was stepping onto a battlefield as soon as I envisioned a dream. The world may be run by dreamers, but it despises dreamers just the same. Those at the top have huge visions for their lives and for the general population, but everyone in between is stuck with the vision that they are handed by their boss (get this done by Friday at 3PM or you are fired).
The second you let anyone in on your vision is the beginning of the battle for your dream. Up until that time, you are a noncombatant enlistee in the war, but you aren’t ready to hit the battlefield until you are ready to share your dreams. At that point, whether you are prepared for it or not, the battle is on!
People will ridicule you for thinking you can rise above the rest of the people in your peer group. Friends and family who think they are helping you will tell you to get your head out of the clouds. And life itself will appear to convolute itself in such a way as to make it seemingly impossible to make any progress towards your dreams.
The battle begins within you before it ever comes to the rest of the world. You are audacious enough to believe that you can get out of your comfort zone and actually succeed at something that you have wanted your whole life. Your inner consciousness is rebelling at the thought of having to get out of the comfort zone it has grown accustomed to. Your subconscious still believes the negativity that people (and your own self-talk) have been feeding it for years, and it works hard to ensure that those beliefs are realized.
This fight is 90% internal.
So let’s work together here to fight the battles that we know we can win, and set ourselves up for success in the battles that we will need help with. This blog is here for you. I would like to hear your stories of success and those where you can identify where you could have done things better to get a favorable outcome. There are no failures, except when we stop trying.
|
###############################################################################
##
## Copyright (C) 2011-2013 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
import sys
from twisted.internet import reactor
from twisted.python import log
from autobahn.twisted.websocket import WebSocketClientFactory, \
WebSocketClientProtocol, \
connectWS
class EchoClientProtocol(WebSocketClientProtocol):
def sendHello(self):
self.sendMessage("Hello, world!".encode('utf8'))
def onOpen(self):
self.sendHello()
def onMessage(self, payload, isBinary):
if not isBinary:
print("Text message received: {}".format(payload.decode('utf8')))
reactor.callLater(1, self.sendHello)
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Need the WebSocket server address, i.e. ws://localhost:9000")
sys.exit(1)
if len(sys.argv) < 3:
print("Need the Proxy, i.e. 192.168.1.100:8050")
sys.exit(1)
proxyHost, proxyPort = sys.argv[2].split(":")
proxy = {'host': proxyHost, 'port': int(proxyPort)}
if len(sys.argv) > 3 and sys.argv[3] == 'debug':
log.startLogging(sys.stdout)
debug = True
else:
debug = False
factory = WebSocketClientFactory(sys.argv[1],
proxy = proxy,
debug = debug,
debugCodePaths = debug)
# uncomment to use Hixie-76 protocol
#factory.setProtocolOptions(allowHixie76 = True, version = 0)
factory.protocol = EchoClientProtocol
connectWS(factory)
reactor.run()
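# Invocation sketch (the script filename below is assumed): connect to an echo
# server through the given proxy, with optional debug logging, e.g.
#
#     python echoclient_withproxy.py ws://localhost:9000 192.168.1.100:8050 debug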
|
We at Treemendous Tree Care are one of the leading companies in this space and are highly focused on providing our customers with the best tree removal in Tempe. We are a very well-established and credible company in this industry and have handled scores of tree removals since our inception.
In all these cases, you would need to hire the services of professional Tempe tree removal experts.
For any more information on the different types of tree removal services we provide in Tempe, feel free to call Treemendous Tree Care on 0410 976 081 and speak with our helpful customer service representatives about your requirement. You can also send us your queries, or request a free, no-obligation quote, via this online form.
|
# coding:utf-8
from _prototype import plugin_prototype
import sys
import re
import os
from cross_platform import *
# start meta
__plugin_name__ = 'query information of player'
__author = 'fffonion'
__version__ = 0.40
hooks = {}
extra_cmd = {'q_item':'query_item', 'qi':'query_item', 'q_holo':'query_holo', 'qh':'query_holo', 'qgc':'query_guild_contribution','q_rank':'query_rank','qr':'query_rank'}
# end meta
# query item count
def query_item(plugin_vals):
def do(*args):
logger = plugin_vals['logger']
if 'player' not in plugin_vals or not plugin_vals['player'].item.db:
logger.error('玩家信息未初始化,请随便执行一个操作再试')
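            # The message above means: "Player info not initialized; perform any action first, then try again."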
return
print(du8('%-17s%s' % ('物品', '个数')))
print('-' * 30)
for (i, [n, j]) in plugin_vals['player'].item.db.items():
if j > 0: # has
# calc utf-8 length
l1 = len(n) # ascii length
n = raw_du8(n)
l2 = len(n) # char count
print(safestr('%s%s%s' % (n, ' ' * int(15 - l2 - (l1 - l2) / 2), j)))
return do
# query holo cards
def query_holo(plugin_vals):
def do(*args):
logger = plugin_vals['logger']
if 'player' not in plugin_vals or not plugin_vals['player'].item.db:
logger.error('玩家信息未初始化,请随便执行一个操作再试')
return
print(du8('%s' % ('当前拥有以下闪卡')))
print('-' * 30)
_player = plugin_vals['player']
cache = []
for c in _player.card.cards:
if c.holography == 1:
ca = _player.card.db[c.master_card_id]
cache.append((ca[0], ca[1], c.lv, c.hp, c.power))
cache = sorted(cache, key = lambda l:(l[1], l[2]))
print('\n'.join(map(lambda x:du8('[%s] ☆%d Lv%d HP:%d ATK:%d' % x), cache)))
return do
def query_guild_contribution(plugin_vals):
def do(*args):
lines = []
if plugin_vals['loc'][:2] == 'cn':
lines += open('events_cn.log').read().split('\n')
elif plugin_vals['loc'] == 'tw':
lines += open('events_tw.log').read().split('\n')
else:
print(du8('不支持%s的查询'%plugin_vals['loc']))
return
if os.path.exists('.IGF.log'):
lines += open('.IGF.log').read().split('\n')
pname, total = plugin_vals['player'].item.db[8001]
cnt = 0
for l in lines:
c = re.findall(pname+'\]\:\+(\d+)\(',l)
if c:
cnt += int(c[0])
print(du8('公会贡献: %d/ %s'%(cnt,total or '?')))
return do
def query_rank(plugin_vals):
def do(*args):
logger = plugin_vals['logger']
loc = plugin_vals['loc']
if loc[:2] not in ['cn','tw']:
logger.error('排位查询不支持日服和韩服')
return
if loc == 'tw':
import _query_rank_tw_lib as _lib
import re
import urllib
if PYTHON3:
import urllib.request as urllib2
opener = urllib2.build_opener(urllib2.ProxyHandler(urllib.request.getproxies()))
else:
import urllib2
opener = urllib2.build_opener(urllib2.ProxyHandler(urllib.getproxies()))
def show_it(content):
strl = '\n%s\n%s\n' %(_lib.query_title(content),'-'*20)
for (k, v) in _lib.query_regex[_guild_mode + _coll_mode if not _country_mode else -1]:
try:
strl += '%s %s\n' % (k, v(content))
except IndexError:
pass
logger.info(strl)
_header = _lib.broswer_headers
_header['cookie'] = plugin_vals['cookie']
_header['User-Agent'] = plugin_vals['poster'].header['User-Agent']
_guild_mode = 2 if raw_inputd('查询个人排名(s)(默认)还是公会排名(g)> ') == 'g' else 0
_country_mode = 0
if not _guild_mode and _lib.query_country_id:#build country selection
ctotal = len(_lib.query_country_id)
while True:
print(du8('\n'.join(['%d.%s' % (i + 1, _lib.query_country_id[i][0]) for i in range(ctotal)])))
_sel = raw_input('> ')
if _sel.isdigit() and 0 < int(_sel) <= ctotal:
_country_mode = _lib.query_country_id[int(_sel) - 1][1]
break
while True:
_goto = raw_inputd('输入要查询的排名开始数,按回车显示自己所在区域> ')
if not _goto or (
_goto.isdigit() and \
((0<int(_goto)<=20000 and not _guild_mode) or (0<int(_goto)<=2000 and _guild_mode))):
break
logger.error('请输入%d以内0以上的数字' % (2000 if _guild_mode else 20000))
#automatically judge
if not _country_mode and ((_guild_mode and _lib.query_rev[2] and _lib.query_rev[3]) or \
(not _guild_mode and _lib.query_rev[0] and _lib.query_rev[1]) or _goto):
_coll_mode = 1 if raw_inputd('查询收集品排名(c)(默认)还是妖精加权排名(f)> ') != 'f' else 0
else:
_coll_mode = (1 if _lib.query_rev[3] else 0) if _guild_mode else \
(1 if _lib.query_rev[1] else 0)
#request
if _country_mode:
if _goto:
_gurl = _lib.query_goto[-1]
x = opener.open(urllib2.Request(_gurl % (_goto, _country_mode),headers = _header)).read()
else:
_gurl = _lib.query_country
x = opener.open(urllib2.Request(_gurl % _country_mode,headers = _header)).read()
elif _goto:
_gurl = _lib.query_goto[_guild_mode + _coll_mode]
x = opener.open(urllib2.Request(_gurl % _goto,headers = _header)).read()
# if True:
# x = open(r'z:/test.htm').read()
else:
_rev = _lib.query_rev[_guild_mode + _coll_mode]
if not _rev:
logger.error('版本不存在,可能是当前活动没有该排名\n请尝试升级_query_rank_lib,或指定排名区域查询')
return
if _lib.now >= _lib.query_lifetime:
logger.error('查询库已过期,请升级_query_rank_lib为新版本\n或指定排名区域查询')
return
_url = _lib.query_base % _rev
x = opener.open(urllib2.Request(_url, headers = _header)).read()
try:
show_it(_lib.pre(x))
except IndexError:
logger.warning('匹配失败,请重新登录;如果问题仍然存在,请更新插件')
else:#cn
from xml2dict import XML2Dict
po = plugin_vals['poster']
po.post('menu/menulist')
sel_rankid = 0
to_top = False
while True:
resp, ct = po.post('ranking/ranking', postdata='move=%d&ranktype_id=%d&top=%d' % (
1 if sel_rankid == 0 else 0, sel_rankid, 1 if to_top else 0))
ct = XML2Dict.fromstring(ct).response.body.ranking
ranktype_id = int(ct.ranktype_id)
allranks = ct.ranktype_list.ranktype
rank_name = allranks[ranktype_id - 1].title
try:
_user = ct.user_list.user
except KeyError:
logger.warning('暂未列入排行榜,请继续努力ww')
return
if not to_top:
me = [_i for _i in _user if _i.id == plugin_vals['player'].id][0]
logger.info(rank_name +
(not to_top and '\n排名:%s 点数:%s\n' % (me.rank, me.battle_event_point) or '\n') +
'可见区域内 Up:%s/%s Down:%s/%s' % (
_user[0].rank, _user[0].battle_event_point,
_user[-1].rank, _user[-1].battle_event_point)
)
while True:
_inp = raw_inputd('\n输入序号查询其他排行:(9.排名至顶 0.退出)\n%s\n> ' %
('\n'.join(map(lambda x : '%s.%s' % (x.id, x.title), allranks)))
) or '0'
if not _inp.isdigit():
continue
else:
if _inp == '0':
return
if _inp == '9':
to_top = True
else:
sel_rankid = int(_inp)
to_top = False
break
return do
|
This countertop sign holder is perfect for displaying tickets, prices and product specifications clearly to your customers. Spring Sign Holders are particularly popular within cafes and delicatessens. It features a stylish round base with a clear plastic finish to ensure it will not detract from your display.
The small sign holder features a 5cm base and the large features a 7cm base.
The clip is 7cm wide to hold larger tickets on both styles. Updating the contents couldn't be easier thanks to the simple spring clip design.
|
from django.contrib import admin
from django.core.urlresolvers import reverse
from django.forms import ModelForm, ValidationError
from .models import Student, Group, Exam, MonthJournal
class StudentAdminForm(ModelForm):
def clean_student_group(self):
"""Check if student is leader in any group.
If yes, then ensure it's the same as selected group."""
# get group where current student is leader
groups = Group.objects.filter(leader=self.instance)
if len(groups) > 0 and \
self.cleaned_data['student_group'] != groups[0]:
raise ValidationError("Студент є старостою іншої групи.",
code='invalid')
return self.cleaned_data['student_group']
class StudentAdmin(admin.ModelAdmin):
list_display = ['last_name', 'first_name', 'ticket', 'student_group']
list_display_links = ['last_name', 'first_name']
list_editable = ['student_group']
ordering = ['last_name']
list_filter = ['student_group']
list_per_page = 10
search_fields = ['last_name', 'first_name', 'middle_name', 'ticket',
'notes']
form = StudentAdminForm
def view_on_site(self, obj):
return reverse('students_edit', kwargs={'pk': obj.id})
class GroupAdmin(admin.ModelAdmin):
list_display = ['title', 'leader']
list_display_links = ['title']
list_editable = ['leader']
ordering = ['title']
# list_filter = ['title']
list_per_page = 10
    search_fields = ['title', 'leader__last_name', 'leader__first_name']
def view_on_site(self, obj):
return reverse('groups_edit', kwargs={'pk': obj.id})
admin.site.register(Student, StudentAdmin)
admin.site.register(Group, GroupAdmin)
admin.site.register(Exam)
admin.site.register(MonthJournal)
|
A simple, yet striking design, this outline shows just how powerfully recognisable the shape of the African continent can be.
The African landscape has been divided by so many imaginary lines, leading to so many countries and giving rise to so much conflict.
In this design we can see how, when stripped of all this internal noise and confusion, it is one, continuous line that encompasses us all and solidifies our place on the global stage.
If the continuous brush stroke for the continent wasn’t enough, the artist has included a seamless cursive script, forever intertwining the message of love with the land.
An understated combination, the silver and deep heather grey keep this shirt a little more formal than an everyday athletic heather, for days spent indoors near the end of autumn.
|
"""Preprocessing (SSP and filtering)."""
from collections import Counter
import os
import os.path as op
import warnings
import numpy as np
from mne import (concatenate_raws, compute_proj_evoked, compute_proj_epochs,
write_proj, pick_types, Epochs, compute_proj_raw, read_proj,
make_fixed_length_events, write_events)
from mne.preprocessing import find_ecg_events, find_eog_events
from mne.filter import filter_data
from mne.io import read_raw_fif
from mne.viz import plot_drop_log
from mne.utils import _pl
from ._epoching import _raise_bad_epochs
from ._paths import get_raw_fnames, get_bad_fname
from ._utils import (get_args, _fix_raw_eog_cals, _handle_dict, _safe_remove,
_get_baseline, _restrict_reject_flat, _get_epo_kwargs)
def _get_fir_kwargs(fir_design):
"""Get FIR kwargs in backward-compatible way."""
fir_kwargs = dict()
old_kwargs = dict()
if 'fir_design' in get_args(filter_data):
fir_kwargs.update(fir_design=fir_design)
old_kwargs.update(fir_design='firwin2')
elif fir_design != 'firwin2':
raise RuntimeError('cannot use fir_design=%s with old MNE'
% fir_design)
return fir_kwargs, old_kwargs
# noinspection PyPep8Naming
def _raw_LRFCP(raw_names, sfreq, l_freq, h_freq, n_jobs, n_jobs_resample,
projs, bad_file, disp_files=False, method='fir',
filter_length=32768, apply_proj=True, preload=True,
force_bads=False, l_trans=0.5, h_trans=0.5,
allow_maxshield=False, phase='zero-double', fir_window='hann',
fir_design='firwin2', pick=True,
skip_by_annotation=('bad', 'skip')):
"""Helper to load, filter, concatenate, then project raw files"""
from mne.io.proj import _needs_eeg_average_ref_proj
from ._sss import _read_raw_prebad
if isinstance(raw_names, str):
raw_names = [raw_names]
if disp_files:
print(f' Loading and filtering {len(raw_names)} '
f'file{_pl(raw_names)}.')
raw = list()
for ri, rn in enumerate(raw_names):
if isinstance(bad_file, tuple):
p, subj, kwargs = bad_file
r = _read_raw_prebad(p, subj, rn, disp=(ri == 0), **kwargs)
else:
r = read_raw_fif(rn, preload=True, allow_maxshield='yes')
r.load_bad_channels(bad_file, force=force_bads)
if pick:
r.pick_types(meg=True, eeg=True, eog=True, ecg=True, exclude=[])
if _needs_eeg_average_ref_proj(r.info):
r.set_eeg_reference(projection=True)
if sfreq is not None:
r.resample(sfreq, n_jobs=n_jobs_resample, npad='auto')
fir_kwargs = _get_fir_kwargs(fir_design)[0]
if l_freq is not None or h_freq is not None:
r.filter(l_freq=l_freq, h_freq=h_freq, picks=None,
n_jobs=n_jobs, method=method,
filter_length=filter_length, phase=phase,
l_trans_bandwidth=l_trans, h_trans_bandwidth=h_trans,
fir_window=fir_window, **fir_kwargs)
raw.append(r)
_fix_raw_eog_cals(raw)
raws_del = raw[1:]
raw = concatenate_raws(raw, preload=preload)
for r in raws_del:
del r
if disp_files and apply_proj and len(projs) > 0:
print(' Adding and applying projectors.')
raw.add_proj(projs)
if apply_proj:
raw.apply_proj()
return raw
def compute_proj_wrap(epochs, average, **kwargs):
if average:
return compute_proj_evoked(epochs.average(), **kwargs)
else:
return compute_proj_epochs(epochs, **kwargs)
def _get_pca_dir(p, subj):
pca_dir = op.join(p.work_dir, subj, p.pca_dir)
if not op.isdir(pca_dir):
os.mkdir(pca_dir)
return pca_dir
def _get_proj_kwargs(p):
proj_kwargs = dict()
p_sl = 1
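    # p_sl is the step used when later summing expected projector counts:
    # with p.proj_meg == 'combined' a single set of projectors covers both
    # grad and mag, so the mag column is skipped ([::2]) when counting.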
if 'meg' not in get_args(compute_proj_raw):
if p.proj_meg != 'separate':
raise RuntimeError('MNE is too old for proj_meg option')
else:
proj_kwargs['meg'] = p.proj_meg
if p.proj_meg == 'combined':
p_sl = 2
return proj_kwargs, p_sl
def _compute_erm_proj(p, subj, projs, kind, bad_file, remove_existing=False,
disp_files=None):
disp_files = p.disp_files if disp_files is None else disp_files
assert kind in ('sss', 'raw')
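    # Build the 'continuous' (empty-room) SSP projectors: the ERM runs are
    # filtered and resampled the same way as the task runs before projection.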
proj_nums = _proj_nums(p, subj)
proj_kwargs, p_sl = _get_proj_kwargs(p)
empty_names = get_raw_fnames(p, subj, kind, 'only')
fir_kwargs, _ = _get_fir_kwargs(p.fir_design)
flat = _handle_dict(p.flat, subj)
raw = _raw_LRFCP(
raw_names=empty_names, sfreq=p.proj_sfreq,
l_freq=p.cont_hp, h_freq=p.cont_lp,
n_jobs=p.n_jobs_fir, apply_proj=not remove_existing,
n_jobs_resample=p.n_jobs_resample, projs=projs,
bad_file=bad_file, disp_files=disp_files, method='fir',
filter_length=p.filter_length, force_bads=True,
l_trans=p.cont_hp_trans, h_trans=p.cont_lp_trans,
phase=p.phase, fir_window=p.fir_window,
skip_by_annotation='edge', **fir_kwargs)
if remove_existing:
raw.del_proj()
raw.pick_types(meg=True, eeg=False, exclude=()) # remove EEG
use_reject, reject_kind = p.cont_reject, 'p.cont_reject'
if use_reject is None:
use_reject, reject_kind = p.reject, 'p.reject'
use_reject, use_flat = _restrict_reject_flat(
_handle_dict(use_reject, subj), flat, raw)
bad = False
pr = []
try:
pr = compute_proj_raw(raw, duration=1, n_grad=proj_nums[2][0],
n_mag=proj_nums[2][1], n_eeg=proj_nums[2][2],
reject=use_reject, flat=use_flat,
n_jobs=p.n_jobs_mkl, **proj_kwargs)
except RuntimeError as exc:
if 'No good epochs' not in str(exc):
raise
bad = True
if bad:
events = make_fixed_length_events(raw)
epochs = Epochs(raw, events, tmin=0, tmax=1. - 1. / raw.info['sfreq'],
proj=False, baseline=None, reject=use_reject,
flat=use_flat).drop_bad()
_raise_bad_epochs(
raw, epochs, events,
f'1-sec empty room via {reject_kind} = {use_reject} (consider '
f'changing p.cont_reject)')
assert len(pr) == np.sum(proj_nums[2][::p_sl])
# When doing eSSS it's a bit weird to put this in pca_dir but why not
pca_dir = _get_pca_dir(p, subj)
cont_proj = op.join(pca_dir, 'preproc_cont-proj.fif')
write_proj(cont_proj, pr)
return pr
def do_preprocessing_combined(p, subjects, run_indices):
"""Do preprocessing on all raw files together.
Calculates projection vectors to use to clean data.
Parameters
----------
p : instance of Parameters
Analysis parameters.
subjects : list of str
Subject names to analyze (e.g., ['Eric_SoP_001', ...]).
run_indices : array-like | None
Run indices to include.
"""
drop_logs = list()
for si, subj in enumerate(subjects):
proj_nums = _proj_nums(p, subj)
ecg_channel = _handle_dict(p.ecg_channel, subj)
flat = _handle_dict(p.flat, subj)
if p.disp_files:
print(' Preprocessing subject %g/%g (%s).'
% (si + 1, len(subjects), subj))
pca_dir = _get_pca_dir(p, subj)
bad_file = get_bad_fname(p, subj, check_exists=False)
# Create SSP projection vectors after marking bad channels
raw_names = get_raw_fnames(p, subj, 'sss', False, False,
run_indices[si])
empty_names = get_raw_fnames(p, subj, 'sss', 'only')
for r in raw_names + empty_names:
if not op.isfile(r):
raise NameError('File not found (' + r + ')')
fir_kwargs, old_kwargs = _get_fir_kwargs(p.fir_design)
if isinstance(p.auto_bad, float):
print(' Creating post SSS bad channel file:\n'
' %s' % bad_file)
# do autobad
raw = _raw_LRFCP(raw_names, p.proj_sfreq, None, None, p.n_jobs_fir,
p.n_jobs_resample, list(), None, p.disp_files,
method='fir', filter_length=p.filter_length,
apply_proj=False, force_bads=False,
l_trans=p.hp_trans, h_trans=p.lp_trans,
phase=p.phase, fir_window=p.fir_window,
pick=True, skip_by_annotation='edge',
**fir_kwargs)
events = fixed_len_events(p, raw)
rtmin = p.reject_tmin \
if p.reject_tmin is not None else p.tmin
rtmax = p.reject_tmax \
if p.reject_tmax is not None else p.tmax
# do not mark eog channels bad
meg, eeg = 'meg' in raw, 'eeg' in raw
picks = pick_types(raw.info, meg=meg, eeg=eeg, eog=False,
exclude=[])
assert p.auto_bad_flat is None or isinstance(p.auto_bad_flat, dict)
assert p.auto_bad_reject is None or \
isinstance(p.auto_bad_reject, dict) or \
p.auto_bad_reject == 'auto'
if p.auto_bad_reject == 'auto':
print(' Auto bad channel selection active. '
'Will try using Autoreject module to '
'compute rejection criterion.')
try:
from autoreject import get_rejection_threshold
except ImportError:
raise ImportError(' Autoreject module not installed.\n'
' Noisy channel detection parameter '
' not defined. To use autobad '
' channel selection either define '
' rejection criteria or install '
' Autoreject module.\n')
print(' Computing thresholds.\n', end='')
temp_epochs = Epochs(
raw, events, event_id=None, tmin=rtmin, tmax=rtmax,
baseline=_get_baseline(p), proj=True, reject=None,
flat=None, preload=True, decim=1)
kwargs = dict()
if 'verbose' in get_args(get_rejection_threshold):
kwargs['verbose'] = False
reject = get_rejection_threshold(temp_epochs, **kwargs)
reject = {kk: vv for kk, vv in reject.items()}
elif p.auto_bad_reject is None and p.auto_bad_flat is None:
raise RuntimeError('Auto bad channel detection active. Noisy '
'and flat channel detection '
'parameters not defined. '
'At least one criterion must be defined.')
else:
reject = p.auto_bad_reject
if 'eog' in reject.keys():
reject.pop('eog', None)
epochs = Epochs(raw, events, None, tmin=rtmin, tmax=rtmax,
baseline=_get_baseline(p), picks=picks,
reject=reject, flat=p.auto_bad_flat,
proj=True, preload=True, decim=1,
reject_tmin=rtmin, reject_tmax=rtmax)
# channel scores from drop log
drops = Counter([ch for d in epochs.drop_log for ch in d])
# get rid of non-channel reasons in drop log
scores = {kk: vv for kk, vv in drops.items() if
kk in epochs.ch_names}
ch_names = np.array(list(scores.keys()))
# channel scores expressed as percentile and rank ordered
counts = (100 * np.array([scores[ch] for ch in ch_names], float) /
len(epochs.drop_log))
order = np.argsort(counts)[::-1]
# boolean array masking out channels with <= % epochs dropped
mask = counts[order] > p.auto_bad
badchs = ch_names[order[mask]]
if len(badchs) > 0:
# Make sure we didn't get too many bad MEG or EEG channels
for m, e, thresh in zip([True, False], [False, True],
[p.auto_bad_meg_thresh,
p.auto_bad_eeg_thresh]):
picks = pick_types(epochs.info, meg=m, eeg=e, exclude=[])
if len(picks) > 0:
ch_names = [epochs.ch_names[pp] for pp in picks]
n_bad_type = sum(ch in ch_names for ch in badchs)
if n_bad_type > thresh:
stype = 'meg' if m else 'eeg'
raise RuntimeError('Too many bad %s channels '
'found: %s > %s'
% (stype, n_bad_type, thresh))
print(' The following channels resulted in greater than '
'{:.0f}% trials dropped:\n'.format(p.auto_bad * 100))
print(badchs)
with open(bad_file, 'w') as f:
f.write('\n'.join(badchs))
if not op.isfile(bad_file):
print(' Clearing bad channels (no file %s)'
% op.sep.join(bad_file.split(op.sep)[-3:]))
bad_file = None
ecg_t_lims = _handle_dict(p.ecg_t_lims, subj)
ecg_f_lims = p.ecg_f_lims
ecg_eve = op.join(pca_dir, 'preproc_ecg-eve.fif')
ecg_epo = op.join(pca_dir, 'preproc_ecg-epo.fif')
ecg_proj = op.join(pca_dir, 'preproc_ecg-proj.fif')
all_proj = op.join(pca_dir, 'preproc_all-proj.fif')
get_projs_from = _handle_dict(p.get_projs_from, subj)
if get_projs_from is None:
get_projs_from = np.arange(len(raw_names))
pre_list = [r for ri, r in enumerate(raw_names)
if ri in get_projs_from]
projs = list()
raw_orig = _raw_LRFCP(
raw_names=pre_list, sfreq=p.proj_sfreq, l_freq=None, h_freq=None,
n_jobs=p.n_jobs_fir, n_jobs_resample=p.n_jobs_resample,
projs=projs, bad_file=bad_file, disp_files=p.disp_files,
method='fir', filter_length=p.filter_length, force_bads=False,
l_trans=p.hp_trans, h_trans=p.lp_trans, phase=p.phase,
fir_window=p.fir_window, pick=True, skip_by_annotation='edge',
**fir_kwargs)
# Apply any user-supplied extra projectors
if p.proj_extra is not None:
if p.disp_files:
print(' Adding extra projectors from "%s".' % p.proj_extra)
projs.extend(read_proj(op.join(pca_dir, p.proj_extra)))
proj_kwargs, p_sl = _get_proj_kwargs(p)
#
# Calculate and apply ERM projectors
#
if not p.cont_as_esss:
if any(proj_nums[2]):
assert proj_nums[2][2] == 0 # no EEG projectors for ERM
if len(empty_names) == 0:
raise RuntimeError('Cannot compute empty-room projectors '
'from continuous raw data')
if p.disp_files:
print(' Computing continuous projectors using ERM.')
# Use empty room(s), but processed the same way
projs.extend(
_compute_erm_proj(p, subj, projs, 'sss', bad_file))
else:
cont_proj = op.join(pca_dir, 'preproc_cont-proj.fif')
_safe_remove(cont_proj)
#
# Calculate and apply the ECG projectors
#
if any(proj_nums[0]):
if p.disp_files:
print(' Computing ECG projectors...', end='')
raw = raw_orig.copy()
raw.filter(ecg_f_lims[0], ecg_f_lims[1], n_jobs=p.n_jobs_fir,
method='fir', filter_length=p.filter_length,
l_trans_bandwidth=0.5, h_trans_bandwidth=0.5,
phase='zero-double', fir_window='hann',
skip_by_annotation='edge', **old_kwargs)
raw.add_proj(projs)
raw.apply_proj()
find_kwargs = dict()
if 'reject_by_annotation' in get_args(find_ecg_events):
find_kwargs['reject_by_annotation'] = True
elif len(raw.annotations) > 0:
print(' WARNING: ECG event detection will not make use of '
'annotations, please update MNE-Python')
# We've already filtered the data channels above, but this
# filters the ECG channel
ecg_events = find_ecg_events(
raw, 999, ecg_channel, 0., ecg_f_lims[0], ecg_f_lims[1],
qrs_threshold='auto', return_ecg=False, **find_kwargs)[0]
use_reject, use_flat = _restrict_reject_flat(
_handle_dict(p.ssp_ecg_reject, subj), flat, raw)
ecg_epochs = Epochs(
raw, ecg_events, 999, ecg_t_lims[0], ecg_t_lims[1],
baseline=None, reject=use_reject, flat=use_flat, preload=True)
print(' obtained %d epochs from %d events.' % (len(ecg_epochs),
len(ecg_events)))
if len(ecg_epochs) >= 20:
write_events(ecg_eve, ecg_epochs.events)
ecg_epochs.save(ecg_epo, **_get_epo_kwargs())
desc_prefix = 'ECG-%s-%s' % tuple(ecg_t_lims)
pr = compute_proj_wrap(
ecg_epochs, p.proj_ave, n_grad=proj_nums[0][0],
n_mag=proj_nums[0][1], n_eeg=proj_nums[0][2],
desc_prefix=desc_prefix, **proj_kwargs)
assert len(pr) == np.sum(proj_nums[0][::p_sl])
write_proj(ecg_proj, pr)
projs.extend(pr)
else:
_raise_bad_epochs(raw, ecg_epochs, ecg_events, 'ECG')
del raw, ecg_epochs, ecg_events
else:
_safe_remove([ecg_proj, ecg_eve, ecg_epo])
#
# Next calculate and apply the EOG projectors
#
for idx, kind in ((1, 'EOG'), (3, 'HEOG'), (4, 'VEOG')):
_compute_add_eog(
p, subj, raw_orig, projs, proj_nums[idx], kind, pca_dir,
flat, proj_kwargs, old_kwargs, p_sl)
del proj_nums
# save the projectors
write_proj(all_proj, projs)
#
# Look at raw_orig for trial DQs now, it will be quick
#
raw_orig.filter(p.hp_cut, p.lp_cut, n_jobs=p.n_jobs_fir, method='fir',
filter_length=p.filter_length,
l_trans_bandwidth=p.hp_trans, phase=p.phase,
h_trans_bandwidth=p.lp_trans, fir_window=p.fir_window,
skip_by_annotation='edge', **fir_kwargs)
raw_orig.add_proj(projs)
raw_orig.apply_proj()
# now let's epoch with 1-sec windows to look for DQs
events = fixed_len_events(p, raw_orig)
reject = _handle_dict(p.reject, subj)
use_reject, use_flat = _restrict_reject_flat(reject, flat, raw_orig)
epochs = Epochs(raw_orig, events, None, p.tmin, p.tmax, preload=False,
baseline=_get_baseline(p), reject=use_reject,
flat=use_flat, proj=True)
try:
epochs.drop_bad()
except AttributeError: # old way
epochs.drop_bad_epochs()
drop_logs.append(epochs.drop_log)
del raw_orig
del epochs
if p.plot_drop_logs:
for subj, drop_log in zip(subjects, drop_logs):
plot_drop_log(drop_log, p.drop_thresh, subject=subj)
def _proj_nums(p, subj):
proj_nums = np.array(_handle_dict(p.proj_nums, subj), int)
if proj_nums.shape not in ((3, 3), (4, 3), (5, 3)):
raise ValueError('proj_nums for %s must be an array with shape '
'(3, 3), (4, 3), or (5, 3), got %s'
% (subj, proj_nums.shape))
proj_nums = np.pad(
proj_nums, ((0, 5 - proj_nums.shape[0]), (0, 0)), 'constant')
assert proj_nums.shape == (5, 3)
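    # Rows are the projector counts for (ECG, blink/EOG, ERM/continuous,
    # HEOG, VEOG); columns are (n_grad, n_mag, n_eeg). Shorter inputs are
    # zero-padded up to the full (5, 3) shape.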
return proj_nums
def _compute_add_eog(p, subj, raw_orig, projs, eog_nums, kind, pca_dir,
flat, proj_kwargs, old_kwargs, p_sl):
assert kind in ('EOG', 'HEOG', 'VEOG')
bk = dict(EOG='blink').get(kind, kind.lower())
eog_eve = op.join(pca_dir, f'preproc_{bk}-eve.fif')
eog_epo = op.join(pca_dir, f'preproc_{bk}-epo.fif')
eog_proj = op.join(pca_dir, f'preproc_{bk}-proj.fif')
eog_t_lims = _handle_dict(getattr(p, f'{kind.lower()}_t_lims'), subj)
eog_f_lims = _handle_dict(getattr(p, f'{kind.lower()}_f_lims'), subj)
eog_channel = _handle_dict(getattr(p, f'{kind.lower()}_channel'), subj)
thresh = _handle_dict(getattr(p, f'{kind.lower()}_thresh'), subj)
if eog_channel is None and kind != 'EOG':
eog_channel = 'EOG061' if kind == 'HEOG' else 'EOG062'
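        # Fall back to the stock Elekta/Neuromag EOG channel names when no
        # explicit channel was configured for HEOG/VEOG.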
if eog_nums.any():
if p.disp_files:
print(f' Computing {kind} projectors...', end='')
raw = raw_orig.copy()
raw.filter(eog_f_lims[0], eog_f_lims[1], n_jobs=p.n_jobs_fir,
method='fir', filter_length=p.filter_length,
l_trans_bandwidth=0.5, h_trans_bandwidth=0.5,
phase='zero-double', fir_window='hann',
skip_by_annotation='edge', **old_kwargs)
raw.add_proj(projs)
raw.apply_proj()
eog_events = find_eog_events(
raw, ch_name=eog_channel, reject_by_annotation=True,
thresh=thresh)
use_reject, use_flat = _restrict_reject_flat(
_handle_dict(p.ssp_eog_reject, subj), flat, raw)
eog_epochs = Epochs(
raw, eog_events, 998, eog_t_lims[0], eog_t_lims[1],
baseline=None, reject=use_reject, flat=use_flat, preload=True)
print(' obtained %d epochs from %d events.' % (len(eog_epochs),
len(eog_events)))
del eog_events
if len(eog_epochs) >= 5:
write_events(eog_eve, eog_epochs.events)
eog_epochs.save(eog_epo, **_get_epo_kwargs())
desc_prefix = f'{kind}-%s-%s' % tuple(eog_t_lims)
pr = compute_proj_wrap(
eog_epochs, p.proj_ave, n_grad=eog_nums[0],
n_mag=eog_nums[1], n_eeg=eog_nums[2],
desc_prefix=desc_prefix, **proj_kwargs)
assert len(pr) == np.sum(eog_nums[::p_sl])
write_proj(eog_proj, pr)
projs.extend(pr)
else:
warnings.warn('Only %d usable EOG events!' % len(eog_epochs))
_safe_remove([eog_proj, eog_eve, eog_epo])
del raw, eog_epochs
else:
_safe_remove([eog_proj, eog_eve, eog_epo])
def apply_preprocessing_combined(p, subjects, run_indices):
"""Actually apply and save the preprocessing (projs, filtering)
Can only run after do_preprocessing_combined is done.
Filters data, adds projection vectors, and saves to disk
(overwriting old files).
Parameters
----------
p : instance of Parameters
Analysis parameters.
subjects : list of str
Subject names to analyze (e.g., ['Eric_SoP_001', ...]).
run_indices : array-like | None
Run indices to include.
"""
# Now actually save some data
for si, subj in enumerate(subjects):
if p.disp_files:
print(' Applying processing to subject %g/%g.'
% (si + 1, len(subjects)))
pca_dir = op.join(p.work_dir, subj, p.pca_dir)
names_in = get_raw_fnames(p, subj, 'sss', False, False,
run_indices[si])
names_out = get_raw_fnames(p, subj, 'pca', False, False,
run_indices[si])
erm_in = get_raw_fnames(p, subj, 'sss', 'only')
erm_out = get_raw_fnames(p, subj, 'pca', 'only')
bad_file = get_bad_fname(p, subj)
all_proj = op.join(pca_dir, 'preproc_all-proj.fif')
projs = read_proj(all_proj)
fir_kwargs = _get_fir_kwargs(p.fir_design)[0]
if len(erm_in) > 0:
for ii, (r, o) in enumerate(zip(erm_in, erm_out)):
if p.disp_files:
print(' Processing erm file %d/%d.'
% (ii + 1, len(erm_in)))
raw = _raw_LRFCP(
raw_names=r, sfreq=None, l_freq=p.hp_cut, h_freq=p.lp_cut,
n_jobs=p.n_jobs_fir, n_jobs_resample=p.n_jobs_resample,
projs=projs, bad_file=bad_file, disp_files=False, method='fir',
apply_proj=False, filter_length=p.filter_length,
force_bads=True, l_trans=p.hp_trans, h_trans=p.lp_trans,
phase=p.phase, fir_window=p.fir_window, pick=False,
**fir_kwargs)
raw.save(o, overwrite=True, buffer_size_sec=None)
for ii, (r, o) in enumerate(zip(names_in, names_out)):
if p.disp_files:
print(' Processing file %d/%d.'
% (ii + 1, len(names_in)))
raw = _raw_LRFCP(
raw_names=r, sfreq=None, l_freq=p.hp_cut, h_freq=p.lp_cut,
n_jobs=p.n_jobs_fir, n_jobs_resample=p.n_jobs_resample,
projs=projs, bad_file=bad_file, disp_files=False, method='fir',
apply_proj=False, filter_length=p.filter_length,
force_bads=False, l_trans=p.hp_trans, h_trans=p.lp_trans,
phase=p.phase, fir_window=p.fir_window, pick=False,
**fir_kwargs)
raw.save(o, overwrite=True, buffer_size_sec=None)
# look at raw_clean for ExG events
if p.plot_raw:
from ._viz import _viz_raw_ssp_events
_viz_raw_ssp_events(p, subj, run_indices[si])
class FakeEpochs(object):
"""Make iterable epoch-like class, convenient for MATLAB transition"""
def __init__(self, data, ch_names, tmin=-0.2, sfreq=1000.0):
raise RuntimeError('Use mne.EpochsArray instead')
def fixed_len_events(p, raw):
"""Create fixed length trial events from raw object"""
dur = p.tmax - p.tmin
events = make_fixed_length_events(raw, 1, duration=dur)
return events
|
Orthogonal Defect Classification (ODC) is a concept that enables software developers to derive in-process feedback by extracting semantics from data that is already tracked in most organizations. The data and analytics assist in visualizing a process that typically evades visibility. This is analogous to the way magnetic resonance imaging (MRI) equipment helps a physician form a rough idea of how a human being looks on the inside without knowing anything about the specific patient. ODC creates a new order of capability to gain insight and clarity. It can be used to profile market segments, evaluate processes and tools, and factor in the impact of legacy code. Security bugs can be profiled to understand their circumstances and characteristics. Today many mobile telephone networks have used ODC to improve their reliability and availability. Similarly, ODC has helped manage the process and quality of software in the diesel engine controllers that ply our roads. This talk aims to provide an overview of ODC and share a couple of case studies. It will be of interest to computer scientists, developers, architects, testers, and service and product management. No specific background other than an awareness of software engineering is required in order to attend this talk. One of the objectives of Dr Chillarege's talk is for different disciplines to reflect on how ODC can help with current business challenges and accelerate their engineering processes.
Dr Ram Chillarege, President of Chillarege Inc. and chair of the IEEE Steering Committee for Software Reliability, will be speaking about Orthogonal Defect Classification (ODC) at 11 am, Friday December 6th, at NIST, Washington DC.
|
"""
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
"""Some documentation to refer to:
- Our main web socket (mWS) sends opcode 4 with a guild ID and channel ID.
- The mWS receives VOICE_STATE_UPDATE and VOICE_SERVER_UPDATE.
- We pull the session_id from VOICE_STATE_UPDATE.
- We pull the token, endpoint and server_id from VOICE_SERVER_UPDATE.
- Then we initiate the voice web socket (vWS) pointing to the endpoint.
- We send opcode 0 with the user_id, server_id, session_id and token using the vWS.
- The vWS sends back opcode 2 with an ssrc, port, modes (array) and heartbeat_interval.
- We send a UDP discovery packet to endpoint:port and receive our IP and our port in LE.
- Then we send our IP and port via vWS with opcode 1.
- When that's all done, we receive opcode 4 from the vWS.
- Finally we can transmit data to endpoint:port.
"""
import asyncio
import socket
import logging
import struct
import threading
from typing import Any, Callable
from . import opus, utils
from .backoff import ExponentialBackoff
from .gateway import *
from .errors import ClientException, ConnectionClosed
from .player import AudioPlayer, AudioSource
try:
import nacl.secret
has_nacl = True
except ImportError:
has_nacl = False
__all__ = (
'VoiceProtocol',
'VoiceClient',
)
log = logging.getLogger(__name__)
class VoiceProtocol:
"""A class that represents the Discord voice protocol.
This is an abstract class. The library provides a concrete implementation
under :class:`VoiceClient`.
This class allows you to implement a protocol to allow for an external
method of sending voice, such as Lavalink_ or a native library implementation.
These classes are passed to :meth:`abc.Connectable.connect <VoiceChannel.connect>`.
.. _Lavalink: https://github.com/freyacodes/Lavalink
Parameters
------------
client: :class:`Client`
The client (or its subclasses) that started the connection request.
channel: :class:`abc.Connectable`
The voice channel that is being connected to.
"""
def __init__(self, client, channel):
self.client = client
self.channel = channel
async def on_voice_state_update(self, data):
"""|coro|
An abstract method that is called when the client's voice state
has changed. This corresponds to ``VOICE_STATE_UPDATE``.
Parameters
------------
data: :class:`dict`
The raw `voice state payload`__.
.. _voice_state_update_payload: https://discord.com/developers/docs/resources/voice#voice-state-object
__ voice_state_update_payload_
"""
raise NotImplementedError
async def on_voice_server_update(self, data):
"""|coro|
An abstract method that is called when initially connecting to voice.
This corresponds to ``VOICE_SERVER_UPDATE``.
Parameters
------------
data: :class:`dict`
The raw `voice server update payload`__.
.. _voice_server_update_payload: https://discord.com/developers/docs/topics/gateway#voice-server-update-voice-server-update-event-fields
__ voice_server_update_payload_
"""
raise NotImplementedError
async def connect(self, *, timeout: float, reconnect: bool):
"""|coro|
An abstract method called when the client initiates the connection request.
When a connection is requested initially, the library calls the constructor
under ``__init__`` and then calls :meth:`connect`. If :meth:`connect` fails at
some point then :meth:`disconnect` is called.
Within this method, to start the voice connection flow it is recommended to
use :meth:`Guild.change_voice_state` to start the flow. After which,
:meth:`on_voice_server_update` and :meth:`on_voice_state_update` will be called.
The order that these two are called is unspecified.
Parameters
------------
timeout: :class:`float`
The timeout for the connection.
reconnect: :class:`bool`
Whether reconnection is expected.
"""
raise NotImplementedError
async def disconnect(self, *, force: bool):
"""|coro|
An abstract method called when the client terminates the connection.
See :meth:`cleanup`.
Parameters
------------
force: :class:`bool`
Whether the disconnection was forced.
"""
raise NotImplementedError
def cleanup(self):
"""This method *must* be called to ensure proper clean-up during a disconnect.
It is advisable to call this from within :meth:`disconnect` when you are
completely done with the voice protocol instance.
This method removes it from the internal state cache that keeps track of
currently alive voice clients. Failure to clean-up will cause subsequent
connections to report that it's still connected.
"""
key_id, _ = self.channel._get_voice_client_key()
self.client._connection._remove_voice_client(key_id)
class VoiceClient(VoiceProtocol):
"""Represents a Discord voice connection.
You do not create these, you typically get them from
e.g. :meth:`VoiceChannel.connect`.
Warning
--------
In order to use PCM based AudioSources, you must have the opus library
installed on your system and loaded through :func:`opus.load_opus`.
Otherwise, your AudioSources must be opus encoded (e.g. using :class:`FFmpegOpusAudio`)
or the library will not be able to transmit audio.
Attributes
-----------
session_id: :class:`str`
The voice connection session ID.
token: :class:`str`
The voice connection token.
endpoint: :class:`str`
The endpoint we are connecting to.
channel: :class:`abc.Connectable`
The voice channel connected to.
loop: :class:`asyncio.AbstractEventLoop`
The event loop that the voice client is running on.
"""
def __init__(self, client, channel):
if not has_nacl:
raise RuntimeError("PyNaCl library needed in order to use voice")
super().__init__(client, channel)
state = client._connection
self.token = None
self.socket = None
self.loop = state.loop
self._state = state
# this will be used in the AudioPlayer thread
self._connected = threading.Event()
self._handshaking = False
self._potentially_reconnecting = False
self._voice_state_complete = asyncio.Event()
self._voice_server_complete = asyncio.Event()
self.mode = None
self._connections = 0
self.sequence = 0
self.timestamp = 0
self._runner = None
self._player = None
self.encoder = None
self._lite_nonce = 0
self.ws = None
warn_nacl = not has_nacl
supported_modes = (
'xsalsa20_poly1305_lite',
'xsalsa20_poly1305_suffix',
'xsalsa20_poly1305',
)
@property
def guild(self):
"""Optional[:class:`Guild`]: The guild we're connected to, if applicable."""
return getattr(self.channel, 'guild', None)
@property
def user(self):
""":class:`ClientUser`: The user connected to voice (i.e. ourselves)."""
return self._state.user
def checked_add(self, attr, value, limit):
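        # Add ``value`` to the named counter attribute, resetting it to zero
        # once ``limit`` would be exceeded (used for the 16-bit RTP sequence
        # number and the 32-bit timestamp/nonce counters).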
val = getattr(self, attr)
if val + value > limit:
setattr(self, attr, 0)
else:
setattr(self, attr, val + value)
# connection related
async def on_voice_state_update(self, data):
self.session_id = data['session_id']
channel_id = data['channel_id']
if not self._handshaking or self._potentially_reconnecting:
# If we're done handshaking then we just need to update ourselves
# If we're potentially reconnecting due to a 4014, then we need to differentiate
# a channel move and an actual force disconnect
if channel_id is None:
# We're being disconnected so cleanup
await self.disconnect()
else:
guild = self.guild
self.channel = channel_id and guild and guild.get_channel(int(channel_id))
else:
self._voice_state_complete.set()
async def on_voice_server_update(self, data):
if self._voice_server_complete.is_set():
log.info('Ignoring extraneous voice server update.')
return
self.token = data.get('token')
self.server_id = int(data['guild_id'])
endpoint = data.get('endpoint')
if endpoint is None or self.token is None:
            log.warning('Awaiting endpoint... This requires waiting. '
                        'If a timeout occurs, consider raising the timeout and reconnecting.')
return
self.endpoint, _, _ = endpoint.rpartition(':')
if self.endpoint.startswith('wss://'):
# Just in case, strip it off since we're going to add it later
self.endpoint = self.endpoint[6:]
# This gets set later
self.endpoint_ip = None
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.socket.setblocking(False)
if not self._handshaking:
# If we're not handshaking then we need to terminate our previous connection in the websocket
await self.ws.close(4000)
return
self._voice_server_complete.set()
async def voice_connect(self):
await self.channel.guild.change_voice_state(channel=self.channel)
async def voice_disconnect(self):
log.info('The voice handshake is being terminated for Channel ID %s (Guild ID %s)', self.channel.id, self.guild.id)
await self.channel.guild.change_voice_state(channel=None)
def prepare_handshake(self):
self._voice_state_complete.clear()
self._voice_server_complete.clear()
self._handshaking = True
log.info('Starting voice handshake... (connection attempt %d)', self._connections + 1)
self._connections += 1
def finish_handshake(self):
log.info('Voice handshake complete. Endpoint found %s', self.endpoint)
self._handshaking = False
self._voice_server_complete.clear()
self._voice_state_complete.clear()
async def connect_websocket(self):
ws = await DiscordVoiceWebSocket.from_client(self)
self._connected.clear()
while ws.secret_key is None:
await ws.poll_event()
self._connected.set()
return ws
    async def connect(self, *, reconnect: bool, timeout: float):
log.info('Connecting to voice...')
self.timeout = timeout
for i in range(5):
self.prepare_handshake()
# This has to be created before we start the flow.
futures = [
self._voice_state_complete.wait(),
self._voice_server_complete.wait(),
]
# Start the connection flow
await self.voice_connect()
try:
await utils.sane_wait_for(futures, timeout=timeout)
except asyncio.TimeoutError:
await self.disconnect(force=True)
raise
self.finish_handshake()
try:
self.ws = await self.connect_websocket()
break
except (ConnectionClosed, asyncio.TimeoutError):
if reconnect:
log.exception('Failed to connect to voice... Retrying...')
await asyncio.sleep(1 + i * 2.0)
await self.voice_disconnect()
continue
else:
raise
if self._runner is None:
self._runner = self.loop.create_task(self.poll_voice_ws(reconnect))
async def potential_reconnect(self):
# Attempt to stop the player thread from playing early
self._connected.clear()
self.prepare_handshake()
self._potentially_reconnecting = True
try:
# We only care about VOICE_SERVER_UPDATE since VOICE_STATE_UPDATE can come before we get disconnected
await asyncio.wait_for(self._voice_server_complete.wait(), timeout=self.timeout)
except asyncio.TimeoutError:
self._potentially_reconnecting = False
await self.disconnect(force=True)
return False
self.finish_handshake()
self._potentially_reconnecting = False
try:
self.ws = await self.connect_websocket()
except (ConnectionClosed, asyncio.TimeoutError):
return False
else:
return True
@property
def latency(self):
""":class:`float`: Latency between a HEARTBEAT and a HEARTBEAT_ACK in seconds.
This could be referred to as the Discord Voice WebSocket latency and is
an analogue of user's voice latencies as seen in the Discord client.
.. versionadded:: 1.4
"""
ws = self.ws
return float("inf") if not ws else ws.latency
@property
def average_latency(self):
""":class:`float`: Average of most recent 20 HEARTBEAT latencies in seconds.
.. versionadded:: 1.4
"""
ws = self.ws
return float("inf") if not ws else ws.average_latency
async def poll_voice_ws(self, reconnect):
backoff = ExponentialBackoff()
while True:
try:
await self.ws.poll_event()
except (ConnectionClosed, asyncio.TimeoutError) as exc:
if isinstance(exc, ConnectionClosed):
# The following close codes are undocumented so I will document them here.
# 1000 - normal closure (obviously)
# 4014 - voice channel has been deleted.
# 4015 - voice server has crashed
if exc.code in (1000, 4015):
log.info('Disconnecting from voice normally, close code %d.', exc.code)
await self.disconnect()
break
if exc.code == 4014:
log.info('Disconnected from voice by force... potentially reconnecting.')
successful = await self.potential_reconnect()
if not successful:
log.info('Reconnect was unsuccessful, disconnecting from voice normally...')
await self.disconnect()
break
else:
continue
if not reconnect:
await self.disconnect()
raise
retry = backoff.delay()
log.exception('Disconnected from voice... Reconnecting in %.2fs.', retry)
self._connected.clear()
await asyncio.sleep(retry)
await self.voice_disconnect()
try:
await self.connect(reconnect=True, timeout=self.timeout)
except asyncio.TimeoutError:
# at this point we've retried 5 times... let's continue the loop.
log.warning('Could not connect to voice... Retrying...')
continue
async def disconnect(self, *, force: bool = False):
"""|coro|
Disconnects this voice client from voice.
"""
if not force and not self.is_connected():
return
self.stop()
self._connected.clear()
try:
if self.ws:
await self.ws.close()
await self.voice_disconnect()
finally:
self.cleanup()
if self.socket:
self.socket.close()
async def move_to(self, channel):
"""|coro|
Moves you to a different voice channel.
Parameters
-----------
channel: :class:`abc.Snowflake`
The channel to move to. Must be a voice channel.
"""
await self.channel.guild.change_voice_state(channel=channel)
def is_connected(self):
"""Indicates if the voice client is connected to voice."""
return self._connected.is_set()
# audio related
def _get_voice_packet(self, data):
header = bytearray(12)
# Formulate rtp header
header[0] = 0x80
header[1] = 0x78
struct.pack_into('>H', header, 2, self.sequence)
struct.pack_into('>I', header, 4, self.timestamp)
struct.pack_into('>I', header, 8, self.ssrc)
encrypt_packet = getattr(self, '_encrypt_' + self.mode)
return encrypt_packet(header, data)
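    # The three supported encryption modes differ only in how the 24-byte
    # XSalsa20 nonce is built: plain xsalsa20_poly1305 reuses the 12-byte RTP
    # header, the 'suffix' mode appends a random nonce to the packet, and the
    # 'lite' mode appends a 4-byte incrementing counter.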
def _encrypt_xsalsa20_poly1305(self, header, data):
box = nacl.secret.SecretBox(bytes(self.secret_key))
nonce = bytearray(24)
nonce[:12] = header
return header + box.encrypt(bytes(data), bytes(nonce)).ciphertext
def _encrypt_xsalsa20_poly1305_suffix(self, header, data):
box = nacl.secret.SecretBox(bytes(self.secret_key))
nonce = nacl.utils.random(nacl.secret.SecretBox.NONCE_SIZE)
return header + box.encrypt(bytes(data), nonce).ciphertext + nonce
def _encrypt_xsalsa20_poly1305_lite(self, header, data):
box = nacl.secret.SecretBox(bytes(self.secret_key))
nonce = bytearray(24)
nonce[:4] = struct.pack('>I', self._lite_nonce)
self.checked_add('_lite_nonce', 1, 4294967295)
return header + box.encrypt(bytes(data), bytes(nonce)).ciphertext + nonce[:4]
def play(self, source: AudioSource, *, after: Callable[[Exception], Any]=None):
"""Plays an :class:`AudioSource`.
The finalizer, ``after`` is called after the source has been exhausted
or an error occurred.
If an error happens while the audio player is running, the exception is
caught and the audio player is then stopped. If no after callback is
passed, any caught exception will be displayed as if it were raised.
Parameters
-----------
source: :class:`AudioSource`
The audio source we're reading from.
after: Callable[[:class:`Exception`], Any]
The finalizer that is called after the stream is exhausted.
This function must have a single parameter, ``error``, that
denotes an optional exception that was raised during playing.
Raises
-------
ClientException
Already playing audio or not connected.
TypeError
Source is not a :class:`AudioSource` or after is not a callable.
OpusNotLoaded
Source is not opus encoded and opus is not loaded.
"""
if not self.is_connected():
raise ClientException('Not connected to voice.')
if self.is_playing():
raise ClientException('Already playing audio.')
if not isinstance(source, AudioSource):
            raise TypeError(f'source must be an AudioSource not {source.__class__.__name__}')
if not self.encoder and not source.is_opus():
self.encoder = opus.Encoder()
self._player = AudioPlayer(source, self, after=after)
self._player.start()
def is_playing(self):
"""Indicates if we're currently playing audio."""
return self._player is not None and self._player.is_playing()
def is_paused(self):
"""Indicates if we're playing audio, but if we're paused."""
return self._player is not None and self._player.is_paused()
def stop(self):
"""Stops playing audio."""
if self._player:
self._player.stop()
self._player = None
def pause(self):
"""Pauses the audio playing."""
if self._player:
self._player.pause()
def resume(self):
"""Resumes the audio playing."""
if self._player:
self._player.resume()
@property
def source(self):
"""Optional[:class:`AudioSource`]: The audio source being played, if playing.
This property can also be used to change the audio source currently being played.
"""
return self._player.source if self._player else None
@source.setter
def source(self, value):
if not isinstance(value, AudioSource):
raise TypeError(f'expected AudioSource not {value.__class__.__name__}.')
if self._player is None:
raise ValueError('Not playing anything.')
self._player._set_source(value)
def send_audio_packet(self, data, *, encode=True):
"""Sends an audio packet composed of the data.
You must be connected to play audio.
Parameters
----------
data: :class:`bytes`
The :term:`py:bytes-like object` denoting PCM or Opus voice data.
encode: :class:`bool`
Indicates if ``data`` should be encoded into Opus.
Raises
-------
ClientException
You are not connected.
opus.OpusError
Encoding the data failed.
"""
self.checked_add('sequence', 1, 65535)
if encode:
encoded_data = self.encoder.encode(data, self.encoder.SAMPLES_PER_FRAME)
else:
encoded_data = data
packet = self._get_voice_packet(encoded_data)
try:
self.socket.sendto(packet, (self.endpoint_ip, self.voice_port))
except BlockingIOError:
log.warning('A packet has been dropped (seq: %s, timestamp: %s)', self.sequence, self.timestamp)
self.checked_add('timestamp', opus.Encoder.SAMPLES_PER_FRAME, 4294967295)
|
This pattern is dedicated to Princess Diana, who dreamed of being a prima ballerina. And, of course, to your sweet little ballerina, too.
Rnd 1: Ch 4, join w sl st in 1st ch to form ring, ch 3, 11 dc in ring, join w sl st in 1st dc (12 dc).
Rnd 3: (Work this round in back loops only) Ch 3, dc in each dc around, join as before (24 dc).
Rnds 4-5: Ch 3, dc in each dc around, join as before, fasten off after Rnd 5 (24 dc).
Join yarn with sl st in any free loop between Rnds 2 and 3, ch 3, 4 dc in same dc, 5 dc in next dc and in each dc around, join as before, fasten off (120 dc).
Sew straps at sides of Rnd 5 as shown, skipping about 5 dc between straps and between armholes.
With this pattern you can turn it around and make it a Dress basket and choose your own colors.
I will share updates with this pink dress basket I made. I will probably sew buttons and flowers or ribbons not sure yet but check back with me later.
My own version pattern will be posted later.
Delivering hand-made blankets to the victims of the 2005 Hurricanes – Rita and Katrina.
Love in the Language of Yarn (LILY) is an organization that helps provide orphans and Syrian refugee children with a little warmth and comfort through knit and crochet hats, scarves, mittens, blankets, and much more. LILY prides itself on providing garments and blankets for those in need throughout the world.
Stephie's Corner will show you how.
She has a picture-by-picture tutorial.
It saves me time teaching you when others are also teachers.
My Bow Tie Headband Ear Warmer...I LOVE IT!
Facing the cold weather? You can make this too! I know many of my fans will ask me: do you have the pattern? It is easy and simple to make.
Band
Row 1: Ch20 (or the width you need), dc in 4th ch from hook, dc across to the end of row. Ch3, turn.
Row 2: 1dc in 2nd st (chain 3 counts as your 1st st), 1dc across to end of row. Ch3, turn.
Repeat row 2 until you reach your head circumference. The headband will stretch a little, so take this into account when making your length. Fasten off leaving a long tail for sewing ends together. In the tutorial I made mine 54cm long. My head is 58cm. The crochet band will stretch quite a bit because it’s dc.
Place 1st row and last row together and sew ends together with a whip stitch (or slip stitch them together), making sure you sew under both loop edges.
Once you’ve finished sewing edges together, fasten off and sew ends into your work to hide the yarn end.
Tab
Row 1: Ch9 (or the width you want your tab), 1hdc in 2nd ch from hook, 1hdc to the end of row. Ch2, turn.
Row 2: 1hdc into each st to the end of row.
Repeat row 2 until you have your desired length. Do not finish off.
How to work out if your tab is long enough: lay the headband flat making sure the seam is in the middle. Next make several folds, from bottom to top (fold like you were folding a paper fan). Note: top and bottom edges should be facing up. You should have about three folds gathered. Wrap your tab around the folded middle of these folds and it should be a tight fit. Turn your gathered piece over and pull both ends of your tab tight and sew up with whip stitch, taking your darning needle under both loop edges.
If the tab isn’t a tight fit take out a row of your crochet.
Hi! Fans, This is where you can make your own Granny Square Baby Bonnet and Booties.
I spend a lot of time on the internet looking for new patterns.
I usually find a pattern and use the basic pattern but I add to it to be creative. Here is a picture of one I made.
Grab this at my shop. It is newly listed and I only have one. I will be making more.
Follow the easy step-by-step instructions of this vintage crochet pattern to make this classic baby set. There is absolutely no charge for personal use of this crochet pattern for a sacque, baby bonnet, and baby booties.
Crochet a Set for an Infant!
Size 2 White Crochet Hook

STEP-BY-STEP INSTRUCTIONS

Crocheting the Sacque
Starting at neck edge, ch 66.
Row 1 - Skip 1 st, work 1 s c in each remaining ch st (65 sts). Ch 1, turn.
Row 2 - Work 1 s c in each of first 12 sts; 3 s c in next st; 1 s c in each of next 6 sts; 3 s c in next st; 1 s c in each of next 25 sts; 3 s c in next st; 1 s c in each of next 6 sts; 3 s c in next st; 1 s c in each of last 12 sts; ch 1, turn.
Row 3 - 1 s c in each of first 13 sts; 3 s c in next st; 1 s c in each of next 8 sts; 3 s c in next st; 1 s c in each of next 27 sts; 3 s c in next st; 1 s c in each of next 8 sts; 3 s c in next st; 1 s c in each of last 13 sts. Ch 1, turn.
Row 4 - 1 s c in each of first 14 sts; 3 s c in next st; 1 s c in each of next 10 sts; 3 s c in next st; 1 s c in each of next 29 sts; 3 s c in next st; 1 s c in each of next 10 sts; 3 s c in next st; 1 s c in each of last 14 sts.
Continue increasing as given above, until 17 rows in all have been made (193 sts). Do not break yarn. With a separate strand of yarn, join front and back of yoke at underarms with a ch of 6, skipping 38 sts across each shoulder. On the next row, work in pattern across the 129 sts as follows (wrong side):
Row 1 - Draw up a long loop in first st (about 3/4 inch), Y O hook and through the loop, ch 1, * skip 3 sts, work 3 d c in next st, ch 1, 3 d c in same st (shell st), skip 3 sts, Y O hook, draw up a loop in next st, Y O hook and through 3 loops on hook, ch 1; repeat from * across row.
Row 2 - Draw up a long loop, Y O and through the loop, ch 1, * work 1 shell st over the ch 1 in center of shell of row below, Y O, draw up a loop in center of loop of row below, Y O and through 3 loops on hook, ch 1; repeat from * across row (16 shell sts), drawing up last loop through the ch 1 at end of row instead of the loop.
Repeat row 2 until 15 rows in all have been made.
Sleeves: On wrong side of garment, join yarn into the 4th ch st at underarm. Draw up a loop in next st, ch 1, skip 4 sts, * work a shell st into next st across shoulder edge, skip 3 sts, Y O hook and draw up a loop in next st, Y O, and through the 3 loops on hook, skip 3 sts; repeat from * skipping 4 sts before drawing up last loop at end of row (5 shell sts). Work back and forth in pattern until 11 rows in all have been made. Ch 1, turn.
Cuff: Work 1 s c in each d c of row below, then work back and forth in s c, st for st, for 7 rows. Work 1 row of picot sts. Fasten off.
Work 1 row of beading around neck edge. Work 1 row of picot sts around entire sacque. Sew sleeve seams.

Crocheting the Bonnet
Ch 4. Join together with a slip st to form a ring.
Row 1 - Work 6 s c in ring.
Row 2 - Work 2 s c in each st.
Row 3 - * Work 1 s c in each of first 2 sts, 1 d c in 2nd st of previous row; repeat from * 5 times.
Row 4 - * Work 1 s c in each of first 3 sts, then work 1 d c around the d c of previous row, inserting the hook sideways; repeat from * 5 times.
Row 5 - * Work 1 s c in each of first 4 sts, 1 d c around the d c of previous row; repeat from * 5 times.
Continue increasing as described above, until there are 12 sts in each gore. Omitting one gore for back of neck, work back and forth in shell pattern for 9 rows or desired depth. Work 1 row of picot sts across front edge.
Neckband: Work 40 s c across neck edge, then work 4 more rows of s c, st for st. Fasten off.

Crocheting the Booties
Work as given for Infant Star Stitch Set for instep and foot part only, then work as follows: Join yarn at center back of boot. Draw up a loop, work back and forth in pattern as given for sacque for 6 rows, having 4 shell sts in a row. Work 1 row of picot sts across top of bootie. Fasten off.
This site is great to get free patterns. I am actually going to make this one day.
Crochet Baby Booties Are now Listed in my Shop.
Baby booties are a great way of ensuring that your child's feet are covered, warm, and protected. Since infants cannot walk, baby booties are designed with the comfort and warmth of the child in mind. Crocheted baby booties are perfect for keeping little toes warm in the winter. Crochet booties are also much cheaper than shoes. Since infants grow so quickly, they will grow out of shoes quickly. Baby booties, on the other hand, have more stretch in them, and they are relatively cheap to make. Baby booties are small (naturally) and even though there is some shaping involved they are still relatively easy to crochet and it does not take very long to make a pair. They are a quick and simple crochet gift, for any time of year, that you can customize for any age, size or gender. And on top of all that, baby booties are super cute! Often crocheted with bright decorative colors or fanciful designs, baby booties are positively adorable and stylish. No wonder they are so popular! Perfect for rest, or play, baby booties are an essential part of a baby's wardrobe and a pair of them makes a wonderful gift for the newborn.
I will be making more booties and selling them at my shop so stay posted and updated.
Find crochet gift ideas for the crocheters on your gift list. For birthdays, Christmas, Valentine's Day, for women or for men, we'll help you find perfect crochet presents for everyone on your list!
Be determined and have a desire to learn something new. Learn how to Crochet with me. I will teach you the basic steps. Watch and practice your Crochet stitches. You may be slow in the beginning but in time you will become fast. Once you got all the basic stitches you will be impressed what you can make. Learn how to read Crochet. I learned at a very young age and I am glad I did take the time to follow directions. YOU CAN DO IT!!!!!!
I happened to make this over the weekend. Will be making more.
I was wondering how to pick the colors as I started with orange and blue. I had so many scraps of yarn that I really did not know how to start my colors but when I got done with it it looked so cute. Absolutely loved it.
|
# -*- coding: utf-8 -*-
from zope import interface
from AccessControl import Unauthorized
from AccessControl import getSecurityManager
from Products.CMFCore import permissions
from bika.lims.jsonapi.exceptions import APIError
from bika.lims.jsonapi.interfaces import IDataManager
from bika.lims.jsonapi.interfaces import IFieldManager
from bika.lims import logger
class BrainDataManager(object):
"""Adapter to get catalog brain attributes
"""
interface.implements(IDataManager)
def __init__(self, context):
self.context = context
def get(self, name):
"""Get the value by name
"""
# read the attribute
attr = getattr(self.context, name, None)
if callable(attr):
return attr()
return attr
def set(self, name, value, **kw):
"""Not used for catalog brains
"""
logger.warn("set attributes not allowed on catalog brains")
class PortalDataManager(object):
"""Adapter to set and get attributes of the Plone portal
"""
interface.implements(IDataManager)
def __init__(self, context):
self.context = context
def get(self, name):
"""Get the value by name
"""
# check read permission
sm = getSecurityManager()
permission = permissions.View
if not sm.checkPermission(permission, self.context):
raise Unauthorized("Not allowed to view the Plone portal")
# read the attribute
attr = getattr(self.context, name, None)
if callable(attr):
return attr()
        # XXX not really nice, but we want the portal to behave like an
        # ordinary content type. Therefore we need to inject the necessary data.
if name == "uid":
return 0
if name == "path":
return "/%s" % self.context.getId()
return attr
def set(self, name, value, **kw):
"""Set the attribute to the given value.
The keyword arguments represent the other attribute values
to integrate constraints to other values.
"""
# check write permission
sm = getSecurityManager()
permission = permissions.ManagePortal
if not sm.checkPermission(permission, self.context):
raise Unauthorized("Not allowed to modify the Plone portal")
# set the attribute
if not hasattr(self.context, name):
return False
self.context[name] = value
return True
class ATDataManager(object):
"""Adapter to set and get field values of AT Content Types
"""
interface.implements(IDataManager)
def __init__(self, context):
self.context = context
def get_schema(self):
"""Get the schema
"""
try:
return self.context.Schema()
except AttributeError:
raise APIError(400, "Can not get Schema of %r" % self.context)
def get_field(self, name):
"""Get the field by name
"""
field = self.context.getField(name)
return field
def set(self, name, value, **kw):
"""Set the field to the given value.
The keyword arguments represent the other field values
to integrate constraints to other values.
"""
# fetch the field by name
field = self.get_field(name)
# bail out if we have no field
if not field:
return None
# call the field adapter and set the value
fieldmanager = IFieldManager(field)
return fieldmanager.set(self.context, value, **kw)
def get(self, name, **kw):
"""Get the value of the field by name
"""
logger.debug("ATDataManager::get: fieldname=%s", name)
# fetch the field by name
field = self.get_field(name)
# bail out if we have no field
if not field:
return None
# call the field adapter and get the value
fieldmanager = IFieldManager(field)
return fieldmanager.get(self.context, **kw)
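For orientation, here is a minimal usage sketch of the adapters above. It assumes they are registered as IDataManager adapters for their respective contexts (e.g. via ZCML, as the bika.lims JSON API does); the helper names read_field and write_field are purely illustrative.
# Hypothetical usage sketch -- assumes the adapters above are registered
# for IDataManager, so adapting an object picks the right implementation.
from bika.lims.jsonapi.interfaces import IDataManager

def read_field(obj, name):
    # BrainDataManager, PortalDataManager or ATDataManager, depending on obj
    dm = IDataManager(obj)
    return dm.get(name)

def write_field(obj, name, value):
    dm = IDataManager(obj)
    return dm.set(name, value)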
|
Our remit for this project was to build a new front-of-house, single-storey extension to the owner’s bungalow with a new flat-roof porch, and to form a new gable in keeping with the existing gable end. The initial works involved setting out and excavating the footings using a 1.5t machine before installing a strip foundation. We then worked out of the ground up to damp course to install a block and beam floor, followed by installation of the windows, doors and wall plate for the roof and works for the 1st fix. We also had the tricky task of picking up the roof on support steel in line with the structural engineer’s calculations, so for this task we installed temporary support while we removed an internal lounge wall. Once all structural support was in place we installed a cut roof to pick up the existing main roof and tiled to match the existing front gable. An Alkor membrane flat roof was then installed above the entrance hall at the bottom of the twin gable. Once all external works were finished we completed all plastering and a partial 2nd fix to the property.
All works were carried out in good cooperation with Planning and Building Control, and completed in a timely manner, within budget and to our client’s satisfaction.
|
import logging
import os
import json
from flask import Blueprint, Response, request, abort
from flask_jsonpify import jsonpify
from werkzeug.contrib.cache import MemcachedCache, SimpleCache
from . import controllers
if 'OS_CONDUCTOR_CACHE' in os.environ:
cache = MemcachedCache([os.environ['OS_CONDUCTOR_CACHE']])
else:
cache = SimpleCache()
logging.info('CACHE=%r', cache)
def cache_get(key):
return cache.get(key)
def cache_set(key, value, timeout):
logging.info('CACHE[%s] <- %r', key, value)
return cache.set(key, value, timeout)
# Controller Proxies
def upload():
jwt = request.values.get('jwt')
datapackage = request.values.get('datapackage')
if datapackage is None:
abort(400)
if jwt is None:
abort(403)
ret = controllers.upload(datapackage, jwt, cache_get, cache_set)
return jsonpify(ret)
def upload_status():
datapackage = request.values.get('datapackage')
if datapackage is None:
abort(400)
ret = controllers.upload_status(datapackage, cache_get)
if ret is None:
abort(404)
return jsonpify(ret)
def toggle_publish():
id = request.values.get('id')
jwt = request.values.get('jwt')
if jwt is None:
abort(403)
value = request.values.get('publish', '')
value = value.lower()
toggle = None
publish = None
if value == 'toggle':
toggle = True
else:
if value in ['true', 'false']:
publish = json.loads(value)
if publish is None and toggle is None:
return Response(status=400)
return jsonpify(controllers.toggle_publish(id, jwt, toggle, publish))
def delete_package():
id = request.values.get('id')
jwt = request.values.get('jwt')
if jwt is None:
abort(403)
return jsonpify(controllers.delete_package(id, jwt))
def run_hooks():
id = request.values.get('id')
jwt = request.values.get('jwt')
pipeline = request.values.get('pipeline')
if jwt is None:
abort(403)
if pipeline is None or id is None:
abort(400)
return jsonpify(controllers.run_hooks(id, jwt, pipeline))
def stats():
return jsonpify(controllers.stats())
def update_params():
jwt = request.values.get('jwt')
datapackage = request.values.get('id')
params = request.get_json()
if 'params' not in params or not isinstance(params['params'], str):
abort(400, "No 'params' key or bad params value.")
if datapackage is None:
abort(400)
if jwt is None:
abort(403)
ret = controllers.update_params(datapackage, jwt, params)
return jsonpify(ret)
def create():
"""Create blueprint.
"""
# Create instance
blueprint = Blueprint('package', 'package')
# Register routes
blueprint.add_url_rule(
'upload', 'load', upload, methods=['POST'])
blueprint.add_url_rule(
'status', 'poll', upload_status, methods=['GET'])
blueprint.add_url_rule(
'publish', 'publish', toggle_publish, methods=['POST'])
blueprint.add_url_rule(
'delete', 'delete', delete_package, methods=['POST'])
blueprint.add_url_rule(
'run-hooks', 'run-hooks', run_hooks, methods=['POST'])
blueprint.add_url_rule(
'stats', 'stats', stats, methods=['GET'])
blueprint.add_url_rule(
'update_params', 'update_params', update_params, methods=['POST'])
# Return blueprint
return blueprint
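A minimal sketch of how this blueprint might be mounted on a Flask application follows; the url_prefix value is an assumption for illustration (the URL rules above carry no leading slash, so the blueprint needs a prefix when registered).
# Hypothetical wiring -- the real application factory may differ.
from flask import Flask

app = Flask(__name__)
app.register_blueprint(create(), url_prefix='/package')
# Exposes e.g. POST /package/upload, GET /package/status, POST /package/publish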
|
Authors: Elke Goos, Alexander Burcat and Branko Ruscic.
It is strictly forbidden to include this database information as it is or parts of it in any commercial database, software, firmware or hardware and any other type of commercial use without written permission from the authors.
Report ANL 05/20 and TAE 960 Technion-IIT, Aerospace Engineering, and Argonne National Laboratory, Chemistry Division, September 2005.
This file was checked by Egil Jahnsen (Norway) for errors and compatibility to automatic devices. Last check: 16 July 2016.
mirrored at http://garfield.chem.elte.hu/Burcat/burcat.html; quote date.
or received from Elke Goos, date.
Table 6. Enthalpy of formation, ΔfH298 and ΔfH0, heat capacity and entropy at 298 K, and H298−H0 from the original calculation. Last addition: September 27, 2016.
|
import ipaddress
from abc import abstractmethod
from collections import OrderedDict
from typing import Optional
from typing import List
from common.exceptions import LogicError
from plenum.common.messages.internal_messages import VoteForViewChange
from plenum.server.suspicion_codes import Suspicions
from stp_core.common.log import getlogger
from stp_core.network.auth_mode import AuthMode
from stp_core.network.exceptions import RemoteNotFound
from stp_core.types import HA
from plenum.common.constants import NODE, TARGET_NYM, DATA, ALIAS, \
NODE_IP, NODE_PORT, CLIENT_IP, CLIENT_PORT, VERKEY, SERVICES, \
VALIDATOR, CLIENT_STACK_SUFFIX, BLS_KEY
from plenum.common.stack_manager import TxnStackManager
from plenum.common.txn_util import get_type, get_payload_data
logger = getlogger()
class PoolManager:
@abstractmethod
def getStackParamsAndNodeReg(self, name, keys_dir, nodeRegistry=None,
ha=None, cliname=None, cliha=None):
"""
Returns a tuple(nodestack, clientstack, nodeReg)
"""
@property
@abstractmethod
def merkleRootHash(self) -> str:
"""
"""
@property
@abstractmethod
def txnSeqNo(self) -> int:
"""
"""
@staticmethod
def _get_rank(needle_id: str, haystack_ids: List[str]):
# Return the rank of the node where rank is defined by the order in
# which node was added to the pool
try:
return haystack_ids.index(needle_id)
except ValueError:
return None
@property
@abstractmethod
def id(self):
"""
"""
@abstractmethod
def get_rank_of(self, node_id, node_reg, node_ids) -> Optional[int]:
"""Return node rank among active pool validators by id
:param node_id: node's id
:param node_reg: (optional) node registry to operate with. If not specified,
current one is used.
:return: rank of the node or None if not found
"""
@property
def rank(self) -> Optional[int]:
# Nodes have a total order defined in them, rank is the node's
# position in that order
return self.get_rank_of(self.id, self.nodeReg, self._ordered_node_ids)
@abstractmethod
def get_name_by_rank(self, rank, node_reg, node_ids) -> Optional[str]:
# Needed for communicating primary name to others and also nodeReg
# uses node names (alias) and not ids
# TODO: Should move to using node ids and not node names (alias)
"""Return node name (alias) by rank among active pool validators
:param rank: rank of the node
:param node_reg: (optional) node registry to operate with. If not specified,
current one is used.
:return: name of the node or None if not found
"""
class HasPoolManager:
# noinspection PyUnresolvedReferences, PyTypeChecker
def __init__(self, ledger, state, write_manager, ha=None, cliname=None, cliha=None):
self.poolManager = TxnPoolManager(self, ledger, state, write_manager,
ha=ha, cliname=cliname, cliha=cliha)
class TxnPoolManager(PoolManager, TxnStackManager):
def __init__(self, node, ledger, state, write_manager, ha=None, cliname=None, cliha=None):
self.node = node
self.name = node.name
self.config = node.config
self.genesis_dir = node.genesis_dir
self.keys_dir = node.keys_dir
self.ledger = ledger
self._id = None
TxnStackManager.__init__(
self, self.name, node.keys_dir, isNode=True)
self.state = state
self.write_manager = write_manager
self._load_nodes_order_from_ledger()
self.nstack, self.cstack, self.nodeReg, self.cliNodeReg = \
self.getStackParamsAndNodeReg(self.name, self.keys_dir, ha=ha,
cliname=cliname, cliha=cliha)
self._dataFieldsValidators = (
(NODE_IP, self._isIpAddressValid),
(CLIENT_IP, self._isIpAddressValid),
(NODE_PORT, self._isPortValid),
(CLIENT_PORT, self._isPortValid),
)
def __repr__(self):
return self.node.name
def getStackParamsAndNodeReg(self, name, keys_dir, nodeRegistry=None,
ha=None, cliname=None, cliha=None):
nodeReg, cliNodeReg, nodeKeys = self.parseLedgerForHaAndKeys(
self.ledger)
self.addRemoteKeysFromLedger(nodeKeys)
# If node name was not found in the pool transactions file
if not ha:
ha = nodeReg[name]
nstack = dict(name=name,
ha=HA(*ha),
main=True,
auth_mode=AuthMode.RESTRICTED.value,
queue_size=self.config.ZMQ_NODE_QUEUE_SIZE)
cliname = cliname or (name + CLIENT_STACK_SUFFIX)
if not cliha:
cliha = cliNodeReg[cliname]
cstack = dict(name=cliname or (name + CLIENT_STACK_SUFFIX),
ha=HA(*cliha),
main=True,
auth_mode=AuthMode.ALLOW_ANY.value,
queue_size=self.config.ZMQ_CLIENT_QUEUE_SIZE)
if keys_dir:
nstack['basedirpath'] = keys_dir
cstack['basedirpath'] = keys_dir
return nstack, cstack, nodeReg, cliNodeReg
def onPoolMembershipChange(self, txn) -> bool:
# `onPoolMembershipChange` method can be called only after txn added to ledger
if get_type(txn) != NODE:
return False
txn_data = get_payload_data(txn)
if DATA not in txn_data:
return False
nodeName = txn_data[DATA][ALIAS]
nodeNym = txn_data[TARGET_NYM]
self._set_node_ids_in_cache(nodeNym, nodeName)
def _updateNode(txn_data):
node_reg_changed = False
if SERVICES in txn_data[DATA]:
node_reg_changed = self.nodeServicesChanged(txn_data)
if txn_data[DATA][ALIAS] in self.node.nodeReg:
if {NODE_IP, NODE_PORT, CLIENT_IP, CLIENT_PORT}. \
intersection(set(txn_data[DATA].keys())):
self.nodeHaChanged(txn_data)
if VERKEY in txn_data:
self.nodeKeysChanged(txn_data)
if BLS_KEY in txn_data[DATA]:
self.node_blskey_changed(txn_data)
return node_reg_changed
node_reg_changed = False
        # If nodeNym was never added to self._ordered_node_services,
        # then it has never been added to the ledger
if nodeNym not in self._ordered_node_services:
if VALIDATOR in txn_data[DATA].get(SERVICES, []):
self.addNewNodeAndConnect(txn_data)
node_reg_changed = True
self._set_node_services_in_cache(nodeNym, txn_data[DATA].get(SERVICES, []))
else:
node_reg_changed = _updateNode(txn_data)
self._set_node_services_in_cache(nodeNym, txn_data[DATA].get(SERVICES, None))
return node_reg_changed
def addNewNodeAndConnect(self, txn_data):
nodeName = txn_data[DATA][ALIAS]
if nodeName == self.name:
logger.debug("{} adding itself to node registry".
format(self.name))
self.node.nodeReg[nodeName] = HA(txn_data[DATA][NODE_IP],
txn_data[DATA][NODE_PORT])
self.node.cliNodeReg[nodeName + CLIENT_STACK_SUFFIX] = \
HA(txn_data[DATA][CLIENT_IP],
txn_data[DATA][CLIENT_PORT])
else:
self.connectNewRemote(txn_data, nodeName, self.node, nodeName != self.name)
def node_about_to_be_disconnected(self, nodeName):
if self.node.master_primary_name == nodeName:
self.node.master_replica.internal_bus.send(
VoteForViewChange(Suspicions.PRIMARY_ABOUT_TO_BE_DISCONNECTED))
def nodeHaChanged(self, txn_data):
nodeNym = txn_data[TARGET_NYM]
nodeName = self.getNodeName(nodeNym)
# TODO: Check if new HA is same as old HA and only update if
# new HA is different.
if nodeName == self.name:
# Update itself in node registry if needed
ha_changed = False
(ip, port) = self.node.nodeReg[nodeName]
if NODE_IP in txn_data[DATA] and ip != txn_data[DATA][NODE_IP]:
ip = txn_data[DATA][NODE_IP]
ha_changed = True
if NODE_PORT in txn_data[DATA] and port != txn_data[DATA][NODE_PORT]:
port = txn_data[DATA][NODE_PORT]
ha_changed = True
if ha_changed:
self.node.nodeReg[nodeName] = HA(ip, port)
ha_changed = False
(ip, port) = self.node.cliNodeReg[nodeName + CLIENT_STACK_SUFFIX]
if CLIENT_IP in txn_data[DATA] and ip != txn_data[DATA][CLIENT_IP]:
ip = txn_data[DATA][CLIENT_IP]
ha_changed = True
if CLIENT_PORT in txn_data[DATA] and port != txn_data[DATA][CLIENT_PORT]:
port = txn_data[DATA][CLIENT_PORT]
ha_changed = True
if ha_changed:
self.node.cliNodeReg[nodeName + CLIENT_STACK_SUFFIX] = HA(ip, port)
self.node.nodestack.onHostAddressChanged()
self.node.clientstack.onHostAddressChanged()
else:
rid = self.stackHaChanged(txn_data, nodeName, self.node)
if rid:
self.node.nodestack.outBoxes.pop(rid, None)
self.node_about_to_be_disconnected(nodeName)
def nodeKeysChanged(self, txn_data):
# TODO: if the node whose keys are being changed is primary for any
# protocol instance, then we should trigger an election for that
# protocol instance. For doing that, for every replica of that
# protocol instance, `_primaryName` as None, and then the node should
# call its `decidePrimaries`.
nodeNym = txn_data[TARGET_NYM]
nodeName = self.getNodeName(nodeNym)
# TODO: Check if new keys are same as old keys and only update if
# new keys are different.
if nodeName == self.name:
# TODO: Why?
logger.debug("{} not changing itself's keep".
format(self.name))
return
else:
rid = self.stackKeysChanged(txn_data, nodeName, self.node)
if rid:
self.node.nodestack.outBoxes.pop(rid, None)
self.node_about_to_be_disconnected(nodeName)
def nodeServicesChanged(self, txn_data) -> bool:
nodeNym = txn_data[TARGET_NYM]
nodeName = self.getNodeName(nodeNym)
oldServices = set(self._ordered_node_services.get(nodeNym, []))
newServices = set(txn_data[DATA].get(SERVICES, []))
if oldServices == newServices:
logger.info("Node {} not changing {} since it is same as existing".format(nodeNym, SERVICES))
return False
node_count_changed = False
if VALIDATOR in newServices.difference(oldServices):
node_count_changed = True
# If validator service is enabled
node_info = self.write_manager.get_node_data(nodeNym)
self.node.nodeReg[nodeName] = HA(node_info[NODE_IP],
node_info[NODE_PORT])
self.node.cliNodeReg[nodeName + CLIENT_STACK_SUFFIX] = HA(node_info[CLIENT_IP],
node_info[CLIENT_PORT])
self.updateNodeTxns({DATA: node_info, }, txn_data)
if self.name != nodeName:
self.connectNewRemote({DATA: node_info,
TARGET_NYM: nodeNym}, nodeName, self.node)
else:
logger.debug("{} adding itself to node registry".
format(self.name))
if VALIDATOR in oldServices.difference(newServices):
node_count_changed = True
# If validator service is disabled
del self.node.nodeReg[nodeName]
del self.node.cliNodeReg[nodeName + CLIENT_STACK_SUFFIX]
if self.name != nodeName:
try:
rid = TxnStackManager.removeRemote(
self.node.nodestack, nodeName)
if rid:
self.node.nodestack.outBoxes.pop(rid, None)
except RemoteNotFound:
logger.info('{} did not find remote {} to remove'.format(self, nodeName))
self.node_about_to_be_disconnected(nodeName)
return node_count_changed
def node_blskey_changed(self, txn_data):
# if BLS key changes for my Node, then re-init BLS crypto signer with new keys
node_nym = txn_data[TARGET_NYM]
node_name = self.getNodeName(node_nym)
if node_name == self.name:
bls_key = txn_data[DATA][BLS_KEY]
self.node.update_bls_key(bls_key)
def getNodeName(self, nym):
# Assuming ALIAS does not change
return self._ordered_node_ids[nym]
@property
def merkleRootHash(self) -> str:
return self.ledger.root_hash
@property
def txnSeqNo(self) -> int:
return self.ledger.seqNo
# Question: Why are `_isIpAddressValid` and `_isPortValid` part of
# pool_manager?
@staticmethod
def _isIpAddressValid(ipAddress):
try:
ipaddress.ip_address(ipAddress)
except ValueError:
return False
else:
return ipAddress != '0.0.0.0'
@staticmethod
def _isPortValid(port):
return isinstance(port, int) and 0 < port <= 65535
@property
def id(self):
if not self._id:
for _, txn in self.ledger.getAllTxn():
txn_data = get_payload_data(txn)
if self.name == txn_data[DATA][ALIAS]:
self._id = txn_data[TARGET_NYM]
return self._id
def _load_nodes_order_from_ledger(self):
self._ordered_node_ids = OrderedDict()
self._ordered_node_services = {}
for _, txn in self.ledger.getAllTxn():
if get_type(txn) == NODE:
txn_data = get_payload_data(txn)
self._set_node_ids_in_cache(txn_data[TARGET_NYM],
txn_data[DATA][ALIAS])
self._set_node_services_in_cache(txn_data[TARGET_NYM],
txn_data[DATA].get(SERVICES, None))
def _set_node_ids_in_cache(self, node_nym, node_name):
curName = self._ordered_node_ids.get(node_nym)
if curName is None:
self._ordered_node_ids[node_nym] = node_name
logger.info("{} sets node {} ({}) order to {}".format(
self.name, node_name, node_nym,
                len(self._ordered_node_ids)))
elif curName != node_name:
msg = "{} is trying to order already ordered node {} ({}) with other alias {}" \
.format(self.name, curName, node_nym, node_name)
logger.error(msg)
raise LogicError(msg)
def _set_node_services_in_cache(self, node_nym, node_services):
if node_services is not None:
self._ordered_node_services[node_nym] = node_services
def node_ids_ordered_by_rank(self, node_reg, node_ids) -> List:
return [nym for nym, name in node_ids.items()
if name in node_reg]
def node_names_ordered_by_rank(self) -> List:
return self.calc_node_names_ordered_by_rank(self.nodeReg, self._ordered_node_ids)
@staticmethod
def calc_node_names_ordered_by_rank(node_reg, node_ids) -> List:
return [name for nym, name in node_ids.items()
if name in node_reg]
def get_rank_of(self, node_id, node_reg, node_ids) -> Optional[int]:
if self.id is None:
# This can happen if a non-genesis node starts
return None
return self._get_rank(node_id, self.node_ids_ordered_by_rank(node_reg, node_ids))
def get_rank_by_name(self, name, node_reg, node_ids) -> Optional[int]:
for nym, nm in node_ids.items():
if name == nm:
return self.get_rank_of(nym, node_reg, node_ids)
def get_name_by_rank(self, rank, node_reg, node_ids) -> Optional[str]:
try:
nym = self.node_ids_ordered_by_rank(node_reg, node_ids)[rank]
except IndexError:
return None
else:
return node_ids[nym]
def get_nym_by_name(self, node_name) -> Optional[str]:
for nym, name in self._ordered_node_ids.items():
if name == node_name:
return nym
return None
def get_node_ids(self):
return self._ordered_node_ids
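To make the rank bookkeeping above concrete, here is a toy illustration of how get_rank_of and node_ids_ordered_by_rank interact; the nyms, aliases and addresses are made up for the example.
# Toy illustration with made-up data (not real ledger transactions).
from collections import OrderedDict

ordered_node_ids = OrderedDict([
    ('nym-alpha', 'Alpha'),   # added to the pool first
    ('nym-beta', 'Beta'),     # added second
    ('nym-gamma', 'Gamma'),   # added third
])
# Beta has been demoted, so it is absent from the active node registry.
node_reg = {'Alpha': ('10.0.0.1', 9701), 'Gamma': ('10.0.0.3', 9705)}

active = [nym for nym, name in ordered_node_ids.items() if name in node_reg]
print(active.index('nym-gamma'))  # -> 1: Gamma ranks second among active validators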
|
OCULA plug-in tools for NUKE. Explore the range in Australia by Intraware.
OCULA is a unique collection of plug-in tools for NUKE that solve common problems with stereoscopic imagery, boost productivity in post production and ultimately help deliver a more rewarding stereoscopic viewing experience.
The plug-ins are designed to automatically replicate key processes on left and right channels and help artists polish and refine stereoscopic material, literally removing headaches from the final viewing experience. OCULA, created by The Foundry’s SCI-TECH Award®-winning R&D team, uses algorithms to work out the geometrical relationship between left and right cameras and generate disparity maps.
Disparity maps track and correlate the differences in positional space and movement between corresponding pixels in the left and right cameras, delivering pixel-level control over images. Knowing where disparities occur, OCULA tools apply corrections by warping, stretching and squeezing only the areas of an image that require treatment. Image manipulation using disparity maps is different to the X, Y or Z-axis shifting of images, where only whole image planes are being shifted.
These building blocks enable OCULA to assist in correcting issues such as vertical misalignment, localised colour density differences and focus mismatches, saving artists a huge amount of pain in day-to-day tasks such as keying and tracking, which can become very cumbersome on uncorrected footage.
The OCULA plug-in set contains additional tools that facilitate the copying of rotos and paint strokes from one eye to the other, reduce interaxial separation, perform stereo retimes, create new views from a stereo pair and generate per-view depth maps. All corrections can be made to the left and right eye channels either together or separately, minimising or eliminating discomfort from the stereo viewing experience.
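To make the idea of a disparity map concrete, here is a toy sketch in plain NumPy (not OCULA's actual API): given a per-pixel horizontal disparity between the two eyes, one view can be resampled toward the other by shifting each pixel by its own disparity, rather than shifting the whole image plane.
import numpy as np

def warp_with_disparity(right_image, disparity):
    # Toy nearest-neighbour warp: resample the right-eye image toward the left
    # eye using a per-pixel horizontal disparity map (values in pixels).
    # Purely illustrative -- production tools interpolate and handle occlusions.
    height, width = disparity.shape
    ys, xs = np.mgrid[0:height, 0:width]
    sample_x = np.clip(np.round(xs + disparity).astype(int), 0, width - 1)
    return right_image[ys, sample_x]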
|
import unittest
import unittest.mock as mock
from tower_of_hanoi import Disk, Game, Peg
class DiskTestCase(unittest.TestCase):
def test____eq____when_self_equals_other__returns_true(self):
self.assertEqual(Disk(1), Disk(1))
def test____eq____when_self_size_not_equals_other__returns_false(self):
self.assertNotEqual(Disk(1), Disk(2))
def test____lt____when_self_equals_other__returns_false(self):
self.assertFalse(Disk(1) < Disk(1))
def test____lt____when_self_greater_than_other__returns_false(self):
self.assertFalse(Disk(2) < Disk(1))
def test____lt____when_self_less_than_other__returns_true(self):
self.assertTrue(Disk(1) < Disk(2))
class PegTestCase(unittest.TestCase):
def _create_peg(self, name=None, disks=[]):
        return Peg(name if name is not None else self._name, disks)
def setUp(self):
self._disk_1 = Disk(1)
self._disk_2 = Disk(2)
self._disk_3 = Disk(3)
self._name = 'name'
def test____eq____when_self_equals_other__returns_true(self):
self.assertEqual(Peg(self._name, [self._disk_1]), Peg(self._name, [self._disk_1]))
def test____eq____when_self_disks_not_equals_other__returns_false(self):
self.assertNotEqual(Peg(self._name, [self._disk_1]), Peg(self._name, [self._disk_2]))
def test____eq____when_self_name_not_equals_other__returns_false(self):
self.assertNotEqual(Peg(self._name, [self._disk_1]), Peg('other-name', [self._disk_1]))
def test__disks__returns_copy(self):
peg = self._create_peg()
peg.disks().append(self._disk_1)
self.assertEqual([], peg.disks())
def test__disks__returns_in_order_from_bottom_to_top(self):
peg = self._create_peg(disks=[self._disk_3, self._disk_2, self._disk_1])
self.assertEqual([self._disk_3, self._disk_2, self._disk_1], peg.disks())
def test__is_empty__when_empty__returns_true(self):
peg = self._create_peg()
self.assertTrue(peg.is_empty())
def test__is_empty__when_not_empty__returns_false(self):
peg = self._create_peg(disks=[self._disk_1])
self.assertFalse(peg.is_empty())
def test__pop__when_empty__raises_exception(self):
peg = self._create_peg()
with self.assertRaises(Exception):
peg.pop()
def test__pop__when_not_empty__returns_new_peg_with_top_disk_removed_and_removed_disk(self):
peg = self._create_peg(disks=[self._disk_2, self._disk_1])
new_peg, popped_disk = peg.pop()
self.assertEqual(self._create_peg(disks=[self._disk_2]), new_peg)
self.assertEqual(self._disk_1, popped_disk)
def test__push__when_empty__returns_new_peg_with_added_disk(self):
peg = self._create_peg()
new_peg = peg.push(self._disk_1)
self.assertEqual(self._create_peg(disks=[self._disk_1]), new_peg)
def test__push__when_disk_smaller_than_top_disk__returns_peg_with_added_disk_on_top(self):
peg = self._create_peg(disks=[self._disk_2])
new_peg = peg.push(self._disk_1)
self.assertEqual(self._create_peg(disks=[self._disk_2, self._disk_1]), new_peg)
def test__push__when_disk_same_as_top_disk__raises_exception(self):
peg = self._create_peg(disks=[self._disk_1])
with self.assertRaises(Exception):
peg.push(self._disk_1)
def test__push__when_disk_larger_than_top_disk__raises_exception(self):
peg = self._create_peg(disks=[self._disk_1])
with self.assertRaises(Exception):
peg.push(self._disk_2)
class GameTestCase(unittest.TestCase):
def _create_peg_a(self, disks):
return Peg('a', disks)
def _create_peg_b(self, disks=[]):
return Peg('b', disks)
def _create_peg_c(self, disks=[]):
return Peg('c', disks)
def setUp(self):
self._disk_1 = Disk(1)
self._disk_2 = Disk(2)
self._disk_3 = Disk(3)
self._disk_4 = Disk(4)
self._peg_b = self._create_peg_b()
self._peg_c = self._create_peg_c()
self._game = Game()
def test__create_peg__returns_peg_with_specified_name(self):
name = 'name'
peg = self._game.create_peg(name)
self.assertEqual(name, peg.name())
def test__create_peg__when_disk_count_is_0__returns_empty_peg(self):
peg = self._game.create_peg('name', 0)
self.assertEqual([], peg.disks())
def test__create_peg__when_disk_count_is_1__returns_peg_with_1_disk(self):
peg = self._game.create_peg('name', 1)
self.assertEqual([self._disk_1], peg.disks())
def test__create_peg__when_disk_count_is_3__returns_peg_with_3_disks_in_ascending_order_from_top(self):
peg = self._game.create_peg('name', 3)
self.assertEqual([self._disk_3, self._disk_2, self._disk_1], peg.disks())
def test__move__when_disk_count_is_1__invokes_callback_after_each_move(self):
move_spy = mock.Mock()
peg_a = self._create_peg_a([self._disk_1])
self._game.move(1, peg_a, self._peg_c, self._peg_b, move_spy)
expected_move_spy_call_args_list = [
mock.call([
self._create_peg_a([]),
self._create_peg_c([self._disk_1]),
self._create_peg_b([])
])
]
self.assertEqual(expected_move_spy_call_args_list, move_spy.call_args_list)
def test__move__when_disk_count_is_1__moves_disks_from_peg_a_to_peg_c(self):
peg_a = self._create_peg_a([self._disk_1])
new_peg_a, new_peg_c, new_peg_b = self._game.move(1, peg_a, self._peg_c, self._peg_b)
self.assertEqual(self._create_peg_a([]), new_peg_a)
self.assertEqual(self._create_peg_b([]), new_peg_b)
self.assertEqual(self._create_peg_c([self._disk_1]), new_peg_c)
def test__move__when_disk_count_is_2__invokes_callback_after_each_move(self):
move_spy = mock.Mock()
peg_a = self._create_peg_a([self._disk_2, self._disk_1])
self._game.move(2, peg_a, self._peg_c, self._peg_b, move_spy)
expected_move_spy_call_args_list = [
mock.call([
self._create_peg_a([self._disk_2]),
self._create_peg_b([self._disk_1]),
self._create_peg_c([])
]),
mock.call([
self._create_peg_a([]),
self._create_peg_c([self._disk_2]),
self._create_peg_b([self._disk_1])
]),
mock.call([
self._create_peg_b([]),
self._create_peg_c([self._disk_2, self._disk_1]),
self._create_peg_a([])
])
]
self.assertSequenceEqual(expected_move_spy_call_args_list, move_spy.call_args_list)
def test__move__when_disk_count_is_2__moves_disks_from_peg_a_to_peg_c(self):
peg_a = self._create_peg_a([self._disk_2, self._disk_1])
new_peg_a, new_peg_c, new_peg_b = self._game.move(2, peg_a, self._peg_c, self._peg_b)
self.assertEqual(self._create_peg_a([]), new_peg_a)
self.assertEqual(self._create_peg_b([]), new_peg_b)
self.assertEqual(self._create_peg_c([self._disk_2, self._disk_1]), new_peg_c)
def test__move__when_disk_count_is_3__moves_disks_from_peg_a_to_peg_c(self):
peg_a = self._create_peg_a([self._disk_3, self._disk_2, self._disk_1])
new_peg_a, new_peg_c, new_peg_b = self._game.move(3, peg_a, self._peg_c, self._peg_b)
self.assertEqual(self._create_peg_a([]), new_peg_a)
self.assertEqual(self._create_peg_b([]), new_peg_b)
self.assertEqual(self._create_peg_c([self._disk_3, self._disk_2, self._disk_1]), new_peg_c)
def test__move__when_disk_count_is_4__moves_disks_from_peg_a_to_peg_c(self):
peg_a = self._create_peg_a([self._disk_4, self._disk_3, self._disk_2, self._disk_1])
new_peg_a, new_peg_c, new_peg_b = self._game.move(4, peg_a, self._peg_c, self._peg_b)
self.assertEqual(self._create_peg_a([]), new_peg_a)
self.assertEqual(self._create_peg_b([]), new_peg_b)
self.assertEqual(self._create_peg_c([self._disk_4, self._disk_3, self._disk_2, self._disk_1]), new_peg_c)
def test__move__when_disk_count_exceeds_source_peg_disk_count__raises_exception(self):
peg_a = self._create_peg_a([self._disk_1])
with self.assertRaises(Exception):
self._game.move(2, peg_a, self._peg_c, self._peg_b)
if __name__ == '__main__':
unittest.main()
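The tower_of_hanoi module under test is not included above; the following is one possible implementation consistent with these tests (immutable pegs and a recursive move that reports each single-disk move through the optional callback). The class and method names come from the tests; everything else is an assumption.
# A sketch of a tower_of_hanoi module that would satisfy the tests above.
class Disk:
    def __init__(self, size):
        self._size = size

    def __eq__(self, other):
        return isinstance(other, Disk) and self._size == other._size

    def __lt__(self, other):
        return self._size < other._size


class Peg:
    def __init__(self, name, disks=()):
        self._name = name
        self._disks = list(disks)   # ordered bottom ... top

    def __eq__(self, other):
        return (isinstance(other, Peg) and self._name == other._name
                and self._disks == other._disks)

    def name(self):
        return self._name

    def disks(self):
        return list(self._disks)    # copy, so callers cannot mutate the peg

    def is_empty(self):
        return not self._disks

    def pop(self):
        if not self._disks:
            raise Exception('cannot pop an empty peg')
        return Peg(self._name, self._disks[:-1]), self._disks[-1]

    def push(self, disk):
        if self._disks and not disk < self._disks[-1]:
            raise Exception('cannot push a disk onto a smaller or equal disk')
        return Peg(self._name, self._disks + [disk])


class Game:
    def create_peg(self, name, disk_count=0):
        return Peg(name, [Disk(size) for size in range(disk_count, 0, -1)])

    def move(self, disk_count, source, target, auxiliary, on_move=None):
        if disk_count < 1:
            return source, target, auxiliary
        # Move the top n-1 disks out of the way, onto the auxiliary peg.
        source, auxiliary, target = self.move(
            disk_count - 1, source, auxiliary, target, on_move)
        # Move the remaining (largest) disk to the target and report it.
        source, disk = source.pop()
        target = target.push(disk)
        if on_move is not None:
            on_move([source, target, auxiliary])
        # Move the n-1 disks from the auxiliary peg onto the target.
        auxiliary, target, source = self.move(
            disk_count - 1, auxiliary, target, source, on_move)
        return source, target, auxiliary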
|
Affirm's Prone Biopsy System is the world's first and only dedicated prone biopsy system to offer both 2D and 3D imaging-guided breast biopsies. 3D guidance provides the most accurate targeting of breast lesions, including those that can be challenging to detect with conventional techniques. The new technology provides an advanced, minimally invasive alternative to a surgical excisional biopsy and allows the patient to rest comfortably lying down with the biopsy instruments out of sight. Additionally, the technology allows for shorter patient procedure time and fewer X-ray exposures resulting in reduced patient dose.
|
import gzip
import re
import requests
import string
import sys
import time
import random
DEFAULT_HEADERS = {'User-Agent': 'ArchiveTeam'}
class FetchError(Exception):
'''Custom error class when fetching does not meet our expectation.'''
def main():
# Take the program arguments given to this script
# Normal programs use 'argparse' but this keeps things simple
start_num = int(sys.argv[1])
end_num = int(sys.argv[2])
output_filename = sys.argv[3] # this should be something like myfile.txt.gz
assert start_num <= end_num
print('Starting', start_num, end_num)
gzip_file = gzip.GzipFile(output_filename, 'wb')
for shortcode in check_range(start_num, end_num):
# Write the valid result one per line to the file
line = '{0}\n'.format(shortcode)
gzip_file.write(line.encode('ascii'))
gzip_file.close()
print('Done')
def check_range(start_num, end_num):
for num in range(start_num, end_num + 1):
shortcode = num
url = 'https://www.blogger.com/profile/{0}'.format(shortcode)
counter = 0
while True:
            # Try 5 times before giving up
if counter > 4:
# This will stop the script with an error
raise Exception('Giving up!')
try:
text = fetch(url)
except FetchError:
# The server may be overloaded so wait a bit
print('Sleeping...')
sys.stdout.flush()
time.sleep(10)
else:
if text:
yield 'id:{0}'.format(shortcode)
userid = extract_handle(text)
if userid:
yield 'user:{0}'.format(userid)
for blog in extract_blogs(text):
yield 'blog:{0}'.format(blog)
break # stop the while loop
counter += 1
def fetch(url):
'''Fetch the URL and check if it returns OK.
Returns True, returns the response text. Otherwise, returns None
'''
time.sleep(random.randint(10, 25))
print('Fetch', url)
sys.stdout.flush()
response = requests.get(url, headers=DEFAULT_HEADERS)
# response doesn't have a reason attribute all the time??
    print('Got', response.status_code, getattr(response, 'reason', ''))
sys.stdout.flush()
if response.status_code == 200:
# The item exists
if not response.text:
# If HTML is empty maybe server broke
raise FetchError()
return response.text
elif response.status_code == 404:
# Does not exist
return
elif response.status_code == 503:
# Captcha!
print('You are receiving a temporary captcha from Google. Sleep 45 minutes.')
sys.stdout.flush()
time.sleep(2700)
raise FetchError()
else:
# Problem
raise FetchError()
def extract_handle(text):
'''Return the page creator from the text.'''
# Search for something like
# "http://www.blogger.com/feeds/14366755180455532991/blogs"
match = re.search(r'"https?://www\.blogger\.[a-z]+/feeds/([0-9]+)/', text)
if match:
return match.group(1)
def extract_blogs(text):
'''Return a list of tags from the text.'''
# Search for "http://onwonder.blogspot.com/"
return re.findall(r'"(https?://[^"]+)" rel="contributor\-to nofollow"', text)
if __name__ == '__main__':
main()
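For reference, a hypothetical invocation of the scraper; the script file name, profile-number range and output path below are made up for illustration. The script expects a start profile number, an end profile number and a gzip output file.
# Hypothetical invocation (illustrative values only):
#   python3 blogger_discover.py 115000000 115000500 results-115000000.txt.gz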
|
Turn your autoresponders into a sales tool!
Too many dealerships are guilty of having poor e-mail autoresponders.
If that’s you, you’re missing an amazing sales opportunity if your autoresponder is lacking. Watch now to generate more sales, build more trust, and skyrocket engagement with these simple steps.
|