#!/usr/bin/env python3
import sys
import os
import re
import json
import subprocess
import stat
import anchore_engine.analyzers.utils
analyzer_name = "file_list"
try:
config = anchore_engine.analyzers.utils.init_analyzer_cmdline(
sys.argv, analyzer_name
)
except Exception as err:
print(str(err))
sys.exit(1)
imgname = config["imgid"]
imgid = config["imgid_full"]
outputdir = config["dirs"]["outputdir"]
unpackdir = config["dirs"]["unpackdir"]
meta = anchore_engine.analyzers.utils.get_distro_from_squashtar(
os.path.join(unpackdir, "squashed.tar"), unpackdir=unpackdir
)
distrodict = anchore_engine.analyzers.utils.get_distro_flavor(
meta["DISTRO"], meta["DISTROVERS"], likedistro=meta["LIKEDISTRO"]
)
simplefiles = {}
outfiles = {}
try:
allfiles = {}
fmap = {}
if os.path.exists(unpackdir + "/anchore_allfiles.json"):
with open(unpackdir + "/anchore_allfiles.json", "r") as FH:
allfiles = json.loads(FH.read())
else:
fmap, allfiles = anchore_engine.analyzers.utils.get_files_from_squashtar(
os.path.join(unpackdir, "squashed.tar")
)
with open(unpackdir + "/anchore_allfiles.json", "w") as OFH:
OFH.write(json.dumps(allfiles))
# fileinfo
for name in list(allfiles.keys()):
outfiles[name] = json.dumps(allfiles[name])
simplefiles[name] = oct(stat.S_IMODE(allfiles[name]["mode"]))
except Exception as err:
import traceback
traceback.print_exc()
raise err
if simplefiles:
ofile = os.path.join(outputdir, "files.all")
anchore_engine.analyzers.utils.write_kvfile_fromdict(ofile, simplefiles)
if outfiles:
ofile = os.path.join(outputdir, "files.allinfo")
anchore_engine.analyzers.utils.write_kvfile_fromdict(ofile, outfiles)
sys.exit(0)
|
Newly developed yu gi oh duel links hack tool no survey – yu gi oh duel links hack no survey no download has all new features and even more options that will be described in the “readme.txt” file you will get after installation. This tool is clean and safe from any viruses or malware. We do not scam people with hidden ads, offers or surveys. All files are free and easy to use, with full instructions and features.
yu gi oh duel links hack tool no survey – yu gi oh duel links hack no survey no download supports Windows and MAC OS platforms. Many of our files also work on mobile platforms; to check whether your mobile device is supported, press the download button and our system will check your device automatically. In case your device is not supported, contact us and we may update this tool.
yu gi oh duel links hack tool no survey – yu gi oh duel links hack no survey no download has a built-in proxy system to hide your IP address and provide anti-detection for this tool.
Skepticism is understandable, but if you are a regular user of our gaming files and hacks, you would know that we only bring quality working files. For those who are skeptical, I challenge you to try yu gi oh duel links hack tool no survey – yu gi oh duel links hack no survey no download. Besides, you have nothing to lose in trying. You don’t have to pay anything nor share your personal information.
By downloading you agree that we are NOT responsible for anything that happens to you by using yu gi oh duel links hack tool no survey – yu gi oh duel links hack no survey no download. Please download with responsibility.
|
import json
from collections import OrderedDict
from retriever.lib.templates import TEMPLATES
from retriever.lib.models import myTables
from retriever.lib.tools import open_fr
def read_json(json_file):
"""Read Json dataset package files
Load each json and get the appropriate encoding for the dataset
Reload the json using the encoding to ensure correct character sets
"""
json_object = OrderedDict()
json_file_encoding = None
json_file = str(json_file) + ".json"
try:
file_obj = open_fr(json_file)
json_object = json.load(file_obj)
if "encoding" in json_object:
json_file_encoding = json_object['encoding']
file_obj.close()
except ValueError:
return None
# Reload json using encoding if available
try:
if json_file_encoding:
file_obj = open_fr(json_file, encoding=json_file_encoding)
else:
file_obj = open_fr(json_file)
json_object = json.load(file_obj)
file_obj.close()
except ValueError:
return None
if isinstance(json_object, dict) and "resources" in json_object.keys():
# Note::formats described by frictionless data may need to change
tabular_exts = {"csv", "tab"}
vector_exts = {"shp", "kmz"}
raster_exts = {"tif", "tiff", "bil", "hdr", "h5", "hdf5", "hr", "image"}
for resource_item in json_object["resources"]:
if "format" not in resource_item:
if "format" in json_object:
resource_item["format"] = json_object["format"]
else:
resource_item["format"] = "tabular"
if "extensions" in resource_item:
exts = set(resource_item["extensions"])
if exts <= tabular_exts:
resource_item["format"] = "tabular"
elif exts <= vector_exts:
resource_item["format"] = "vector"
elif exts <= raster_exts:
resource_item["format"] = "raster"
if "url" in resource_item:
if "urls" in json_object:
json_object["urls"][resource_item["name"]] = resource_item["url"]
json_object["tables"] = OrderedDict()
temp_tables = {}
table_names = [item["name"] for item in json_object["resources"]]
temp_tables["tables"] = OrderedDict(zip(table_names, json_object["resources"]))
for table_name, table_spec in temp_tables["tables"].items():
json_object["tables"][table_name] = myTables[table_spec["format"]](**table_spec)
json_object.pop("resources", None)
return TEMPLATES["default"](**json_object)
return None
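# --- Usage sketch (not part of the retriever source) ---
# A minimal, hypothetical example of calling read_json(). "example_dataset" is a
# made-up package name: the call assumes an "example_dataset.json" file exists in
# the current working directory, and that the returned template object exposes the
# "tables" mapping built above. read_json() returns None when the file does not
# contain valid JSON.
if __name__ == "__main__":
    script = read_json("example_dataset")
    if script is None:
        print("example_dataset.json could not be parsed")
    else:
        print("Loaded tables:", list(script.tables.keys()))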
|
Chaenactis glabriuscula var. heterocarpha, a dicot, is an annual herb that is native to California, and is endemic (limited) to California.
[Photos of this taxon (mg82103): © 2011 Steven Perry, © 2015 John Doyen, © 2019 Adam Chasey]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
"""\
==================================
Reading a file as fast as possible
==================================
MaxSpeedFileReader reads a file in bytes mode as fast as it can; limited only
by any size limit on the inbox it is sending the data to.
This component is therefore useful for building systems that are self rate
limiting - systems that are just trying to process data as fast as they can and
are limited by the speed of the slowest part of the chain.
Example Usage
-------------
Read "myfile" in chunks of 1024 bytes. The rate is limited by the rate at
which the consumer component can consume the chunks, since its inbox has a size
limit of 5 items of data::
consumer = Consumer()
consumer.inboxes["inbox"].setSize(5)
Pipeline( MaxSpeedFileReader("myfile", chunksize=1024),
consumer,
).run()
More details
------------
Specify a filename and chunksize and MaxSpeedFileReader will read bytes from
the file in the chunksize you specified and send them out of its "outbox"
outbox.
If the destination inbox it is sending chunks to is size limited, then
MaxSpeedFileReader will pause until space becomes available. This is how the
speed at which the file is ingested is regulated - by the rate at which it is
consumed.
When the whole file has been read, this component will terminate and send a
producerFinished() message out of its "signal" outbox.
If a producerFinished message is received on the "control" inbox, this component
will complete sending any data that may be waiting. It will then send the
producerFinished message on out of its "signal" outbox and terminate.
If a shutdownMicroprocess message is received on the "control" inbox, this
component will immediately send it on out of its "signal" outbox and immediately
terminate. It will not complete sending on any pending data.
"""
from Axon.Component import component
from Axon.Ipc import producerFinished, shutdownMicroprocess
from Axon.AxonExceptions import noSpaceInBox
class MaxSpeedFileReader(component):
"""\
MaxSpeedFileReader(filename[,chunksize]) -> new MaxSpeedFileReader component.
Reads the contents of a file in bytes mode; sending it out as fast as it can
in chunks from the "outbox" outbox. The rate of reading is only limited by
any size limit of the destination inbox to which the data is being sent.
Keyword arguments:
- filename -- The filename of the file to read
- chunksize -- Optional. The maximum number of bytes in each chunk of data read from the file and sent out of the "outbox" outbox (default=32768)
"""
def __init__(self, filename, chunksize=32768):
super(MaxSpeedFileReader,self).__init__()
self.filename=filename
self.chunksize=chunksize
def handleControl(self):
while self.dataReady("control"):
msg = self.recv("control")
if isinstance(msg, producerFinished) and not isinstance(self.shutdownMsg, shutdownMicroprocess):
self.shutdownMsg = msg
elif isinstance(msg, shutdownMicroprocess):
self.shutdownMsg = msg
def canStop(self):
self.handleControl()
return isinstance(self.shutdownMsg, (producerFinished,shutdownMicroprocess))
def mustStop(self):
self.handleControl()
return isinstance(self.shutdownMsg, shutdownMicroprocess)
def waitSend(self,data,boxname):
while 1:
try:
self.send(data,boxname)
return
except noSpaceInBox:
if self.mustStop():
raise UserWarning( "STOP" )
self.pause()
yield 1
if self.mustStop():
raise UserWarning( "STOP" )
def main(self):
self.shutdownMsg=""
fh = open(self.filename,"rb")
try:
while 1:
data = fh.read(self.chunksize)
if not data:
self.shutdownMsg=producerFinished(self)
raise UserWarning( "STOP" )
for _ in self.waitSend(data,"outbox"):
yield _
if self.mustStop():
raise UserWarning( "STOP" )
except UserWarning:
self.send(self.shutdownMsg, "signal")
__kamaelia_components__ = ( MaxSpeedFileReader, )
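# --- Usage sketch (not part of the original module) ---
# A minimal demonstration mirroring the docstring example above. It assumes
# Kamaelia is installed and that a file named "myfile" exists in the current
# directory; ConsoleEchoer simply prints whatever reaches its inbox, and the
# size-limited inbox is what throttles the reader.
if __name__ == "__main__":
    from Kamaelia.Chassis.Pipeline import Pipeline
    from Kamaelia.Util.Console import ConsoleEchoer

    consumer = ConsoleEchoer()
    consumer.inboxes["inbox"].setSize(5)
    Pipeline(
        MaxSpeedFileReader("myfile", chunksize=1024),
        consumer,
    ).run()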
|
Leslie Shay Season 2 memorabilia. HD Wallpaper and background images in the Chicago Fire (2012 TV Series) club tagged: photo leslie shay season 2 screencaps chicago fire.
This Chicago Fire (2012 TV Series) photo contains portrait, bow, and cleverness.
|
import datetime
import os
import re
import time
from enum import Enum, unique
def LatestDate(repo):
date = 0
for submodule in repo.submodules:
subrepo = submodule.module()
head = subrepo.head
if head.is_detached:
commitDate = head.commit.committed_date
else:
commitDate = head.ref.commit.committed_date
if commitDate > date:
date = commitDate
return datetime.datetime.fromtimestamp(date)
def Branch(repo):
head = repo.head
if head.is_detached:
return "DETACHED"
return head.reference.name
def Fetch(submodule):
if not submodule.module_exists():
return
print('Fetching {}'.format(submodule.name))
subrepo = submodule.module()
subrepo.git.fetch()
def FetchAll(repo):
for submodule in repo.submodules:
Fetch(submodule)
def Update(repo, submodule, remote=True, recursive=True, depth=None):
args = ['update', '--init']
if recursive:
args.append('--recursive')
if remote:
args.append('--remote')
if depth:
args.append('--depth')
args.append(depth)
if not submodule.module_exists():
print('Initializing {}'.format(submodule.name))
else:
subrepo = submodule.module()
remote = subrepo.remote()
head = subrepo.head
if head.is_detached:
print('Updating {}'.format(submodule.name))
else:
args.append('--rebase')
print('Updating {} from {}/{}'.format(submodule.name, remote.name, head.reference.name))
args.append('--')
args.append(submodule.name)
repo.git.submodule(args)
def UpdateAll(repo, remote=True, recursive=True, depth=None):
for submodule in repo.submodules:
Update(repo, submodule, remote=remote, recursive=recursive, depth=depth)
def Checkout(repo, submodule):
if not submodule.module_exists():
Update(repo, submodule)
branch = submodule.branch
print('Switching {} to {}'.format(submodule.name, branch.name))
branch.checkout()
if not submodule.module_exists():
print('Cannot recursively checkout, {} has not been initialized yet.'.format(submodule.name))
return
subrepo = submodule.module()
for submodule in subrepo.submodules:
Checkout(subrepo, submodule)
def CheckoutAll(repo):
for submodule in repo.submodules:
Checkout(repo, submodule)
def Clean(submodule):
if not submodule.module_exists():
print('Cannot clean, {} has not been initialized yet.'.format(submodule.name))
return
subrepo = submodule.module()
print('Cleaning {}'.format(submodule.name))
subrepo.git.clean('-dfx', '-e', '.project', '-e', '.classpath', '-e', '.settings', '-e', 'META-INF')
def CleanAll(repo):
for submodule in repo.submodules:
Clean(submodule)
def Reset(submodule, toRemote):
if not submodule.module_exists():
print('Cannot reset, {} has not been initialized yet.'.format(submodule.name))
return
subrepo = submodule.module()
if toRemote:
head = subrepo.head
if head.is_detached:
print('Cannot reset, {} has a DETACHED HEAD.'.format(submodule.name))
return
remote = subrepo.remote()
branchName = '{}/{}'.format(remote.name, head.reference.name)
print('Resetting {} to {}'.format(submodule.name, branchName))
subrepo.git.reset('--hard', branchName)
else:
print('Resetting {}'.format(submodule.name))
subrepo.git.reset('--hard')
def ResetAll(repo, toRemote):
for submodule in repo.submodules:
Reset(submodule, toRemote)
def Merge(submodule, branchName):
if not submodule.module_exists():
print('Cannot merge, {} has not been initialized yet.'.format(submodule.name))
return
subrepo = submodule.module()
subrepo.git.merge(branchName)
def MergeAll(repo, branchName):
for submodule in repo.submodules:
Merge(submodule, branchName)
def Tag(submodule, tagName, tagDescription):
if not submodule.module_exists():
print('Cannot tag, {} has not been initialized yet.'.format(submodule.name))
return
print('Creating tag {} in {}'.format(tagName, submodule.name))
subrepo = submodule.module()
subrepo.create_tag(path=tagName, message=tagDescription)
def TagAll(repo, tagName, tagDescription):
for submodule in repo.submodules:
Tag(submodule, tagName, tagDescription)
def Push(submodule, **kwargs):
if not submodule.module_exists():
print('Cannot push, {} has not been initialized yet.'.format(submodule.name))
return
print('Pushing {}'.format(submodule.name))
subrepo = submodule.module()
remote = subrepo.remote()
remote.push(**kwargs)
def PushAll(repo, **kwargs):
for submodule in repo.submodules:
Push(submodule, **kwargs)
def Track(submodule):
if not submodule.module_exists():
print('Cannot set tracking branch, {} has not been initialized yet.'.format(submodule.name))
return
subrepo = submodule.module()
head = subrepo.head
remote = subrepo.remote()
localBranchName = head.reference.name
remoteBranchName = '{}/{}'.format(remote.name, localBranchName)
print('Setting tracking branch for {} to {}'.format(localBranchName, remoteBranchName))
subrepo.git.branch('-u', remoteBranchName, localBranchName)
def TrackAll(repo):
for submodule in repo.submodules:
Track(submodule)
@unique
class RemoteType(Enum):
SSH = 1
HTTP = 2
def SetRemoteAll(repo, toType=RemoteType.SSH):
for submodule in repo.submodules:
SetRemote(submodule, toType)
def SetRemote(submodule, toType):
if not submodule.module_exists():
print('Cannot set remote, {} has not been initialized yet.'.format(submodule.name))
return
name = submodule.name
subrepo = submodule.module()
origin = subrepo.remote()
currentUrl = origin.config_reader.get('url')
httpMatch = re.match(r'https?://([\w\.@:\-~]+)/(.+)', currentUrl)
sshMatch = re.match(r'(?:ssh://)?([\w\.@\-~]+)@([\w\.@\-~]+)[:/](.+)', currentUrl)
if httpMatch:
user = 'git'
host = httpMatch.group(1)
path = httpMatch.group(2)
elif sshMatch:
user = sshMatch.group(1)
host = sshMatch.group(2)
path = sshMatch.group(3)
else:
raise RuntimeError('Cannot set remote for {}, unknown URL format {}.'.format(name, currentUrl))
if toType is RemoteType.SSH:
newUrl = '{}@{}:{}'.format(user, host, path)
elif toType is RemoteType.HTTP:
newUrl = 'https://{}/{}'.format(host, path)
else:
raise RuntimeError('Cannot set remote for {}, unknown URL type {}.'.format(name, str(toType)))
print('Setting remote for {} to {}'.format(name, newUrl))
origin.config_writer.set('url', newUrl)
def create_qualifier(repo, branch=None):
timestamp = LatestDate(repo)
if not branch:
branch = Branch(repo)
return _format_qualifier(timestamp, branch)
def create_now_qualifier(repo, branch=None):
timestamp = datetime.datetime.now()
if not branch:
branch = Branch(repo)
return _format_qualifier(timestamp, branch)
def _format_qualifier(timestamp, branch):
return '{}-{}'.format(timestamp.strftime('%Y%m%d-%H%M%S'), branch.replace('/', '_'))
def repo_changed(repo, qualifierLocation):
timestamp = LatestDate(repo)
branch = Branch(repo)
changed = False
if not os.path.isfile(qualifierLocation):
changed = True
else:
with open(qualifierLocation, mode='r') as qualifierFile:
storedTimestampStr = qualifierFile.readline().replace('\n', '')
storedBranch = qualifierFile.readline().replace('\n', '')
if not storedTimestampStr or not storedBranch:
raise RuntimeError('Invalid qualifier file {}, please delete this file and retry'.format(qualifierLocation))
storedTimestamp = datetime.datetime.fromtimestamp(int(storedTimestampStr))
changed = (timestamp > storedTimestamp) or (branch != storedBranch)
with open(qualifierLocation, mode='w') as timestampFile:
timestampStr = str(int(time.mktime(timestamp.timetuple())))
timestampFile.write('{}\n{}\n'.format(timestampStr, branch))
return changed, _format_qualifier(timestamp, branch)
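# --- Usage sketch (not part of the module) ---
# A minimal, hypothetical example of driving these helpers with GitPython. It
# assumes GitPython is installed and that the current working directory is the
# root of a superproject that contains submodules.
if __name__ == "__main__":
    import git

    repo = git.Repo(os.getcwd())
    FetchAll(repo)                # fetch every initialized submodule
    UpdateAll(repo, remote=True)  # init/update all submodules against their remotes
    print('Build qualifier: {}'.format(create_qualifier(repo)))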
|
My own pleasure as a reader of that type of fiction is being left in the dark, confused, gradually putting it together.
That's exactly the kind of reader you have to be to enjoy William Gibson's new novel.
Peripheral drops you into two separate futures, one near, one distant, without so much as a guide in either, much less some kind of portable universal translator. Nope, you're on your own. And it gets bewildering at times.
I do love a good perplexing story though. One that has to be puzzled through. Where the sense of mystery isn't confined to the whodunnit, but pervades everything. Peripheral begins with an ordinary person witnessing a crime. A familiar enough beginning, something we've seen a hundred times before – even if this time the technology is slightly out of reach. But the story soon leaves such footholds behind.
Still, if you're like me, you'll appreciate the novel's concept (more intriguing, I think, than anything Gibson has come up with before) more than its execution. His writing has a certain poetry to it, so it may come down to having a taste for his particular style. I have to say, my favorite Gibson novel is Pattern Recognition, which avoids the heavy neologizing of Peripheral and his original cyberpunk novels for plainer prose, putting a greater premium on character and story.
But if you're like me, you'll appreciate Peripheral all the same. It was both fun and frustrating trying to decipher Gibson's invented jargon. I built an index to it as I went along. A glossary. A key. More for the pleasure of cataloging my own discoveries than as an aid to others. But it occurred to me that it might help some readers sort through things, as they read, or after they've finished. So I've dropped my list into this post. If I ever go back in for a second reading, I'll be taking it with me.
I've tried to exclude details that might act as spoilers. Hopefully none have slipped through. I don't recommend reading this before starting the book; wait til you get frustrated yourself, or even better, til after you're done. Then tell me about any mistakes or omissions!
Hefty: utterly dominant retail chain, Walmart + Starbucks?
|
import argparse
import cPickle
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pylab
import scipy
import time
import sys, os, re
from context import diana
import diana.classes.comparison as diana_comparison
import diana.classes.analysis as diana_analysis
def main():
options = parse_user_arguments()
analysis_results(options)
def parse_user_arguments(*args, **kwds):
"""
Parses the arguments of the program
"""
parser = argparse.ArgumentParser(
description = "Analyze the results of the drug comparisons and classify the drug combinations",
epilog = "@oliva's lab 2017")
parser.add_argument('-th','--threshold_list',dest='threshold_list',action = 'store',
help = """List of percentages that will be used as cut-offs to define the profiles of the drugs. It has to be a file containing:
- Different numbers that will be the threshold values separated by newline characters.
For example, a file called "top_threshold.list" containing:
0.1
0.5
1
5
10
""")
parser.add_argument('-f','--formula',dest='formula',action = 'store',default='simpson',
help = """Define the formula used to classify. It can be: simpson, jaccard""")
parser.add_argument('-se','--consider_se',dest='consider_se',action = 'store_true',
help = """Consider Side Effects / ATCs.""")
parser.add_argument('-ws','--workspace',dest='workspace',action = 'store',default=os.path.join(os.path.join(os.path.dirname(__file__), '..'), 'workspace'),
help = """Define the workspace directory where the data directory and the results directory will be created""")
options=parser.parse_args()
return options
#################
#################
# MAIN FUNCTION #
#################
#################
def analysis_results(options):
"""
Analyzes the results of the comparisons
"""
# Start marker for time measure
start = time.time()
print("\n\t\t-------------------------------------------------------------------------------------------------------------------------------\n")
print("\t\tStarting Drug Interactions ANAlysis (DIANA), a program created by @OLIVA'S LAB. Analysis of results: Classify drug combinations\n")
print("\t\t-------------------------------------------------------------------------------------------------------------------------------\n")
# Get the script path
main_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
toolbox_dir = os.path.join(main_path, 'diana/toolbox')
# Check the directory of the profiles, comparisons and analysis
data_dir = os.path.join(options.workspace, "profiles")
check_directory(data_dir)
results_dir = os.path.join(options.workspace, "comparisons")
check_directory(results_dir)
analysis_dir = os.path.join(options.workspace, "analysis")
check_directory(analysis_dir)
# Get the list of thresholds to create the profiles
if options.threshold_list and fileExist(options.threshold_list):
threshold_list = get_values_from_threshold_file(options.threshold_list)
else:
threshold_list = [1, 5, 10, 20, 50]
# Do we consider Side Effects/ATC?
if options.consider_se:
consider_se = True
else:
consider_se = False
# Get the names of the columns
columns = diana_analysis.obtain_columns(threshold_list, ATC_SE=consider_se)
#-----------------------------------------------------#
# PARSE THE RESULTS AND CREATE A PANDAS DATAFRAME #
#-----------------------------------------------------#
pair2comb_file = os.path.join(toolbox_dir, 'pair2comb.pcl')
pair2comb = cPickle.load(open(pair2comb_file))
diana_id_to_drugbank_file = os.path.join(toolbox_dir, 'diana_id_to_drugbank.pcl')
diana_id_to_drugbank = cPickle.load(open(diana_id_to_drugbank_file))
ddi = sum(1 for x in pair2comb.values() if x == 1)
non_ddi = sum(1 for x in pair2comb.values() if x == 0)
print('NUMBER OF DRUG COMBINATIONS:\t\t{}\n'.format(ddi))
print('NUMBER OF NON-DRUG COMBINATIONS:\t{}\n'.format(non_ddi))
output_dataframe = os.path.join(analysis_dir, 'dcdb_comparisons.csv')
if not fileExist(output_dataframe):
# Create a data frame to store the results
df = pd.DataFrame(columns=columns)
# Obtain all the results subfolders of the results main folder
results_dir_list = [f for f in os.listdir(results_dir) if os.path.isdir(os.path.join(results_dir, f))]
for comparison in results_dir_list:
drug_id1, drug_id2 = comparison.split('---')
comparison_dir = os.path.join(results_dir, comparison)
results_table = os.path.join(comparison_dir, 'results_table.tsv')
# Add the Comb field (if it is drug combination or not)
drug1 = diana_id_to_drugbank[drug_id1].upper()
drug2 = diana_id_to_drugbank[drug_id2].upper()
comparison_without_id = '{}---{}'.format(drug1, drug2)
if comparison_without_id in pair2comb:
combination_field = pair2comb[comparison_without_id]
else:
print('The comparison {} is not in the pair2comb dictionary!\n'.format(comparison_without_id))
print(pair2comb)
sys.exit(10)
if not fileExist(results_table):
print('The comparison {} has not been executed properly!\n'.format(comparison))
sys.exit(10)
results = diana_analysis.get_results_from_table(results_table, columns, combination_field)
df2 = pd.DataFrame([results], columns=columns, index=[comparison])
# Add the information to the main data frame
df = df.append(df2)
# Output the Pandas dataframe in a CSV file
df.to_csv(output_dataframe)
else:
df = pd.read_csv(output_dataframe, index_col=0)
#---------------------------#
# REMOVE MISSING VALUES #
#---------------------------#
# Replace the None values in dcstructure by nan
if (df['dcstructure'] == 'None').any():
df = df.replace(to_replace={'dcstructure':{'None':np.nan}})
# Remove the nan values in dcstructure
df = df.dropna()
# Count the number of drug combinations / non-drug combinations
dc_data = df[df['combination'] == 1]
ndc_data = df[df['combination'] == 0]
num_dc = len(dc_data.index)
num_ndc = len(ndc_data.index)
print('Number of drug combinations after removing missing values:\t{}\n'.format(num_dc))
print('Number of non-drug combinations after removing missing values:\t{}\n'.format(num_ndc))
#---------------------------#
# IDENTIFY ME-TOO DRUGS #
#---------------------------#
me_too_dir = os.path.join(analysis_dir, 'me_too_drugs')
create_directory(me_too_dir)
me_too_drugs_table = os.path.join(me_too_dir, 'me_too_drugs.tsv')
me_too_drug_combs_table = os.path.join(me_too_dir, 'me_too_drug_combinations.tsv')
me_too_drug_pairs_file = os.path.join(me_too_dir, 'me_too_drug_pairs.pcl')
me_too_drug_comb_pairs_file = os.path.join(me_too_dir, 'me_too_drug_comb_pairs.pcl')
if not fileExist(me_too_drug_pairs_file) or not fileExist(me_too_drug_comb_pairs_file):
df_struc = df[['dcstructure']]
df_struc = df_struc.astype(float)
me_too_drug_pairs, me_too_drug_comb_pairs = diana_analysis.obtain_me_too_drugs_and_combinations(df_struc, columns, me_too_drugs_table, me_too_drug_combs_table)
cPickle.dump(me_too_drug_pairs, open(me_too_drug_pairs_file, 'w'))
cPickle.dump(me_too_drug_comb_pairs, open(me_too_drug_comb_pairs_file, 'w'))
else:
me_too_drug_pairs = cPickle.load(open(me_too_drug_pairs_file))
me_too_drug_comb_pairs = cPickle.load(open(me_too_drug_comb_pairs_file))
# Process me-too drug combination pairs
me_too_drug_combinations = set()
drug_pair_to_me_too_times = {}
for pair in me_too_drug_comb_pairs:
drug_comb1, drug_comb2 = pair.split('___')
me_too_drug_combinations.add(frozenset([drug_comb1, drug_comb2]))
drug_pair_to_me_too_times.setdefault(drug_comb1, 0)
drug_pair_to_me_too_times.setdefault(drug_comb2, 0)
drug_pair_to_me_too_times[drug_comb1] += 1
drug_pair_to_me_too_times[drug_comb2] += 1
removed_drug_pairs = set()
for pair in me_too_drug_comb_pairs:
drug_comb1, drug_comb2 = pair.split('___')
if drug_comb1 in removed_drug_pairs or drug_comb2 in removed_drug_pairs:
continue
if drug_pair_to_me_too_times[drug_comb1] > drug_pair_to_me_too_times[drug_comb2]:
removed_drug_pairs.add(drug_comb1)
else:
removed_drug_pairs.add(drug_comb2)
# Remove the drug pairs which appear in me-too pairs of drug pairs more times
df = df.loc[~df.index.isin(list(removed_drug_pairs))]
# Count the number of drug combinations / non-drug combinations
dc_data = df[df['combination'] == 1]
ndc_data = df[df['combination'] == 0]
num_dc = len(dc_data.index)
num_ndc = len(ndc_data.index)
print('Number of drug combinations after removing me-too conflictive drug pairs:\t{}\n'.format(num_dc))
print('Number of non-drug combinations after removing me-too conflictive drug pairs:\t{}\n'.format(num_ndc))
img_dir = os.path.join(analysis_dir, 'figures')
create_directory(img_dir)
fig_format = 'png'
#-----------------------------------------------------#
# PLOT DISTRIBUTION OF NUMBER OF TARGETS PER DRUG #
#-----------------------------------------------------#
# Plot distribution of comparisons of targets
drugbank2targets_file = os.path.join(toolbox_dir, 'drugbank_to_targets.pcl')
drugbank_to_targets = cPickle.load(open(drugbank2targets_file))
plot_distribution_targets = os.path.join(img_dir, 'distribution_number_targets.{}'.format(fig_format))
targets = [len(x) for x in drugbank_to_targets.values()]
n, bins, patches = plt.hist(np.array(targets), bins=50, weights=np.zeros_like(np.array(targets)) + 1. / np.array(targets).size, facecolor='r')
plt.xlabel('Number of targets per drug')
plt.ylabel('Relative frequency')
plt.title('Distribution of the number of targets per drug')
plt.savefig(plot_distribution_targets, format=fig_format, dpi=300)
plt.clf()
#----------------------------------------------------------------------------------------------#
# EVALUATE OVERLAP BETWEEN TARGETS, BIOLOGICAL PROCESSES AND PATHWAYS IN DRUG COMBINATIONS #
#----------------------------------------------------------------------------------------------#
tables_dir = os.path.join(analysis_dir, 'tables')
create_directory(tables_dir)
if options.formula != 'jaccard' and options.formula != 'simpson':
print('Please, introduce a correct formula to classify drug combinations: jaccard or simpson!\n')
sys.exit(10)
# Plot of distribution of comparisons of Targets
plot_ji_targets = os.path.join(img_dir, 'distribution_{}_index_targets.{}'.format(options.formula, fig_format))
# Plot of distribution of comparisons of Biological Processes
plot_ji_bp = os.path.join(img_dir, 'distribution_{}_index_biological_processes.{}'.format(options.formula, fig_format))
# Plot of distribution of comparisons of Pathways
plot_ji_pathways = os.path.join(img_dir, 'distribution_{}_index_pathways.{}'.format(options.formula, fig_format))
# Output pickle file of the classification
classification_targets_bp_file = os.path.join(toolbox_dir, 'classification_targets_bp.pcl')
classification_targets_pathways_file = os.path.join(toolbox_dir, 'classification_targets_pathways.pcl')
# Get the classification files
drug_int_2_drugs_file = os.path.join(toolbox_dir, 'drug_int_2_drugs.pcl')
drug_int_2_drugs = cPickle.load(open(drug_int_2_drugs_file))
drug_int_2_info_file = os.path.join(toolbox_dir, 'drug_int_2_info.pcl')
drug_int_2_info = cPickle.load(open(drug_int_2_info_file))
drugbank_to_dcdb_file = os.path.join(toolbox_dir, 'drugbank_to_dcdb.pcl')
drugbank_to_dcdb = cPickle.load(open(drugbank_to_dcdb_file))
bio_processes_file = os.path.join(toolbox_dir, 'target_to_bio_processes.pcl')
target_to_bio_processes = cPickle.load(open(bio_processes_file))
pathways_file = os.path.join(toolbox_dir, 'target_to_pathways.pcl')
target_to_pathways = cPickle.load(open(pathways_file))
target_comparisons = []
bp_comparisons = []
pathway_comparisons = []
dc_to_target_ji = {}
dc_to_bp_ji = {}
dc_to_pathway_ji = {}
all_drugs = set()
for index, row in dc_data.iterrows():
(drug_id1, drug_id2) = index.split('---')
drug1 = diana_id_to_drugbank[drug_id1].upper()
drug2 = diana_id_to_drugbank[drug_id2].upper()
all_drugs.add(drug1)
all_drugs.add(drug2)
if drug1 in drugbank_to_targets and drug2 in drugbank_to_targets:
targets1 = drugbank_to_targets[drug1]
targets2 = drugbank_to_targets[drug2]
if options.formula == 'jaccard':
result_targets = diana_comparison.calculate_jaccard_index(targets1, targets2)
elif options.formula == 'simpson':
result_targets = diana_comparison.calculate_simpson_index(targets1, targets2)
target_comparisons.append(result_targets)
dc_to_target_ji[index] = result_targets
bio_proc1 = get_results_from_dict_of_sets(targets1, target_to_bio_processes)
bio_proc2 = get_results_from_dict_of_sets(targets2, target_to_bio_processes)
if options.formula == 'jaccard':
result_bp = diana_comparison.calculate_jaccard_index(bio_proc1, bio_proc2)
elif options.formula == 'simpson':
result_bp = diana_comparison.calculate_simpson_index(bio_proc1, bio_proc2)
bp_comparisons.append(result_bp)
dc_to_bp_ji[index] = result_bp
pathways1 = get_results_from_dict_of_sets(targets1, target_to_pathways)
pathways2 = get_results_from_dict_of_sets(targets2, target_to_pathways)
if options.formula == 'jaccard':
result_pathways = diana_comparison.calculate_jaccard_index(pathways1, pathways2)
elif options.formula == 'simpson':
result_pathways = diana_comparison.calculate_simpson_index(pathways1, pathways2)
pathway_comparisons.append(result_pathways)
dc_to_pathway_ji[index] = result_pathways
# Plot distribution of comparisons of targets
n, bins, patches = plt.hist(np.array(target_comparisons), bins=50, weights=np.zeros_like(np.array(target_comparisons)) + 1. / np.array(target_comparisons).size, facecolor='r')
plt.xlabel('{} Index of Targets'.format(options.formula.capitalize()))
plt.ylabel('Relative frequency')
plt.title('Distribution of {} Index of Targets in drug combinations'.format(options.formula.capitalize()))
plt.savefig(plot_ji_targets, format=fig_format, dpi=300)
plt.clf()
# Plot distribution of comparisons of biological processes
n, bins, patches = plt.hist(np.array(bp_comparisons), bins=50, weights=np.zeros_like(np.array(bp_comparisons)) + 1. / np.array(bp_comparisons).size, facecolor='b')
plt.xlabel('{} Index of Biological Processes'.format(options.formula.capitalize()))
plt.ylabel('Relative frequency')
plt.title('Distribution of {} Index of Biological Processes in drug combinations'.format(options.formula.capitalize()))
plt.savefig(plot_ji_bp, format=fig_format, dpi=300)
plt.clf()
# Plot distribution of comparisons of pathways
n, bins, patches = plt.hist(np.array(pathway_comparisons), bins=50, weights=np.zeros_like(np.array(pathway_comparisons)) + 1. / np.array(pathway_comparisons).size, facecolor='g')
plt.xlabel('{} Index of Pathways'.format(options.formula.capitalize()))
plt.ylabel('Relative frequency')
plt.title('Distribution of {} Index of Pathways in drug combinations'.format(options.formula.capitalize()))
plt.savefig(plot_ji_pathways, format=fig_format, dpi=300)
plt.clf()
#------------------------------------#
# CLASSIFY THE DRUG COMBINATIONS #
#------------------------------------#
# Similar targets --> ji > 0.5
# Different targets --> ji <= 0.5
target_cut_off = 0.5
# Similar biological processes --> ji > 0.5
# Different biological processes --> ji <= 0.5
bp_cut_off = 0.5
# Similar pathways --> ji > 0.5
# Different pathways --> ji <= 0.5
pathway_cut_off = 0.5
classification_tar_bp = {}
st = 0
dt = 0
st_sbp = 0
st_dbp = 0
dt_sbp = 0
dt_dbp = 0
for dc in dc_to_target_ji:
# Classify by targets and biological processes
if dc in dc_to_bp_ji:
ji_tar = dc_to_target_ji[dc]
ji_bp = dc_to_bp_ji[dc]
if ji_tar > target_cut_off:
classification_tar_bp[dc] = 'similar_targets'
st += 1
if ji_bp > bp_cut_off:
st_sbp += 1
elif ji_bp <= bp_cut_off:
st_dbp += 1
elif ji_tar <= target_cut_off:
dt += 1
if ji_bp > bp_cut_off:
dt_sbp += 1
classification_tar_bp[dc] = 'different_targets_similar_bp'
elif ji_bp <= bp_cut_off:
dt_dbp += 1
classification_tar_bp[dc] = 'different_targets_different_bp'
print('Similar targets {}: similar bp {}, diff bp {}\n'.format(st, st_sbp, st_dbp))
print('Different targets {}: similar bp {}, diff bp {}\n'.format(dt, dt_sbp, dt_dbp))
cPickle.dump(classification_tar_bp, open(classification_targets_bp_file, 'w'))
classification_tar_pathway = {}
st = 0
dt = 0
st_spath = 0
st_dpath = 0
dt_spath = 0
dt_dpath = 0
for dc in dc_to_target_ji:
# Classify by targets and pathways
if dc in dc_to_pathway_ji:
ji_tar = dc_to_target_ji[dc]
ji_path = dc_to_pathway_ji[dc]
if ji_tar > target_cut_off:
classification_tar_pathway[dc] = 'similar_targets'
st += 1
if ji_path > pathway_cut_off:
st_spath += 1
elif ji_path <= pathway_cut_off:
st_dpath += 1
elif ji_tar <= target_cut_off:
dt += 1
if ji_path > pathway_cut_off:
dt_spath += 1
classification_tar_pathway[dc] = 'different_targets_similar_pathways'
elif ji_path <= pathway_cut_off:
dt_dpath += 1
classification_tar_pathway[dc] = 'different_targets_different_pathways'
print('Similar targets {}: similar pathways {}, diff pathways {}\n'.format(st, st_spath, st_dpath))
print('Different targets {}: similar pathways {}, diff pathways {}\n'.format(dt, dt_spath, dt_dpath))
cPickle.dump(classification_tar_pathway, open(classification_targets_pathways_file, 'w'))
# Get number of drugs in drug combinations per number of targets
targets = [len(drugbank_to_targets[drug]) for drug in drugbank_to_targets if drug in all_drugs]
numtargets_to_numdrugs = {}
for target in targets:
numtargets_to_numdrugs.setdefault(target, 0)
numtargets_to_numdrugs[target] += 1
print('Number of drugs in drug combination: {}. Divided by four: {}'.format(len(all_drugs), len(all_drugs)/4))
for numtar, numdrug in sorted(numtargets_to_numdrugs.items(), reverse=True):
print(numtar, numdrug)
# End marker for time
end = time.time()
print('\n DIANA INFO:\tTIME OF EXECUTION: {:.3f} seconds or {:.3f} minutes.\n'.format(end - start, (end - start) / 60))
return
#######################
#######################
# SECONDARY FUNCTIONS #
#######################
#######################
def fileExist(file):
"""
Checks if a file exists AND is a file
"""
return os.path.exists(file) and os.path.isfile(file)
def check_file(file):
"""
Checks if a file exists and if not, raises FileNotFound exception
"""
if not fileExist(file):
raise FileNotFound(file)
def create_directory(directory):
"""
Checks if a directory exists and if not, creates it
"""
try:
os.stat(directory)
except:
os.mkdir(directory)
return
def check_directory(directory):
"""
Checks if a directory exists and if not, raises DirNotFound exception
"""
try:
os.stat(directory)
except:
raise DirNotFound(directory)
class FileNotFound(Exception):
"""
Exception raised when a file is not found.
"""
def __init__(self, file):
self.file = file
def __str__(self):
return 'The file {} has not been found.\nTherefore, the comparison cannot be performed. Please, check that all the profiles have been correctly generated.\n'.format(self.file)
class DirNotFound(Exception):
"""
Exception raised when a directory is not found.
"""
def __init__(self, directory):
self.directory = directory
def __str__(self):
return 'The directory {} has not been found.\nTherefore, the comparison cannot be performed. Please, check that all the parameters have been correctly introduced and the profiles have been correctly generated.\n'.format(self.directory)
def get_results_from_dict_of_sets(list_of_elements, dict_of_sets):
"""
We have a list of elements that appear as keys in a dict, and every element maps to a set of results.
We want to extract the results corresponding to our elements.
"""
results = set()
for element in list_of_elements:
if element in dict_of_sets:
for result in dict_of_sets[element]:
results.add(result)
return results
if __name__ == "__main__":
main()
|
All holidays have purpose and value. But not all holidays are created equal. Some are not only holidays, some are holy-days. Easter and Christmas are certainly two significant holy-days. But the holiest of holy-days is rarely esteemed—Good Friday.
Our culture pays little attention to this holy-day. And if we take our cues and our values from it, Good Friday will most likely come and go with little or no fanfare. But let’s not overlook the mystery and wonder of this holiest of days this year!
I don’t believe it would be an overstatement to propose that the overarching message of the Bible is more profoundly illustrated on Good Friday than on any other day in human history! This is not to take away from Christmas or Easter, but rather to see how these holy-days find their ultimate explanation in Good Friday.
I would also suggest that there is no topic known to us more poignant or profound than Soteriology—the study of God’s gracious gift of salvation from our sin, and how Jesus Christ purchased it for us. The Bible teaches that this gracious gift will be the incomparable, inexplicable, and inexhaustible focal point of every man, woman, boy and girl who will spend the coming ages in the heavenly realms with Christ Jesus!
Accordingly, the mystery of the Son of God’s death on a cross should be the most fascinating and compelling topic for our consideration in our earthly realm as well! After all, there is only one Person in history so significant that our ancestors devised our calendar around His brief earthly pilgrimage. And there is no other event in history so disturbing, and yet so encouraging, as Christ’s untimely and unjust death on a cross.
But don’t be mistaken. Jesus didn’t die because Judas betrayed Him. Jesus wasn’t condemned because the Jewish leaders lied about Him. Jesus wasn’t sentenced to death because Pilate was a coward. Jesus wasn’t killed by Roman soldiers. Jesus didn’t die because He was powerless to save Himself.
The Bible makes it clear that there is no one righteous before God, for all of us have sinned and fallen short of our Creator’s expectation for us. God’s Word also makes it plain: the wages of our sin is death. Physical death, yes. But far worse, spiritual death as well. We understand that physical death occurs when our spirit is separated from our bodies. Spiritual death, the second death, is when our spirit is separated from its Creator—forever. The consequence of our sin against our Creator is both physical and spiritual death.
Because God is just, someone had to pay the consequences of our sin. Even though Jesus had never committed any sin (thoughts, words or deeds) He bore our sins, and He took our punishment for them. He died because of us. He died for us. He died in place of us. But there’s much more.
The Bible makes it equally clear that God the Father so loved us, that He gave His one and only Son, that whoever would believe on Him would not have to die, but would have eternal life instead. God’s Word also tells us that His Son Jesus so loved us, that He willingly laid down His life for us.
So on Good Friday, two thousand years ago, God’s grace was gloriously provided by Jesus’ death on a cross! He who was rich became poor so that we through His poverty might become rich! He who had no experience with sin became sin for us so that through Him we might gain the sinlessness of God!
God’s justice was perfectly satisfied on Good Friday. God’s love was perfectly demonstrated on Good Friday. God’s grace is freely offered to you because of Good Friday! That is the Good News of Good Friday! And because Jesus conquered death on Resurrection Day, if you will have faith in God’s grace, you too will be resurrected one day to marvel at the magnitude of His grace forever! No wonder the holiest of holy-days has come to be known as Good Friday!
Now that Election Day is behind us, Americans have spoken. But what have they said? Based on the percentage of those who didn’t vote, many Americans said they didn’t have anything to say, or maybe they communicated that they didn’t care.
Some might not have said anything because they made the common mistake of thinking their vote wouldn’t make a difference. They overlooked the clear lessons of history, which demonstrate that elections have often been decided by very narrow margins. They needed to take to heart that their vote can make a difference!
While we aren’t sure what may have kept people from voting, at least we have an objective record of what the voters did say. Now it’s up to the pundits to interpret the implications of America’s votes. Is America satisfied with the status quo, or does it want change? If Americans want change, will their elected officials be able to bring it about? Finally, if they do bring about change, will it be in the appropriate direction?
Most Americans pay lip service to the idea that democracy is the greatest form of government in the world. Accordingly, we do everything within our power to bring democracy’s benefits to countries that have been ruled harshly by some other form of government. While these efforts are well-intentioned, they often overlook the reality that a democracy is only effective when the citizens of that country are sufficiently committed to securing and maintaining a society of order and fairness.
Our national anthem says we are, “the land of the free and the home of the brave.” Some have pointed out that we are “the land of the free because of the brave.” And while “the brave” generally refers to the men and women who make up our military forces, I wonder if any democracy can endure without a significant percentage of “brave” citizens. Are you brave enough to speak out against evil, even fight against evil, for the sake of freedom?
Not long ago I was cleaning our pickup truck when I found an acorn in the otherwise empty bed. Since I couldn’t remember parking under any trees, I wondered how it got there. I started to throw it out, but for some reason dropped it in my pocket instead.
As I emptied my pockets at the end of the day, out came my lowly acorn; and the reason I couldn’t throw it away. What at first seemed small and insignificant, actually has great potential! Given enough time and reasonable conditions, that tiny acorn could produce a vast forest of majestic oak trees! And since there are lots of beautiful oak trees around here, we intend to plant this one where there aren’t many. We will carry it 1,500 miles west to the often tree-less soil where I grew up.
I’m convinced God put that acorn in my truck for a reason. In fact I even carry it in my pocket from time to time as a reminder. That acorn has become symbolic of the life-changing Gospel seed we carry. While Jesus may have become ordinary to us, His seed can still produce extraordinary results when planted in reasonable conditions!
Knowing this to be true, we intend to carry the Gospel to Colorado to plant a vibrant, new church where there are remarkably few. In contrast to its scenic vistas, Colorado’s spiritual landscape is largely barren. We have found many people there who are thirsty for hope, purpose, and peace. Sadly, the spiritual oasis they see in the distance often ends up being a mirage. But with God’s help, and with your partnership, we intend to change that!
There are some important ways that you can partner with us in beautifying the spiritual landscape of Colorado. First, we need people who will pray for us. Church planting is a supernatural task that requires supernatural enablement. Second, we need people who will invest financial support in our ministry. Since time is short, we want to be able to focus our time and energy on reaching the unreached multitudes. Lastly, we need people who will go with us to Colorado to help us make disciples of Jesus who will eventually become our new church!
If you would like to know more about what we are doing, and how you can be involved with us, we encourage you to visit our website: www.makelovegrow.co or if you prefer, give us a call. We pray God will lead you to partner with us in planting a new, vibrant church in Northeastern Colorado!
A Chinese proverb has wisely noted, “The best time to plant a tree was twenty years ago. The next best time is now.” So, whether planting a tree or a church, now is the time to plant! And just like one tree can produce a phenomenal forest, with a touch from God just one church can produce a landscape that is blooming with verdant, new churches!
William Miller was a well-known Baptist preacher and student of Bible prophecy. After years of research, he concluded that Christ would return sometime between March 21, 1843 and March 21, 1844. When that time period passed, further study convinced him and another preacher that the wrong Jewish calendar had been used and a new date was proposed, October 22, 1844.
The support for this prediction was compelling enough that up to 100,000 followers gathered in groups to await Christ’s return on that day. When midnight passed without Christ’s return, most of the followers gave up their hopes and left; some even gave up their faith.
Fast forward one hundred and forty years or so. Edgar C. Whisenant, a former NASA engineer and student of Bible prophecy predicted that the Rapture would occur sometime between September 11 and September 13, 1988. His books, “88 Reasons Why the Rapture Will Be in 1988” and “On Borrowed Time” were very popular and hundreds of thousands of them were published and distributed.
Since many of my Christian friends were convinced of his compelling arguments and conclusion, I too read “88 Reasons.” While his research was flawed, at least many complacent Christians began to witness and give as never before!
When his prediction failed to come to pass, Whisenant wrote more books with revised predictions for the Rapture specifying dates in 1989, 1993, 1994, and even as late as 1997. Not surprisingly, his subsequent books received little attention.
Such date-setting runs headlong into Scripture itself. Jesus made it crystal clear in Matthew 24:36, “But of that day and hour no one knows, not even the angels of heaven, nor the Son, but My Father only.” Nevertheless, in verses 32-35 Jesus also warns that we can know “the season” of His return! Therefore, His subsequent warning to “keep watch” for His return is especially relevant in light of the many Bible prophecies that continue to be fulfilled!
What we can know for sure boils down to this; each passing day brings us one day closer to Christ’s return. Be ready!
“The LORD is my shepherd…” So begins one of the most loved and best-known passages in all of the Bible. True to the saying, “it takes one to know one,” Psalm 23 was written about 3,000 years ago by a shepherd who later became a king: David, son of Jesse of Bethlehem.
After David’s bold opening declaration, he makes three statements expressing his complete trust in the LORD as his shepherd. “I shall not want” summarizes David’s confidence in the LORD’s ability to provide for his every need. “I will fear no evil” demonstrates his trust in the LORD to protect him, even in the darkest days of his life. “I will dwell in the house of the LORD forever,” articulates David’s faith that even in death, his Shepherd will care for him throughout eternity!
This beautiful psalm not only talks about David’s Shepherd, it talks to his Shepherd as well! In verses 2-3, David talks about all his Shepherd does for him and why. In verse 4 the pronouns shift from “He” to “You” and David now speaks to his Shepherd in a written prayer. David’s confidence in facing death is in his Shepherd’s presence and protection. In verse 5 he speaks affectionately of the LORD’s gracious provisions, even in the presence of his enemies. Finally, in verse 6 David concludes his thoughts by speaking about his Shepherd’s care “all the days of my life” and then even after his days come to an end and “forever” begins.
This poignant psalm is rich with word pictures! It starts with a Shepherd, and his flock of sheep, one of whom is King David himself. It pictures them lying down in green pastures and drinking from still waters. It paints sheep following the Shepherd on paths of righteousness and through the valley of the shadow of death. It draws comfort from a shepherd’s rod and his staff. It portrays a bountiful banquet table prepared in the presence of enemies. It visualizes the blessedness of an anointed head and a cup running over. It concludes with the (obviously magnificent) house of the LORD in heaven.
This psalm is so rich I have assigned aspiring preachers the task of writing multiple messages from this psalm, each one focusing on a unique truth, promise, principle, viewpoint, illustration or challenge contained within it. Though they would often complain about my assignment, invariably these students of God’s Word would fall deeper in love with Psalm 23 with each new message they wrote.
In my earlier years I wondered how a psalm with six verses and a little over one hundred words could be so special to so many people. But over the last twenty years I have studied it, memorized it, preached it, and recited it on so many difficult occasions and in countless places. Its ability to move me and so many others is no longer mysterious! After all, like King David, we too are sheep in desperate need of a trustworthy Shepherd!
The day started out like every other day he could remember. As soon as he felt the warm rays of the sun strike his face, he pulled himself up from his makeshift bed on the side of the road. Thankfully, it hadn’t rained during the night and the temperatures were still manageable this time of the year. Of course, this good fortune would change all too soon.
He shook the dust off his motley robe, took his cup and placed his last two coins in it. “Alms for the poor?” he constantly pleaded, as he rattled the coins in his cup.
Thankfully, an occasional passerby would reward his begging with another small coin. Maybe he would have enough by day’s end to buy some stale bread to temporarily appease the constant gnawing in his stomach.
He couldn’t help that he was poor, as no one would hire him to work. After all, what kind of work could a blind man do? It appeared painfully obvious to him and everyone else that his life’s work would amount to nothing but begging.
While hope was normally a thought that rarely crossed his mind, he had recently heard some amazing stories of sick people being healed, cripples walking, deaf hearing, and yes, even blind people seeing! Not surprisingly, hope was not only crossing his mind these days, hope was blazing a major pathway through it! Could this be the day he received infinitely more than a few coins; his sight, his life?
His dreams were interrupted when he heard a large crowd coming his way. His curiosity piqued, he stopped begging for alms, and started begging for information. Thankfully, a passerby had mercy on him and responded, “Jesus of Nazareth is passing by.” Could it be true that Hope was within reach? But how could he speak to Him since He was surrounded by such a large crowd?
So he began shouting, “Jesus, Son of David, have mercy on me!” Hope heard, and Hope stopped! Those who had just chided him for his annoying shouts now encouraged him, “Cheer up! He’s calling you!” Bartimaeus threw off his cloak, jumped to his feet and felt his way to Hope.
“Go,” He simply said, “your faith has healed you.” Immediately, he saw Hope, and his life reborn!
What do you see when you look at someone? I was told recently that one of the first things that people notice about you is your shoes. Maybe, but I would think it might depend on what is important to you.
Of course, a shoe salesperson would probably notice our shoes. But, a hair stylist would likely notice our hair. An athlete would notice our muscles. An optometrist would notice our glasses or contacts. A clothing salesperson would notice our clothes. A dental hygienist would notice our teeth. A car dealer would notice our car. A Realtor would notice our house. You get the idea!
But there is more to what we see. Men would notice particular characteristics about men, and different attributes about ladies. Conversely, ladies would notice certain characteristics about ladies, and different qualities about men. Our age could very likely cause us to notice unique characteristics in the people we see. But is there still more?
I remember getting on an elevator at the hospital one day with a family member of someone having surgery. After we exited, the family member told me that she was fascinated about how I stood in an elevator full of people. Why? Because she was in the elevator business!
In case you are wondering if I was standing on my head, I wasn’t. But I was standing facing the people in the elevator rather than facing the door! I’m pretty sure I don’t normally do that, and I had no particular reason to stand facing the people that day; it just turned out that way, and I didn’t give it a thought.
This family member went on to say that I had validated the research of people in the elevator business. They had found that only people from a handful of careers (preachers was one of them) would be comfortable facing people in the close confines of an elevator! Accordingly, the inside of elevator doors should look as good as possible!
But we may be seeing just the tip of the iceberg of what and how we see! Do you have eyes that see what Jesus saw? Did Jesus notice what kind of sandals people wore? Or, how they styled their hair? Or, what kind of clothes they had on? Or, how nice their teeth were? Or, what breed of donkey they were riding?
I’ve noticed Jesus saw at least a couple of things that I often miss. Jesus had eyes that saw when people were harassed and helpless (Matthew 9:36). Wow, what does that look like? Would I even notice that if I was looking for it?
Jesus also had eyes to see faith (Mark 2:5). We often think faith is an internal, invisible quality. Yet when we read about the people in the “faith chapter” of the Bible, Hebrews 11, we see that their faith obeyed, offered, pleased, sacrificed, built, left, blessed, spoke, worked and so much more! Their actions made their faith visible. Do you see what Jesus sees?
A few years ago we built an enclosed storage area near our house. Not long afterward I was moving things around looking for something when this unpleasant thought crossed my mind: “Snakes would probably like to nest in this space!” While I had never seen a snake, or any evidence of one in this area, just the possibility of it would quicken my heart rate, and my rummaging speed, every time I opened the door!
It wasn’t too long before I decided I should do something to minimize the potential of encountering a slithering reptile in my storage area! So after doing some research on the various methods and products available for repelling these repulsive creatures, I purchased a granular snake repellent at a local hardware store.
Now all I had to do was to spread the repellent around the walls of the storage space. It was guaranteed that snakes would not cross it. I immediately thought of two concerns: First, I would have to move all the items in storage away from the walls so that I could pour out the repellent as directed. But what if I already had snakes hiding in the stuff I would be moving? I wasn’t too excited about the idea of moving things around and raising the ire of any snakes that may have already made my storage area their home!
Secondly, what if I had snakes living in the center of the storage area? I might not disturb them while moving the stuff around the perimeter, but what if the repellent ended up keeping them from escaping my building? Worse, what if the repellent infuriated them to the point that my hypothetical snakes became more aggressive than normal?
A few days ago I noticed my snake repellent container sitting in a corner of my storage area. I had never opened it! I had feared snakes enough to buy the repellent, and to store it in my storage area, but I didn’t fear the slithering reptiles enough to use the repellent as directed! Bottom line: Unless snakes can read what it says on the container, I might as well set out a liter of Dr. Pepper!
Before you take me to task on my vacillation, let me suggest this is a fairly common trait among us humans! If not so much in buying snake repellents, I see it often in “buying fire insurance.” It’s a term some folks use to describe doing religious practices intended to appease God and avoid hell. Token activities like merely going to church, coming down to the altar, repeating a rote prayer, or even being baptized will not “buy” anyone anything!
If we really want to repel snakes, we are going to have to go all-in and follow the directions wholeheartedly, having faith it will work! Accordingly, if we want to appease God and avoid hell, we are going to have to go all-in and follow Jesus wholeheartedly, having faith He will work!
You will never get lost if you always know where you are and where you are going! I used that statement often when I taught flying. After all, present position is a simple extension of where one has been in the past, combined with the direction and speed one has traveled since the last known position.
For instance, pilots always know the airport they are departing from. And as long as they kept track of their time in flight and the direction and speed they were heading, they would always know their approximate position, even if they couldn’t always pinpoint it on a map.
Then each time they could positively identify their present position on the map, the navigating process would start all over again. They were always a given number of minutes away from their last known position, and they had been flying a given direction and speed. So even when they were in between positive identifications of their location on a map, they still had an approximate idea of where they were, and where they were going.
Unfortunately my students didn’t always know how the winds aloft would affect their direction and speed, and because they often wandered from their desired heading, they would get off course. But once they determined how much they were off course, I would teach them how to make course corrections to get them back on course and ensure they would reach their ultimate destination before running out of fuel. Obviously, everything they were doing before and during the flight should be a means of getting them safely to their destination airport!
Have you ever been lost? Do you know where you came from? Do you know where you are? Do you know where you are going? Can you get where you want to go in the direction you are presently traveling? If you want to go to the Atlantic Ocean, you must proceed on an easterly course. If you are heading on a westerly course, you will never make it!
Some people tell me they want to go to heaven, yet they want to get there maintaining a misguided course. Sadly, if their present course is not heading in heaven’s direction, they will never make it! They must make a course correction before they run out of fuel, or miss heaven forever. Since heaven is God’s home, He alone has the prerogative to advise us on the required course to get there.
The only course that will take you to heaven is to aim for Jesus Christ! He said that He was the only way to the Father, and the only way to the Father’s eternal home in heaven. Reaching heaven will only be by God’s grace, through your faith in God’s Son alone. Are you heading the wrong direction, or just off course? Since your fuel is limited, get on course today by turning from anyone or anything, to Jesus alone!
|
#
# Quru Image Server
#
# Document: template_manager.py
# Date started: 22 Sep 2015
# By: Matt Fozard
# Purpose: Provides a managed interface to the image templates
# Requires:
# Copyright: Quru Ltd (www.quru.com)
# Licence:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/
#
# Last Changed: $Date$ $Rev$ by $Author$
#
# Notable modifications:
# Date By Details
# ========= ==== ============================================================
# 19Aug2016 Matt Added system default template
#
from datetime import datetime, timedelta
import threading
from .models import ImageTemplate, Property
from .template_attrs import TemplateAttrs
from .util import KeyValueCache
class ImageTemplateManager(object):
"""
Provides access to image templates (used mostly by the image manager),
in a variety of formats, backed by a cache for performance.
The cache is invalidated and refreshed automatically.
Rather than having to monitor object change times and search for added and
deleted rows, we'll just use a simple counter for detecting database changes.
This is the same mechanism as used for PermissionsManager.
"""
TEMPLATE_CACHE_SYNC_INTERVAL = 60
_TEMPLATE_LIST_KEY = '__template_info_list__'
_TEMPLATE_NAMES_KEY = '__template_names__'
_TEMPLATE_NAMES_LOWER_KEY = '__template_names_lower__'
def __init__(self, data_manager, logger):
self._db = data_manager
self._logger = logger
self._default_template_name = ''
self._data_version = 0
self._template_cache = KeyValueCache()
self._update_lock = threading.Lock()
self._last_check = datetime.min
self._useable = threading.Event()
self._useable.set()
def get_template_list(self):
"""
Returns a list of {id, name, description, is_default} dictionaries
representing the available templates, sorted by name.
"""
self._check_data_version()
cached_list = self._template_cache.get(ImageTemplateManager._TEMPLATE_LIST_KEY)
if cached_list is None:
db_obj_list = [
tdata['db_obj'] for tdata in self._template_cache.values()
if isinstance(tdata, dict)
]
cached_list = [{
'id': dbo.id,
'name': dbo.name,
'description': dbo.description,
'is_default': (dbo.name.lower() == self._default_template_name)
} for dbo in db_obj_list
]
cached_list.sort(key=lambda o: o['name'])
self._template_cache.set(ImageTemplateManager._TEMPLATE_LIST_KEY, cached_list)
return cached_list
def get_template_names(self, lowercase=False):
"""
Returns a sorted list of available template names - those names that
are valid for use with get_template() and when generating an image.
"""
self._check_data_version()
if lowercase:
cached_list = self._template_cache.get(ImageTemplateManager._TEMPLATE_NAMES_LOWER_KEY)
if cached_list is None:
names_list = self._get_cached_names_list(True)
cached_list = [name.lower() for name in names_list]
self._template_cache.set(
ImageTemplateManager._TEMPLATE_NAMES_LOWER_KEY, cached_list
)
return cached_list
else:
cached_list = self._template_cache.get(ImageTemplateManager._TEMPLATE_NAMES_KEY)
if cached_list is None:
cached_list = self._get_cached_names_list(True)
self._template_cache.set(
ImageTemplateManager._TEMPLATE_NAMES_KEY, cached_list
)
return cached_list
def get_template(self, name):
"""
Returns the TemplateAttrs object matching the given name (case insensitive),
or None if no template matches the name.
"""
self._check_data_version()
tdata = self._template_cache.get(name.lower())
return tdata['attr_obj'] if tdata is not None else None
def get_default_template(self):
"""
Returns the TemplateAttrs object for the system's default image template.
"""
self._check_data_version()
tdata = self._template_cache.get(self._default_template_name)
if tdata is None:
raise ValueError(
'System default template \'%s\' was not found' % self._default_template_name
)
return tdata['attr_obj']
def get_template_db_obj(self, name):
"""
Returns the ImageTemplate database object matching the given name
(case insensitive), or None if no template matches the name.
"""
self._check_data_version()
tdata = self._template_cache.get(name.lower())
return tdata['db_obj'] if tdata is not None else None
def reset(self):
"""
Invalidates the cached template data by incrementing the database data
version number. This change will be detected on the next call to this
object, and within the SYNC_INTERVAL by all other processes.
"""
with self._update_lock:
new_ver = self._db.increment_property(Property.IMAGE_TEMPLATES_VERSION)
self._last_check = datetime.min
            self._logger.info('Image templates setting new version ' + str(new_ver))
def _load_data(self):
"""
Re-populates the internal caches with the latest template data from the database.
The internal update lock must be held while this method is being called.
"""
# Reset the caches
self._template_cache.clear()
db_ver = self._db.get_object(Property, Property.IMAGE_TEMPLATES_VERSION)
self._data_version = int(db_ver.value)
# Refresh default template setting
db_def_t = self._db.get_object(Property, Property.DEFAULT_TEMPLATE)
self._default_template_name = db_def_t.value.lower()
# Load the templates
db_templates = self._db.list_objects(ImageTemplate)
for db_template in db_templates:
try:
# Create a TemplateAttrs (this also validates the template values)
template_attrs = TemplateAttrs(
db_template.name,
db_template.template
)
# If here it's valid, so add to cache
self._template_cache.set(
db_template.name.lower(),
{'db_obj': db_template, 'attr_obj': template_attrs}
)
except Exception as e:
self._logger.error(
'Unable to load \'%s\' template configuration: %s' % (
db_template.name, str(e)
)
)
self._logger.info('Loaded templates: %s at version %d' % (
', '.join(self._template_cache.keys()), self._data_version
))
def _check_data_version(self, _force=False):
"""
Periodically checks for changes in the template data, sets the
internal data version number, and resets the caches if necessary.
Uses an internal lock for thread safety.
"""
check_secs = ImageTemplateManager.TEMPLATE_CACHE_SYNC_INTERVAL
if _force or (self._data_version == 0) or (
self._last_check < (datetime.utcnow() - timedelta(seconds=check_secs))
):
# Check for newer data version
if self._update_lock.acquire(0): # 0 = nonblocking
try:
old_ver = self._data_version
db_ver = self._db.get_object(Property, Property.IMAGE_TEMPLATES_VERSION)
if int(db_ver.value) != old_ver:
action = 'initialising with' if old_ver == 0 else 'detected new'
self._logger.info('Image templates %s version %s' % (action, db_ver.value))
self._useable.clear()
self._load_data()
finally:
self._last_check = datetime.utcnow()
self._useable.set()
self._update_lock.release()
else:
# Another thread is checking or updating. When the server is busy,
# because the update is time based, many threads get here at the
# same time.
# v4.1 It is safe to carry on if a check is in place but an update
# is not. If an update is in place then the template cache is empty
# and we should wait for it to load.
if not self._useable.is_set() or self._data_version == 0:
self._logger.debug('Another thread is loading image templates, waiting for it')
if not self._useable.wait(10.0):
self._logger.warning('Timed out waiting for image template data')
else:
self._logger.debug('Got new image template data, continuing')
def _get_cached_names_list(self, sort=False):
"""
Returns a list of all the template names currently in the internal cache.
"""
db_obj_list = [
tdata['db_obj'] for tdata in self._template_cache.values()
if isinstance(tdata, dict)
]
names_list = [dbo.name for dbo in db_obj_list]
if sort:
names_list.sort()
return names_list
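
# A minimal usage sketch (hypothetical wiring; in the real application the data
# manager and logger instances are supplied by the app's service layer):
def _example_usage(data_manager, logger):
    mgr = ImageTemplateManager(data_manager, logger)
    names = mgr.get_template_names(lowercase=True)   # cached, lower-cased names
    attrs = mgr.get_template(names[0]) if names else mgr.get_default_template()
    mgr.reset()  # bump the version so all processes reload within TEMPLATE_CACHE_SYNC_INTERVAL
    return attrs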
|
A handmade print of a pair of Runner Ducks. Indian Runner Ducks are such characters – here are two of the tufted variety having a tussle over a worm. This hand-made print is based on a real life incident involving the ‘Cheeky Girls’; my cousin’s ducks which I had the pleasure of duck sitting for a week.
The printed area is 23 x 13.5cm and the paper is 29.5 x 21cm. It is a 2 colour print, with the ducks printed in blue and yellow oil based inks, overlapping to create green. They are printed onto Heritage paper.
All of my linocut prints turn out slightly differently – depending on where the linocut is placed on the paper, how much ink is rolled on, and how much pressure is used to transfer the ink from the linocut onto the paper. This is a limited edition print of 50.
|
import json
import re
from . import ThirdPCSource
# Generate trackers.json using Ghostery data (issue #1)
class GhosterySource(ThirdPCSource):
SOURCE = 'https://raw.githubusercontent.com/jonpierce/ghostery/master/' + \
'firefox/ghostery-statusbar/ghostery/chrome/content/ghostery-bugs.js'
FILENAME = 'trackers.json'
def _generate(self):
content = self._fetch_url(self.SOURCE).strip(';')
rules = json.loads(content)
self._data['by_regexp'] = {}
self._data['by_url'] = {}
self._logger.info('Parsing {} rules'.format(len(rules)))
for entry in rules:
pattern = entry['pattern']
if re.search(r'[\(\|\*\?]', pattern):
# regexp rule: "/google-analytics\\.com\\/(urchin\\.js|ga\\.js)/i"
pattern = re.sub(r'^/|/i$', '', pattern) # remove wrapping /
self._data['by_regexp'][pattern] = entry['name']
self._count += 1
else:
# strpos rule: "/\\/piwik\\.js/i"
pattern = re.sub(r'^/|/i$', '', pattern)
pattern = re.sub(r'\\', '', pattern)
self._data['by_url'][pattern] = entry['name']
self._count += 1
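
# A minimal sketch of the classification heuristic used in _generate() above
# (the example patterns come from the inline comments; they are illustrative):
def _classify(pattern):
    """Return the bucket a Ghostery pattern falls into, mirroring _generate()."""
    return 'by_regexp' if re.search(r'[\(\|\*\?]', pattern) else 'by_url'
# _classify(r'/google-analytics\.com\/(urchin\.js|ga\.js)/i') -> 'by_regexp'
# _classify(r'/\/piwik\.js/i') -> 'by_url'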
|
Huge savings with Betfair discount codes: Use this amazing offer and get the chance to win a share of £10K at the Showdown at Betfair. So don't wait and use this Betfair voucher now!
Fantastic Betfair discount codes: Enjoy your savings to the fullest and get £30 Free Bet with a £10 Deposit. Amazing Betfair voucher codes to save huge on your purchase!
Awesome Betfair Vouchers: Take advantage of this deal and grab up to £100 in Free Bets at Betfair. Excellent bargain on your Betfair products purchase.
Check out our exclusive Betfair discount codes to receive huge discounts on your purchase. Bookmark this page now, as most of these Betfair voucher codes have limited validity.
|
import os
import sys
try:
import termios
except ImportError:
# Not available on Windows
termios = None
from contextlib import contextmanager
from invoke.vendor.six import BytesIO, b, wraps
from mock import patch, Mock
from pytest import skip
from pytest_relaxed import trap
from invoke import Program, Runner
from invoke.terminals import WINDOWS
support = os.path.join(os.path.dirname(__file__), "_support")
ROOT = os.path.abspath(os.path.sep)
def skip_if_windows(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if WINDOWS:
skip()
return fn(*args, **kwargs)
return wrapper
@contextmanager
def support_path():
sys.path.insert(0, support)
try:
yield
finally:
sys.path.pop(0)
def load(name):
with support_path():
imported = __import__(name)
return imported
def support_file(subpath):
with open(os.path.join(support, subpath)) as fd:
return fd.read()
@trap
def run(invocation, program=None, invoke=True):
"""
Run ``invocation`` via ``program``, returning output stream captures.
``program`` defaults to ``Program()``.
To skip automatically assuming the argv under test starts with ``"invoke
"``, say ``invoke=False``.
:returns: Two-tuple of ``stdout, stderr`` strings.
"""
if program is None:
program = Program()
if invoke:
invocation = "invoke {}".format(invocation)
program.run(invocation, exit=False)
return sys.stdout.getvalue(), sys.stderr.getvalue()
def expect(
invocation, out=None, err=None, program=None, invoke=True, test=None
):
"""
Run ``invocation`` via ``program`` and expect resulting output to match.
May give one or both of ``out``/``err`` (but not neither).
``program`` defaults to ``Program()``.
To skip automatically assuming the argv under test starts with ``"invoke
"``, say ``invoke=False``.
To customize the operator used for testing (default: equality), use
``test`` (which should be an assertion wrapper of some kind).
"""
stdout, stderr = run(invocation, program, invoke)
# Perform tests
if out is not None:
if test:
test(stdout, out)
else:
assert out == stdout
if err is not None:
if test:
test(stderr, err)
else:
assert err == stderr
# Guard against silent failures; since we say exit=False this is the only
# real way to tell if stuff died in a manner we didn't expect.
elif stderr:
assert False, "Unexpected stderr: {}".format(stderr)
return stdout, stderr
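
# A minimal usage sketch (hypothetical invocation and expected substring; the
# real tests in this suite assert on exact output from tasks under _support):
def _substring(actual, expected):
    assert expected in actual
def _example_expect_usage():
    # Deliberately not named test_* so it is only an illustration, not collected.
    expect("--help", out="Usage", test=_substring)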
class MockSubprocess(object):
def __init__(self, out="", err="", exit=0, isatty=None, autostart=True):
self.out_file = BytesIO(b(out))
self.err_file = BytesIO(b(err))
self.exit = exit
self.isatty = isatty
if autostart:
self.start()
def start(self):
# Start patchin'
self.popen = patch("invoke.runners.Popen")
Popen = self.popen.start()
self.read = patch("os.read")
read = self.read.start()
self.sys_stdin = patch("sys.stdin", new_callable=BytesIO)
sys_stdin = self.sys_stdin.start()
# Setup mocks
process = Popen.return_value
process.returncode = self.exit
process.stdout.fileno.return_value = 1
process.stderr.fileno.return_value = 2
# If requested, mock isatty to fake out pty detection
if self.isatty is not None:
sys_stdin.isatty = Mock(return_value=self.isatty)
def fakeread(fileno, count):
fd = {1: self.out_file, 2: self.err_file}[fileno]
return fd.read(count)
read.side_effect = fakeread
# Return the Popen mock as it's sometimes wanted inside tests
return Popen
def stop(self):
self.popen.stop()
self.read.stop()
self.sys_stdin.stop()
def mock_subprocess(out="", err="", exit=0, isatty=None, insert_Popen=False):
def decorator(f):
@wraps(f)
# We have to include a @patch here to trick pytest into ignoring
# the wrapped test's sometimes-there, sometimes-not mock_Popen arg. (It
# explicitly "skips ahead" past what it perceives as patch args, even
# though in our case those are not applying to the test function!)
# Doesn't matter what we patch as long as it doesn't
# actually get in our way.
@patch("invoke.runners.pty")
def wrapper(*args, **kwargs):
proc = MockSubprocess(
out=out, err=err, exit=exit, isatty=isatty, autostart=False
)
Popen = proc.start()
args = list(args)
args.pop() # Pop the dummy patch
if insert_Popen:
args.append(Popen)
try:
f(*args, **kwargs)
finally:
proc.stop()
return wrapper
return decorator
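
# A minimal decorator-usage sketch (hypothetical values; with insert_Popen=True
# the wrapped function receives the Popen mock as its final positional arg):
@mock_subprocess(out="hi\n", exit=1, insert_Popen=True)
def _example_mocked_subprocess(mock_Popen):
    # While this body runs, invoke.runners.Popen, os.read and sys.stdin are
    # patched, and the fake process reports stdout "hi\n" with exit code 1.
    assert mock_Popen.called is False  # nothing has spawned it yet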
def mock_pty(
out="",
err="",
exit=0,
isatty=None,
trailing_error=None,
skip_asserts=False,
insert_os=False,
be_childish=False,
os_close_error=False,
):
# Windows doesn't have ptys, so all the pty tests should be
# skipped anyway...
if WINDOWS:
return skip_if_windows
def decorator(f):
import fcntl
ioctl_patch = patch("invoke.runners.fcntl.ioctl", wraps=fcntl.ioctl)
@wraps(f)
@patch("invoke.runners.pty")
@patch("invoke.runners.os")
@ioctl_patch
def wrapper(*args, **kwargs):
args = list(args)
pty, os, ioctl = args.pop(), args.pop(), args.pop()
# Don't actually fork, but pretend we did (with "our" pid differing
# depending on be_childish) & give 'parent fd' of 3 (typically,
# first allocated non-stdin/out/err FD)
pty.fork.return_value = (12345 if be_childish else 0), 3
# We don't really need to care about waiting since not truly
# forking/etc, so here we just return a nonzero "pid" + sentinel
# wait-status value (used in some tests about WIFEXITED etc)
os.waitpid.return_value = None, Mock(name="exitstatus")
# Either or both of these may get called, depending...
os.WEXITSTATUS.return_value = exit
os.WTERMSIG.return_value = exit
# If requested, mock isatty to fake out pty detection
if isatty is not None:
os.isatty.return_value = isatty
out_file = BytesIO(b(out))
err_file = BytesIO(b(err))
def fakeread(fileno, count):
fd = {3: out_file, 2: err_file}[fileno]
ret = fd.read(count)
# If asked, fake a Linux-platform trailing I/O error.
if not ret and trailing_error:
raise trailing_error
return ret
os.read.side_effect = fakeread
if os_close_error:
os.close.side_effect = IOError
if insert_os:
args.append(os)
# Do the thing!!!
f(*args, **kwargs)
# Short-circuit if we raised an error in fakeread()
if trailing_error:
return
# Sanity checks to make sure the stuff we mocked, actually got ran!
pty.fork.assert_called_with()
# Skip rest of asserts if we pretended to be the child
if be_childish:
return
# Expect a get, and then later set, of terminal window size
assert ioctl.call_args_list[0][0][1] == termios.TIOCGWINSZ
assert ioctl.call_args_list[1][0][1] == termios.TIOCSWINSZ
if not skip_asserts:
for name in ("execve", "waitpid"):
assert getattr(os, name).called
# Ensure at least one of the exit status getters was called
assert os.WEXITSTATUS.called or os.WTERMSIG.called
# Ensure something closed the pty FD
os.close.assert_called_once_with(3)
return wrapper
return decorator
class _Dummy(Runner):
"""
Dummy runner subclass that does minimum work required to execute run().
It also serves as a convenient basic API checker; failure to update it to
match the current Runner API will cause TypeErrors, NotImplementedErrors,
and similar.
"""
# Neuter the input loop sleep, so tests aren't slow (at the expense of CPU,
# which isn't a problem for testing).
input_sleep = 0
def start(self, command, shell, env, timeout=None):
pass
def read_proc_stdout(self, num_bytes):
return ""
def read_proc_stderr(self, num_bytes):
return ""
def _write_proc_stdin(self, data):
pass
def close_proc_stdin(self):
pass
@property
def process_is_finished(self):
return True
def returncode(self):
return 0
def stop(self):
pass
@property
def timed_out(self):
return False
# Dummy command that will blow up if it ever truly hits a real shell.
_ = "nope"
# Runner that fakes ^C during subprocess exec
class _KeyboardInterruptingRunner(_Dummy):
def __init__(self, *args, **kwargs):
super(_KeyboardInterruptingRunner, self).__init__(*args, **kwargs)
self._interrupted = False
# Trigger KeyboardInterrupt during wait()
def wait(self):
if not self._interrupted:
self._interrupted = True
raise KeyboardInterrupt
# But also, after that has been done, pretend subprocess shutdown happened
# (or we will loop forever).
    @property
    def process_is_finished(self):
return self._interrupted
class OhNoz(Exception):
pass
|
4-1/4” High, 7-3/4” front to back.
Sits flat on any surface.
All metal units are extremely solid.
Great for lobbies, tradeshow floors, malls, etc.
Stand can be used with graphics up to 7’ high. Substrate, environment, and other factors can affect the maximum appropriate height. Test your media to ensure stability before use.
Both the satin silver and matte black powder coat finishes feature fine textures, designed to withstand high traffic areas.
No assembly required. Simply place 1/2" thick rigid media into slot of base.
|
# import numpy as np
# import os
# import sys
# from numpy.testing import assert_almost_equal, assert_array_equal
# __file__ = os.getcwd()
# sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),"../utils/")))
# from regression import *
# n_splits = 10 # number of subdivisions of validation data for cross validation of ridge parameter (alpha)
# n_resamps = 10 # number of times to compute regression & prediction within training data (can be <= n_splits)
# chunk_sz = 6000 # number of voxels to fit at once. Memory-saving.
# pthr = 0.005 # Ridge parameter is chosen based on how many voxels are predicted above a correlation threshold
# # for each alpha value (technically it's slightly more complicated than that, see the code).
# # This p value sets that correlation threshold.
# def test_ridge_cv():
# out = regression.ridge_cv(efs,data_est_masked,val_fs=vfs,
# val_data=data_val_masked,alphas=alpha,n_resamps=n_resamps,
# n_splits=n_splits,chunk_sz=chunk_sz,pthr=pthr,is_verbose=True)
#
|
Arpaio contends that the 22-page complaint the Department of Justice released Dec. 15 against his office was nothing more than anecdotal and didn’t prove there are systematic sheriff’s department policies aimed at depriving Hispanics in Maricopa County of their civil rights.
Nor was Arpaio concerned that the DOJ might take him and his sheriff’s department to federal court.
“If the Justice Department wants to take me to court, I’m ready,” Arpaio said.
Arpaio was responding to a statement emailed today to WND in which the DOJ threatened to go to court immediately rather than show Arpaio’s office and the U.S. public the evidence it claims to have.
“If MCSO wants to debate the facts instead of fixing the problems stated in our findings, we will do so by way of litigation,” DOJ said in the statement.
The statement implied the DOJ already has rejected an offer of cooperation made yesterday by attorneys representing MCSO in a cover letter to the MCSO 38-page response to the DOJ complaint.
In the cover letter, Arpaio attorney Joseph J. Popolizio made it clear that the sheriff was willing to cooperate with Holder and the DOJ, but only if the DOJ revealed to the MCSO and the public its proof.
“Sheriff Joseph M. Arpaio and the MCSO are certainly interested in constructive dialogue, but constructive dialogue can only occur if the DOJ provides the facts and information on which it bases its findings,” Popolizio wrote.
Nothing more than a political attack?
Arpaio also took exception to the DOJ assertion that the federal investigation against his office began under the Bush administration.
Arpaio pointed out that after three years of cooperating with the Justice Department investigation, his office was given only one hour’s advance notice that the report was going to be released.
|
# Copyright 2015 Curtis Sand
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from math import log10
from logging import debug
from lib.error import ObjectNotFound
from .resources import Resources
def get_all_building_names():
return [cls.name for cls in ALL_BUILDINGS]
def get_all_building_abbr():
return [cls.abbr for cls in ALL_BUILDINGS]
def get_building(building_name, level=None):
if isinstance(building_name, type):
building_name = building_name.__name__
debug('getting building type %s, lvl %s' % (building_name, level))
for building in ALL_BUILDINGS:
if (building.__name__.lower() == building_name.lower() or
building.name.lower() == building_name.lower() or
building.abbr.lower() == building_name.lower()):
if level is None:
return building
else:
return building(level)
else:
raise ObjectNotFound(name=building_name)
class BuildingRequirements(object):
def __init__(self, resources=None, research=None, buildings=None):
self.resources = resources
if not self.resources:
self.resources = Resources()
# research dict: not defined yet
self.research = research
if self.research is None:
self.research = dict()
# buildings dict: key=str, building name; value=int, building level
self.buildings = buildings
if self.buildings is None:
self.buildings = dict()
def __repr__(self):
return ("{}(Resources: {}, Research: {}, Buildings: {})".format(
self.__class__.__name__, repr(self.resources), repr(self.research),
repr(self.buildings)))
def __str__(self):
# remove 1st and last char from resources repr string, "(", ")"
ret_val = repr(self.resources)[1:-1]
if self.research:
ret_val += "\nResearch: {}".format(self.research)
if self.buildings:
ret_val += "\nBuildings: {}".format(self.buildings)
return ret_val
def __getstate__(self):
return (self.resources, self.research, self.buildings)
def __setstate__(self, state):
(self.resources, self.research, self.buildings) = state
class Building(object):
name = 'Building'
abbr = 'BLDNG'
def __init__(self, level=None):
if level is None:
self.level = 1
else:
self.level = level
self.under_construction = False
def _modifier(self):
"""The building's per time unit resource production."""
return Resources(ore=self.level)
def _construction_modifier(self):
"""The building's production capacity while under construction."""
return Resources()
@property
def modifier(self):
if self.under_construction:
return self._construction_modifier()
return self._modifier()
def electricity(self, sun_energy):
"""The building's per time unit electricity production/consumption."""
return 0
@property
def requirements(self):
return BuildingRequirements()
def __repr__(self):
return ("{}(level: {}, modifier: {}, under construction: {}"
"requirements: {})".format(
self.__class__.__name__, self.level, repr(self.modifier),
self.under_construction, repr(self.requirements)))
def __str__(self):
return ("{}: level: {}\n - modifier: {}\n - under construction:{}\n"
" - requirements: {})".format(
self.__class__.__name__, self.level,
repr(self.modifier)[1:-1], self.under_construction,
str(self.requirements).replace('\n', '\n' + ' ' * 8)))
def __eq__(self, other):
return (self.modifier == other.modifier and
self.level == other.level)
def __ne__(self, other):
return not self.__eq__(other)
def _compare(self, other):
"""Calculate an evenly weighted average of the atributes."""
mod = self.modifier.trade_value - other.modifier.trade_value
lev = self.level - other.level
avg = (lev + mod) / 2.0
return avg
def __lt__(self, other):
return self._compare(other) < 0
def __gt__(self, other):
return self._compare(other) > 0
def __le__(self, other):
return self._compare(other) <= 0
def __ge__(self, other):
return self._compare(other) >= 0
def __hash__(self):
return hash("%s%s" % (self.__class__, self.level))
@classmethod
def are_requirements_met(cls, build_site, level=None):
reqs = cls(level).requirements
if reqs.resources > build_site.resources:
return False
for bldng in reqs.buildings:
if (bldng not in build_site.buildings or
reqs.buildings[bldng] > build_site.buildings[bldng]):
return False
# TODO: implement research requirements here
return True
class Mine(Building):
name = 'Mine'
abbr = 'Mn'
def _modifier(self):
return Resources(ore=0.2*self.level, metal=0.025*self.level)
def electricity(self, sun_energy):
return -1 * pow(self.level, 2)
@property
def requirements(self):
return BuildingRequirements(resources=Resources(
ore=10+(2*(-1+self.level)), metal=-10+(5*(1+self.level))))
class SolarPowerPlant(Building):
name = 'Solar Power Plant'
abbr = 'SPP'
def _modifier(self):
return Resources()
def electricity(self, sun_energy):
return 10 * abs(log10(sun_energy)) * self.level
@property
def requirements(self):
return BuildingRequirements(resources=Resources(
ore=10+(5*self.level), metal=50+(6*self.level)))
ALL_BUILDINGS = [Mine, SolarPowerPlant]
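
# A minimal usage sketch (hypothetical build_site; it must expose `.resources`
# and `.buildings` the way are_requirements_met() expects):
def _example_buildings_usage(build_site):
    mine = get_building('Mn', level=3)  # lookup by abbreviation also works
    print(mine.requirements)  # resources needed for a level 3 Mine
    print(Mine(2) < SolarPowerPlant(2))  # ordering via the weighted _compare()
    return Mine.are_requirements_met(build_site, level=3)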
|
The Opportunity 100 Checking Account at Centennial Bank offers you the opportunity to re-establish a checking account relationship.
Open your Opportunity 100 checking account online or begin our in-branch enrollment and receive benefits that will allow you to conduct your daily transactions in a fast and convenient manner.
*Customers are eligible upon request for other checking products after 12 months in good standing in the Opportunity 100 account, and upon proof of paid closures with other institutions. See Bank for details.
¹Some restrictions may apply. See Bank for details.
|
import inspect
import os
import sys
#----------------------------------------------------------------------
class AnsiPrint( object ):
FG_BLACK = "\033[30m"
FG_BLUE = "\033[34m"
FG_GREEN = "\033[32m"
FG_CYAN = "\033[36m"
FG_RED = "\033[31m"
FG_MAGENTA = "\033[35m"
FG_YELLOW = "\033[33m"
FG_DARK_GRAY = "\033[1;30m"
RESET = "\033[0m"
def __init__( self, color ):
self.color = color
#----------------------------------------------------------------------
def getColoredText( self, text ):
return self.color + text + self.RESET
#----------------------------------------------------------------------
def getTerminalSize():
rows, columns = os.popen( 'stty size', 'r' ).read().split()
return ( int( rows ), int( columns ) )
#----------------------------------------------------------------------
def printLine( text, color = None ):
if color:
text = color + text + AnsiPrint.RESET
print text
#----------------------------------------------------------------------
def printLog( text ):
cwd = os.getcwd()
stack = inspect.stack()[1]
dir = stack[1]
    if dir.startswith( cwd ):
dir = dir[ len( cwd ) + 1 : ]
text = "*** in %s:%d:\n%s\n" % (dir, stack[2], text)
print text
#----------------------------------------------------------------------
def printDebug( text ):
cwd = os.getcwd()
stack = inspect.stack()[1]
dir = stack[1]
    if dir.startswith( cwd ):
dir = dir[ len( cwd ) + 1 : ]
text = "*** in %s:%d:\n%s\n" % (dir, stack[2], text )
# text = "%s*** in %s:%d:\n%s%s\n" % (
# AnsiPrint.FG_MAGENTA, dir, stack[2], text, AnsiPrint.RESET )
print text
#----------------------------------------------------------------------
def printWarn( text ):
cwd = os.getcwd()
stack = inspect.stack()[2]
dir = stack[1]
    if dir.startswith( cwd ):
dir = dir[ len( cwd ) + 1 : ]
text = "%s*** in %s:%d:\n%s%s" % (
AnsiPrint.FG_RED, dir, stack[2], text, AnsiPrint.RESET )
print text
#----------------------------------------------------------------------
def printDeprecated( text ):
cwd = os.getcwd()
stack = inspect.stack()[2]
dir = stack[1]
    if dir.startswith( cwd ):
dir = dir[ len( cwd ) + 1 : ]
text = "%s!!! DEPRECATION in %s:%d: %s%s" % (
AnsiPrint.FG_RED, dir, stack[2], text, AnsiPrint.RESET )
print text
#----------------------------------------------------------------------
def formatStorage( st, indent = '' ):
if isinstance( st, dict ):
text = indent + '{\n'
indent += ' '
first = True
for k in st.keys():
v = st[k]
# printLog( 'v:' + repr( v ) )
if v and repr( v ).startswith( '<' ):
continue
if first:
first = False
else:
text += ',\n'
text += indent + k + ': ' + formatStorage( v, indent )
text += '\n'
text += indent + '}\n'
return text
else:
print 'not dict'
return str( st )
#----------------------------------------------------------------------
def printLogStorage( storage ):
text = formatStorage( storage )
stack = inspect.stack()[1]
text = "*** in %s:%d:\n%s" % (stack[1], stack[2], text)
print text
#----------------------------------------------------------------------
def printLogDict( d, indent = 0, dictName = '' ):
cwd = os.getcwd()
stack = inspect.stack()[1]
dir = stack[1]
    if dir.startswith( cwd ):
dir = dir[ len( cwd ) + 1 : ]
text = "*** in %s:%d:" % (dir, stack[2] )
print text
print( 'dictName: %s' % dictName )
printDict( d, indent )
#----------------------------------------------------------------------
def printDict( d, indent = 0 ):
iStr = ' ' * indent
kList = d.keys()
for k in kList:
val = d[k]
if isinstance( val, dict ):
print '%s%s:' % (iStr, k, )
printDict( val, indent + 1 )
else:
print '%s%s: %s' % (iStr, k, repr( val ))
#----------------------------------------------------------------------
def printChars( text, color = None ):
if color:
text = color + text + AnsiPrint.RESET
sys.stdout.write( text )
sys.stdout.flush()
#----------------------------------------------------------------------
if __name__ == "__main__":
printDebug( 'ola' )
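    # A few more illustrative calls (hypothetical values, shown only as a sketch
    # of the helpers defined above):
    printLine( 'plain line' )
    printLine( 'green line', AnsiPrint.FG_GREEN )
    printLogDict( { 'outer': { 'inner': 1 } }, dictName = 'demo' )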
|
Having been in the recruitment industry for over 20 years I often get people asking me if I like what I do, and what makes a good recruiter. This got me thinking about how recruitment has changed over the years and why I like it so much. The easiest way to answer this was to look at the key things I think are important for me to both be successful and enjoy my job.
Recruiting is a relationship business. You need the desire and skills to build relationships effectively, and, to some extent, a predisposition to connecting with people. Your success as a recruiter is due, in part, to the people you know and how they feel about you. Job changes require candidates and clients to share important, and sometimes personal, information to make the search and placement process successful, and they will be more open to sharing that information when recruiters have invested time in getting to know them. I am still in contact with candidates that I have placed numerous times during my career due to our ongoing and genuine relationship.
Recruitment is an industry that sometimes suffers from a perception problem, much like sales. Recruiting agencies can sometimes be numbers-focused rather than people-focused. The problem with this is that successful recruitment is about placing candidates in roles for the long term so that both parties are happy, and the key to doing this is being honest with both parties. You also want your candidates to trust and value your opinion, so sending them to interviews for roles they are not really suited to will end up being detrimental to all parties involved.
The best recruiters listen twice as much as they speak. Active listening means genuinely looking into all aspects of recruitment for both parties, including culture fit, job skills, work/life balance, generational fit and so on; the list is endless, so having the experience to ask the right questions and listen to the answers is extremely important.
The most successful recruiters know how to be persistent without crossing the line. This applies when talking to both the candidate and the client. Recruitment is more about matching the client and candidate, not about trying to sell something that neither party requires. I suppose you could say it’s like a dating agency for clients and candidates; the end goal is a long-lasting, mutually beneficial, positive and engaging relationship.
|
# ---
# jupyter:
# jupytext:
# formats: ipynb,.pct.py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.3.3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown]
# # Basic (binary) GP classification model
#
#
# This notebook shows how to build a GP classification model using variational inference.
# Here we consider binary (two-class, 0 vs. 1) classification only (there is a separate notebook on [multiclass classification](../advanced/multiclass_classification.ipynb)).
# We first look at a one-dimensional example, and then show how you can adapt this when the input space is two-dimensional.
# %%
import numpy as np
import gpflow
import tensorflow as tf
import matplotlib.pyplot as plt
# %matplotlib inline
plt.rcParams["figure.figsize"] = (8, 4)
# %% [markdown]
# ## One-dimensional example
#
# First of all, let's have a look at the data. `X` and `Y` denote the input and output values.
# **NOTE:** `X` and `Y` must be two-dimensional NumPy arrays, $N \times 1$ or $N \times D$, where $D$ is the number of input dimensions/features, both with $N$ rows (one for each data point):
# %%
X = np.genfromtxt("data/classif_1D_X.csv").reshape(-1, 1)
Y = np.genfromtxt("data/classif_1D_Y.csv").reshape(-1, 1)
plt.figure(figsize=(10, 6))
_ = plt.plot(X, Y, "C3x", ms=8, mew=2)
# %% [markdown]
# ### Reminders on GP classification
#
# For a binary classification model using GPs, we can simply use a `Bernoulli` likelihood. The details of the generative model are as follows:
#
# __1. Define the latent GP:__ we start from a Gaussian process $f \sim \mathcal{GP}(0, k(\cdot, \cdot'))$:
# %%
# build the kernel and covariance matrix
k = gpflow.kernels.Matern52(variance=20.0)
x_grid = np.linspace(0, 6, 200).reshape(-1, 1)
K = k(x_grid)
# sample from a multivariate normal
rng = np.random.RandomState(6)
L = np.linalg.cholesky(K)
f_grid = np.dot(L, rng.randn(200, 5))
plt.plot(x_grid, f_grid, "C0", linewidth=1)
_ = plt.plot(x_grid, f_grid[:, 1], "C0", linewidth=2)
# %% [markdown]
# __2. Squash them to $[0, 1]$:__ the samples of the GP are mapped to $[0, 1]$.
# By default, GPflow uses the standard normal cumulative distribution function (inverse probit function): $p(x) = \Phi(f(x)) = \frac{1}{2} (1 + \operatorname{erf}(f(x) / \sqrt{2}))$.
# (This choice has the advantage that predictive mean, variance and density can be computed analytically, but any choice of invlink is possible, e.g. the logistic (sigmoid) function $p(x) = \frac{\exp(f(x))}{1 + \exp(f(x))}$. Simply pass another function as the `invlink` argument to the `Bernoulli` likelihood class.)
# %%
def invlink(f):
return gpflow.likelihoods.Bernoulli().invlink(f).numpy()
p_grid = invlink(f_grid)
plt.plot(x_grid, p_grid, "C1", linewidth=1)
_ = plt.plot(x_grid, p_grid[:, 1], "C1", linewidth=2)
# %% [markdown]
# __3. Sample from a Bernoulli:__ for each observation point $X_i$, the class label $Y_i \in \{0, 1\}$ is generated by sampling from a Bernoulli distribution $Y_i \sim \mathcal{B}(g(X_i))$.
# %%
# Select some input locations
ind = rng.randint(0, 200, (30,))
X_gen = x_grid[ind]
# evaluate probability and get Bernoulli draws
p = p_grid[ind, 1:2]
Y_gen = rng.binomial(1, p)
# plot
plt.plot(x_grid, p_grid[:, 1], "C1", linewidth=2)
plt.plot(X_gen, p, "C1o", ms=6)
_ = plt.plot(X_gen, Y_gen, "C3x", ms=8, mew=2)
# %% [markdown]
# ### Implementation with GPflow
#
# For the model described above, the posterior $f(x)|Y$ (say $p$) is not Gaussian any more and does not have a closed-form expression.
# A common approach is then to look for the best approximation of this posterior by a tractable distribution (say $q$) such as a Gaussian distribution.
# In variational inference, the quality of an approximation is measured by the Kullback-Leibler divergence $\mathrm{KL}[q \| p]$.
# For more details on this model, see Nickisch and Rasmussen (2008).
#
# The inference problem is thus turned into an optimization problem: finding the best parameters for $q$.
# In our case, we introduce $U \sim \mathcal{N}(q_\mu, q_\Sigma)$, and we choose $q$ to have the same distribution as $f | f(X) = U$.
# The parameters $q_\mu$ and $q_\Sigma$ can be seen as parameters of $q$, which can be optimized in order to minimise $\mathrm{KL}[q \| p]$.
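#
# A standard way to write this objective (included here for reference) is the evidence lower bound (ELBO):
# $$\mathrm{ELBO}(q) = \mathbb{E}_{q}\left[\log p(Y \,|\, f)\right] - \mathrm{KL}\left[q(f(X)) \,\|\, p(f(X))\right],$$
# which satisfies $\log p(Y) = \mathrm{ELBO}(q) + \mathrm{KL}[q \| p]$, so maximising the ELBO minimises the KL divergence to the posterior.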
#
# This variational inference model is called `VGP` in GPflow:
# %%
m = gpflow.models.VGP(
(X, Y), likelihood=gpflow.likelihoods.Bernoulli(), kernel=gpflow.kernels.Matern52()
)
opt = gpflow.optimizers.Scipy()
opt.minimize(m.training_loss, variables=m.trainable_variables)
# %% [markdown]
# We can now inspect the result of the optimization with `gpflow.utilities.print_summary(m)`:
# %%
gpflow.utilities.print_summary(m, fmt="notebook")
# %% [markdown]
# In this table, the first two lines are associated with the kernel parameters, and the last two correspond to the variational parameters.
# **NOTE:** In practice, $q_\Sigma$ is actually parameterized by its lower-triangular square root $q_\Sigma = q_\text{sqrt} q_\text{sqrt}^T$ in order to ensure its positive-definiteness.
#
# For more details on how to handle models in GPflow (getting and setting parameters, fixing some of them during optimization, using priors, and so on), see [Manipulating GPflow models](../understanding/models.ipynb).
# %% [markdown]
# ### Predictions
#
# Finally, we will see how to use model predictions to plot the resulting model.
# We will replicate the figures of the generative model above, but using the approximate posterior distribution given by the model.
# %%
plt.figure(figsize=(12, 8))
# bubble fill the predictions
mu, var = m.predict_f(x_grid)
plt.fill_between(
x_grid.flatten(),
np.ravel(mu + 2 * np.sqrt(var)),
np.ravel(mu - 2 * np.sqrt(var)),
alpha=0.3,
color="C0",
)
# plot samples
tf.random.set_seed(6)
samples = m.predict_f_samples(x_grid, 10).numpy().squeeze().T
plt.plot(x_grid, samples, "C0", lw=1)
# plot p-samples
p = invlink(samples)
plt.plot(x_grid, p, "C1", lw=1)
# plot data
plt.plot(X, Y, "C3x", ms=8, mew=2)
plt.ylim((-3, 3))
# %% [markdown]
# ## Two-dimensional example
#
# In this section we will use the following data:
# %%
X = np.loadtxt("data/banana_X_train", delimiter=",")
Y = np.loadtxt("data/banana_Y_train", delimiter=",").reshape(-1, 1)
mask = Y[:, 0] == 1
plt.figure(figsize=(6, 6))
plt.plot(X[mask, 0], X[mask, 1], "oC0", mew=0, alpha=0.5)
_ = plt.plot(X[np.logical_not(mask), 0], X[np.logical_not(mask), 1], "oC1", mew=0, alpha=0.5)
# %% [markdown]
# The model definition is the same as above; the only important difference is that we now specify that the kernel operates over a two-dimensional input space:
# %%
m = gpflow.models.VGP(
(X, Y), kernel=gpflow.kernels.SquaredExponential(), likelihood=gpflow.likelihoods.Bernoulli()
)
opt = gpflow.optimizers.Scipy()
opt.minimize(
m.training_loss, variables=m.trainable_variables, options=dict(maxiter=25), method="L-BFGS-B"
)
# in practice, the optimization needs around 250 iterations to converge
# %% [markdown]
# We can now plot the predicted decision boundary between the two classes.
# To do so, we can equivalently plot the contour lines $E[f(x)|Y]=0$, or $E[g(f(x))|Y]=0.5$.
# We will do the latter, because it allows us to introduce the `predict_y` function, which returns the mean and variance at test points:
# %%
x_grid = np.linspace(-3, 3, 40)
xx, yy = np.meshgrid(x_grid, x_grid)
Xplot = np.vstack((xx.flatten(), yy.flatten())).T
p, _ = m.predict_y(Xplot) # here we only care about the mean
plt.figure(figsize=(7, 7))
plt.plot(X[mask, 0], X[mask, 1], "oC0", mew=0, alpha=0.5)
plt.plot(X[np.logical_not(mask), 0], X[np.logical_not(mask), 1], "oC1", mew=0, alpha=0.5)
_ = plt.contour(
xx,
yy,
p.numpy().reshape(*xx.shape),
[0.5], # plot the p=0.5 contour line only
colors="k",
linewidths=1.8,
zorder=100,
)
# %% [markdown]
# ## Further reading
#
# There are dedicated notebooks giving more details on how to manipulate [models](../understanding/models.ipynb) and [kernels](../advanced/kernels.ipynb).
#
# This notebook covers only very basic classification models. You might also be interested in:
# * [Multiclass classification](../advanced/multiclass_classification.ipynb) if you have more than two classes.
# * [Sparse models](../advanced/gps_for_big_data.ipynb). The models above have one inducing variable $U_i$ per observation point $X_i$, which does not scale to large datasets. Sparse Variational GP (SVGP) is an efficient alternative where the variables $U_i$ are defined at some inducing input locations $Z_i$ that can also be optimized.
# * [Exact inference](../advanced/mcmc.ipynb). We have seen that variational inference provides an approximation to the posterior. GPflow also supports exact inference using Markov Chain Monte Carlo (MCMC) methods, and the kernel parameters can also be assigned prior distributions in order to avoid point estimates.
#
# ## References
#
# Hannes Nickisch and Carl Edward Rasmussen. 'Approximations for binary Gaussian process classification'. *Journal of Machine Learning Research* 9(Oct):2035--2078, 2008.
|
With all the talk about the power of push notifications, we thought we’d cut to the chase and make it easy for everybody.
So we built Mobidea Push.
Mobidea Push is a single dashboard for push and campaign management connected to 20+ push traffic suppliers that will take your business to the next level!
Enjoy exceptional ad visibility and CR, and serve your ads to real users who want to see them.
Here’s the deal: to all the Mobidea Academy readers, we’ve got a special bonus! Use the promo code academy10 in the cashier and get a 10% bonus on your first deposit of at least $250. The maximum bonus is $100.
Important note: if you didn’t use any promo code on your first deposit, don’t worry! You can still use it in the following ones, but only once.
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from tipi.compat import unicode
from tipi.html import HTMLFragment
__all__ = ('Replacement', 'replace')
class Replacement(object):
"""Replacement representation."""
skipped_tags = (
'code', 'kbd', 'pre', 'samp', 'script', 'style', 'tt', 'xmp'
)
textflow_tags = (
'b', 'big', 'i', 'small', 'tt', 'abbr', 'acronym', 'cite',
'dfn', 'em', 'kbd', 'strong', 'samp', 'var', 'a', 'bdo', 'q', 'script',
'span', 'sub', 'sup'
)
def __init__(self, pattern, replacement):
self.pattern = pattern
self.replacement = replacement
def _is_replacement_allowed(self, s):
"""Tests whether replacement is allowed on given piece of HTML text."""
if any(tag in s.parent_tags for tag in self.skipped_tags):
return False
if any(tag not in self.textflow_tags for tag in s.involved_tags):
return False
return True
def replace(self, html):
"""Perform replacements on given HTML fragment."""
self.html = html
text = html.text()
positions = []
def perform_replacement(match):
offset = sum(positions)
start, stop = match.start() + offset, match.end() + offset
s = self.html[start:stop]
if self._is_replacement_allowed(s):
repl = match.expand(self.replacement)
self.html[start:stop] = repl
else:
repl = match.group() # no replacement takes place
positions.append(match.end())
return repl
while True:
if positions:
text = text[positions[-1]:]
text, n = self.pattern.subn(perform_replacement, text, count=1)
if not n: # all is already replaced
break
def replace(html, replacements=None):
"""Performs replacements on given HTML string."""
if not replacements:
return html # no replacements
html = HTMLFragment(html)
for r in replacements:
r.replace(html)
return unicode(html)
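
# A minimal usage sketch (hypothetical pattern; tipi ships its own replacement
# definitions elsewhere, this only illustrates the API defined in this module):
def _example_replace():
    import re
    curly = Replacement(re.compile(r'"([^"]*)"'), u'\u201c\\1\u201d')
    # text inside <code> should be skipped, since 'code' is in skipped_tags above
    return replace(u'<p>Say "hi" but not in <code>"here"</code></p>', [curly])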
|
Aircraft Aerodynamic Seals Market by Air..
This report, from Stratview Research, studies the global aircraft aerodynamic seals market over the trend period of 2012 to 2017 and forecast period of 2018 to 2023. The report provides detailed insights into the market dynamics to enable informed business decision making and growth strategy formulation based on the opportunities present in the market.
Seals are used in multiple areas of an aircraft in order to serve a wide array of functions including aerodynamics, fire resistance, conduction, insulation, and air and fluid handling. Aerodynamic seals are the most widely used in an aircraft in terms of both value and units sold and account for more than 40% of the total aircraft seals market. Aerodynamic seals in an aircraft are exposed to a wide range of challenging operating conditions, such as high temperature, pressure, aggressive chemicals, high frequency of oscillation, and the threat of fire and explosion. It becomes necessary to select a sealing material that can handle such extreme circumstances. The most commonly used materials in aircraft aerodynamic seals are polymers, which include elastomers (such as silicone, fluorosilicone, and rubber), PTFE, and thermoplastics.
The global aircraft aerodynamic seals market is projected to grow at a healthy rate over the next five years to reach US$ 494.7 million in 2023. Increasing production rates of commercial aircraft are primarily driving the demand for aerodynamic seals. Both major commercial aircraft manufacturers, Boeing and Airbus, are incessantly increasing the production rates of their best-selling aircraft in order to meet such huge order backlogs. At the same time, both have strategically been launching fuel-efficient versions of their best-selling aircraft programs to get a better grip on market growth. Boeing and Airbus had a combined total order backlog of 13,090 commercial aircraft as of 31 March 2018. Owing to this factor, it is estimated that there will be sustained demand for aerodynamic seals in the foreseeable future.
Another factor is the growing demand for fuel-efficient aircraft. The aerospace industry is highly regulated by stringent regulations imposed by different regulatory bodies located across the world. Currently, these bodies are tightening the aerospace industry by introducing more stringent regulations on the airline industry regarding carbon emission reductions. Additionally, volatile crude oil prices squeeze the margins of airlines in this fiercely competitive market, as fuel is the leading cost component of the overall airline operating cost, roughly accounting for 35% to 40% of the total operating cost. These factors are pushing airlines to demand more fuel-efficient aircraft in order to elevate their profit margins as well as to abide by such stringent regulations. Making aircraft lightweight and aerodynamic are the main ways the aerospace industry can improve fuel efficiency. This, in turn, is driving the usage of aerodynamic seals in various sections of the aircraft.
The global aircraft aerodynamic seals market is segmented based on aircraft type as Commercial Aircraft, Regional Aircraft, Helicopter, Military Aircraft, and General Aviation. Commercial aircraft is expected to remain the largest and fastest-growing segment of the global aircraft aerodynamic seals market during the forecast period. Increasing demand for commercial aircraft to support rising passenger traffic, increasing production rates of key programs, such as B737, A320 family, B787, and A350XWB; market entry of new players, such as COMAC and Irkut; an introduction of variants of existing best-selling aircraft programs, such as B737 max, A320neo, and B777X; and rising commercial aircraft fleet size across regions are the key factors propelling the demand for aerodynamic seals in the commercial aircraft segment.
Based on the application type, the global aircraft aerodynamic seals market is segmented as Airframe; Flight Control Surfaces; Landing Gear, Wheels & Brakes; and Others. The airframe is expected to remain the most dominant application segment of the aircraft aerodynamic seals market during the forecast period. Aerodynamic seals are used in a wide spectrum of airframe applications, such as molded seals for doors & hatches, wings and fuselage fairings, and cabin windows.
Based on the material type, the aircraft aerodynamic seals market is segmented as Polymers, Composites, and Metals. The polymer seal is expected to remain the largest segment of the global aircraft aerodynamic seals market during the forecast period. Elastomers are the most widely preferred seals in the polymer category for helping airframers make aircraft more aerodynamic, driven by their ability to contract 10 times more than steel seals along with increased flexibility at low temperatures.
Based on the regions, North America is expected to remain the largest aircraft aerodynamic seals market during the forecast period, whereas Asia-Pacific is expected to experience the highest growth during the same period. The highest growth of the Asia-Pacific region is driven by a host of factors including increasing demand for commercial aircraft to support rising passenger traffic, the opening of assembly plants of Boeing and Airbus in China, upcoming indigenous commercial and regional aircraft (COMAC C919 and Mitsubishi MRJ), and rising aircraft fleet size.
The supply chain of this market comprises raw material suppliers, aircraft aerodynamic seal manufacturers, distributors, aircraft OEMs, and airline companies. The key aircraft aerodynamic seal manufacturers are Hutchinson SA, Trelleborg AB, Meggitt Plc, Esterline Technologies Corporation, Freudenberg Group, and Parker Hannifin Corporation. The development of lightweight seals with low friction as well as wear and abrasion resistance, regional expansion, and the execution of mergers & acquisitions are the key strategies adopted by the key players to gain a competitive edge in the market.
The global aircraft aerodynamic seals market is segmented into the following categories.
|
# Given a sorted array, remove the duplicates in place such that each element appear only once and return the new length.
# Do not allocate extra space for another array, you must do this in place with constant memory.
# For example,
# Given input array nums = [1,1,2],
# Your function should return length = 2, with the first two elements of nums being 1 and 2 respectively. It doesn't matter what you leave beyond the new length.
class Solution:
# @param {integer[]} nums
# @return {integer}
def removeDuplicates(self, nums):
        # O(n): two-pointer scan that overwrites duplicates in place
if not nums:
return 0
tail = 0
for i in xrange(1, len(nums)):
if nums[i] != nums[tail]:
tail += 1
nums[tail] = nums[i]
return tail + 1
        # # O(n^2): deleting from a list shifts the remaining elements on every removal
# i = 0
# while i < len(nums) - 1:
# if nums[i] == nums[i+1]:
# del nums[i]
# else:
# i += 1
# return len(nums)
        # WA: building a set discards the original sorted order, so this is rejected
# nums[:] = list(set(nums))
# return len(nums)
import time
start_time = time.time()
s = Solution()
print s.removeDuplicates([])
print s.removeDuplicates([1])
print s.removeDuplicates([1, 1, 1])
print s.removeDuplicates([1, 2])
print s.removeDuplicates([1, 1, 2])
print s.removeDuplicates([1, 1, 2, 2])
print s.removeDuplicates([1, 1, 2, 2, 2, 2, 3])
print s.removeDuplicates([0, 1, 1, 2, 2, 2, 2, 3])
print s.removeDuplicates([0, 0, 1, 1, 2, 2, 2, 2, 3, 8])
print s.removeDuplicates([0, 0, 1, 1, 2, 2, 2, 2, 3, 8, 9, 9, 10, 10])
print("--- %s seconds ---" % (time.time() - start_time))
|
K2M Group Holdings, Inc., the "complex spine technologies made simple" company, has taken the opportunity presented by the Scoliosis Research Society (SRS) 50th Annual Meeting in Minneapolis, Minnesota to announce the U.S. commercial launch of its EVEREST® Deformity Spinal System. EVEREST Deformity is an expansion of the EVEREST family of products, which includes the EVEREST Degenerative and EVEREST Minimally Invasive Spinal Systems.
K2M says its EVEREST Deformity Spinal System includes state-of-the-art implant technology with several enhancing attributes to facilitate more efficient intraoperative use of the system. It features a top-loading pedicle screw in a variety of screw types and offers the ability to accommodate titanium and cobalt chrome rods in two different diameters. The Basecamp™ Deformity Rod Reducer, the system’s efficient and versatile instrumentation, provides surgeons with multiple options during surgery in one system to help address the most difficult correction maneuvers for complex spinal pathologies.
Another key feature of the EVEREST Deformity Spinal System is the Basecamp Deformity Rod Reducer, which provides 60 mm of quick or controlled rod reduction. Basecamp replaces the need for reduction screws, offers segmental reduction of the rod, and provides intraoperative flexibility. Placement of multiple Basecamp Tubes allows for sequential reduction and correction of the spine.
|
#
# Copyright (c) 2015 Autodesk Inc.
# All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import logging
from toolset.io import fire, run
from toolset.tool import Template
#: Our ochopod logger.
logger = logging.getLogger('ochopod')
def go():
class _Tool(Template):
help = \
'''
            Displays detailed information for the specified cluster(s).
'''
tag = 'info'
def customize(self, parser):
            parser.add_argument('clusters', type=str, nargs='*', default='*', help='1+ clusters (can be a glob pattern, e.g. foo*)')
def body(self, args, proxy):
for token in args.clusters:
def _query(zk):
replies = fire(zk, token, 'info')
return len(replies), {key: hints for key, (_, hints, code) in replies.items() if code == 200}
total, js = run(proxy, _query)
if not total:
logger.info('\n<%s> -> no pods found' % token)
else:
#
# - justify & format the whole thing in a nice set of columns
#
pct = (len(js) * 100) / total
unrolled = ['%s\n%s\n' % (k, json.dumps(js[k], indent=4, separators=(',', ': '))) for k in sorted(js.keys())]
logger.info('\n<%s> -> %d%% replies (%d pods total) ->\n\n- %s' % (token, pct, total, '\n- '.join(unrolled)))
return _Tool()
|
November is chronic obstructive pulmonary disease (COPD) awareness month.
The pulmonologists at Bronson offer the following advice to help you lower the chances of developing this disease through education and early detection.
COPD, which includes chronic bronchitis and emphysema, is a chronic lung disease that makes it hard to breathe. The disease is increasingly common, affecting millions of Americans, and is the third leading cause of death in the U.S.
What are some of the symptoms of COPD?
What is my risk of developing COPD?
If you think you have any signs or symptoms of COPD, contact your primary care provider.
COPD most often occurs in people 40 years of age and older who have a history of smoking. These may be individuals who are current or former smokers.
COPD can also occur in those who have had long-term contact with harmful pollutants in the workplace. Some of these harmful lung irritants include certain chemicals, dust, or fumes. Heavy or prolonged contact with secondhand smoke or other lung irritants in the home, such as organic cooking fuel, may also cause COPD.
Alpha-1 Antitrypsin Deficiency (AATD) is the most commonly known genetic risk factor for emphysema. This can be caused by the absence of the Alpha-1 Antitrypsin protein in the bloodstream. Without this protein, white blood cells begin to harm the lungs, causing them to deteriorate.
Our board certified pulmonologists and respiratory care teams are here for you during every step of your COPD journey. They specialize in the diagnosis and treatment of a wide range of conditions that may affect your lungs.
If you have been diagnosed with COPD, download our COPD daily evaluation tracking sheets. They help both you and your doctor get a better sense of how you are managing the disease.
For additional information on COPD, visit copdfoundation.org or speak with your primary care provider. Looking for a Bronson doctor? For a complete list of providers at Bronson, visit bronsonhealth.com/find-a-doctor or call Bronson HealthAnswers at (269) 341-7723.
|
"""init
Revision ID: e32bf2e2b443
Revises:
Create Date: 2016-11-23 14:33:06.308794
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'e32bf2e2b443'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('about',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('content', sa.Text(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('forum_categories',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=255), nullable=False),
sa.Column('description', sa.Text(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
users_table = op.create_table('users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=255), nullable=False),
sa.Column('password', sa.String(length=255), nullable=False),
sa.Column('group', sa.String(length=255), nullable=False),
sa.Column('email', sa.String(length=255), nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
op.create_table('workshop',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('link', sa.Text(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('forum_topics',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('title', sa.String(length=255), nullable=False),
sa.Column('content', sa.Text(), nullable=False),
sa.Column('category_id', sa.Integer(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['category_id'], ['forum_categories.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('forum_replies',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('content', sa.Text(), nullable=False),
sa.Column('forum_topic_id', sa.Integer(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['forum_topic_id'], ['forum_topics.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
### end Alembic commands ###
op.bulk_insert(users_table,
[
{'id': 1, 'name': 'admin',
'password': '$2b$12$OUCC1PDBsg305zzY5KaR5uR14./Ohsopd1K2usCb05iewLtY1Bb6S',
'group': 'admin', 'email': '[email protected]'},
]
)
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('forum_replies')
op.drop_table('forum_topics')
op.drop_table('workshop')
op.drop_table('users')
op.drop_table('forum_categories')
op.drop_table('about')
### end Alembic commands ###
|
Get ready to experience 100% free, video-based webcam chatrooms, featuring rooms local to Bonita Springs. Chat Society is absolutely free and registration is not required. Click to enter Bonita Springs Chat or explore more cities in Florida.
Meet people in our free chat rooms from all over Florida including Bonita Springs and nearby cities as well, such as, Naples Park, Estero, Pelican Bay, Pine Ridge, San Carlos Park, Three Oaks, Vineyards, Golden Gate, Orangetree, Fort Myers Beach, Naples, Harlem Heights, Villas, Cypress Lake, Gateway, Iona, Lely, Pine Manor, Sanibel, McGregor, Naples Manor, Punta Rassa, Whiskey Creek, Lely Resort, Cape Coral, Fort Myers, Saint James City, Lehigh Acres, Lochmoor Waterway Estates, Tice, North Fort Myers, Buckingham, Immokalee, Fort Myers Shores, Marco, Olga, Suncoast Estates, Alva, Pine Island Center, Burnt Store Marina, Bokeelia, LaBelle, Port LaBelle, Charlotte Park, Punta Gorda, Punta Gorda Isles, Cleveland, Charlotte Harbor, Harbour Heights.
Enter Bonita Springs Chatrooms or explore more cities in Florida.
|
# TODO: Implement input interrupts if needed
class PCF8574:
def __init__(self, i2c, address):
self._i2c = i2c
self._address = address
self._input = 0 # Buffers the result of read in memory
self._input_mask = 0 # Mask specifying which pins are set as input
self._output = 0 # The state of pins set for output
self._write()
def _read(self):
self._input = self._i2c.readfrom(self._address, 1)[0] & self._input_mask
def _write(self):
self._i2c.writeto(self._address, bytes([self._output | self._input_mask]))
def read(self, pin):
bit_mask = 1 << pin
self._input_mask |= bit_mask
self._output &= ~bit_mask
self._write() # Update input mask before reading
self._read()
return (self._input & bit_mask) >> pin
def read8(self):
self._input_mask = 0xFF
self._output = 0
self._write() # Update input mask before reading
self._read()
return self._input
def write(self, pin, value):
bit_mask = 1 << pin
self._input_mask &= ~bit_mask
self._output = self._output | bit_mask if value else self._output & (~bit_mask)
self._write()
def write8(self, value):
self._input_mask = 0
self._output = value
self._write()
def set(self):
self.write8(0xFF)
def clear(self):
self.write8(0x0)
def toggle(self, pin):
bit_mask = 1 << pin
self._input_mask &= ~bit_mask
self._output ^= bit_mask
self._write()
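# Example usage -- a minimal sketch, assuming a MicroPython board with machine.I2C
# available and an expander at the common default address 0x20 (the pin numbers and
# the address are assumptions, not part of this driver):
#
#   from machine import I2C, Pin
#   i2c = I2C(0, scl=Pin(22), sda=Pin(21))  # SCL/SDA pins depend on the board
#   pcf = PCF8574(i2c, 0x20)
#   pcf.write(0, 1)       # drive P0 high
#   level = pcf.read(1)   # configure P1 as an input and read it
#   pcf.toggle(0)         # invert P0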
|
Scroll down to see some of my latest commissioned work.
Working with a range of clients to create high-quality and professional imagery at an affordable price. Enquire about commissioning Richard Beech Photography to undertake your photographic projects.
|
# Copyright 2012 OpenStack, Foundation
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import functools
from glance.common import exception
import glance.openstack.common.log as logging
from glance.openstack.common import timeutils
from glance.openstack.common import uuidutils
LOG = logging.getLogger(__name__)
DATA = {
'images': {},
'members': {},
'tags': {},
'locations': [],
'tasks': {},
}
def log_call(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
LOG.info(_('Calling %(funcname)s: args=%(args)s, kwargs=%(kwargs)s') %
{"funcname": func.__name__,
"args": args,
"kwargs": kwargs})
output = func(*args, **kwargs)
LOG.info(_('Returning %(funcname)s: %(output)s') %
{"funcname": func.__name__,
"output": output})
return output
return wrapped
def reset():
global DATA
DATA = {
'images': {},
'members': [],
'tags': {},
'locations': [],
'tasks': {},
}
def setup_db_env(*args, **kwargs):
"""
Setup global environment configuration variables.
We have no connection-oriented environment variables, so this is a NOOP.
"""
pass
def clear_db_env(*args, **kwargs):
"""
Setup global environment configuration variables.
We have no connection-oriented environment variables, so this is a NOOP.
"""
pass
def _get_session():
return DATA
def _image_locations_format(image_id, value, meta_data):
dt = timeutils.utcnow()
return {
'id': uuidutils.generate_uuid(),
'image_id': image_id,
'created_at': dt,
'updated_at': dt,
'deleted_at': None,
'deleted': False,
'url': value,
'metadata': meta_data,
}
def _image_property_format(image_id, name, value):
return {
'image_id': image_id,
'name': name,
'value': value,
'deleted': False,
'deleted_at': None,
}
def _image_member_format(image_id, tenant_id, can_share, status='pending'):
dt = timeutils.utcnow()
return {
'id': uuidutils.generate_uuid(),
'image_id': image_id,
'member': tenant_id,
'can_share': can_share,
'status': status,
'created_at': dt,
'updated_at': dt,
}
def _task_format(task_id, **values):
dt = timeutils.utcnow()
task = {
'id': task_id,
'type': 'import',
'status': 'pending',
'input': None,
'result': None,
'owner': None,
'message': None,
'expires_at': None,
'created_at': dt,
'updated_at': dt,
'deleted_at': None,
'deleted': False,
}
task.update(values)
return task
def _image_format(image_id, **values):
dt = timeutils.utcnow()
image = {
'id': image_id,
'name': None,
'owner': None,
'locations': [],
'status': 'queued',
'protected': False,
'is_public': False,
'container_format': None,
'disk_format': None,
'min_ram': 0,
'min_disk': 0,
'size': None,
'checksum': None,
'tags': [],
'created_at': dt,
'updated_at': dt,
'deleted_at': None,
'deleted': False,
}
locations = values.pop('locations', None)
if locations is not None:
locations = [
_image_locations_format(image_id, location['url'],
location['metadata'])
for location in locations
]
image['locations'] = locations
#NOTE(bcwaldon): store properties as a list to match sqlalchemy driver
properties = values.pop('properties', {})
properties = [{'name': k,
'value': v,
'image_id': image_id,
'deleted': False} for k, v in properties.items()]
image['properties'] = properties
image.update(values)
return image
def _filter_images(images, filters, context,
status='accepted', is_public=None,
admin_as_user=False):
filtered_images = []
if 'properties' in filters:
prop_filter = filters.pop('properties')
filters.update(prop_filter)
if status == 'all':
status = None
visibility = filters.pop('visibility', None)
for image in images:
member = image_member_find(context, image_id=image['id'],
member=context.owner, status=status)
is_member = len(member) > 0
has_ownership = context.owner and image['owner'] == context.owner
can_see = (image['is_public'] or has_ownership or is_member or
(context.is_admin and not admin_as_user))
if not can_see:
continue
if visibility:
if visibility == 'public':
if not image['is_public']:
continue
elif visibility == 'private':
if image['is_public']:
continue
if not (has_ownership or (context.is_admin
and not admin_as_user)):
continue
elif visibility == 'shared':
if not is_member:
continue
if is_public is not None:
if not image['is_public'] == is_public:
continue
add = True
for k, value in filters.iteritems():
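            # Keys suffixed with '_min' or '_max' are treated as range filters on the base attribute.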
key = k
if k.endswith('_min') or k.endswith('_max'):
key = key[0:-4]
try:
value = int(value)
except ValueError:
msg = _("Unable to filter on a range "
"with a non-numeric value.")
raise exception.InvalidFilterRangeValue(msg)
if k.endswith('_min'):
add = image.get(key) >= value
elif k.endswith('_max'):
add = image.get(key) <= value
elif k != 'is_public' and image.get(k) is not None:
add = image.get(key) == value
elif k == 'tags':
filter_tags = value
image_tags = image_tag_get_all(context, image['id'])
for tag in filter_tags:
if tag not in image_tags:
add = False
break
else:
properties = {}
for p in image['properties']:
properties = {p['name']: p['value'],
'deleted': p['deleted']}
add = (properties.get(key) == value and
properties.get('deleted') is False)
if not add:
break
if add:
filtered_images.append(image)
return filtered_images
def _do_pagination(context, images, marker, limit, show_deleted,
status='accepted'):
start = 0
end = -1
if marker is None:
start = 0
else:
# Check that the image is accessible
_image_get(context, marker, force_show_deleted=show_deleted,
status=status)
for i, image in enumerate(images):
if image['id'] == marker:
start = i + 1
break
else:
raise exception.NotFound()
end = start + limit if limit is not None else None
return images[start:end]
def _sort_images(images, sort_key, sort_dir):
reverse = False
if images and not (sort_key in images[0]):
raise exception.InvalidSortKey()
keyfn = lambda x: (x[sort_key] if x[sort_key] is not None else '',
x['created_at'], x['id'])
reverse = sort_dir == 'desc'
images.sort(key=keyfn, reverse=reverse)
return images
def _image_get(context, image_id, force_show_deleted=False, status=None):
try:
image = DATA['images'][image_id]
image['locations'] = _image_location_get_all(image_id)
except KeyError:
LOG.info(_('Could not find image %s') % image_id)
raise exception.NotFound()
if image['deleted'] and not (force_show_deleted or context.show_deleted):
LOG.info(_('Unable to get deleted image'))
raise exception.NotFound()
if not is_image_visible(context, image):
LOG.info(_('Unable to get unowned image'))
raise exception.Forbidden("Image not visible to you")
return image
@log_call
def image_get(context, image_id, session=None, force_show_deleted=False):
image = _image_get(context, image_id, force_show_deleted)
image = _normalize_locations(image)
return copy.deepcopy(image)
@log_call
def image_get_all(context, filters=None, marker=None, limit=None,
sort_key='created_at', sort_dir='desc',
member_status='accepted', is_public=None,
admin_as_user=False):
filters = filters or {}
images = DATA['images'].values()
images = _filter_images(images, filters, context, member_status,
is_public, admin_as_user)
images = _sort_images(images, sort_key, sort_dir)
images = _do_pagination(context, images, marker, limit,
filters.get('deleted'))
for image in images:
image['locations'] = _image_location_get_all(image['id'])
_normalize_locations(image)
return images
@log_call
def image_property_create(context, values):
image = _image_get(context, values['image_id'])
prop = _image_property_format(values['image_id'],
values['name'],
values['value'])
image['properties'].append(prop)
return prop
@log_call
def image_property_delete(context, prop_ref, image_ref, session=None):
prop = None
for p in DATA['images'][image_ref]['properties']:
if p['name'] == prop_ref:
prop = p
if not prop:
raise exception.NotFound()
prop['deleted_at'] = timeutils.utcnow()
prop['deleted'] = True
return prop
@log_call
def image_member_find(context, image_id=None, member=None, status=None):
filters = []
images = DATA['images']
members = DATA['members']
def is_visible(member):
return (member['member'] == context.owner or
images[member['image_id']]['owner'] == context.owner)
if not context.is_admin:
filters.append(is_visible)
if image_id is not None:
filters.append(lambda m: m['image_id'] == image_id)
if member is not None:
filters.append(lambda m: m['member'] == member)
if status is not None:
filters.append(lambda m: m['status'] == status)
for f in filters:
members = filter(f, members)
return [copy.deepcopy(m) for m in members]
@log_call
def image_member_create(context, values):
member = _image_member_format(values['image_id'],
values['member'],
values.get('can_share', False),
values.get('status', 'pending'))
global DATA
DATA['members'].append(member)
return copy.deepcopy(member)
@log_call
def image_member_update(context, member_id, values):
global DATA
for member in DATA['members']:
if (member['id'] == member_id):
member.update(values)
member['updated_at'] = timeutils.utcnow()
return copy.deepcopy(member)
else:
raise exception.NotFound()
@log_call
def image_member_delete(context, member_id):
global DATA
for i, member in enumerate(DATA['members']):
if (member['id'] == member_id):
del DATA['members'][i]
break
else:
raise exception.NotFound()
def _image_locations_set(image_id, locations):
global DATA
image = DATA['images'][image_id]
for location in image['locations']:
location['deleted'] = True
location['deleted_at'] = timeutils.utcnow()
for i, location in enumerate(DATA['locations']):
if image_id == location['image_id'] and location['deleted'] is False:
del DATA['locations'][i]
for location in locations:
location_ref = _image_locations_format(image_id, value=location['url'],
meta_data=location['metadata'])
DATA['locations'].append(location_ref)
image['locations'].append(location_ref)
def _normalize_locations(image):
undeleted_locations = filter(lambda x: not x['deleted'],
image['locations'])
image['locations'] = [{'url': loc['url'],
'metadata': loc['metadata']}
for loc in undeleted_locations]
return image
def _image_location_get_all(image_id):
location_data = []
for location in DATA['locations']:
if image_id == location['image_id']:
location_data.append(location)
return location_data
@log_call
def image_create(context, image_values):
global DATA
image_id = image_values.get('id', uuidutils.generate_uuid())
if image_id in DATA['images']:
raise exception.Duplicate()
if 'status' not in image_values:
raise exception.Invalid('status is a required attribute')
allowed_keys = set(['id', 'name', 'status', 'min_ram', 'min_disk', 'size',
'checksum', 'locations', 'owner', 'protected',
'is_public', 'container_format', 'disk_format',
'created_at', 'updated_at', 'deleted_at', 'deleted',
'properties', 'tags'])
incorrect_keys = set(image_values.keys()) - allowed_keys
if incorrect_keys:
raise exception.Invalid(
'The keys %s are not valid' % str(incorrect_keys))
image = _image_format(image_id, **image_values)
DATA['images'][image_id] = image
location_data = image_values.get('locations', None)
if location_data is not None:
_image_locations_set(image_id, location_data)
DATA['tags'][image_id] = image.pop('tags', [])
return _normalize_locations(copy.deepcopy(image))
@log_call
def image_update(context, image_id, image_values, purge_props=False):
global DATA
try:
image = DATA['images'][image_id]
except KeyError:
raise exception.NotFound()
location_data = image_values.pop('locations', None)
if location_data is not None:
_image_locations_set(image_id, location_data)
# replace values for properties that already exist
new_properties = image_values.pop('properties', {})
for prop in image['properties']:
if prop['name'] in new_properties:
prop['value'] = new_properties.pop(prop['name'])
elif purge_props:
# this matches weirdness in the sqlalchemy api
prop['deleted'] = True
    # add in any completely new properties
image['properties'].extend([{'name': k, 'value': v,
'image_id': image_id, 'deleted': False}
for k, v in new_properties.items()])
image['updated_at'] = timeutils.utcnow()
image.update(image_values)
DATA['images'][image_id] = image
return _normalize_locations(image)
@log_call
def image_destroy(context, image_id):
global DATA
try:
DATA['images'][image_id]['deleted'] = True
DATA['images'][image_id]['deleted_at'] = timeutils.utcnow()
_image_locations_set(image_id, [])
for prop in DATA['images'][image_id]['properties']:
image_property_delete(context, prop['name'], image_id)
members = image_member_find(context, image_id=image_id)
for member in members:
image_member_delete(context, member['id'])
tags = image_tag_get_all(context, image_id)
for tag in tags:
image_tag_delete(context, image_id, tag)
_normalize_locations(DATA['images'][image_id])
return copy.deepcopy(DATA['images'][image_id])
except KeyError:
raise exception.NotFound()
@log_call
def image_tag_get_all(context, image_id):
return DATA['tags'].get(image_id, [])
@log_call
def image_tag_get(context, image_id, value):
tags = image_tag_get_all(context, image_id)
if value in tags:
return value
else:
raise exception.NotFound()
@log_call
def image_tag_set_all(context, image_id, values):
global DATA
DATA['tags'][image_id] = values
@log_call
def image_tag_create(context, image_id, value):
global DATA
DATA['tags'][image_id].append(value)
return value
@log_call
def image_tag_delete(context, image_id, value):
global DATA
try:
DATA['tags'][image_id].remove(value)
except ValueError:
raise exception.NotFound()
def is_image_mutable(context, image):
"""Return True if the image is mutable in this context."""
# Is admin == image mutable
if context.is_admin:
return True
# No owner == image not mutable
if image['owner'] is None or context.owner is None:
return False
# Image only mutable by its owner
return image['owner'] == context.owner
def is_image_sharable(context, image, **kwargs):
"""Return True if the image can be shared to others in this context."""
# Is admin == image sharable
if context.is_admin:
return True
# Only allow sharing if we have an owner
if context.owner is None:
return False
# If we own the image, we can share it
if context.owner == image['owner']:
return True
# Let's get the membership association
if 'membership' in kwargs:
member = kwargs['membership']
if member is None:
# Not shared with us anyway
return False
else:
members = image_member_find(context,
image_id=image['id'],
member=context.owner)
if members:
member = members[0]
else:
# Not shared with us anyway
return False
# It's the can_share attribute we're now interested in
return member['can_share']
def is_image_visible(context, image, status=None):
"""Return True if the image is visible in this context."""
# Is admin == image visible
if context.is_admin:
return True
# No owner == image visible
if image['owner'] is None:
return True
# Image is_public == image visible
if image['is_public']:
return True
# Perform tests based on whether we have an owner
if context.owner is not None:
if context.owner == image['owner']:
return True
# Figure out if this image is shared with that tenant
if status == 'all':
status = None
members = image_member_find(context,
image_id=image['id'],
member=context.owner,
status=status)
if members:
return True
# Private image
return False
def user_get_storage_usage(context, owner_id, image_id=None, session=None):
images = image_get_all(context, filters={'owner': owner_id})
total = 0
for image in images:
if image['id'] != image_id:
total = total + (image['size'] * len(image['locations']))
return total
@log_call
def task_create(context, task_values):
"""Create a task object"""
global DATA
task_id = task_values.get('id', uuidutils.generate_uuid())
required_attributes = ['type', 'status', 'input']
allowed_attributes = ['id', 'type', 'status', 'input', 'result', 'owner',
'message', 'expires_at', 'created_at',
'updated_at', 'deleted_at', 'deleted']
if task_id in DATA['tasks']:
raise exception.Duplicate()
for key in required_attributes:
if key not in task_values:
raise exception.Invalid('%s is a required attribute' % key)
incorrect_keys = set(task_values.keys()) - set(allowed_attributes)
if incorrect_keys:
raise exception.Invalid(
'The keys %s are not valid' % str(incorrect_keys))
task = _task_format(task_id, **task_values)
DATA['tasks'][task_id] = task
return copy.deepcopy(task)
@log_call
def task_update(context, task_id, values, purge_props=False):
"""Update a task object"""
global DATA
try:
task = DATA['tasks'][task_id]
except KeyError:
msg = (_("No task found with ID %s") % task_id)
LOG.debug(msg)
raise exception.TaskNotFound(task_id=task_id)
task.update(values)
task['updated_at'] = timeutils.utcnow()
DATA['tasks'][task_id] = task
return task
@log_call
def task_get(context, task_id, force_show_deleted=False):
task = _task_get(context, task_id, force_show_deleted)
return copy.deepcopy(task)
def _task_get(context, task_id, force_show_deleted=False):
try:
task = DATA['tasks'][task_id]
except KeyError:
msg = _('Could not find task %s') % task_id
LOG.info(msg)
raise exception.TaskNotFound(task_id=task_id)
if task['deleted'] and not (force_show_deleted or context.show_deleted):
msg = _('Unable to get deleted task %s') % task_id
LOG.info(msg)
raise exception.TaskNotFound(task_id=task_id)
if not _is_task_visible(context, task):
msg = (_("Forbidding request, task %s is not visible") % task_id)
LOG.debug(msg)
raise exception.Forbidden(msg)
return task
@log_call
def task_delete(context, task_id):
global DATA
try:
DATA['tasks'][task_id]['deleted'] = True
DATA['tasks'][task_id]['deleted_at'] = timeutils.utcnow()
DATA['tasks'][task_id]['updated_at'] = timeutils.utcnow()
return copy.deepcopy(DATA['tasks'][task_id])
except KeyError:
msg = (_("No task found with ID %s") % task_id)
LOG.debug(msg)
raise exception.TaskNotFound(task_id=task_id)
@log_call
def task_get_all(context, filters=None, marker=None, limit=None,
sort_key='created_at', sort_dir='desc'):
"""
Get all tasks that match zero or more filters.
:param filters: dict of filter keys and values.
:param marker: task id after which to start page
:param limit: maximum number of tasks to return
:param sort_key: task attribute by which results should be sorted
:param sort_dir: direction in which results should be sorted (asc, desc)
:return: tasks set
"""
filters = filters or {}
tasks = DATA['tasks'].values()
tasks = _filter_tasks(tasks, filters, context)
tasks = _sort_tasks(tasks, sort_key, sort_dir)
tasks = _paginate_tasks(context, tasks, marker, limit,
filters.get('deleted'))
return tasks
def _is_task_visible(context, task):
"""Return True if the task is visible in this context."""
# Is admin == task visible
if context.is_admin:
return True
# No owner == task visible
if task['owner'] is None:
return True
# Perform tests based on whether we have an owner
if context.owner is not None:
if context.owner == task['owner']:
return True
return False
def _filter_tasks(tasks, filters, context, admin_as_user=False):
filtered_tasks = []
for task in tasks:
has_ownership = context.owner and task['owner'] == context.owner
can_see = (has_ownership or (context.is_admin and not admin_as_user))
if not can_see:
continue
add = True
for k, value in filters.iteritems():
add = task[k] == value and task['deleted'] is False
if not add:
break
if add:
filtered_tasks.append(task)
return filtered_tasks
def _sort_tasks(tasks, sort_key, sort_dir):
reverse = False
if tasks and not (sort_key in tasks[0]):
raise exception.InvalidSortKey()
keyfn = lambda x: (x[sort_key] if x[sort_key] is not None else '',
x['created_at'], x['id'])
reverse = sort_dir == 'desc'
tasks.sort(key=keyfn, reverse=reverse)
return tasks
def _paginate_tasks(context, tasks, marker, limit, show_deleted):
start = 0
end = -1
if marker is None:
start = 0
else:
# Check that the task is accessible
_task_get(context, marker, force_show_deleted=show_deleted)
for i, task in enumerate(tasks):
if task['id'] == marker:
start = i + 1
break
else:
if task:
raise exception.TaskNotFound(task_id=task['id'])
else:
msg = _("Task does not exist")
raise exception.NotFound(message=msg)
end = start + limit if limit is not None else None
return tasks[start:end]
|
Our brushless DC motors run solar power fans to keep greenhouses, attics and barns from overheating in hot weather. A similar motor is being designed to run on solar power in cellphone towers to keep equipment cool.
You’ll also find our motors releasing your soft drink purchase in the vending machine and powering many other devices that you encounter each day.
|
#!/usr/bin/python
"""
faaIngest by ThreeSixes (https://github.com/ThreeSixes)
This project is licensed under GPLv3. See COPYING for details.
This file is part of the airSuck project (https://github.com/ThreeSixes/airSUck).
"""
import config as config
import csv
import traceback
import datetime
import pymongo
import zipfile
import os
import shutil
import pycurl
from libAirSuck import asLog
from pprint import pprint
class importFaaDb:
def __init__(self):
"""
Handle the FAA aircraft database files.
"""
# Build logger.
self.__logger = asLog(config.ssrRegMongo['logMode'])
self.__logger.log("AirSuck FAA database import starting...")
# Master list of aircraft properties.
self.__acList = {}
# Master list of engine properties.
self.__engList = {}
try:
#MongoDB config
faaRegMongo = pymongo.MongoClient(config.ssrRegMongo['host'], config.ssrRegMongo['port'])
mDB = faaRegMongo[config.ssrRegMongo['dbName']]
tempCollName = "%s_tmp" %config.ssrRegMongo['coll']
            # Set up the temporary collection.
self.__mDBColl = mDB[tempCollName]
# Nuke it if it exists.
try:
self.__mDBColl.drop()
except:
# Probably means it doesn't exist. We DGAF if it blows up.
None
except:
tb = traceback.format_exc()
self.__logger.log("Failed to connect to MongoDB:\n%s" %tb)
raise
def __getFaaData(self):
"""
Download and decompress FAA data.
"""
# Final location the zip file should end up.
fileTarget = "%s%s" %(config.ssrRegMongo['tempPath'], config.ssrRegMongo['tempZip'])
self.__logger.log("Downloading FAA database to %s..." %fileTarget)
try:
try:
# Try to create our directory
os.makedirs(config.ssrRegMongo['tempPath'])
except OSError:
# Already exists. We DGAF.
None
except:
raise
# Open the file and download the FAA DB into it.
with open(fileTarget, 'wb') as outZip:
# Use cURL to snag our database.
crl = pycurl.Curl()
crl.setopt(crl.URL, config.ssrRegMongo['faaDataURL'])
crl.setopt(crl.WRITEDATA, outZip)
crl.perform()
crl.close()
except:
raise
self.__logger.log("Unzipping relevatnt files from %s..." %fileTarget)
try:
# Open our zip file
zipF = zipfile.ZipFile(fileTarget, 'r')
# Extract master file
            zipF.extract(config.ssrRegMongo['masterFile'], config.ssrRegMongo['tempPath'])
            # Extract aircraft file.
            zipF.extract(config.ssrRegMongo['acFile'], config.ssrRegMongo['tempPath'])
            # Extract engine file.
            zipF.extract(config.ssrRegMongo['engFile'], config.ssrRegMongo['tempPath'])
except:
raise
finally:
zipF.close()
return
def __nukeFaaData(self):
"""
Delete FAA data files downloaded above.
"""
self.__logger.log("Deleting %s..." %fileTarget)
try:
# Nuke the temporary directory and all files under it.
shutil.rmtree(config.ssrRegMongo['tempPath'])
except:
raise
def __loadAcftRef(self):
"""
        Load aircraft reference data from file.
"""
dataRow = False
targetFile = "%s%s" %(config.ssrRegMongo['tempPath'], config.ssrRegMongo['acFile'])
self.__logger.log("Processing aicraft reference data in %s..." %targetFile)
with open(targetFile, 'rb') as csvFile:
for row in csv.reader(csvFile):
# Blank the row, create template.
thisRow = {}
if dataRow:
# Type-correct our CSV data.
try:
thisRow.update({'mfgName': row[1].strip()})
except:
None
try:
thisRow.update({'modelName': row[2].strip()})
except:
None
try:
thisRow.update({'acType': int(row[3].strip())})
except:
None
try:
thisRow.update({'engType': int(row[4].strip())})
except:
None
try:
thisRow.update({'acCat': int(row[5].strip())})
except:
None
try:
thisRow.update({'buldCert': int(row[6].strip())})
except:
None
try:
thisRow.update({'engCt': int(row[7].strip())})
except:
None
try:
thisRow.update({'seatCt': int(row[8].strip())})
except:
None
try:
thisRow.update({'weight': int(row[9].replace("CLASS ", "").strip())})
except:
None
try:
thisRow.update({'cruiseSpd': int(row[10].strip())})
except:
None
self.__acList.update({row[0].strip(): thisRow})
else:
dataRow = True
return
def __loadEngine(self):
"""
Load engine reference data from file.
"""
dataRow = False
targetFile = "%s%s" %(config.ssrRegMongo['tempPath'], config.ssrRegMongo['engFile'])
self.__logger.log("Processing engine reference data in %s..." %targetFile)
with open(targetFile, 'rb') as csvFile:
for row in csv.reader(csvFile):
# Blank the row, create template.
thisRow = {}
if dataRow:
# Type-correct our CSV data.
try:
thisRow.update({'mfgName': row[1].strip()})
except:
None
try:
thisRow.update({'modelName': row[2].strip()})
except:
None
try:
thisRow.update({'engType': int(row[3].strip())})
except:
None
try:
thisRow.update({'engHP': int(row[4].strip())})
except:
None
try:
thisRow.update({'thrust': int(row[5].strip())})
except:
None
# Tack our row on.
self.__engList.update({row[0].strip(): thisRow})
else:
dataRow = True
return
def __processMaster(self):
"""
Load master aircraft data from file. This should be called AFTER __loadAcftRef and __loadEngine.
"""
dataRow = False
targetFile = "%s%s" %(config.ssrRegMongo['tempPath'], config.ssrRegMongo['masterFile'])
self.__logger.log("Processing master aicraft data in %s..." %targetFile)
with open(targetFile, 'rb') as csvFile:
for row in csv.reader(csvFile):
# Blank the row, create template.
thisRow = {}
if dataRow:
# Type-correct our CSV data.
try:
thisRow.update({'nNumber': "N%s" %row[0].strip()})
except:
None
try:
thisRow.update({'serial': row[1].strip()})
except:
None
try:
thisRow.update({'acMfg': self.__acList[row[2].strip()]})
except:
None
try:
thisRow.update({'engMfg': self.__engList[row[3].strip()]})
except:
None
try:
thisRow.update({'yearMfg': int(row[4].strip())})
except:
None
try:
thisRow.update({'regType': int(row[5].strip())})
except:
None
try:
thisRow.update({'regName': row[6].strip()})
except:
None
try:
thisRow.update({'street1': row[7].strip()})
except:
None
try:
thisRow.update({'street2': row[8].strip()})
except:
None
try:
thisRow.update({'city': row[9].strip()})
except:
None
try:
thisRow.update({'state': row[10].strip()})
except:
None
try:
thisRow.update({'zip': row[11].strip()})
except:
None
try:
thisRow.update({'region': row[12].strip()})
except:
None
try:
thisRow.update({'countyCode': row[13].strip()})
except:
None
try:
thisRow.update({'countryCode': row[14].strip()})
except:
None
try:
thisRow.update({'lastActDate': datetime.datetime.strptime(row[15].strip(), '%Y%m%d')})
except:
None
try:
thisRow.update({'certIssDate': datetime.datetime.strptime(row[16].strip(), '%Y%m%d')})
except:
None
try:
thisRow.update({'airWorthClass': row[17].strip()})
except:
None
try:
thisRow.update({'acType': int(row[18].strip())})
except:
None
try:
thisRow.update({'engType': int(row[19].strip())})
except:
None
try:
thisRow.update({'statCode': row[20].strip()})
except:
None
try:
thisRow.update({'modeSInt': int(row[21].strip())})
except:
None
try:
thisRow.update({'fractOwner': row[22].strip()})
except:
None
try:
thisRow.update({'airWorthyDate': datetime.datetime.strptime(row[23].strip(), '%Y%m%d')})
except:
None
try:
thisRow.update({'otherName1': row[24].strip()})
except:
None
try:
thisRow.update({'otherName2': row[25].strip()})
except:
None
try:
thisRow.update({'otherName3': row[26].strip()})
except:
None
try:
thisRow.update({'otherName4': row[27].strip()})
except:
None
try:
thisRow.update({'otherName5': row[28].strip()})
except:
None
try:
thisRow.update({'expireDate': datetime.datetime.strptime(row[29].strip(), '%Y%m%d')})
except:
None
try:
thisRow.update({'uid': row[30].strip()})
except:
None
try:
thisRow.update({'kitMfr': row[31].strip()})
except:
None
try:
thisRow.update({'kitMdl': row[32].strip()})
except:
None
try:
thisRow.update({'modeSHex': row[33].strip().lower()})
except:
None
# Insert the row.
try:
self.__mDBColl.insert(thisRow)
except:
raise
else:
dataRow = True
return
def migrateDb(self):
"""
Swap out the old database for the new.
"""
self.__logger.log("Migrate new processed aircraft data to live data...")
try:
# Try to overwrite the main collection.
            # pymongo exposes renaming as Collection.rename(); dropTarget=True overwrites the live collection.
            self.__mDBColl.rename(config.ssrRegMongo['coll'], dropTarget=True)
except:
raise
return
def run(self):
"""
Do all the work in sequence.
"""
try:
# Grab and decompress file.
self.__getFaaData()
# Pull aircraft reference data.
self.__loadAcftRef()
# Pull aircraft engine data.
self.__loadEngine()
            # Insert master aircraft records combined with records from the engine and aircraft reference data.
self.__processMaster()
# Swap the database.
            self.migrateDb()
except:
tb = traceback.format_exc()
print("Unhandled exception:\n%s" %tb)
finally:
try:
# Drop the temporary collection.
self.__mDBColl.drop()
except:
                # We DGAF if this doesn't work.
None
try:
# Drop the temporary collection.
self.__nukeFaaData()
except:
                # We DGAF if this doesn't work.
None
ifdb = importFaaDb()
ifdb.run()
|
more often. Did you hire out a designer to create your theme?
through many of these issues as well.
an overly skilled blogger. I have joined your feed and look ahead to seeking extra of your excellent post.
paphos car hire and Israel to begin explorations in their respective territorial waters.
going to bookmark your website and keep checking for new information about once per week.
|
import theano
import theano.tensor as T
import cgml.types
from cgml.optimizers import Momentum,AdaDelta
from cgml.graph import ComputationalGraph
from cgml.layers.base import _make_shared
import numpy as np
from nose.tools import assert_true,assert_equals,assert_almost_equals
def test_momentum():
schema = {'description':'logreg',
'type':'classification',
'supervised-cost':
{'type':'negative-log-likelihood',
'name':'class-out'},
'graph':
[{'activation':'softmax',
'n_in':10,
'n_out':2,
'dropout':0.0,
'name':'class-out'}]
}
#optimizer = Momentum()
#model = ComputationalGraph(schema = schema,
# optimizer = optimizer)
def test_adadelta_logreg():
x = T.fvector('x')
y = T.fscalar('y')
w = _make_shared([1.0,1.0],name='w')
b = _make_shared([1.0],name='b')
yhat = 1.0 / ( 1.0 + T.exp( - T.dot(x,w) - b ) )
e = y - yhat
cost = T.dot(e,e)
ad = AdaDelta(cost = cost,
params = [w,b])
update = theano.function( inputs = [x,y],
outputs = cost,
updates = ad.updates )
c = update([2,1],0)
assert_almost_equals(c,0.9643510838246173)
c_prev = c
for i in range(100):
c = update([2,1],0)
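        # Sanity check: NaN is never equal to itself, so this assertion guards against the cost becoming NaN.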
assert_equals(c,c)
assert_true(c < c_prev)
c_prev = c
def test_adadelta_model():
schema = {'description':'logreg',
'type':'classification',
'supervised-cost':
{'type':'negative-log-likelihood',
'name':'class-out'},
'graph':
[{'activation':'softmax',
'n_in':10,
'n_out':2,
'dropout':0.0,
'name':'class-out'}]
}
model = ComputationalGraph(schema = schema,
seed = 0)
x = np.asarray([[1,2,3,4,5,1,2,3,4,5]]).astype(cgml.types.floatX)
y = np.asarray([0],dtype=cgml.types.intX)
model.setTrainDataOnDevice(x,y)
for i in range(10):
model.supervised_update(0,1)
|
The ball end design allows up to a 25-degree entry angle for easy reach over and around obstructions.
For fast and sure identification of each series, inch sizes come in a gray housing and metric sizes come in a red housing.
13-pc. long arm ball end inch hex key wrenches: 3/64, 1/16, 5/64, 3/32, 7/64, 1/8, 9/64, 5/32, 3/16, 7/32, 1/4, 5/16, 3/8 in.
|
import os, subprocess, time, re
from waflib.Configure import conf
def configure(conf):
conf.start_msg('Checking for program module')
# this does not work, since module is exported as a function on valgol:
# conf.find_program('module')
    # Therefore:
if os.system('source /usr/local/Modules/current/init/bash && module purge') == 0:
conf.end_msg('module')
else:
conf.end_msg('module not found')
conf.fatal('Could not find the program module')
@conf
def load_modules(self,*k,**kw):
module_string = ''
try:
for module in kw['modules']:
module_string += module+' '
except KeyError:
self.fatal('You must give modules to function check_modules like this: check_module(modules=[a,b,c])')
#self.start_msg('Loading modules')
p = subprocess.Popen('source /usr/local/Modules/current/init/bash && module load '+module_string+' && export -p', shell=True, stdout=subprocess.PIPE)
p.wait()
if p.returncode == 0:
for key in os.environ.iterkeys():
os.unsetenv(key)
for line in p.stdout:
            m = re.search(r'(\w+)=(".+")$', line)
if (m):
os.putenv(m.group(1), m.group(2))
#self.end_msg(module_string)
else:
self.fatal('Loading modules did not work')
|
With this being the last video pick from Clutch and Chrome before Christmas Day, there was never any hesitation what would be featured. Only a few years old, Harley-Davidson’s ‘The Miracle of Biker Claus’ is a modern classic.
Our motorcycle video pick of the day is as much about the well-known actor it features as it is the gorgeous ride.
Our motorcycle video pick of the day brings together frequently mentioned names on Clutch and Chrome, Café racers, Royal Enfield and Ace Café in London.
With the holidays getting closer, Clutch and Chrome has the perfect way to reach out to biker-buddies using our video pick of the day.
The latest challenge taking over social media, participants freezing in place as if they were a mannequin, rides into our motorcycle world with a new video from Yamaha.
Motorcycle commercials have become what ads for constipation and Viagra once were: everyone who uses the product is having a great time and living an action-filled life.
The motorcycles featured in today’s motorcycle video pick are catching many a biker’s eye.
If two things were ever meant to go together, a Ural motorcycle and sidecar would be it. Not surprisingly, this combination is used to show exactly how to ride with a sidecar.
Tall rider? Today’s video pick looks at how Harley-Davidson is making time in the saddle more comfortable for the bigger bikers among us.
Nostalgia may be a thing of the past, but there is nothing sexier than a modern motorcycle enjoying a classic style. For this video pick we look to BMW Motorrad’s new R nineT Racer.
A biker who just took off from Sydney Australia with no particular planning or budget makes for a fascinating motorcycle story and certainly warrants a video pick from Clutch and Chrome.
There is nothing good about a motorcycle accident, but an air of humor creeps in when a rider is rear-ended by his own mother.
Motorcycle enthusiasts looking for a new channel to follow on YouTube should prepare themselves for some works of video art from Honda.
Using a version of online virtual reality, motorcycle enthusiasts can put themselves at the center of a legendary racing event with the latest video pick from Clutch and Chrome.
Motorcycle enthusiasts who ride around the globe and women who ride. Both are popular topics and BMW Motorrad combines the two for an incredible video, so much so it’s our pick for the day.
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-02-05 12:07
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('feeds', '0002_urlshared_cleaned_text'),
]
operations = [
migrations.AddField(
model_name='authtoken',
name='me_json',
field=django.contrib.postgres.fields.jsonb.JSONField(default={}),
),
migrations.AddField(
model_name='twitteraccount',
name='account_json',
field=django.contrib.postgres.fields.jsonb.JSONField(default={}),
),
migrations.AddField(
model_name='urlshared',
name='url_json',
field=django.contrib.postgres.fields.jsonb.JSONField(default={}),
),
migrations.AlterField(
model_name='urlshared',
name='url_shared',
field=models.DateTimeField(),
),
]
|
Irish American bride Laurin Long knew she was at risk for cancer; her mother had died of pancreatic cancer, her father of colon cancer. Like her sisters, Erin Long Bergeson and Kristen Long Connell, she vowed to be watchful.
Her worst fears were realized when doctors told her she had stage two breast cancer--just before she met the love of her life, Michael Bank.
Indeed, the 29-year-old South Carolina girl was undergoing chemotherapy when they met in 2015 and quickly became soulmates.
Just weeks until her wedding day, she decided to shave her head.
“I woke up with bald spots this morning — so shaved head it is,” she said in a nine-minute Facebook Live video.
Happy shave my head day!!
Laurin had battled breast cancer before and, in fact, had been bald when she met Michael three years ago. Incredibly she posed bald and wore boxing gloves on the dating site she used, PlentyofFish.com.
Michael told the Washington Post her smile captured his attention-and her ballsy message.
“If you met me in person, you were going to see a bald girl, so I just wanted to be open and honest and say, ‘Hey, this is me,'” she said.
They talked for hours before they met.
After they began dating, Laurin had her breasts removed, and Michael stood guard at her bedside when she got out of surgery and made sure her home was spick and span.
“Mike made it very clear when I got back from my surgery that I was his,” she said.
They traveled widely and became incredibly close. In June 2017, they went to Niagara Falls, and Mike shocked her with a proposal and a wedding ring.
“Can we keep this party going?” he asked.
“I said ‘yes’ and was kissing him like crazy,” Laurin said.
Laurin found out it had spread to her bones and her liver.
“I looked at Mike and said, ‘Do you still want to marry me?’ He said, ‘Yes, I asked you. We’re not changing anything,'” she said.
The doctors disagreed -- at stage 4 and metastatic, time was running out.
“Mike and I felt like if we changed our wedding date, it would be a rush; we wouldn’t have it on our terms,” Laurin said.
“For Mike, it felt like we would be giving in to cancer,” she said.
“Not moving the wedding date gave Laurin something to look forward to and to keep pushing toward,” Michael said.
By December, it was in her bones.
She decided to join a clinical trial. But again, the couple refused to change their plans.
Laurin was accepted into a new clinical trial.
The couple’s wedding photographer, Tiffany Ellis, said the love and support from family and friends was palpable.
A few weeks later came more good news. The experimental drug appeared to be working, at least for now.
Finally the couple appear to have caught a break. Laurin and Michael deserve all the luck of the Irish in their new lives.
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('enhance', '0002_auto_20150303_2223'),
]
operations = [
migrations.RenameField(
model_name='messagetopic',
old_name='source',
new_name='message',
),
migrations.RenameField(
model_name='messageword',
old_name='source',
new_name='message',
),
migrations.AlterField(
model_name='messagetopic',
name='topic_model',
field=models.ForeignKey(to='enhance.TopicModel', db_index=False),
preserve_default=True,
),
migrations.AlterField(
model_name='messageword',
name='dictionary',
field=models.ForeignKey(to='enhance.Dictionary', db_index=False),
preserve_default=True,
),
migrations.AlterIndexTogether(
name='messagetopic',
index_together=set([('topic_model', 'message')]),
),
migrations.AlterIndexTogether(
name='messageword',
index_together=set([('dictionary', 'message')]),
),
]
|
In a strange little coincidence, LISNews has this little article about a library in Brooklyn that had living quarters for the librarian. Many of you know that this is my dream job, ultimately, and to see this in Brooklyn was just too crazy! Turns out, it wasn’t the Brooklyn that I was thinking of.
|
# -*- coding: utf8 -*-
# This file is part of PyBossa.
#
# Copyright (C) 2015 SF Isle of Man Limited
#
# PyBossa is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBossa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBossa. If not, see <http://www.gnu.org/licenses/>.
class WebhookAuth(object):
_specific_actions = []
def __init__(self, project_repo):
self.project_repo = project_repo
@property
def specific_actions(self):
return self._specific_actions
def can(self, user, action, webhook=None, project_id=None):
action = ''.join(['_', action])
return getattr(self, action)(user, webhook, project_id)
def _create(self, user, webhook, project_id=None):
return False
def _read(self, user, webhook=None, project_id=None):
if user.is_anonymous() or (webhook is None and project_id is None):
return False
project = self._get_project(webhook, project_id)
return user.admin or user.id == project.owner_id
def _update(self, user, webhook, project_id=None):
return False
def _delete(self, user, webhook, project_id=None):
return False
def _get_project(self, webhook, project_id):
if webhook is not None:
return self.project_repo.get(webhook.project_id)
return self.project_repo.get(project_id)
|
What are the age levels for children that the WABC®-S tests? The WABC-S® contains two age-level tests. Level 1 assesses knowledge and use of basic concepts for ages 3;0 to 5;11. Level 2 assesses knowledge and use of basic concepts for ages 5;0 to 7;11. Children from the ages of 5;0 to 5;11 may take both levels.
How long does it take to administer the WABC®-S? Each level of the WABC®-S takes 10-15 minutes to administer. For children ages 5;0 to 5;11 taking both levels, allow approximately 20-30 minutes.
How did you choose the basic concept words tested in the WABC®-S? The adequate selection of vocabulary is crucial to test reliability because children from different countries may use different words or names to describe items. The WABC®-S contains multiple word choices for each one of the two sections, expressive and receptive. These word choices assist in using the child’s appropriate regional vocabulary (Mexican, Puerto Rican, Cuban, as well as Central or South American).
Did you compare the WABC®-S to another basic concepts test? Yes. A concurrent validity study was conducted in which the results of the WABC® were compared to the results of the Boehm Test of Basic Concepts-3 Preschool (Boehm-3Preschool) and Boehm Test of Basic Concepts-3 (Boehm-3). The results indicated significant correlations between the Boehm tests and the WABC®-S.
Is the WABC-S® merely a translation? No, the WABC-S® is an interpretation of the English Version, not a direct translation. Some words and items vary based on cultural influences.
Why is it important to assess basic concepts in children? Basic concepts are the building blocks that children must use to follow directions, engage in classroom routines, and provide descriptions of the world around them. They are fundamental for performing everyday tasks such as reading, writing, speaking, and math. Knowledge of these concepts directly relates to academic achievement.
How is the WABC®-S different from other tests of basic concepts? The WABC®-S differs from other basic concept tests in many ways.
First, the WABC®-S evaluates the receptive and expressive use of basic concepts. WABC®-S presents the concepts in opposite (big/small) and/or related pairs (half/whole), rather than single, unrelated concepts.
Second, the test is in an interactive storybook format, giving the examiner the opportunity to test the child in a more natural setting. There is no easel. Instead, the WABC®-S embeds concept pairs or related words in colorfully illustrated scenes of a storybook.
Third, the WABC®-S kit contains two age-level tests for evaluating the understanding and use of basic concepts in Spanish. Level 1 - Un Día en el Zoológico is for the preschool child from 3;0 to 5;11 years. Level 2 - Un Día en el Parque is for children ages 5;0 to 7;11 in kindergarten and early elementary grades.
How can I use the results of the WABC®-S to identify areas of weakness and write IEP goals? The results of testing for both Level 1 and Level 2 can be summarized on the WABC-S® inventory form. This form categorizes the basic concepts into seven semantic categories: Color/Forma(Color/Shape), Peso/Volumen (Weight/Volume), Distancia/Tiempo/Velocidad (Distance/Time/Speed), Cantidad/Capacidad (Quantity/Completeness), Ubicación/Orientación (Location/Direction), Condición/Calidad (Condition/Quality), Sensación/Emoción/Evaluación (Sensation/Emotion/Evaluation).
Transfer the testing results to the inventory form in order to assist you in writing IEP goals and explaining test results to parents.
May I purchase components of the WABC®-S kit separately? Yes.
|
"""
Acceptance tests for Studio.
"""
from bok_choy.web_app_test import WebAppTest
from ..pages.studio.asset_index import AssetIndexPage
from ..pages.studio.auto_auth import AutoAuthPage
from ..pages.studio.checklists import ChecklistsPage
from ..pages.studio.course_import import ImportPage
from ..pages.studio.course_info import CourseUpdatesPage
from ..pages.studio.edit_tabs import PagesPage
from ..pages.studio.export import ExportPage
from ..pages.studio.howitworks import HowitworksPage
from ..pages.studio.index import DashboardPage
from ..pages.studio.login import LoginPage
from ..pages.studio.manage_users import CourseTeamPage
from ..pages.studio.overview import CourseOutlinePage
from ..pages.studio.settings import SettingsPage
from ..pages.studio.settings_advanced import AdvancedSettingsPage
from ..pages.studio.settings_graders import GradingPage
from ..pages.studio.signup import SignupPage
from ..pages.studio.textbooks import TextbooksPage
from ..fixtures.course import CourseFixture, XBlockFixtureDesc
from .helpers import UniqueCourseTest
class LoggedOutTest(WebAppTest):
"""
Smoke test for pages in Studio that are visible when logged out.
"""
def setUp(self):
super(LoggedOutTest, self).setUp()
self.pages = [LoginPage(self.browser), HowitworksPage(self.browser), SignupPage(self.browser)]
def test_page_existence(self):
"""
Make sure that all the pages are accessible.
Rather than fire up the browser just to check each url,
do them all sequentially in this testcase.
"""
for page in self.pages:
page.visit()
class LoggedInPagesTest(WebAppTest):
"""
Tests that verify the pages in Studio that you can get to when logged
in and do not have a course yet.
"""
def setUp(self):
super(LoggedInPagesTest, self).setUp()
self.auth_page = AutoAuthPage(self.browser, staff=True)
self.dashboard_page = DashboardPage(self.browser)
def test_dashboard_no_courses(self):
"""
Make sure that you can get to the dashboard page without a course.
"""
self.auth_page.visit()
self.dashboard_page.visit()
class CoursePagesTest(UniqueCourseTest):
"""
Tests that verify the pages in Studio that you can get to when logged
in and have a course.
"""
COURSE_ID_SEPARATOR = "."
def setUp(self):
"""
Install a course with no content using a fixture.
"""
super(CoursePagesTest, self).setUp()
CourseFixture(
self.course_info['org'],
self.course_info['number'],
self.course_info['run'],
self.course_info['display_name']
).install()
self.auth_page = AutoAuthPage(self.browser, staff=True)
self.pages = [
clz(self.browser, self.course_info['org'], self.course_info['number'], self.course_info['run'])
for clz in [
AssetIndexPage, ChecklistsPage, ImportPage, CourseUpdatesPage,
PagesPage, ExportPage, CourseTeamPage, CourseOutlinePage, SettingsPage,
AdvancedSettingsPage, GradingPage, TextbooksPage
]
]
def test_page_existence(self):
"""
Make sure that all these pages are accessible once you have a course.
Rather than fire up the browser just to check each url,
do them all sequentially in this testcase.
"""
# Log in
self.auth_page.visit()
# Verify that each page is available
for page in self.pages:
page.visit()
class DiscussionPreviewTest(UniqueCourseTest):
"""
Tests that Inline Discussions are rendered with a custom preview in Studio
"""
def setUp(self):
super(DiscussionPreviewTest, self).setUp()
CourseFixture(**self.course_info).add_children(
XBlockFixtureDesc("chapter", "Test Section").add_children(
XBlockFixtureDesc("sequential", "Test Subsection").add_children(
XBlockFixtureDesc("vertical", "Test Unit").add_children(
XBlockFixtureDesc(
"discussion",
"Test Discussion",
)
)
)
)
).install()
AutoAuthPage(self.browser, staff=True).visit()
cop = CourseOutlinePage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
cop.visit()
self.unit = cop.section('Test Section').subsection('Test Subsection').toggle_expand().unit('Test Unit')
self.unit.go_to()
def test_is_preview(self):
"""
Ensure that the preview version of the discussion is rendered.
"""
self.assertTrue(self.unit.q(css=".discussion-preview").present)
self.assertFalse(self.unit.q(css=".discussion-show").present)
|
I bought this book because: it may have been the only Crowther book I could find, but the illustrations are charming and I would have picked it anyway! They possess so much energy while maintaining a level of secrecy, of mystery, always leaving me wanting more. It’s a folk tale, unlike conventional American counterparts in word count and style, but universal in the telling of how a wild creature might not adapt to a home life.
Resources/activities: as this book may not be available in English I won’t add activities for it, but would like to invite caregivers and children to explore the world languages departments of their local bookstores and libraries.
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import math
from mathutils import Vector, Matrix
import bpy
from bpy.props import BoolProperty
from sverchok.node_tree import SverchCustomTreeNode
from sverchok.data_structure import updateNode
# unit normal vector of plane defined by points a, b, and c
def unit_normal(a, b, c):
mat_x = Matrix(((1, a[1], a[2]), (1, b[1], b[2]), (1, c[1], c[2])))
mat_y = Matrix(((a[0], 1, a[2]), (b[0], 1, b[2]), (c[0], 1, c[2])))
mat_z = Matrix(((a[0], a[1], 1), (b[0], b[1], 1), (c[0], c[1], 1)))
x = Matrix.determinant(mat_x)
y = Matrix.determinant(mat_y)
z = Matrix.determinant(mat_z)
magnitude = (x**2 + y**2 + z**2)**.5
if magnitude == 0:
magnitude = 1
return (x/magnitude, y/magnitude, z/magnitude)
# area of polygon poly
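# (Newell's method: sum the cross products of consecutive vertex pairs,
# project that sum onto the polygon's unit normal, and halve the result to
# obtain the area of the planar polygon.)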
def area_pol(poly):
if len(poly) < 3: # not a plane - no area
return 0
total = Vector((0, 0, 0))
for i in range(len(poly)):
vi1 = Vector(poly[i])
        if i == len(poly)-1:
vi2 = Vector(poly[0])
else:
vi2 = Vector(poly[i+1])
prod = vi1.cross(vi2)[:]
total[0] += prod[0]
total[1] += prod[1]
total[2] += prod[2]
result = total.dot(unit_normal(poly[0], poly[1], poly[2]))
return abs(result/2)
def areas(Vertices, Polygons, per_face):
areas = []
for i, obj in enumerate(Polygons):
res = []
for face in obj:
poly = []
for j in face:
poly.append(Vertices[i][j])
res.append(area_pol(poly))
if per_face:
areas.extend(res)
else:
areas.append(math.fsum(res))
return areas
class AreaNode(bpy.types.Node, SverchCustomTreeNode):
''' Area '''
bl_idname = 'AreaNode'
bl_label = 'Area'
bl_icon = 'OUTLINER_OB_EMPTY'
sv_icon = 'SV_AREA'
per_face = BoolProperty(name='per_face',
default=True,
update=updateNode)
def sv_init(self, context):
self.inputs.new('VerticesSocket', "Vertices", "Vertices")
self.inputs.new('StringsSocket', "Polygons", "Polygons")
self.outputs.new('StringsSocket', "Area", "Area")
def draw_buttons(self, context, layout):
layout.prop(self, "per_face", text="Count faces")
def process(self):
# inputs
inputs = self.inputs
outputs = self.outputs
        if 'Area' not in outputs:
return
Vertices = inputs["Vertices"].sv_get()
Polygons = inputs["Polygons"].sv_get()
# outputs
if outputs['Area'].is_linked:
outputs['Area'].sv_set([areas(Vertices, Polygons, self.per_face)])
def register():
bpy.utils.register_class(AreaNode)
def unregister():
bpy.utils.unregister_class(AreaNode)
|
Find a Vast Variety of Pre-Owned Indian Vintage Saris in Re-Usable condition.
We offer All types of Vintage Sarees like Hand Beaded, Embroidered, Woven, Printed, Heavy & Antique Banarasi Saris.
One can Use these Saris for Wearing Purposes or as a Craft Fabric to Create something New.
Find a Hand-Picked Variety of Pre-Owned Vintage Indian Long Stoles in Re-Usable condition.
We offer All types of Vintage Indian Dupattas like Hand Beaded, Embroidered, Woven & Printed.
One can Use these Scarves for Wearing Purposes or as a Craft Fabric to Create something New.
Find a Large Variety of Pre-Owned & New Printed, Embroidered, Woven Fabrics, Borders, Lace & Trims for your Sewing Craft Projects.
Find a Vast Variety of Banarasi Brocade Dupattas / Indian Long Stoles direct from Banaras.
We are working on an all-new product range of Banarasi Dupattas, Banarasi Sarees, Banarasi Brocade Fabrics & Many more items manufactured in the Holy city of Banaras.
|
#!/usr/bin/env python
#
# === Search for unicode symbol ===
#
# MIT License
# Copyright (c) 2016 Attila Majoros
#
# Source: https://github.com/majk1/shellrc/blob/master/utils/unicode.py
#
import sys
import os.path
import re
try:
from urllib2 import urlopen
except ImportError:
from urllib.request import urlopen
UNICODE_SYMBOLS_SOURCE_URL = 'http://www.unicode.org/Public/UNIDATA/UnicodeData.txt'
UNICODE_SYMBOLS_SOURCE_URL_FALLBACK = 'https://static.codelens.io/UnicodeData.txt'
UNICODE_SYMBOLS_LIST = '/tmp/unicode_symbols.lst'
DEBUG = False
def debug(message):
if DEBUG:
sys.stderr.write('DBG: ' + message + '\n')
def fetch_unicode_data(url, target_file):
debug('Fetching unicode symbol list from ' + url)
data = urlopen(url)
fl = open(target_file, 'wt')
line_counter = 0
for desc_line in data:
m = re.match('^(.{4};[^<][^;]+);.*', desc_line.decode())
if m:
fl.write(m.group(1).lower())
fl.write('\n')
line_counter = line_counter + 1
fl.close()
debug('Fetched and filtered ' + str(line_counter) + ' symbols into ' + target_file)
def is_unicode_symbols_list_file_usable(file_name):
if os.path.exists(file_name):
if os.path.getsize(file_name) > 0:
return True
return False
if __name__ == '__main__':
args = list(sys.argv[1:])
if len(args) > 0:
if args[0] == '-d':
DEBUG = True
args = args[1:]
debug('Python version: ' + sys.version)
if len(args) == 0:
sys.stderr.write('Usage: unicode.py [-d] <search>\n')
sys.stderr.write('\n')
sys.stderr.write(' -d - enabled debug messages\n')
sys.stderr.write(' <search> - multiple search patterns separated by space\n')
sys.stderr.write('\n')
sys.stderr.write('Example: ./unicode.py black circle\n')
sys.stderr.write('\n')
sys.exit(1)
if not is_unicode_symbols_list_file_usable(UNICODE_SYMBOLS_LIST):
try:
fetch_unicode_data(UNICODE_SYMBOLS_SOURCE_URL, UNICODE_SYMBOLS_LIST)
except Exception as e:
debug('Could not download unicode symbol list: ' + str(e))
debug('trying fallback url: ' + UNICODE_SYMBOLS_SOURCE_URL_FALLBACK)
try:
fetch_unicode_data(UNICODE_SYMBOLS_SOURCE_URL_FALLBACK, UNICODE_SYMBOLS_LIST)
except Exception as ee:
sys.stderr.write('Could not download unicode symbol list from fallback url: ' + str(ee) + '\n')
sys.exit(2)
search = [s.lower() for s in args]
debug('searching for unicode symbols by: ' + '+'.join(search))
with open(UNICODE_SYMBOLS_LIST, 'r') as f:
for line in f:
if all(s in line for s in search):
code, name = line.rstrip().split(';')
symbol = ('\\u' + code).encode('utf-8').decode('raw_unicode_escape')
try:
print(symbol + '\t\\u' + code + '\t&#x' + code + ';\t' + name)
except UnicodeEncodeError as e:
print((symbol + '\t\\u' + code + '\t&#x' + code + ';\t' + name).encode('utf-8'))
|
In order to use the SFFlexSuite ® Web Portal, you must have an account. To create an account, click on the Sign up link in the upper right portion of the screen.
You will see a form asking you for your contact information as well as your email address and password. All of the information on this page must be entered in order to create your account. The email address will be your login name and your password must be at least six characters long and contain at least one capital letter, one lower case letter, and one number.
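For anyone scripting against the portal, the password rule above is easy to pre-check before submitting the form. The snippet below is only an illustrative sketch in Python; the helper name and the check are my own, and the portal's actual validation logic is not published here.
import re

def is_valid_portal_password(password):
    """Return True if the password is at least six characters long and contains
    at least one capital letter, one lower-case letter, and one number."""
    return (len(password) >= 6
            and re.search(r"[A-Z]", password) is not None
            and re.search(r"[a-z]", password) is not None
            and re.search(r"[0-9]", password) is not None)

# Example: is_valid_portal_password("Abc123") is True; is_valid_portal_password("abc123") is False.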
You will be required to read and agree to the terms laid out in the disclaimer, which can be viewed by clicking on the link next to the checkbox at the bottom of the form. Check the checkbox to indicate that you have read and agree to the disclaimer and create your account.
Once you have created your account, you will be taken to the User Profile.
Once you have created your account or have had one created for you by an administrator, you can log in to the portal using the controls in the upper right portion of the screen. Your login name is the email address used to create the account.
If your account was created for you by an administrator, you will see a screen requesting that you read and agree to the terms of the disclaimer. It can be viewed by clicking on the link next to the checkbox at the bottom of the form. Check the checkbox to indicate that you have read and agree to the disclaimer and continue into the portal. This screen will only appear the first time you log in.
If you have forgotten your password, click on the Forgot my password link on the portal page. This will open a box where you can enter your email address. Once you have entered your email address, click on the Submit button. A temporary password will be generated and emailed to you. Log in using this password, then go to the User Profile and change your password to a new one.
|
from qanta import qlogging
from qanta.ingestion.answer_mapping import read_wiki_titles
from qanta.ingestion.annotated_mapping import PageAssigner
log = qlogging.get("validate_annotations")
def normalize(title):
return title.replace(" ", "_")
def check_page(page, titles):
n_page = normalize(page)
if n_page not in titles:
log.error(f"Title not found: {page}")
def main():
titles = read_wiki_titles()
assigner = PageAssigner()
log.info("Checking direct protobowl mappings...")
for page in assigner.protobowl_direct.values():
check_page(page, titles)
log.info("Checking direct quizdb mappings...")
for page in assigner.quizdb_direct.values():
check_page(page, titles)
log.info("Checking unambiguous mappings...")
for page in assigner.unambiguous.values():
check_page(page, titles)
log.info("Checking ambiguous mappings...")
for entry in assigner.ambiguous.values():
for option in entry:
check_page(option["page"], titles)
if __name__ == "__main__":
main()
|
Giulio Riotta is a professional photographer in Rome specialized in the conference field. Since 2008 he has been working as a photographer for conferences on behalf of Google Enterprise, Spar, Moleskine, Avaya, Gartner, Gazprom, Danone, Shell and other companies which have chosen him for the visual storytelling of international events from 50 to 1500 people.
Thanks to Wi-Fi, you can share your event live on the Internet and social media. The best images shot by the photographer are uploaded simultaneously to an online space, and you can use them on Twitter, Facebook or wherever you wish.
Sometimes a company invests substantial sums of money in the organization of a conference but decides to save money on the photographic service. Yet the professionalism of a photographer is essential even after the event, when it is important for the company to remember the occasion, promote it, or look for a sponsor.
The organization of a conference may require days, weeks, sometimes even months. That’s why it’s important to rely on a professional photographer. A professional knows how to work in a conference hall and how to valorise your company. In this way, you will have a photographic service which shows your event at its best.
In order to photograph an event in the most accurate way, extreme care, sixth sense and curiosity are essential. You can’t be happy with the traditional group photo, but you have to look for the right frame, the correct light, the best moment to shoot. Determination and enterprising spirit are essential to do this job at its best!
|
"""Utils."""
import getpass
import keyring
from sys import stdout
from .exceptions import PyiCloudNoStoredPasswordAvailableException
KEYRING_SYSTEM = "pyicloud://icloud-password"
def get_password(username, interactive=stdout.isatty()):
"""Get the password from a username."""
try:
return get_password_from_keyring(username)
except PyiCloudNoStoredPasswordAvailableException:
if not interactive:
raise
return getpass.getpass(
"Enter iCloud password for {username}: ".format(username=username)
)
def password_exists_in_keyring(username):
"""Return true if the password of a username exists in the keyring."""
try:
get_password_from_keyring(username)
except PyiCloudNoStoredPasswordAvailableException:
return False
return True
def get_password_from_keyring(username):
"""Get the password from a username."""
result = keyring.get_password(KEYRING_SYSTEM, username)
if result is None:
raise PyiCloudNoStoredPasswordAvailableException(
"No pyicloud password for {username} could be found "
"in the system keychain. Use the `--store-in-keyring` "
"command-line option for storing a password for this "
"username.".format(username=username)
)
return result
def store_password_in_keyring(username, password):
"""Store the password of a username."""
return keyring.set_password(KEYRING_SYSTEM, username, password)
def delete_password_in_keyring(username):
"""Delete the password of a username."""
return keyring.delete_password(KEYRING_SYSTEM, username)
def underscore_to_camelcase(word, initial_capital=False):
"""Transform a word to camelCase."""
words = [x.capitalize() or "_" for x in word.split("_")]
if not initial_capital:
words[0] = words[0].lower()
return "".join(words)
|
Ahahahaaaaaa. The article is a FAKE one, like that about the Bor community last time. The two articles were written by a confused Gogrial thug who is worried by the imminent failure of the incompetent Kiir in the SPLM convention. This is why he rushes to ask for the suspension of the convention for two years. So he wants Kiir to declare war on Khartoum in order to suspend the convention. See how visionless they are.
|
import sys
import re
import types
import fnmatch
from collections import OrderedDict
from django.conf import settings
from django.core.urlresolvers import RegexURLPattern, RegexURLResolver
from django_js_utils import conf_jsutils
import six
class PatternsParser(object):
def __init__(self):
self._patterns = OrderedDict()
def parse(self, input):
self.handle_url_module(input)
def handle_url_module(self, module_name, prefix=""):
"""
Load the module and output all of the patterns
Recurse on the included modules
"""
if isinstance(module_name, six.string_types):
__import__(module_name)
root_urls = sys.modules[module_name]
patterns = root_urls.urlpatterns
elif isinstance(module_name, types.ModuleType):
root_urls = module_name
patterns = root_urls.urlpatterns
else:
root_urls = module_name
patterns = root_urls
def match(rule, target):
return re.match(rule, target.strip('^$'))
for pattern in patterns:
if issubclass(pattern.__class__, RegexURLPattern):
if any(match(k, prefix) for k in getattr(settings, 'URLS_EXCLUDE_PREFIX', [])):
continue
if getattr(settings, 'URLS_INCLUDE_PREFIX', []):
if not any(match(k, prefix) for k in getattr(settings, 'URLS_INCLUDE_PREFIX', [])):
continue
val = getattr(pattern, 'name', None) or ''
if any(match(k, pattern.regex.pattern) for k in getattr(settings, 'URLS_EXCLUDE_PATTERN', [])):
continue
if getattr(settings, 'URLS_INCLUDE_PATTERN', []):
if not any(match(k, pattern.regex.pattern) for k in getattr(settings, 'URLS_INCLUDE_PATTERN', [])):
continue
self.parse_pattern(pattern, prefix)
elif issubclass(pattern.__class__, RegexURLResolver):
if pattern.url_patterns:
self.handle_url_module(pattern.url_patterns, prefix=prefix+pattern.regex.pattern)
elif pattern.urlconf_name:
self.handle_url_module(pattern.urlconf_name, prefix=pattern.regex.pattern)
def parse_pattern(self, pattern, prefix):
full_url = prefix + pattern.regex.pattern
for chr in ("^", "$"):
full_url = full_url.replace(chr, "")
#handle kwargs, args
kwarg_matches = conf_jsutils.RE_KWARG.findall(full_url)
if kwarg_matches:
for el in kwarg_matches:
#prepare the output for JS resolver
full_url = full_url.replace(el[0], "<%s>" % el[1])
#after processing all kwargs try args
args_matches = conf_jsutils.RE_ARG.findall(full_url)
if args_matches:
for el in args_matches:
                full_url = full_url.replace(el, "<>") # replace with an empty parameter name
#unescape escaped chars which are not special sequences
full_url = re.sub(r'\\([^\dAZbBdDsSwW])', r'\1', full_url)
self._patterns[pattern.name] = "/" + full_url
@property
def patterns(self):
return self._patterns
|
Please find a list of our upcoming Webinars below. Click the title link for more details and to register.
Register Now! Agenda available soon.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import tornado.web
import tornado.ioloop
import tornado.httpserver
from tornado.escape import json_encode
from tornado.options import define, options, parse_command_line
from twitterick import emoji
from twitterick.limericker import write
from twitterick.database import get_connection
define("port", default=3058, help="run on the given port", type=int)
define("debug", default=False, help="run in debug mode")
define("xheaders", default=True, help="use X-headers")
define("cookie_secret", default="secret key", help="secure key")
class Application(tornado.web.Application):
def __init__(self):
handlers = [
(r"/", IndexHandler),
(r"/new", NewHandler),
(r"/recent", RecentHandler),
(r"/popular", PopularHandler),
(r"/([0-9]+)", TwitterickHandler),
(r"/like/([0-9]+)", LikeHandler),
]
settings = dict(
template_path=os.path.join(os.path.dirname(__file__), "templates"),
static_path=os.path.join(os.path.dirname(__file__), "static"),
xsrf_cookies=True,
xheaders=options.xheaders,
cookie_secret=options.cookie_secret,
debug=options.debug,
)
super(Application, self).__init__(handlers, ui_methods=emoji,
**settings)
self._db = get_connection()
@property
def db(self):
return self._db
class BaseHandler(tornado.web.RequestHandler):
@property
def db(self):
return self.application.db
def get_poems(self, poem_id=None, page=0, per_page=10, popular=False):
q = """
select
twittericks.id, twittericks.votes,
t1.tweet_id, t1.username, t1.body,
t2.tweet_id, t2.username, t2.body,
t3.tweet_id, t3.username, t3.body,
t4.tweet_id, t4.username, t4.body,
t5.tweet_id, t5.username, t5.body
from twittericks
join tweets as t1 on l1=t1.id
join tweets as t2 on l2=t2.id
join tweets as t3 on l3=t3.id
join tweets as t4 on l4=t4.id
join tweets as t5 on l5=t5.id
"""
args = []
if poem_id is not None:
q += "where twittericks.id=%s limit 1\n"
args += [poem_id]
else:
if popular:
q += "where votes > 0 order by votes desc, id desc"
else:
q += "order by id desc"
q += " offset %s limit %s"
args += [page * per_page, per_page]
with self.db as conn:
c = conn.cursor()
c.execute(q, args)
results = c.fetchall()
return [dict(poem_id=r[0], votes=r[1],
lines=[dict(tweet_id=r[2+i*3], username=r[3+i*3],
body=r[4+i*3]) for i in range(5)])
for r in results]
class IndexHandler(BaseHandler):
def get(self):
# Count the number of tweets.
with self.db as conn:
c = conn.cursor()
c.execute("select count(*) from tweets")
count = c.fetchone()
if count is None:
count = "many"
else:
count = count[0]
poems = self.get_poems()
# Get the result of the query.
if not len(poems):
self.render("noresults.html")
return
# Parse the poem and display the results.
self.render("index.html", title="Twitterick", poems=poems, count=count)
class NewHandler(BaseHandler):
def get(self):
with self.db as conn:
poem_id = write(conn.cursor())
self.redirect("/{0}".format(poem_id))
class RecentHandler(BaseHandler):
def get(self):
# Pagination.
page = self.get_argument("page", 0)
page = max([0, int(page)])
poems = self.get_poems(page=page)
# Get the result of the query.
if not len(poems):
self.render("noresults.html")
return
# Parse the poem and display the results.
self.render("poems.html", title="Recent Twittericks", poems=poems,
next_page=page+1, prev_page=page-1)
class PopularHandler(BaseHandler):
def get(self):
# Pagination.
page = self.get_argument("page", 0)
page = max([0, int(page)])
poems = self.get_poems(page=page, popular=True)
# Get the result of the query.
if not len(poems):
self.render("noresults.html")
return
# Parse the poem and display the results.
self.render("poems.html", title="Popular Twittericks", poems=poems,
next_page=page+1, prev_page=page-1)
class TwitterickHandler(BaseHandler):
def get(self, poem_id):
poems = self.get_poems(poem_id=poem_id)
# Get the result of the query.
if not len(poems):
self.render("noresults.html")
return
# Parse the poem and display the results.
self.render("poem.html", title="Twitterick #{0}".format(poem_id),
poem=poems[0])
class LikeHandler(BaseHandler):
def get(self, poem_id):
with self.db as conn:
c = conn.cursor()
c.execute("update twittericks set votes=votes+1 where id=%s "
"returning votes",
(poem_id, ))
votes = c.fetchone()
self.set_header("Content-Type", "application/json")
        if votes is None:
            self.set_status(404)
            self.write(json_encode(dict(message="Failure", votes=0)))
            self.finish()
            return
        self.write(json_encode(dict(message="Success", votes=votes[0])))
def main():
parse_command_line()
http_server = tornado.httpserver.HTTPServer(Application())
http_server.listen(options.port, address="0.0.0.0")
tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
main()
|
Strong Ditkin algebras without bounded relative units.
Feinstein, J.F.. "Strong Ditkin algebras without bounded relative units.." International Journal of Mathematics and Mathematical Sciences 22.2 (1999): 437-443. <http://eudml.org/doc/49036>.
|
"""Define an object to interact with the REST API."""
import logging
import re
import time
from asyncio import ensure_future
from .const import (
API_HOST,
API_POLL_PERIOD,
DEVICES_PATH,
DEVICE_HISTORY_PATH,
TIMER_PROGRAMS_PATH,
LOGIN_PATH,
WS_HOST,
)
from .errors import RequestError
from .websocket import OrbitWebsocket
_LOGGER = logging.getLogger(__name__)
class Client:
"""Define the API object."""
def __init__(
self, username: str, password: str, loop, session, async_callback
) -> None:
"""Initialize."""
self._username: str = username
self._password: int = password
self._ws_url: str = WS_HOST
self._token: str = None
self._websocket = None
self._loop = loop
self._session = session
self._async_callback = async_callback
self._devices = []
self._last_poll_devices = 0
self._timer_programs = []
self._last_poll_programs = 0
self._device_histories = dict()
self._last_poll_device_histories = 0
async def _request(
self, method: str, endpoint: str, params: dict = None, json: dict = None
) -> list:
"""Make a request against the API."""
url: str = f"{API_HOST}{endpoint}"
if not params:
params = {}
headers = {
"Accept": "application/json, text/plain, */*",
"Host": re.sub("https?://", "", API_HOST),
"Content-Type": "application/json; charset=utf-8;",
"Referer": API_HOST,
"Orbit-Session-Token": self._token or "",
}
headers["User-Agent"] = (
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/72.0.3626.81 Safari/537.36"
)
async with self._session.request(
method, url, params=params, headers=headers, json=json
) as resp:
try:
resp.raise_for_status()
return await resp.json(content_type=None)
except Exception as err:
raise RequestError(f"Error requesting data from {url}: {err}")
async def _refresh_devices(self, force_update=False):
now = time.time()
if force_update:
_LOGGER.info("Forcing device refresh")
elif now - self._last_poll_devices < API_POLL_PERIOD:
return
self._devices = await self._request(
"get", DEVICES_PATH, params={"t": str(time.time())}
)
self._last_poll_devices = now
async def _refresh_timer_programs(self, force_update=False):
now = time.time()
if force_update:
_LOGGER.debug("Forcing device refresh")
elif now - self._last_poll_programs < API_POLL_PERIOD:
return
self._timer_programs = await self._request(
"get", TIMER_PROGRAMS_PATH, params={"t": str(time.time())}
)
self._last_poll_programs = now
async def _refresh_device_history(self, device_id, force_update=False):
now = time.time()
if force_update:
_LOGGER.info("Forcing refresh of device history %s", device_id)
elif now - self._last_poll_device_histories < API_POLL_PERIOD:
return
device_history = await self._request(
"get",
DEVICE_HISTORY_PATH.format(device_id),
params={"t": str(time.time()), "page": str(1), "per-page": str(10),},
)
self._device_histories.update({device_id: device_history})
self._last_poll_device_histories = now
async def _async_ws_handler(self, data):
"""Process incoming websocket message."""
if self._async_callback:
ensure_future(self._async_callback(data))
async def login(self) -> bool:
"""Log in with username & password and save the token."""
url: str = f"{API_HOST}{LOGIN_PATH}"
json = {"session": {"email": self._username, "password": self._password}}
async with self._session.request("post", url, json=json) as resp:
try:
resp.raise_for_status()
response = await resp.json(content_type=None)
_LOGGER.debug("Logged in")
self._token = response["orbit_session_token"]
except Exception as err:
raise RequestError(f"Error requesting data from {url}: {err}")
if self._token is None:
return False
self._websocket = OrbitWebsocket(
token=self._token,
loop=self._loop,
session=self._session,
url=self._ws_url,
async_callback=self._async_ws_handler,
)
self._websocket.start()
return True
async def stop(self):
"""Stop the websocket."""
if self._websocket is not None:
await self._websocket.stop()
@property
async def devices(self):
"""Get all devices."""
await self._refresh_devices()
return self._devices
@property
async def timer_programs(self):
"""Get timer programs."""
await self._refresh_timer_programs()
return self._timer_programs
async def get_device(self, device_id, force_update=False):
"""Get device by id."""
await self._refresh_devices(force_update=force_update)
for device in self._devices:
if device.get("id") == device_id:
return device
return None
async def get_device_history(self, device_id, force_update=False):
"""Get device watering history by id."""
await self._refresh_device_history(device_id, force_update=force_update)
return self._device_histories.get(device_id)
async def update_program(self, program_id, program):
"""Update the state of a program"""
path = "{0}/{1}".format(TIMER_PROGRAMS_PATH, program_id)
json = {"sprinkler_timer_program": program}
await self._request("put", path, json=json)
async def send_message(self, payload):
"""Send a message via the websocket"""
await self._websocket.send(payload)
|
HOUSTON, TX – The City of Houston Office of Emergency Management (OEM) is encouraging Houston residents to take precautions as heavy rain causes street flooding throughout the city. The Houston Fire Department (HFD) reports that they have responded to over 150 motor vehicle accidents across the City since 5:30am, many of these related to the heavy rain.
The Houston area is under an Areal Flood Watch until 7:00am Friday as the potential exists for another round of heavy downpours over the next few hours. Portions of the City have experienced flash flooding as well.
Storms that form as part of this system may cause additional areas of street flooding and flash flooding throughout Houston which are not only a nuisance to drivers, but can also be very dangerous. Houston residents are urged to plan extra time in their commute, and to avoid driving through areas of high water.
Driving through high water is not only dangerous to drivers, but can also be dangerous to responders if vehicles become stuck.
Houstonians should monitor local television and radio stations, as well as the National Weather Service Houston/Galveston forecast office website at weather.gov/houston for up-to-the-minute weather information.
Rain gauge information and bayou levels can be found online at the Harris County Flood Control District’s Flood Warning System at harriscountyfws.org.
Traffic information, including a list of high water areas on highways can be found at houstontranstar.org.
The City of Houston is not reporting any significant impacts to City services as a result of these storms. All City offices are on normal schedules, and city business continues to be conducted as normal. The Houston Airport System (HAS) is asking travelers to check for flight delays by contacting their air carrier before departing for the airport, and to plan extra time getting to and from the airports. Information on delays and parking can be found at fly2houston.com.
For updates on the City’s response to severe weather, visit the City of Houston Emergency Information Center at houstontx.gov/emergency, or follow OEM on social media (Twitter – Facebook – Nextdoor).
|
#!/usr/bin/env python
# Copyright (c) 2013-2015 Quanta Research Cambridge, Inc.
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use, copy,
# modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import print_function
import sys
import os
import socket
import struct
import select
import time
import threading
import argparse
import netifaces
from adb import adb_commands
from adb import common
deviceAddresses = {}
def ip2int(addr):
return struct.unpack("!I", socket.inet_aton(addr))[0]
def int2ip(addr):
return socket.inet_ntoa(struct.pack("!I", addr))
def connect_with_adb(ipaddr,port):
global deviceAddresses
device_serial = '%s:%d' % (ipaddr,port)
cnt = 0
while cnt < 5:
try:
connection = adb_commands.AdbCommands.ConnectDevice(serial=device_serial)
except:
#print 'discover_tcp: connection error to', device_serial
pass
else:
if 'hostname.txt' in connection.Shell('ls /mnt/sdcard/'):
name = connection.Shell('cat /mnt/sdcard/hostname.txt').strip()
connection.Close()
print('discover_tcp: ', ipaddr, name)
deviceAddresses[ipaddr] = name
return
else:
print('discover_tcp: ', ipaddr, " /mnt/sdcard/hostname.txt not found")
deviceAddresses[ipaddr] = ipaddr
return
cnt = cnt+1
def open_adb_socket(dest_addr,port):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setblocking(0)
sock.connect_ex((dest_addr,port))
return sock
# non-Darwin version
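# The scanner opens many non-blocking sockets with connect_ex() and lets
# poll() report which connections completed (checked via SO_ERROR), so a
# whole subnet can be probed without waiting for each host to time out
# one by one.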
def do_work_poll(start, end, port, get_hostname):
print("scanning "+int2ip(start)+" to "+int2ip(end))
connected = []
total = end-start
READ_ONLY = select.POLLIN | select.POLLPRI | select.POLLHUP | select.POLLERR
READ_WRITE = READ_ONLY | select.POLLOUT
poller = select.poll()
while (start <= end):
fd_map = {}
while (start <= end):
try:
s = open_adb_socket(int2ip(start),port)
except:
break
else:
fd_map[s.fileno()] = (start,s)
start = start+1
poller.register(s, READ_WRITE)
time.sleep(0.2)
events = poller.poll(0.1)
for fd,flag in events:
(addr,sock) = fd_map[fd]
if sock.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR) == 0:
print('ADDCON', fd, int2ip(addr))
connected.append(int2ip(addr))
try:
fd_map_items = fd_map.iteritems()
except AttributeError:
fd_map_items = fd_map.items() # Python 3 compatibility
for fd,t in fd_map_items:
poller.unregister(t[1])
t[1].close()
sys.stdout.write("\r%d/%d" % (total-(end-start),total))
sys.stdout.flush()
print()
if get_hostname:
for c in connected:
connect_with_adb(c,port)
# Darwin version
def do_work_kqueue(start, end, port, get_hostname):
print("kqueue scanning "+int2ip(start)+" to "+int2ip(end))
connected = []
total = end-start
while (start <= end):
kq = select.kqueue()
fd_map = {}
kevents = []
while (start <= end):
try:
s = open_adb_socket(int2ip(start),port)
except:
break
else:
fd_map[s.fileno()] = (start,s)
start = start+1
kevents.append(select.kevent(s,filter=select.KQ_FILTER_WRITE))
kq.control(kevents,0,0)
time.sleep(0.2)
for k in kq.control([],len(kevents),0.1):
w = fd_map[k.ident][1]
addr = fd_map[w.fileno()][0]
if w.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR) == 0:
print('ADDCON2', k.ident, w.fileno(), int2ip(addr), fd_map[w.fileno()])
connected.append(int2ip(addr))
try:
fd_map_items = fd_map.iteritems()
except AttributeError:
fd_map_items = fd_map.items() # Python 3 compatibility
for fd,t in fd_map_items:
t[1].close()
sys.stdout.write("\r%d/%d" % (total-(end-start),total))
sys.stdout.flush()
print()
if get_hostname:
for c in connected:
connect_with_adb(c,port)
argparser = argparse.ArgumentParser("Discover Zedboards on a network")
argparser.add_argument('-n', '--network', help='xxx.xxx.xxx.xxx/N')
argparser.add_argument('-p', '--port', default=5555, help='Port to probe')
argparser.add_argument('-g', '--get_hostname', default=True, help='Get hostname with adb')
def do_work(start,end,port,get_hostname):
if sys.platform == 'darwin':
do_work_kqueue(start,end,port,get_hostname)
else:
do_work_poll(start,end,port,get_hostname)
def detect_network(network=None, port=5555, get_hostname=True):
global deviceAddresses
deviceAddresses = {}
if network:
nw = network.split("/")
start = ip2int(nw[0])
if len(nw) != 2:
print('Usage: discover_tcp.py ipaddr/prefix_width')
sys.exit(-1)
end = start + (1 << (32-int(nw[1])) ) - 2
do_work(start+1,end,port,get_hostname)
else:
for ifc in netifaces.interfaces():
ifaddrs = netifaces.ifaddresses(ifc)
if netifaces.AF_INET in ifaddrs.keys():
af_inet = ifaddrs[netifaces.AF_INET]
for i in af_inet:
if i.get('addr') == '127.0.0.1':
print('skipping localhost')
else:
addr = ip2int(i.get('addr'))
netmask = ip2int(i.get('netmask'))
start = addr & netmask
end = start + (netmask ^ 0xffffffff)
start = start+1
end = end-1
print((int2ip(start), int2ip(end)))
do_work(start, end,port,get_hostname)
if __name__ == '__main__':
options = argparser.parse_args()
detect_network(options.network,options.port,options.get_hostname)
|
By Dylan Thomas. For my Dad.
|
#!/usr/bin/env python
import requests
import pandas as pd
from FIPS_Reference import FIPS_Reference
# Load American POSTAL data
columns = ["country", "better_fips", "name", "state", "state_code", "county", "county_code", "subdivision",\
"subdivision_code", "latitude", "longitude", "accuracy"]
#data = pd.read_csv("US.txt", sep="\t", header=None, names=columns)
data = pd.read_table("US.txt", header=None, names=columns)
# print data.loc[data["state_code"] == 'CA']
print(data[:1])
def get_data(row, year, param_code):
params = {}
params["Query Type"] = "rawData"
params["Output Format"] = "AQCVS"
params["Parameter Code"] = param_code
params["Begin Date"] = str(year) + "0101"
params["End Date"] = str(year + 1) + "0101"
params["State Code"] = FIPS_Reference[row['state_code']]
params["County Code"] = '%03d' % int(row['county_code'])
    print(params)
    r = requests.get("https://aqs.epa.gov/api", params=params, auth=("[email protected] ", "saffronfrog61"))
get_data(data.iloc[0], 0, 0)
#print r.status_code
|
Make several copies and lock the bottom image so you have a block to crop the image back to.
Resize the canvas so 50 to 100 pixels of space surround on all sides.
Place a copy of the central image on both the right side and the left side.
Deform the central image to square up the horizontal band of water. I used the distort tool as opposed to simple skewing. Once you square up the image, replace the left image with a copy of the center image.
There will now be "too much" sky above due to the skewing. You will eliminate that later.
Use a blurring or smudging tool on the left and right images to create a blend effect over the central image and erase to make building edges line up.
Flatten the three images together and finish blending and smudging where they meet.
Repeat with the top and bottom edges using copies of the flattened, three-image montage.
Locate the locked original copy under the changes and crop the document to that original.
You will now have a nearly seamless repeat.
Repeat process as needed with new image to get blends at edges right.
To create slipping, I tiled the image on a larger space, flattened it, and cut out the middle row.
I then pasted two copies of the middle, joining their ends together and placed them to offset the design.
There was a little roughness at the point where the buildings met the water. I created a long 50 pixel high rectangle to cover the width of the page and applied the image as a pattern which I could shift up and down in the rectangle to get a good yellow/blue event horizon. I feathered the edges to create a blend effect and smooth the transition between the houses and water, and the sky and water.
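If you prefer to script the offset ("slipping") step rather than cut and paste rows by hand, the following is a minimal sketch using the Pillow library; the tutorial above was done in an image editor, and the file name tile.png is purely illustrative.
from PIL import Image, ImageChops

# Load an already edge-blended, seamless tile (hypothetical file name).
tile = Image.open("tile.png")
# Shift the tile by half its width; pixels pushed off the right edge wrap
# around to the left, giving the same wrap-around offset described above.
offset_tile = ImageChops.offset(tile, xoffset=tile.width // 2, yoffset=0)
offset_tile.save("tile_offset.png")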
Below is the final version.
|
"""
mbed CMSIS-DAP debugger
Copyright (c) 2016 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pyocd.debug.cache import RegisterCache
from pyocd.debug.context import DebugContext
from pyocd.coresight.cortex_m import (
CortexM,
CORE_REGISTER,
register_name_to_index,
is_psr_subregister,
sysm_to_psr_mask
)
from pyocd.core import memory_map
from pyocd.utility import conversion
from pyocd.utility import mask
import pytest
import logging
@pytest.fixture(scope='function')
def regcache(mockcore):
return RegisterCache(DebugContext(mockcore))
# Copy of the register list without composite registers.
CORE_REGS_NO_COMPOSITES = CORE_REGISTER.copy()
CORE_REGS_NO_COMPOSITES.pop('cfbp')
CORE_REGS_NO_COMPOSITES.pop('xpsr')
CORE_REGS_NO_COMPOSITES.pop('iapsr')
CORE_REGS_NO_COMPOSITES.pop('eapsr')
CORE_REGS_NO_COMPOSITES.pop('iepsr')
# Appropriate modifiers for masked registers - others modified by adding 7
REG_MODIFIER = {
'apsr': 0x30010000,
'epsr': 0x01000C00,
}
def get_modifier(r):
return REG_MODIFIER.get(r, 7)
def get_expected_reg_value(r):
i = register_name_to_index(r)
if is_psr_subregister(i):
return 0x55555555 & sysm_to_psr_mask(i)
if i < 0:
i += 100
return i + 1
def get_expected_cfbp():
return ((get_expected_reg_value('control') << 24) |
(get_expected_reg_value('faultmask') << 16) |
(get_expected_reg_value('basepri') << 8) |
get_expected_reg_value('primask'))
def get_expected_xpsr():
return (get_expected_reg_value('apsr') |
get_expected_reg_value('ipsr') |
get_expected_reg_value('epsr'))
class TestRegisterCache:
def set_core_regs(self, mockcore, modify=False):
for r in CORE_REGS_NO_COMPOSITES:
if modify:
modifier = get_modifier(r)
else:
modifier = 0
mockcore.write_core_registers_raw([r], [get_expected_reg_value(r) + modifier])
assert mockcore.read_core_registers_raw([r]) == [get_expected_reg_value(r) + modifier]
def test_r_1(self, mockcore, regcache):
assert regcache.read_core_registers_raw(['r0']) == [0] # cache initial value of 0
mockcore.write_core_registers_raw(['r0'], [1234]) # modify reg behind the cache's back
assert mockcore.read_core_registers_raw(['r0']) == [1234] # verify modified reg
assert regcache.read_core_registers_raw(['r0']) == [0] # should return cached 0 value
regcache.invalidate() # explicitly invalidate cache
assert mockcore.read_core_registers_raw(['r0']) == [1234] # verify modified reg
assert regcache.read_core_registers_raw(['r0']) == [1234] # now should return updated 1234 value
def test_run_token(self, mockcore, regcache):
assert regcache.read_core_registers_raw(['r0']) == [0] # cache initial value of 0
mockcore.write_core_registers_raw(['r0'], [1234]) # modify reg behind the cache's back
assert mockcore.read_core_registers_raw(['r0']) == [1234] # verify modified reg
assert regcache.read_core_registers_raw(['r0']) == [0] # should return cached 0 value
mockcore.run_token += 1 # bump run token to cause cache to invalidate
assert regcache.read_core_registers_raw(['r0']) == [1234] # now should return updated 1234 value
def test_reading_from_core(self, mockcore, regcache):
self.set_core_regs(mockcore)
for r in CORE_REGS_NO_COMPOSITES:
assert regcache.read_core_registers_raw([r]) == [get_expected_reg_value(r)]
def test_read_cached(self, mockcore, regcache):
self.set_core_regs(mockcore)
# cache all regs
regcache.read_core_registers_raw(CORE_REGS_NO_COMPOSITES.values())
# modify regs in mock core
self.set_core_regs(mockcore, True)
# cache should return original unmodified values
for r in CORE_REGS_NO_COMPOSITES:
assert regcache.read_core_registers_raw([r]) == [get_expected_reg_value(r)]
def test_read_cfbp(self, mockcore, regcache):
self.set_core_regs(mockcore)
assert regcache.read_core_registers_raw(['cfbp', 'control', 'faultmask']) == [
get_expected_cfbp(), get_expected_reg_value('control'), get_expected_reg_value('faultmask')
]
def test_read_xpsr(self, mockcore, regcache):
self.set_core_regs(mockcore)
assert regcache.read_core_registers_raw(['xpsr', 'ipsr', 'apsr', 'eapsr']) == [
get_expected_xpsr(), get_expected_reg_value('ipsr'),
get_expected_reg_value('apsr'), get_expected_reg_value('eapsr')
]
def test_read_cached_cfbp(self, mockcore, regcache):
self.set_core_regs(mockcore)
# cache it
regcache.read_core_registers_raw(['cfbp'])
# modify behind the cache's back
mockcore.write_core_registers_raw(['control', 'primask'], [0x55, 0xaa])
# cache should return original value
assert regcache.read_core_registers_raw(['cfbp']) == [get_expected_cfbp()]
def test_read_cached_xpsr(self, mockcore, regcache):
self.set_core_regs(mockcore)
# cache it
regcache.read_core_registers_raw(['xpsr'])
# modify behind the cache's back
mockcore.write_core_registers_raw(['ipsr', 'apsr'], [0x22, 0x10000000])
# cache should return original value
assert regcache.read_core_registers_raw(['xpsr']) == [get_expected_xpsr()]
def test_write_1(self, mockcore, regcache):
self.set_core_regs(mockcore)
assert mockcore.read_core_registers_raw(['r0']) == [get_expected_reg_value('r0')]
assert regcache.read_core_registers_raw(['r0']) == [get_expected_reg_value('r0')]
regcache.write_core_registers_raw(['r0'], [1234])
assert mockcore.read_core_registers_raw(['r0']) == [1234]
assert regcache.read_core_registers_raw(['r0']) == [1234]
def test_write_regs(self, mockcore, regcache):
self.set_core_regs(mockcore)
for r in CORE_REGS_NO_COMPOSITES:
regcache.write_core_registers_raw([r], [get_expected_reg_value(r) + get_modifier(r)])
for r in CORE_REGS_NO_COMPOSITES:
assert mockcore.read_core_registers_raw([r]) == [get_expected_reg_value(r) + get_modifier(r)]
def test_write_cfbp(self, mockcore, regcache):
self.set_core_regs(mockcore)
assert mockcore.read_core_registers_raw(['cfbp']) == [get_expected_cfbp()]
regcache.write_core_registers_raw(['control', 'primask'], [3, 19])
assert mockcore.read_core_registers_raw(['control', 'primask', 'cfbp']) == [
3, 19,
((3 << 24) | (get_expected_reg_value('faultmask') << 16) |
(get_expected_reg_value('basepri') << 8) | 19)
]
def test_write_xpsr(self, mockcore, regcache):
self.set_core_regs(mockcore)
assert mockcore.read_core_registers_raw(['xpsr']) == [get_expected_xpsr()]
regcache.write_core_registers_raw(['iapsr'], [0x10000022])
assert mockcore.read_core_registers_raw(['ipsr', 'apsr', 'iapsr', 'xpsr']) == [
0x22, 0x10000000, 0x10000022,
0x10000022 | get_expected_reg_value('epsr')
]
def test_write_full_xpsr(self, mockcore, regcache):
self.set_core_regs(mockcore)
assert mockcore.read_core_registers_raw(['xpsr']) == [get_expected_xpsr()]
regcache.write_core_registers_raw(['xpsr'], [0xffffffff])
assert mockcore.read_core_registers_raw(['ipsr', 'apsr', 'epsr', 'xpsr']) == [
CortexM.IPSR_MASK, CortexM.APSR_MASK, CortexM.EPSR_MASK,
0xffffffff
]
def test_invalid_reg_r(self, regcache):
with pytest.raises(ValueError):
regcache.read_core_registers_raw([132423])
def test_invalid_reg_w(self, regcache):
with pytest.raises(ValueError):
regcache.write_core_registers_raw([132423], [1234])
def test_invalid_fpu_reg_r(self, mockcore, regcache):
mockcore.has_fpu = False
with pytest.raises(ValueError):
regcache.read_core_registers_raw(['s1'])
def test_invalid_fpu_reg_w(self, mockcore, regcache):
mockcore.has_fpu = False
with pytest.raises(ValueError):
regcache.write_core_registers_raw(['s1'], [1.234])
|
Trigger Point Acupuncture is a modern approach to acupuncture that combines Traditional Chinese Medicine with Western physical medicine techniques. It is a more aggressive, direct manipulation of muscular tightness (knots) – known as trigger points. It seeks to generate repeated, involuntary twitching from the suspect muscle or muscle group and usually leads to an immediate reduction of the tightness as well as a reduction or elimination of the related problems.
|
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import pytest
from django.utils.translation import get_language
from django.urls import reverse
from pootle.core.delegate import revision
from pootle_app.views.index.index import (
COOKIE_NAME, IndexView, WelcomeView)
from pootle_score.display import TopScoreDisplay
@pytest.mark.django_db
def test_view_index(client, rf, request_users, language0):
user = request_users["user"]
client.login(
username=user.username,
password=request_users["password"])
response = client.get("")
if not user.is_authenticated:
assert response.status_code == 200
assert isinstance(response.context["view"], WelcomeView)
else:
assert response.status_code == 302
assert response["Location"] == reverse("pootle-projects-browse")
request = rf.get("")
request.user = user
request.COOKIES[COOKIE_NAME] = language0.code
response = IndexView.as_view()(request=request)
if not user.is_authenticated:
assert response.status_code == 200
else:
assert response.status_code == 302
assert response["Location"] == reverse(
"pootle-language-browse",
kwargs=dict(language_code=language0.code))
@pytest.mark.django_db
def test_view_welcome(client, member, system, project_set):
response = client.get(reverse('pootle-home'))
assert isinstance(response.context["top_scorers"], TopScoreDisplay)
assert isinstance(response.context["view"], WelcomeView)
assert response.context["view"].request_lang == get_language()
assert (
response.context["view"].project_set.directory
== project_set.directory)
assert (
response.context["view"].revision
== revision.get(project_set.directory.__class__)(
project_set.directory).get(key="stats"))
assert (
response.context["view"].cache_key
== (
"%s.%s.%s"
% (response.wsgi_request.user.username,
response.context["view"].revision,
get_language())))
|
When I stop to think what He’s done for me, How can I not give him my all.
He has promised to guide me all the journey thru, How can I not serve the Lord.
Lord, I need you again today.
|
import inspect
import sys
import traceback
'''
This prints the full stack trace of the current exception, from the point
where it was raised (using the frames returned by
inspect.getinnerframes(sys.exc_info()[2]), below), together with the local
variables of every frame.
It can be useful, e.g., to understand code that makes many calls
or to get information for debugging exceptions.
'''
# reference:
# https://gist.github.com/diosmosis/1148066
def get_exception_info():
# this variable is never used. it exists so we can detect if a frame is
# referencing this specific function.
__lgw_marker_local__ = 0
value_to_string = str
frame_template = ' File "%s", line %i, in %s\n %s\n'
log_file = []
# iterate through the frames in reverse order so we print the
# most recent frame first
frames = inspect.getinnerframes(sys.exc_info()[2])
for frame_info in reversed(frames):
f_locals = frame_info[0].f_locals
# if there's a local variable named __lgw_marker_local__, we assume
# the frame is from a call of this function, 'wrapper', and we skip
# it. Printing these frames won't help determine the cause of an
# exception, so skipping it reduces clutter.
if '__lgw_marker_local__' in f_locals:
continue
# log the frame information
log_file.append(frame_template %
(frame_info[1], frame_info[2], frame_info[3], frame_info[4][0].lstrip()))
# log every local variable of the frame
for k, v in f_locals.items():
log_file.append(' %s = %s\n' % (k, value_to_string(v)))
log_file.append('\n')
return ''.join(log_file)
try:
print('Hey!')
raise Exception('forced error!')
except:
exc_info = get_exception_info()
print(exc_info)
|
Heronemus, Nicole Ann was born in 1988 and she registered to vote, giving her address as 645 Night Shade CT, NEW CASTLE, Garfield County, CO. Her voter ID number is 600482770.
Heronemus, Stevan Gene was born in 1949 and he registered to vote, giving his address as 1160 E 5Th ST TRLR 2, DELTA, Delta County, CO. His voter ID number is 600876745.
Heronimus, Benjamin Thomas was born in 2000 and he registered to vote, giving his address as 6065 W Hoover LN, LITTLETON, Jefferson County, CO. His voter ID number is 601591246.
Heronimus, Jacquelyn Mary was born in 1943 and she registered to vote, giving her address as 1717 Stove Prairie CIR, LOVELAND, Larimer County, CO. Her voter ID number is 1626626.
Heronimus, John Robert was born in 1970 and he registered to vote, giving his address as 6296 W Long DR, LITTLETON, Jefferson County, CO. His voter ID number is 4151335.
Heronimus, Laura Lea was born in 1974 and she registered to vote, giving her address as 16327 Josephine PL, THORNTON, Adams County, CO. Her voter ID number is 3929317.
Heronimus, Marcy M was born in 1972 and she registered to vote, giving her address as 6065 W Hoover LN, LITTLETON, Jefferson County, CO. Her voter ID number is 4281576.
Heronimus, Timothy John was born in 1967 and he registered to vote, giving his address as 2740 W 144Th CT, BROOMFIELD, Broomfield County, CO. His voter ID number is 3906811.
Heron-Pusey, Thachana A was born in 1990 and registered to vote, giving the address as 2669 Abbey RD, STEAMBOAT SPRINGS, Routt County, CO. Heron-Pusey voter ID number is 601463689.
Heros, Alex Dagoberto was born in 1969 and he registered to vote, giving his address as 100 E Meadow DR # 310, VAIL, Eagle County, CO. His voter ID number is 601173671.
Herosik, Theresa was born in 1954 and she registered to vote, giving her address as 10150 E Virginia AVE UNIT F17-206, DENVER, Denver County, CO. Her voter ID number is 2768614.
Herota, Alexander Norekage was born in 2000 and he registered to vote, giving his address as 181 Springfield ST, GYPSUM, Eagle County, CO. His voter ID number is 601657554.
Herota, Elizabeth Anne was born in 1975 and she registered to vote, giving her address as 181 Springfield ST, GYPSUM, Eagle County, CO. Her voter ID number is 601657583.
Heroth, Claudia Jean was born in 1950 and she registered to vote, giving her address as 11306 Jasmine ST, THORNTON, Adams County, CO. Her voter ID number is 7070017.
Herout, Brent Lee was born in 1970 and he registered to vote, giving his address as 1442 Tesla DR, COLO SPRINGS, El Paso County, CO. His voter ID number is 600631432.
Herout, Melanie Beth was born in 1974 and she registered to vote, giving her address as 1442 Tesla DR, COLO SPRINGS, El Paso County, CO. Her voter ID number is 231693.
Heroux, Albert Laureat was born in 1935 and he registered to vote, giving his address as 6571 S Hoyt WAY, LITTLETON, Jefferson County, CO. His voter ID number is 4110023.
Heroux, Alecia Joanne was born in 1981 and she registered to vote, giving her address as 27363 E Euclid DR, AURORA, Arapahoe County, CO. Her voter ID number is 751457.
Heroux, Andre Mark was born in 1955 and he registered to vote, giving his address as 7451 Back Stretch DR, WELLINGTON, Larimer County, CO. His voter ID number is 4130676.
Heroux, Ann Colleen was born in 1955 and she registered to vote, giving her address as 711 S Simms ST, LAKEWOOD, Jefferson County, CO. Her voter ID number is 2709318.
Heroux, Blanche Lorraine was born in 1935 and she registered to vote, giving her address as 6571 S Hoyt WAY, LITTLETON, Jefferson County, CO. Her voter ID number is 4110022.
Heroux, Caroline Rose was born in 1992 and she registered to vote, giving her address as 503 S Whitcomb ST, FORT COLLINS, Larimer County, CO. Her voter ID number is 600519229.
Heroux, Chelsea Lauren was born in 1979 and she registered to vote, giving her address as 12936 Kearney ST, THORNTON, Adams County, CO. Her voter ID number is 8038341.
Heroux, Christopher Scott was born in 1958 and he registered to vote, giving his address as 8981 E 29Th AVE, DENVER, Denver County, CO. His voter ID number is 600811738.
Heroux, Colin James was born in 1995 and he registered to vote, giving his address as 3670 S Lincoln ST # B118, ENGLEWOOD, Arapahoe County, CO. His voter ID number is 601814026.
Heroux, Elisa Christine was born in 1962 and she registered to vote, giving her address as 8981 E 29Th AVE, DENVER, Denver County, CO. Her voter ID number is 600792990.
Heroux, Jane L was born in 1945 and she registered to vote, giving her address as 1580 Outrider WAY, MONUMENT, El Paso County, CO. Her voter ID number is 81601.
Heroux, Jeffrey Dean was born in 1979 and he registered to vote, giving his address as 27363 E Euclid DR, AURORA, Arapahoe County, CO. His voter ID number is 927055.
Heroux, Jonathan Tucker was born in 2000 and he registered to vote, giving his address as 1564 Prairie Falcon LN, BROOMFIELD, Broomfield County, CO. His voter ID number is 601887172.
Heroux, Jon Paul was born in 1967 and he registered to vote, giving his address as 9151 Kenwood CT, HIGHLANDS RANCH, Douglas County, CO. His voter ID number is 2809288.
Heroux, Karen Sue was born in 1974 and she registered to vote, giving her address as 1916 Diana DR, LOVELAND, Larimer County, CO. Her voter ID number is 962868.
Heroux, Katherine Lynn was born in 1987 and she registered to vote, giving her address as 612 Republic DR, FORT COLLINS, Larimer County, CO. Her voter ID number is 600494871.
Heroux, Kathleen Ann was born in 1949 and she registered to vote, giving her address as 2748 La Strada Grande HTS, COLO SPRINGS, El Paso County, CO. Her voter ID number is 445622.
Heroux, Lindsey Morgan was born in 1984 and she registered to vote, giving her address as 7240 W Custer AVE # 114, LAKEWOOD, Jefferson County, CO. Her voter ID number is 601756489.
Heroux, Maddie was born in 1998 and she registered to vote, giving her address as 1564 Prairie Falcon LN, BROOMFIELD, Broomfield County, CO. Her voter ID number is 601402096.
Heroux, Mark Dion was born in 1980 and he registered to vote, giving his address as 7240 W Custer AVE # 114, LAKEWOOD, Jefferson County, CO. His voter ID number is 601756513.
Heroux, Martin Paul was born in 1986 and he registered to vote, giving his address as 5101 S Rio Grande ST # 1-302, LITTLETON, Arapahoe County, CO. His voter ID number is 601053520.
Heroux, Mary Beth was born in 1957 and she registered to vote, giving her address as 15200 Logging Canyon Road, WESTON, Las Animas County, CO. Her voter ID number is 600666301.
Heroux, Matthew Bp was born in 1960 and he registered to vote, giving his address as 840 University DR, ESTES PARK, Larimer County, CO. His voter ID number is 543863.
Heroux, Pamela Kay was born in 1956 and she registered to vote, giving her address as 3171 Fairmont DR # 10C, WELLINGTON, Larimer County, CO. Her voter ID number is 4273554.
Heroux, Paul D was born in 1954 and he registered to vote, giving his address as 7595 Stuart ST, WESTMINSTER, Adams County, CO. His voter ID number is 6817845.
Heroux, Paul Phillippe was born in 1981 and he registered to vote, giving his address as 3300 W Union AVE, DENVER, Denver County, CO. His voter ID number is 867641.
Heroux, P Jonathan was born in 1965 and he registered to vote, giving his address as 2795 W 115Th DR, WESTMINSTER, Adams County, CO. His voter ID number is 3920031.
Heroux, Ronald Edmond was born in 1941 and he registered to vote, giving his address as 2748 La Strada Grande HTS, COLO SPRINGS, El Paso County, CO. His voter ID number is 441978.
Heroux, Ryan Christopher was born in 1989 and he registered to vote, giving his address as 2330 S Kearney ST APT 405, DENVER, Denver County, CO. His voter ID number is 200210675.
Heroux, Sarah Katherine was born in 1995 and she registered to vote, giving her address as 1209 N Pennsylvania ST APT 16, DENVER, Denver County, CO. Her voter ID number is 601679547.
Heroux, Shane was born in 1978 and he registered to vote, giving his address as 12936 Kearney ST, THORNTON, Adams County, CO. His voter ID number is 601015703.
Heroux, Stryker Mitchell was born in 1998 and he registered to vote, giving his address as 55 County Rd 5141S, TABERNASH, Grand County, CO. His voter ID number is 601374642.
Heroux, Terre Lynn was born in 1958 and she registered to vote, giving her address as 1107 Rudd AVE, CANON CITY, Fremont County, CO. Her voter ID number is 601607629.
Heroux, Thad Mitchell was born in 1965 and he registered to vote, giving his address as 55 County Rd 5141S, TABERNASH, Grand County, CO. His voter ID number is 8530286.
Heroy, Barbara M was born in 1940 and she registered to vote, giving her address as 1858 County Road 205, DURANGO, La Plata County, CO. Her voter ID number is 4926796.
Heroy, Bayard P was born in 1940 and he registered to vote, giving his address as 1858 County Road 205, DURANGO, La Plata County, CO. His voter ID number is 4926797.
Heroy, Cody Michael was born in 1993 and he registered to vote, giving his address as 1858 County Road 205, DURANGO, La Plata County, CO. His voter ID number is 600420984.
Heroy, Laurie Anne was born in 1954 and she registered to vote, giving her address as 6911 W 3Rd ST # 814, GREELEY, Weld County, CO. Her voter ID number is 601122086.
Heroy, Matthew Scott was born in 1982 and he registered to vote, giving his address as 996 Wolf Creek DR, LONGMONT, Boulder County, CO. His voter ID number is 200239906.
Heroy, Michael Thomas was born in 1951 and he registered to vote, giving his address as 326 44Th AVE, GREELEY, Weld County, CO. His voter ID number is 6325165.
Heroy, Steffany Annmarie was born in 1991 and she registered to vote, giving her address as 401 Cherry CT, AULT, Weld County, CO. Her voter ID number is 200269454.
Heroy, William Bayard Iii was born in 1972 and he registered to vote, giving his address as 1858 County Road 205, DURANGO, La Plata County, CO. His voter ID number is 600079414.
Heroze, Colorado M was born in 1997 and registered to vote, giving the address as 1001 W Denver AVE APT A, GUNNISON, Gunnison County, CO. Heroze's voter ID number is 601882775.
Herpel, Edward Phillip Iii was born in 1948 and he registered to vote, giving his address as 19275 W 87Th LN, ARVADA, Jefferson County, CO. His voter ID number is 601309509.
Herpel, Elizabeth Ann was born in 1997 and she registered to vote, giving her address as 4340 Ericson DR APT 2, COLO SPRINGS, El Paso County, CO. Her voter ID number is 601736392.
Herpel, Jeffrey Paul was born in 1976 and he registered to vote, giving his address as 249 Buckeye AVE, JOHNSTOWN, Weld County, CO. His voter ID number is 8096805.
Herpel, Mark Adam was born in 1973 and he registered to vote, giving his address as 1099 Main AVE STE 402, DURANGO, La Plata County, CO. His voter ID number is 601360506.
Herpel, Owen Edward was born in 1999 and he registered to vote, giving his address as 1733 Maple ST # 110B, GOLDEN, Jefferson County, CO. His voter ID number is 601856621.
Herpel, Paul Michael was born in 1985 and he registered to vote, giving his address as 1912 Fishermans LN, SILVERTHORNE, Summit County, CO. His voter ID number is 601957219.
Herpel, Susan Anne Oristaglio was born in 1946 and she registered to vote, giving her address as 19275 W 87Th LN, ARVADA, Jefferson County, CO. Her voter ID number is 601309530.
Herpers, John Godfrey was born in 1977 and he registered to vote, giving his address as 27 Martin LN, ENGLEWOOD, Arapahoe County, CO. His voter ID number is 2632816.
Herpers, Kristen Van Houten was born in 1987 and she registered to vote, giving her address as 27 Martin LN, ENGLEWOOD, Arapahoe County, CO. Her voter ID number is 600601747.
Herpin, Linda E was born in 1945 and she registered to vote, giving her address as 532 Potter CT, COLO SPRINGS, El Paso County, CO. Her voter ID number is 306244.
Herpin, William Bernard Jr was born in 1943 and he registered to vote, giving his address as 532 Potter CT, COLO SPRINGS, El Paso County, CO. His voter ID number is 305299.
Herpmann, Melanie Kay was born in 1982 and she registered to vote, giving her address as 1738 Yaupon AVE, BOULDER, Boulder County, CO. Her voter ID number is 8061711.
Herpmann, Soren George was born in 1978 and he registered to vote, giving his address as 1738 Yaupon AVE, BOULDER, Boulder County, CO. His voter ID number is 8096809.
Herppich, Victoria Ann was born in 1988 and she registered to vote, giving her address as 10 S Sherman ST APT 211, DENVER, Denver County, CO. Her voter ID number is 601113039.
Herquinigo-West, Eliana was born in 1960 and she registered to vote, giving her address as 23432 Broadmoor DR, PARKER, Douglas County, CO. Her voter ID number is 5869735.
Herr, Alec Fulton was born in 1989 and he registered to vote, giving his address as 4905 Osage DR APT 107, BOULDER, Boulder County, CO. His voter ID number is 601045952.
Herr, Andrew Dean was born in 1982 and he registered to vote, giving his address as 4891 N Lowell BLVD, DENVER, Denver County, CO. His voter ID number is 600958932.
Herr, Andrew Hans was born in 1988 and he registered to vote, giving his address as 2087 Trail Stone CT, CASTLE ROCK, Douglas County, CO. His voter ID number is 5835701.
Herr, Andrew Jacob was born in 1988 and he registered to vote, giving his address as 320 N Sherwood ST, FORT COLLINS, Larimer County, CO. His voter ID number is 601401496.
Herr, Angela Christina was born in 1984 and she registered to vote, giving her address as 4986 Fawn Ridge WAY, CASTLE ROCK, Douglas County, CO. Her voter ID number is 5649587.
Herr, Anna Christine was born in 1989 and she registered to vote, giving her address as 2087 Trail Stone CT, CASTLE ROCK, Douglas County, CO. Her voter ID number is 600181765.
Herr, Antonia Heitmann was born in 1996 and she registered to vote, giving her address as 7674 E Mercer PL, DENVER, Denver County, CO. Her voter ID number is 601006019.
Herr, April Irene was born in 1969 and she registered to vote, giving her address as 3716 Lancaster DR, PUEBLO, Pueblo County, CO. Her voter ID number is 3066619.
Herr, Ardith Anne was born in 1930 and she registered to vote, giving her address as 1549 Upper Bear Creek RD, EVERGREEN, Clear Creek County, CO. Her voter ID number is 5022229.
Herr, Ariel Peter was born in 1939 and he registered to vote, giving his address as 1613 Red Poppy DR, BRIGHTON, Adams County, CO. His voter ID number is 6926076.
Herr, Benjamin David was born in 1987 and he registered to vote, giving his address as 100 N Steele ST APT 407, DENVER, Denver County, CO. His voter ID number is 601797882.
Herr, Ben Song Hui was born in 1953 and she registered to vote, giving her address as 3701 Stratford CT, FORT COLLINS, Larimer County, CO. Her voter ID number is 600897114.
Herr, Brian Kirk was born in 1958 and he registered to vote, giving his address as 3810 Donnington CIR, CASTLE ROCK, Douglas County, CO. His voter ID number is 601044790.
Herr, Bruce B was born in 1949 and he registered to vote, giving his address as 2712 Maya WAY, MONTROSE, Montrose County, CO. His voter ID number is 5357505.
Herr, Casey Eugene was born in 1974 and he registered to vote, giving his address as 73 Esther DR, COLO SPRINGS, El Paso County, CO. His voter ID number is 600944223.
Herr, Catherine Louise was born in 1945 and she registered to vote, giving her address as 435 Dittmer AVE, PUEBLO, Pueblo County, CO. Her voter ID number is 3088591.
Herr, Charles Nicholas was born in 1992 and he registered to vote, giving his address as 1020 Swift Gulch RD # 3-004, AVON, Eagle County, CO. His voter ID number is 601597405.
Herr, Christian Ryan was born in 1992 and he registered to vote, giving his address as 7493 Waterman WAY, COLO SPRINGS, El Paso County, CO. His voter ID number is 601452969.
Herr, Christine R was born in 1970 and she registered to vote, giving her address as 2575 S Syracuse WAY # M304, DENVER, Arapahoe County, CO. Her voter ID number is 842723.
Herr, Christopher David was born in 1962 and he registered to vote, giving his address as 4560 Granby CIR, COLO SPRINGS, El Paso County, CO. His voter ID number is 200107649.
Herr, Christopher Jordan was born in 1988 and he registered to vote, giving his address as 2402 S Winona CT, DENVER, Denver County, CO. His voter ID number is 200155576.
Herr, Christopher Ralph was born in 1969 and he registered to vote, giving his address as 5223 Olde Stage RD, BOULDER, Boulder County, CO. His voter ID number is 8096815.
Herr, Cynthia Marie was born in 1990 and she registered to vote, giving her address as 2863 S Vrain ST, DENVER, Denver County, CO. Her voter ID number is 200056688.
Herr, Dale Jerome was born in 1968 and he registered to vote, giving his address as 24336 Ben Kelly RD, ELBERT, Elbert County, CO. His voter ID number is 600864497.
Herr, Daniel Brian was born in 1974 and he registered to vote, giving his address as 4471 N Elm CT, DENVER, Denver County, CO. His voter ID number is 2610396.
Herr, Daniel Buskirk was born in 1990 and he registered to vote, giving his address as 3420 E 30Th AVE, DENVER, Denver County, CO. His voter ID number is 601345576.
Herr, Daniel Gary was born in 1981 and he registered to vote, giving his address as 4986 Fawn Ridge WAY, CASTLE ROCK, Douglas County, CO. His voter ID number is 5941063.
Herr, Daniel Michael was born in 1981 and he registered to vote, giving his address as 18766 W 84Th PL, ARVADA, Jefferson County, CO. His voter ID number is 601713111.
Herr, David Clifton was born in 1974 and he registered to vote, giving his address as 7437 Eagle Rock DR, LITTLETON, Douglas County, CO. His voter ID number is 600261967.
Herr, David Lee was born in 1967 and he registered to vote, giving his address as 3315 13Th ST, BOULDER, Boulder County, CO. His voter ID number is 8096818.
Herr, David Stanley was born in 1943 and he registered to vote, giving his address as 643 W Pagosa DR, GRAND JUNCTION, Mesa County, CO. His voter ID number is 2253544.
Herr, Dean W was born in 1959 and he registered to vote, giving his address as 2382 Bayberry LN, CASTLE ROCK, Douglas County, CO. His voter ID number is 5726395.
Herr, Deborah Lesley was born in 1964 and she registered to vote, giving her address as 4560 Granby CIR, COLO SPRINGS, El Paso County, CO. Her voter ID number is 601419530.
Herr, Denney Charlene was born in 1959 and she registered to vote, giving her address as 908 Lochview CT, FORT COLLINS, Larimer County, CO. Her voter ID number is 600903099.
Herr, Diedre Marlene was born in 1964 and she registered to vote, giving her address as 2016 61St AVE, GREELEY, Weld County, CO. Her voter ID number is 6361878.
Herr, Donald Richard was born in 1946 and he registered to vote, giving his address as 463 Snowshoe LN, HARTSEL, Park County, CO. His voter ID number is 548480.
Herr, Douglas Brent was born in 1968 and he registered to vote, giving his address as 207 S Montecito DR, PUEBLO WEST, Pueblo County, CO. His voter ID number is 3094800.
Herr, Dwight Albert was born in 1953 and he registered to vote, giving his address as 1002 Dry Creek DR, LYONS, Larimer County, CO. His voter ID number is 1577037.
Herr, Dylan Robert was born in 1995 and he registered to vote, giving his address as 2016 61St AVE, GREELEY, Weld County, CO. His voter ID number is 601346400.
Herr, Elicia Heleen was born in 1960 and she registered to vote, giving her address as 882 County Rd 54, GRANBY, Grand County, CO. Her voter ID number is 600632225.
Herr, Elim was born in 1983 and he registered to vote, giving his address as 4100 N Albion ST APT 875, DENVER, Denver County, CO. His voter ID number is 601455896.
Herr, Elizabeth W was born in 1959 and she registered to vote, giving her address as 5430 S Krameria ST, GREENWOOD VLG, Arapahoe County, CO. Her voter ID number is 783431.
Herr, Emily Christine was born in 2000 and she registered to vote, giving her address as 5320 Williams Village North Hall, BOULDER, Boulder County, CO. Her voter ID number is 601870644.
Herr, Emily Fay was born in 1989 and she registered to vote, giving her address as 546 W 116Th PL, NORTHGLENN, Adams County, CO. Her voter ID number is 601634053.
Herr, Emily Stella was born in 1977 and she registered to vote, giving her address as 14378 Hanover ST, BRIGHTON, Adams County, CO. Her voter ID number is 200077599.
Herr, Eric Martin was born in 1976 and he registered to vote, giving his address as 1220 N Lipan ST, DENVER, Denver County, CO. His voter ID number is 2872139.
Herr, Gary B was born in 1956 and he registered to vote, giving his address as 5690 E Tabor DR, CASTLE ROCK, Douglas County, CO. His voter ID number is 5856651.
Herr, Gary W was born in 1956 and he registered to vote, giving his address as 130 Morgan DR # A, EDWARDS, Eagle County, CO. His voter ID number is 6691223.
Herr, Gayle A was born in 1964 and she registered to vote, giving her address as 4860 Pikes Peak HWY, CASCADE, El Paso County, CO. Her voter ID number is 422393.
Herr, Georgeann Marie was born in 1993 and she registered to vote, giving her address as 2460 W 29Th AVE UNIT 301, DENVER, Denver County, CO. Her voter ID number is 601924963.
Herr, Hannah Lee was born in 1998 and she registered to vote, giving her address as 720 Illinois ST, GOLDEN, Jefferson County, CO. Her voter ID number is 600893767.
Herr, Harold Francis was born in 1950 and he registered to vote, giving his address as 2716 S Heather Gardens WAY, AURORA, Arapahoe County, CO. His voter ID number is 720631.
Herr, Heather Ann was born in 1982 and she registered to vote, giving her address as 809 E High ST, COLO SPRINGS, El Paso County, CO. Her voter ID number is 601473313.
Herr, Heather Renee was born in 1975 and she registered to vote, giving her address as 438 S Gilia DR, PUEBLO WEST, Pueblo County, CO. Her voter ID number is 6369499.
Herr, Isabella Nkaug Shoua Paa was born in 1999 and she registered to vote, giving her address as 11841 E 118Th AVE, HENDERSON, Adams County, CO. Her voter ID number is 601627956.
Herr, Jacob Daniel was born in 1992 and he registered to vote, giving his address as 1704 E 19Th ST, PUEBLO, Pueblo County, CO. His voter ID number is 601784562.
Herr, Jacob Frederick was born in 1942 and he registered to vote, giving his address as 529 Panorama WAY, CRESTONE, Saguache County, CO. His voter ID number is 200160113.
Herr, Jane Dulin was born in 1992 and she registered to vote, giving her address as 1037 E 20Th AVE, DENVER, Denver County, CO. Her voter ID number is 601748885.
Herr, Janelle Maureen was born in 1985 and she registered to vote, giving her address as 4100 N Albion ST UNIT 303, DENVER, Denver County, CO. Her voter ID number is 601591950.
Herr, Janet Mcgovern was born in 1959 and she registered to vote, giving her address as 3810 Donnington CIR, CASTLE ROCK, Douglas County, CO. Her voter ID number is 601042154.
Herr, Janithe Ann was born in 1941 and she registered to vote, giving her address as 1613 Red Poppy DR, BRIGHTON, Adams County, CO. Her voter ID number is 6904278.
Herr, Jennifer Leanne was born in 1971 and she registered to vote, giving her address as 102 Woodstock ST, FRUITA, Mesa County, CO. Her voter ID number is 2326926.
Herr, Jesse Lynn was born in 1988 and she registered to vote, giving her address as 19292 E 49Th AVE, DENVER, Denver County, CO. Her voter ID number is 601604947.
Herr, Jill Susan was born in 1956 and she registered to vote, giving her address as 5690 E Tabor DR, CASTLE ROCK, Douglas County, CO. Her voter ID number is 5747087.
Herr, Jodie Ray was born in 1976 and she registered to vote, giving her address as 3500 Montrose ST, EVANS, Weld County, CO. Her voter ID number is 601099863.
Herr, John Allen was born in 1969 and he registered to vote, giving his address as 8076 S Vance CT, LITTLETON, Jefferson County, CO. His voter ID number is 4209055.
Herr, John Edward was born in 1991 and he registered to vote, giving his address as 3430 N Vallejo ST, DENVER, Denver County, CO. His voter ID number is 601181055.
Herr, John Franklin Jr was born in 1943 and he registered to vote, giving his address as 4535 Red Rock DR, LARKSPUR, Douglas County, CO. His voter ID number is 5731714.
Herr, John Robert was born in 1991 and he registered to vote, giving his address as 4571 N Vallejo ST, DENVER, Denver County, CO. His voter ID number is 601841666.
Herr, John Ross was born in 1987 and he registered to vote, giving his address as 12994 W 64Th DR # A1, ARVADA, Jefferson County, CO. His voter ID number is 200101519.
Herr, Jon Albert was born in 1946 and he registered to vote, giving his address as 3701 Stratford CT, FORT COLLINS, Larimer County, CO. His voter ID number is 600896507.
Herr, Joseph A was born in 1954 and he registered to vote, giving his address as 648 Augusta DR, LEADVILLE, Lake County, CO. His voter ID number is 8463187.
Herr, Joseph Andrew was born in 1984 and he registered to vote, giving his address as 2715 Iris AVE # 2, BOULDER, Boulder County, CO. His voter ID number is 200196517.
Herr, Joseph Chung Shiang was born in 1989 and he registered to vote, giving his address as 546 W 116Th PL, NORTHGLENN, Adams County, CO. His voter ID number is 601819936.
Herr, Joshua Logan was born in 1992 and he registered to vote, giving his address as 2001 N Lincoln ST UNIT 912, DENVER, Denver County, CO. His voter ID number is 600980441.
Herr, Judith Justin was born in 1965 and she registered to vote, giving her address as 3315 13Th ST, BOULDER, Boulder County, CO. Her voter ID number is 8096827.
Herr, Judith Rae was born in 1945 and she registered to vote, giving her address as 643 W Pagosa DR, GRAND JUNCTION, Mesa County, CO. Her voter ID number is 2360058.
Herr, Juliana Marie was born in 1959 and she registered to vote, giving her address as 130 Morgan DR # A, EDWARDS, Eagle County, CO. Her voter ID number is 6698729.
Herr, Juliann Olivia was born in 1998 and she registered to vote, giving her address as 4560 Granby CIR, COLO SPRINGS, El Paso County, CO. Her voter ID number is 601712417.
Herr, Julie Ann was born in 1969 and she registered to vote, giving her address as 3561 Ward RD, WHEAT RIDGE, Jefferson County, CO. Her voter ID number is 4226753.
Herr, Kalina S was born in 1966 and she registered to vote, giving her address as 13723 W 64Th DR, ARVADA, Jefferson County, CO. Her voter ID number is 4083787.
Herr, Karen Rae was born in 1953 and she registered to vote, giving her address as 4535 Red Rock DR, LARKSPUR, Douglas County, CO. Her voter ID number is 5648164.
Herr, Karl Stephen was born in 1958 and he registered to vote, giving his address as 636 Alpine Forest DR, BAYFIELD, La Plata County, CO. His voter ID number is 4918877.
Herr, Katherine Diserens was born in 1965 and she registered to vote, giving her address as 554 Horse Thief LN, DURANGO, La Plata County, CO. Her voter ID number is 601572405.
Herr, Kay E was born in 1941 and she registered to vote, giving her address as 11625 Applewood Knolls DR, LAKEWOOD, Jefferson County, CO. Her voter ID number is 4016316.
Herr, Keith Arthur was born in 1970 and he registered to vote, giving his address as 785 County Line RD, MONUMENT, El Paso County, CO. His voter ID number is 601588414.
Herr, Kelly Marie was born in 1986 and she registered to vote, giving her address as 512 E Myrtle ST, FORT COLLINS, Larimer County, CO. Her voter ID number is 600918162.
Herr, Kimberly Ann was born in 1964 and she registered to vote, giving her address as 2382 Bayberry LN, CASTLE ROCK, Douglas County, CO. Her voter ID number is 5898771.
Herr, Kimberly Harrison was born in 1961 and she registered to vote, giving her address as 1921 Mesaview LN, FORT COLLINS, Larimer County, CO. Her voter ID number is 1543713.
Herr, Krystal Dawn was born in 1985 and she registered to vote, giving her address as 8565 Pepperridge DR, COLO SPRINGS, El Paso County, CO. Her voter ID number is 1646564.
Herr, Larry L was born in 1970 and he registered to vote, giving his address as 1213 Farragut AVE, COLO SPRINGS, El Paso County, CO. His voter ID number is 351776.
Herr, Laura Elizabeth B was born in 1980 and she registered to vote, giving her address as 7437 Eagle Rock DR, LITTLETON, Douglas County, CO. Her voter ID number is 200301909.
Herr, Leo Sherwood was born in 1994 and he registered to vote, giving his address as 4662 Ingram CT, BOULDER, Boulder County, CO. His voter ID number is 600945426.
Herr, Linda Carole was born in 1946 and she registered to vote, giving her address as 2716 S Heather Gardens WAY, AURORA, Arapahoe County, CO. Her voter ID number is 720780.
Herr, Lucy Morgan was born in 1988 and she registered to vote, giving her address as 3601 W Saratoga AVE, DENVER, Denver County, CO. Her voter ID number is 600768870.
Herr, Margaret Ann was born in 1922 and she registered to vote, giving her address as 5690 E Tabor DR, CASTLE ROCK, Douglas County, CO. Her voter ID number is 5796222.
Herr, Marie Elaine was born in 1978 and she registered to vote, giving her address as 39060 Hwy 82 # 1B, ASPEN, Pitkin County, CO. Her voter ID number is 6798382.
Herr, Marlon Rains was born in 1991 and he registered to vote, giving his address as 1801 Ds RD, GLADE PARK, Mesa County, CO. His voter ID number is 200279366.
Herr, Mary Lynne was born in 1954 and she registered to vote, giving her address as 636 Alpine Forest DR, BAYFIELD, La Plata County, CO. Her voter ID number is 4919409.
Herr, Mathew Wayne was born in 1984 and he registered to vote, giving his address as 2199 Chamonix LN # 12A, VAIL, Eagle County, CO. His voter ID number is 6420312.
Herr, Matthew Robert was born in 1975 and he registered to vote, giving his address as 14378 Hanover ST, BRIGHTON, Adams County, CO. His voter ID number is 600089088.
Herr, Matthew Scott was born in 1979 and he registered to vote, giving his address as 7933 W Quarto AVE, LITTLETON, Jefferson County, CO. His voter ID number is 884473.
Herr, Megan Elizabeth was born in 1994 and she registered to vote, giving her address as 1499 Blake ST APT 4E, DENVER, Denver County, CO. Her voter ID number is 601969286.
Herr, Melissa Nicole was born in 1986 and she registered to vote, giving her address as 100 N Steele ST APT 710, DENVER, Denver County, CO. Her voter ID number is 601797883.
Herr, Micaela D was born in 1974 and she registered to vote, giving her address as 8076 S Vance CT, LITTLETON, Jefferson County, CO. Her voter ID number is 4208261.
Herr, Michael A was born in 1952 and he registered to vote, giving his address as 7720 Severy AVE, CASCADE, El Paso County, CO. His voter ID number is 511746.
Herr, Michael David was born in 1971 and he registered to vote, giving his address as 102 Woodstock ST, FRUITA, Mesa County, CO. His voter ID number is 2290225.
Herr, Michael John was born in 1964 and he registered to vote, giving his address as 2323 Curtis ST, DENVER, Denver County, CO. His voter ID number is 200035539.
Herr, Michael Joseph was born in 1988 and he registered to vote, giving his address as 151 Glenda DR # 2, LOVELAND, Larimer County, CO. His voter ID number is 601634660.
Herr, Michael Ryan was born in 1992 and he registered to vote, giving his address as 158 Inverness DR W # A306, ENGLEWOOD, Arapahoe County, CO. His voter ID number is 601144822.
Herr, Michelle Catherine was born in 1985 and she registered to vote, giving her address as 73 Esther DR, COLO SPRINGS, El Paso County, CO. Her voter ID number is 600930232.
Herr, Micole Mee Seon was born in 1986 and she registered to vote, giving her address as 1801 Ds RD, GLADE PARK, Mesa County, CO. Her voter ID number is 2342895.
Herr, Monica June was born in 1994 and she registered to vote, giving her address as 1216 Columbine CT, FORT COLLINS, Larimer County, CO. Her voter ID number is 601884466.
Herr, Morgan Taylor was born in 1992 and she registered to vote, giving her address as 18229 E Mainstreet # 9106, PARKER, Douglas County, CO. Her voter ID number is 601763984.
Herr, Nathan Heitmann was born in 1998 and he registered to vote, giving his address as 7674 E Mercer PL, DENVER, Denver County, CO. His voter ID number is 601162846.
Herr, Nathan Michael was born in 1993 and he registered to vote, giving his address as 12769 Buckhorn Creek ST, PARKER, Douglas County, CO. His voter ID number is 601112603.
Herr, Pamela Jean was born in 1968 and she registered to vote, giving her address as 113 Teal CT, ASPEN, Pitkin County, CO. Her voter ID number is 6783020.
Herr, Pamela Susanne was born in 1964 and she registered to vote, giving her address as 7720 Severy AVE, CASCADE, El Paso County, CO. Her voter ID number is 505289.
Herr, Paula Pong was born in 1991 and she registered to vote, giving her address as 341 Wright ST # 9-208, LAKEWOOD, Jefferson County, CO. Her voter ID number is 601652656.
Herr, Paul Christian was born in 1964 and he registered to vote, giving his address as 39060 Hwy 82 # 1B, ASPEN, Pitkin County, CO. His voter ID number is 6792261.
Herr, Raymond Scott was born in 1961 and he registered to vote, giving his address as 1921 Mesaview LN, FORT COLLINS, Larimer County, CO. His voter ID number is 1542931.
Herr, Rebecca Ann was born in 1990 and she registered to vote, giving her address as 1354 N Raleigh ST, DENVER, Denver County, CO. Her voter ID number is 601238846.
Herr, Rebecca Lynn was born in 1955 and she registered to vote, giving her address as 1362 1/2 Alpine AVE, BOULDER, Boulder County, CO. Her voter ID number is 8096830.
Herr, Rebecca Underwood was born in 1982 and she registered to vote, giving her address as 18766 W 84Th PL, ARVADA, Jefferson County, CO. Her voter ID number is 601722172.
Herr, Richard V was born in 1940 and he registered to vote, giving his address as 11625 Applewood Knolls DR, LAKEWOOD, Jefferson County, CO. His voter ID number is 4016451.
Herr, Robert C was born in 1952 and he registered to vote, giving his address as 6975 Grand Valley DR, COLO SPRINGS, El Paso County, CO. His voter ID number is 600624988.
Herr, Robert Edward was born in 1941 and he registered to vote, giving his address as 31250 Daniel RD, PUEBLO, Pueblo County, CO. His voter ID number is 3022589.
Herr, Robert Edward Jr was born in 1963 and he registered to vote, giving his address as 2016 61St AVE, GREELEY, Weld County, CO. His voter ID number is 6361877.
Herr, Robert Eugene was born in 1949 and he registered to vote, giving his address as 1801 Ds RD, GLADE PARK, Mesa County, CO. His voter ID number is 2252692.
Herr, Robert Jeffrey was born in 1960 and he registered to vote, giving his address as 554 Horse Thief LN, DURANGO, La Plata County, CO. His voter ID number is 601570033.
Herr, Robert Wilhelm was born in 1991 and he registered to vote, giving his address as 50 Beeler PL UNIT 442, FRISCO, Summit County, CO. His voter ID number is 600847694.
Herr, Rosemary Ellen was born in 1944 and she registered to vote, giving her address as 24336 Ben Kelly RD, ELBERT, Elbert County, CO. Her voter ID number is 601697920.
Herr, Ryan Matthew was born in 1975 and he registered to vote, giving his address as 5200 S Ulster ST # 3423, GREENWOOD VLG, Arapahoe County, CO. His voter ID number is 2728655.
Herr, Ryne J was born in 1986 and he registered to vote, giving his address as 10137 Crest View DR, MORRISON, Jefferson County, CO. His voter ID number is 4278057.
Herr, Sarah Anne was born in 1988 and she registered to vote, giving her address as 3420 E 30Th AVE, DENVER, Denver County, CO. Her voter ID number is 200120491.
Herr, Sara J was born in 1956 and she registered to vote, giving her address as 648 Augusta DR, LEADVILLE, Lake County, CO. Her voter ID number is 8463188.
Herr, Sean Michael was born in 1996 and he registered to vote, giving his address as 2382 Bayberry LN, CASTLE ROCK, Douglas County, CO. His voter ID number is 601674188.
Herr, Seng Pung was born in 1958 and he registered to vote, giving his address as 3394 W 66Th AVE APT 2, DENVER, Adams County, CO. His voter ID number is 601319003.
Herr, Stephen Avritt was born in 1969 and he registered to vote, giving his address as 2957 N Humboldt ST, DENVER, Denver County, CO. His voter ID number is 2506170.
Herr, Stephen Thomas was born in 1988 and he registered to vote, giving his address as 650 Tamarisk CT, LOUISVILLE, Boulder County, CO. His voter ID number is 8096831.
Herr, Steven Robert was born in 1985 and he registered to vote, giving his address as 8565 Pepperridge DR, COLO SPRINGS, El Paso County, CO. His voter ID number is 6353048.
Herr, Steven Willem was born in 1996 and he registered to vote, giving his address as 1213 Francis ST APT 5, LONGMONT, Boulder County, CO. His voter ID number is 601750303.
Herr, Susan H was born in 1962 and she registered to vote, giving her address as 7674 E Mercer PL, DENVER, Denver County, CO. Her voter ID number is 2754563.
Herr, Tanya Renee was born in 1990 and she registered to vote, giving her address as 158 Inverness DR W # A306, ENGLEWOOD, Arapahoe County, CO. Her voter ID number is 601144823.
Herr, Tong Nou was born in 1978 and he registered to vote, giving his address as 7993 Faith CT, FREDERICK, Weld County, CO. His voter ID number is 601715049.
Herr, Tyler Michael was born in 1990 and he registered to vote, giving his address as 2212 27Th ST, GREELEY, Weld County, CO. His voter ID number is 600519391.
Herr, Vincent Craig was born in 1958 and he registered to vote, giving his address as 6695 W 84Th WAY # 82, ARVADA, Jefferson County, CO. His voter ID number is 601302064.
Herr, William M was born in 1958 and he registered to vote, giving his address as 7674 E Mercer PL, DENVER, Denver County, CO. His voter ID number is 2788786.
Herra, Danelle Renae was born in 1985 and she registered to vote, giving her address as 9852 Corsair DR, CONIFER, Jefferson County, CO. Her voter ID number is 600963710.
Herra, Derek Joel was born in 1983 and he registered to vote, giving his address as 9852 Corsair DR, CONIFER, Jefferson County, CO. His voter ID number is 600918302.
Herrada, Alexzandriah Kaprice was born in 1995 and she registered to vote, giving her address as 2368 W 119Th AVE, WESTMINSTER, Adams County, CO. Her voter ID number is 601031005.
Herrada, Ana Cristina was born in 1990 and she registered to vote, giving her address as 4923 N Altura ST, DENVER, Denver County, CO. Her voter ID number is 600585782.
Herrada, Angelica was born in 1979 and she registered to vote, giving her address as 7051 Poplar ST, COMMERCE CITY, Adams County, CO. Her voter ID number is 7158880.
Herrada, Antonio Alfredo was born in 1970 and he registered to vote, giving his address as 6735 W 97Th CIR, WESTMINSTER, Jefferson County, CO. His voter ID number is 200255233.
Herrada, Carmen G was born in 1954 and she registered to vote, giving her address as 7051 Poplar ST, COMMERCE CITY, Adams County, CO. Her voter ID number is 6813819.
Herrada, Feliciano was born in 1963 and he registered to vote, giving his address as 4923 N Altura ST, DENVER, Denver County, CO. His voter ID number is 6835589.
Herrada, Greico was born in 2000 and he registered to vote, giving his address as 4814 N Jericho CT, DENVER, Denver County, CO. His voter ID number is 601677387.
Herrada, Javier was born in 1943 and he registered to vote, giving his address as 12253 E Burlington PL, DENVER, Denver County, CO. His voter ID number is 600867496.
Herrada, Jesus was born in 1956 and he registered to vote, giving his address as 7051 Poplar ST, COMMERCE CITY, Adams County, CO. His voter ID number is 6837760.
Herrada, Jorge Andres was born in 1986 and he registered to vote, giving his address as 2845 W Louisiana AVE, DENVER, Denver County, CO. His voter ID number is 601639135.
Herrada, Jose Isidro was born in 1963 and he registered to vote, giving his address as 7198 Highway 66, PLATTEVILLE, Weld County, CO. His voter ID number is 200100985.
Herrada, Juan Manuel was born in 1977 and he registered to vote, giving his address as 5370 Slickrock DR, COLO SPRINGS, El Paso County, CO. His voter ID number is 200014590.
Herrada, Maria Guadalupe was born in 1998 and registered to vote, giving the address as 3301 N Steele ST, DENVER, Denver County, CO. Herrada's voter ID number is 601294082.
Herrada, Marissa Analisia was born in 1999 and she registered to vote, giving her address as 9716 Nucla ST, COMMERCE CITY, Adams County, CO. Her voter ID number is 601287694.
Herrada, Miguel Anguel was born in 1989 and he registered to vote, giving his address as 2800 W 103Rd AVE APT 2018, FEDERAL HGTS, Adams County, CO. His voter ID number is 200066158.
Herrada, Mirella was born in 1988 and registered to vote, giving the address as 6500 E 88Th AVE LOT 58, HENDERSON, Adams County, CO. Herrada's voter ID number is 601389604.
Herrada, Norma M was born in 1945 and she registered to vote, giving her address as 2710 W College AVE, DENVER, Denver County, CO. Her voter ID number is 2613463.
Herrada, Tatianan was born in 1995 and registered to vote, giving the address as 243 W 80Th AVE APT 1-204, DENVER, Adams County, CO. Herrada's voter ID number is 601488987.
Herrada-Borrego, Maria Esther was born in 1967 and she registered to vote, giving her address as 6143 W 78Th AVE, ARVADA, Jefferson County, CO. Her voter ID number is 200033502.
Herrada-Castarena, Freddie Armando was born in 1994 and he registered to vote, giving his address as 5551 N Auckland WAY, DENVER, Denver County, CO. His voter ID number is 600615870.
Herrada-Flores, Miguel was born in 1969 and he registered to vote, giving his address as 4429 N Pearl ST, DENVER, Denver County, CO. His voter ID number is 2743741.
Herrada Gallegos, Oscar Feliciano was born in 1984 and he registered to vote, giving his address as 4923 N Altura ST, DENVER, Denver County, CO. His voter ID number is 2876437.
Herrada Galvan, Jacqueline was born in 1987 and she registered to vote, giving her address as 5470 E 60Th AVE APT 124, COMMERCE CITY, Adams County, CO. Her voter ID number is 2914482.
Herrada Gonzalez, Josue was born in 1970 and he registered to vote, giving his address as 10700 E Dartmouth AVE APT J202, DENVER, Denver County, CO. His voter ID number is 601088008.
Herrada-Rodriguez, Alma Erika was born in 1984 and she registered to vote, giving her address as 7520 E 129Th PL, THORNTON, Adams County, CO. Her voter ID number is 5700368.
Herrada-Rodriguez, Uriel was born in 1977 and he registered to vote, giving his address as 4590 N Walden WAY, DENVER, Denver County, CO. His voter ID number is 600278544.
Herraez, Paula Kay was born in 1974 and she registered to vote, giving her address as 3408 Northridge DR, PUEBLO, Pueblo County, CO. Her voter ID number is 3058220.
Herraez Sanchez, Juan Carlos was born in 1966 and he registered to vote, giving his address as 3408 Northridge DR, PUEBLO, Pueblo County, CO. His voter ID number is 600061681.
Herraiz Sousa, Antonio was born in 1983 and registered to vote, giving the address as 818 N Logan ST APT 1101, DENVER, Denver County, CO. Herraiz Sousa's voter ID number is 601830043.
Herrala, Brent Reno was born in 1987 and he registered to vote, giving his address as 10339 County Rd 311, SILT, Garfield County, CO. His voter ID number is 200193309.
Herrala, David Randall was born in 1979 and he registered to vote, giving his address as 5991 County Rd 214, NEW CASTLE, Garfield County, CO. His voter ID number is 5516987.
Herrala, Kara Elizabeth was born in 1974 and she registered to vote, giving her address as 5991 County Rd 214, NEW CASTLE, Garfield County, CO. Her voter ID number is 5516154.
Herrala, Trista Irene was born in 1983 and she registered to vote, giving her address as 10339 County Rd 311, SILT, Garfield County, CO. Her voter ID number is 6689753.
Herrald, Andrew Wayde Sr was born in 1969 and he registered to vote, giving his address as 3500 N Elm ST, DENVER, Denver County, CO. His voter ID number is 601555624.
Herrald, Anne Rutherford was born in 1947 and she registered to vote, giving her address as 22102 E Quarto PL, AURORA, Arapahoe County, CO. Her voter ID number is 744218.
Herrald, David James was born in 1972 and he registered to vote, giving his address as 2911 E 142Nd DR, THORNTON, Adams County, CO. His voter ID number is 6883935.
Herrald, Gordon Alexander was born in 1945 and he registered to vote, giving his address as 22102 E Quarto PL, AURORA, Arapahoe County, CO. His voter ID number is 600293500.
Herrald, Kerry D was born in 1962 and he registered to vote, giving his address as 4691 S Decatur ST # 216, ENGLEWOOD, Arapahoe County, CO. His voter ID number is 948971.
Herrald, Kristine Lynn was born in 1971 and she registered to vote, giving her address as 2911 E 142Nd DR, THORNTON, Adams County, CO. Her voter ID number is 6857810.
Herrald, Mary Margaret was born in 1967 and she registered to vote, giving her address as 10935 Klondike DR, PEYTON, El Paso County, CO. Her voter ID number is 601632054.
Herrald, Rotisha Rose was born in 1991 and she registered to vote, giving her address as 3500 N Elm ST, DENVER, Denver County, CO. Her voter ID number is 601701025.
Herrald, Sethrine Rose was born in 1978 and she registered to vote, giving her address as 3500 N Elm ST, DENVER, Denver County, CO. Her voter ID number is 601625963.
Herrald, Shaleen M was born in 1994 and she registered to vote, giving her address as 3500 N Elm ST, DENVER, Denver County, CO. Her voter ID number is 601543820.
Herrald, Virginia K was born in 1928 and she registered to vote, giving her address as 1731 Jerry Murphy RD, PUEBLO, Pueblo County, CO. Her voter ID number is 3086220.
Herran, Angelique Jeannine was born in 1954 and she registered to vote, giving her address as 1342 Terrace DR, LONGMONT, Boulder County, CO. Her voter ID number is 8096952.
Herran, Barbara Jane was born in 1969 and she registered to vote, giving her address as 8 Bridgeport CIR, PUEBLO, Pueblo County, CO. Her voter ID number is 3064581.
Herran, Jordan Marcelo was born in 2000 and he registered to vote, giving his address as 8 Bridgeport CIR, PUEBLO, Pueblo County, CO. His voter ID number is 601617763.
Herran, Simon Pedro was born in 1996 and he registered to vote, giving his address as 1836 Boulder ST APT 313, DENVER, Denver County, CO. His voter ID number is 601801618.
Herran, Tatiana Michelle was born in 1994 and she registered to vote, giving her address as 8 Bridgeport CIR, PUEBLO, Pueblo County, CO. Her voter ID number is 600655085.
Herranz, Lorrie Kim was born in 1967 and she registered to vote, giving her address as 585 Milleman ST, PALISADE, Mesa County, CO. Her voter ID number is 2350799.
Herranz, Marco was born in 1965 and he registered to vote, giving his address as 585 Milleman ST, PALISADE, Mesa County, CO. His voter ID number is 600043349.
Herrara, Joseph A was born in 1987 and he registered to vote, giving his address as 1811 W Florida AVE, DENVER, Denver County, CO. His voter ID number is 600556313.
Herrara, Zackary Paul was born in 1954 and he registered to vote, giving his address as 1105 S Raritan ST, DENVER, Denver County, CO. His voter ID number is 600161302.
Herrarte, Caroline Anne was born in 1972 and she registered to vote, giving her address as 23711 E Alabama DR, AURORA, Arapahoe County, CO. Her voter ID number is 600034590.
Herrarte Luna, Joanna was born in 2000 and she registered to vote, giving her address as 1915 2Nd ST APT 22, GREELEY, Weld County, CO. Her voter ID number is 601825860.
Herrarte Luna, Xiommara Denise was born in 1996 and she registered to vote, giving her address as 506 16Th ST APT 12C, GREELEY, Weld County, CO. Her voter ID number is 601531048.
Herrasti, Leif Edward was born in 1990 and he registered to vote, giving his address as 2381 Altura BLVD, AURORA, Adams County, CO. His voter ID number is 600221996.
Herrback, Jennifer Leigh was born in 1981 and she registered to vote, giving her address as 2023 Grain Bin DR, WINDSOR, Weld County, CO. Her voter ID number is 601649800.
Herrback, Kevin Bryan was born in 1980 and he registered to vote, giving his address as 2023 Grain Bin DR, WINDSOR, Weld County, CO. His voter ID number is 601856205.
Herrbaldt, Maxine Ann was born in 1953 and she registered to vote, giving her address as 5000 County Road 309A, IGNACIO, La Plata County, CO. Her voter ID number is 600477929.
Herrboldt, Beckie Lin was born in 1970 and she registered to vote, giving her address as 18939 E Warren CIR # 307, AURORA, Arapahoe County, CO. Her voter ID number is 874089.
Herrboldt, Brock Collin was born in 1980 and he registered to vote, giving his address as 507 Pawnee DR, STERLING, Logan County, CO. His voter ID number is 601517702.
Herrboldt, Carol Jean was born in 1956 and she registered to vote, giving her address as 15601 County Rd 35.7, STERLING, Logan County, CO. Her voter ID number is 2236979.
Herrboldt, Jennifer Lynne was born in 1983 and she registered to vote, giving her address as 507 Pawnee DR, STERLING, Logan County, CO. Her voter ID number is 2245989.
Herrboldt, Nick Ryan was born in 1986 and he registered to vote, giving his address as 92 Doe CIR, SOUTH FORK, Rio Grande County, CO. His voter ID number is 600810904.
Herrboldt, Sherry J was born in 1942 and she registered to vote, giving her address as 2979 Emerald CIR, GRAND JUNCTION, Mesa County, CO. Her voter ID number is 4933371.
Herrboldt, William Clyde was born in 1951 and he registered to vote, giving his address as 15601 County Rd 35.7, STERLING, Logan County, CO. His voter ID number is 2236980.
Herrboldt, William Joseph was born in 1981 and he registered to vote, giving his address as 5000 County Road 309A, IGNACIO, La Plata County, CO. His voter ID number is 4955008.
Herre, Andrea Stefanie was born in 1961 and she registered to vote, giving her address as 1601 Cutty Sark CT, SILVERTHORNE, Summit County, CO. Her voter ID number is 601368969.
Herre, Andrew Houston was born in 1988 and he registered to vote, giving his address as 4524 Seaboard LN, FORT COLLINS, Larimer County, CO. His voter ID number is 600967790.
Herre, Baylor Austen was born in 1997 and he registered to vote, giving his address as 6671 Bitterroot DR, COLO SPRINGS, El Paso County, CO. His voter ID number is 601057905.
Herre, Betty J was born in 1942 and she registered to vote, giving her address as 545 Wenlock CT, CASTLE ROCK, Douglas County, CO. Her voter ID number is 5726180.
Herre, Beverly Joanne was born in 1950 and she registered to vote, giving her address as 1957 27Th AVE, GREELEY, Weld County, CO. Her voter ID number is 6344748.
Herre, Brian P was born in 1978 and he registered to vote, giving his address as 1740 Clover Creek DR, LONGMONT, Boulder County, CO. His voter ID number is 8096955.
Herre, Bryan Andrew was born in 1987 and he registered to vote, giving his address as 6671 Bitterroot DR, COLO SPRINGS, El Paso County, CO. His voter ID number is 600721662.
Herre, Caron A was born in 1963 and she registered to vote, giving her address as 6671 Bitterroot DR, COLO SPRINGS, El Paso County, CO. Her voter ID number is 157994.
Herre, David James was born in 1977 and he registered to vote, giving his address as 584 Scoria AVE, LOVELAND, Larimer County, CO. His voter ID number is 6408725.
Herre, Elizabeth Ann was born in 1990 and she registered to vote, giving her address as 6671 Bitterroot DR, COLO SPRINGS, El Paso County, CO. Her voter ID number is 600813153.
Herre, Elizabeth Emily Majka was born in 1988 and she registered to vote, giving her address as 4524 Seaboard LN, FORT COLLINS, Larimer County, CO. Her voter ID number is 200047068.
Herre, Herman Phil was born in 1944 and he registered to vote, giving his address as 3035 S Fairfax ST, DENVER, Denver County, CO. His voter ID number is 794575.
Herre, Jean L was born in 1942 and she registered to vote, giving her address as 927 E Prospect RD # D, FORT COLLINS, Larimer County, CO. Her voter ID number is 1439276.
Herre, Kelly Kay was born in 1978 and she registered to vote, giving her address as 584 Scoria AVE, LOVELAND, Larimer County, CO. Her voter ID number is 600052309.
Herre, Leslee Dawn was born in 1981 and she registered to vote, giving her address as 12135 S Meander WAY, PARKER, Douglas County, CO. Her voter ID number is 601786707.
Herre, Martin was born in 1960 and he registered to vote, giving his address as 1601 Cutty Sark CT, SILVERTHORNE, Summit County, CO. His voter ID number is 601399288.
Herre, Mary D was born in 1956 and she registered to vote, giving her address as 495 Apple Blossom RD, GRAND JUNCTION, Mesa County, CO. Her voter ID number is 2281932.
Herre, Pamela Susan was born in 1973 and she registered to vote, giving her address as 4801 Gould CIR, CASTLE ROCK, Douglas County, CO. Her voter ID number is 5944233.
Herre, Phillip A was born in 1966 and he registered to vote, giving his address as 6671 Bitterroot DR, COLO SPRINGS, El Paso County, CO. His voter ID number is 157791.
Herre, Robert J was born in 1951 and he registered to vote, giving his address as 1957 27Th AVE, GREELEY, Weld County, CO. His voter ID number is 6344747.
Herre, Russell Boyd was born in 1955 and he registered to vote, giving his address as 495 Apple Blossom RD, GRAND JUNCTION, Mesa County, CO. His voter ID number is 2281262.
Herre, Ruth Clemens was born in 1947 and she registered to vote, giving her address as 8300 E Fairmount DR APT Q102, DENVER, Denver County, CO. Her voter ID number is 2680605.
Herre, Toby Philip was born in 1980 and he registered to vote, giving his address as 12135 S Meander WAY, PARKER, Douglas County, CO. His voter ID number is 601786722.
Herre, Warren Marc was born in 1970 and he registered to vote, giving his address as 4801 Gould CIR, CASTLE ROCK, Douglas County, CO. His voter ID number is 5846810.
Herrea, Enrique was born in 1973 and he registered to vote, giving his address as 3800 Pike RD APT 32101, LONGMONT, Boulder County, CO. His voter ID number is 5936634.
Herrea-Rivera, Julie Yasmin was born in 1994 and she registered to vote, giving her address as 615 S Mason ST # 12, FORT COLLINS, Larimer County, CO. Her voter ID number is 600440718.
Herrebout, Matthew Rene was born in 1970 and he registered to vote, giving his address as 808 S Grant ST, DENVER, Denver County, CO. His voter ID number is 600063112.
Herreid, Carl P was born in 1930 and he registered to vote, giving his address as 5616 W 29Th Street RD, GREELEY, Weld County, CO. His voter ID number is 6362027.
Herreid, Carly Kathleen was born in 1987 and she registered to vote, giving her address as 414 W Beaver Creek BLVD # B14, AVON, Eagle County, CO. Her voter ID number is 600094323.
Herreid, Hannah Leah was born in 1990 and she registered to vote, giving her address as 12626 Flagg DR, LAFAYETTE, Boulder County, CO. Her voter ID number is 600217084.
Herreid, Judy Schwartz was born in 1958 and she registered to vote, giving her address as 3301 Arapahoe AVE UNIT 306, BOULDER, Boulder County, CO. Her voter ID number is 8247635.
Herreid, Marah Madison was born in 2000 and she registered to vote, giving her address as 5035 W 32Nd AVE, DENVER, Denver County, CO. Her voter ID number is 601296012.
Herreid, Marion Margaret was born in 1933 and she registered to vote, giving her address as 5616 W 29Th Street RD, GREELEY, Weld County, CO. Her voter ID number is 6362026.
Herreid, Neil R was born in 1973 and he registered to vote, giving his address as 13984 Cook CT, THORNTON, Adams County, CO. His voter ID number is 7133842.
Herreid, Noah Benjamin was born in 1992 and he registered to vote, giving his address as 1825 Shallot CIR, LAFAYETTE, Boulder County, CO. His voter ID number is 200378217.
Herreid, Sally Jean was born in 1961 and she registered to vote, giving her address as 1254 6Th AVE, LONGMONT, Boulder County, CO. Her voter ID number is 8096958.
Herreid, Tamera Jean was born in 1968 and she registered to vote, giving her address as 13984 Cook CT, THORNTON, Adams County, CO. Her voter ID number is 6909042.
Herreid, Timothy Mark was born in 1959 and he registered to vote, giving his address as 1211 Kings Row AVE, CARBONDALE, Garfield County, CO. His voter ID number is 5525962.
Herreid, Todd Wendell was born in 1958 and he registered to vote, giving his address as 5035 W 32Nd AVE, DENVER, Denver County, CO. His voter ID number is 2746071.
Herreid, Zoe Elizabeth was born in 1990 and he registered to vote, giving his address as 1211 Kings Row AVE, CARBONDALE, Garfield County, CO. His voter ID number is 200134804.
Herrejon, Javier was born in 1998 and he registered to vote, giving his address as 3707 Salida CT, EVANS, Weld County, CO. His voter ID number is 601543346.
Herrejon-Albor, Raymundo was born in 1956 and he registered to vote, giving his address as 1420 W Alaska PL APT 7, DENVER, Denver County, CO. His voter ID number is 601531934.
Herrejon-Alcaraz, Rafael was born in 1974 and he registered to vote, giving his address as 3707 Salida CT, EVANS, Weld County, CO. His voter ID number is 600282572.
Herrejon Barrera, Jared was born in 2000 and he registered to vote, giving his address as 3901 Morrison RD APT 101, DENVER, Denver County, CO. His voter ID number is 601386746.
Herrel, James Thomas was born in 1949 and he registered to vote, giving his address as 60 Carriage WAY # 3027, SNOWMASS VLG, Pitkin County, CO. His voter ID number is 6779242.
Herrel, John Houseworth was born in 1986 and he registered to vote, giving his address as 4741 N Vallejo ST, DENVER, Denver County, CO. His voter ID number is 200370324.
Herrel, Marcie E was born in 1975 and she registered to vote, giving her address as 1581 Rosedale ST, CASTLE ROCK, Douglas County, CO. Her voter ID number is 5830059.
Herrel, Robert R Jr was born in 1971 and he registered to vote, giving his address as 1581 Rosedale ST, CASTLE ROCK, Douglas County, CO. His voter ID number is 5872393.
Herrel, Susan L was born in 1960 and she registered to vote, giving her address as 920 Choate LN, YODER, El Paso County, CO. Her voter ID number is 319561.
Herrel, Troy G Sr was born in 1923 and he registered to vote, giving his address as 6770 Albion ST, COMMERCE CITY, Adams County, CO. His voter ID number is 7153178.
Herrell, Alison was born in 1982 and she registered to vote, giving her address as 707 7Th ST, FOWLER, Otero County, CO. Her voter ID number is 3163004.
Herrell, Amber Danielle was born in 1983 and she registered to vote, giving her address as 38993 County Road 33, AULT, Weld County, CO. Her voter ID number is 1600717.
Herrell, Amber Renee was born in 1982 and she registered to vote, giving her address as 308 W Grant AVE, FOWLER, Otero County, CO. Her voter ID number is 3172742.
Herrell, Audrey Jean was born in 1991 and she registered to vote, giving her address as 1757 N Vine ST APT 1, DENVER, Denver County, CO. Her voter ID number is 200217861.
Herrell, Auther Gene was born in 1979 and he registered to vote, giving his address as 9950 Pecos ST, THORNTON, Adams County, CO. His voter ID number is 601806846.
Herrell, Betty Ann was born in 1953 and she registered to vote, giving her address as 790 Opal WAY, BROOMFIELD, Broomfield County, CO. Her voter ID number is 8096959.
Herrell, Brenden Floyd was born in 1996 and he registered to vote, giving his address as 406 E Florence AVE, FOWLER, Otero County, CO. His voter ID number is 601481704.
Herrell, Carolyn J was born in 1943 and she registered to vote, giving her address as 600 Raintree BLVD LOT 1, CANON CITY, Fremont County, CO. Her voter ID number is 600665807.
Herrell, Catherine Elizabeth was born in 1980 and she registered to vote, giving her address as 1467 Kilkenny ST, BOULDER, Boulder County, CO. Her voter ID number is 8096960.
Herrell, Charlene Elizabeth was born in 1949 and she registered to vote, giving her address as 2012 Devon ST, MONTROSE, Montrose County, CO. Her voter ID number is 601653108.
Herrell, Courtney Michael was born in 1992 and she registered to vote, giving her address as 451 Boardwalk DR # 705, FORT COLLINS, Larimer County, CO. Her voter ID number is 600277189.
Herrell, Danna Lea was born in 1967 and she registered to vote, giving her address as 2004 S Parfet DR, LAKEWOOD, Jefferson County, CO. Her voter ID number is 4005708.
Herrell, Danya Kaytlyn was born in 1989 and she registered to vote, giving her address as 404 Holly AVE, SWINK, Otero County, CO. Her voter ID number is 3175302.
Herrell, David Alan was born in 1974 and he registered to vote, giving his address as 6 Condor DR, EAGLE, Eagle County, CO. His voter ID number is 200099163.
Herrell, David Mason was born in 1968 and he registered to vote, giving his address as 2004 S Parfet DR, LAKEWOOD, Jefferson County, CO. His voter ID number is 4005709.
Herrell, Deborah Lynn was born in 1960 and she registered to vote, giving her address as 2206 Midland AVE, GLENWOOD SPGS, Garfield County, CO. Her voter ID number is 5523707.
Herrell, Delores Corrine was born in 1937 and she registered to vote, giving her address as 1679 County Rd 35, CRAIG, Moffat County, CO. Her voter ID number is 601599838.
Herrell, Destony Neiosha was born in 1989 and she registered to vote, giving her address as 1417 Darrow Alley, GLENWOOD SPGS, Garfield County, CO. Her voter ID number is 601588518.
Herrell, Diana Michelle was born in 1987 and she registered to vote, giving her address as 1513 Madison CT, LOUISVILLE, Boulder County, CO. Her voter ID number is 8096961.
Herrell, Everett Merlin was born in 1951 and he registered to vote, giving his address as 1000 13Th ST APT 1210, GREELEY, Weld County, CO. His voter ID number is 601139765.
Herrell, Gail was born in 1959 and she registered to vote, giving her address as 38993 County Road 33, AULT, Weld County, CO. Her voter ID number is 1600264.
Herrell, Gerald Wayne Lawrence was born in 1989 and he registered to vote, giving his address as 308 E Park AVE, FOWLER, Otero County, CO. His voter ID number is 600666954.
Herrell, Gregg Brenton was born in 1954 and he registered to vote, giving his address as 2820 Alan ST, FORT COLLINS, Larimer County, CO. His voter ID number is 1403443.
Herrell, Heather Lynette was born in 1974 and she registered to vote, giving her address as 9215 W 80Th PL # A, ARVADA, Jefferson County, CO. Her voter ID number is 1461982.
Herrell, Jaime Theresa was born in 1991 and she registered to vote, giving her address as 86 N Grant ST APT 18, DENVER, Denver County, CO. Her voter ID number is 600417281.
Herrell, James Edward was born in 1951 and he registered to vote, giving his address as 26 Yucca RD, LA JUNTA, Otero County, CO. His voter ID number is 3158761.
Herrell, James William was born in 1955 and he registered to vote, giving his address as 6215 Derby Rock LOOP, MANITOU SPGS, El Paso County, CO. His voter ID number is 600115614.
Herrell, Janice E was born in 1955 and she registered to vote, giving her address as 6314 S Salida ST, AURORA, Arapahoe County, CO. Her voter ID number is 760943.
Herrell, Jeanne Esther was born in 1931 and she registered to vote, giving her address as 2206 Midland AVE # A, GLENWOOD SPGS, Garfield County, CO. Her voter ID number is 5523286.
Herrell, John Lee Mr was born in 1948 and he registered to vote, giving his address as 2012 Devon ST, MONTROSE, Montrose County, CO. His voter ID number is 601819611.
Herrell, Kendra Lou was born in 1961 and she registered to vote, giving her address as 26 Yucca RD, LA JUNTA, Otero County, CO. Her voter ID number is 3160672.
Herrell, Kenneth Wayne was born in 1956 and he registered to vote, giving his address as 406 E Florence AVE, FOWLER, Otero County, CO. His voter ID number is 3162479.
Herrell, Kim Ann was born in 1958 and she registered to vote, giving her address as 7275 S Pennsylvania ST, CENTENNIAL, Arapahoe County, CO. Her voter ID number is 600297015.
Herrell, Lisa Marie was born in 1986 and she registered to vote, giving her address as 9700 Elderberry ST, FEDERAL HGTS, Adams County, CO. Her voter ID number is 600694882.
Herrell, Luke Mcdowell was born in 1997 and he registered to vote, giving his address as 3022 Hager LN # B, GLENWOOD SPGS, Garfield County, CO. His voter ID number is 601334908.
Herrell, Michael Alan was born in 1960 and he registered to vote, giving his address as 515 W 11Th ST APT 2, PUEBLO, Pueblo County, CO. His voter ID number is 600845399.
Herrell, Morgan Louise was born in 1991 and she registered to vote, giving her address as 6314 S Salida ST, AURORA, Arapahoe County, CO. Her voter ID number is 200299602.
Herrell, Morgan Shane was born in 1984 and he registered to vote, giving his address as 1703 Grand AVE, GRAND JUNCTION, Mesa County, CO. His voter ID number is 601743051.
Herrell, Neva J was born in 1924 and she registered to vote, giving her address as 26 Yucca RD, LA JUNTA, Otero County, CO. Her voter ID number is 3158762.
Herrell, Richard T was born in 1957 and he registered to vote, giving his address as 407 E Florence AVE, FOWLER, Otero County, CO. His voter ID number is 3161221.
Herrell, Robert Michael was born in 1958 and he registered to vote, giving his address as 2206 Midland AVE # C, GLENWOOD SPGS, Garfield County, CO. His voter ID number is 5522568.
Herrell, Robert Thomas was born in 1995 and he registered to vote, giving his address as 14221 E 1St DR # 206, AURORA, Arapahoe County, CO. His voter ID number is 600812138.
Herrell, Ross Alan was born in 1961 and he registered to vote, giving his address as 7275 S Pennsylvania ST, CENTENNIAL, Arapahoe County, CO. His voter ID number is 837300.
Herrell, Russ William was born in 1958 and he registered to vote, giving his address as 38993 County Road 33, AULT, Weld County, CO. His voter ID number is 1601781.
Herrell, Sandra Jean was born in 1961 and she registered to vote, giving her address as 216 3Rd ST, FOWLER, Otero County, CO. Her voter ID number is 3158480.
Herrell, Sarah Catherine was born in 1944 and registered to vote, giving the address as 11399 E 25Th DR, AURORA, Adams County, CO. Herrell's voter ID number is 601850150.
Herrell, Shelly Kay was born in 1965 and she registered to vote, giving her address as 101 S 20Th AVE UNIT 113, BRIGHTON, Adams County, CO. Her voter ID number is 3114491.
Herrell, Sonya R was born in 1971 and she registered to vote, giving her address as 406 E Florence AVE, FOWLER, Otero County, CO. Her voter ID number is 3171681.
Herrell, Timothy Maurice was born in 1982 and he registered to vote, giving his address as 2122 Hollywood DR, PUEBLO, Pueblo County, CO. His voter ID number is 601779091.
Herrell, Tony was born in 1958 and he registered to vote, giving his address as 2239 S Nile ST, LAKEWOOD, Jefferson County, CO. His voter ID number is 601463467.
Herrell, Vicki M was born in 1929 and she registered to vote, giving her address as 2578 S Dillon ST, AURORA, Arapahoe County, CO. Her voter ID number is 703252.
Herrell, Vicki Mary was born in 1922 and she registered to vote, giving her address as 12665 E Evans CIR # F, AURORA, Arapahoe County, CO. Her voter ID number is 601802910.
Herrell, Wayne Charles was born in 1955 and he registered to vote, giving his address as 6314 S Salida ST, AURORA, Arapahoe County, CO. His voter ID number is 761155.
Herrell, William was born in 1981 and registered to vote, giving the address as 5775 W Dartmouth AVE APT 5-304, DENVER, Denver County, CO. Herrell's voter ID number is 600337772.
Herrem, Michael S was born in 1979 and he registered to vote, giving his address as 13520 Road 42, MANCOS, Montezuma County, CO. His voter ID number is 1485745.
Herrema, Andrew Steven was born in 1989 and he registered to vote, giving his address as 3348 S Ulster CT, DENVER, Denver County, CO. His voter ID number is 600106329.
Herrema, Arland Jay was born in 1942 and he registered to vote, giving his address as 13890 E Marina DR # 312, AURORA, Arapahoe County, CO. His voter ID number is 668415.
Herrema, Ashley Lynn was born in 1986 and she registered to vote, giving her address as 6400 E Harvard AVE, DENVER, Denver County, CO. Her voter ID number is 2960876.
|
import os
import io
import six
from nose.tools import eq_, assert_not_equal, raises
import inflect
def is_eq(p, a, b):
return (
p.compare(a, b)
or p.plnounequal(a, b)
or p.plverbequal(a, b)
or p.pladjequal(a, b)
)
def test_many():
p = inflect.engine()
data = get_data()
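    # Each line of inflections.txt has the form "singular -> plural", optionally
    # followed by "# comment". The plural may be given as "modern|classical" to
    # list both forms, and the comment may mark the entry as a noun or a verb.
    # Lines containing "TODO:" are skipped.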
for line in data:
if "TODO:" in line:
continue
try:
singular, rest = line.split("->", 1)
except ValueError:
continue
singular = singular.strip()
rest = rest.strip()
try:
plural, comment = rest.split("#", 1)
except ValueError:
plural = rest.strip()
comment = ""
try:
mod_plural, class_plural = plural.split("|", 1)
mod_plural = mod_plural.strip()
class_plural = class_plural.strip()
except ValueError:
mod_plural = class_plural = plural.strip()
if "verb" in comment.lower():
is_nv = "_V"
elif "noun" in comment.lower():
is_nv = "_N"
else:
is_nv = ""
p.classical(all=0, names=0)
mod_PL_V = p.plural_verb(singular)
mod_PL_N = p.plural_noun(singular)
mod_PL = p.plural(singular)
if is_nv == "_V":
mod_PL_val = mod_PL_V
elif is_nv == "_N":
mod_PL_val = mod_PL_N
else:
mod_PL_val = mod_PL
p.classical(all=1)
class_PL_V = p.plural_verb(singular)
class_PL_N = p.plural_noun(singular)
class_PL = p.plural(singular)
if is_nv == "_V":
class_PL_val = class_PL_V
elif is_nv == "_N":
class_PL_val = class_PL_N
else:
class_PL_val = class_PL
check_all(
p, is_nv, singular, mod_PL_val, class_PL_val, mod_plural, class_plural
)
def check_all(p, is_nv, singular, mod_PL_val, class_PL_val, mod_plural, class_plural):
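    """Check the generated modern and classical plurals against the expected
    forms, exercise the plural/singular comparison helpers in both directions,
    and confirm the singular_noun round-trip for non-verb entries."""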
eq_(mod_plural, mod_PL_val)
eq_(class_plural, class_PL_val)
eq_(
is_eq(p, singular, mod_plural) in ("s:p", "p:s", "eq"),
True,
msg="is_eq({},{}) == {} != {}".format(
singular, mod_plural, is_eq(p, singular, mod_plural), "s:p, p:s or eq"
),
)
eq_(
is_eq(p, mod_plural, singular) in ("p:s", "s:p", "eq"),
True,
msg="is_eq({},{}) == {} != {}".format(
mod_plural, singular, is_eq(p, mod_plural, singular), "s:p, p:s or eq"
),
)
eq_(is_eq(p, singular, class_plural) in ("s:p", "p:s", "eq"), True)
eq_(is_eq(p, class_plural, singular) in ("p:s", "s:p", "eq"), True)
assert_not_equal(singular, "")
    eq_(mod_PL_val, mod_PL_val if class_PL_val else "%s|%s" % (mod_PL_val, class_PL_val))
if is_nv != "_V":
eq_(
p.singular_noun(mod_plural, 1),
singular,
msg="p.singular_noun({}) == {} != {}".format(
mod_plural, p.singular_noun(mod_plural, 1), singular
),
)
eq_(
p.singular_noun(class_plural, 1),
singular,
msg="p.singular_noun({}) == {} != {}".format(
class_plural, p.singular_noun(class_plural, 1), singular
),
)
def test_def():
p = inflect.engine()
p.defnoun("kin", "kine")
p.defnoun("(.*)x", "$1xen")
p.defverb("foobar", "feebar", "foobar", "feebar", "foobars", "feebar")
p.defadj("red", "red|gules")
eq_(p.no("kin", 0), "no kine", msg="kin -> kine (user defined)...")
eq_(p.no("kin", 1), "1 kin")
eq_(p.no("kin", 2), "2 kine")
eq_(p.no("regex", 0), "no regexen", msg="regex -> regexen (user defined)")
eq_(p.plural("foobar", 2), "feebar", msg="foobar -> feebar (user defined)...")
eq_(p.plural("foobars", 2), "feebar")
eq_(p.plural("red", 0), "red", msg="red -> red...")
eq_(p.plural("red", 1), "red")
eq_(p.plural("red", 2), "red")
p.classical(all=True)
eq_(p.plural("red", 0), "red", msg="red -> gules...")
eq_(p.plural("red", 1), "red")
eq_(p.plural("red", 2), "gules")
def test_ordinal():
p = inflect.engine()
eq_(p.ordinal(0), "0th", msg="0 -> 0th...")
eq_(p.ordinal(1), "1st")
eq_(p.ordinal(2), "2nd")
eq_(p.ordinal(3), "3rd")
eq_(p.ordinal(4), "4th")
eq_(p.ordinal(5), "5th")
eq_(p.ordinal(6), "6th")
eq_(p.ordinal(7), "7th")
eq_(p.ordinal(8), "8th")
eq_(p.ordinal(9), "9th")
eq_(p.ordinal(10), "10th")
eq_(p.ordinal(11), "11th")
eq_(p.ordinal(12), "12th")
eq_(p.ordinal(13), "13th")
eq_(p.ordinal(14), "14th")
eq_(p.ordinal(15), "15th")
eq_(p.ordinal(16), "16th")
eq_(p.ordinal(17), "17th")
eq_(p.ordinal(18), "18th")
eq_(p.ordinal(19), "19th")
eq_(p.ordinal(20), "20th")
eq_(p.ordinal(21), "21st")
eq_(p.ordinal(22), "22nd")
eq_(p.ordinal(23), "23rd")
eq_(p.ordinal(24), "24th")
eq_(p.ordinal(100), "100th")
eq_(p.ordinal(101), "101st")
eq_(p.ordinal(102), "102nd")
eq_(p.ordinal(103), "103rd")
eq_(p.ordinal(104), "104th")
eq_(p.ordinal("zero"), "zeroth", msg="zero -> zeroth...")
eq_(p.ordinal("one"), "first")
eq_(p.ordinal("two"), "second")
eq_(p.ordinal("three"), "third")
eq_(p.ordinal("four"), "fourth")
eq_(p.ordinal("five"), "fifth")
eq_(p.ordinal("six"), "sixth")
eq_(p.ordinal("seven"), "seventh")
eq_(p.ordinal("eight"), "eighth")
eq_(p.ordinal("nine"), "ninth")
eq_(p.ordinal("ten"), "tenth")
eq_(p.ordinal("eleven"), "eleventh")
eq_(p.ordinal("twelve"), "twelfth")
eq_(p.ordinal("thirteen"), "thirteenth")
eq_(p.ordinal("fourteen"), "fourteenth")
eq_(p.ordinal("fifteen"), "fifteenth")
eq_(p.ordinal("sixteen"), "sixteenth")
eq_(p.ordinal("seventeen"), "seventeenth")
eq_(p.ordinal("eighteen"), "eighteenth")
eq_(p.ordinal("nineteen"), "nineteenth")
eq_(p.ordinal("twenty"), "twentieth")
eq_(p.ordinal("twenty-one"), "twenty-first")
eq_(p.ordinal("twenty-two"), "twenty-second")
eq_(p.ordinal("twenty-three"), "twenty-third")
eq_(p.ordinal("twenty-four"), "twenty-fourth")
eq_(p.ordinal("one hundred"), "one hundredth")
eq_(p.ordinal("one hundred and one"), "one hundred and first")
eq_(p.ordinal("one hundred and two"), "one hundred and second")
eq_(p.ordinal("one hundred and three"), "one hundred and third")
eq_(p.ordinal("one hundred and four"), "one hundred and fourth")
def test_prespart():
p = inflect.engine()
eq_(p.present_participle("sees"), "seeing", msg="sees -> seeing...")
eq_(p.present_participle("eats"), "eating")
eq_(p.present_participle("bats"), "batting")
eq_(p.present_participle("hates"), "hating")
eq_(p.present_participle("spies"), "spying")
eq_(p.present_participle("skis"), "skiing")
def test_inflect_on_tuples():
p = inflect.engine()
eq_(p.inflect("plural('egg', ('a', 'b', 'c'))"), "eggs")
eq_(p.inflect("plural('egg', ['a', 'b', 'c'])"), "eggs")
eq_(p.inflect("plural_noun('egg', ('a', 'b', 'c'))"), "eggs")
eq_(p.inflect("plural_adj('a', ('a', 'b', 'c'))"), "some")
eq_(p.inflect("plural_verb('was', ('a', 'b', 'c'))"), "were")
eq_(p.inflect("singular_noun('eggs', ('a', 'b', 'c'))"), "eggs")
eq_(p.inflect("an('error', ('a', 'b', 'c'))"), "('a', 'b', 'c') error")
eq_(p.inflect("This is not a function(name)"), "This is not a function(name)")
def test_inflect_on_builtin_constants():
p = inflect.engine()
eq_(p.inflect("Plural of False is plural('False')"), "Plural of False is Falses")
eq_(p.inflect("num(%d, False) plural('False')" % 10), " Falses")
eq_(p.inflect("plural('True')"), "Trues")
eq_(p.inflect("num(%d, True) plural('False')" % 10), "10 Falses")
eq_(p.inflect("num(%d, %r) plural('False')" % (10, True)), "10 Falses")
eq_(p.inflect("plural('None')"), "Nones")
eq_(p.inflect("num(%d, %r) plural('True')" % (10, None)), "10 Trues")
def test_inflect_keyword_args():
p = inflect.engine()
eq_(
p.inflect("number_to_words(1234, andword='')"),
"one thousand, two hundred thirty-four",
)
eq_(
p.inflect("number_to_words(1234, andword='plus')"),
"one thousand, two hundred plus thirty-four",
)
eq_(
p.inflect("number_to_words('555_1202', group=1, zero='oh')"),
"five, five, five, one, two, oh, two",
)
@raises(NameError)
def test_NameError_in_strings():
p = inflect.engine()
eq_(p.inflect("plural('two')"), "twoes")
p.inflect("plural(two)")
def get_data():
filename = os.path.join(os.path.dirname(__file__), "inflections.txt")
with io.open(filename) as strm:
return list(map(six.text_type.strip, strm))
|
Leilani Farha talked about her concerns surrounding homelessness, forced evictions and discrimination in India.
New Delhi: According to the 2011 Census, India has 1.7 million homeless people and 13.75 million households living in slums or informal settlements. Civil society organisations have said that even this large number is an underestimate, and that the number of urban homeless alone is approximately 3 million.
Leilani Farha, the United Nations Special Rapporteur on Housing, concluded a ten-day visit to India on Friday, during which she met with government officials, civil society, lawyers, residents of informal settlements and pavement dwellers, and looked into the country's housing problems. Farha's visit came at the invitation of the central government. The special rapporteur visited Delhi, Mumbai and Bengaluru.
In a press meet on Friday, Farha revealed the preliminary findings of her visit. She will be submitting a complete report to the UN Human Rights Council in March 2017.
To tackle the housing crisis, Farha argued, a human rights approach to housing is necessary. NGOs in India have made similar points before. “The housing and living conditions [of slum dwellers and homeless people] are often inhumane, and an affront to human dignity – the essence of the right to adequate housing,” Farha argued.
The picture is not all negative, according to Farha. She mentioned two government programmes she saw as especially relevant – the Pradhan Mantri Awas Yojana (housing for all) and the Swachh Bharat Abhiyan (clean India scheme). She also mentioned certain success stories she came across when looking at people rehabilitated after displacement. However, these are only the lucky ones who were rehabilitated, she added, and even though the two schemes mentioned were ambitious, they were missing a human rights approach and were also not binding in nature.
Farha also summarised the major concerns that emerged during the course of her trip. The first was the extreme and apparent inequality she witnessed, and issues surrounding discrimination and exclusion. Marginalised groups, she argued, also suffer discrimination with respect to housing – lower castes and Muslims often have a tough time finding rental accommodation, as do widows and single women. She was also surprised, she said, to note that the authorities have not identified a link between domestic violence and a woman’s housing conditions.
Marginalisation also extends to homeless people – routinely referred to as "encroachers" rather than as people with a right to safe and adequate housing. Because this discriminatory vocabulary extends to legal documents, it becomes even harder for vulnerable groups to fight forced eviction, Farha said. The urban homeless need major attention in the country, she added, since they are not even included in the housing for all scheme. To address this, she argued that the structural causes of homelessness must be identified, existing policies (like the National Urban Livelihoods Mission) must be implemented and shelters for various kinds of people (families, abused women, street children, etc.) should be established.
Forced eviction was another of Farha's concerns. "Evictions seem quite common in India," she said, "a regularised practice used most frequently to move forward the economic agenda of the country." She also pointed out a serious discrepancy in the government's policies: the drive to assure adequate housing for all does not square with its push to become an economic giant through real estate investments that create homelessness and housing disadvantage. A national moratorium on forced evictions and demolitions needs to be put in place, Farha argued, as they constitute a gross violation of human rights and should only be carried out in very exceptional cases.
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Prepare for making dataset (TIMIT corpus)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from os.path import join, basename, splitext
from glob import glob
class Path(object):
"""Prepare for making dataset.
Args:
data_path (string): path to TIMIT corpus
config_path (string): path to config dir
htk_save_path (string, optional): path to htk files
"""
def __init__(self, data_path, config_path, htk_save_path=None):
self.data_path = data_path
self.config_path = config_path
self.htk_save_path = htk_save_path
# Paths to TIMIT data
self.train_data_path = join(data_path, 'train')
self.test_data_path = join(data_path, 'test')
self.__make()
def __make(self):
self._wav_paths = {}
self._text_paths = {}
self._word_paths = {}
self._phone_paths = {}
self._utt2wav = {}
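        # TIMIT is laid out as <dialect-region>/<speaker>/<utterance>.<ext> under
        # train/ and test/. Only the sx/si utterances are collected, and the dev
        # and test splits are restricted to the speakers listed in the config files.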
for data_type in ['train', 'dev', 'test']:
self._wav_paths[data_type] = []
self._text_paths[data_type] = []
self._word_paths[data_type] = []
self._phone_paths[data_type] = []
data_path = self.train_data_path if data_type == 'train' else self.test_data_path
if data_type != 'train':
# Load speaker list
speaker_list = []
with open(join(self.config_path, data_type + '_speaker_list.txt'), 'r') as f:
for line in f:
line = line.strip()
speaker_list.append(line)
for file_path in glob(join(data_path, '*/*/*')):
region, speaker, file_name = file_path.split('/')[-3:]
utt_index = basename(file_name)
ext = splitext(file_name)[1]
if data_type == 'train':
# if utt_index[0: 2] in ['sx', 'si', 'sa']:
if utt_index[0: 2] in ['sx', 'si']:
if ext == '.wav':
self._wav_paths[data_type].append(file_path)
self._utt2wav[speaker + '_' +
utt_index] = file_path
elif ext == '.txt':
self._text_paths[data_type].append(file_path)
elif ext == '.wrd':
self._word_paths[data_type].append(file_path)
elif ext == '.phn':
self._phone_paths[data_type].append(file_path)
else:
if speaker not in speaker_list:
continue
if utt_index[0: 2] in ['sx', 'si']:
if ext == '.wav':
self._wav_paths[data_type].append(file_path)
self._utt2wav[speaker + '_' +
utt_index] = file_path
elif ext == '.txt':
self._text_paths[data_type].append(file_path)
elif ext == '.wrd':
self._word_paths[data_type].append(file_path)
elif ext == '.phn':
self._phone_paths[data_type].append(file_path)
def utt2wav(self, utt_name):
return self._utt2wav[utt_name]
def wav(self, data_type):
"""Get paths to wav files.
Args:
data_type (string): train or dev or test
Returns:
list of paths to wav files
"""
return sorted(self._wav_paths[data_type])
def htk(self, data_type):
"""Get paths to htk files.
Args:
data_type (string): train or dev or test
Returns:
htk_paths (list): paths to htk files
"""
if self.htk_save_path is None:
raise ValueError('Set path to htk files.')
return [p for p in glob(join(self.htk_save_path, data_type, '*/*.htk'))]
# NOTE: ex.) timit/htk/data_type/speaker/speaker_utt-index.htk
def trans(self, data_type):
"""Get paths to sentence-level transcription files.
Args:
data_type (string): train or dev or test
Returns:
list of paths to transcription files
"""
return sorted(self._text_paths[data_type])
def word(self, data_type):
"""Get paths to word-level transcription files.
Args:
data_type (string): train or dev or test
Returns:
list of paths to transcription files
"""
return sorted(self._word_paths[data_type])
def phone(self, data_type):
"""Get paths to phone-level transcription files.
Args:
data_type (string): train or dev or test
Returns:
list of paths to transcription files
"""
return sorted(self._phone_paths[data_type])
if __name__ == '__main__':
path = Path(data_path='/n/sd8/inaguma/corpus/timit/data',
config_path='./config',
htk_save_path='/n/sd8/inaguma/corpus/timit/htk')
for data_type in ['train', 'dev', 'test']:
print('===== %s ======' % data_type)
print(len(path.wav(data_type=data_type)))
print(len(path.htk(data_type=data_type)))
print(len(path.trans(data_type=data_type)))
print(len(path.word(data_type=data_type)))
print(len(path.phone(data_type=data_type)))
|
A New York City subway train holds 1,200 people. This blog was viewed about 5,900 times in 2014. If it were a NYC subway train, it would take about 5 trips to carry that many people.
|
"""The event loop triggers and runs all the scripts, as appropriate"""
import gevent
import gevent.pool
try:
from gevent.lock import BoundedSemaphore
except ImportError:
print "Enchanting2 requires gevent v1.0 or newer"
raise
import factory
import server
import script
port = 8000
def is_trigger_key(top_block, media_and_event):
"""True if user pressed key hat block is waiting for."""
key_name = top_block.arguments[0].as_string()
media_env, event = media_and_event
return media_env.does_key_event_match(key_name, event)
def does_match_broadcast(top_block, message_string):
message_to_start_script = top_block.arguments[0].as_string()
if message_to_start_script == "any message":
return True
else:
return message_string == message_to_start_script
class EventLoop(object):
def __init__(self, media_environment):
self.active_scripts = gevent.pool.Group()
self.sleeping_scripts = []
self.project = None
self.media_environment = media_environment
# Get the script_lock before adding or removing scripts
self.script_lock = BoundedSemaphore(1)
self.clients = []
def queue(self, script, sprite):
"""Queues up a script"""
# Scripts usually start with a hat block and do nothing until it is
# activated
with self.script_lock:
self.sleeping_scripts.append((script, sprite))
def run_forever(self):
"""Runs all the scripts in the project"""
# First, fire up the webserver
server.ClientConnection.event_loop = self
gevent.spawn(server.run_web_servers, port)
# This is the main loop
# It checks for events (from pygame)
# and it updates the screen every so often
while True:
self.media_environment.check_for_events(self)
gevent.sleep(1.0 / 30) # max 30 fps
self.media_environment.draw(self.project)
def trigger_quit_event(self):
"""Anything we need to do before quitting? Do it now!"""
print "Quitting"
pass
def trigger_key_press(self, media_and_event):
"""A key was pressed"""
self.trigger_scripts("receiveKey", is_trigger_key, media_and_event)
def trigger_green_flag(self):
"""The green flag was pressed / the project is starting"""
self.stop_all_scripts()
self.trigger_scripts("receiveGo")
def stop_all_scripts(self):
"""The stop button was pressed -- halt execution of all scripts"""
if self.project:
self.project.stop_all_scripts()
def broadcast_message(self, message_string):
"""A message was broadcast"""
self.trigger_scripts(
"receiveMessage", does_match_broadcast, message_string)
def trigger_scripts(self, function_name_match, callback=None, data=None):
"""Trigger all sleeping scripts that match specified conditions"""
with self.script_lock:
# We can't remove items from the list in-place,
# so we create a new list of sleeping scripts
new_sleeping_scripts = []
# print "sleeping scripts: %s, active scripts: %s" % \
# (len(self.sleeping_scripts), len(self.active_scripts))
for script, sprite in self.sleeping_scripts:
top_block = script.top_block()
if top_block and top_block.function_name == function_name_match \
and (callback is None or callback(top_block, data)):
# activate this script
greenlet = gevent.spawn(self.run_script, script, sprite)
self.active_scripts.add(greenlet)
else:
# return script to sleeping list
new_sleeping_scripts.append((script, sprite))
self.sleeping_scripts = new_sleeping_scripts
def run_script(self, script, sprite):
"""Runs a script, and queues it up to run again if needs be"""
script.run(sprite)
if script.starts_on_trigger():
self.queue(script.from_start(), sprite)
def purge_all_scripts(self):
"""Reset everything -- purge all running and queued scripts"""
self.stop_all_scripts()
with self.script_lock:
self.active_scripts.kill()
self.sleeping_scripts = []
def load_project_from_disk(self, filename):
"""Loads a project from a file, and starts executing it"""
self.purge_all_scripts()
self.project = factory.deserialize_file(filename, self)
self.media_environment.setup_for_project(self.project)
# gevent.spawn(self.trigger_green_flag)
def load_project_from_xml(self, xml):
"""Loads a file from xml"""
self.purge_all_scripts()
self.project = factory.deserialize_xml(xml, self)
self.media_environment.setup_for_project(self.project)
# gevent.spawn(self.trigger_green_flag)
def client_connected(self, client):
self.clients.append(client)
print "Now serving %s clients; %s just connected" \
% (len(self.clients), client)
# Send the client a copy of the current world (if there is one)
if self.project:
message = "load_project %s" % factory.xml_for_object(self.project)
client.ws.send(message)
def client_disconnected(self, client):
self.clients.remove(client)
print "Now serving %s clients; %s just disconnected" \
% (len(self.clients), client)
def message_from_client(self, message, client):
print "message_from_client"
print message
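        # Messages from the web client are plain strings: a command word followed
        # by a space and an optional payload (e.g. "load_project <project xml>").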
split = message.find(" ")
if split == -1:
print "Unrecognized message: %s" % message
command = message[:split]
if command == "load_project":
xml = message[split + 1:]
self.load_project_from_xml(xml)
self.send_message_to_other_clients(message, client)
elif command == "green_flag_press":
self.trigger_green_flag()
elif command == "stop_sign_press":
self.stop_all_scripts()
elif command == "execute_block":
self.execute_block(message, split, client)
else:
print "Unrecognized command: %s" % command
def send_message_to_other_clients(self, message, source_client=None):
"""Send a message to all web clients, except the source"""
for client in self.clients:
if client != source_client:
client.ws.send(message)
def execute_block(self, message, split, client):
"""Executed block requested by user and return result"""
# payload is the index of the sprite,
# and the xml of the block to run
split2 = message.find(" ", split + 1)
sprite_index = int(message[split + 1:split2])
sprite = self.project.sprite_from_index(sprite_index)
xml = message[split2 + 1:]
# run the block and return the result
greenlet = gevent.spawn(self.execute_block_and_return_result,
sprite, xml, client)
self.active_scripts.add(greenlet)
def execute_block_and_return_result(self, sprite, xml_for_block, client):
"""Runs a block and tells client the result"""
# We seem to get command blocks wrapped up in scripts
# and reporter blocks as blocks
print xml_for_block
obj = factory.deserialize_xml(xml_for_block)
if isinstance(obj, script.Script):
result = obj.run(sprite)
# result is almost certainly 'None'
else:
empty_script = script.Script()
result = obj.evaluate(sprite, empty_script)
if result is not None:
result_xml = factory.xml_for_object(result)
client.ws.send("execute_block_result %s" % result_xml)
|
Needles A Memoir Of Growing Up With Diabetes Summary is available on our site; you can read it in full by downloading it directly from our site. It is a book that we recommend, and you can use Needles A Memoir Of Growing Up With Diabetes Summary as a reference for your needs. Another advantage of buying this book is that you will get free access to services on our library website for as long as you subscribe. We provide a free 14-day trial of our book library when you subscribe, so you can read other books on our website for free at any time during the subscription.
|
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Controllers for the Oppia editor view."""
__author__ = '[email protected] (Sean Lip)'
import json
from apps.exploration.models import Exploration
from apps.parameter.models import Parameter
from apps.state.models import AnswerHandlerInstance
from apps.state.models import Content
from apps.state.models import Rule
from apps.state.models import State
from apps.statistics.models import Statistics
from apps.statistics.models import STATS_ENUMS
from apps.widget.models import InteractiveWidget
from controllers.base import BaseHandler
from controllers.base import require_editor
from controllers.base import require_user
import feconf
import utils
from google.appengine.api import users
EDITOR_MODE = 'editor'
def get_state_for_frontend(state, exploration):
"""Returns a representation of the given state for the frontend."""
state_repr = state.as_dict()
modified_state_dict = state.internals_as_dict(human_readable_dests=True)
# TODO(sll): The following is for backwards-compatibility and should be
# deleted later.
rules = {}
for handler in state_repr['widget']['handlers']:
rules[handler['name']] = handler['rules']
for item in rules[handler['name']]:
if item['name'] == 'Default':
item['rule'] = 'Default'
else:
item['rule'] = InteractiveWidget.get(
state.widget.widget_id).get_readable_name(
handler['name'], item['name']
)
state_repr['widget']['rules'] = rules
state_repr['widget']['id'] = state_repr['widget']['widget_id']
state_repr['yaml'] = utils.yaml_from_dict(modified_state_dict)
return state_repr
def get_exploration_stats(exploration):
"""Returns a dict with stats for the given exploration."""
num_visits = Statistics.get_exploration_stats(
STATS_ENUMS.exploration_visited, exploration.id)
num_completions = Statistics.get_exploration_stats(
STATS_ENUMS.exploration_completed, exploration.id)
answers = Statistics.get_exploration_stats(
STATS_ENUMS.rule_hit, exploration.id)
state_counts = Statistics.get_exploration_stats(
STATS_ENUMS.state_hit, exploration.id)
state_stats = {}
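        # For every state, collect its name, hit count, per-rule answer counts and
        # the chart data the frontend uses to plot rule hits and unanswered cases.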
for state_id in answers.keys():
state_stats[state_id] = {
'name': answers[state_id]['name'],
'count': state_counts[state_id]['count'],
'rule_stats': {},
}
all_rule_count = 0
state_count = state_counts[state_id]['count']
for rule in answers[state_id]['rules'].keys():
state_stats[state_id]['rule_stats'][rule] = answers[state_id]['rules'][rule]
rule_count = 0
for _, count in answers[state_id]['rules'][rule]['answers']:
rule_count += count
all_rule_count += count
state_stats[state_id]['rule_stats'][rule]['chartData'] = [
['', 'This rule', 'Other answers'],
['', rule_count, state_count - rule_count]]
state_stats[state_id]['no_answer_chartdata'] = [
['', 'No answer', 'Answer given'],
['', state_count - all_rule_count, all_rule_count]]
return {
'num_visits': num_visits,
'num_completions': num_completions,
'state_stats': state_stats,
}
class NewExploration(BaseHandler):
"""Creates a new exploration."""
@require_user
def post(self):
"""Handles POST requests."""
payload = json.loads(self.request.get('payload'))
title = payload.get('title')
category = payload.get('category')
if not title:
raise self.InvalidInputException('No title supplied.')
if not category:
raise self.InvalidInputException('No category chosen.')
yaml = self.request.get('yaml')
if yaml and feconf.ALLOW_YAML_FILE_UPLOAD:
exploration = Exploration.create_from_yaml(
yaml_file=yaml, user=self.user, title=title, category=category)
else:
exploration = Exploration.create(
self.user, title=title, category=category)
self.render_json({'explorationId': exploration.id})
class ForkExploration(BaseHandler):
"""Forks an existing exploration."""
@require_user
def post(self):
"""Handles POST requests."""
payload = json.loads(self.request.get('payload'))
exploration_id = payload.get('exploration_id')
forked_exploration = Exploration.get(exploration_id)
if not forked_exploration.is_demo_exploration():
raise self.InvalidInputException('Exploration cannot be forked.')
# Get the demo exploration as a YAML file, so that new states can be
# created.
yaml = forked_exploration.as_yaml()
title = 'Copy of %s' % forked_exploration.title
category = forked_exploration.category
exploration = Exploration.create_from_yaml(
yaml_file=yaml, user=self.user, title=title, category=category)
self.render_json({'explorationId': exploration.id})
class ExplorationPage(BaseHandler):
"""Page describing a single exploration."""
@require_editor
def get(self, unused_exploration):
"""Handles GET requests."""
self.values.update({
'nav_mode': EDITOR_MODE,
})
self.render_template('editor/editor_exploration.html')
class ExplorationHandler(BaseHandler):
"""Page with editor data for a single exploration."""
@require_editor
def get(self, exploration):
"""Gets the question name and state list for a question page."""
state_list = {}
for state_key in exploration.states:
state = state_key.get()
state_list[state.id] = get_state_for_frontend(state, exploration)
parameters = []
for param in exploration.parameters:
parameters.append({
'name': param.name, 'obj_type': param.obj_type,
'description': param.description, 'values': param.values
})
self.values.update({
'exploration_id': exploration.id,
'init_state_id': exploration.init_state.get().id,
'is_public': exploration.is_public,
'image_id': exploration.image_id,
'category': exploration.category,
'title': exploration.title,
'editors': [editor.nickname() for editor in exploration.editors],
'states': state_list,
'parameters': parameters,
})
statistics = get_exploration_stats(exploration)
self.values.update({
'num_visits': statistics['num_visits'],
'num_completions': statistics['num_completions'],
'state_stats': statistics['state_stats'],
})
improvements = Statistics.get_top_ten_improvable_states([exploration.id])
self.values.update({
'imp': improvements,
})
self.render_json(self.values)
@require_editor
def post(self, exploration):
"""Adds a new state to the given exploration."""
payload = json.loads(self.request.get('payload'))
state_name = payload.get('state_name')
if not state_name:
raise self.InvalidInputException('Please specify a state name.')
state = exploration.add_state(state_name)
self.render_json(state.as_dict())
@require_editor
def put(self, exploration):
"""Updates properties of the given exploration."""
payload = json.loads(self.request.get('payload'))
is_public = payload.get('is_public')
category = payload.get('category')
title = payload.get('title')
image_id = payload.get('image_id')
editors = payload.get('editors')
parameters = payload.get('parameters')
if is_public:
exploration.is_public = True
if category:
exploration.category = category
if title:
exploration.title = title
if 'image_id' in payload:
exploration.image_id = None if image_id == 'null' else image_id
if editors:
if exploration.editors and self.user == exploration.editors[0]:
exploration.editors = []
for email in editors:
editor = users.User(email=email)
exploration.editors.append(editor)
else:
raise self.UnauthorizedUserException(
'Only the exploration owner can add new collaborators.')
if parameters:
exploration.parameters = [
Parameter(
name=item['name'], obj_type=item['obj_type'],
description=item['description'], values=item['values']
) for item in parameters
]
exploration.put()
@require_editor
def delete(self, exploration):
"""Deletes the given exploration."""
exploration.delete()
class ExplorationDownloadHandler(BaseHandler):
"""Downloads an exploration as a YAML file."""
@require_editor
def get(self, exploration):
"""Handles GET requests."""
filename = 'oppia-%s' % utils.to_ascii(exploration.title)
if not filename:
filename = feconf.DEFAULT_FILE_NAME
self.response.headers['Content-Type'] = 'text/plain'
self.response.headers['Content-Disposition'] = (
'attachment; filename=%s.txt' % filename)
# TODO(sll): Cache the YAML file.
self.response.write(exploration.as_yaml())
class StateHandler(BaseHandler):
"""Handles state transactions."""
@require_editor
def put(self, exploration, state):
"""Saves updates to a state."""
payload = json.loads(self.request.get('payload'))
yaml_file = payload.get('yaml_file')
if yaml_file and feconf.ALLOW_YAML_FILE_UPLOAD:
# The user has uploaded a YAML file. Process only this action.
state = State.modify_using_dict(
exploration, state,
utils.dict_from_yaml(yaml_file))
self.render_json(get_state_for_frontend(state, exploration))
return
state_name = payload.get('state_name')
param_changes = payload.get('param_changes')
interactive_widget = payload.get('interactive_widget')
interactive_params = payload.get('interactive_params')
interactive_rulesets = payload.get('interactive_rulesets')
sticky_interactive_widget = payload.get('sticky_interactive_widget')
content = payload.get('content')
unresolved_answers = payload.get('unresolved_answers')
if 'state_name' in payload:
# Replace the state name with this one, after checking validity.
if state_name == feconf.END_DEST:
raise self.InvalidInputException('Invalid state name: END')
exploration.rename_state(state, state_name)
if 'param_changes' in payload:
state.param_changes = []
for param_change in param_changes:
instance = exploration.get_param_change_instance(
param_change['name'])
instance.values = param_change['values']
state.param_changes.append(instance)
if interactive_widget:
state.widget.widget_id = interactive_widget
if interactive_params:
state.widget.params = interactive_params
if sticky_interactive_widget is not None:
state.widget.sticky = sticky_interactive_widget
if interactive_rulesets:
ruleset = interactive_rulesets['submit']
utils.recursively_remove_key(ruleset, u'$$hashKey')
state.widget.handlers = [AnswerHandlerInstance(
name='submit', rules=[])]
# This is part of the state. The rules should be put into it.
state_ruleset = state.widget.handlers[0].rules
# TODO(yanamal): Do additional calculations here to get the
# parameter changes, if necessary.
for rule_ind in range(len(ruleset)):
rule = ruleset[rule_ind]
state_rule = Rule()
state_rule.name = rule.get('name')
state_rule.inputs = rule.get('inputs')
state_rule.dest = rule.get('dest')
state_rule.feedback = rule.get('feedback')
# Generate the code to be executed.
if rule['rule'] == 'Default':
# This is the default rule.
assert rule_ind == len(ruleset) - 1
state_rule.name = 'Default'
state_ruleset.append(state_rule)
continue
# Normalize the params here, then store them.
classifier_func = state_rule.name.replace(' ', '')
first_bracket = classifier_func.find('(')
mutable_rule = rule['rule']
params = classifier_func[first_bracket + 1: -1].split(',')
for index, param in enumerate(params):
if param not in rule['inputs']:
raise self.InvalidInputException(
'Parameter %s could not be replaced.' % param)
typed_object = state.get_typed_object(mutable_rule, param)
# TODO(sll): Make the following check more robust.
if (not isinstance(rule['inputs'][param], basestring) or
'{{' not in rule['inputs'][param] or
'}}' not in rule['inputs'][param]):
normalized_param = typed_object.normalize(
rule['inputs'][param])
else:
normalized_param = rule['inputs'][param]
if normalized_param is None:
raise self.InvalidInputException(
'%s has the wrong type. Please replace it with a '
'%s.' % (rule['inputs'][param],
typed_object.__name__))
state_rule.inputs[param] = normalized_param
state_ruleset.append(state_rule)
if content:
state.content = [Content(type=item['type'], value=item['value'])
for item in content]
if 'unresolved_answers' in payload:
state.unresolved_answers = {}
for answer, count in unresolved_answers.iteritems():
if count > 0:
state.unresolved_answers[answer] = count
state.put()
self.render_json(get_state_for_frontend(state, exploration))
@require_editor
def delete(self, exploration, state):
"""Deletes the state with id state_id."""
# Do not allow deletion of initial states.
if exploration.init_state == state.key:
raise self.InvalidInputException(
'Cannot delete initial state of an exploration.')
# Find all dests in this exploration which equal the state to be
# deleted, and change them to loop back to their containing state.
for state_key in exploration.states:
origin_state = state_key.get()
changed = False
for handler in origin_state.widget.handlers:
for rule in handler.rules:
if rule.dest == state.id:
rule.dest = origin_state.id
changed = True
if changed:
origin_state.put()
# Delete the state with id state_id.
state.key.delete()
exploration.states.remove(state.key)
exploration.put()
|
The outcomes of Formula One™ races are decided not only on the track, but also during pit stops. So it was clear that the new Ingenieur collection would have to feature IWC's Ingenieur Double Chronograph Titanium (Ref. 3865), which records intermediate times within a given minute.
|
import os
import time
from datetime import datetime
from netCDF4 import Dataset
__author__ = 'Trond Kristiansen'
__email__ = '[email protected]'
__created__ = datetime(2009, 3, 2)
__modified__ = datetime(2014, 4, 7)
__version__ = "0.1.0"
__status__ = "Development"
def help():
"""
This function generates a BRY file from scratch. The variables are all created
    for East, West, North, and South. Variables include:
    salt, temp, u, v, ubar, vbar, zeta, and time. The time dimension for each variable is ocean_time, given in
    seconds since 1948-01-01.
This file is netcdf CF compliant and follows the setup for variable names and units given in the ROMS source
file: Data/ROMS/CDL/bry_unlimit.cdl
(also see: https://www.myroms.org/forum/viewtopic.php?f=30&t=1450&p=5209&hilit=cf+compliant#p5209)
This function is called from clim2bry.py.
To check the BRY file for CF compliancy: http://titania.badc.rl.ac.uk/cgi-bin/cf-checker.pl?cfversion=1.0
"""
def createBryFile(confM2R):
if (confM2R.output_format == 'NETCDF4'):
myzlib = True
else:
myzlib = False
grdROMS = confM2R.grdROMS
if os.path.exists(confM2R.bry_name):
os.remove(confM2R.bry_name)
print(('\n=>Creating initial Boundary (BRY) file {}'.format(confM2R.bry_name)))
f1 = Dataset(confM2R.bry_name, mode='w', format=confM2R.output_format)
f1.title = "Boundary forcing file (BRY) used for forcing of the ROMS model"
f1.description = "Created for the {} grid file".format(confM2R.roms_grid_path)
f1.grdFile = "{}".format(confM2R.roms_grid_path)
f1.history = 'Created ' + time.ctime(time.time())
f1.source = "{} ({})".format(confM2R.author_name, confM2R.author_email)
f1.type = "File in {} format created using MODEL2ROMS".format(confM2R.output_format)
f1.link = "https://github.com/trondkr/model2roms"
f1.Conventions = "CF-1.0"
""" Define dimensions """
f1.createDimension('xi_rho', grdROMS.xi_rho)
f1.createDimension('eta_rho', grdROMS.eta_rho)
f1.createDimension('xi_u', grdROMS.xi_u)
f1.createDimension('eta_u', grdROMS.eta_u)
f1.createDimension('xi_v', grdROMS.xi_v)
f1.createDimension('eta_v', grdROMS.eta_v)
f1.createDimension('xi_psi', grdROMS.xi_psi)
f1.createDimension('eta_psi', grdROMS.eta_psi)
f1.createDimension('ocean_time', None)
f1.createDimension('s_rho', len(grdROMS.s_rho))
f1.createDimension('s_w', len(grdROMS.s_w))
vnc = f1.createVariable('lon_rho', 'd', ('eta_rho', 'xi_rho',), zlib=myzlib, fill_value=grdROMS.fillval)
vnc.long_name = 'Longitude of RHO-points'
vnc.units = 'degree_east'
vnc.standard_name = 'longitude'
vnc[:, :] = grdROMS.lon_rho
vnc = f1.createVariable('lat_rho', 'd', ('eta_rho', 'xi_rho',), zlib=myzlib, fill_value=grdROMS.fillval)
vnc.long_name = 'Latitude of RHO-points'
vnc.units = 'degree_north'
vnc.standard_name = 'latitude'
vnc[:, :] = grdROMS.lat_rho
vnc = f1.createVariable('lon_u', 'd', ('eta_u', 'xi_u',), zlib=myzlib, fill_value=grdROMS.fillval)
vnc.long_name = 'Longitude of U-points'
vnc.units = 'degree_east'
vnc.standard_name = 'longitude'
vnc[:, :] = grdROMS.lon_u
vnc = f1.createVariable('lat_u', 'd', ('eta_u', 'xi_u',), zlib=myzlib, fill_value=grdROMS.fillval)
vnc.long_name = 'Latitude of U-points'
vnc.units = 'degree_north'
vnc.standard_name = 'latitude'
vnc[:, :] = grdROMS.lat_u
vnc = f1.createVariable('lon_v', 'd', ('eta_v', 'xi_v',), zlib=myzlib, fill_value=grdROMS.fillval)
vnc.long_name = 'Longitude of V-points'
vnc.units = 'degree_east'
vnc.standard_name = 'longitude'
vnc[:, :] = grdROMS.lon_v
vnc = f1.createVariable('lat_v', 'd', ('eta_v', 'xi_v',), zlib=myzlib, fill_value=grdROMS.fillval)
vnc.long_name = 'Latitude of V-points'
vnc.units = 'degree_north'
vnc.standard_name = 'latitude'
vnc[:, :] = grdROMS.lat_v
vnc = f1.createVariable('lat_psi', 'd', ('eta_psi', 'xi_psi',), zlib=myzlib, fill_value=grdROMS.fillval)
vnc.long_name = 'Latitude of PSI-points'
vnc.units = 'degree_north'
vnc.standard_name = 'latitude'
vnc[:, :] = grdROMS.lat_psi
vnc = f1.createVariable('lon_psi', 'd', ('eta_psi', 'xi_psi',), zlib=myzlib, fill_value=grdROMS.fillval)
vnc.long_name = 'Longitude of PSI-points'
vnc.units = 'degree_east'
vnc.standard_name = 'longitude'
vnc[:, :] = grdROMS.lon_psi
vnc = f1.createVariable('h', 'd', ('eta_rho', 'xi_rho',), zlib=myzlib, fill_value=grdROMS.fillval)
vnc.long_name = "Bathymetry at RHO-points"
vnc.units = "meter"
vnc.coordinates = "lat_rho lon_rho"
vnc.field = "bath, scalar"
vnc[:, :] = grdROMS.h
vnc = f1.createVariable('s_rho', 'd', ('s_rho',), zlib=myzlib, fill_value=grdROMS.fillval)
vnc.long_name = "S-coordinate at RHO-points"
vnc.valid_min = -1.
vnc.valid_max = 0.
if grdROMS.vtransform == 2:
vnc.standard_name = "ocean_s_coordinate_g2"
vnc.formula_terms = "s: s_rho C: Cs_r eta: zeta depth: h depth_c: hc"
if grdROMS.vtransform == 1:
vnc.standard_name = "ocean_s_coordinate_g1"
vnc.formula_terms = "s: s_rho C: Cs_r eta: zeta depth: h depth_c: hc"
vnc.field = "s_rho, scalar"
vnc[:] = grdROMS.s_rho
vnc = f1.createVariable('s_w', 'd', ('s_w',), zlib=myzlib, fill_value=grdROMS.fillval)
vnc.long_name = "S-coordinate at W-points"
vnc.valid_min = -1.
vnc.valid_max = 0.
if grdROMS.vtransform == 2:
vnc.standard_name = "ocean_s_coordinate_g2"
vnc.formula_terms = "s: s_w C: Cs_w eta: zeta depth: h depth_c: hc"
if grdROMS.vtransform == 1:
vnc.standard_name = "ocean_s_coordinate_g1"
vnc.formula_terms = "s: s_w C: Cs_w eta: zeta depth: h depth_c: hc"
vnc.field = "s_w, scalar"
vnc[:] = grdROMS.s_w
vnc = f1.createVariable('Cs_r', 'd', ('s_rho',), zlib=myzlib, fill_value=grdROMS.fillval)
vnc.long_name = "S-coordinate stretching curves at RHO-points"
vnc.valid_min = -1.
vnc.valid_max = 0.
vnc.field = "Cs_rho, scalar"
vnc[:] = grdROMS.Cs_rho
vnc = f1.createVariable('Cs_w', 'd', ('s_w',), zlib=myzlib, fill_value=grdROMS.fillval)
vnc.long_name = "S-coordinate stretching curves at W-points"
vnc.valid_min = -1.
vnc.valid_max = 0.
vnc.field = "Cs_w, scalar"
vnc[:] = grdROMS.Cs_w
vnc = f1.createVariable('hc', 'd')
vnc.long_name = "S-coordinate parameter, critical depth";
vnc.units = "meter"
vnc[:] = grdROMS.hc
vnc = f1.createVariable('z_r', 'd', ('s_rho', 'eta_rho', 'xi_rho',), zlib=myzlib, fill_value=grdROMS.fillval)
vnc.long_name = "Sigma layer to depth matrix";
vnc.units = "meter"
vnc[:, :, :] = grdROMS.z_r
vnc = f1.createVariable('Tcline', 'd')
vnc.long_name = "S-coordinate surface/bottom layer width"
vnc.units = "meter"
vnc[:] = grdROMS.tcline
vnc = f1.createVariable('theta_s', 'd')
vnc.long_name = "S-coordinate surface control parameter"
vnc[:] = grdROMS.theta_s
vnc = f1.createVariable('theta_b', 'd')
vnc.long_name = "S-coordinate bottom control parameter"
vnc[:] = grdROMS.theta_b
vnc = f1.createVariable('angle', 'd', ('eta_rho', 'xi_rho',), zlib=myzlib, fill_value=grdROMS.fillval)
vnc.long_name = "angle between xi axis and east"
vnc.units = "radian"
v_time = f1.createVariable('ocean_time', 'd', ('ocean_time',), zlib=myzlib, fill_value=grdROMS.fillval)
v_time.long_name = 'seconds since 1948-01-01 00:00:00'
v_time.units = 'seconds since 1948-01-01 00:00:00'
v_time.field = 'time, scalar, series'
if (confM2R.ocean_indata_type == "NORESM"):
v_time.calendar = 'noleap'
else:
v_time.calendar = 'standard'
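    # Boundary variables proper: each field (temp, salt, zeta, u, v, ubar, vbar)
    # is defined once per open boundary (west, east, south, north) on ocean_time.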
v_temp_west = f1.createVariable('temp_west', 'f', ('ocean_time', 's_rho', 'eta_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
v_temp_west.long_name = "potential temperature western boundary condition"
v_temp_west.units = "Celsius"
v_temp_west.field = "temp_west, scalar, series"
#v_temp_west.missing_value = grdROMS.fillval
v_temp_west.time = "ocean_time"
v_temp_east = f1.createVariable('temp_east', 'f', ('ocean_time', 's_rho', 'eta_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
v_temp_east.long_name = "potential temperature eastern boundary condition"
v_temp_east.units = "Celsius"
v_temp_east.field = "temp_east, scalar, series"
#v_temp_east.missing_value = grdROMS.fillval
v_temp_east.time = "ocean_time"
v_temp_south = f1.createVariable('temp_south', 'f', ('ocean_time', 's_rho', 'xi_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
v_temp_south.long_name = "potential temperature southern boundary condition"
v_temp_south.units = "Celsius"
v_temp_south.field = "temp_south, scalar, series"
#v_temp_south.missing_value = grdROMS.fillval
v_temp_south.time = "ocean_time"
v_temp_north = f1.createVariable('temp_north', 'f', ('ocean_time', 's_rho', 'xi_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
v_temp_north.long_name = "potential temperature northern boundary condition"
v_temp_north.units = "Celsius"
v_temp_north.field = "temp_north, scalar, series"
#v_temp_north.missing_value = grdROMS.fillval
v_temp_north.time = "ocean_time"
v_salt_west = f1.createVariable('salt_west', 'f', ('ocean_time', 's_rho', 'eta_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
v_salt_west.long_name = "salinity western boundary condition"
v_salt_west.field = "salt_west, scalar, series"
#v_salt_west.missing_value = grdROMS.fillval
v_salt_west.time = "ocean_time"
v_salt_east = f1.createVariable('salt_east', 'f', ('ocean_time', 's_rho', 'eta_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
v_salt_east.long_name = "salinity eastern boundary condition"
v_salt_east.field = "salt_east, scalar, series"
#v_salt_east.missing_value = grdROMS.fillval
v_salt_east.time = "ocean_time"
v_salt_south = f1.createVariable('salt_south', 'f', ('ocean_time', 's_rho', 'xi_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
v_salt_south.long_name = "salinity southern boundary condition"
v_salt_south.field = "salt_south, scalar, series"
#v_salt_south.missing_value = grdROMS.fillval
v_salt_south.time = "ocean_time"
v_salt_north = f1.createVariable('salt_north', 'f', ('ocean_time', 's_rho', 'xi_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
v_salt_north.long_name = "salinity northern boundary condition"
v_salt_north.field = "salt_north, scalar, series"
#v_salt_north.missing_value = grdROMS.fillval
v_salt_north.time = "ocean_time"
v_ssh_west = f1.createVariable('zeta_west', 'f', ('ocean_time', 'eta_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
v_ssh_west.long_name = "free-surface western boundary condition"
v_ssh_west.units = "meter"
v_ssh_west.field = "zeta_west, scalar, series"
#v_ssh_west.missing_value = grdROMS.fillval
v_ssh_west.time = "ocean_time"
v_ssh_east = f1.createVariable('zeta_east', 'f', ('ocean_time', 'eta_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
v_ssh_east.long_name = "free-surface eastern boundary condition"
v_ssh_east.units = "meter"
v_ssh_east.field = "zeta_east, scalar, series"
#v_ssh_east.missing_value = grdROMS.fillval
v_ssh_east.time = "ocean_time"
v_ssh_south = f1.createVariable('zeta_south', 'f', ('ocean_time', 'xi_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
v_ssh_south.long_name = "free-surface southern boundary condition"
v_ssh_south.units = "meter"
v_ssh_south.field = "zeta_south, scalar, series"
#v_ssh_south.missing_value = grdROMS.fillval
v_ssh_south.time = "ocean_time"
v_ssh_north = f1.createVariable('zeta_north', 'f', ('ocean_time', 'xi_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
v_ssh_north.long_name = "free-surface northern boundary condition"
v_ssh_north.units = "meter"
v_ssh_north.field = "zeta_north, scalar, series"
#v_ssh_north.missing_value = grdROMS.fillval
v_ssh_north.time = "ocean_time"
v_u_west = f1.createVariable('u_west', 'f', ('ocean_time', 's_rho', 'eta_u',), zlib=myzlib,
fill_value=grdROMS.fillval)
v_u_west.long_name = "3D u-momentum western boundary condition"
v_u_west.units = "meter second-1"
v_u_west.field = "u_west, scalar, series"
#v_u_west.missing_value = grdROMS.fillval
v_u_west.time = "ocean_time"
v_u_east = f1.createVariable('u_east', 'f', ('ocean_time', 's_rho', 'eta_u',), zlib=myzlib,
fill_value=grdROMS.fillval)
v_u_east.long_name = "3D u-momentum eastern boundary condition"
v_u_east.units = "meter second-1"
v_u_east.field = "u_east, scalar, series"
#v_u_east.missing_value = grdROMS.fillval
v_u_east.time = "ocean_time"
v_u_south = f1.createVariable('u_south', 'f', ('ocean_time', 's_rho', 'xi_u',), zlib=myzlib,
fill_value=grdROMS.fillval)
v_u_south.long_name = "3D u-momentum southern boundary condition"
v_u_south.units = "meter second-1"
v_u_south.field = "u_south, scalar, series"
#v_u_south.missing_value = grdROMS.fillval
v_u_south.time = "ocean_time"
v_u_north = f1.createVariable('u_north', 'f', ('ocean_time', 's_rho', 'xi_u',), zlib=myzlib,
fill_value=grdROMS.fillval)
v_u_north.long_name = "3D u-momentum northern boundary condition"
v_u_north.units = "meter second-1"
v_u_north.field = "u_north, scalar, series"
#v_u_north.missing_value = grdROMS.fillval
v_u_north.time = "ocean_time"
v_v_west = f1.createVariable('v_west', 'f', ('ocean_time', 's_rho', 'eta_v',), zlib=myzlib,
fill_value=grdROMS.fillval)
v_v_west.long_name = "3D v-momentum western boundary condition"
v_v_west.units = "meter second-1"
v_v_west.field = "v_west, scalar, series"
#v_v_west.missing_value = grdROMS.fillval
v_v_west.time = "ocean_time"
v_v_east = f1.createVariable('v_east', 'f', ('ocean_time', 's_rho', 'eta_v',), zlib=myzlib,
fill_value=grdROMS.fillval)
v_v_east.long_name = "3D v-momentum eastern boundary condition"
v_v_east.units = "meter second-1"
v_v_east.field = "v_east, scalar, series"
#v_v_east.missing_value = grdROMS.fillval
v_v_east.time = "ocean_time"
v_v_south = f1.createVariable('v_south', 'f', ('ocean_time', 's_rho', 'xi_v',), zlib=myzlib,
fill_value=grdROMS.fillval)
v_v_south.long_name = "3D v-momentum southern boundary condition"
v_v_south.units = "meter second-1"
v_v_south.field = "v_south, scalar, series"
#v_v_south.missing_value = grdROMS.fillval
v_v_south.time = "ocean_time"
v_v_north = f1.createVariable('v_north', 'f', ('ocean_time', 's_rho', 'xi_v',), zlib=myzlib,
fill_value=grdROMS.fillval)
v_v_north.long_name = "3D v-momentum northern boundary condition"
v_v_north.units = "meter second-1"
v_v_north.field = "v_north, scalar, series"
#v_v_north.missing_value = grdROMS.fillval
v_v_north.time = "ocean_time"
v_vbar_west = f1.createVariable('vbar_west', 'f', ('ocean_time', 'eta_v',), zlib=myzlib,
fill_value=grdROMS.fillval)
v_vbar_west.long_name = "2D v-momentum western boundary condition"
v_vbar_west.units = "meter second-1"
v_vbar_west.field = "vbar_west, scalar, series"
#v_vbar_west.missing_value = grdROMS.fillval
v_vbar_west.time = "ocean_time"
v_vbar_east = f1.createVariable('vbar_east', 'f', ('ocean_time', 'eta_v',), zlib=myzlib,
fill_value=grdROMS.fillval)
v_vbar_east.long_name = "2D v-momentum eastern boundary condition"
v_vbar_east.units = "meter second-1"
v_vbar_east.field = "vbar_east, scalar, series"
#v_vbar_east.missing_value = grdROMS.fillval
v_vbar_east.time = "ocean_time"
v_vbar_south = f1.createVariable('vbar_south', 'f', ('ocean_time', 'xi_v',), zlib=myzlib,
fill_value=grdROMS.fillval)
v_vbar_south.long_name = "2D v-momentum southern boundary condition"
v_vbar_south.units = "meter second-1"
v_vbar_south.field = "vbar_south, scalar, series"
#v_vbar_south.missing_value = grdROMS.fillval
v_vbar_south.time = "ocean_time"
v_vbar_north = f1.createVariable('vbar_north', 'f', ('ocean_time', 'xi_v',), zlib=myzlib,
fill_value=grdROMS.fillval)
v_vbar_north.long_name = "2D v-momentum northern boundary condition"
v_vbar_north.units = "meter second-1"
v_vbar_north.field = "vbar_north, scalar, series"
#v_vbar_north.missing_value = grdROMS.fillval
v_vbar_north.time = "ocean_time"
v_ubar_west = f1.createVariable('ubar_west', 'f', ('ocean_time', 'eta_u',), zlib=myzlib,
fill_value=grdROMS.fillval)
v_ubar_west.long_name = "2D u-momentum western boundary condition"
v_ubar_west.units = "meter second-1"
v_ubar_west.field = "ubar_west, scalar, series"
# v_ubar_west.missing_value = grdROMS.fillval
v_ubar_west.time = "ocean_time"
v_ubar_east = f1.createVariable('ubar_east', 'f', ('ocean_time', 'eta_u',), zlib=myzlib,
fill_value=grdROMS.fillval)
v_ubar_east.long_name = "2D u-momentum eastern boundary condition"
v_ubar_east.units = "meter second-1"
v_ubar_east.field = "ubar_east, scalar, series"
#v_ubar_east.missing_value = grdROMS.fillval
v_ubar_east.time = "ocean_time"
v_ubar_south = f1.createVariable('ubar_south', 'f', ('ocean_time', 'xi_u',), zlib=myzlib,
fill_value=grdROMS.fillval)
v_ubar_south.long_name = "2D u-momentum southern boundary condition"
v_ubar_south.units = "meter second-1"
v_ubar_south.field = "ubar_south, scalar, series"
#v_ubar_south.missing_value = grdROMS.fillval
v_ubar_south.time = "ocean_time"
v_ubar_north = f1.createVariable('ubar_north', 'f', ('ocean_time', 'xi_u',), zlib=myzlib,
fill_value=grdROMS.fillval)
v_ubar_north.long_name = "2D u-momentum northern boundary condition"
v_ubar_north.units = "meter second-1"
v_ubar_north.field = "ubar_north, scalar, series"
#v_ubar_north.missing_value = grdROMS.fillval
v_ubar_north.time = "ocean_time"
if confM2R.write_bcg:
directions=['east','west','north','south']
dimens=['eta_rho','eta_rho','xi_rho','xi_rho']
lndirections=['eastern','western','northern','southern']
for currentdir,lndirection,dim in zip(directions, lndirections,dimens):
currentvar='O3_c_'+currentdir
longname="carbonate/total dissolved inorganic carbon {} boundary condition".format(lndirection)
O3_c = f1.createVariable(currentvar, 'f', ('ocean_time', 's_rho', dim,), zlib=myzlib,
fill_value=grdROMS.fillval)
O3_c.long_name = longname
O3_c.field = "{}, scalar, series".format(currentvar)
O3_c.units = "mmol C/m^3"
currentvar='O3_TA_'+currentdir
longname="carbonate/bioalkalinity {} boundary condition".format(lndirection)
O3_ta = f1.createVariable(currentvar, 'f', ('ocean_time', 's_rho', dim,), zlib=myzlib,
fill_value=grdROMS.fillval)
O3_ta.long_name = longname
O3_ta.field = "{}, scalar, series".format(currentvar)
O3_ta.units = "umol/kg"
currentvar='N1_p_'+currentdir
longname="phosphate/phosphorus {} boundary condition".format(lndirection)
N1_p = f1.createVariable(currentvar, 'f', ('ocean_time','s_rho', dim,), zlib=myzlib,
fill_value=grdROMS.fillval)
N1_p.long_name = longname
N1_p.field = "{}, scalar, series".format(currentvar)
N1_p.units = "mmol P/m^3"
currentvar='N3_n_'+currentdir
longname="nitrate/nitrogen {} boundary condition".format(lndirection)
N3_n = f1.createVariable(currentvar, 'f', ('ocean_time','s_rho', dim,), zlib=myzlib,
fill_value=grdROMS.fillval)
N3_n.long_name = longname
N3_n.field = "{}, scalar, series".format(currentvar)
N3_n.units = "mmol N/m^3"
currentvar='N5_s_'+currentdir
longname="silicate/silicate {} boundary condition".format(lndirection)
N5_s = f1.createVariable(currentvar, 'f', ('ocean_time','s_rho', dim,), zlib=myzlib,
fill_value=grdROMS.fillval)
N5_s.long_name = longname
N5_s.field = "{}, scalar, series".format(currentvar)
N5_s.units = "mmol Si/m^3"
currentvar='O2_o_'+currentdir
longname="oxygen/oxygen {} boundary condition".format(lndirection)
O2_o = f1.createVariable(currentvar, 'f', ('ocean_time', 's_rho',dim,), zlib=myzlib,
fill_value=grdROMS.fillval)
O2_o.long_name = longname
O2_o.field = "{}, scalar, series".format(currentvar)
O2_o.units = "mmol O_2/m^3"
if confM2R.write_ice:
ageice_west = f1.createVariable('ageice_west', 'f', ('ocean_time', 'eta_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
ageice_west.long_name = "time-averaged age of the ice western boundary conditions"
ageice_west.units = "years"
ageice_west.time = "ocean_time"
ageice_west.field = "ice age, scalar, series"
#ageice_west.missing_value = grdROMS.fillval
ageice_east = f1.createVariable('ageice_east', 'f', ('ocean_time', 'eta_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
ageice_east.long_name = "time-averaged age of the ice eastern boundary conditions"
ageice_east.units = "years"
ageice_east.time = "ocean_time"
ageice_east.field = "ice age, scalar, series"
#ageice_east.missing_value = grdROMS.fillval
ageice_south = f1.createVariable('ageice_south', 'f', ('ocean_time', 'xi_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
ageice_south.long_name = "time-averaged age of the ice southern boundary conditions"
ageice_south.units = "years"
ageice_south.time = "ocean_time"
ageice_south.field = "ice age, scalar, series"
#ageice_south.missing_value = grdROMS.fillval
ageice_north = f1.createVariable('ageice_north', 'f', ('ocean_time', 'xi_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
ageice_north.long_name = "time-averaged age of the ice northern boundary conditions"
ageice_north.units = "years"
ageice_north.time = "ocean_time"
ageice_north.field = "ice age, scalar, series"
#ageice_north.missing_value = grdROMS.fillval
# ----------------------------------------
uice_west = f1.createVariable('uice_west', 'f', ('ocean_time', 'eta_u',), zlib=myzlib,
fill_value=grdROMS.fillval)
uice_west.long_name = "3D u-component of ice velocity western boundary condition"
uice_west.units = "meter second-1"
uice_west.time = "ocean_time"
uice_west.field = "u-component of ice velocity, scalar, series"
#uice_west.missing_value = grdROMS.fillval
uice_east = f1.createVariable('uice_east', 'f', ('ocean_time', 'eta_u',), zlib=myzlib,
fill_value=grdROMS.fillval)
uice_east.long_name = "3D u-component of ice velocity eastern boundary condition"
uice_east.units = "meter second-1"
uice_east.time = "ocean_time"
uice_east.field = "u-component of ice velocity, scalar, series"
#uice_east.missing_value = grdROMS.fillval
uice_south = f1.createVariable('uice_south', 'f', ('ocean_time', 'xi_u',), zlib=myzlib,
fill_value=grdROMS.fillval)
uice_south.long_name = "3D u-component of ice velocity southern boundary condition"
uice_south.units = "meter second-1"
uice_south.time = "ocean_time"
uice_south.field = "u-component of ice velocity, scalar, series"
#uice_south.missing_value = grdROMS.fillval
uice_north = f1.createVariable('uice_north', 'f', ('ocean_time', 'xi_u',), zlib=myzlib,
fill_value=grdROMS.fillval)
uice_north.long_name = "3D u-component of ice velocity northern boundary condition"
uice_north.units = "meter second-1"
uice_north.time = "ocean_time"
uice_north.field = "u-component of ice velocity, scalar, series"
#uice_north.missing_value = grdROMS.fillval
# ----------------------------------------
vice_west = f1.createVariable('vice_west', 'f', ('ocean_time', 'eta_v',), zlib=myzlib,
fill_value=grdROMS.fillval)
vice_west.long_name = "3D v-component of ice velocity western boundary condition"
vice_west.units = "meter second-1"
vice_west.time = "ocean_time"
vice_west.field = "v-component of ice velocity, scalar, series"
#vice_west.missing_value = grdROMS.fillval
vice_east = f1.createVariable('vice_east', 'f', ('ocean_time', 'eta_v',), zlib=myzlib,
fill_value=grdROMS.fillval)
vice_east.long_name = "3D v-component of ice velocity eastern boundary condition"
vice_east.units = "meter second-1"
vice_east.time = "ocean_time"
vice_east.field = "v-component of ice velocity, scalar, series"
#vice_east.missing_value = grdROMS.fillval
vice_south = f1.createVariable('vice_south', 'f', ('ocean_time', 'xi_v',), zlib=myzlib,
fill_value=grdROMS.fillval)
vice_south.long_name = "3D v-component of ice velocity southern boundary condition"
vice_south.units = "meter second-1"
vice_south.time = "ocean_time"
vice_south.field = "v-component of ice velocity, scalar, series"
#vice_south.missing_value = grdROMS.fillval
vice_north = f1.createVariable('vice_north', 'f', ('ocean_time', 'xi_v',), zlib=myzlib,
fill_value=grdROMS.fillval)
vice_north.long_name = "3D v-component of ice velocity northern boundary condition"
vice_north.units = "meter second-1"
vice_north.time = "ocean_time"
vice_north.field = "v-component of ice velocity, scalar, series"
#vice_north.missing_value = grdROMS.fillval
# ----------------------------------------
aice_west = f1.createVariable('aice_west', 'f', ('ocean_time', 'eta_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
aice_west.long_name = "time-averaged fraction of cell covered by ice western boundary conditions"
aice_west.units = "%"
aice_west.time = "ocean_time"
aice_west.field = "ice concentration, scalar, series"
#aice_west.missing_value = grdROMS.fillval
aice_east = f1.createVariable('aice_east', 'f', ('ocean_time', 'eta_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
aice_east.long_name = "time-averaged fraction of cell covered by ice eastern boundary conditions"
aice_east.units = "%"
aice_east.time = "ocean_time"
aice_east.field = "ice concentration, scalar, series"
#aice_east.missing_value = grdROMS.fillval
aice_south = f1.createVariable('aice_south', 'f', ('ocean_time', 'xi_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
aice_south.long_name = "time-averaged fraction of cell covered by ice southern boundary conditions"
aice_south.units = "%"
aice_south.time = "ocean_time"
aice_south.field = "ice concentration, scalar, series"
#aice_south.missing_value = grdROMS.fillval
aice_north = f1.createVariable('aice_north', 'f', ('ocean_time', 'xi_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
aice_north.long_name = "time-averaged fraction of cell covered by ice northern boundary conditions"
aice_north.units = "%"
aice_north.time = "ocean_time"
aice_north.field = "ice concentration, scalar, series"
#aice_north.missing_value = grdROMS.fillval
# ----------------------------------------
hice_west = f1.createVariable('hice_west', 'f', ('ocean_time', 'eta_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
hice_west.long_name = "time-averaged ice thickness in cell western boundary conditions"
hice_west.units = "meter"
hice_west.time = "ocean_time"
hice_west.field = "ice thickness, scalar, series"
#hice_west.missing_value = grdROMS.fillval
hice_east = f1.createVariable('hice_east', 'f', ('ocean_time', 'eta_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
hice_east.long_name = "time-averaged ice thickness in cell eastern boundary conditions"
hice_east.units = "meter"
hice_east.time = "ocean_time"
hice_east.field = "ice thickness, scalar, series"
#hice_east.missing_value = grdROMS.fillval
hice_south = f1.createVariable('hice_south', 'f', ('ocean_time', 'xi_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
hice_south.long_name = "time-averaged ice thickness in cell southern boundary conditions"
hice_south.units = "meter"
hice_south.time = "ocean_time"
hice_south.field = "ice thickness, scalar, series"
#hice_south.missing_value = grdROMS.fillval
hice_north = f1.createVariable('hice_north', 'f', ('ocean_time', 'xi_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
hice_north.long_name = "time-averaged ice thickness in cell northern boundary conditions"
hice_north.units = "meter"
hice_north.time = "ocean_time"
hice_north.field = "ice thickness, scalar, series"
#hice_north.missing_value = grdROMS.fillval
# ----------------------------------------
snow_thick_west = f1.createVariable('snow_thick_west', 'f', ('ocean_time', 'eta_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
snow_thick_west.long_name = "time-averaged snow thickness in cell western boundary conditions"
snow_thick_west.units = "meter"
snow_thick_west.time = "ocean_time"
snow_thick_west.field = "snow thickness, scalar, series"
#snow_thick_west.missing_value = grdROMS.fillval
snow_thick_east = f1.createVariable('snow_thick_east', 'f', ('ocean_time', 'eta_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
snow_thick_east.long_name = "time-averaged snow thickness in cell eastern boundary conditions"
snow_thick_east.units = "meter"
snow_thick_east.time = "ocean_time"
snow_thick_east.field = "snow thickness, scalar, series"
#snow_thick_east.missing_value = grdROMS.fillval
snow_thick_south = f1.createVariable('snow_thick_south', 'f', ('ocean_time', 'xi_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
snow_thick_south.long_name = "time-averaged snow thickness in cell southern boundary conditions"
snow_thick_south.units = "meter"
snow_thick_south.time = "ocean_time"
snow_thick_south.field = "snow thickness, scalar, series"
#snow_thick_south.missing_value = grdROMS.fillval
snow_thick_north = f1.createVariable('snow_thick_north', 'f', ('ocean_time', 'xi_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
snow_thick_north.long_name = "time-averaged snow thickness in cell northern boundary conditions"
snow_thick_north.units = "meter"
snow_thick_north.time = "ocean_time"
snow_thick_north.field = "snow thickness, scalar, series"
#snow_thick_north.missing_value = grdROMS.fillval
# ----------------------------------------
ti_west = f1.createVariable('ti_west', 'f', ('ocean_time', 'eta_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
ti_west.long_name = "time-averaged interior ice temperature cell western boundary conditions"
ti_west.units = "degrees Celsius"
ti_west.time = "ocean_time"
ti_west.field = "interior temperature, scalar, series"
#ti_west.missing_value = grdROMS.fillval
ti_east = f1.createVariable('ti_east', 'f', ('ocean_time', 'eta_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
ti_east.long_name = "time-averaged interior ice temperature eastern boundary conditions"
ti_east.units = "degrees Celsius"
ti_east.time = "ocean_time"
ti_east.field = "interior temperature, scalar, series"
#ti_east.missing_value = grdROMS.fillval
ti_south = f1.createVariable('ti_south', 'f', ('ocean_time', 'xi_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
ti_south.long_name = "time-averaged interior ice temperature southern boundary conditions"
ti_south.units = "degrees Celsius"
ti_south.time = "ocean_time"
ti_south.field = "interior temperature, scalar, series"
#ti_south.missing_value = grdROMS.fillval
ti_north = f1.createVariable('ti_north', 'f', ('ocean_time', 'xi_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
ti_north.long_name = "time-averaged interior ice temperature northern boundary conditions"
ti_north.units = "degrees Celsius"
ti_north.time = "ocean_time"
ti_north.field = "interior temperature, scalar, series"
#ti_north.missing_value = grdROMS.fillval
# ----------------------------------------
sfwat_west = f1.createVariable('sfwat_west', 'f', ('ocean_time', 'eta_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
sfwat_west.long_name = "time-averaged surface melt water thickness on ice western boundary conditions"
sfwat_west.units = "meter"
sfwat_west.time = "ocean_time"
sfwat_west.field = "melt water thickness, scalar, series"
#sfwat_west.missing_value = grdROMS.fillval
sfwat_east = f1.createVariable('sfwat_east', 'f', ('ocean_time', 'eta_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
sfwat_east.long_name = "time-averaged surface melt water thickness on ice eastern boundary conditions"
sfwat_east.units = "meter"
sfwat_east.time = "ocean_time"
sfwat_east.field = "melt water thickness, scalar, series"
#sfwat_east.missing_value = grdROMS.fillval
sfwat_south = f1.createVariable('sfwat_south', 'f', ('ocean_time', 'xi_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
sfwat_south.long_name = "time-averaged surface melt water thickness on ice southern boundary conditions"
sfwat_south.units = "meter"
sfwat_south.time = "ocean_time"
sfwat_south.field = "melt water thickness, scalar, series"
#sfwat_south.missing_value = grdROMS.fillval
sfwat_north = f1.createVariable('sfwat_north', 'f', ('ocean_time', 'xi_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
sfwat_north.long_name = "time-averaged surface melt water thickness on ice northern boundary conditions"
sfwat_north.units = "meter"
sfwat_north.time = "ocean_time"
sfwat_north.field = "melt water thickness, scalar, series"
#sfwat_north.missing_value = grdROMS.fillval
# ----------------------------------------
tisrf_west = f1.createVariable('tisrf_west', 'f', ('ocean_time', 'eta_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
tisrf_west.long_name = "time-averaged temperature of ice surface western boundary conditions"
tisrf_west.units = "degrees Celsius"
tisrf_west.time = "ocean_time"
tisrf_west.field = "surface temperature, scalar, series"
#tisrf_west.missing_value = grdROMS.fillval
tisrf_east = f1.createVariable('tisrf_east', 'f', ('ocean_time', 'eta_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
tisrf_east.long_name = "time-averaged temperature of ice surface eastern boundary conditions"
tisrf_east.units = "degrees Celsius"
tisrf_east.time = "ocean_time"
tisrf_east.field = "surface temperature, scalar, series"
#tisrf_east.missing_value = grdROMS.fillval
tisrf_south = f1.createVariable('tisrf_south', 'f', ('ocean_time', 'xi_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
tisrf_south.long_name = "time-averaged temperature of ice surface southern boundary conditions"
tisrf_south.units = "degrees Celsius"
tisrf_south.time = "ocean_time"
tisrf_south.field = "surface temperature, scalar, series"
#tisrf_south.missing_value = grdROMS.fillval
tisrf_north = f1.createVariable('tisrf_north', 'f', ('ocean_time', 'xi_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
tisrf_north.long_name = "time-averaged temperature of ice surface northern boundary conditions"
tisrf_north.units = "degrees Celsius"
tisrf_north.time = "ocean_time"
tisrf_north.field = "surface temperature, scalar, series"
#tisrf_north.missing_value = grdROMS.fillval
# ----------------------------------------
sig11_west = f1.createVariable('sig11_west', 'f', ('ocean_time', 'eta_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
sig11_west.long_name = "time-averaged internal ice stress 11 component western boundary conditions"
sig11_west.units = "Newton meter-1"
sig11_west.time = "ocean_time"
sig11_west.field = "ice stress 11, scalar, series"
#sig11_west.missing_value = grdROMS.fillval
sig11_east = f1.createVariable('sig11_east', 'f', ('ocean_time', 'eta_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
sig11_east.long_name = "time-averaged internal ice stress 11 component eastern boundary conditions"
sig11_east.units = "Newton meter-1"
sig11_east.time = "ocean_time"
sig11_east.field = "ice stress 11, scalar, series"
#sig11_east.missing_value = grdROMS.fillval
sig11_south = f1.createVariable('sig11_south', 'f', ('ocean_time', 'xi_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
sig11_south.long_name = "time-averaged internal ice stress 11 component southern boundary conditions"
sig11_south.units = "Newton meter-1"
sig11_south.time = "ocean_time"
sig11_south.field = "ice stress 11, scalar, series"
#sig11_south.missing_value = grdROMS.fillval
sig11_north = f1.createVariable('sig11_north', 'f', ('ocean_time', 'xi_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
sig11_north.long_name = "time-averaged internal ice stress 11 component northern boundary conditions"
sig11_north.units = "Newton meter-1"
sig11_north.time = "ocean_time"
sig11_north.field = "ice stress 11, scalar, series"
#sig11_north.missing_value = grdROMS.fillval
# ----------------------------------------
sig12_west = f1.createVariable('sig12_west', 'f', ('ocean_time', 'eta_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
sig12_west.long_name = "time-averaged internal ice stress 12 component western boundary conditions"
sig12_west.units = "Newton meter-1"
sig12_west.time = "ocean_time"
sig12_west.field = "ice stress 12, scalar, series"
#sig12_west.missing_value = grdROMS.fillval
sig12_east = f1.createVariable('sig12_east', 'f', ('ocean_time', 'eta_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
sig12_east.long_name = "time-averaged internal ice stress 12 component eastern boundary conditions"
sig12_east.units = "Newton meter-1"
sig12_east.time = "ocean_time"
sig12_east.field = "ice stress 12, scalar, series"
#sig12_east.missing_value = grdROMS.fillval
sig12_south = f1.createVariable('sig12_south', 'f', ('ocean_time', 'xi_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
sig12_south.long_name = "time-averaged internal ice stress 12 component southern boundary conditions"
sig12_south.units = "Newton meter-1"
sig12_south.time = "ocean_time"
sig12_south.field = "ice stress 12, scalar, series"
#sig12_south.missing_value = grdROMS.fillval
sig12_north = f1.createVariable('sig12_north', 'f', ('ocean_time', 'xi_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
sig12_north.long_name = "time-averaged internal ice stress 12 component northern boundary conditions"
sig12_north.units = "Newton meter-1"
sig12_north.time = "ocean_time"
sig12_north.field = "ice stress 12, scalar, series"
#sig12_north.missing_value = grdROMS.fillval
# ----------------------------------------
sig22_west = f1.createVariable('sig22_west', 'f', ('ocean_time', 'eta_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
sig22_west.long_name = "time-averaged internal ice stress 22 component western boundary conditions"
sig22_west.units = "Newton meter-1"
sig22_west.time = "ocean_time"
sig22_west.field = "ice stress 22, scalar, series"
#sig22_west.missing_value = grdROMS.fillval
sig22_east = f1.createVariable('sig22_east', 'f', ('ocean_time', 'eta_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
sig22_east.long_name = "time-averaged internal ice stress 22 component eastern boundary conditions"
sig22_east.units = "Newton meter-1"
sig22_east.time = "ocean_time"
sig22_east.field = "ice stress 22, scalar, series"
#sig22_east.missing_value = grdROMS.fillval
sig22_south = f1.createVariable('sig22_south', 'f', ('ocean_time', 'xi_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
sig22_south.long_name = "time-averaged internal ice stress 22 component southern boundary conditions"
sig22_south.units = "Newton meter-1"
sig22_south.time = "ocean_time"
sig22_south.field = "ice stress 22, scalar, series"
#sig22_south.missing_value = grdROMS.fillval
sig22_north = f1.createVariable('sig22_north', 'f', ('ocean_time', 'xi_rho',), zlib=myzlib,
fill_value=grdROMS.fillval)
sig22_north.long_name = "time-averaged internal ice stress 22 component northern boundary conditions"
sig22_north.units = "Newton meter-1"
sig22_north.time = "ocean_time"
sig22_north.field = "ice stress 22, scalar, series"
#sig22_north.missing_value = grdROMS.fillval
# ----------------------------------------
f1.close()
|
Have fun at the nation's largest charity motorcycle rally while raising money for the Children's Safety Center. We need volunteers to help in our beverage tent on Thursday, September 22nd. Shifts are from 3:00-7:00pm or 7:00-11:00pm.
Our tent is the North tent in the beer garden on Dickson Street in the Walton Arts Center parking lot. We are right across from the main stage so you will get to listen to live music all night long. Plus, all volunteers will receive a signature Bikes, Blues & BBQ t-shirt. All volunteers must be at least 21 years of age.
|
import cv2 as CvObject
import numpy as np
# PART 1 - Introduction
# Topics
# >> Read frames from camera
# >> Basic Filters (smoothing, blurring)
# >> Morphological Transformation
# >> Edge Detection
Select_WebCam = 0
Select_External_Camera = 1
CamTitle = "MyCam"
# Store the captured video stream inside the capture variable
capture = CvObject.VideoCapture(Select_WebCam)
# Loop for as long as the camera is open and capturing
while (capture.isOpened()):
# Start capturing frames/images
'''
capture.read() returns two values
>> ret - whether a frame was captured (True/False)
>> Camera - the captured frame
'''
# 'Camera' is an identifier for frame which is taken from camera
ret,Camera = capture.read()
# Show the raw frame in a window titled CamTitle ("MyCam")
# CvObject.imshow(CamTitle,Camera)
# Grayscale Camera
# RGB (Red,Green,Blue) channel ordering is reversed to BGR in OpenCV
GrayScale = CvObject.cvtColor(Camera,CvObject.COLOR_BGR2GRAY)
# CvObject.imshow('GrayScale',GrayScale)
# Detect BLUE COLOR
# Threshold the frame to a binary mask (only pixels inside the range remain)
# Here the range is defined directly in BGR and applied to the raw frame;
# an HSV (Hue,Saturation,Value) version is sketched below after the MonoColor display
lower_color_bound_HSV = np.array([150,100,0])
upper_color_bound_HSV = np.array([255,180,180])
imgThreshold = CvObject.inRange(Camera,lower_color_bound_HSV,upper_color_bound_HSV)
#CvObject.imshow('Thresholded',imgThreshold)
# Bitwise AND blacks out (fills with black) everything outside the mask;
# the mask keeps only the matching region so that it
# shows up in its original color
MonoColor = CvObject.bitwise_and(Camera,Camera,mask=imgThreshold)
#CvObject.imshow('MonoColor',MonoColor)
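# Hedged sketch (added for illustration, not in the original script): the comments
# above mention HSV, but inRange was applied to the raw BGR frame. An HSV-based
# version would first convert the frame; the hue/saturation/value bounds below are
# assumed typical values for blue, not taken from the original code.
HSV_Frame = CvObject.cvtColor(Camera,CvObject.COLOR_BGR2HSV)
lower_blue_HSV = np.array([100,150,50])
upper_blue_HSV = np.array([140,255,255])
blueMaskHSV = CvObject.inRange(HSV_Frame,lower_blue_HSV,upper_blue_HSV)
blueOnlyHSV = CvObject.bitwise_and(Camera,Camera,mask=blueMaskHSV)
#CvObject.imshow('MonoColorHSV',blueOnlyHSV)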
# Blurring and Smoothening
#Kernel size
x=5
y=5
# Kernel is an x*y (here 5*5) matrix with all elements "1";
# the kernel decides smoothness/blurriness/sharpness
# according to the kernel definition
# float32 is the datatype; all elements are divided by (x*y = 25)
#Kernel Defination
kernel = np.ones((x,y),np.float32)/(x*y)
# 2D filter consist of 3 parameter
# >> Frame (I choose GrayScale Frame )
# >> depth = -1
# >> kernel Matrix
# filter2D helps you to build customFilter
# with the use of kernel Definations
smoothed = CvObject.filter2D(GrayScale,-1,kernel)
#CvObject.imshow('Smoothed',smoothed)
# Gaussian Blur :
# It uses Gaussian Probability Distribution in order to remove
# gaussian noise in the Frame; if the standard deviation (sigmaX,sigmaY)
# is 0 then the deviation is computed from the kernel size (x,y)
sigmaX = 0
sigmaY = 0
gaussianblur = CvObject.GaussianBlur(GrayScale,(x,y),sigmaX,sigmaY)
#CvObject.imshow('Gaussian Blur',gaussianblur)
# Median Blur - It uses the median of a group of pixels to do smoothing
kernel_size = x # Only a square kernel is allowed
#medianblur = CvObject.medianBlur(GrayScale,kernel_size)
#CvObject.imshow('Median Blur',medianblur)
# Bilateral Filter -
# It is designed to overcome the drawback of Gaussian filter
# Used to avoid smoothing across edges, but it is a slower process
# the intensity of each pixel is replaced by a weighted average of its neighbours' intensities
# it is related to the anisotropic diffusion partial differential equation
diameter = 15 # diameter of the pixel neighbourhood, i.e. how many pixels are considered neighbours
SigmaColor = 80 # filter sigma in colour space: how different in colour neighbours may be and still be mixed
SigmaSpace = 80 # filter sigma in coordinate space: how far away neighbours may be and still influence the pixel
bilateral = CvObject.bilateralFilter(Camera,diameter,SigmaColor,SigmaSpace)
#CvObject.imshow('BilateralFilter',bilateral)
# Morphological Transformation - Transformation based on shapes
# Erosion -
# erodes away the boundary of the foreground object,
# shrinking bright regions, which makes dark outlines look bolder and thicker
# Kernel Defination for erosion
kernel_Erode_or_Dilate = np.ones((2*x,2*y),np.uint8)
erosion = CvObject.erode(Camera,kernel_Erode_or_Dilate,iterations = 1)
#CvObject.imshow('Erosion',erosion)
# Dilation - Opposite of erosion:
# expands the foreground, so thin dark outlines get diluted or removed
dilate = CvObject.dilate(Camera,kernel_Erode_or_Dilate,iterations = 1)
#CvObject.imshow('Dilation',dilate)
# Opening - erosion followed by dilation; the dilation compensates for the erosion (somewhat cartoonizes the image)
# removes small bright noise first, then restores the remaining outline
opening = CvObject.morphologyEx(Camera,CvObject.MORPH_OPEN,kernel_Erode_or_Dilate)
#CvObject.imshow('Opening',opening)
# Closing - dilation followed by erosion; the erosion compensates for the dilation
# darkens the outline, then removes other unnecessary darker outlines
closing = CvObject.morphologyEx(Camera,CvObject.MORPH_CLOSE,kernel_Erode_or_Dilate)
#CvObject.imshow('Closing',closing)
# Morphological Gradient - difference of dilation and erosion
gradient = CvObject.morphologyEx(Camera,CvObject.MORPH_GRADIENT,kernel_Erode_or_Dilate)
#CvObject.imshow('Gradient',gradient)
# Top Hat - difference between Opening and original
topHat = CvObject.morphologyEx(Camera,CvObject.MORPH_TOPHAT,kernel_Erode_or_Dilate)
#CvObject.imshow('TopHat',topHat)
# Black Hat - difference between Closing and original
BlackHat = CvObject.morphologyEx(Camera,CvObject.MORPH_BLACKHAT,kernel_Erode_or_Dilate)
#CvObject.imshow('BlackHat',BlackHat)
# Edge Detection
# Laplacian - Boundary value Problem
# At edges there is a high variation in intensity between the edge and its surroundings
# Using the first derivative we find candidate locations that could be edges
# Using the second derivative we can confirm whether a candidate really is an edge
# The Laplacian computes the second derivative of a real-valued image function;
# zero-crossings of the Laplacian (where the second derivative changes sign)
# mark the points of steepest intensity change, i.e. the edges
laplacian = CvObject.Laplacian(Camera,CvObject.CV_64F)
#CvObject.imshow('Laplacian',laplacian)
# First Order Derivative Approaches
# Sobel - directional (1D) edge scan
SobelKernelSize = 5
# For SobelX the gradient is taken along X (activateX = 1, activateY = 0);
# this responds to horizontal intensity changes, so it
# highlights edges that run along the Y axis
activateX = 1
activateY = 0
#sobelX = CvObject.Sobel(Camera,CvObject.CV_64F,activateX ,activateY,ksize = SobelKernelSize)
#CvObject.imshow('SobelX',sobelX)
# For SobelY the gradient is taken along Y, highlighting edges that run along the X axis
# activateX = 0 and activateY = 1
activateX = 0
activateY = 1
#sobelY = CvObject.Sobel(Camera,CvObject.CV_64F,activateX ,activateY,ksize = SobelKernelSize)
#CvObject.imshow('SobelY',sobelY)
# Canny - multi-stage detector (noise reduction, gradient, non-maximum suppression, hysteresis)
# The two values below are the hysteresis thresholds, not per-axis noise settings
Reduce_noiseX = 200 # lower hysteresis threshold: gradients below this are rejected
Reduce_noiseY = 250 # upper hysteresis threshold: gradients above this are definitely edges
canny = CvObject.Canny(Camera,Reduce_noiseX,Reduce_noiseY)
CvObject.imshow('Canny',canny)
# Closing Camera
time = 10 #10 millisec
key = CvObject.waitKey(time)
# if the key is "ESC" (ASCII = 27) then exit the capture loop
# (hold the key for a moment so it is caught within the waitKey window)
if key == 27:
break
capture.release()
CvObject.destroyAllWindows()
|
I would just like to confirm that the Dynatrace metric startedsessions is the equivalent of Concurrent Visits in Appmon.
This metric only provides information about the number of started sessions, not about the number of concurrent sessions. Look at the Active Sessions metric instead; it will tell you how many sessions are active at the same time (active = concurrent).
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost
import os
from alembic import command as alembic_command
from alembic import config as alembic_config
from alembic import util as alembic_util
from oslo.config import cfg
from neutron.common import legacy
_core_opts = [
cfg.StrOpt('core_plugin',
default='',
help=_('Neutron plugin provider module')),
cfg.ListOpt('service_plugins',
default=[],
help=_("The service plugins Neutron will use")),
]
_quota_opts = [
cfg.StrOpt('quota_driver',
default='',
help=_('Neutron quota driver class')),
]
_db_opts = [
cfg.StrOpt('connection',
deprecated_name='sql_connection',
default='',
help=_('URL to database')),
]
CONF = cfg.ConfigOpts()
CONF.register_opts(_core_opts)
CONF.register_opts(_db_opts, 'database')
CONF.register_opts(_quota_opts, 'QUOTAS')
def do_alembic_command(config, cmd, *args, **kwargs):
try:
getattr(alembic_command, cmd)(config, *args, **kwargs)
except alembic_util.CommandError as e:
alembic_util.err(str(e))
def do_check_migration(config, cmd):
do_alembic_command(config, 'branches')
def do_upgrade_downgrade(config, cmd):
if not CONF.command.revision and not CONF.command.delta:
raise SystemExit(_('You must provide a revision or relative delta'))
revision = CONF.command.revision
if CONF.command.delta:
sign = '+' if CONF.command.name == 'upgrade' else '-'
revision = sign + str(CONF.command.delta)
else:
revision = CONF.command.revision
do_alembic_command(config, cmd, revision, sql=CONF.command.sql)
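# Example of the relative-delta form (inferred from the sign logic above, added
# for illustration): "upgrade --delta 2" is passed to Alembic as revision "+2",
# while "downgrade --delta 1" becomes "-1".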
def do_stamp(config, cmd):
do_alembic_command(config, cmd,
CONF.command.revision,
sql=CONF.command.sql)
def do_revision(config, cmd):
do_alembic_command(config, cmd,
message=CONF.command.message,
autogenerate=CONF.command.autogenerate,
sql=CONF.command.sql)
def add_command_parsers(subparsers):
for name in ['current', 'history', 'branches']:
parser = subparsers.add_parser(name)
parser.set_defaults(func=do_alembic_command)
parser = subparsers.add_parser('check_migration')
parser.set_defaults(func=do_check_migration)
for name in ['upgrade', 'downgrade']:
parser = subparsers.add_parser(name)
parser.add_argument('--delta', type=int)
parser.add_argument('--sql', action='store_true')
parser.add_argument('revision', nargs='?')
parser.set_defaults(func=do_upgrade_downgrade)
parser = subparsers.add_parser('stamp')
parser.add_argument('--sql', action='store_true')
parser.add_argument('revision')
parser.set_defaults(func=do_stamp)
parser = subparsers.add_parser('revision')
parser.add_argument('-m', '--message')
parser.add_argument('--autogenerate', action='store_true')
parser.add_argument('--sql', action='store_true')
parser.set_defaults(func=do_revision)
command_opt = cfg.SubCommandOpt('command',
title='Command',
help=_('Available commands'),
handler=add_command_parsers)
CONF.register_cli_opt(command_opt)
def main():
config = alembic_config.Config(
os.path.join(os.path.dirname(__file__), 'alembic.ini')
)
config.set_main_option('script_location',
'neutron.db.migration:alembic_migrations')
# attach the Neutron conf to the Alembic conf
config.neutron_config = CONF
CONF()
#TODO(gongysh) enable logging
legacy.modernize_quantum_config(CONF)
CONF.command.func(config, CONF.command.name)
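# Minimal usage sketch (added; assumes this main() is wired up as the
# neutron-db-manage console script, which is how upstream Neutron packages it):
#
#   neutron-db-manage --config-file /etc/neutron/neutron.conf upgrade head
#   neutron-db-manage revision -m "add example table" --autogenerate
#
# The --config-file options populate CONF (core plugin, service plugins, database
# connection) before the selected Alembic sub-command is dispatched via
# CONF.command.func(config, CONF.command.name).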
|
Mary Harron (American Psycho) and her writer partner Guinevere Turner, who unveiled their controversial new Charles Manson film Charlie Says in Venice last week, are working together on a new project.
The project is being developed and produced through New York’s Greencard Pictures and marks the latest collaboration between Harron and Turner after American Psycho, The Notorious Bettie Page and Charlie Says.
Harron is planning The Orange Eats Creeps as her next feature as a director following her Salvador Dali project, Dali Land, which will star Ben Kingsley as Dali and Lesley Manville as his wife Gala and which is due to shoot in spring next year. Ed Pressman is producing the Dali film.
Charlie Says, which is being sold by UTA and Fortitude International, was a world premiere in Venice but isn’t screening at TIFF. (Toronto’s programmers are understood to have had an issue with its violence.) British star Matt Smith (The Crown, Dr Who) was Harron’s surprise choice to play notorious cult leader Manson.
“A ton of people came in to read for it. No-one was right. It is a hard part because it is iconic and you need someone with a lot of guts to do it and who is charismatic enough to make sense of it. It was so hard to find someone for this role. If someone can get the inner Charlie and make sense of it, I don’t care what they look like,” Harron said of Smith, who bears little resemblance to the real-life Manson. (By coincidence, the actor had also recently played Patrick Bateman, the main character in American Psycho, in a 2013 London stage-musical adaptation of Bret Easton Ellis’ 1991 novel).
|
# -*- coding: utf-8 -*-
import struct
import csv
import os
import sys
import tkinter as tk
from decrypter import *
from tkinter.filedialog import askdirectory
ext = "ch"
class Application(tk.Frame):
def __init__(self, master=None):
tk.Frame.__init__(self, master)
self.pack()
self.createWidgets()
def createWidgets(self):
self.instructionLabel = tk.Label(self)
self.instructionLabel.config(text="Welcome ! Please select a folder to start a conversion")
self.instructionLabel.grid(row=0, column=0, columnspan=4, ipadx=5, ipady=5)
self.selectButton = tk.Button(self)
self.selectButton["text"] = "Select folder"
self.selectButton["command"] = self.selectFolder
self.selectButton.grid(row=1, column=0, sticky='W')
self.convertButton = tk.Button(self)
self.convertButton["text"] = "Convert files"
self.convertButton["command"] = self.convert
self.convertButton.grid(row=1, column=1, sticky='W')
self.convertButton.config(state='disabled')
self.statusText = tk.Label(self)
self.statusText.config(text="No job in progress")
self.statusText.config(width=40)
self.statusText.grid(row=1, column=2)
self.quit = tk.Button(self, text="Quit", fg="red", command=root.destroy)
self.quit.grid(row=1, column=3, sticky='E')
def convert(self):
#print("Converting")
self.convertButton.config(state='disabled')
if self.selectedPath:
decrypter = Decrypter(self.selectedPath, self)
def selectFolder(self):
#print("Selecting folder")
self.selectedPath = askdirectory(parent=root, title='Choose a folder')
if self.selectedPath:  # askdirectory returns '' if the dialog is cancelled
self.convertButton.config(state='normal')
experimentList = []
for fRoot, subFolders, files in os.walk(self.selectedPath):
for fichier in files:
if fichier[-2:] == '.B':
# New experiment
experimentList.append(fRoot)
text = "Found " + str(len(experimentList)) + " experiments"
self.instructionLabel.config(text="Click on the 'Convert files' button to transform files into CSV files or select another folder")
self.statusText.config(text=text)
# Create windows reference
root = tk.Tk()
# Change app icon
# sys._MEIPASS only exists when the app is frozen with PyInstaller;
# fall back to the script directory when running from source
basePath = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))
prepath = os.path.join(basePath, 'icon')
path = os.path.join(prepath, 'icon.ico')
root.iconbitmap(default=path)
# Create application
app = Application(master=root)
app.master.title("Agilent Data Converter")
app.master.minsize(500,60)
app.master.maxsize(650,60)
# Run Application
app.mainloop()
|
Ball-by-ball BBL 2019 29th match BRH vs MLR: all of today's cricket win tips. Who will win today's match? Toss astrology and today's win tips. IPL 12 tips.
1. Who will win today's match, Melbourne Renegades vs Brisbane Heat, in the BBL 2019 29th ball-by-ball Big Bash T20?
3. Dream 11 Tips: How to select your Dream11 team. Expert free Dream 11 tips. Full updates and predicted playing XI. Dream11 team tips for MLR vs BRH, BBL 2019 29th. How to predict your Dream11 team.
What is the prevailing match expectation? The Melbourne Renegades vs Brisbane Heat BBL 2019 29th match will be played at Geelong Cricket Ground, Geelong. Both teams are in great form. Today's toss prediction for Brisbane Heat vs Melbourne Renegades, and today's match toss prediction by CBTF. Cricket match bonanza hints for today's BBL 2019 29th match, BRH vs MLR.
Ball by ball, you can get today's CBTF match prediction from this website for free. Who will win today's cricket match, Melbourne Renegades vs Brisbane Heat? Match astrology. IPL 2019 match tips. 100% safe prediction.
Today Match Prediction BRH vs MLR?
BBL 2019 29th match record. Cricket match prediction, 100% sure. Who-will-win match prediction. Match prediction astrology by CBTF. Today's match horoscope for World Cup 2019. Winning chances in today's cricket match tips. What are the winning chances in today's BBL 2019 29th match? Today's match prediction, IPL 2019 match tips & tickets. Cricket match prediction, 100% sure.
|
# -*- coding: utf-8 -*-
'''
Created on 2015-08-21
@author: xhj
'''
from craw_page_parse import crawl_real_time_with_keyword, \
crawl_set_time_with_keyword, crawl_set_time_with_keyword_and_nickname
# from craw_page_parse import crawl_set_time_with_only_keyword
import os
import logging.config
import random
import datetime
from crawl_comment_from_db import crawl_comment, crawl_repost
from craw_page_parse_2 import crawl_uid_from_nickname, \
crawl_userinfo_from_uname_or_uid, crawl_userinfo_2_from_uid
from store_model import UserInfo_store, Single_weibo_with_more_info_store, \
Bie_Ming_store, Weibo_url_to_Comment_url, Single_comment, \
Single_comment_store, Weibo_url_to_repost_url, Single_repost_store,\
UserInfo_for_regester_time_store
from craw_page_parse_2 import crawl_userinfo_3_for_regester_time
from urllib import quote_plus
from mongoengine.context_managers import switch_collection
from mongoengine.queryset.visitor import Q
if not os.path.exists('logs/'):
os.mkdir('logs')
if os.path.exists('logs/scheduler.log'):
open('logs/scheduler.log', 'w').truncate()
curpath = os.path.normpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
logging.config.fileConfig(curpath + '/runtime_infor_log.conf')
if not os.path.exists('data/'):
os.mkdir('data')
if not os.path.exists('cookies/'):
os.mkdir('cookies')
# Crawl real-time weibo posts; not needed for now
def crawl_real_time_main(key_words_list):
thrads_list = []
for i in range(len(key_words_list)):
thrads_list.append(crawl_real_time_with_keyword(key_words_list[i], 'real_time_' + str(i)))
return thrads_list
# Create a start URL for each day range
# For a keyword that matches many weibo posts, crawl day by day
def crawl_set_time_main_many(key_word, start_time, end_time, how_many_days_one_thread, how_many_days_crawl_once):
thrads_list = []
while start_time <= end_time:
end_2 = start_time + datetime.timedelta(days=how_many_days_one_thread-1)
thrads_list.append(crawl_set_time_with_keyword(key_word, start_time, end_2, how_many_days_crawl_once, 'crawl_settime_thread' + str(start_time) + " to " + str(end_2)))
start_time = end_2+datetime.timedelta(days=1)
return thrads_list
# Crawl everything at once instead of day by day
# Given: keyword, start time, end time, list of user nicknames
def crawl_set_time_main_little(key_word, start_time, end_time, nickname_list):
thrads_list = []
for nickname in nickname_list:
thrads_list.append(crawl_set_time_with_keyword_and_nickname(key_word, start_time, end_time, nickname, nickname + "_thread"))
return thrads_list
# Extract the already-converted comment URLs from the database, then crawl them
def crawl_comment_from_fie():
# Read information from the individual weibo files
all_thrads_list = []
# Read out the data
list_contains_set_weibourl_and_commenturl = []
global Weibo_url_to_Comment_url
for one_entry in Weibo_url_to_Comment_url.objects:
list_contains_set_weibourl_and_commenturl.append((one_entry['weibo_url'], one_entry['comment_url']))
one_piece = len(list_contains_set_weibourl_and_commenturl) / 12
for i in range(12):
all_thrads_list.append(crawl_comment(list_contains_set_weibourl_and_commenturl[i * one_piece:(i + 1) * one_piece], 'crawl_comment___' + str(i)))
for thread in all_thrads_list:
thread.start()
for thread in all_thrads_list:
thread.join()
# Crawl user reposts
def crawl_repost_from_db():
all_thrads_list = []
# Read out the data
list_contains_set_weibourl_and_reposturl = []
global Weibo_url_to_repost_url
for one_entry in Weibo_url_to_repost_url.objects:
list_contains_set_weibourl_and_reposturl.append((one_entry['weibo_url'], one_entry['repost_url']))
random.shuffle(list_contains_set_weibourl_and_reposturl)
one_piece = len(list_contains_set_weibourl_and_reposturl) / 12
for i in range(12):
all_thrads_list.append(crawl_repost(list_contains_set_weibourl_and_reposturl[i * one_piece:(i + 1) * one_piece], 'crawl_repost___' + str(i)))
for thread in all_thrads_list:
thread.start()
for thread in all_thrads_list:
thread.join()
pass
# Crawl all weibo posts for one keyword; a hashtag's posts can also be crawled, but the initial URL must be changed accordingly
def crawl_one_keyword():
all_thrads_list = []
key_word = '转基因'
start_time = datetime.datetime(2016, 2, 16)
end_time = datetime.datetime(2016, 2, 26)
all_thrads_list.extend(crawl_set_time_main_many(key_word, start_time, end_time, how_many_days_one_thread=1, how_many_days_crawl_once=1))
for thread in all_thrads_list:
thread.start()
for thread in all_thrads_list:
thread.join()
# Crawl all weibo posts under one hashtag
# def crawl_hash_tag():
# all_thrads_list = []
# key_word = '四六级成绩'
# start_time = datetime.datetime(2015, 12, 10)
# end_time = datetime.datetime(2015, 12, 31)
#
# how_many_days_one_thread = 5
# while start_time + datetime.timedelta(days=how_many_days_one_thread) < end_time:
# end_2 = start_time + datetime.timedelta(days=how_many_days_one_thread)
# all_thrads_list.append(crawl_set_time_with_only_keyword(key_word, start_time, end_2, 'crawl_settime_thread' + str(start_time) + " to " + str(end_2)))
# start_time = end_2
# if start_time < end_time:
# all_thrads_list.append(crawl_set_time_with_only_keyword(key_word, start_time, end_time, 'crawl_settime_thread' + str(start_time) + " to " + str(end_time)))
# for thread in all_thrads_list:
# thread.start()
# for thread in all_thrads_list:
# thread.join()
# Crawl weibo posts from specific users: particular media accounts' posts about a given keyword
def crawl_set_user_weibo_about_keyword():
all_thrads_list = []
key_word = '扶老人'
start_time = datetime.datetime(2011, 1, 1)
end_time = datetime.datetime(2015, 9, 6)
nickname_list = ["新闻晨报", "南方都市报", "广州日报", "南方日报", "环球时报", "扬子晚报", "新京报", "每日经济新闻", "楚天都市报"]
all_thrads_list.extend(crawl_set_time_main_little(key_word, start_time, end_time, nickname_list))
for thread in all_thrads_list:
thread.start()
for thread in all_thrads_list:
thread.join()
####################################################################################### crawl userinfo start
# Crawl user info via user uids; one of the needs of the crawl task
def chuli_nickname_crawl_userinfo():
uid_or_uname_list = []
# uid_or_uname_list = read_data_from_database_for___uid_or_uname_list()
with open("test_nolabels.txt") as file_r:
for one_line in file_r.readlines():
uid_or_uname_list.append(one_line[:-2])
print len(uid_or_uname_list)
how_many_uids_one_thread = len(uid_or_uname_list) / 10
all_thrads_list = []
start = 0
end = how_many_uids_one_thread
count = 0
while end < len(uid_or_uname_list):
all_thrads_list.append(crawl_userinfo_from_uname_or_uid(uid_or_uname_list[start:end], "crawl_userinfo_from_uname_or_uid_" + str(count)))
start = start + how_many_uids_one_thread
end = end + how_many_uids_one_thread
count = count + 1
if start < len(uid_or_uname_list):
all_thrads_list.append(crawl_userinfo_from_uname_or_uid(uid_or_uname_list[start:len(uid_or_uname_list)], "crawl_userinfo_from_uname_or_uid_" + str(count)))
for thread in all_thrads_list:
thread.start()
for thread in all_thrads_list:
thread.join()
# Crawl user info from the web interface, mainly to obtain the registration time
def crawl_userinfo_for_regester_time():
uid_crawl_list = []
count = 1
for one_user in UserInfo_store.objects:
uid = one_user['uid_or_uname']
if len(UserInfo_for_regester_time_store.objects(uid=uid))==0:
uid_crawl_list.append(uid)
print count
count += 1
print len(uid_crawl_list)
how_many_uids_one_thread = len(uid_crawl_list) / 10
all_thrads_list = []
start = 0
end = how_many_uids_one_thread
count = 0
while end < len(uid_crawl_list):
all_thrads_list.append(crawl_userinfo_3_for_regester_time(uid_crawl_list[start:end], "crawl_userinfo_for_regestertime_" + str(count)))
start = start + how_many_uids_one_thread
end = end + how_many_uids_one_thread
count = count + 1
if start < len(uid_crawl_list):
all_thrads_list.append(crawl_userinfo_3_for_regester_time(uid_crawl_list[start:len(uid_crawl_list)], "crawl_userinfo_for_regestertime_" + str(count)))
for thread in all_thrads_list:
thread.start()
for thread in all_thrads_list:
thread.join()
pass
# Fetch the uids and nicknames of already-crawled user info from the database
def read_data_from_database_uids_and_nicknames():
uids_and_nicknames = []
for one_user_info in UserInfo_store.objects:
uids_and_nicknames.append(one_user_info["uid_or_uname"])
uids_and_nicknames.append(one_user_info["nickname"])
return uids_and_nicknames
# Process the at_info field stored in the database
def chuli_at_info(at_info):
nickname_list = []
for one in at_info.split("[fen_ge]"):
nickname_list.append(one[:one.find(":")])
return nickname_list
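# Hedged example (format inferred from the split/parse logic above, not from real data):
# an at_info value such as "nickA:link1[fen_ge]nickB:link2" yields ['nickA', 'nickB'].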
# def read_alread_crawled_uids_or_nicknames():
# alread_crawled_uids_or_nicknames = []
# fr = open("data/already_crawled_uids_or_nicknames.txt","r")
# for one_line in fr.readlines():
# # alread_crawled_uids_or_nicknames.append(one_line[:-1])
# pass
# fr.close()
# return alread_crawled_uids_or_nicknames
# def write_alread_crawled_uids_or_nicknames(alread_crawled_uids_or_nicknames):
# fw = open("data/already_crawled_uids_or_nicknames.txt","a")
# for one_thing in alread_crawled_uids_or_nicknames:
# fw.write(one_thing+"\n")
# fw.close()
# Read data from the database to build uid_or_uname_list
def read_data_from_database_for___uid_or_uname_list():
uid_or_uname_list = []
this_uid_list = []
this_nickname_list = []
weibo_collection_name = []
# weibo_collection_name = ["zhuanjiyin_nohashtag_original_2014_03_01_to_2014_03_10_detmine_1", \
# "zhuanjiyin_nohashtag_original_2014_03_10_to_2014_03_20_detmine_2", \
# "zhuanjiyin_nohashtag_original_2014_03_20_to_2014_04_01_detmine_3"]
# Process the user info contained in weibo posts
print "start single weibo"
global Single_weibo_with_more_info_store
for one_collection in weibo_collection_name:
with switch_collection(Single_weibo_with_more_info_store, one_collection) as Single_weibo_with_more_info_store:
for one_weibo in Single_weibo_with_more_info_store.objects:
this_uid_list.append(one_weibo["uid"])
this_uid_list.append(one_weibo["come_from_user_id"])
this_nickname_list.extend(chuli_at_info(one_weibo["at_info"]))
this_nickname_list.extend(chuli_at_info(one_weibo["retweet_reason_at_info"]))
# Process the user info contained in comments
# 'zhuanjiyin_nohashtag_original_single_comment_2016_with_more_info'
print "start comment"
comment_collections = []
# comment_collections.append('zhuanjiyin_nohashtag_original_single_comment_2014_with_more_info_repair')
global Single_comment_store
for one_collection in comment_collections:
with switch_collection(Single_comment_store, one_collection) as Single_comment_store:
for one_comment in Single_comment_store.objects:
this_uid_list.append(one_comment["uid"])
this_nickname_list.extend(chuli_at_info(one_comment["at_info"]))
print "start repost"
repost_collections = []
repost_collections.append("zhuanjiyin_nohashtag_original_single_repost_2016_with_more_info_repair")
global Single_repost_store
for one_collection in repost_collections:
with switch_collection(Single_repost_store, one_collection) as Single_repost_store:
for one_comment in Single_repost_store.objects:
this_uid_list.append(one_comment["uid"])
this_nickname_list.extend(chuli_at_info(one_comment["at_info"]))
uid_or_uname_list.extend(list(set(this_uid_list)))
uid_or_uname_list.extend(list(set(this_nickname_list)))
uid_or_uname_list = list(set(uid_or_uname_list))
# print "start filter"
# for uid_or_nickname in set(this_uid_list):
# if len(UserInfo_store.objects(Q(uid_or_uname=str(uid_or_nickname)) | Q(nickname=str(uid_or_nickname)))) == 0 or\
# len(Bie_Ming_store.objects(Q(uid_or_uname=str(uid_or_nickname)) | Q(bie_ming=str(uid_or_nickname)))) == 0:
# uid_or_uname_list.append(uid_or_nickname)
#
# for uid_or_nickname in set(this_nickname_list) :
# if len(UserInfo_store.objects(Q(uid_or_uname=str(uid_or_nickname)) | Q(nickname=str(uid_or_nickname)))) == 0 or\
# len(Bie_Ming_store.objects(Q(uid_or_uname=str(uid_or_nickname)) | Q(bie_ming=str(uid_or_nickname)))) == 0:
# uid_or_uname_list.append(uid_or_nickname)
random.shuffle(uid_or_uname_list)
print len(uid_or_uname_list)
return uid_or_uname_list
####################################################################################### crawl userinfo end
# Convert nicknames to uids (or their weibo identifiers) by crawling pages; an intermediate step of the task
def main_2_just_tran_nickname_to_uidoruname():
file_r = open("100_atname_file.txt", 'r')
nickname_list = []
for line in file_r.readlines():
op_nickname = line[line.find('nickname:'):]
nickname = op_nickname[op_nickname.find(':') + 1:op_nickname.rfind(']')]
nickname_list.append(nickname)
all_thrads_list = []
start = 0
end = 10
count = 1
while end < len(nickname_list):
all_thrads_list.append(crawl_uid_from_nickname(nickname_list[start:end], "crawl_uid_from_nickname_" + str(count)))
start += 10
end += 10
count += 1
if(start < len(nickname_list)):
all_thrads_list.append(crawl_uid_from_nickname(nickname_list[start:len(nickname_list)], "crawl_uid_from_nickname_" + str(count)))
for thread in all_thrads_list:
thread.start()
for thread in all_thrads_list:
thread.join()
###################################################################################### start 1
# Use query expansion to crawl weibo posts for the expanded keywords.
# key_word_list : the keywords produced by query expansion
# start_time : datetime object, start time; end_time : datetime object, end time
def crawl_keywords_list(key_word_list, start_time, end_time):
all_thrads_list = []
for key_word in key_word_list:
all_thrads_list.extend(crawl_set_time_main_many(key_word, start_time, end_time, 110))
for thread in all_thrads_list:
thread.start()
for thread in all_thrads_list:
thread.join()
# Read the file and build the keywords list; this drives the query-expansion crawl
def gen_keywords_list():
# Files already processed: 1
file_r = open('./query_expansion_three_word/result_three_word_0.txt', 'r')
start_time = ""
end_time = ""
count = 1
key_words_list = []
for line in file_r.readlines():
if count == 1:
line = line[:-1].split(' ')
start_time = datetime.datetime(int(line[0]), int(line[1]), int(line[2]))
elif count == 2:
line = line[:-1].split(' ')
end_time = datetime.datetime(int(line[0]), int(line[1]), int(line[2]))
else:
key_words_list.append(line[:line.find('-')])
count += 1
return (key_words_list, start_time, end_time)
###################################################################################### end 1
if __name__ == '__main__':
# key_words_list,start_time,end_time=gen_keywords_list()
# crawl_keywords_list(key_words_list, start_time, end_time)
# For one keyword, crawl weibo posts in a specific time range (works for a hashtag too)
# crawl_one_keyword()
# Crawl user comments via the already-converted comment URLs
# crawl_comment_from_fie()
# Crawl user reposts via the already-converted repost URLs
# crawl_repost_from_db()
# Read uids, nicknames, etc. from the database, then crawl user info
# chuli_nickname_crawl_userinfo()
# Extract uids from the database, then crawl user registration info from the web pages
crawl_userinfo_for_regester_time()
# Crawl specific users' weibo posts about a specific keyword
# crawl_set_user_weibo_about_keyword()
pass
|
Minot asked the committee to share data and information publicly as it receives it.
“If you give the jury the right info, they will make the right decision,” Minot said.
Jane Joseph, venue committee vice chairman, said data is already being posted on the town’s website at www.hiltonheadislandsc.gov. She also added that all meetings are open to the public.
Other attendees offered suggestions or opinions.
Town Councilwoman Kim Likins, a committee member, said the committee was formed specifically to look at arts venue options on the island.
The Town Council set up the committee last year and charged it with making a recommendation on a venue by the end of December. If passed, a Nov. 8 Beaufort County referendum could provide $6 million for a venue via a sales tax increase.
Lorraine Berry, co-owner of Legendary Golf, said the town should look at utilizing already constructed park spaces to hold events and festivals.
Daniel Brock, town spokesman, said turn-out for the event was better than expected. He said it was the right step toward two-way communication.
“The committee was able to address misconceptions and hear concerns that they can bring back to committee meetings,” Brock said.
More events will be held throughout the community in upcoming months, Brock said.
|
"""Dataset generation module."""
import random
import numpy as np
def random_string(length, alphabet_list):
"""Generate a random string."""
rand_str = ''.join(random.choice(alphabet_list) for i in range(length))
return rand_str
def perturb(seed, alphabet_list, p=0.5):
"""Randomize a string."""
seq = ''
for c in seed:
if random.random() < p:
c = random.choice(alphabet_list)
seq += c
return seq
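# Illustration of perturb's behaviour (values are illustrative): with p=0.0 the seed
# is returned unchanged, while with p=1.0 every position is resampled uniformly from
# alphabet_list, so roughly 1/len(alphabet_list) of the positions still match the
# seed by chance.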
def inflate_normalize(pwms=None, exp=2):
"""Inflate and normalize PWM to utilize noise level."""
num_motives = pwms.shape[0]
for j in range(num_motives): # inflate
pwms[j] = pwms[j] ** exp
for j in range(num_motives): # normalize
pwms[j] = pwms[j] / pwms[j].sum(axis=0)
return pwms
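# Worked example with illustrative numbers: a PWM column [0.6, 0.4] raised to exp=2
# becomes [0.36, 0.16], and after renormalisation [0.36/0.52, 0.16/0.52] ~= [0.69, 0.31];
# larger exponents therefore sharpen each column toward its dominant letter (less noise).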
def get_pwms(alphabet='ACGT', num=2, length=6, exp=2):
"""Generate PWMs for every motif."""
letters = len(alphabet)
pwms = []
for i in range(num):
i_pwm = np.random.random_sample((letters, length))
i_pwm = i_pwm / i_pwm.sum(axis=0) # normalize
pwms.append(i_pwm)
pwms = np.array(pwms)
pwms = inflate_normalize(pwms=pwms, exp=exp)
return pwms
def motif_from_pwm(alphabet_list, pwm):
"""Create motif string from the PWM."""
seq = ""
length = pwm.shape[1]
for i in range(length):
alphabet_dist = pwm[:, i]
c = np.random.choice(a=alphabet_list, p=alphabet_dist)
seq += c
return seq
def make_artificial_dataset(alphabet='ACGT', motif_length=6, sequence_length=100,
n_sequences=1000, n_motives=2, p=0.2, random_state=1):
"""Generate artificial dataset.
    Returns: motives - list of motifs used in each sequence
             seqs - dataset as a list of (id, sequence) tuples
             binary_seq - a string of 0's & 1's marking motif positions, usable for computing an ROC score.
"""
    random.seed(random_state)
    np.random.seed(random_state)  # numpy's RNG is used in get_pwms and motif_from_pwm
    alphabet_list = [c for c in alphabet]
pwms = get_pwms(alphabet=alphabet, num=n_motives, length=motif_length, exp=2 - p)
    # integer division: these lengths are used with range() and string repetition
    sequence_length = sequence_length // n_motives
    flanking_length = (sequence_length - motif_length) // 2
n_seq_per_motif = n_sequences
counter = 0
seqs = []
motives = []
for i in range(n_seq_per_motif):
total_seq = ''
motif = []
for j in range(n_motives):
left_flanking = random_string(flanking_length, alphabet_list)
right_flanking = random_string(flanking_length, alphabet_list)
noisy_motif = motif_from_pwm(alphabet_list, pwms[j])
seq = left_flanking + noisy_motif + right_flanking
total_seq += seq
motif.append(noisy_motif)
seqs.append(('ID%d' % counter, total_seq))
motives.append(motif)
counter += 1
binary_skeleton = '0' * flanking_length + \
'1' * motif_length + '0' * flanking_length
binary_seq = binary_skeleton * n_motives
return motives, seqs, binary_seq
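# Minimal usage sketch (the parameter values below are illustrative, not prescribed
# by this module):
if __name__ == '__main__':
    motives, seqs, binary_seq = make_artificial_dataset(
        alphabet='ACGT', motif_length=6, sequence_length=100,
        n_sequences=5, n_motives=2, p=0.2, random_state=1)
    print('%d sequences generated' % len(seqs))
    print('first record: %s' % str(seqs[0]))
    print('motifs in first record: %s' % str(motives[0]))
    print('binary skeleton length: %d' % len(binary_seq))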
|
At Group Travel New Zealand, we offer your group a very high level of satisfaction, and strive to meet or exceed your expectations. We're a 100% New Zealand owned tour company, specialising in small group travel.
We believe New Zealand is the best place in the world for you to visit. We enjoy a safe natural environment and diverse landscapes. Our Maori culture is authentic and flourishing, our food and wines are world class and as a people we are open minded, unpretentious, friendly and have an appetite for the outdoors and adventure.
Our signature tour of the North Island takes you 'off the beaten track', away from the crowds. The itinerary is rich in activities, food and wine, and stunning scenic beauty. With little else to spend on during this tour, we challenge you to find better value.
So why not contact us today, we would love to discuss our tours or create something special that you may be looking for.
|
from unittest import mock
from know_me import serializers, views
@mock.patch("know_me.views.DRYPermissions.has_permission", autospec=True)
def test_check_permissions(mock_dry_permissions):
"""
The view should check for model permissions.
"""
view = views.AcceptedAccessorListView()
view.check_permissions(None)
assert mock_dry_permissions.call_count == 1
def test_get_queryset(api_rf, km_user_accessor_factory, user_factory):
"""
The view should operate on the accessors owned by the requesting user.
"""
user = user_factory()
api_rf.user = user
km_user_accessor_factory(is_accepted=True)
km_user_accessor_factory(is_accepted=True, user_with_access=user)
km_user_accessor_factory(is_accepted=False, user_with_access=user)
view = views.AcceptedAccessorListView()
view.request = api_rf.get("/")
expected = user.km_user_accessors.filter(is_accepted=True)
assert list(view.get_queryset()) == list(expected)
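# For reference, a get_queryset implementation that satisfies the test above could
# look like the following sketch (hypothetical -- the real implementation lives in
# know_me.views and may differ):
#
#     def get_queryset(self):
#         return self.request.user.km_user_accessors.filter(is_accepted=True)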
def test_get_serializer_class():
"""
Test getting the serializer class the view uses.
"""
view = views.AcceptedAccessorListView()
assert view.get_serializer_class() == serializers.KMUserAccessorSerializer
|
Notes on Sony VAIO wireless keyboard and mouse drivers, gathered from Sony support pages and user forums. If the wireless keyboard supplied with a VAIO computer (for example a VPCL-series all-in-one) stops responding, the usual advice is to check Device Manager for errors, update or reinstall the Bluetooth and wireless receiver drivers, remove the drivers manually and let Windows reinstall them, or run the VAIO Care utility; a wired USB keyboard and mouse can be used to navigate in the meantime.
Several users report the wireless keyboard stopping after an upgrade to Windows 10 or after removing third-party drivers; in those cases a System Restore to a point before the change is suggested. Sony's support site offers the relevant downloads: wireless keyboard and optical mouse device drivers, keyboard receiver firmware with instructions for pairing the wireless keyboard with a VAIO computer, the VAIO Z Canvas VJZ12A wireless keyboard/touchpad firmware for Windows 10, and a VAIO Care update addressing the Wi-Fi WPA2 vulnerability.
On the VAIO Tap 11 the wireless keyboard also acts as the screen cover, and its firmware may need to be reinstalled after a clean install of Windows 10. The VGP-WKB1 wireless mouse/keyboard combo is advertised as working with Sony VAIO laptops and desktops, but its driver is not listed on Sony's driver download site, and one user reports that neither Windows nor Linux recognizes it without a driver (dmesg shows only a new full-speed USB device). Sony VAIO desktop systems that ship with a wireless mouse and keyboard can occasionally develop connection problems; a SonyListens video demonstrates how to sync the wireless keyboard and mouse with VAIO computers.
|
import simplejson
from django.conf import settings
from django.http import Http404, HttpResponse
from django.shortcuts import get_object_or_404
from django.contrib import comments
from django.contrib.comments.views.moderation import perform_flag, perform_delete
from django.views.decorators.csrf import csrf_protect
@csrf_protect
def flag(request, comment_id):
"""
The view overrides the built-in view django.contrib.comments.moderation.flag()
Reasons to override:
1) Not compatible with Ajax calls
@login_required is not used as it redirects to login-page and Ajax calls
cannot be completed as expected.
1) Error handling cannot be done using response attributes
"""
if not request.user.is_authenticated():
return HttpResponse('Unauthorized', status=401)
if request.method == "POST" and \
request.is_ajax():
comment = get_object_or_404(comments.get_model(), pk=comment_id, site__pk=settings.SITE_ID)
perform_flag(request, comment)
data = {"success": True}
data = simplejson.dumps(data)
return HttpResponse(data, mimetype="application/json")
else:
raise Http404
@csrf_protect
def delete_my_comment(request, comment_id):
"""
Provide user to delete his own comment.
Note: We are not removing the comment in the database!
We are just setting comment field `comment.is_removed = True`
Thus if anyone re-comment the same "exact" comment previously posted,
will not be saved!
django.contrib.comments.views.moderation.delete()
provides Moderators to perform this operation!
"""
if not request.user.is_authenticated():
return HttpResponse(status=401)
if request.method == 'POST' and \
request.is_ajax():
data = {}
comment = get_object_or_404(comments.get_model(), pk=comment_id, site__pk=settings.SITE_ID)
if request.user == comment.user:
perform_delete(request, comment)
data['success'] = True
else:
raise Http404
return HttpResponse(simplejson.dumps(data), mimetype="application/json")
else:
raise Http404
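# Hypothetical URL wiring for these views (module path and URL names below are
# assumptions for illustration, not taken from this project):
#
#     # urls.py
#     from django.conf.urls import url
#     from myapp.comments import views
#
#     urlpatterns = [
#         url(r'^comments/flag/(?P<comment_id>\d+)/$', views.flag,
#             name='comment-flag'),
#         url(r'^comments/(?P<comment_id>\d+)/delete-my/$', views.delete_my_comment,
#             name='comment-delete-my'),
#     ]
#
# Both views require an authenticated user and an Ajax POST request, and return
# {"success": true} as JSON on success.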
|
NAB BOOTH #SL5016, LAS VEGAS, NV—April 7, 2019 — NewTek announced today it is collaborating with Sony to bring the BRC-X400, Sony’s latest 4K Pan-Tilt-Zoom (PTZ) camera, to market with support for NDI®|HX, the key technology behind the world’s largest and fastest growing community of IP video products. Sony is a clear leader in the global transition to IP-based technologies, with decades of experience building revolutionary broadcast systems. Customer demand for NDI led to close cooperation between Sony and NewTek to add NDI|HX capability to the BRC-X400, Sony’s first IP 4K PTZ camera, which delivers 4K30p high picture quality, a powerful 80x zoom in FHD with Clear Image Zoom and Tele Convert Mode, and support for a wide range of camera control protocols including the 700 Protocol, CGI, and VISCA/VISCA over IP. The model comes with broadcast-friendly features such as tally lamps and genlock support, and enables efficient live-production setup by carrying power, image output, and camera control over a single Ethernet cable.
The true power of NDI is that it operates on standard gigabit networks and can be received and processed by software on computer systems, making this technology usable in almost any network environment today. Driven by the explosive growth of channels and online video distribution, this historic transition to IP-based video production led by NDI brings broadcast level quality to millions of users while helping broadcast networks and TV stations produce more content of higher quality in less time.
Sony will be introducing the BRC-X400 with built-in NDI during NAB 2019 at Exhibit C11001 in Las Vegas, NV from April 6-11.
|
# -*- encoding: utf-8 -*-
"""Test class for Product CLI
:Requirement: Product
:CaseAutomation: Automated
:CaseLevel: Component
:CaseComponent: ContentManagement
:TestType: Functional
:CaseImportance: High
:Upstream: No
"""
from fauxfactory import gen_alphanumeric
from fauxfactory import gen_string
from robottelo import ssh
from robottelo.api.utils import wait_for_tasks
from robottelo.cli.base import CLIReturnCodeError
from robottelo.cli.defaults import Defaults
from robottelo.cli.factory import CLIFactoryError
from robottelo.cli.factory import make_gpg_key
from robottelo.cli.factory import make_org
from robottelo.cli.factory import make_product
from robottelo.cli.factory import make_repository
from robottelo.cli.factory import make_sync_plan
from robottelo.cli.http_proxy import HttpProxy
from robottelo.cli.package import Package
from robottelo.cli.product import Product
from robottelo.cli.repository import Repository
from robottelo.config import settings
from robottelo.constants import FAKE_0_YUM_REPO
from robottelo.constants import FAKE_0_YUM_REPO_PACKAGES_COUNT
from robottelo.datafactory import invalid_values_list
from robottelo.datafactory import valid_data_list
from robottelo.datafactory import valid_labels_list
from robottelo.decorators import run_in_one_thread
from robottelo.decorators import tier1
from robottelo.decorators import tier2
from robottelo.decorators import upgrade
from robottelo.test import CLITestCase
class ProductTestCase(CLITestCase):
"""Product CLI tests."""
org = None
def setUp(self):
"""Tests for Lifecycle Environment via Hammer CLI"""
super(ProductTestCase, self).setUp()
if ProductTestCase.org is None:
ProductTestCase.org = make_org(cached=True)
@tier1
@upgrade
def test_positive_CRUD(self):
"""Check if product can be created, updated, synchronized and deleted
:id: 9d7b5ec8-59d0-4371-b5d2-d43145e4e2db
:expectedresults: Product is created, updated, synchronized and deleted
:BZ: 1422552
:CaseImportance: Critical
"""
desc = list(valid_data_list().values())[0]
gpg_key = make_gpg_key({'organization-id': self.org['id']})
name = list(valid_data_list().values())[0]
label = valid_labels_list()[0]
sync_plan = make_sync_plan({'organization-id': self.org['id']})
product = make_product(
{
'description': desc,
'gpg-key-id': gpg_key['id'],
'label': label,
'name': name,
'organization-id': self.org['id'],
'sync-plan-id': sync_plan['id'],
}
)
self.assertEqual(product['name'], name)
self.assertGreater(len(product['label']), 0)
self.assertEqual(product['label'], label)
self.assertEqual(product['description'], desc)
self.assertEqual(product['gpg']['gpg-key-id'], gpg_key['id'])
self.assertEqual(product['sync-plan-id'], sync_plan['id'])
# update
desc = list(valid_data_list().values())[0]
new_gpg_key = make_gpg_key({'organization-id': self.org['id']})
new_sync_plan = make_sync_plan({'organization-id': self.org['id']})
new_prod_name = gen_string('alpha', 8)
Product.update(
{
'description': desc,
'id': product['id'],
'gpg-key-id': new_gpg_key['id'],
'sync-plan-id': new_sync_plan['id'],
'name': new_prod_name,
}
)
product = Product.info({'id': product['id'], 'organization-id': self.org['id']})
self.assertEqual(product['name'], new_prod_name)
self.assertEqual(product['description'], desc)
self.assertEqual(product['gpg']['gpg-key-id'], new_gpg_key['id'])
self.assertNotEqual(product['gpg']['gpg-key-id'], gpg_key['id'])
self.assertEqual(product['sync-plan-id'], new_sync_plan['id'])
self.assertNotEqual(product['sync-plan-id'], sync_plan['id'])
# synchronize
repo = make_repository({'product-id': product['id'], 'url': FAKE_0_YUM_REPO})
Product.synchronize({'id': product['id'], 'organization-id': self.org['id']})
packages = Package.list({'product-id': product['id']})
repo = Repository.info({'id': repo['id']})
self.assertEqual(int(repo['content-counts']['packages']), len(packages))
self.assertEqual(len(packages), FAKE_0_YUM_REPO_PACKAGES_COUNT)
# delete
Product.remove_sync_plan({'id': product['id']})
product = Product.info({'id': product['id'], 'organization-id': self.org['id']})
self.assertEqual(len(product['sync-plan-id']), 0)
Product.delete({'id': product['id']})
wait_for_tasks(
search_query='label = Actions::Katello::Product::Destroy'
' and resource_id = {}'.format(product['id']),
max_tries=10,
)
with self.assertRaises(CLIReturnCodeError):
Product.info({'id': product['id'], 'organization-id': self.org['id']})
@tier2
def test_negative_create_with_name(self):
"""Check that only valid names can be used
:id: 2da26ab2-8d79-47ea-b4d2-defcd98a0649
:expectedresults: Product is not created
:CaseImportance: High
"""
for invalid_name in invalid_values_list():
with self.subTest(invalid_name):
with self.assertRaises(CLIFactoryError):
make_product({'name': invalid_name, 'organization-id': self.org['id']})
@tier2
def test_negative_create_with_label(self):
"""Check that only valid labels can be used
:id: 7cf970aa-48dc-425b-ae37-1e15dfab0626
:expectedresults: Product is not created
:CaseImportance: High
"""
product_name = gen_alphanumeric()
for invalid_label in (
gen_string('latin1', 15),
gen_string('utf8', 15),
gen_string('html', 15),
):
with self.subTest(invalid_label):
with self.assertRaises(CLIFactoryError):
make_product(
{
'label': invalid_label,
'name': product_name,
'organization-id': self.org['id'],
}
)
@run_in_one_thread
@tier2
def test_product_list_with_default_settings(self):
"""Listing product of an organization apart from default organization using hammer
does not return output if a defaults settings are applied on org.
:id: d5c5edac-b19c-4277-92fe-28d9b9fa43ef
:BZ: 1745575
:expectedresults: product/reporsitory list should work as expected.
"""
default_product_name = gen_string('alpha')
non_default_product_name = gen_string('alpha')
default_org = self.org
non_default_org = make_org()
default_product = make_product(
{'name': default_product_name, 'organization-id': default_org['id']}
)
non_default_product = make_product(
{'name': non_default_product_name, 'organization-id': non_default_org['id']}
)
for product in (default_product, non_default_product):
make_repository({'product-id': product['id'], 'url': FAKE_0_YUM_REPO})
Defaults.add({'param-name': 'organization_id', 'param-value': default_org['id']})
result = ssh.command('hammer defaults list')
self.assertTrue(default_org['id'] in "".join(result.stdout))
try:
# Verify --organization-id is not required to pass if defaults are set
result = ssh.command('hammer product list')
self.assertTrue(default_product_name in "".join(result.stdout))
result = ssh.command('hammer repository list')
self.assertTrue(default_product_name in "".join(result.stdout))
# verify that defaults setting should not affect other entities
product_list = Product.list({'organization-id': non_default_org['id']})
            self.assertEqual(non_default_product_name, product_list[0]['name'])
            repository_list = Repository.list({'organization-id': non_default_org['id']})
            self.assertEqual(non_default_product_name, repository_list[0]['product'])
finally:
Defaults.delete({'param-name': 'organization_id'})
result = ssh.command('hammer defaults list')
self.assertTrue(default_org['id'] not in "".join(result.stdout))
@tier2
def test_positive_assign_http_proxy_to_products(self):
"""Assign http_proxy to Products and perform product sync.
:id: 6af7b2b8-15d5-4d9f-9f87-e76b404a966f
:expectedresults: HTTP Proxy is assigned to all repos present
in Products and sync operation performed successfully.
:CaseImportance: Critical
"""
# create HTTP proxies
http_proxy_a = HttpProxy.create(
{
'name': gen_string('alpha', 15),
'url': settings.http_proxy.un_auth_proxy_url,
'organization-id': self.org['id'],
}
)
http_proxy_b = HttpProxy.create(
{
'name': gen_string('alpha', 15),
'url': settings.http_proxy.auth_proxy_url,
'username': settings.http_proxy.username,
'password': settings.http_proxy.password,
'organization-id': self.org['id'],
}
)
# Create products and repositories
product_a = make_product({'organization-id': self.org['id']})
product_b = make_product({'organization-id': self.org['id']})
repo_a1 = make_repository(
{'product-id': product_a['id'], 'url': FAKE_0_YUM_REPO, 'http-proxy-policy': 'none'}
)
repo_a2 = make_repository(
{
'product-id': product_a['id'],
'url': FAKE_0_YUM_REPO,
'http-proxy-policy': 'use_selected_http_proxy',
'http-proxy-id': http_proxy_a['id'],
}
)
repo_b1 = make_repository(
{'product-id': product_b['id'], 'url': FAKE_0_YUM_REPO, 'http-proxy-policy': 'none'}
)
repo_b2 = make_repository({'product-id': product_b['id'], 'url': FAKE_0_YUM_REPO})
# Add http_proxy to products
Product.update_proxy(
{
'ids': '{},{}'.format(product_a['id'], product_b['id']),
'http-proxy-policy': 'use_selected_http_proxy',
'http-proxy-id': http_proxy_b['id'],
}
)
# Perform sync and verify packages count
Product.synchronize({'id': product_a['id'], 'organization-id': self.org['id']})
Product.synchronize({'id': product_b['id'], 'organization-id': self.org['id']})
repo_a1 = Repository.info({'id': repo_a1['id']})
repo_a2 = Repository.info({'id': repo_a2['id']})
repo_b1 = Repository.info({'id': repo_b1['id']})
repo_b2 = Repository.info({'id': repo_b2['id']})
assert repo_a1['http-proxy']['http-proxy-policy'] == "use_selected_http_proxy"
assert repo_a2['http-proxy']['http-proxy-policy'] == "use_selected_http_proxy"
assert repo_b1['http-proxy']['http-proxy-policy'] == "use_selected_http_proxy"
assert repo_b2['http-proxy']['http-proxy-policy'] == "use_selected_http_proxy"
assert repo_a1['http-proxy']['id'] == http_proxy_b['id']
assert repo_a2['http-proxy']['id'] == http_proxy_b['id']
assert repo_b1['http-proxy']['id'] == http_proxy_b['id']
assert repo_b2['http-proxy']['id'] == http_proxy_b['id']
assert int(repo_a1['content-counts']['packages']) == FAKE_0_YUM_REPO_PACKAGES_COUNT
assert int(repo_a2['content-counts']['packages']) == FAKE_0_YUM_REPO_PACKAGES_COUNT
assert int(repo_b1['content-counts']['packages']) == FAKE_0_YUM_REPO_PACKAGES_COUNT
assert int(repo_b2['content-counts']['packages']) == FAKE_0_YUM_REPO_PACKAGES_COUNT
Product.update_proxy(
{'ids': '{},{}'.format(product_a['id'], product_b['id']), 'http-proxy-policy': 'none'}
)
repo_a1 = Repository.info({'id': repo_a1['id']})
repo_a2 = Repository.info({'id': repo_a2['id']})
repo_b1 = Repository.info({'id': repo_b1['id']})
repo_b2 = Repository.info({'id': repo_b2['id']})
assert repo_a1['http-proxy']['http-proxy-policy'] == "none"
assert repo_a2['http-proxy']['http-proxy-policy'] == "none"
assert repo_b1['http-proxy']['http-proxy-policy'] == "none"
assert repo_b2['http-proxy']['http-proxy-policy'] == "none"
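        # The assertions above show that Product.update_proxy propagates the product-level
        # policy to every repository in the product: repos originally created with 'none'
        # or with a different proxy first report 'use_selected_http_proxy' with
        # http_proxy_b, then 'none' after the second update.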
|
But that was sharply lower than a previous 32% jump in compensation.
Having the right technology and data experts is key for advisors using big data.
But most advisors say it will take a substantial market correction to convince investors to include fixed income in their portfolios.
When account addresses aren’t updated, the shares could end up in state coffers.
And increasingly more clients are interested in cryptocurrencies.
The wirehouse is offering DIY investors the same research capabilities already available to its own financial advisors.
Advisors anticipate more growth in 2019, and will offer new areas of expertise to make it so.
Another advisor’s unique offering, fees and investment performance are the top three reasons.
Financial planning is considered a big differentiator.
The market for smart beta has soared over the past few years. In FA-IQ's most popular video for 2018, Garrett Keyes sat down with Chris Brightman, CIO of Research Affiliates, to discuss this increasingly popular asset class in depth.
|