from collections import namedtuple
from .native_type import NativeType
from .containers import DelegatedWrite
import cmd_ir.instructions as i
Pair = namedtuple('Pair', 'left right min max')
class ArrayType(NativeType):
def __init__(self, elem_type, size):
super().__init__()
assert size > 0, "Size must be > 0, is %d" % size
assert elem_type.typename == 'int', "TODO"
self.elem_type = elem_type
self.typename = elem_type.typename + '[]'
self.nbt_type = i.NBTType.int # TODO
self.size = size
def __repr__(self):
return 'ArrayType(%s[%d])' % (self.elem_type.typename, self.size)
@property
def ir_type(self):
return i.VarType.nbt
def allocate(self, compiler, namehint):
return compiler.create_var(namehint, self.ir_type)
def as_variable(self, instance):
return instance
def run_constructor(self, compiler, instance, arguments):
assert len(arguments) <= self.size
compiler.array_support.allocate(self.size)
var = instance.value
array = compiler.insn_def(i.CreateNBTList(self.nbt_type))
init_val = self._init_val(compiler)
with compiler.compiletime():
for _ in range(self.size):
compiler.add_insn(i.NBTListAppend(array, init_val))
compiler.add_insn(i.NBTAssign(var, array))
for n, arg in enumerate(arguments):
compiler.array_support.set(var, n, arg.type.as_variable(arg.value))
def _init_val(self, compiler):
# TODO non-int defaults
return compiler.insn_def(i.CreateNBTValue(self.nbt_type, 0))
def dispatch_operator(self, compiler, op, left, right=None):
if op == '[]':
return ArrayElementHolder(compiler, self, left, right)
return super().dispatch_operator(compiler, op, left, right)
class ArrayElementHolder(DelegatedWrite):
def __init__(self, compiler, arrtype, array, index):
self._compiler = compiler
self.type = arrtype.elem_type
self.array = array.type.as_variable(array.value)
self.index = index.type.as_variable(index.value)
self.__got_val = None
@property
def value(self):
if self.__got_val is None:
self.__got_val = self.read(self._compiler)
return self.__got_val
def read(self, compiler):
return compiler.array_support.get(self.array, self.index)
def write(self, compiler, other):
var = other.type.as_variable(other.value)
compiler.array_support.set(self.array, self.index, var)
return other
class ArraySupport:
    """Emits shared array getter/setter functions that resolve a runtime
    index to a constant NBT path via a binary tree of range branches
    (NBT lists cannot be indexed by a runtime variable directly)."""
    index_type = i.VarType.i32
def __init__(self, compiler):
self.compiler = compiler
self.max_size = 0
self.getter = None
self.setter = None
def allocate(self, size):
self.max_size = max(self.max_size, size)
def finish(self):
if self.getter:
self.compiler.pragma('array_support_getter', self.max_size)
if self.setter:
self.compiler.pragma('array_support_setter', self.max_size)
def get(self, array, index):
self.lazy_load_get()
args = (array, index)
# TODO type
val = self.compiler.create_var('arrval', i.VarType.i32)
ret_args = (val,)
self.compiler.add_insn(i.Invoke(self.getter, args, ret_args))
return val
def set(self, array, index, value):
self.lazy_load_set()
args = (array, index, value)
self.compiler.add_insn(i.Invoke(self.setter, args, None))
def lazy_load_get(self):
if self.getter is None:
# TODO customize return type
self.getter = self.compiler.extern_function('_internal/array_get', (
(i.VarType.nbt, 'byval'), (self.index_type, 'byval')), (i.VarType.i32,))
def lazy_load_set(self):
if self.setter is None:
# TODO customise value type
self.setter = self.compiler.extern_function('_internal/array_set', (
(i.VarType.nbt, 'byref'), (self.index_type, 'byval'),
(i.VarType.i32, 'byval')), None)
@classmethod
def gen_getter(cls, top, size):
func = top.define_function('_internal/array_get')
arrparam = func.preamble.define(i.ParameterInsn(i.VarType.nbt, 'byval'))
indexparam = func.preamble.define(i.ParameterInsn(cls.index_type, 'byval'))
retvar = func.preamble.define(i.ReturnVarInsn(i.VarType.i32))
cls._gen_for(size, func, 'get', indexparam, cls._gen_getter, arrparam, retvar)
@classmethod
def gen_setter(cls, top, size):
func = top.define_function('_internal/array_set')
arrparam = func.preamble.define(i.ParameterInsn(i.VarType.nbt, 'byref'))
indexparam = func.preamble.define(i.ParameterInsn(cls.index_type, 'byval'))
valparam = func.preamble.define(i.ParameterInsn(i.VarType.i32, 'byval'))
cls._gen_for(size, func, 'set', indexparam, cls._gen_setter, arrparam, valparam)
@staticmethod
def _gen_getter(block, indexvar, indexval, arr, retvar):
path = i.VirtualString('[%d]' % indexval)
path_var = block._func.preamble.define(i.NBTSubPath(arr, path, retvar.type))
block.add(i.SetScore(retvar, path_var))
@staticmethod
def _gen_setter(block, indexvar, indexval, arr, value):
path = i.VirtualString('[%d]' % indexval)
path_var = block._func.preamble.define(i.NBTSubPath(arr, path, value.type))
block.add(i.SetScore(path_var, value))
@staticmethod
def _gen_for(size, func, prefix, indexparam, gen_callback, *cb_args):
entry = func.create_block('entry')
# Copy to local variable due to register allocation speedup
index = func.preamble.define(i.DefineVariable(indexparam.type))
entry.add(i.SetScore(index, indexparam))
def pair_name(pair):
return '%s_%d_%d' % (prefix, pair.min, pair.max)
def branch(func, index, pair):
return i.RangeBr(index, pair.min, pair.max,
func.get_or_create_block(pair_name(pair)), None)
        def callback(pair):
            block = func.get_or_create_block(pair_name(pair))
            block.defined = True
            # Internal nodes branch to whichever child range contains the
            # index; leaves (min == max) emit the actual get/set code.
            if pair.left:
                block.add(branch(func, index, pair.left))
            if pair.right:
                block.add(branch(func, index, pair.right))
            if pair.min == pair.max:
                gen_callback(block, index, pair.min, *cb_args)
root = generate_bin_tree(size, callback)
entry.add(i.Call(func.get_or_create_block(pair_name(root))))
entry.add(i.Return())
func.end()
def generate_bin_tree(size, callback):
assert size > 0
old_pairs = []
for n in range(size):
pair = Pair(None, None, n, n)
old_pairs.append(pair)
callback(pair)
while len(old_pairs) > 1:
pairs = []
waiting = None
for pair in old_pairs:
if waiting is None:
waiting = pair
else:
new_pair = Pair(waiting, pair, waiting.min, pair.max)
pairs.append(new_pair)
callback(new_pair)
waiting = None
if waiting is not None:
# Dangling node, occurs if size is not a power of 2
pairs.append(waiting)
callback(waiting)
old_pairs = pairs
return old_pairs[0]
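
# Minimal standalone sketch of how generate_bin_tree drives code generation
# (illustration only; the real callbacks above emit IR blocks rather than
# printing). For size=5 the callback sees the leaves [0,0]..[4,4] first,
# then the merged ranges [0,1], [2,3], [0,3] and the root [0,4], with the
# dangling leaf [4,4] revisited on each round until the root absorbs it:
#
#   def show(pair):
#       kind = 'leaf' if pair.min == pair.max else 'node'
#       print('%s [%d, %d]' % (kind, pair.min, pair.max))
#
#   root = generate_bin_tree(5, show)
#   assert (root.min, root.max) == (0, 4)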
|
If you're providing care for a loved one with dementia, it's important to know that stress can threaten your health.
Some effects of stress are noticeable right away. We sweat, our head pounds, we might experience unpleasant digestive effects, and we have trouble falling asleep.
Once the stressful situation goes away, we'll probably go back to normal. But experts tell us that over time, stress damages almost every body system, from our heart and lungs to our brains. In March 2016, the American College of Cardiology reported that people with a greater amount of activity in the stress center of the brain, as shown by brain imaging, were more likely to later suffer a heart attack or stroke. According to study author Dr. Ahmed Tawakol of Massachusetts General Hospital, the negative effects of stress are "on par with smoking, high blood pressure, high cholesterol and diabetes."
Another recent study, this one from Albert Einstein College of Medicine, published in the journal Alzheimer's Disease & Associated Disorders, showed that stress puts us at higher risk of mild cognitive impairment (MCI)—a condition that is often a precursor to full-blown Alzheimer's disease. You've probably noticed that stress makes it hard to concentrate and remember things. The results of this study suggest that the effect can be cumulative and permanent.
A recent Caring Right at Home poll showed that almost 75 percent of our readers provide some level of care for loved ones who are living with health challenges. These caregivers are all too familiar with the worry, financial challenges and emotional distress that can go with the job. Maybe they also are dealing with their own health problems, perhaps holding down a full-time job, or dealing with the "Sandwich Generation" issues as described in the previous article. And when their loved one has Alzheimer's disease or other dementia, their stress level is likely to be even higher.
Recent studies highlight a particular irony: The stress from caring for a loved one with dementia raises the caregiver's own risk. For example, a much-publicized 2010 study from Johns Hopkins Medicine reported that husbands and wives who care for a spouse with dementia are six times more likely to develop dementia themselves!
Can Alzheimer's caregivers lower their stress level?
Experts tell us that stress results not only from the challenges we face, but also from the way we react to these challenges. So for Alzheimer's caregivers, it's a two-sided process: improving the situations that cause stress, and handling stress in a way that minimizes the impact on our health. Says Dr. Edwin S. Lowe of the Albert Einstein College research team, "Fortunately, perceived stress is a modifiable risk factor for cognitive impairment, making it a potential target for treatment."
1. Take a break. For most Alzheimer’s caregivers, this is the top way to reduce stress. Take time for yourself—for exercise, for your own healthcare, to laugh, to relax with activities that nurture your body, mind and spirit. Don't think of this as self-indulgent; think of it as a way to save your sanity and make you a better, more resilient caregiver.
2. Ask for help. Many caregivers will ruefully respond to suggestion No. 1 with, "Easy for you to say! How will I find the time? Who will care for my loved one?" Look into options. Ask if family and friends would stay with your loved one to provide you some downtime. National, state and local agencies offer support services for the increasing numbers of Americans with dementia. To find out what's available, begin with the Eldercare Locator (www.eldercare.gov) or your local Agency on Aging. Check out respite care offered by adult day care centers, or by assisted living or other residential facilities. Many people with dementia do best in their own homes, in familiar surroundings and minimized disruption, making in-home care a very good choice.
3. Select dementia-friendly support services. For your loved one, be sure that activities, events and services are appropriate for people with memory loss, meeting their social and emotional needs in a nonjudgmental setting. And for yourself, try a dementia caregiver support group, in person or online, to find an environment for laughter, tears, learning and sharing what you've learned with others who truly "get it." If your family decides to hire in-home care, be sure the caregiver is specifically trained in the needs of those with dementia. (You can find an overview of those services and expectations in the June 2016 issue of Caring Right at Home.) In-home care shouldn't merely consist of "parent sitting." While caregivers are out, the life of the person with dementia should be enriched with appropriate, meaningful activities and interactions.
4. Sign up for a caregiver class. Either in a classroom or online, dementia care training can increase your confidence and coach you on time-tested techniques to modify your care—and perhaps your home—to allow you to care for your loved one with less stress. Understanding the causes of your loved one's personality and behavior changes can help you anticipate problems and create innovative solutions. Contact the Alzheimer's Association, the National Institute on Aging or your local Agency on Aging to find a class in your area.
5. Try meditation or other relaxation techniques. Meditation, tai chi, mindfulness practices and yoga can help you lower stress through focus of attention, controlled breathing and an open attitude—letting thoughts and distractions come and go without judging them, helping you to clear your mind, release tension and even improve strength and flexibility. In May 2016, UCLA researchers even announced that yoga can delay memory problems in older adults, in part through creating resilience to stress.
6. Be kind to yourself. Are you your own harshest critic? Many caregivers report they never feel they're doing quite a good enough job. Remember—there is nothing you can do to restore your loved one to his or her former condition. Helping your loved one at this time is a labor of love and a great challenge. Be alert for that little inner voice that second-guesses your caring, and take steps to silence it.
7. Talk to a counselor who is knowledgeable about caregiver issues. Serving as a caregiver offers many rewards—but it can be an emotional minefield, especially when you add in the changes to your loved one's personality resulting from dementia. ("I thought Dad had long ago forgiven me for wrecking the car in eleventh grade! But he keeps bringing it up.") A counselor can help you identify your specific stress triggers, cope with the mixed emotions of caregiving, and learn cognitive and behavioral tricks to consciously lower your stress.
|
# Minim_graph.py
"""
Module to create an RDF Minim graph through a simple set of API calls
"""
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2011-2013, University of Oxford"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import rdflib
from rocommand.ro_namespaces import RDF
from iaeval.ro_minim import MINIM
class Minim_graph(object):
"""
Class to create abstraction for constructing a Minim graph.
The actual format of the resulting graph is implementation-dependent.
This implementation builds an RDF graph, and serializes it in one of
a number of formats. The default format is Turtle/N3.
"""
def __init__(self, base=None):
self._base = base
self._minimgr = rdflib.Graph()
return
def prefix(self, prefix, nsuri):
self._minimgr.bind(prefix, rdflib.Namespace(nsuri))
return
def checklist(self, purpose=None, model=None, target="{+targetro}"):
cls = rdflib.URIRef("#ChecklistConstraints", base=self._base)
cln = rdflib.BNode()
clt = rdflib.Literal(target)
clp = rdflib.Literal(purpose)
clm = rdflib.URIRef(model, base=self._base)
self._minimgr.add( (cls, MINIM.hasChecklist, cln) )
self._minimgr.add( (cln, RDF.type, MINIM.Checklist) )
self._minimgr.add( (cln, MINIM.forTargetTemplate, clt) )
self._minimgr.add( (cln, MINIM.forPurpose, clp) )
self._minimgr.add( (cln, MINIM.toModel, clm) )
return cln
def model(self, modelid, itemlist):
model = rdflib.URIRef(modelid, base=self._base)
self._minimgr.add( (model, RDF.type, MINIM.Model) )
for (level, item) in itemlist:
self._minimgr.add( (model, level, item) )
return model
def item(self, seq=None, level="MUST", ruleid=None):
item = rdflib.BNode()
rule = rdflib.URIRef(ruleid, base=self._base)
self._minimgr.add( (item, RDF.type, MINIM.Requirement) )
self._minimgr.add( (item, MINIM.isDerivedBy, rule) )
if seq:
self._minimgr.add( (item, MINIM.seq, rdflib.Literal(seq)) )
levelmap = (
{ "MUST": MINIM.hasMustRequirement
, "SHOULD": MINIM.hasShouldRequirement
, "MAY": MINIM.hasMayRequirement
})
return (levelmap[level], item)
def rule(self,
ruleid, ForEach=None, ResultMod=None, Exists=None, Min=0, Max=None,
Aggregates=None, IsLive=None,
Command=None, Response=None,
Show=None, Pass="None", Fail="None", NoMatch="None"):
rule = rdflib.URIRef(ruleid, base=self._base)
if ForEach:
ruletype = MINIM.QueryTestRule
querynode = rdflib.BNode()
self._minimgr.add( (rule, MINIM.query, querynode) )
self._minimgr.add( (querynode, MINIM.sparql_query, rdflib.Literal(ForEach)) )
if ResultMod:
self._minimgr.add( (querynode, MINIM.result_mod, rdflib.Literal(ResultMod)) )
if Exists:
existsnode = rdflib.BNode()
self._minimgr.add( (rule, MINIM.exists, existsnode) )
self._minimgr.add( (existsnode, MINIM.sparql_query, rdflib.Literal(Exists)) )
if Min:
self._minimgr.add( (rule, MINIM.min, rdflib.Literal(Min)) )
if Max:
self._minimgr.add( (rule, MINIM.max, rdflib.Literal(Max)) )
if Aggregates:
self._minimgr.add( (rule, MINIM.aggregatesTemplate, rdflib.Literal(Aggregates)) )
if IsLive:
self._minimgr.add( (rule, MINIM.isLiveTemplate, rdflib.Literal(IsLive)) )
elif Exists:
ruletype = MINIM.QueryTestRule
existsnode = rdflib.BNode()
self._minimgr.add( (rule, MINIM.exists, existsnode) )
self._minimgr.add( (existsnode, MINIM.sparql_query, rdflib.Literal(Exists)) )
elif Command:
ruletype = MINIM.SoftwareEnvironmentRule
self._minimgr.add( (rule, MINIM.command, rdflib.Literal(Command)) )
self._minimgr.add( (rule, MINIM.response, rdflib.Literal(Response)) )
else:
raise ValueError("Unrecognized requirement rule pattern")
self._minimgr.add( (rule, RDF.type, ruletype) )
if Show:
self._minimgr.add( (rule, MINIM.show, rdflib.Literal(Show)) )
if Pass:
self._minimgr.add( (rule, MINIM.showpass, rdflib.Literal(Pass)) )
if Fail:
self._minimgr.add( (rule, MINIM.showfail, rdflib.Literal(Fail)) )
if NoMatch:
self._minimgr.add( (rule, MINIM.showmiss, rdflib.Literal(NoMatch)) )
return rule
def collectlist(self, rule, listprop, listvars):
for c in listvars:
listnode = rdflib.BNode()
self._minimgr.add( (rule, listprop, listnode) )
self._minimgr.add( (listnode, RDF.type, MINIM.ValueCollector) )
# Note: strips off leading '?' from variable names
self._minimgr.add( (listnode, MINIM.collectVar, rdflib.Literal(c["collectvar"][1:])) )
self._minimgr.add( (listnode, MINIM.collectList, rdflib.Literal(c["collectlist"][1:])) )
return
def serialize(self, outstr, format="turtle"):
self._minimgr.serialize(destination=outstr, format=format)
return
def graph(self):
return self._minimgr
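
if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module; the URIs and
    # the command/response values are illustrative only): build a one-rule
    # checklist and print it as Turtle.
    import sys
    mg = Minim_graph(base="http://example.org/minim")
    mg.rule("#rule1", Command="python --version", Response="Python 2.7")
    item = mg.item(seq="1", level="MUST", ruleid="#rule1")
    mg.model("#model1", [item])
    mg.checklist(purpose="ready-to-release", model="#model1")
    mg.serialize(sys.stdout, format="turtle")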
# End.
|
Yahoo! News – Japanese woman tackles burglar to save designer wallet.
"I was scared, but I was desperate because he was trying to steal my bag with my precious Louis Vuitton wallet inside,"
|
import os
import urllib
import predix.config
import predix.service
class WeatherForecast(object):
"""
Weather Forecast Service
.. important::
Deprecated
"""
def __init__(self, *args, **kwargs):
super(WeatherForecast, self).__init__(*args, **kwargs)
key = predix.config.get_env_key(self, 'uri')
self.uri = os.environ.get(key)
if not self.uri:
raise ValueError("%s environment unset" % key)
key = predix.config.get_env_key(self, 'zone_id')
self.zone_id = os.environ.get(key)
if not self.zone_id:
raise ValueError("%s environment unset" % key)
self.service = predix.service.Service(self.zone_id)
def authenticate_as_client(self, client_id, client_secret):
self.service.uaa.authenticate(client_id, client_secret)
def get_weather_forecast_days(self, latitude, longitude,
days=1, frequency=1, reading_type=None):
"""
Return the weather forecast for a given location.
::
results = ws.get_weather_forecast_days(lat, long)
for w in results['hits']:
print w['start_datetime_local']
print w['reading_type'], w['reading_value']
For description of reading types:
https://graphical.weather.gov/xml/docs/elementInputNames.php
"""
params = {}
# Can get data from NWS1 or NWS3 representing 1-hr and 3-hr
# intervals.
if frequency not in [1, 3]:
raise ValueError("Reading frequency must be 1 or 3")
params['days'] = days
params['source'] = 'NWS' + str(frequency)
params['latitude'] = latitude
params['longitude'] = longitude
        if reading_type:
            # Not using urllib.quote_plus() because it encodes spaces as a
            # '+', which the service interprets as an "and" search; encode
            # spaces as '%20' ourselves instead.
            reading_type = reading_type.replace(' ', '%20')
            params['reading_type'] = reading_type
url = self.uri + '/v1/weather-forecast-days/'
return self.service._get(url, params=params)
def get_weather_forecast(self, latitude, longitude, start, end,
frequency=1, reading_type=None):
"""
Return the weather forecast for a given location for specific
datetime specified in UTC format.
::
results = ws.get_weather_forecast(lat, long, start, end)
for w in results['hits']:
print w['start_datetime_local']
print w['reading_type'], '=', w['reading_value']
For description of reading types:
https://graphical.weather.gov/xml/docs/elementInputNames.php
"""
params = {}
# Can get data from NWS1 or NWS3 representing 1-hr and 3-hr
# intervals.
if frequency not in [1, 3]:
raise ValueError("Reading frequency must be 1 or 3")
params['source'] = 'NWS' + str(frequency)
params['latitude'] = latitude
params['longitude'] = longitude
params['start_datetime_utc'] = start
params['end_datetime_utc'] = end
        if reading_type:
            # Not using urllib.quote_plus() because it encodes spaces as a
            # '+', which the service interprets as an "and" instead of a
            # space.
            reading_type = reading_type.replace(' ', '%20')
            params['reading_type'] = reading_type
url = self.uri + '/v1/weather-forecast-datetime/'
return self.service._get(url, params=params)
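
if __name__ == '__main__':
    # Minimal usage sketch (not part of the original module). It assumes
    # the URI / zone-id environment variables derived by
    # predix.config.get_env_key are already set, and that the client
    # credentials below are valid UAA credentials; coordinates are
    # illustrative.
    ws = WeatherForecast()
    ws.authenticate_as_client('my-client-id', 'my-client-secret')
    results = ws.get_weather_forecast_days(43.4, -70.7, days=1)
    for w in results['hits']:
        print w['start_datetime_local'], w['reading_type'], w['reading_value']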
|
We also had the chance to connect with a host of CSOs and government representatives at TechCamp Kosovo, focused on combating corruption across the Western Balkans. Teaming up with the great folks at Democracy Plus, we highlighted the reasons behind the massive success of their local implementation (in both Albanian and Serbian) of NDI’s FixMyCommunity DemTool, which has gathered over 1,000 citizen reports and helped municipal governments address and fix hundreds of issues across Kosovo since its launch just over a year ago. The key lessons learned from the implementation of this platform – developing strong relationships with local institutions (like mayors’ offices, municipal councils, etc.) and smartly advertising the platform to generate awareness and interest – may not seem like rocket science, but served to many of us at the Camp as a powerful reminder that the technology is often the easy part. It’s the human integration that takes the most work and is ultimately the most important piece of the puzzle.
We had the chance to talk FixMyCommunity again at the PeaceTech Exchange in Macedonia, hosted by our friends at Democracy Lab. Although structured a bit differently from the TechCamps, the goal of these “PTXs” remains the same – connect local change makers both in and out of government with data, tools, technologies and strategies that can solve important issues in their communities. At the end of the three-day event, one of the winning “pitches” outlined a local implementation of the FixMyCommunity DemTool in Gostivar, a diverse municipality located about an hour outside of Skopje, the Macedonian capital. Just a few weeks later, supported by NDI, the winning pitch by a CSO called Nexus Civil Concept turned into what is now a growing and active FixMyCommunity site available in both Macedonian and Albanian.
We’ll of course be keeping an eye on all these projects as they move forward (if you happen to live in either Kosovo or Macedonia, we definitely encourage you to check out those instances of FixMyCommunity!), and even beyond that look forward to future opportunities to interact with all the awesome innovators that we met at these inspirational sessions.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 24/mag/2015
@author: koala
'''
from keepnote.gui import extension
import sys
try:
import pygtk
pygtk.require("2.0")
except ImportError:
    pass
try:
import gtk
import gtk.glade
except ImportError:
    sys.exit(1)
class debugView:
"""debug view"""
def __init__(self,path):
self.gladefile = path + "/debugViewXML.glade"
self.wTree = gtk.glade.XML(self.gladefile)
dic = {"on_CloseBtn_clicked" : self.on_CloseBtn_clicked}
self.wTree.signal_autoconnect(dic)
def debugTxt(self, iStr):
#obj = self.builder.get_object("debugDialog")
obj = self.wTree.get_widget("debugDialog")
obj.show()
#txtEdit = self.builder.get_object("debugTxt")
txtEdit = self.wTree.get_widget("debugTxt")
txtEdit.get_buffer().set_text(iStr)
def on_CloseBtn_clicked(self,sender):
obj = self.wTree.get_widget("debugDialog")
obj.destroy()
class Extension (extension.Extension):
lPath = ""
def __init__(self, app):
"""Initialize extension"""
extension.Extension.__init__(self, app)
self.app = app
def get_depends(self):
return [("keepnote", ">=", (0, 7, 1))]
def on_add_ui(self, window):
"""Initialize extension for a particular window"""
# add menu options
self.add_action(
window, "Set password", _("Set password"),
lambda w: self.on_setPassword(
window, window.get_notebook()),
tooltip=_("Set password for the notebook"))
# TODO: Fix up the ordering on the affected menus.
self.add_ui(window,
"""
<ui>
<menubar name="main_menu_bar">
<menu action="File">
<menuitem action="Set password"/>
</menu>
</menubar>
</ui>
""")
self.lPath = self.get_base_dir(False)
self.hwg = debugView(self.lPath)
        # NOTE: this call has no effect; left over from development.
        window.get_notebook()
def on_setPassword(self, window, notebook):
"""Callback from gui for importing a plain text file"""
        # self.hwg.debugTxt(
        # """
        # This is a debug window where you can write more or less whatever
        # you want. Let's try writing something on several lines, just to
        # see how it works!
        # """)
sys.path.append(self.lPath)
        # NOTE: m_testo, m_cript and toHex are not defined anywhere in this
        # extension; this leftover debugging call will raise a NameError if
        # reached.
        self.hwg.debugTxt(m_testo + "\n" + toHex(m_cript))
|
Some commenters on this post have suggested that this is simply not possible as a matter of text. But it seems to me that one can think of other parallel sentences where it is indeed permissible, given the context and common sense, to think that the “during” clause modifies both sets of verbs.
The soldiers were authorized to shoot any Germans they encountered in the trenches during the war.
It seems permissible to read this to be limited to those who are both shot and encountered during the war — you can’t shoot somebody 20 years later just because you saw them in the trenches back then.
The trial judge may exclude from the courtroom any spectator who is disruptive during the trial.
Again, it seems permissible to read “during the trial” to modify both “exclude” and “is disruptive.” The judge couldn’t necessarily exclude people who’d been disruptive some other time, and he couldn’t necessarily exclude people permanently from that room once the trial was over.
Don’t say “hi” to anybody you see at the racetrack.
One can certainly think of other constructions that might not be read this way, but I hope these examples show that this kind of reading is at a minimum grammatically possible, if the purpose or context of the sentence gives you reason to think it is meant that way.
|
from datetime import datetime
from flask import Blueprint, redirect, request, Response, render_template, url_for
from flask_login import current_user, login_required
import requests
from sqlalchemy.exc import ProgrammingError
from pipet.models import db
from pipet.sources.zendesk import ZendeskAccount
from pipet.sources.zendesk.forms import CreateAccountForm, DestroyAccountForm
from pipet.sources.zendesk.models import Base, SCHEMANAME, Ticket
from pipet.sources.zendesk.tasks import sync
blueprint = Blueprint(SCHEMANAME, __name__, template_folder='templates')
@blueprint.route('/')
@login_required
def index():
return render_template('zendesk/index.html')
@blueprint.route('/activate', methods=['GET', 'POST'])
@login_required
def activate():
session = current_user.organization.create_session()
form = CreateAccountForm(obj=current_user.organization.zendesk_account)
account = current_user.organization.zendesk_account
if form.validate_on_submit():
if not account:
account = ZendeskAccount()
account.subdomain = form.subdomain.data
account.admin_email = form.admin_email.data
account.api_key = form.api_key.data
account.organization_id = current_user.organization.id
db.session.add(account)
db.session.commit()
return redirect(url_for('zendesk.index'))
if account:
form.subdomain.data = account.subdomain
form.admin_email.data = account.admin_email
form.api_key.data = account.api_key
return render_template('zendesk/activate.html', form=form)
@blueprint.route('/deactivate', methods=['GET', 'POST'])
@login_required
def deactivate():
account = current_user.organization.zendesk_account
form = DestroyAccountForm()
if form.validate_on_submit() and form.drop.data:
account.destroy_target()
account.destroy_trigger()
account.drop_all()
db.session.add(account)
db.session.commit()
return redirect(url_for('zendesk.index'))
    return render_template('zendesk/deactivate.html', form=form)
@blueprint.route('/reset')
@login_required
def reset():
session = current_user.organization.create_session()
current_user.organization.zendesk_account.drop_all(session)
current_user.organization.zendesk_account.create_all(session)
return redirect(url_for('zendesk.index'))
@blueprint.route("/hook", methods=['POST'])
def hook():
if not request.authorization:
return ('', 401)
    account = ZendeskAccount.query.filter(
        (ZendeskAccount.subdomain == request.authorization.username) &
        (ZendeskAccount.api_key == request.authorization.password)).first()
    if not account:
        return ('', 401)
    scoped_session = account.organization.create_scoped_session()
    session = scoped_session()
ticket_id = request.get_json()['id']
resp = requests.get(account.api_base_url +
'/tickets/{id}.json?include=users,groups'.format(
id=ticket_id),
auth=account.auth)
ticket, _ = Ticket.create_or_update(resp.json()['ticket'], account)
session.add_all(ticket.update(resp.json()))
session.add(ticket)
resp = requests.get(account.api_base_url +
'/tickets/{id}/comments.json'.format(id=ticket.id), auth=account.auth)
session.add_all(ticket.update_comments(resp.json()['comments']))
session.commit()
return ('', 204)
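
# Minimal registration sketch (not part of this module; the import path is
# an assumption about how the package exposes this blueprint):
#
#   from pipet.sources.zendesk.views import blueprint as zendesk_blueprint
#   app.register_blueprint(zendesk_blueprint, url_prefix='/zendesk')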
|
When people think of a great body, one of the first things that comes to mind is six-pack abs. In the world of fitness, though, a great body is not always about abs; it can go beyond that. Many people prefer to tone their body to get a perfect curvature with a noticeable amount of muscle on certain parts of the body.
So if you are among those who are keen on getting a perfect body, i.e. building muscle, then you will need the help of a fitness trainer. Most Toronto fitness trainers can help people make that dream a reality.
Personal trainers can help anyone get in great shape without spending hour after hour in the gym with various pieces of equipment. The personal trainers will come up with personalized exercises and a proper nutrition chart for every individual. This is because every person has a different natural body that responds to diet and exercise in a different way. Thus, personal trainers can easily design exercises that help people gain mass.
A fitness trainer is not just a trainer but can also act as a great motivator, coach and educator. Being certified trainers, they can easily tell people about the right training program to help them reach the right body shape. For this, a Toronto fitness trainer can coach people on proper nutrition. They can also train people on the right exercises to build or reduce muscle.
How Do They Help People?
As you might know, in order to get a perfect body, three things are very important: diet or nutrition, cardio, and proper exercise. Only an expert fitness trainer can guide clients on all three factors.
Without a proper diet, one won't be able to meet one's fitness goals. The fitness trainer can ensure that a person is eating the right kinds of meals to provide the body with essential nutrients, which help protect the body from injury during workouts. At the same time, excess calories should be burnt off through exercise.
A Toronto fitness trainer will explain that cardio workouts help maintain the body's metabolism. Cardio workouts like jogging or skipping can help the body achieve overall fitness.
Along with the above two, exercises like reverse crunches or push-ups form the foundation of building the body. Together with cardio, proper exercise supports strength-building efforts, which in turn helps deliver the desired results.
The most important reason most people fail in their attempt to build their body the right way is a lack of willpower. Moreover, doing exercises improperly can have an adverse effect. This is why one should take the help of a Toronto fitness trainer. The trainer can motivate people to stick to their workout plan and achieve their goal far more easily. The best way to shape the body correctly is to enlist an expert fitness trainer. Check out these amazing tips on how you can get the most out of your personal trainer!
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_pubsub_topic_info
description:
- Gather info for GCP Topic
short_description: Gather info for GCP Topic
version_added: '2.8'
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
project:
description:
- The Google Cloud Platform project to use.
type: str
auth_kind:
description:
- The type of credential used.
type: str
required: true
choices:
- application
- machineaccount
- serviceaccount
service_account_contents:
description:
- The contents of a Service Account JSON file, either in a dictionary or as a
JSON string that represents it.
type: jsonarg
service_account_file:
description:
- The path of a Service Account JSON file if serviceaccount is selected as type.
type: path
service_account_email:
description:
- An optional service account email address if machineaccount is selected and
the user does not wish to use the default email.
type: str
scopes:
description:
- Array of scopes to be used
type: list
env_type:
description:
- Specifies which Ansible environment you're running this module within.
- This should not be set unless you know what you're doing.
- This only alters the User Agent string for any API requests.
type: str
notes:
- For authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE)
  env variable.
- For authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS)
  env variable.
- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL)
env variable.
- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable.
- For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
- Environment variables values will only be used if the playbook values are not set.
- The I(service_account_email) and I(service_account_file) options are mutually exclusive.
'''
EXAMPLES = '''
- name: get info on a topic
gcp_pubsub_topic_info:
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
'''
RETURN = '''
resources:
description: List of resources
returned: always
type: complex
contains:
name:
description:
- Name of the topic.
returned: success
type: str
kmsKeyName:
description:
- The resource name of the Cloud KMS CryptoKey to be used to protect access
to messages published on this topic. Your project's PubSub service account
(`service-{{PROJECT_NUMBER}}@gcp-sa-pubsub.iam.gserviceaccount.com`) must
have `roles/cloudkms.cryptoKeyEncrypterDecrypter` to use this feature.
- The expected format is `projects/*/locations/*/keyRings/*/cryptoKeys/*` .
returned: success
type: str
labels:
description:
- A set of key/value label pairs to assign to this Topic.
returned: success
type: dict
messageStoragePolicy:
description:
- Policy constraining the set of Google Cloud Platform regions where messages
published to the topic may be stored. If not present, then no constraints
are in effect.
returned: success
type: complex
contains:
allowedPersistenceRegions:
description:
- A list of IDs of GCP regions where messages that are published to the
topic may be persisted in storage. Messages published by publishers running
in non-allowed GCP regions (or running outside of GCP altogether) will
be routed for storage in one of the allowed regions. An empty list means
that no regions are allowed, and is not a valid configuration.
returned: success
type: list
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest
import json
################################################################################
# Main
################################################################################
def main():
module = GcpModule(argument_spec=dict())
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/pubsub']
return_value = {'resources': fetch_list(module, collection(module))}
module.exit_json(**return_value)
def collection(module):
return "https://pubsub.googleapis.com/v1/projects/{project}/topics".format(**module.params)
def fetch_list(module, link):
auth = GcpSession(module, 'pubsub')
return auth.list(link, return_if_object, array_name='topics')
def return_if_object(module, response):
# If not found, return nothing.
if response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
module.fail_json(msg="Invalid JSON response with error: %s" % inst)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
if __name__ == "__main__":
main()
|
News items related to Berkshire Hathaway Home Services Georgia Properties as issued by the Send2Press Newswire service on behalf of the noted news source.
ATLANTA, Ga., Oct. 16, 2018 (SEND2PRESS NEWSWIRE) — Debra Johnston, Berkshire Hathaway HomeServices Georgia Properties, has announced Georgia’s best custom masterpiece estate. 3509 Tanners Mill Circle offers a range of unique benefits, from a large car-collector showroom to 446 acres of breathtaking picturesque grounds, all available for $16.5 million.
|
# -*- coding: utf-8 -*-
from __future__ import with_statement
import base64
import binascii
import re
from module.plugins.Container import Container
from module.utils import fs_encode
class RSDF(Container):
__name__ = "RSDF"
__version__ = "0.24"
__pattern__ = r'.+\.rsdf'
__description__ = """RSDF container decrypter plugin"""
__license__ = "GPLv3"
__authors__ = [("RaNaN", "[email protected]"),
("spoob", "[email protected]")]
def decrypt(self, pyfile):
from Crypto.Cipher import AES
infile = fs_encode(pyfile.url.replace("\n", ""))
        # Static key/IV seed used by the RSDF format: the seed is encrypted
        # once with AES-ECB and the result becomes the IV for AES-CFB
        # decryption of the individual links.
        Key = binascii.unhexlify('8C35192D964DC3182C6F84F3252239EB4A320D2500000000')
        IV = binascii.unhexlify('FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF')
        IV_Cipher = AES.new(Key, AES.MODE_ECB)
        IV = IV_Cipher.encrypt(IV)
        obj = AES.new(Key, AES.MODE_CFB, IV)
try:
with open(infile, 'r') as rsdf:
data = rsdf.read()
except IOError, e:
self.fail(str(e))
if re.search(r"<title>404 - Not Found</title>", data) is None:
data = binascii.unhexlify(''.join(data.split()))
data = data.splitlines()
for link in data:
if not link:
continue
link = base64.b64decode(link)
link = obj.decrypt(link)
decryptedUrl = link.replace('CCF: ', '')
self.urls.append(decryptedUrl)
self.logDebug("Adding package %s with %d links" % (pyfile.package().name, len(self.urls)))
|
Matthew Henry had the rare ability to express profound spiritual insights with simplicity and eloquence. Over the years, his writings have been read for both their scholarship and devotion, and none more than the classic Concise Commentary on the Whole Bible. Now you can listen to the very best of Matthew Henry in this new edition of his famous commentary. Henry's profound spiritual insights have touched lives for over three hundred years. A valuable source of reference and sermon material, this classic is a treasure for pastors, students, Bible teachers, and devotional readers alike.
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Xfs(AutotoolsPackage):
"""X Font Server."""
homepage = "http://cgit.freedesktop.org/xorg/app/xfs"
url = "https://www.x.org/archive/individual/app/xfs-1.1.4.tar.gz"
version('1.1.4', '0818a2e0317e0f0a1e8a15ca811827e2')
depends_on('[email protected]:')
depends_on('font-util')
depends_on('[email protected]:', type='build')
depends_on('fontsproto', type='build')
depends_on('xtrans', type='build')
depends_on('[email protected]:', type='build')
depends_on('util-macros', type='build')
|
Don Garlits is going after the 200-mph record again, this time with a battery-powered dragster. He's at it right now in Florida.
Exploring the future of racing, a quarter-mile at a time in a Honda Fit EV.
SAE International has standards for everything, from socket wrenches to electrical sockets.
Some want to see motor racing leave the internal combustion engine behind for all-electric power.
|
#
# Copyright (c) 2013-2014, Scott J Maddox
#
# This file is part of SimplePL.
#
# SimplePL is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# SimplePL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with SimplePL. If not, see
# <http://www.gnu.org/licenses/>.
#
#######################################################################
# std lib imports
import logging
log = logging.getLogger(__name__)
import string
# third party imports
import serial
# local imports
#TODO: make sure the asked for nm is available on the given grating?
class FW102C(object):
'''
Device driver for ThorLabs FW102C Motorized Filter Wheel.
'''
def __init__(self, port, timeout=5.):
self._inst = serial.Serial(port,
baudrate=115200,
bytesize=serial.EIGHTBITS,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
timeout=timeout)
while self.__read(): # clear the filter's output buffer
pass
while True:
id = self.get_id()
if id != "Command error":
break
while self.__read(): # clear the filter's output buffer
pass
if id != "THORLABS FW102C/FW212C Filter Wheel version 1.01":
raise RuntimeError('Wrong instrument id: %s'%id)
def __read(self):
r = self._inst.readline()
log.debug('__read: return "%s"', r)
return r
def _read(self):
r = self.__read()
r = string.join(r.split()[1:-1]) # strip command echo and "ok"
log.debug('_read: return "%s"', r)
return r
def __write(self, s):
log.debug('__write: _inst.write("%s")', s)
self._inst.write(s+"\r")
def _write(self, s):
self.__write(s)
self._read() # command echo
def _ask(self, s):
self.__write(s)
return self._read()
def get_id(self):
return self._ask('*idn?')
#TODO: check how it confirms pos=1, if at all, and compensate
def set_filter(self, i):
'''
Sets the filter wheel position to the given index
'''
if not isinstance(i, int) or i < 1 or i > 6:
raise ValueError('i must be an integer in the range [1, 6]')
self._write('pos=%d'%i)
def get_filter(self):
return int(self._ask('pos?'))
if __name__ == "__main__":
# enable DEBUG output
logging.basicConfig(level=logging.DEBUG)
# Test
fw = FW102C(port=3)
print fw.get_id()
print fw.get_filter()
fw.set_filter(1)
print fw.get_filter()
fw.set_filter(2)
print fw.get_filter()
|
The caffeine-fueled all nighter is an enduring image of high school and college life. Students feeling pressure to succeed readily accept marathon study sessions, or "cramming," as an appropriate response. In reality, cramming is associated with emotional, mental and physical impairments that reduce the body's ability to cope with its environment. Students who embrace cramming to get through a final exam week find themselves struggling to perform consistently, once the brain adjusts to prolonged sleep deprivation.
Chronic all-night cramming leads to exaggerated emotional reactions in college students who don't break the habit. Harvard Medical School associate professor Seung-Schik Yoo drew this conclusion after doing a study that required subjects to forgo sleep for 36 hours and evaluate various visual stimuli, "The Harvard Crimson" reports. To Yoo's surprise, his sleepless subjects were more likely to evaluate neutral images favorably, even when no obvious basis existed for doing so.
As far as many college students are concerned, trading sleep for grades seems like an acceptable price to pay. However, Yoo's research also suggests that lack of sleep impairs mental function. Students who adopt cramming as a way of life don't recall as much as their peers who sleep normally, "The Harvard Crimson" states. Prolonged sleeplessness has also been shown to alter the brain, Yoo asserts, leaving students vulnerable to long-term, permanent damage.
Debate persists on whether cramming triggers bad grades. Researchers from the University of California Los Angeles sought to answer this question by asking 535 high school students to chronicle their sleeping and studying habits, "U.S. News & World Report" states. By their senior year, participating students were studying an hour per night, but sleeping about 41 minutes less. As a result, students reported prolonged academic problems after a day of sleeplessness.
The negative effects of cramming aren't limited to your psychological and mental state. Students who cut corners on sleep often adopt unhealthy eating habits, an analysis by the American Psychological Association states. In most cases, college students subsist on empty-calorie, high-fat snack foods that don't provide enough energy to function effectively. This situation is compounded by skipping healthier, balanced items that improve learning and memory, such as kiwi fruit, salmon and walnuts.
One justification for cramming sessions is the notion that it's possible to make up lost sleep. However, a study by Harvard Medical School neurologist Daniel Cohen shows little support for such beliefs among students. According to "Harvard Magazine," Cohen's nine research subjects displayed improved reaction times after sleeping 10 hours versus staying awake for 33 hours. As the three-week study continued, Cohen found that his subjects underwent severe mental impairment after just 18 hours, which he attributed to their chronic sleeplessness.
|
import argparse
import collections
import os
class InspectionReport:
"""
The result of an inspection.
"""
def __init__(self, filename):
self.filename = filename
self.word_count = 0
self.warnings = []
self.issues = []
def __len__(self):
return len(self.issues)
def __str__(self):
lines = [self.get_report_heading()]
for warning in self.warnings:
lines.append(warning)
if len(self.issues) > 0:
maximum_line = self.issues[-1].line
for issue in self.issues:
lines.append(issue.format(max_line=maximum_line))
return "\n".join(lines)
def increment_word_count(self):
self.word_count += 1
def add_issue(self, issue):
self.issues.append(issue)
def analyze_issues(self):
"""
Analyzes the issues of this Report, possibly generating warnings and removing issues.
"""
typo_counter = collections.defaultdict(lambda: 0)
for issue in self.issues:
typo_counter[issue.text] += 1
# Typos that appear more than max(words / 10000, 10) times are assumed to be names
name_threshold = max(self.word_count / 10000, 10)
ignored_typos = []
for key, count in typo_counter.items():
if count > name_threshold:
ignored_typos.append((count, key))
ignored_typos.sort()
ignored_typos.reverse()
for typo in ignored_typos:
self.warnings.append("considering '" + typo[1] + "' a name as it was detected " + str(typo[0]) + " times")
self.remove_issues_based_on_text(set(typo[1] for typo in ignored_typos))
def remove_issues_based_on_text(self, typos):
new_issue_list = []
for issue in self.issues:
if issue.text not in typos:
new_issue_list.append(issue)
self.issues = new_issue_list
def get_report_heading(self):
"""
Creates a proper heading for this report.
"""
issue_count = len(self.issues)
issue_count_string = "(1 issue)" if issue_count == 1 else "(" + str(issue_count) + " issues)"
return self.filename + " " + issue_count_string + ":"
class Issue:
"""
A simple issue in a file.
"""
def __init__(self, line, text, issue_type='typo'):
self.line = line
self.text = text
self.type = issue_type
def __str__(self):
return str(self.line) + ": " + self.text
def format(self, max_line):
"""
Formats this the string representation of this issue by padding the line number for all issues to be aligned.
:param max_line: the biggest line number of the report this issue belongs to
:return: a human-readable and properly padded string representation of this issue
"""
length_delta = len(str(max_line)) - len(str(self.line))
return ' ' * length_delta + str(self)
# The shared word set used as a dictionary
dictionary = None
def init_dictionary():
global dictionary
dictionary = set()
with open("dictionary/english.txt") as dictionary_file:
for line in dictionary_file.readlines():
dictionary.add(line.strip())
def get_dictionary():
if dictionary is None:
init_dictionary()
return dictionary
def is_valid_word(word):
"""
Simply checks if the word is in the global dictionary or not.
:param word: the input word
"""
return word in get_dictionary()
def clean_word(word):
"""
Sanitizes the input word as to maximize the fairness of the dictionary check.
:param word: the input word
"""
word = word.strip("*_,:;.!?(){}[]'\"") # Stripping periods this way is problematic because "U.S." becomes "U.S"
word = word.lower() # May bring up issues with names, but is necessary for now for words that come after a period.
if word.endswith("'s"):
return word[:-2]
return word
def clean_file_words(file):
"""
Produces a generator of clean file words.
:param file: a file
"""
line_number = 0
for line in file.readlines():
line_number += 1
words = line.replace("--", " ").translate(str.maketrans("‘’“”", "''\"\"")).split()
for word in words:
yield line_number, word
def is_number(word):
"""
Detects if the word is a number. This function also detects monetary values and negative numbers.
:param word: a text word
:return: True if the word is considered to be a number, False otherwise
"""
# The first check is only needed for formal correctness. If performance requirements demand, it may be removed.
return len(word) > 0 and len(word.strip('0123456789,.-$')) == 0
def inspect_word(line, word, report):
"""
Inspects a single word from a text file.
:param line: the line of the file on which the word was found
:param word: the word to be inspected
:param report: the InspectionReport object relevant to the inspection
"""
word = clean_word(word)
if len(word) > 0:
report.increment_word_count()
if not is_number(word) and not is_valid_word(word):
report.add_issue(Issue(line, word))
def inspect_file(filename):
"""
Inspects a text file for grammar issues.
:param filename: the name of the file
:return: a InspectionResult object with all the issues found
"""
with open(filename) as open_file:
report = InspectionReport(filename)
for line, word in clean_file_words(open_file):
inspect_word(line, word, report)
report.analyze_issues()
return report
def list_files(root):
for root, directories, files in os.walk(root):
for file in files:
yield os.path.join(root, file)
def get_arguments():
parser = argparse.ArgumentParser(description="Inspects a file tree for grammar issues")
parser.add_argument("root", help="the root of the tree SpellScream will walk")
return parser.parse_args()
def main():
arguments = get_arguments()
files = list_files(arguments.root)
for file in files:
print(inspect_file(file))
if __name__ == "__main__":
main()
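
# Example invocation (assuming the module is saved as spellscream.py; the
# file name and issues below are illustrative). Line numbers are padded so
# they right-align, per Issue.format above:
#
#   $ python spellscream.py notes/
#   notes/chapter-1.txt (2 issues):
#    7: recieve
#   12: teh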
|
Being a lounge bar café, we have developed a vast cocktail menu as well as a very good choice of liqueurs and spirits, beers, stouts and smoothies, looking into healthy options such as fresh vegetable juices.
A relatively conventional bar list that also includes fusion and futuristic cocktails, both alcoholic and non-alcoholic, can satisfy the demands of any connoisseur.
Chivas Royal Salut 21 y.o.
|
"""Tests for documents."""
import textwrap
import unittest
from grow.common import utils
from grow.testing import testing
from grow.documents import document
from grow.translations import locales
from grow.pods import pods
from grow import storage
class DocumentsTestCase(unittest.TestCase):
def setUp(self):
dir_path = testing.create_test_pod_dir()
self.pod = pods.Pod(dir_path, storage=storage.FileStorage)
def test_eq(self):
"""Test equal comparison."""
doc1 = self.pod.get_doc('/content/pages/contact.yaml')
doc2 = self.pod.get_doc('/content/pages/contact.yaml')
self.assertEqual(doc1, doc2)
col = self.pod.get_collection('pages')
for doc in col:
if doc.pod_path == '/content/pages/contact.yaml':
self.assertEqual(doc1, doc)
self.assertEqual(doc2, doc)
doc1 = self.pod.get_doc('/content/pages/about.yaml')
doc2 = self.pod.get_doc('/content/pages/[email protected]')
self.assertEqual(doc1, doc2)
def test_ge(self):
"""Test greater-than-equal comparison."""
doc1 = self.pod.get_doc('/content/pages/delta.yaml')
doc2 = self.pod.get_doc('/content/pages/charlie.yaml')
self.assertTrue(doc1 >= doc2)
doc1 = self.pod.get_doc('/content/pages/bravo.yaml', locale='fr')
doc2 = self.pod.get_doc('/content/pages/bravo.yaml', locale='de')
self.assertTrue(doc1 >= doc2)
doc1 = self.pod.get_doc('/content/pages/bravo.yaml', locale='fr')
doc2 = self.pod.get_doc('/content/pages/bravo.yaml', locale='fr')
self.assertTrue(doc1 >= doc2)
def test_gt(self):
"""Test greater-than comparison."""
doc1 = self.pod.get_doc('/content/pages/delta.yaml')
doc2 = self.pod.get_doc('/content/pages/charlie.yaml')
self.assertTrue(doc1 > doc2)
doc1 = self.pod.get_doc('/content/pages/bravo.yaml', locale='fr')
doc2 = self.pod.get_doc('/content/pages/bravo.yaml', locale='de')
self.assertTrue(doc1 > doc2)
doc1 = self.pod.get_doc('/content/pages/delta.yaml')
doc2 = self.pod.get_doc('/content/pages/delta.yaml')
self.assertFalse(doc1 > doc2)
def test_le(self):
"""Test less-than-equal comparison."""
doc1 = self.pod.get_doc('/content/pages/charlie.yaml')
doc2 = self.pod.get_doc('/content/pages/delta.yaml')
self.assertTrue(doc1 <= doc2)
doc1 = self.pod.get_doc('/content/pages/bravo.yaml', locale='de')
doc2 = self.pod.get_doc('/content/pages/bravo.yaml', locale='ru')
self.assertTrue(doc1 <= doc2)
doc1 = self.pod.get_doc('/content/pages/bravo.yaml', locale='fr')
doc2 = self.pod.get_doc('/content/pages/bravo.yaml', locale='fr')
self.assertTrue(doc1 <= doc2)
def test_lt(self):
"""Test less-than comparison."""
doc1 = self.pod.get_doc('/content/pages/charlie.yaml')
doc2 = self.pod.get_doc('/content/pages/delta.yaml')
self.assertTrue(doc1 < doc2)
doc1 = self.pod.get_doc('/content/pages/bravo.yaml', locale='de')
doc2 = self.pod.get_doc('/content/pages/bravo.yaml', locale='en')
self.assertTrue(doc1 < doc2)
doc1 = self.pod.get_doc('/content/pages/delta.yaml')
doc2 = self.pod.get_doc('/content/pages/delta.yaml')
self.assertFalse(doc1 < doc2)
def test_doc_storage(self):
# Because this test involves translation priority, ensure that we have
# compiled the MO files before running the test.
self.pod.catalogs.compile()
doc = self.pod.get_doc('/content/pages/intro.md')
self.assertEqual('About page.', doc.body)
self.assertEqual('<p>About page.</p>', doc.html)
keys = sorted(['$title', '$order', '$titles', 'key', 'root_key'])
self.assertEqual(keys, sorted(list(doc.fields.keys())))
doc = self.pod.get_doc('/content/pages/home.yaml')
keys = sorted([
'$localization',
'$order',
'$path',
'$title',
'$view',
'csv_data',
'csv_data@',
'doc_data',
'doc_url_data',
'foo',
'json_data',
'static_data',
'static_url_data',
'tagged_fields',
'yaml_data',
'yaml_data@',
])
self.assertEqual(keys, sorted(list(doc.fields.keys())))
self.assertIsNone(doc.html)
about = self.pod.get_doc('/content/pages/about.yaml')
self.assertEqual(doc.doc_data, about)
self.assertEqual(doc.doc_url_data, about.url)
static = self.pod.get_static('/static/test.txt', locale='en')
self.assertEqual(doc.static_data, static)
self.assertEqual(doc.static_url_data, static.url)
default_doc = self.pod.get_doc('/content/pages/about.yaml')
self.assertEqual('bar', default_doc.foo)
de_doc = self.pod.get_doc('/content/pages/about.yaml', locale='de')
self.assertEqual('baz', de_doc.foo)
self.assertEqual('qux', de_doc.qaz)
def test_clean_localized_path(self):
input = '/content/pages/about.yaml'
expected = '/content/pages/about.yaml'
self.assertEqual(expected, document.Document.clean_localized_path(
input, None))
input = '/content/pages/[email protected]'
expected = '/content/pages/[email protected]'
self.assertEqual(expected, document.Document.clean_localized_path(
input, 'de'))
input = '/content/pages/[email protected]'
expected = '/content/pages/about.yaml'
self.assertEqual(expected, document.Document.clean_localized_path(
input, 'en'))
def test_collection_base_path(self):
about_doc = self.pod.get_doc('/content/pages/about.yaml')
self.assertEqual('/', about_doc.collection_base_path)
self.pod.write_file('/content/pages/sub/about.yaml', '')
about_doc = self.pod.get_doc('/content/pages/sub/about.yaml')
self.assertEqual('/sub/', about_doc.collection_base_path)
self.pod.write_file('/content/pages/sub/foo/about.yaml', '')
about_doc = self.pod.get_doc('/content/pages/sub/foo/about.yaml')
self.assertEqual('/sub/foo/', about_doc.collection_base_path)
def test_collection_sub_path(self):
about_doc = self.pod.get_doc('/content/pages/about.yaml')
self.assertEqual('/about.yaml', about_doc.collection_sub_path)
self.pod.write_file('/content/pages/sub/about.yaml', '')
about_doc = self.pod.get_doc('/content/pages/sub/about.yaml')
self.assertEqual('/sub/about.yaml', about_doc.collection_sub_path)
self.pod.write_file('/content/pages/sub/foo/about.yaml', '')
about_doc = self.pod.get_doc('/content/pages/sub/foo/about.yaml')
self.assertEqual('/sub/foo/about.yaml', about_doc.collection_sub_path)
def test_collection_sub_path_clean(self):
about_doc = self.pod.get_doc('/content/pages/about.yaml')
self.assertEqual('/about.yaml', about_doc.collection_sub_path_clean)
self.pod.write_file('/content/pages/sub/about.yaml', '')
about_doc = self.pod.get_doc('/content/pages/sub/about.yaml')
self.assertEqual('/sub/about.yaml', about_doc.collection_sub_path_clean)
self.pod.write_file('/content/pages/sub/[email protected]', '')
about_doc = self.pod.get_doc('/content/pages/sub/[email protected]')
self.assertEqual('/sub/about.yaml', about_doc.collection_sub_path_clean)
def test_get_serving_path(self):
about_doc = self.pod.get_doc('/content/pages/about.yaml')
self.assertEqual('/about/', about_doc.get_serving_path())
fi_doc = self.pod.get_doc('/content/pages/about.yaml', locale='fi')
self.assertEqual('/fi_ALL/about/', fi_doc.get_serving_path())
def test_locales(self):
doc = self.pod.get_doc('/content/pages/contact.yaml')
self.assertEqual(locales.Locale('de'), doc.locale)
expected = locales.Locale.parse_codes([
'de',
'fr',
'it',
])
self.assertEqual(expected, doc.locales)
# Currently, when requesting a document with a locale that is not
# specified, we return a path that is unmatchable. TBD whether we want
# to change this in a future version.
ko_doc = self.pod.get_doc('/content/pages/contact.yaml', locale='ko')
expected = '/ko/contact-us/'
self.assertEqual(expected, ko_doc.url.path)
self.assertTrue(ko_doc.exists)
def test_parse_localized_path(self):
path = '/content/pages/file@en_us.ext'
expected = ('/content/pages/file.ext', 'en_us')
self.assertEqual(
expected, document.Document.parse_localized_path(path))
path = '/content/pages/[email protected]'
expected = ('/content/pages/file.ext', 'en')
self.assertEqual(
expected, document.Document.parse_localized_path(path))
path = '/content/pages/file.ext'
expected = ('/content/pages/file.ext', None)
self.assertEqual(
expected, document.Document.parse_localized_path(path))
def test_localize_path(self):
path = '/content/pages/file.ext'
locale = 'locale'
expected = '/content/pages/[email protected]'
self.assertEqual(
expected, document.Document.localize_path(path, locale=locale))
# No Locale
path = '/content/pages/file.ext'
locale = None
expected = '/content/pages/file.ext'
self.assertEqual(
expected, document.Document.localize_path(path, locale=locale))
# Existing Locale
path = '/content/pages/[email protected]'
locale = 'elacol'
expected = '/content/pages/[email protected]'
self.assertEqual(
expected, document.Document.localize_path(path, locale=locale))
def test_next_prev(self):
collection = self.pod.get_collection('pages')
docs = collection.list_docs()
doc = self.pod.get_doc('/content/pages/contact.yaml')
doc.next(docs)
self.assertRaises(ValueError, doc.next, [1, 2, 3])
doc.prev(docs)
self.assertRaises(ValueError, doc.prev, [1, 2, 3])
def test_default_locale(self):
doc = self.pod.get_doc('/content/localized/localized.yaml', locale='de')
self.assertEqual('/views/ja-specific-view.html', doc.view)
self.assertEqual(locales.Locale('de'), doc.locale)
self.assertEqual('base_ja', doc.foo)
self.assertEqual('baz', doc.bar)
doc = self.pod.get_doc('/content/localized/localized.yaml')
self.assertEqual('/views/ja-specific-view.html', doc.view)
self.assertEqual(locales.Locale('ja'), doc.locale)
self.assertEqual(locales.Locale('ja'), doc.default_locale)
self.assertEqual('base_ja', doc.foo)
self.assertEqual('baz', doc.bar)
doc = self.pod.get_doc('/content/localized/localized.yaml', locale='ja')
self.assertEqual('/views/ja-specific-view.html', doc.view)
self.assertEqual(locales.Locale('ja'), doc.locale)
self.assertEqual('base_ja', doc.foo)
self.assertEqual('baz', doc.bar)
doc = self.pod.get_doc('/content/localized/localized.yaml', locale='fr')
self.assertEqual('/views/ja-specific-view.html', doc.view)
self.assertEqual(locales.Locale('fr'), doc.locale)
self.assertEqual('base_ja', doc.foo)
self.assertEqual('baz', doc.bar)
self.assertEqual('/intl/fr/localized/', doc.url.path)
doc = self.pod.get_doc('/content/localized/localized.yaml', locale='en')
self.assertEqual('/views/localized.html', doc.view)
self.assertEqual(locales.Locale('en'), doc.locale)
self.assertEqual('base', doc.foo)
self.assertEqual('baz', doc.bar)
self.assertEqual('/intl/en/localized/', doc.url.path)
def test_view_override(self):
doc = self.pod.get_doc('/content/localized/localized-view-override.yaml')
self.assertEqual('/views/localized.html', doc.view)
self.assertEqual(locales.Locale.parse('en_PK'), doc.locale)
doc = self.pod.get_doc('/content/localized/localized-view-override.yaml',
locale='en_PK')
self.assertEqual('/views/localized.html', doc.view)
self.assertEqual(locales.Locale.parse('en_PK'), doc.locale)
doc = self.pod.get_doc('/content/localized/localized-view-override.yaml',
locale='tr_TR')
self.assertEqual('/views/tr-specific-view.html', doc.view)
self.assertEqual(locales.Locale.parse('tr_TR'), doc.locale)
def test_exists(self):
doc = self.pod.get_doc('/content/localized/localized.yaml')
self.assertTrue(doc.exists)
doc = self.pod.get_doc('/content/localized/localized.yaml', locale='ja')
self.assertTrue(doc.exists)
doc = self.pod.get_doc('/content/localized/localized.yaml', locale='de')
self.assertTrue(doc.exists)
doc = self.pod.get_doc('/content/localized/does-not-exist.yaml')
self.assertFalse(doc.exists)
def test_multi_file_localization(self):
fr_doc = self.pod.get_doc('/content/pages/intro.md', locale='fr')
self.assertEqual(locales.Locale('fr'), fr_doc.locale)
self.assertEqual('/content/pages/[email protected]', fr_doc.pod_path)
self.assertEqual('/content/pages/intro.md', fr_doc.root_pod_path)
self.assertIn('French About page.', fr_doc.html)
de_doc = self.pod.get_doc('/content/pages/intro.md', locale='de')
de_doc_from_fr_doc = fr_doc.localize('de')
self.assertEqual(de_doc, de_doc_from_fr_doc)
self.assertEqual('root_value', de_doc.key)
self.assertEqual('fr_value', fr_doc.key)
self.assertEqual('root_key_value', de_doc.root_key)
self.assertEqual('root_key_value', fr_doc.root_key)
keys = sorted(['$title', '$order', '$titles', 'key', 'root_key'])
self.assertEqual(keys, sorted(list(fr_doc.fields.keys())))
def test_default_locale_override(self):
pod = testing.create_pod()
pod.write_yaml('/podspec.yaml', {
'localization': {
'default_locale': 'en',
'locales': [
'en',
'de',
'it',
]
}
})
pod.write_file('/views/base.html', '{{doc.foo}}')
pod.write_yaml('/content/pages/_blueprint.yaml', {
'$path': '/{base}/',
'$view': '/views/base.html',
'$localization': {
'path': '/{locale}/{base}/',
},
})
pod.write_yaml('/content/pages/page.yaml', {
'$localization': {
'default_locale': 'de',
},
'foo': 'foo-base',
'foo@de': 'foo-de',
})
pod.write_yaml('/content/pages/page2.yaml', {
'foo': 'foo-base',
'foo@de': 'foo-de',
})
pod.router.add_all(use_cache=False)
# Verify ability to override using the default locale.
content = testing.render_path(pod, '/page/')
self.assertEqual('foo-de', content)
content = testing.render_path(pod, '/en/page/')
self.assertEqual('foo-base', content)
# Verify default behavior otherwise.
content = testing.render_path(pod, '/page2/')
self.assertEqual('foo-base', content)
content = testing.render_path(pod, '/de/page2/')
self.assertEqual('foo-de', content)
def test_locale_override(self):
pod = testing.create_pod()
pod.write_yaml('/podspec.yaml', {
'localization': {
'default_locale': 'en',
'locales': [
'de',
'fr',
'it',
]
}
})
pod.write_yaml('/content/pages/_blueprint.yaml', {
'$path': '/{base}/',
'$view': '/views/base.html',
'$localization': {
'path': '/{locale}/{base}/',
},
})
pod.write_yaml('/content/pages/a.yaml', {
'$view': '/views/base.html',
'$view@fr': '/views/base-fr.html',
'qaz': 'qux',
'qaz@fr': 'qux-fr',
'qaz@de': 'qux-de',
'foo': 'bar-base',
'foo@en': 'bar-en',
'foo@de': 'bar-de',
'foo@fr': 'bar-fr',
'nested': {
'nested': 'nested-base',
'nested@fr': 'nested-fr',
},
})
doc = pod.get_doc('/content/pages/a.yaml')
self.assertEqual('en', doc.locale)
self.assertEqual('bar-en', doc.foo)
self.assertEqual('qux', doc.qaz)
de_doc = doc.localize('de')
self.assertEqual('bar-de', de_doc.foo)
self.assertEqual('/views/base.html', de_doc.view)
self.assertEqual('nested-base', de_doc.nested['nested'])
self.assertEqual('qux-de', de_doc.qaz)
fr_doc = doc.localize('fr')
self.assertEqual('bar-fr', fr_doc.foo)
self.assertEqual('/views/base-fr.html', fr_doc.view)
self.assertEqual('nested-fr', fr_doc.nested['nested'])
self.assertEqual('qux-fr', fr_doc.qaz)
it_doc = doc.localize('it')
self.assertEqual('bar-base', it_doc.foo)
self.assertEqual('qux', it_doc.qaz)
def test_localization(self):
# Localized document.
pod = testing.create_pod()
pod.write_yaml('/podspec.yaml', {})
pod.write_yaml('/content/pages/_blueprint.yaml', {})
pod.write_yaml('/content/pages/page.yaml', {
'$path': '/{base}/',
'$localization': {
'path': '/{locale}/{base}/',
'locales': [
'de',
]
}
})
doc = pod.get_doc('/content/pages/page.yaml')
self.assertIsNone(doc.default_locale)
self.assertEqual(['de'], doc.locales)
self.assertEqual('/{base}/', doc.path_format)
self.assertEqual('/page/', doc.url.path)
de_doc = doc.localize('de')
self.assertEqual('/{locale}/{base}/', de_doc.path_format)
self.assertEqual('/de/page/', de_doc.url.path)
# Localized collection.
pod = testing.create_pod()
pod.write_yaml('/podspec.yaml', {})
pod.write_yaml('/content/pages/_blueprint.yaml', {
'$path': '/{base}/',
'$localization': {
'path': '/{locale}/{base}/',
'locales': [
'de',
],
}
})
pod.write_yaml('/content/pages/page.yaml', {})
doc = pod.get_doc('/content/pages/page.yaml')
self.assertIsNone(doc.default_locale)
self.assertEqual(['de'], doc.locales)
self.assertEqual('/{base}/', doc.path_format)
de_doc = doc.localize('de')
self.assertEqual('/{locale}/{base}/', de_doc.path_format)
self.assertEqual('/de/page/', de_doc.url.path)
# Localized podspec (no $localization in blueprint or doc).
pod = testing.create_pod()
pod.write_yaml('/podspec.yaml', {
'localization': {
'locales': [
'de',
],
}
})
pod.write_yaml('/content/pages/_blueprint.yaml', {
'$path': '/{base}/',
})
pod.write_yaml('/content/pages/page.yaml', {})
collection = pod.get_collection('/content/pages/')
self.assertEqual(['de'], collection.locales)
doc = pod.get_doc('/content/pages/page.yaml')
self.assertEqual(['de'], doc.locales)
# Localized podspec ($localization in blueprint).
pod = testing.create_pod()
pod.write_yaml('/podspec.yaml', {
'localization': {
'locales': [
'de',
],
}
})
pod.write_yaml('/content/pages/_blueprint.yaml', {
'$path': '/{base}/',
'$localization': {
'path': '/{locale}/{base}/',
},
})
pod.write_yaml('/content/pages/page.yaml', {})
collection = pod.get_collection('/content/pages/')
self.assertEqual(['de'], collection.locales)
doc = pod.get_doc('/content/pages/page.yaml')
self.assertEqual(['de'], doc.locales)
self.assertEqual('/{base}/', doc.path_format)
de_doc = doc.localize('de')
self.assertEqual('/{locale}/{base}/', de_doc.path_format)
self.assertEqual('/de/page/', de_doc.url.path)
# Localized podspec ($localization in blueprint, no localized path).
pod = testing.create_pod()
pod.write_yaml('/podspec.yaml', {
'localization': {
'locales': [
'de',
],
}
})
pod.write_yaml('/content/pages/_blueprint.yaml', {
'$path': '/{base}/',
})
pod.write_yaml('/content/pages/page.yaml', {})
doc = pod.get_doc('/content/pages/page.yaml')
self.assertEqual(['de'], doc.locales)
self.assertEqual('/{base}/', doc.path_format)
de_doc = doc.localize('de')
self.assertEqual('/{base}/', de_doc.path_format)
self.assertEqual('/page/', de_doc.url.path)
# Override collection with "$localization:locales:[]" in doc.
pod.write_yaml('/content/pages/page.yaml', {
'$localization': {
'locales': [],
},
})
doc = pod.get_doc('/content/pages/page.yaml')
self.assertEqual([], doc.locales)
# Override collection with "$localization:~" in doc.
pod.write_yaml('/content/pages/page.yaml', {
'$localization': None,
})
doc = pod.get_doc('/content/pages/page.yaml')
self.assertEqual([], doc.locales)
# Override podspec with "$localization:locales:[]" in blueprint.
pod = testing.create_pod()
pod.write_yaml('/podspec.yaml', {
'localization': {
'locales': [
'de',
],
}
})
pod.write_yaml('/content/pages/_blueprint.yaml', {
'$path': '/{base}/',
'$localization': {
'locales': [],
},
})
pod.write_yaml('/content/pages/page.yaml', {})
doc = pod.get_doc('/content/pages/page.yaml')
collection = pod.get_collection('/content/pages/')
self.assertEqual([], collection.locales)
self.assertEqual([], doc.locales)
# Override locales with "$localization:~" in blueprint.
pod.write_yaml('/content/pages/_blueprint.yaml', {
'$path': '/{base}/',
'$localization': None,
})
pod.write_yaml('/content/pages/page.yaml', {})
doc = pod.get_doc('/content/pages/page.yaml')
collection = pod.get_collection('/content/pages/')
self.assertEqual([], collection.locales)
self.assertEqual([], doc.locales)
# Override the overridden podspec.
pod.write_yaml('/content/pages/page.yaml', {
'$localization': {
'locales': [
'de',
'ja',
],
},
})
doc = pod.get_doc('/content/pages/page.yaml')
self.assertEqual(['de', 'ja'], doc.locales)
def test_localization_fallback(self):
# Verify locales aren't clobbered when no localized path is specified.
pod = testing.create_pod()
pod.write_yaml('/podspec.yaml', {
'localization': {
'default_locale': 'en',
'locales': [
'de',
'en',
],
}
})
pod.write_yaml('/content/pages/_blueprint.yaml', {
'$path': '/{base}/',
'$view': '/views/base.html',
})
pod.write_yaml('/content/pages/page.yaml', {})
pod.write_file('/views/base.html', '{{doc.locale}}')
pod.write_yaml('/content/pages/_blueprint.yaml', {
'$path': '/{base}/',
'$view': '/views/base.html',
'$localization': None,
})
pod.router.add_all(use_cache=False)
content = testing.render_path(pod, '/page/')
self.assertEqual('en', content)
# Verify paths aren't clobbered by the default locale.
pod.write_yaml('/content/pages/page.yaml', {
'$path': '/{locale}/{base}/',
'$view': '/views/base.html',
'$localization': {
'default_locale': 'de',
'path': '/{locale}/{base}/',
'locales': [
'en',
'de',
],
},
})
pod.podcache.reset()
pod.router.routes.reset()
pod.router.add_all(use_cache=False)
content = testing.render_path(pod, '/de/page/')
self.assertEqual('de', content)
paths = list(pod.router.routes.paths)
expected = ['/de/page/', '/en/page/']
self.assertEqual(expected, paths)
def test_view_format(self):
pod = testing.create_pod()
pod.write_yaml('/podspec.yaml', {})
pod.write_yaml('/content/pages/_blueprint.yaml', {
'$path': '/{base}/',
'$view': '/views/{base}.html',
})
pod.write_yaml('/content/pages/page.yaml', {})
doc = pod.get_doc('/content/pages/page.yaml')
self.assertEqual('/views/page.html', doc.view)
def test_recursive_yaml(self):
pod = testing.create_pod()
pod.write_yaml('/podspec.yaml', {})
pod.write_yaml('/content/pages/_blueprint.yaml', {
'$path': '/{base}/',
'$view': '/views/{base}.html',
'$localization': {
'default_locale': 'en',
'locales': ['de', 'en'],
}
})
pod.write_file('/content/pages/foo.yaml', textwrap.dedent(
"""\
bar: !g.doc /content/pages/bar.yaml
"""))
pod.write_file('/content/pages/bar.yaml', textwrap.dedent(
"""\
foo: !g.doc /content/pages/foo.yaml
"""))
foo_doc = pod.get_doc('/content/pages/foo.yaml', locale='de')
bar_doc = pod.get_doc('/content/pages/bar.yaml', locale='de')
self.assertEqual(bar_doc, foo_doc.bar)
self.assertEqual(bar_doc, foo_doc.bar.foo.bar)
self.assertEqual('de', foo_doc.bar.locale)
self.assertEqual(foo_doc, bar_doc.foo)
self.assertEqual(foo_doc, bar_doc.foo.bar.foo)
self.assertEqual('de', bar_doc.foo.locale)
foo_doc = pod.get_doc('/content/pages/foo.yaml', locale='en')
bar_doc = pod.get_doc('/content/pages/bar.yaml', locale='en')
self.assertEqual(bar_doc, foo_doc.bar)
self.assertEqual(bar_doc, foo_doc.bar.foo.bar)
self.assertEqual('en', foo_doc.bar.locale)
self.assertEqual(foo_doc, bar_doc.foo)
self.assertEqual(foo_doc, bar_doc.foo.bar.foo)
self.assertEqual('en', bar_doc.foo.locale)
def test_hreflang(self):
pod = testing.create_pod()
pod.write_yaml('/podspec.yaml', {})
pod.write_yaml('/content/pages/_blueprint.yaml', {
'$path': '/{base}/',
'$view': '/views/{base}.html',
'$localization': {
'default_locale': 'en',
'locales': ['de', 'en', 'fr_ca'],
}
})
pod.write_file('/content/pages/foo.yaml', '')
foo_doc = pod.get_doc('/content/pages/foo.yaml', locale='en')
bar_doc = pod.get_doc('/content/pages/foo.yaml', locale='de')
baz_doc = pod.get_doc('/content/pages/foo.yaml', locale='fr_ca')
self.assertEqual('x-default', foo_doc.hreflang)
self.assertEqual('fr-ca', baz_doc.hreflang)
def test_locale_paths(self):
pod = testing.create_pod()
pod.write_yaml('/podspec.yaml', {})
pod.write_file('/content/pages/foo@en_us.yaml', '')
pod.write_file('/content/pages/[email protected]', '')
pod.write_file('/content/pages/foo.yaml', '')
doc = pod.get_doc('/content/pages/foo@en_us.yaml')
self.assertEqual([
'/content/pages/foo@en_us.yaml',
'/content/pages/[email protected]',
'/content/pages/foo.yaml',
], doc.locale_paths)
doc = pod.get_doc('/content/pages/[email protected]')
self.assertEqual([
'/content/pages/[email protected]',
'/content/pages/foo.yaml',
], doc.locale_paths)
doc = pod.get_doc('/content/pages/foo.yaml')
self.assertEqual([
'/content/pages/foo.yaml',
], doc.locale_paths)
def test_dependency_nesting_jinja(self):
# Verify that dependencies work for nested documents.
pod = testing.create_pod()
pod.write_yaml('/podspec.yaml', {
'localization': {
'default_locale': 'en',
'locales': [
'de',
'en',
],
}
})
pod.write_yaml('/content/pages/_blueprint.yaml', {
'$path': '/{base}/',
'$view': '/views/base.html',
'$localization': {
'path': '/{locale}/{base}/'
},
})
pod.write_file('/content/pages/page.yaml', 'partial: !g.doc /content/partials/partial.yaml')
pod.write_yaml('/content/partials/_blueprint.yaml', {})
pod.write_yaml('/content/partials/partial.yaml', {})
pod.write_yaml('/content/partials/[email protected]', {})
pod.write_file(
'/views/base.html',
'{}{} {}'.format(
'{{doc.locale}}',
'{% for partial in g.docs(\'partials\') %} {{partial.locale}}{% endfor %}',
'{{g.doc(\'/content/partials/partial.yaml\').locale}}',
),
)
pod.router.add_all(use_cache=False)
content = testing.render_path(pod, '/page/')
self.assertEqual('en en en', content)
dependents = pod.podcache.dependency_graph.get_dependents(
'/content/partials/partial.yaml')
self.assertEqual(set([
'/content/partials/partial.yaml',
'/content/pages/page.yaml',
]), dependents)
content = testing.render_path(pod, '/de/page/')
self.assertEqual('de de de', content)
dependents = pod.podcache.dependency_graph.get_dependents(
'/content/partials/[email protected]')
self.assertEqual(set([
'/content/partials/[email protected]',
'/content/pages/page.yaml',
]), dependents)
def test_dependency_nesting_yaml(self):
# Verify that dependencies work for nested documents.
pod = testing.create_pod()
pod.write_yaml('/podspec.yaml', {
'localization': {
'default_locale': 'en',
'locales': [
'de',
'en',
],
}
})
pod.write_yaml('/content/pages/_blueprint.yaml', {
'$path': '/{base}/',
'$view': '/views/base.html',
'$localization': {
'path': '/{locale}/{base}/'
},
})
pod.write_file('/content/pages/page.yaml', 'partial: !g.doc /content/partials/partial.yaml')
pod.write_yaml('/content/partials/_blueprint.yaml', {})
pod.write_yaml('/content/partials/partial.yaml', {})
pod.write_yaml('/content/partials/[email protected]', {})
pod.write_file('/views/base.html', '{{doc.locale}} {{doc.partial.locale}}')
pod.router.add_all(use_cache=False)
content = testing.render_path(pod, '/page/')
self.assertEqual('en en', content)
dependents = pod.podcache.dependency_graph.get_dependents(
'/content/partials/partial.yaml')
self.assertEqual(set([
'/content/partials/partial.yaml',
'/content/pages/page.yaml',
]), dependents)
content = testing.render_path(pod, '/de/page/')
self.assertEqual('de de', content)
dependents = pod.podcache.dependency_graph.get_dependents(
'/content/partials/[email protected]')
self.assertEqual(set([
'/content/partials/[email protected]',
'/content/pages/page.yaml',
]), dependents)
def test_yaml_dump(self):
"""Test if the yaml representer is working correctly."""
pod = testing.create_pod()
pod.write_yaml('/podspec.yaml', {})
pod.write_yaml('/content/pages/page.yaml', {})
doc = pod.get_doc('/content/pages/page.yaml')
input_obj = {
'doc': doc
}
expected = textwrap.dedent(
"""\
doc: !g.doc '/content/pages/page.yaml'
""")
self.assertEqual(expected, utils.dump_yaml(input_obj))
if __name__ == '__main__':
unittest.main()
|
Cheap auto insurance companies in MD. The Best Car Insurance Quotes Available Here at Rock Bottom Prices!
You might be entitled to discounts, so compare the same coverage with rates quoted for each individual driver. Insurers in your area compete for your business, and remember that car and van insurance is mandatory, not optional. When you purchase a policy, favour companies that are financially sound, think about the down payment, and make sure the vehicle itself is mechanically sound for your situation. Excess and bond waivers are often overlooked, yet they matter when bigger claims arise, as do the other amenities a policy offers.
Driving without insurance is illegal, so if several people use the vehicle, consider a policy covering anyone driving it. Many websites let you take a quick look at the lowest premiums for collision cover: submit your information once and compare what comes back, bearing in mind that a middle-aged, experienced driver can usually receive discounts. Temporary cover can be worthwhile while you arrange a loan to buy a car, and legal-expenses protection can often be added as a rider for a small monthly payment. Keep your information handy at the accident scene: the driver's license, your contacts and the mileage driven. Antique and import cars, DUI citations and your age group all affect the rate, so check how each is treated before you need to claim.
You can also quickly determine what will happen to your premium if your circumstances change. A no-claims bonus (NCB) can make a real difference, and few things matter more than matching the policy to your needs, so take the time to compare the general cheap auto insurance companies in MD. Suitable policies exist for every vehicle; identify the must-haves for yours. For example, since the teen category of drivers is considered higher risk, expect higher premiums there, and cut back on other insurances if the budget needs balancing.
Once you have compared, you will know whether you have better options.
|
#
# Copyright 2015 by Justin MacCallum, Alberto Perez, Ken Dill
# All rights reserved
#
from collections import OrderedDict, namedtuple
import os
import math
from simtk import openmm
import numpy as np
from meld.system.system import ParmTopReader
CMAPResidue = namedtuple('CMAPResidue', 'res_num res_name index_N index_CA index_C')
# Terminal residues that act as a cap and have no amap term
capped = ['ACE','NHE','OHE', 'NME', 'GLP','DUM','NAG','DIF','BER','GUM','KNI','PU5','AMP','0E9']
class CMAPAdder(object):
_map_index = {
'GLY': 0,
'PRO': 1,
'ALA': 2,
'CYS': 3,
'CYX': 3,
'ASP': 3,
'ASH': 3,
'GLU': 3,
'GLH': 3,
'PHE': 3,
'HIS': 3,
'HIE': 3,
'HID': 3,
'HIP': 3,
'ILE': 3,
'LYS': 3,
'LYN': 3,
'MET': 3,
'ASN': 3,
'GLN': 3,
'SER': 3,
'THR': 3,
'VAL': 3,
'TRP': 3,
'TYR': 3,
'LEU': 3,
'ARG': 3
}
def __init__(self, top_string, alpha_bias=1.0, beta_bias=1.0, ccap=False, ncap=False):
"""
Initialize a new CMAPAdder object
:param top_string: an Amber new-style topology in string form
:param alpha_bias: strength of alpha correction, default=1.0
:param beta_bias: strength of beta correction, default=1.0
"""
self._top_string = top_string
self._alpha_bias = alpha_bias
self._beta_bias = beta_bias
self._ccap = ccap
self._ncap = ncap
reader = ParmTopReader(self._top_string)
self._bonds = reader.get_bonds()
self._residue_numbers = reader.get_residue_numbers()
self._residue_names = reader.get_residue_names()
self._atom_map = reader.get_atom_map()
self._ala_map = None
self._gly_map = None
self._pro_map = None
self._gen_map = None
self._load_maps()
def add_to_openmm(self, openmm_system):
"""
Add CMAPTorsionForce to openmm system.
:param openmm_system: System object to receive the force
"""
cmap_force = openmm.CMAPTorsionForce()
cmap_force.addMap(self._gly_map.shape[0], self._gly_map.flatten())
cmap_force.addMap(self._pro_map.shape[0], self._pro_map.flatten())
cmap_force.addMap(self._ala_map.shape[0], self._ala_map.flatten())
cmap_force.addMap(self._gen_map.shape[0], self._gen_map.flatten())
# loop over all of the contiguous chains of amino acids
for chain in self._iterate_cmap_chains():
# loop over the interior residues
n_res = len(chain)
for i in range(1, n_res-1):
map_index = self._map_index[chain[i].res_name]
# subtract one from all of these to get zero-based indexing, as in openmm
c_prev = chain[i - 1].index_C - 1
n = chain[i].index_N - 1
ca = chain[i].index_CA - 1
c = chain[i].index_C - 1
n_next = chain[i+1].index_N - 1
print "CMAP term:",i,map_index
cmap_force.addTorsion(map_index, c_prev, n, ca, c, n, ca, c, n_next)
openmm_system.addForce(cmap_force)
def _iterate_cmap_chains(self):
"""
Yield a series of chains of amino acid residues that are bonded together.
:return: a generator that will yield lists of CMAPResidue
"""
# use an ordered dict to remember num, name pairs in order, while removing duplicates
residues = OrderedDict((num, name) for (num, name) in zip(self._residue_numbers, self._residue_names))
        print(residues)
new_res = []
for r in residues.items():
num,name = r
if name not in capped:
new_res.append(r)
residues = OrderedDict(new_res)
        print(residues)
# now turn the ordered dict into a list of CMAPResidues
residues = [self._to_cmap_residue(num, name) for (num, name) in residues.items()]
        print(residues)
        # is each residue i connected to its predecessor, i-1?
connected = self._compute_connected(residues)
# now we iterate until we've handled all residues
while connected:
chain = [residues.pop(0)] # we always take the first residue
connected.pop(0)
# if there are other residues connected, take them too
while connected and connected[0]:
chain.append(residues.pop(0))
connected.pop(0)
# we've taken a single connected chain, so yield it
# then loop back to the beginning
            print('CHAIN:', chain)
yield chain
def _compute_connected(self, residues):
"""
Return a list of boolean values indicating if each residue is connected to its predecessor.
:param residues: a list of CMAPResidue objects
:return: a list of boolean values indicating if residue i is bonded to i-1
"""
def has_c_n_bond(res_i, res_j):
"""Return True if there is a bond between C of res_i and N of res_j, otherwise False."""
if (res_i.index_C, res_j.index_N) in self._bonds:
return True
else:
return False
        # zip together consecutive residues and see if they are bonded
connected = [has_c_n_bond(i, j) for (i, j) in zip(residues[0:], residues[1:])]
# the first element has no element to the left, so it's not connected
connected = [False] + connected
return connected
def _to_cmap_residue(self, num, name):
"""
Turn a residue number and name into a CMAPResidue object
:param num: residue number
:param name: residue name
:return: CMAPResidue
"""
n = self._atom_map[(num, 'N')]
ca = self._atom_map[(num, 'CA')]
c = self._atom_map[(num, 'C')]
res = CMAPResidue(res_num=num, res_name=name, index_N=n, index_CA=ca, index_C=c)
return res
def _load_map(self, stem):
basedir = os.path.join(os.path.dirname(__file__), 'maps')
alpha = np.loadtxt(os.path.join(basedir, '{}_alpha.txt'.format(stem))) * self._alpha_bias
beta = np.loadtxt(os.path.join(basedir, '{}_beta.txt'.format(stem))) * self._beta_bias
total = alpha + beta
assert total.shape[0] == total.shape[1]
n = int(math.ceil(total.shape[0] / 2.0))
total = np.roll(total, -n, axis=0)
total = np.roll(total, -n, axis=1)
total = np.flipud(total)
return total
def _load_maps(self):
"""Load the maps from disk and apply the alpha and beta biases."""
self._gly_map = self._load_map('gly')
self._pro_map = self._load_map('pro')
self._ala_map = self._load_map('ala')
self._gen_map = self._load_map('gen')
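# Usage sketch (illustrative, not from the original source; the topology file
# name and the surrounding OpenMM setup are assumed):
#
#   with open('system.top') as f:
#       adder = CMAPAdder(f.read(), alpha_bias=1.0, beta_bias=1.0)
#   adder.add_to_openmm(openmm_system)
#
# This appends a single CMAPTorsionForce carrying the gly/pro/ala/general maps
# to the given OpenMM System.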
|
"""Simple example showing several generations of spans in a trace.
"""
import argparse
import contextlib
import sys
import time
import opentracing
import lightstep.tracer
def sleep_dot():
"""Short sleep and writes a dot to the STDOUT.
"""
time.sleep(0.05)
sys.stdout.write('.')
sys.stdout.flush()
def add_spans():
"""Calls the opentracing API, doesn't use any LightStep-specific code.
"""
with opentracing.tracer.start_trace(operation_name='trivial/initial_request') as parent_span:
parent_span.set_tag('url', 'localhost')
sleep_dot()
parent_span.info('All good here! N=%d, flt=%f, string=%s', 42, 3.14, 'xyz')
parent_span.set_tag('span_type', 'parent')
sleep_dot()
# This is how you would represent starting work locally.
with parent_span.start_child(operation_name='trivial/child_request') as child_span:
child_span.error('Uh Oh! N=%d, flt=%f, string=%s', 42, 3.14, 'xyz')
child_span.set_tag('span_type', 'child')
sleep_dot()
# To connect remote calls, pass a trace context down the wire.
trace_context = child_span.trace_context
with opentracing.tracer.join_trace(operation_name='trivial/remote_span',
parent_trace_context=trace_context) as remote_span:
remote_span.info('Remote! N=%d, flt=%f, string=%s', 42, 3.14, 'xyz')
remote_span.set_tag('span_type', 'remote')
sleep_dot()
def lightstep_tracer_from_args():
"""Initializes lightstep from the commandline args.
"""
parser = argparse.ArgumentParser()
parser.add_argument('--token', help='Your LightStep access token.')
parser.add_argument('--host', help='The LightStep reporting service host to contact.',
default='localhost')
parser.add_argument('--port', help='The LightStep reporting service port.',
type=int, default=9997)
    parser.add_argument('--use_tls', help='Whether to use TLS for reporting',
                        action='store_true')
parser.add_argument('--group-name', help='The LightStep runtime group',
default='Python-Opentracing-Remote')
args = parser.parse_args()
if args.use_tls:
return lightstep.tracer.init_tracer(
group_name=args.group_name,
access_token=args.token,
service_host=args.host,
service_port=args.port)
else:
return lightstep.tracer.init_tracer(
group_name=args.group_name,
access_token=args.token,
service_host=args.host,
service_port=args.port,
secure=False)
if __name__ == '__main__':
    print('Hello ')
# Use opentracing's default no-op implementation
with contextlib.closing(opentracing.Tracer()) as impl:
opentracing.tracer = impl
add_spans()
    # Use LightStep's debug tracer, which logs to the console instead of reporting to LightStep.
with contextlib.closing(lightstep.tracer.init_debug_tracer()) as impl:
opentracing.tracer = impl
add_spans()
# Use LightStep's opentracing implementation
with contextlib.closing(lightstep_tracer_from_args()) as impl:
opentracing.tracer = impl
add_spans()
    print('World!')
|
Traditional sand production prediction depends on basic data from well logging or rock strength tests, which reflect only the initial state of the formation; this is called stationary sanding prediction. In fact, for an unconsolidated sandstone reservoir, the critical sanding conditions vary with reservoir pressure, water cut or water saturation, and formation temperature. In this paper, the concept of dynamic sanding prediction is put forward, and the mechanisms by which these production conditions affect the critical sanding conditions are identified so that dynamic sanding prediction can be performed. The relationship between sandstone rock strength and water saturation was obtained by fitting experimental data from other researchers. The effect of decreasing reservoir pressure on sand production was captured through the principal stress formula and a stress distribution model, and an additional thermal stress model for thermal production accounts for the effect of temperature on sanding. Finally, an integrated model for dynamic sanding prediction was developed and used to predict the sanding characteristics of a typical heavy oil reservoir under thermal production in Bohai Bay. The results indicate that decreasing reservoir pressure and increasing temperature tend to lead to an obvious drop in the critical sanding pressure drawdown (CSPD). CSPD also decreases as water cut or water saturation increases, with a sensitivity that depends on the initial water saturation of the reservoir rock. For a sandstone reservoir under thermal production, the highest risk of sand production arises at the stage when the well turns from thermal injection to production, so steadily ramping the production rate up to the desired value is suggested to avoid disastrous sand production.
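A minimal numerical sketch of the ingredients described above, written as Python for concreteness. It assumes a linear strength-saturation fit and the standard thermal stress relation for a laterally constrained elastic medium, sigma_T = E*alpha*dT/(1-nu); every coefficient, function name and the CSPD proxy itself are illustrative assumptions, not values or equations taken from this study.

# Illustrative sketch of dynamic sanding prediction (all coefficients assumed).
E = 5.0e9          # Young's modulus, Pa (assumed)
NU = 0.25          # Poisson's ratio (assumed)
ALPHA_T = 1.0e-5   # linear thermal expansion coefficient, 1/K (assumed)

def rock_strength(sw, c0=8.0e6, k=4.0e6):
    """Assumed linear fit: strength falls as water saturation sw rises."""
    return c0 - k * sw

def thermal_stress(delta_t):
    """Thermal stress for a laterally constrained elastic medium."""
    return E * ALPHA_T * delta_t / (1.0 - NU)

def critical_drawdown(p_res, p_init, sw, delta_t, a=0.5):
    """Assumed CSPD proxy: strength minus depletion- and heat-induced stress."""
    depletion_stress = a * (p_init - p_res)  # extra effective stress from depletion
    return rock_strength(sw) - depletion_stress - thermal_stress(delta_t)

# CSPD drops with depletion, heating and rising water saturation:
print(critical_drawdown(p_res=20e6, p_init=20e6, sw=0.2, delta_t=0.0))
print(critical_drawdown(p_res=15e6, p_init=20e6, sw=0.5, delta_t=50.0))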
|
from django.core.management.base import BaseCommand
from pathlib import Path
import base64
from ... import models
from ... import emails
from ... import pgp
from ... import utils
from ...content_types import guess_content_type
class Command(BaseCommand):
help = "Export digested pgp .eml files to a zip archive"
def add_arguments(self, parser):
parser.add_argument('destination',
help='path to the folder where the files will be dumped')
parser.add_argument('--where', default="(flags->>'pgp')::bool",
help='SQL "WHERE" clause on the snoop_document table')
def handle(self, destination, where, **options):
query = utils.build_raw_query('snoop_document', where)
root = Path(destination)
done = 0
for doc in models.Document.objects.raw(query):
if emails.is_email(doc):
email = emails.open_email(doc)
if not email.pgp:
print("id:", doc.id, "is not a pgp-encrypted email")
continue
try:
output = decrypt_email_file(email)
dump_eml(root, doc.md5, output)
except Exception as e:
print("id:", doc.id, "failed: " + type(e).__name__)
else:
print("id:", doc.id, "is done")
done += 1
else:
print("id:", doc.id, "is not an email file")
print(done, "documents dumped.")
def decrypt_email_file(email):
message = email._message()
for part in message.walk():
if part.is_multipart():
continue
content_type = part.get_content_type()
filename = part.get_filename()
if filename:
if content_type == 'text/plain' or \
content_type == "application/octet-stream":
content_type = guess_content_type(filename)
part.set_type(content_type)
if filename == "message.html.pgp":
del part['Content-Disposition']
part.add_header('Content-Disposition',
'attachment',
filename='pgp.message.html')
part.replace_header('Content-Type', 'text/html')
data = part.get_payload(decode=True)
if not data:
continue
if pgp.contains_pgp_block(data):
data = pgp.decrypt_pgp_block(data)
b64 = base64.encodebytes(data)
part.set_payload(b64)
del part['Content-Transfer-Encoding']
part['Content-Transfer-Encoding'] = 'base64'
return message.as_bytes()
def dump_eml(root_path, md5, data):
folder = root_path / md5[0:2] / md5[2:4]
folder.mkdir(parents=True, exist_ok=True)
file = folder / (md5 + '.eml')
with file.open('wb') as f:
f.write(data)
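# Usage sketch (hedged): the management command's name is taken from this
# module's filename, which is not shown here, so "exportpgp" is a placeholder.
#
#   ./manage.py exportpgp /tmp/pgp-dump --where "(flags->>'pgp')::bool"
#
# Decrypted messages land under <destination>/<md5[0:2]>/<md5[2:4]>/<md5>.eml.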
|
Is your daily routine getting all too much? Do you want to reignite the flame with your special someone? If so, consider booking a stay at a Herefordshire bed and breakfast! Many quaint getaways can be found tucked away from the hustle and bustle of everyday life, offering the ideal escape. A great idea if you plan on sightseeing or watching a show out of town, bed and breakfasts are normally much smaller than a hotel, but tend to offer the same amenities. The owners will greet you, cook for you and advise you on the best local attractions, giving this type of accommodation a personal touch.
One of the main reasons why people choose to stay at a Herefordshire bed and breakfast over other types of accommodation is the fact that breakfast is homemade. While you sit at the dinner table, the hosts will be preparing a feast of eggs, bacon, toast, muffins and donuts, all washed down with tea, coffee and orange juice. Combine this with the hands-on customer service and privacy, and there’s no better place for some time-out with your other half.
Most people consider accommodation of this kind to be a home away from home, because it has a home-like environment. You will likely be situated near some of the best tourist spots when you stay at a bed and breakfast and if not, the accommodation is sure to be based close to public transport options. Normally, a bed and breakfast will be enveloped by picturesque scenery, which appeals to travellers from all walks of life. This means that you will have the opportunity to meet like-minded individuals when exploring nearby attractions.
The fact that a Herefordshire bed and breakfast will be privately owned means that you will need to perform some research ahead of your stay. Online booking services will come in handy, because these websites only list recommended spots and feature honest reviews posted by people who have stayed at the accommodation previously. Consider your budget at this stage and give thought to the location so that you don’t have to rely entirely on public transport to see what the area has to offer.
Would you like to stay at one of the most enchanting Herefordshire bed and breakfast properties? If so, call the team at Sink Green Farm on 01432 870 223 to check availability and rates.
|
import torch
import torch.optim as optim
class SharedRMSprop(optim.RMSprop):
def __init__(self, params, lr=1e-2, alpha=0.99, eps=1e-8, weight_decay=0, momentum=0, centered=False):
super(SharedRMSprop, self).__init__(params, lr, alpha, eps, weight_decay, momentum, centered)
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
state['step'] = torch.zeros(1)
state['square_avg'] = p.data.new().resize_as_(p.data).zero_()
if group['momentum'] > 0:
state['momentum_buffer'] = p.data.new().resize_as_(p.data).zero_()
if group['centered']:
state['grad_avg'] = p.data.new().resize_as_(p.data).zero_()
def share_memory(self):
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
state['step'].share_memory_()
state['square_avg'].share_memory_()
if group['momentum'] > 0:
state['momentum_buffer'].share_memory_()
if group['centered']:
state['grad_avg'].share_memory_()
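# Usage sketch: the point of pre-allocating and sharing the optimizer state is
# that multiple worker processes (e.g. A3C-style training) can update the same
# statistics. The model and worker function below are illustrative assumptions.
#
#   import torch.multiprocessing as mp
#
#   model = MyModel()           # any nn.Module (assumed)
#   model.share_memory()
#   optimizer = SharedRMSprop(model.parameters(), lr=1e-4)
#   optimizer.share_memory()
#   workers = [mp.Process(target=train, args=(model, optimizer))
#              for _ in range(4)]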
|
The town in New York’s Finger Lakes saw the beginnings of the Women’s Rights Movement and inspired the classic Frank Capra film.
Supporters of women’s rights gathered in Seneca Falls in 1848 for the first Women’s Rights Convention. Elizabeth Cady Stanton, a leading activist, lived there, and the Wesleyan Chapel – the convention site – was a local haven for antislavery and political rallies.
At the Women’s Rights National Historical Park (www.nps.gov/wori/index.htm), film, displays, and documents tell the story of the fight for women’s rights, then and now, and how it has expanded to include efforts for equality and civil rights for all on a global stage.
Through December, the exhibit “Remember to be a Lady; Forget you are a Woman” looks at the experiences of the WAVES – women in the US Navy – during World War II.
The National Women’s Hall of Fame (www.womenofthehall.org) showcases over 300 women in the humanities, science, sports, engineering, politics, education, medicine, and other areas, most of whom are little-known outside (and often inside) their field. The Hall is housed in an 1844 knitting mill. It’s an appropriate example of adaptive reuse, since most of the people who worked there were women. The 2015 ceremony for the induction of new members is October 3.
The classic holiday film, “It’s a Wonderful Life,” is set in the fictional town of Bedford Falls. Writer/director Frank Capra visited the real town of Seneca Falls often while writing the screenplay. Many of the movie’s locations and elements of the plot seem drawn from those visits.
The Wonderful Life Museum (www.wonderfullifemuseum.com) has photos, props, and memorabilia from the film. You can pick up (or download) an illustrated walking tour to sites that inspired the story and setting.
For more about Seneca Falls visit www.senecafalls.com.
|
import sys
sys.path.extend(['develop-eggs/PIL-1.1.6-py2.7-linux-i686.egg', 'develop-eggs/pycairo-1.8.10-py2.7-linux-i686.egg'])
reload(sys)
sys.setdefaultencoding('utf-8')
del sys.setdefaultencoding
print "default encoding: utf-8"
#monkeypatch cherrypy, see
#http://trac.turbogears.org/turbogears/ticket/1022
import cherrypy
def our_decode(self, enc):
def decodeit(value, enc):
if hasattr(value,'file'):
return value
elif isinstance(value,list):
return [decodeit(v,enc) for v in value]
elif isinstance(value,dict):
for k,v in value.items():
value[k] = decodeit(v,enc)
return value
        # plain string: decode it
else:
return value.decode(enc)
decodedParams = {}
for key, value in cherrypy.request.params.items():
decodedParams[key] = decodeit(value,enc)
cherrypy.request.params = decodedParams
from cherrypy.filters.decodingfilter import DecodingFilter
DecodingFilter.decode = our_decode
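# Importing this module applies the patch: from then on, every request's
# parameters are decoded with the configured encoding before reaching handlers,
# while file uploads and non-string values are passed through untouched.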
|
It may not seem like it, but UK households are investing less and less in material possessions. In the early noughties, we spent around 26% of our money on ‘things’ like clothing, electronics, and cars, while this had fallen to only 21% by 2014, according to a study from Carbon Commentary. Instead, we are spending more on the likes of holidays, services, and other experiences.
As we’re already accumulating less stuff on the whole, there’s never been a better time to try and lead a more minimalist lifestyle. This philosophy calls for the paring down of possessions to just the essentials, in an effort to simplify your life. In theory, when your home becomes decluttered, your daily routine should follow suit, and you can focus more on the things that matter.
With this in mind, let’s take a look at three steps you can take to embrace minimalism and get your lifestyle nicely streamlined. Read on to find out more.
To get the ball rolling, you need to identify all the belongings that you’re going to get rid of. If you have a whole house or apartment to sift through, this task is going to take quite a long time. There are two ways that you can approach the job: all at once or little by little. Which one suits you will probably depend on your lifestyle and how much time you can commit.
We have popular lifestyle blog The Minimalists to thank for developing the ‘all at once’ method, which is a radical way of ridding your home of clutter. It involves packing away all your possessions into boxes, as if you were moving home, then living out of them for a month. By only unpacking the things you need, you should be left with boxes full of leftovers that can go the journey. Just remember to leave any important paper work or heirlooms out of the process.
Alternatively, you can try the ‘little by little’ minimalism approach, which doesn’t involve making any sweeping changes to your home. Here, you sort through smaller areas until you’ve covered each room, making the whole process much more digestible. This is a good option if you have a busy lifestyle and don’t have a huge chunk of time to dedicate to the job. Though this seems like a softer option, by asking yourself whether you’ve used an item in the last six months and getting rid of any you haven’t, you can still run a tough rule over everything.
The next stage is to decide what you’re going to do with the clutter you’ve put to one side. Aim to get this organised quickly after sorting everything out, as the longer your unwanted items sit there, the more likely they are to wind up back among your essentials. Take some time to separate your clutter into three lots: items you’ll bin, items you’ll sell, and those you will give away.
First, have a look for items of value that could potentially earn you some cash if you sell them. You may be surprised at what people will buy, so it’s definitely worth trying your luck on eBay, Craigslist, Gumtree, or at a local car boot sale. For high value items like designer clothing, jewellery, or musical instruments, be sure to sell through a reputable dealer like H&T, who have in-house experts to ensure you get an accurate price.
Donating your things to a charity or someone you know is a nice way of giving your clutter a new, meaningful way of life. Use the Charity Retail Association’s charity shop finder to find a local bric-a-brac shop that is willing to take your belongings and sell them for a good cause.
The binning option is fairly self-explanatory for your leftovers, but make sure you aren’t throwing anything away that could net you some money or do some good for charity. Also, take care to recycle anything that you can.
Once you’ve decluttered your life, you want it to stay that way, and the most effective method of accomplishing this is to address your shopping habits. This way, you’ll be able to cut back and save money, all while leading a minimalist lifestyle you can be proud of.
Try not to purchase anything on a whim while you are out shopping. Instead, take a moment to consider whether you really need the product in question, and whether it’s going to have any benefit. If you feel like you need to buy it, sleep on the decision for 24 hours and come back to it. You might just surprise yourself when you’re looking at the situation with fresh eyes.
Follow these three steps and you will be well on the way to creating a brand new minimalism lifestyle. Stick to it, and you may well begin to see how simple it is to live happily and contently without being a serial consumer.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, sys
import pprint
import pytz
import pymysql as mdb
from datetime import datetime
from getpass import getpass
import django
from django.utils.timezone import make_aware
from django.utils.translation import ugettext_lazy as _
from django.db import transaction
from countries import COUNTRIES, PHONE_CODES
import getters as g
PROJECT_DIR = os.path.dirname(os.path.dirname(__file__))
sys.path.insert(0, PROJECT_DIR)
os.environ['DJANGO_SETTINGS_MODULE'] = 'pasportaservo.settings'
def u2dt(timestamp):
"""A convenience function to easily convert unix timestamps to datetime TZ aware."""
return make_aware(datetime.fromtimestamp(timestamp), pytz.timezone('UTC'))
@transaction.atomic
def migrate():
# Connect to an existing database
passwd = getpass("MySQL password for 'root': ")
dr = mdb.connect('localhost', 'root', passwd, 'pasportaservo', charset='utf8')
users = dr.cursor(mdb.cursors.DictCursor)
users.execute("""
SELECT u.uid id,
field_nomo_kaj_familia_nomo_1_value name1,
field_naskijaro_persono_1_value year1,
field_sekso_persono_1_value sex1,
field_nomo_kaj_familia_nomo_2_value name2,
field_naskijaro_persono_2_value year2,
field_sekso_persono_1_value sex2,
field_nomo_kaj_familia_nomo_3_value name3,
field_naskijaro_persono_3_value year3
-- There is no field_sekso_persono_3_value in the source database
FROM users u
INNER JOIN node n ON n.uid=u.uid AND n.type='profilo'
INNER JOIN content_type_profilo p ON p.nid=n.nid
WHERE u.uid > 1
AND u.name <> 'testuser'
AND ( (field_lando_value = 'Albanio' AND field_urbo_value is not NULL)
OR (field_lando_value <> 'Albanio'))
AND field_familia_nomo_value <> 12
GROUP BY u.uid
""")
user = users.fetchone()
    django.setup()
    from django.contrib.auth.models import User
    from hosting.utils import title_with_particule
    from hosting.models import Profile, Place
# Starting...
print('Ignoring:')
while user is not None:
data1 = {'first_name': title_with_particule(user['name1']), 'birth_date': g.get_birth_date(user['year1']), 'title': g.get_title(user['sex1'])}
data2 = {'first_name': title_with_particule(user['name2']), 'birth_date': g.get_birth_date(user['year2']), 'title': g.get_title(user['sex2'])}
data3 = {'first_name': title_with_particule(user['name3']), 'birth_date': g.get_birth_date(user['year3'])}
try:
place = Place.objects.get(pk=user['id'])
except Place.DoesNotExist:
place = None
print(user['id'], data1['first_name'], data3['birth_date'])
if place and (data1['birth_date'] or data1['first_name']):
profile1 = Profile(**data1)
profile1.save()
place.family_members.add(profile1)
if place and (data2['birth_date'] or data2['first_name']):
profile2 = Profile(**data2)
profile2.save()
place.family_members.add(profile2)
if place and (data3['birth_date'] or data3['first_name']):
profile3 = Profile(**data3)
profile3.save()
place.family_members.add(profile3)
user = users.fetchone()
users.close()
dr.close()
print('\n Success! \\o/\n')
if __name__ == '__main__':
migrate()
|
Join us for a FREE Living Naturally Presentation!
Learn about hunger and the nine ways it presents.
Learn through mindfulness how you can drop the dieting yo-yo and develop a more intuitive relationship with food and your body.
In class experiential training will be offered.
Join us in Rm. 167. No RSVP required, see you there!
|
#!/dark/usr/anaconda/bin/python
import agnkey
import numpy as np
import os,string,re
import datetime,time,ephem
import requests
def getstatus_all(token,status,date,user='stefano_valenti1',proposal='KEY2018B-001'):
req = 'https://observe.lco.global/api/userrequests/' + '?limit=1000&'+\
'proposal=' + proposal + '&'+\
'created_after=' + date + '&'+\
'user=' + user
if status:
req = req + '&state=' + status
    print(req)
ss = requests.get(req, headers={'Authorization': 'Token ' + token})
return ss.json()
def updatetriggerslog(ll0):
username, passwd = agnkey.util.readpass['odinuser'], agnkey.util.readpass['odinpasswd']
token = agnkey.util.readpass['token']
track = ll0['tracknumber']
_status = ''
if track!=0:
_dict = agnkey.util.getstatus_new(token,str(track).zfill(10))
else:
        print(ll0)
        print('warning track number 0')
_dict={}
################# update status
    if 'state' in _dict:
_status=_dict['state']
if ll0['status']!=_status:
agnkey.agnsqldef.updatevalue('triggerslog', 'status', _status, track,
connection='agnkey',namefile0='tracknumber')
else:
_status = 'NULL'
################# update reqnumber
    if 'requests' in _dict:
_reqnumber = _dict['requests'][0]['id']
if str(ll0['reqnumber']).zfill(10)!= _reqnumber:
agnkey.agnsqldef.updatevalue('triggerslog', 'reqnumber', _reqnumber, track,
connection='agnkey',namefile0='tracknumber')
else:
_reqnumber = ''
return _status
############################################################################
# LOAD ALL TRIGGER OF THE LAST 7 DAYS
datenow = datetime.datetime(time.gmtime().tm_year, time.gmtime().tm_mon, time.gmtime().tm_mday)
date = (datenow+datetime.timedelta(-15)).strftime('%Y-%m-%d')
status = ''
token = agnkey.util.readpass['token']
all = getstatus_all(token,status,date)
result = {}
for line in all['results']:
if line['requests']:
result[line['id']] = [line['state'],line['requests'][-1]['id']]
else:
result[line['id']] = [line['state'], 0 ]
# UPDATE STATUS
command1 = ['select t.*,d.filters,d.mode from triggerslog as t join triggers as d where t.status is NULL and d.id = t.triggerid']
data11 = agnkey.agnsqldef.query(command1)
if len(data11):
try:
for data2 in data11:
track = data2['tracknumber']
if track!=0:
if track in result:
_status = result[track][0]
_reqnumber = result[track][1]
agnkey.agnsqldef.updatevalue('triggerslog', 'status', _status, track,
connection='agnkey',namefile0='tracknumber')
if _reqnumber:
agnkey.agnsqldef.updatevalue('triggerslog', 'reqnumber', _reqnumber, track,
connection='agnkey',namefile0='tracknumber')
                else:
                    #
                    # replace querying one by one with a single query
                    #
                    print('warning problems here ' + str(track))
                    _status = 'COMPLETED'
                    #_status = updatetriggerslog(data2)
                    print(_status)
            else:
                print('warning track number = 0 ')
    except Exception:
        pass
_lunar = 20
_jd = ephem.julian_date()
command2 = ['select t.*,d.filters,d.mode,d.instrument from triggerslog as t join triggers as d where t.status = "PENDING" and d.id = t.triggerid']
data3 = agnkey.agnsqldef.query(command2)
if len(data3):
    print('ID JD_NOW STATUS END_WINDOW')
for data2 in data3:
track = data2['tracknumber']
if track in result:
_status = result[track][0]
_reqnumber = result[track][1]
agnkey.agnsqldef.updatevalue('triggerslog', 'status', _status, track,
connection='agnkey',namefile0='tracknumber')
if _reqnumber:
agnkey.agnsqldef.updatevalue('triggerslog', 'reqnumber', _reqnumber, track,
connection='agnkey',namefile0='tracknumber')
else:
# replace old method
_status = 'COMPLETED'
#_status = updatetriggerslog(data2)
print data2['id'], _jd , _status,
print _jd - data2['windowend']
if _status == 'PENDING' and _jd - data2['windowend'] > 0.1:
print 'warning this observation is still PENDING but window is over'
#raw_input('stop here')
agnkey.agnsqldef.updatevalue('triggerslog', 'status', 'UNSCHEDULABLE', data2['id'],
connection='agnkey',namefile0='id')
command3 = ['select t.*,l.name,l.ra_sn,l.dec_sn from triggers as t join lsc_sn_pos as l where active = 1 and l.id = t.targid']
data = agnkey.agnsqldef.query(command3)
#raw_input('here')
Warningdictionary={}
if len(data):
ll = {}
for jj in data[0].keys():
ll[jj] = []
for i in range(0,len(data)):
for jj in data[0].keys():
ll[jj].append(data[i][jj])
for jj,activeid in enumerate(ll['id']):
# if activeid in [265]:# 93:# [61,66,67]:
# if activeid in [243]:# 93:# [61,66,67]:
_jd = ephem.julian_date()
print '\n'
print '### id = ' + str(ll['id'][jj])
print '### name = ' + ll['name'][jj]
print '### filters = '+str(ll['filters'][jj])
print '### cadence = '+str(ll['cadence'][jj])
print '### mode = ' + str(ll['mode'][jj])
print '### instrument = ' + str(ll['instrument'][jj])
print '### trigger = '+ str(activeid)
print '\n'
command1 = ['select t.*,d.filters from triggerslog as t join triggers as d where t.triggerid = '+str(activeid)+' and d.id = t.triggerid order by windowend desc limit 3'] # and t.status="PENDING"']
data1 = agnkey.agnsqldef.query(command1)
trigger = False
if len(data1):
jd0 = 0
for data2 in data1:
track = data2['tracknumber']
if track in result:
_status = result[track][0]
_reqnumber = result[track][1]
if _status!='UNSCHEDULABLE':
agnkey.agnsqldef.updatevalue('triggerslog', 'status', _status, track,
connection='agnkey', namefile0='tracknumber')
if _reqnumber:
agnkey.agnsqldef.updatevalue('triggerslog', 'reqnumber', _reqnumber, track,
connection='agnkey', namefile0='tracknumber')
# raw_input('hehe')
else:
print 'Warning: trigger not found'
#_status = updatetriggerslog(data2)
_status = 'COMPLETED'
if _status == 'PENDING':
jd0 = _jd
elif _status == 'COMPLETED':
jd0 = max(jd0,data2['windowend'])
elif _status in ['UNSCHEDULABLE','CANCELED','WINDOW_EXPIRED']:
pass
else:
print 'status not recognized '+str(_status)
if jd0==0:
print 'no observation completed'
trigger=True
else:
print 'last observation '+str(float(_jd)-float(jd0))+' days ago'
print 'cadence '+str(ll['cadence'][jj]) # .1 to take in account the trigger time
if float(ll['cadence'][jj]) <= 2:
if float(_jd)-float(jd0) > .001:
print 'cadence less or equal to one day'
print 'last window ended, trigger'
trigger=True
# elif 1 < float(ll['cadence'][jj]) <= 2:
# print 'cadence between 1 and 2 days'
# print 'trigger if it is cadence-.3 days from end of the window'
# if float(ll['cadence'][jj])-.3 <= float(_jd)-float(jd0):
# trigger=True
else:
print 'trigger if it is cadence-.3 days from end of the window'
if float(ll['cadence'][jj])-.3 <= float(_jd)-float(jd0):
print 'trigger new observation'
trigger=True
else:
print 'last observation less than '+str(ll['cadence'][jj])+' days ago, do not trigger'
else:
print 'no trigger for this '+str(activeid)
trigger = True
if trigger:
SN_RA = ll['ra_sn'][jj]
SN_DEC = ll['dec_sn'][jj]
NAME = ll['name'][jj]
_airmass = ll['airmass'][jj]
_proposal = ll['proposal'][jj]
_site = ll['site'][jj]
_targid = ll['targid'][jj]
_mode = ll['mode'][jj]
proposals = agnkey.util.readpass['proposal']
users = agnkey.util.readpass['users']
token = agnkey.util.readpass['token']
if not _proposal:
_proposal=proposals[0]
_user0=users[0]
else:
_user0=users[proposals.index(_proposal)]
passwd=agnkey.util.readpass['odinpasswd']
datenow = datetime.datetime(time.gmtime().tm_year, time.gmtime().tm_mon, time.gmtime().tm_mday,\
time.gmtime().tm_hour, time.gmtime().tm_min, time.gmtime().tm_sec)
datenow = datenow + datetime.timedelta(2./1440.)
if float(ll['cadence'][jj])<1:
print 'cadence less than 24h'
dd1 = datenow + datetime.timedelta(float(ll['cadence'][jj]))
dd2 = datenow + datetime.timedelta(float(ll['cadence'][jj]))*2
dd3 = datenow + datetime.timedelta(float(ll['cadence'][jj]))*3
utstart = [datenow,dd1,dd2]
utend = [dd1,dd2,dd3]
else:
utstart = [datenow]
utend = [datenow + datetime.timedelta(1)]
################# loop on triggers
for mm,nn in enumerate(utstart):
if ll['filters'][jj] == 'floyds':
expvec = ll['exptime'][jj]
nexpvec = ll['numexp'][jj]
_slit = str(ll['slit'][jj])
_acmode = str(ll['acqmode'][jj])
if _acmode != 'brightest':
_acmode ='wcs'
print 'trigger floyds observations'
print str(NAME),expvec,str(SN_RA),str(SN_DEC),str(utstart[mm]),str(utend[mm]),_user0,token,\
_proposal,str(_airmass),_site,_slit,'after', nexpvec
logfile,pp = agnkey.util.sendfloydstrigger_new(str(NAME),expvec,str(SN_RA),str(SN_DEC),\
str(utstart[mm]),str(utend[mm]),_user0, token,
_proposal,_lunar,str(_airmass),_site,_slit,'after',\
nexpvec, _acmode, mode= _mode )
print logfile
try:
input_datesub, input_str_smjd, input_str_emjd, _site2, _instrument2, _nexp2, _exp2, _airmass2,\
_prop2, _user2, _seeing2, _sky2, _priority2, tracknum, reqnum = string.split(logfile)
dictionary={'targid':int(_targid), 'triggerjd':float(input_datesub),'windowstart':float(input_str_smjd),\
'windowend':float(input_str_emjd), 'reqnumber':int(reqnum),'tracknumber':int(tracknum),\
'triggerid':activeid}
agnkey.agnsqldef.insert_values(agnkey.agnsqldef.conn,'triggerslog',dictionary)
except:
Warningdictionary[str(ll['id'][jj])]=''+\
'\n### id = ' + str(ll['id'][jj])+\
'\n### name = ' + ll['name'][jj]+\
'\n### filters = '+str(ll['filters'][jj])+\
'\n### instrument = '+str(ll['instrument'][jj])+\
'\n### cadence = '+str(ll['cadence'][jj])+\
'\n### trigger = '+ str(activeid)+'\n\n'+str(pp)
else:
filtvec = string.split(ll['filters'][jj],',')
nexpvec = string.split(ll['numexp'][jj],',')
expvec = string.split(ll['exptime'][jj],',')
_instrument = ll['instrument'][jj]
print 'trigger photometric observations'
print str(NAME),str(SN_RA),str(SN_DEC),expvec,nexpvec,filtvec,str(utstart[mm]),str(utend[mm]),\
_user0,token,_proposal,_instrument,_airmass,_site,_mode
logfile,python_dict = agnkey.util.sendtrigger2_new(str(NAME),str(SN_RA),str(SN_DEC),\
expvec,nexpvec, filtvec,str(utstart[mm]),\
str(utend[mm]),_user0, token, _proposal,\
_instrument,_airmass, _lunar, _site, mode= _mode )
print logfile
print python_dict
good = False
if logfile:
input_datesub, input_str_smjd, input_str_emjd, _site2, _filters2, _nexp2, _exp2, _airmass2,\
_prop2, _user2, _seeing2, _sky2, _instrument2, _priority2, tracknum, reqnum = string.split(logfile)
if int(tracknum) !=0:
print logfile
good =True
if good:
dictionary={'targid':int(_targid),'triggerjd':float(input_datesub),'windowstart':float(input_str_smjd),\
'windowend':float(input_str_emjd),'reqnumber':int(reqnum),'tracknumber':int(tracknum),\
'triggerid':activeid}
agnkey.agnsqldef.insert_values(agnkey.agnsqldef.conn,'triggerslog', dictionary)
else:
Warningdictionary[str(ll['id'][jj])]=''+\
'\n### id = ' + str(ll['id'][jj])+\
'\n### name = ' + ll['name'][jj]+\
'\n### filters = '+str(ll['filters'][jj])+\
'\n### instrument = '+str(ll['instrument'][jj])+\
'\n### cadence = '+str(ll['cadence'][jj])+\
'\n### mode = '+str(ll['mode'][jj])+\
'\n### trigger = '+ str(activeid)+\
'\n### log = '+str(logfile)+\
'\n### python_dict = '+str(python_dict)+\
'\n\n'
else:
print 'no active objects'
print Warningdictionary
if len(Warningdictionary):
_from = '[email protected]'
_to1 = '[email protected]'
_subject = 'agnkey warning '
text = ''
for jj in Warningdictionary:
text = text + Warningdictionary[jj]
agnkey.util.sendemail(_from,_to1,_subject,text)
#raw_input('stop here')
|
We are actively looking to speak with all SAP FICO Experts in Switzerland who have more than 5 years of experience.
The Swiss market is known for its Financial focus and right now, business is hotter than ever!
My customers based in Zurich, Basel and most of Suisse Romande are looking for experienced SAP FICO Experts across the board.
From Consultant to Programme Manager, if you have extensive experience with SAP FICO then my customers want to speak with you!
Location, location, location! If you're based in Basel or Zurich, your chances of securing an interview quickly increase significantly!
Please note that our customers will NOT be sponsoring visas at this stage, but contact me anyway; I'm open to meeting new people and we can discuss how to move forward.
Would you like to know more? Contact me at ( agraham @ redsapsolutions.com ) for a discreet and confidential conversation and maybe we can help each other out!
Most people are happy where they are, but if this opportunity could offer you more, would you be open to a conversation to see if we could better your current position?
|
## Automatically adapted for numpy.oldnumeric Jul 22, 2012 by
# Copyright (C) 2005 Colin McMillen <[email protected]>
#
# This file is part of GalaxyMage.
#
# GalaxyMage is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# GalaxyMage is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GalaxyMage; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
import os
import sys
import logging
import platform
import optparse
"""Version number is MAJOR.MINOR.REVISION, optionally followed by a
hyphen and some free-form text, like 'alpha' or 'prerelease'."""
__version__ = "0.3.0"
def dependencyCheck():
"""Check to make sure that external dependencies can be loaded
properly."""
logging.debug('Platform: ' + platform.platform())
logging.debug('Python version ' + sys.version)
try:
import numpy.oldnumeric as Numeric
logging.debug('Numeric version ' + Numeric.__version__)
except ImportError, err:
logging.error('Loading dependency "Numeric" failed: ' + str(err))
sys.exit(1)
try:
import pygame
logging.debug('pygame version ' + pygame.version.ver)
except ImportError, err:
logging.error('Loading dependency "pygame" failed: ' + str(err))
sys.exit(1)
try:
import OpenGL.GL
logging.debug('PyOpenGL version ' + OpenGL.__version__)
except ImportError, err:
logging.error('Loading dependency "OpenGL.GL" failed: ' + str(err))
sys.exit(1)
try:
import OpenGL.GLU
except ImportError, err:
logging.error('Loading dependency "OpenGL.GLU" failed: ' + str(err))
sys.exit(1)
try:
import twisted
logging.debug('Twisted version ' + twisted.__version__)
except ImportError, err:
logging.error('Loading dependency "twisted" failed: ' + str(err))
sys.exit(1)
def main():
"""Parse options and run the program accordingly."""
print 'GalaxyMage', __version__
import Translate
# init translate
translateConfig = Translate.Translate()
# Parse command-line options
parser = optparse.OptionParser(description="Cross-platform, open-source tactical RPG.")
parser.add_option("--fullscreen", "-f",
action="store_true", default=False,
help="start in fullscreen mode")
parser.add_option("--quiet", "-q", action="store_true", default=False,
help="disable sounds and music")
parser.add_option("--disable-jit", "-j",
dest="useJIT", action="store_false", default=True,
help='disable "psyco" just-in-time compiler')
parser.add_option("--verbose", "-v", action="count", default=0,
help='increase logging verbosity')
parser.add_option("-w", dest="width", type="int",
default=800, metavar="WIDTH",
help='initial window width [default: %default]')
parser.add_option("--edit-map", "-e", action="store", default=None,
metavar="MAPNAME",
help='start the map editor')
parser.add_option("--port", "-P", type='int', default=22222,
help='game server port [default: %default]')
parser.add_option("--lang", "-l", default="en",
help="set language")
parser.add_option("--user", default=os.environ.get('USER', 'Player'),
help="set username for multiplayer")
(options, args) = parser.parse_args()
# Enable logging
import Log
logLevel = logging.INFO - options.verbose * 10
logLevel = max(logLevel, 1)
Log.setUpLogging(logLevel)
#translateConfig.setLanguage(options.lang)
# Check to make sure we can load dependencies
dependencyCheck()
# Import Psyco if available
if False and options.useJIT: # FIXME: re-enable psyco
try:
import psyco
logging.debug('Enabled "psyco" just-in-time Python compiler')
psyco.full()
except ImportError:
logging.debug('"psyco" just-in-time Python compiler not found')
# Set up PyGame
import pygame
pygame.display.init()
pygame.font.init()
pygame.joystick.init()
try:
pygame.mixer.init(48000, -16, True, 4096)
except pygame.error, e:
options.quiet = True
logging.warn("Couldn't initialize sound: " + str(e))
# Import our own modules
import Resources
import Sound
import twistedmain
# Set texture size
Resources.texture.setTextureSize(64)
# Initialize Sound
Sound.setQuiet(options.quiet)
twistedmain.run(options)
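# Typical invocation (assuming this module is the program's entry script; the
# filename below is illustrative, all flags are defined above):
#   python galaxymage.py --fullscreen -vv -w 1024 --lang en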
|
I was using "Winning the Pencil Wars" from TpT, but this year I simplified. I give my kids 4 pencils with their student number on them (used a Sharpie). I number my students. When they need a pencil sharpened, they drop it in my "To Be Sharpened" can (little bucket from Target in the dollar section).
I sharpen them whenever I have time. I have an electric pencil sharpener that the kids aren't allowed to use. I've not had a big load of pencils to be sharpened yet. It works really well. I happen to have a responsible class so I don't have any rewards tied to them keeping track of their pencils. When someone finds a pencil, they give it to the owner. Works for us! Good luck.
I give everyone 5 sharpened pencils on Monday.
They turn in on Friday.
If students still have all 5, they get a small treat.
I am retired now but most of my teaching was 2nd or 3rd grades.
Each day I had students make sure they had 2 pencils at the beginning of each subject. I kept a bucket of sharpened ones on my desk. A helper sharpened them each morning. If they broke one during class, they could come get a new one from my desk. They were not prone to making excessive trips to get pencils since they had to walk up in front of me. In addition, once a student had their own pencil sharpener, that became the thing to have.
Are you talking about 5 lead pencils??
And you give a TREAT for something that they should be doing anyway with one or two pencils over a week, instead of over a quarter??
And why can’t they sharpen their OWN pencils?!
Please don't tell me it is a food reward given either because that would be nasty.
My young class often keep the same pencil for at least two quarters. You are allowed to expect this. They can use communal pencils too, or small golf pencils if they lose theirs temporarily..... but no way should you give rewards for anything beyond saying, "Good job, Johnny!"
|
# Licensed under GPL version 3 - see LICENSE.rst
import numpy as np
from .base import FlatOpticalElement
from ..math.utils import h2e
from ..visualization.utils import plane_with_hole
class Baffle(FlatOpticalElement):
'''Plate with rectangular hole that allows photons through.
The probability of photons that miss is set to 0.
Parameters
----------
photons: astropy Table
table that includes information on all of the photons
'''
display = {'color': (1., 0.5, 0.4),
'outer_factor': 3,
'shape': 'plane with hole'}
def process_photons(self, photons, intersect, intercoos, interpoos):
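        # Photons that pass through the hole keep propagating (their position is
        # set to the intersection point); photons that miss the hole are absorbed
        # (probability set to 0).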
photons['pos'][intersect] = intercoos[intersect]
photons['probability'][~intersect] = 0
return photons
def triangulate_inner_outer(self):
'''Return a triangulation of the baffle hole embedded in a square.
The size of the outer square is determined by the ``'outer_factor'`` element
in ``self.display``.
Returns
-------
xyz : np.array
        Numpy array of vertex positions in Euclidean space
triangles : np.array
Array of index numbers that define triangles
'''
r_out = self.display.get('outer_factor', 3)
g = self.geometry
outer = h2e(g['center']) + r_out * np.vstack([h2e( g['v_x']) + h2e(g['v_y']),
h2e(-g['v_x']) + h2e(g['v_y']),
h2e(-g['v_x']) - h2e(g['v_y']),
h2e( g['v_x']) - h2e(g['v_y'])
])
inner = h2e(g['center']) + np.vstack([h2e( g['v_x']) + h2e(g['v_y']),
h2e(-g['v_x']) + h2e(g['v_y']),
h2e(-g['v_x']) - h2e(g['v_y']),
h2e( g['v_x']) - h2e(g['v_y'])
])
return plane_with_hole(outer, inner)
|
from app.api.base import BaseAuthResource, StringArgument, IntegerArgument, DatastoreCursorArgument
from app.participant.models import ParticipantModel
from flask_restful import fields, marshal
LEADERBOARD_TEMPLATE = {
'name': fields.String,
'rating': fields.Float,
'games_played': fields.Integer,
'wins': fields.Integer,
'losses': fields.Integer,
'ties': fields.Integer,
'k_factor': fields.Integer,
}
SORT_ASCENDING = 'ascending'
SORT_DESCENDING = 'descending'
SORT_OPTIONS = ['name', 'rating', 'games_played', 'k_factor', 'wins', 'losses', 'ties']
class LeaderboardAPI(BaseAuthResource):
ARGUMENTS = frozenset([
StringArgument('sort_direction'),
StringArgument('sort_by'),
IntegerArgument('page_size'),
DatastoreCursorArgument('cursor'),
])
def get(self, league_id):
""" Return a sorted leaderboard representing participants in a league """
sort_by = self.args.get('sort_by')
sort_direction = self.args.get('sort_direction')
cursor = self.args.get('cursor')
page_size = self.args.get('page_size')
if sort_by and hasattr(ParticipantModel, sort_by) and sort_by in SORT_OPTIONS:
sort_by = getattr(ParticipantModel, sort_by)
elif sort_by is None:
sort_by = getattr(ParticipantModel, 'rating')
else:
return 'Invalid sort_by option %s' % sort_by, 400
if sort_direction == SORT_DESCENDING or not sort_direction:
leaderboard = ParticipantModel.query(getattr(ParticipantModel, 'league_id') == league_id).order(-sort_by)
elif sort_direction == SORT_ASCENDING:
leaderboard = ParticipantModel.query(getattr(ParticipantModel, 'league_id') == league_id).order(sort_by)
else:
return 'Invalid sort direction %s' % sort_direction, 400
results, cursor, more = leaderboard.fetch_page(page_size=page_size, start_cursor=cursor)
return {
'leaderboard': [marshal(l, LEADERBOARD_TEMPLATE) for l in results],
            'cursor': cursor.urlsafe() if cursor else None  # serialize the NDB cursor for the client
}
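# Illustrative usage (hypothetical route; the actual URL registration lives
# elsewhere in the app):
#   GET /leagues/abc123/leaderboard?sort_by=rating&sort_direction=descending&page_size=25
# responds with {'leaderboard': [...], 'cursor': <urlsafe datastore cursor>}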
|
Embracing offline copywriting may seem a little counter-intuitive in these days of online copywriting and digital marketing. And yet: there really is no substitute for ‘getting up close and personal’ if you want to maximise returns on your investment.
Increasing numbers of people are realising that we neglect offline marketing at our peril. Since Google started playing God with SEO last year, we should all bear in mind that when we’re reduced to relying on Google’s rules, we’re throwing away our marketing independence.
By having another string to your bow – i.e. offline marketing – your business will never be over-reliant on Google’s whims. ‘Offline’ never really went away, of course. Major money is still spent on things like events and exhibitions, seminars and conferences, press and broadcast advertising, media relations, brochures, posters… the list is long.
There’s a danger that the online digital world can blind us to the possibilities of the more ‘traditional’ marketing techniques. It’s true that ‘online’ is now a well-established part of the marketing mix that’s impossible to ignore.
To many people, online marketing has quasi-magical properties in its geographical reach, quick turnaround and cost-effectiveness. The drawbacks of course are that clients tend to pigeon-hole suppliers or agencies, putting them in the boxes from whence they came – and leaving them there (on the assumption that they can’t do other things).
The contrast with the offline world couldn’t be more stark. In general, clients tend to allocate more time to ‘offline’ and discussing the bigger picture. Almost inevitably, the discussion will come round to the possibility of introducing complementary online techniques.
These will occupy both sides of the online divide: paid-for SEM such as Pay-Per-Click and online advertising; and ‘organic’ stuff including SEO, social media and content marketing – to name just a few.
So new and so powerful do these online techniques seem, it’s not surprising we’re all a little mesmerised by it all. It’s worth considering too that combining and integrating online and offline can create potential synergies that also come with a back-stop guarantee in the shape of an inviolable offline component.
In addition to combining the best of the online and offline worlds in your overall marketing strategy, there’s another priceless dividend that comes with embracing offline techniques – and that derives directly from the increased personal contact that surrounds the more traditional approaches to marketing!
Increased personal interaction between client and agency (or supplier) leads to a greater understanding of what a client wants on the one hand; and, on the other, a client is more likely to appreciate the wider capabilities of his agency or supplier. Providing the personal chemistry works well, this will help create a longer lasting and mutually profitable professional relationship.
To find out more about Mike Beeson’s approach to both offline and online copywriting, be sure to visit Buzzwords’ website.
|
import hashlib
import os
try: import cPickle as pickle #Only present in Python 2.*; Python 3 automatically imports the
except: import pickle as pickle #new equivalent of cPickle, if it's available.
from subprocess import call
import _paths
#Directories
def get_abs_path(rel_path):
return os.path.abspath(rel_path).replace("\\","/") + "/"
def get_abs_path_from(directory,rel_path):
return os.path.normpath(directory+rel_path).replace("\\","/")
_scripts_dir = os.path.dirname(__file__)
root = get_abs_path(os.path.join(_scripts_dir,"../"))
root_build = root+".build/"
root_source = root+"source_moss/"
#Commands and Arguments
# TODO: do we need -nostartfiles?
args_compile = "-ffreestanding -O0 -Wall -Wextra -Wno-packed-bitfield-compat -fstack-protector-all -fno-exceptions -fno-rtti -std=c++11"
args_link = "-ffreestanding -O0 -nostdlib"
command_gcc = os.path.join(_paths.cross_dir,"i686-elf-gcc")
command_gpp = os.path.join(_paths.cross_dir,"i686-elf-g++")
command_nasm = "nasm"
#Types
class FileBase(object):
TYPE_HEADER = 0
TYPE_SOURCE = 1
def __init__(self, directory,name, type):
self.directory = directory
self.name = name
self.path = directory + name
self.type = type
self.changed = None
file=open(self.path,"rb"); data_str=file.read(); file.close()
self.hash = hashlib.sha224(data_str).hexdigest() #http://docs.python.org/2/library/hashlib.html
self.user_includes = []
def add_includes_to(self, file):
if self not in file.user_includes:
file.user_includes.append(self)
for include in self.user_includes:
include.add_includes_to(file)
class FileHeader(FileBase):
def __init__(self, directory,name):
FileBase.__init__(self, directory,name, FileBase.TYPE_HEADER)
class FileSource(FileBase): #and ASM
def __init__(self, directory,name):
FileBase.__init__(self, directory,name, FileBase.TYPE_SOURCE)
self.needs_compile = None
#Enumerate files to build system recursively
files = []
def add_files_directory(directory):
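    # Recursively walk `directory`, classifying each file as source (.cpp/.asm)
    # or header and appending it to the global `files` list.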
for name in os.listdir(directory):
path = directory + name #os.path.join(directory,name)
if os.path.isfile(path):
            if path.endswith(".cpp") or path.endswith(".asm"): files.append(FileSource(directory,name))
            else: files.append(FileHeader(directory,name)) #Headers (with or without extension)
elif os.path.isdir(path):
add_files_directory(path+"/")
add_files_directory(root_source)
#Figure out which have changed
for file in files:
file.changed = True #Assume the worst
if os.path.exists(root_build+"_cache.txt"):
file=open(root_build+"_cache.txt","rb"); data_str=file.read(); file.close()
file_hashes = pickle.loads(data_str) #map of path onto hash
for file in files:
if file.path in file_hashes.keys():
if file.hash == file_hashes[file.path]:
file.changed = False
#Figure out which need to be recompiled. This is every changed source file, plus every source file
# that includes a changed header.
changed_some_headers = False
for file in files:
if file.changed and file.type==FileBase.TYPE_HEADER:
changed_some_headers = True
break
if changed_some_headers: #optimization
for file in files:
fobj=open(file.path,"r"); file_data=fobj.read(); fobj.close()
for line in file_data.split("\n"):
line2 = line.strip().split("//")[0]
if "#include" in line2:
included_rel = line2.split("#include")[1]
i=0; j=len(included_rel)-1
while not (included_rel[i]=="\"" or included_rel[i]=="<"): i+=1
while not (included_rel[j]=="\"" or included_rel[j]==">"): j-=1
included_rel = included_rel[i:j+1]
if included_rel[0] == "<": continue #Not a user include; assume it can't change
included_abs = get_abs_path_from(file.directory,included_rel[1:-1])
found = False
for file2 in files:
if file2.path == included_abs:
found = True
break
assert found, "Could not find \"#include\"d file \"%s\"!" % included_abs
file.user_includes.append(file2)
for file in files:
for include in file.user_includes:
include.add_includes_to(file)
for file in files:
file.needs_compile = False
if file.type == FileBase.TYPE_SOURCE:
if file.changed:
file.needs_compile = True
else:
for include in file.user_includes:
if include.changed:
file.needs_compile = True
break
#Compile everything that needs compiling
link_files = []
def run(command):
#print(command)
call(command)
def get_arg_list(arg_str):
l = arg_str.split(" ")
l2 = []
for a in l:
if a=="": continue
l2.append(a)
return l2
def get_create_out_path(file):
out_path = root_build + file.path[len(root_source):]+"_obj"
out_dir = os.path.dirname(out_path)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
return out_path
def compile_cpp(file,out_path):
print(" Compiling: \""+file.path+"\"")
command = [command_gpp,"-c",file.path,"-o",out_path]
command += get_arg_list(args_compile)
run(command)
def assemble_asm(file,out_path):
print(" Assembling: \""+file.path+"\"")
command = [command_nasm,"-felf",file.path,"-o",out_path]
run(command)
def link():
command = [command_gcc,"-T",root_source+"linker.ld","-o",root_build+"MOSS.bin"]
command += get_arg_list(args_link)
for file in link_files: command.append(file)
command.append("-lgcc")
run(command)
try:
print(" Compiling:")
skipped = 0
for file in files:
out_path = get_create_out_path(file)
if file.needs_compile:
if file.name.endswith(".cpp"): compile_cpp(file,out_path)
elif file.name.endswith(".asm"): assemble_asm(file,out_path)
else: assert False
file.needs_compile = False
else:
skipped += 1
if file.type == FileBase.TYPE_SOURCE:
link_files.append(out_path)
if skipped > 0:
print(" Skipped %d files" % (skipped))
print(" Linking")
link()
except KeyboardInterrupt:
print(" Aborting")
#Save compiled cache
file_hashes = {}
for file in files:
if not file.needs_compile:
file_hashes[file.path] = file.hash
data_str = pickle.dumps(file_hashes)
file=open(root_build+"_cache.txt","wb"); file.write(data_str); file.close()
|
UNLV’s tuition and fees are established by the Nevada System of Higher Education (NSHE) Board of Regents.
After you have registered for classes, you will receive an invoice with an estimated amount for your tuition and fees. Student Account statements are e-mailed to students' Rebel e-mail beginning the 10th of every month. These are a point-in-time snapshot of your account activity. For up-to-date account information, review your student account under the account inquiry section of MyUNLV.
Below is a listing of the most common tuition and fees. For a complete listing, reference the tuition and fees section for all-inclusive fees of the NSHE Procedures and Guidelines Manual.
Note: Fees, tuition, and other charges subject to change without further notice.
iNtegrate Fee $3 per credit — In effect as of Fall 2009. This fee is to support the implementation of the iNtegrate student information system and its related applications.
Payment Plan Late Payment Fee Any unpaid balance on a deferred payment plan is subject to a penalty fee of 10 percent (minimum $10) for each installment not paid by the installment due date.
Reinstatement Fee $75 For students whose registration or enrollment was cancelled due to nonpayment of fees. Students must pay their balance in full in addition to the reinstatement fee.
Academic Success Initiatives Fee $25 For Undergraduate students enrolled in 1 or more credits.
C.A.P.S - Counseling & Psychological Services Fee Effective Fall 2017 - $25 for students enrolled in 6 or more credits.
Should there be any discrepancy between the information provided on this website and the Board approved fees, the information published in the NSHE Procedure and Guidelines Manual will prevail.
An Excess Credit Fee shall be charged to a student who has attempted credits equal to 150 percent of the credits required for the student's program of study. The amount of this additional fee is equal to 50 percent of the per credit registration fee. Attempted credits include all graded courses on a student's transcript, including but not limited to the grades of F and W (withdrawal) and repeated courses. The fee will be charged, for example, after 90 credits have been attempted towards a 60-credit associate's degree or 180 credits towards a 120-credit bachelor's degree. Exceptions may apply on a case-by-case basis. The fee will be charged in all terms after passing the threshold number of credits until a degree is awarded to the student. An appeal procedure and other exceptions to this fee are outlined in the catalog.
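To make the arithmetic concrete, here is a minimal sketch in Python (the function name and the dollar figure are illustrative assumptions, not UNLV's published numbers):

def excess_credit_fee(attempted_credits, required_credits, per_credit_fee):
    # The surcharge applies once attempted credits pass 150% of the program requirement
    threshold = 1.5 * required_credits
    if attempted_credits <= threshold:
        return 0.0
    # The surcharge equals 50% of the per-credit registration fee
    return 0.5 * per_credit_fee

# Example: a 120-credit bachelor's degree with a hypothetical $250 per-credit fee;
# the fee begins after 180 attempted credits, adding $125 per credit.
print(excess_credit_fee(181, 120, 250.0))  # 125.0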
All course related fees and other special fees are due at the time the course is added or registered for. These fees are in addition to the per credit tuition and fees. Please see your individual department if you have questions related to program specific fees.
The Board of Regents of the State of Nevada approved a proposal in December 2010 for the School of Nursing, School of Business, School of Architecture and Department of Physical Therapy at UNLV to implement differential tuition to these programs effective Spring 2012. For a complete list of all UNLV programs and courses, please refer to the Board of Regents Manual, Section 7, for a breakdown of the additional costs.
Course fees related to individual courses are listed on the web registration system per course.
All students taking undergraduate courses (100-400 level) shall be assessed the undergraduate fees. All students taking graduate courses (600-700 level) shall be assessed the graduate fees. Audit fees are identical to the undergraduate or graduate fees.
The course charges on this page do not pertain to law students taking courses during a law term or to dental students. If you are a law student, you must check with the William S. Boyd School of Law for the appropriate charges. Dental students must check with the School of Dental Medicine.
All fees are subject to change as approved by the Nevada System of Higher Education Board of Regents. Please consult the Office of Cashiering and Student Accounts for up-to-date information or the NSHE Board of Regents Manual.
Still have a question, comment or need help? Send to: Cashiering and Student Accounts.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# king_phisher/templates.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import base64
import codecs
import datetime
import hashlib
import html
import json
import logging
import os
import random
import re
from king_phisher import find
from king_phisher import its
from king_phisher import ua_parser
from king_phisher import utilities
from king_phisher import version
import boltons.strutils
import jinja2
import requests
import requests.exceptions
import requests_file
__all__ = ('FindFileSystemLoader', 'TemplateEnvironmentBase', 'MessageTemplateEnvironment')
class FindFileSystemLoader(jinja2.BaseLoader):
"""
A :py:class:`~jinja2.BaseLoader` which loads templates by name from the file
system. Templates are searched for using the
:py:func:`~king_phisher.find.data_file` function.
"""
def get_source(self, environment, template):
template_path = find.data_file(template, os.R_OK)
if template_path is None:
raise jinja2.TemplateNotFound(template)
mtime = os.path.getmtime(template_path)
with codecs.open(template_path, 'r', encoding='utf-8') as file_h:
source = file_h.read()
return source, template_path, lambda: mtime == os.path.getmtime(template_path)
class TemplateEnvironmentBase(jinja2.Environment):
"""
A configured Jinja2 :py:class:`~jinja2.Environment` with additional filters
and default settings.
"""
def __init__(self, loader=None, global_vars=None):
"""
:param loader: The loader to supply to the environment.
:type loader: :py:class:`jinja2.BaseLoader`
:param dict global_vars: Additional global variables for the environment.
"""
self.logger = logging.getLogger('KingPhisher.TemplateEnvironment')
autoescape = jinja2.select_autoescape(['html', 'htm', 'xml'], default_for_string=False)
extensions = ['jinja2.ext.autoescape', 'jinja2.ext.do']
super(TemplateEnvironmentBase, self).__init__(autoescape=autoescape, extensions=extensions, loader=loader, trim_blocks=True)
# misc. string filters
self.filters['cardinalize'] = boltons.strutils.cardinalize
self.filters['ordinalize'] = boltons.strutils.ordinalize
self.filters['pluralize'] = boltons.strutils.pluralize
self.filters['singularize'] = boltons.strutils.singularize
self.filters['possessive'] = lambda word: word + ('\'' if word.endswith('s') else '\'s')
self.filters['encode'] = self._filter_encode
self.filters['decode'] = self._filter_decode
self.filters['hash'] = self._filter_hash
# counter part to https://jinja.readthedocs.io/en/stable/templates.html#tojson
self.filters['fromjson'] = self._filter_json
# time filters
self.filters['strftime'] = self._filter_strftime
self.filters['timedelta'] = self._filter_timedelta
self.filters['tomorrow'] = lambda dt: dt + datetime.timedelta(days=1)
self.filters['next_week'] = lambda dt: dt + datetime.timedelta(weeks=1)
self.filters['next_month'] = lambda dt: dt + datetime.timedelta(days=30)
self.filters['next_year'] = lambda dt: dt + datetime.timedelta(days=365)
self.filters['yesterday'] = lambda dt: dt + datetime.timedelta(days=-1)
self.filters['last_week'] = lambda dt: dt + datetime.timedelta(weeks=-1)
self.filters['last_month'] = lambda dt: dt + datetime.timedelta(days=-30)
self.filters['last_year'] = lambda dt: dt + datetime.timedelta(days=-365)
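		# Example usage in a template, assuming the render context includes the
		# `time` variable from standard_variables below:
		#   {{ time.local | next_week | strftime('%A, %B %d') }}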
# global variables
self.globals['version'] = version.version
# global functions
self.globals['fetch'] = self._func_fetch
self.globals['parse_user_agent'] = ua_parser.parse_user_agent
self.globals['password_is_complex'] = utilities.password_is_complex
self.globals['random_integer'] = random.randint
# additional globals
self.globals.update(global_vars or {})
def from_file(self, path, **kwargs):
"""
A convenience method to load template data from a specified file,
passing it to :py:meth:`~jinja2.Environment.from_string`.
.. warning::
Because this method ultimately passes the template data to the
:py:meth:`~jinja2.Environment.from_string` method, the data will not
be automatically escaped based on the file extension as it would be
when using :py:meth:`~jinja2.Environment.get_template`.
:param str path: The path from which to load the template data.
:param kwargs: Additional keyword arguments to pass to :py:meth:`~jinja2.Environment.from_string`.
"""
with codecs.open(path, 'r', encoding='utf-8') as file_h:
source = file_h.read()
return self.from_string(source, **kwargs)
def join_path(self, template, parent):
"""
		Override the default :py:meth:`jinja2.Environment.join_path` method to
		allow explicitly specifying relative paths by prefixing the path with
		either "./" or "../".
:param str template: The path of the requested template file.
:param str parent: The path of the template file which requested the load.
:return: The new path to the template.
:rtype: str
"""
if re.match(r'\.\.?/', template) is None:
return template
template = os.path.join(os.path.dirname(parent), template)
return os.path.normpath(template)
@property
def standard_variables(self):
"""
Additional standard variables that can optionally be used in templates.
"""
std_vars = {
'time': {
'local': datetime.datetime.now(),
'utc': datetime.datetime.utcnow()
}
}
return std_vars
def _filter_decode(self, data, encoding):
if its.py_v3 and isinstance(data, bytes):
data = data.decode('utf-8')
encoding = encoding.lower()
encoding = re.sub(r'^(base|rot)-(\d\d)$', r'\1\2', encoding)
if encoding == 'base16' or encoding == 'hex':
data = base64.b16decode(data)
elif encoding == 'base32':
data = base64.b32decode(data)
elif encoding == 'base64':
data = base64.b64decode(data)
elif encoding == 'rot13':
data = codecs.getdecoder('rot-13')(data)[0]
else:
raise ValueError('Unknown encoding type: ' + encoding)
if its.py_v3 and isinstance(data, bytes):
data = data.decode('utf-8')
return data
def _filter_encode(self, data, encoding):
if its.py_v3 and isinstance(data, str):
data = data.encode('utf-8')
encoding = encoding.lower()
encoding = re.sub(r'^(base|rot)-(\d\d)$', r'\1\2', encoding)
if encoding == 'base16' or encoding == 'hex':
data = base64.b16encode(data)
elif encoding == 'base32':
data = base64.b32encode(data)
elif encoding == 'base64':
data = base64.b64encode(data)
elif encoding == 'rot13':
data = codecs.getencoder('rot-13')(data.decode('utf-8'))[0]
else:
raise ValueError('Unknown encoding type: ' + encoding)
if its.py_v3 and isinstance(data, bytes):
data = data.decode('utf-8')
return data
def _filter_hash(self, data, hash_type):
if its.py_v3 and isinstance(data, str):
data = data.encode('utf-8')
hash_type = hash_type.lower()
hash_type = hash_type.replace('-', '')
hash_obj = hashlib.new(hash_type, data)
return hash_obj.digest()
def _filter_json(self, data):
try:
data = json.loads(data)
except json.JSONDecodeError:
self.logger.error('template failed to load json data')
data = None
return data
def _filter_strftime(self, dt, fmt):
try:
result = dt.strftime(fmt)
except ValueError:
self.logger.error("invalid time format '{0}'".format(fmt))
result = ''
return result
def _filter_timedelta(self, dt, *args, **kwargs):
try:
result = dt + datetime.timedelta(*args, **kwargs)
except ValueError:
self.logger.error('invalid timedelta specification')
result = ''
return result
def _func_fetch(self, url, allow_file=False):
session = requests.Session()
if allow_file:
session.mount('file://', requests_file.FileAdapter())
try:
response = session.get(url)
except requests.exceptions.RequestException:
self.logger.error('template failed to load url: ' + url)
return None
return response.text
class MessageTemplateEnvironment(TemplateEnvironmentBase):
"""A configured Jinja2 environment for formatting messages."""
MODE_PREVIEW = 0
MODE_ANALYZE = 1
MODE_SEND = 2
def __init__(self, *args, **kwargs):
super(MessageTemplateEnvironment, self).__init__(*args, **kwargs)
self._mode = None
self.set_mode(self.MODE_PREVIEW)
self.globals['inline_image'] = self._inline_image_handler
self.attachment_images = {}
"""A dictionary collecting the images that are going to be embedded and sent inline in the message."""
def set_mode(self, mode):
"""
Set the operation mode for the environment. Valid values are the MODE_*
constants.
:param int mode: The operation mode.
"""
if mode not in (self.MODE_PREVIEW, self.MODE_ANALYZE, self.MODE_SEND):
raise ValueError('mode must be one of the MODE_* constants')
self._mode = mode
if mode == self.MODE_ANALYZE:
self.attachment_images = {}
def _inline_image_handler(self, image_path, style=None, alt=None):
image_path = os.path.abspath(image_path)
if not os.path.isfile(image_path):
self.logger.warning('the specified inline image path is not a file')
elif not os.access(image_path, os.R_OK):
self.logger.warning('the specified inline image path can not be read')
if self._mode == self.MODE_PREVIEW:
if os.path.sep == '\\':
image_path = '/'.join(image_path.split('\\'))
if not image_path.startswith('/'):
image_path = '/' + image_path
image_path = 'file://' + image_path
else:
if image_path in self.attachment_images:
attachment_name = self.attachment_images[image_path]
else:
attachment_name = 'img_' + utilities.random_string_lower_numeric(8) + os.path.splitext(image_path)[-1]
while attachment_name in self.attachment_images.values():
attachment_name = 'img_' + utilities.random_string_lower_numeric(8) + os.path.splitext(image_path)[-1]
self.attachment_images[image_path] = attachment_name
image_path = 'cid:' + attachment_name
image_path = html.escape(image_path, quote=True)
img_tag = "<img src=\"{0}\"".format(image_path)
if style is not None:
img_tag += " style=\"{0}\"".format(html.escape(str(style), quote=True))
if alt is not None:
img_tag += " alt=\"{0}\"".format(html.escape(str(alt), quote=True))
img_tag += '>'
return img_tag
|
Beautiful, healthy hair is a point of pride, which is why regular care is so important. Dirty hair can lead to dandruff, hair loss, inflammation of the scalp and even headaches. It is therefore important to know how to wash it properly.
Many women believe that washing your hair too often is harmful, but this is a misconception. For any hair type, washing is appropriate at least once every two days and is completely harmless. If your hair becomes oily very quickly, wash it every day. Most importantly, use only mild, modern detergents.
Select a shampoo appropriate for your hair type, rinse it out well after washing, and do not rub, pull or twist the hair.
Before you wash your head, vigorously and carefully comb the hair. This stimulates blood circulation in the scalp, helping to saturate the roots with nutrients. Brushing also removes dirt, dandruff and residue from styling products, and helps keep the hair from tangling in the wash.
If you have dry or colored hair, massage the scalp for 10 minutes before washing. Use circular movements that shift the skin, and work sunflower, burdock, castor, linseed or hemp oil into the scalp as you go. After washing, rub a little specialised liquid oil into the wet strands and remove any excess with a dry towel.
Adjust the temperature of the water you use to wash your head: it should not be too hot. Warm water dissolves sebum, removes dirt and improves blood circulation in the scalp, whereas water that is too hot dries out the hair and over-stimulates the sebaceous glands.
At the end of washing, rinse the head with cool water. This gives your locks a healthy, natural and beautiful shine, and the scalp benefits from improved blood circulation.
Do not apply shampoo directly to your hair, as it distributes poorly between the strands. Instead, dilute the right amount of shampoo with a little water and wash your hair with the solution.
|
from setuptools import setup, find_packages, Command
import re
import sys
import subprocess
install_requires = []
pyversion = sys.version_info[:2]
def read_module_contents():
with open('ceph_medic/__init__.py') as f:
return f.read()
module_file = read_module_contents()
metadata = dict(re.findall(r"__([a-z]+)__\s*=\s*'([^']+)'", module_file))
long_description = open('README.rst').read()
version = metadata['version']
class BumpCommand(Command):
""" Bump the __version__ number and commit all changes. """
user_options = [('version=', 'v', 'version number to use')]
def initialize_options(self):
new_version = metadata['version'].split('.')
new_version[-1] = str(int(new_version[-1]) + 1) # Bump the final part
self.version = ".".join(new_version)
def finalize_options(self):
pass
def run(self):
try:
print('old version: %s new version: %s' %
(metadata['version'], self.version))
            # raw_input() was renamed to input() in Python 3; use whichever exists
            (raw_input if sys.version_info[0] < 3 else input)('Press enter to confirm, or ctrl-c to exit >')
except KeyboardInterrupt:
raise SystemExit("\nNot proceeding")
old = "__version__ = '%s'" % metadata['version']
new = "__version__ = '%s'" % self.version
module_file = read_module_contents()
with open('ceph_medic/__init__.py', 'w') as fileh:
fileh.write(module_file.replace(old, new))
# Commit everything with a standard commit message
cmd = ['git', 'commit', '-a', '-m', 'version %s' % self.version]
print(' '.join(cmd))
subprocess.check_call(cmd)
class ReleaseCommand(Command):
""" Tag and push a new release. """
user_options = [('sign', 's', 'GPG-sign the Git tag and release files')]
def initialize_options(self):
self.sign = False
def finalize_options(self):
pass
def run(self):
# Create Git tag
tag_name = 'v%s' % version
cmd = ['git', 'tag', '-a', tag_name, '-m', 'version %s' % version]
if self.sign:
cmd.append('-s')
print(' '.join(cmd))
subprocess.check_call(cmd)
# Push Git tag to origin remote
cmd = ['git', 'push', 'origin', tag_name]
print(' '.join(cmd))
subprocess.check_call(cmd)
# Push package to pypi
cmd = ['python', 'setup.py', 'sdist', 'upload']
if self.sign:
cmd.append('--sign')
print(' '.join(cmd))
#subprocess.check_call(cmd)
# Push master to the remote
cmd = ['git', 'push', 'origin', 'master']
print(' '.join(cmd))
subprocess.check_call(cmd)
setup(
name='ceph-medic',
version=version,
packages=find_packages(),
author='Alfredo Deza',
author_email='[email protected]',
description='detect common issues with ceph clusters',
long_description=long_description,
license='MIT',
keywords='ceph doctor',
url="https://github.com/ceph/ceph-medic",
zip_safe=False,
install_requires=[
'execnet',
'tambo',
'remoto>=1.1.2',
] + install_requires,
tests_require=[
'pytest >=2.1.3',
'tox',
'mock',
],
scripts=['bin/ceph-medic'],
cmdclass={'bump': BumpCommand, 'release': ReleaseCommand},
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Topic :: Software Development :: Build Tools',
'Topic :: Utilities',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
]
)
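# Typical release flow using the custom commands above (run from the repo root):
#   python setup.py bump       # increment __version__ and commit
#   python setup.py release    # tag the release and push to origin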
|
Create structure in OmniFocus by applying the insights from mind mapping your responsibilities.
In this article we'll look at how to structure OmniFocus based on a mind map of your responsibilities in life. If you haven't already, I recommend reading the Foundation: Mind Mapping Your Responsibilities article before moving on to this one.
This mind map serves as a handy starting point for structuring OmniFocus. As a minimum, I recommend creating a folder for each of your major areas of responsibility so that you can easily focus in on these areas as needed. This is especially useful when used in combination with the Focus feature in OmniFocus 3 for Mac (Pro) and the Custom Perspectives feature that's available on both Mac and iOS (Pro).
A fourth folder called "Maintenance" is used to house projects and actions that help keep your system up-to-date. For example, "Perform Morning Review" and "Perform Weekly Review". There may also be top-level folders to encapsulate Checklists, Someday/Maybe items, and Templates.
|
# -*- coding: utf-8 -*-
"""
Module to convert videos from jpgs or pdfs
USE:
cd /media/KINGSTON/ARMOR/python
python
from armor.video import makeVideo as mv
reload(mv); mv.main()
mv.main(inputDate='2013-07-12', inputType='satellite1')
mv.main(inputDate='2013-07-12', inputType='satellite4')
import time
t0=time.time()
reload(mv); mv.makeVideoAll(inputType='rainfall1')
reload(mv); mv.makeVideoAll(inputType='satellite2')
reload(mv); mv.makeVideoAll(inputType='charts')
print '\n\ntime spent all in all:', time.time()-t0, '\n\n\n'
time.sleep(10)
t0=time.time()
reload(mv); mv.makeVideoAll(inputType='temperature')
reload(mv); mv.makeVideoAll(inputType='charts2')
reload(mv); mv.makeVideoAll(inputType='rainfall2')
#reload(mv); mv.makeVideoAll(inputType='satellite1')
reload(mv); mv.makeVideoAll(inputType='satellite3')
#reload(mv); mv.makeVideoAll(inputType='satellite4')
print 'time spent all in all:', time.time()-t0
import time
t0=time.time()
reload(mv); mv.makeVideoAll(inputType='rainfall1') ; mv.makeVideoAll(inputType = 'satellite2') ; mv.makeVideoAll(inputType='charts')
print 'time spent all in all:', time.time()-t0
and check /media/Seagate\ Expansion\ Drive/ARMOR/sandbox
or something like that
References
1. http://stackoverflow.com/questions/5772831/python-library-to-create-a-video-file-from-images
2. http://stackoverflow.com/questions/5772831/python-library-to-create-a-video-file-from-images
3. http://stackoverflow.com/questions/753190/programmatically-generate-video-or-animated-gif-in-python
4. http://opencv.willowgarage.com/documentation/reading_and_writing_images_and_video.html
5. http://stackoverflow.com/questions/12290023/opencv-2-4-in-python-video-processing/12333066#12333066
"
#THE FOLLOWING CODES ARE FROM REFERENCE 3 ABOVE
To create a video, you could use opencv,
#load your frames
frames = ...
#create a video writer
writer = cvCreateVideoWriter(filename, -1, fps, frame_size, is_color=1)
#and write your frames in a loop if you want
cvWriteFrame(writer, frames[i])
"
"""
#################
# imports
import time
import os
import numpy as np
from matplotlib import pyplot as plt
#from PIL import Image
try:
from scipy.misc import imread
except:
from matplotlib import image as mpimg
imread = mpimg.imread
import cv, cv2
from armor import pattern
dbz = pattern.DBZ
##################
# setup
from .. defaultParameters import *
dataRoot = externalHardDriveRoot + '../Work/CWB/'
defaultDate = '2013-07-12'
defaultType = 'charts'
defaultInputFolder = dataRoot + defaultType + '/' + defaultDate +'/'
defaultOutputFolder = externalHardDriveRoot + 'sandbox/'
defaultFrameSize = (600, 600)
defaultFps = 5
def getList(folder, extensions=['.txt','.dat']):
try:
L = os.listdir(folder)
L = [v for v in L if v[-4:].lower() in extensions]
#print L
L.sort()
return L
    except:
        print 'getList ERROR: could not list folder', folder
def makeDBZimages(inputFolder=defaultInputFolder,
outputFolder=defaultOutputFolder, extensions=['.txt', '.dat']):
L = getList(folder=inputFolder, extensions=extensions)
for fileName in L:
a = dbz(name=fileName, dataPath=inputFolder+fileName,
imagePath=defaultOutputFolder+fileName)
a.load()
a.saveImage()
def loadImages(inputFolder=defaultOutputFolder, extensions=['.png', '.jpg']):
"""yes that's right
inputFolder=defaultOutputFolder
because we expect the pics to be in the sandbox (i.e. default output folder)
"""
try:
L = getList(folder=inputFolder, extensions=extensions)
#print inputFolder
#print L
#print extensions
imageList=[""]*len(L)
#print L
for n, fileName in enumerate(L):
#img = Image.open(inputFolder+fileName) # doesn't work
#imageList[n] = cv.LoadImage(inputFolder+fileName) #old
try:
imageList[n] = imread(inputFolder+fileName) # new, converted to cv2
print n, inputFolder, fileName
except:
print n, inputFolder, fileName, "loadImages ERROR!!!!!!!!!!!!!!!!"
#print imageList[n]
return imageList
except:
print "loadImages ERROR!!!!!!!!"
def makeVideo(imageList,
outputPath= defaultOutputFolder+ str(int(time.time()))+'.avi',
fourcc=cv.CV_FOURCC('F', 'L', 'V', '1'),
fps = defaultFps,
frameSize=defaultFrameSize):
#print imageList
# create a video writer
# c.f. http://opencv.willowgarage.com/documentation/python/reading_and_writing_images_and_video.html
#fourcc=cv.FOURCC('P','I','M','1'), doesn't work?
#writer = cv.CreateVideoWriter(filename=outputFolder+inputDate+'_'+inputType+'.avi',
# fourcc=cv.FOURCC('F', 'L', 'V', '1'),
# fps=1, frame_size=(600,600), is_color=1)
#and write your frames in a loop if you want
# the above don't work. replace by the following.
# http://stackoverflow.com/questions/12290023/opencv-2-4-in-python-video-processing/12333066#12333066
time0 = time.time()
writer = cv2.VideoWriter(filename=outputPath,
fourcc=fourcc,
fps=fps,
frameSize=frameSize)
    for frame in imageList:
        writer.write(frame)
    # release the writer so the video container is finalised and playable
    writer.release()
def makeVideoAll(inputType = defaultType,
inputFolder = "",
extensions = ['.png', '.jpg'],
outputFolder = "",
fourcc = cv.CV_FOURCC('F', 'L', 'V', '1'),
fps = defaultFps,
frameSize=defaultFrameSize):
"""
cd /media/KINGSTON/ARMOR/python/
python
from armor.video import makeVideo as mv
reload(mv) ; mv.makeVideoAll(inputType="charts2")
"""
time0 = time.time()
if inputFolder == "":
inputFolder = "%s%s/" % (dataRoot, inputType)
if outputFolder =="":
outputFolder = defaultOutputFolder + inputType + str(int(time.time())) +'/'
#debug
print inputFolder
os.makedirs(outputFolder)
LL = os.listdir(inputFolder)
LL.sort()
for folder in LL:
imageList = loadImages(inputFolder=inputFolder+folder+'/', extensions=extensions)
try:
print folder
makeVideo(imageList,
outputPath= outputFolder + folder + '_' + inputType + '.avi',
fourcc=fourcc,
fps = len(imageList),
#fps = len(imageList)/10. ,
frameSize=frameSize) # frames per sec = len(imageList)/10.
# - so that each day lasts 10 seconds
# no matter how many frames there are
except:
print folder, "makeVideo ERROR!!!!!!!!!!!" # don't care if it doesn't work
time.sleep(3)
print time.time()-time0
def main(inputDate=defaultDate, inputType=defaultType, inputFolder="",
outputFolder=defaultOutputFolder, extensions=['.png','.jpg'],
fps = '',
frameSize=defaultFrameSize):
"""
USE:
    main(inputDate=defaultDate, inputType=defaultType, inputFolder="", outputFolder="")
WHERE:
defaultDate = '2013-07-12'
defaultType = 'charts'
    OUTPUT:
        an .avi file named <inputDate>_<inputType>.avi in outputFolder
"""
time0 = time.time()
if inputFolder == "":
inputFolder = "%s%s/%s/" % (dataRoot, inputType, inputDate)
#print inputFolder
imageList = loadImages(inputFolder=inputFolder, extensions=extensions)
if fps =='':
fps = len(imageList)/10. # frames per sec = len(imageList)/10.
# - so that each day lasts 10 seconds
# no matter how many frames there are
makeVideo(imageList=imageList,
outputPath=outputFolder+inputDate+'_'+inputType+'.avi',
fourcc=cv.CV_FOURCC('F', 'L', 'V', '1'),
fps=fps,
frameSize=frameSize)
print outputFolder+inputDate+'_'+inputType
print time.time()-time0
"""
CV_FOURCC('P','I','M','1') = MPEG-1 codec
CV_FOURCC('M','J','P','G') = motion-jpeg codec (does not work well)
CV_FOURCC('M', 'P', '4', '2') = MPEG-4.2 codec
CV_FOURCC('D', 'I', 'V', '3') = MPEG-4.3 codec
CV_FOURCC('D', 'I', 'V', 'X') = MPEG-4 codec
CV_FOURCC('U', '2', '6', '3') = H263 codec
CV_FOURCC('I', '2', '6', '3') = H263I codec
CV_FOURCC('F', 'L', 'V', '1') = FLV1 codec
"""
def makeVideoFourInOne(inputTypes = ['rainfall1' , 'charts',
'temperature' , 'satellite2'],
outputFolder = "",
fourcc = cv.CV_FOURCC('F', 'L', 'V', '1'),
fps = defaultFps,
extension= '.avi',
#fourcc = cv.CV_FOURCC('P', 'I', 'M', '1'),
#extension= '.mpg',
# fps = defaultFps,
frameSize = (1200,1200),
startingFromDate=""):
# sizes- rainfall1: 400x400; charts: 600x600 ; temperature: 400x400 ; satellite2: 430,400
# == USE ==
# cd /media/KINGSTON/ARMOR/python
# python
# from armor.video import makeVideo as mv
# reload(mv) ; mv.makeVideoFourInOne()
# reload(mv) ; mv.makeVideoFourInOne(startingFromDate='2013-08-15')
# plan: 1. get four lists of file paths [(datetime, type) -> path]
# 2. initialise background
# 3. for each datetime do
# 1.look for relevant path, return blank if not found
# 2. load paste the new image and paste it to the frame, do nothing if not found
# 3. write the frame to the video
######################################################################
#
# 1. get four lists of file paths [(datetime, type) -> path]
#
if outputFolder =="":
outputFolder = defaultOutputFolder + '_'.join(inputTypes) + '_' + str(int(time.time())) + '/'
fileNameDict = {}
for inputType in inputTypes:
LL = os.listdir(dataRoot+inputType)
for inputDate in LL:
if not os.path.isdir(dataRoot+inputType+'/'+inputDate) : # not valid data
continue
L = os.listdir(dataRoot+inputType+'/'+inputDate)
if L == []: # empty folder
continue
for fileName in L:
# ('charts', '2013-05-17_1530') -> 2013-05-17_1530.MOS0.jpg
fileNameDict[(inputType, fileName[:15])] = fileName
#####################################################
# 2. initialise background, initialise writer
os.makedirs(outputFolder)
#currentFrame = np.ones((1200,1200,3)) #(1200x1200x3)
currentFrame = imread(dataRoot+defaultType+ '/2013-05-17/2013-05-17_1230.MOS0.jpg')
currentFrame = np.hstack([currentFrame, currentFrame])
currentFrame = np.vstack([currentFrame, currentFrame])
currentFrame = currentFrame *0 +1
#debug
#plt.imshow(currentFrame)
#plt.show()
# end debug
dateTimeList = sorted([ v[1] for v in fileNameDict.keys() \
if v[1]>startingFromDate])
# e.g. '2013-05-17_1530' > '2013-05-16'
# DEBUG
#print dateTimeList, startingFromDate
#x=raw_input('press enter:')
## END DEBUG
inputDateList = sorted(list(set([v[:10] for v in dateTimeList])))
for inputDate in inputDateList:
print inputDate
#split the video into dates
dateTimeListShort = [v for v in dateTimeList if inputDate in v]
#debug
#print outputFolder +inputDate +extension
#print fourcc
#print fps
#print frameSize
# end debug
# initialise video writer
writer = cv2.VideoWriter(filename=outputFolder +inputDate +extension,
fourcc=fourcc,
fps=fps,
frameSize=frameSize)
# initialise (black) currentFrame for each day
# added 2013-08-16
currentFrame = imread(dataRoot+defaultType + \
'/2013-05-17/2013-05-17_1230.MOS0.jpg')
currentFrame = np.hstack([currentFrame, currentFrame])
currentFrame = np.vstack([currentFrame, currentFrame])
currentFrame = currentFrame *0 +1
#####################################################
# 3. for each datetime do
# 1.look for relevant path, return blank if not found
# 2. load paste the new image and paste it to the frame, do nothing if not found
# 3. write the frame to the video
for dateTime in dateTimeListShort: # e.g. '2013-05-17_1530'
print "\n*****", dateTime, "******"
# can add some logics here to pick out specific dates and times
# too lazy to do it here
for N, inputType in enumerate(inputTypes): # e.g. 'charts'
print inputType,
if (inputType, dateTime) not in fileNameDict.keys():
print '-X',
continue
#2. load paste the new image and paste it to the frame
# e.g. /.../CWB/charts/2013-05-17/2013-05-17_1530.MOS0.jpg
fileName = fileNameDict[(inputType, dateTime)]
filePath = dataRoot +inputType +'/' + dateTime[:10] +'/' + fileName
if os.path.getsize(filePath) < 3000: #invalid file
continue
try:
img = imread(dataRoot +inputType +'/' + dateTime[:10] +'/' + fileName)
except:
continue
# debug
#print dataRoot +inputType +'/' + dateTime[:10] +'/' + fileName
#plt.imshow(currentFrame)
#plt.show()
# end debug
height, width, depth = img.shape
hor_displ = (N % 2)*600 # horizontal displacement: 1,3 on the right
vert_displ = (N//2 % 2)*600 # 2,3 on the bottom
currentFrame[vert_displ:vert_displ+height, hor_displ:hor_displ+width, :] = img
# debug
#print hor_displ, vert_displ
# end debug
            writer.write(currentFrame)
        # finalise this day's video before moving on to the next date
        writer.release()
|
Theater, traditionally, has been the place where social mores are thrown into relief. Society—encompassing both the microcosm on stage and the audience off-stage—is there to look at itself. Questions posed are not always questions answered. What resonates is the memory of questions posed before the words were ever spoken. The characters in James McLure’s plays demand an emotional commitment from their audience, but catharsis, our release from their dilemma, is suspended. Purgation does not take place in the theater; responsibility remains as an act after the play is over.
Betsy Sussler Do you identify with the women characters in your plays? The first plays, Private Wars and Lonestar , were all men.
James McLure Private Wars was the first play and it was a three-man play about guys in an army veteran hospital recuperating from Vietnam wounds. Lonestar is about three guys in a small West Texas town—one a Vietnam veteran returning to this town and picking up the pieces of his life. But the companion piece to Lonestar is another one act play, Laundry and Bourbon , which is about the three women in the lives of the three guys. They’re two different acts, they don’t intermingle on stage. It’s male and female bonding. But in Lonestar one of the huge offscreen presences is the guy’s wife—she’s very, very important to the play because ultimately the play is about her and an affair that she has had with one of the characters on the stage.
BS She influences the play but her character is developed through the voices of the men.
JM Yes, and in the companion piece she is a central figure. Light farce—I mean, it is a comedy, but it has substance to it.
BS You have a play that’s down at the McCarter theater in Princeton this month [January, 1983]?
JM The Day They Shot John Lennon … that play is about the crowd that formed outside of the Dakota after Lennon was shot, and within this crowd that you see on stage are several groupings of people who represent different cross sections of American types; there are five main groups on the stage who are responding to this event. One of the groups is three kids from Westchester County, affluent 16 or 17-year-old kids that have come in from the suburbs to be there. Another group is a guy and a woman in their early thirties, who had been to Woodstock and are strangers who meet there. There’s lines like … the woman from Woodstock has this great sense of rage: “We’re killing our artists, the people we really need. If anyone is going to change the world it’s going to be an artist, not a politician.” A great sense of feminist anger and ’60s anger, whereas the young girl from the suburbs also has an anger but it’s a completely unformed adolescent anger.
BS There’s no rhetoric to support it.
JM Right. One of the kids says, “I can’t believe it. I’ve been listening to this man’s music all my life, ever since 1975.” That’s as far back as it goes for them. Whereas, for the Woodstock people it goes all the way back to ’63, almost a decade. So it’s a whole different reference point.
BS Do you get obsessed with your characters?
JM No, but it’s an interesting word that’s coming into the vocabulary more and more. You especially see it in terms of the arts and artists, where you see it on the back-cover blurb of a novel: “AN OBSESSIVE …” Well, I don’t know. I think something that’s obsessive isn’t healthy, because then you’ve lost perspective.
BS What I mean … When you act a character or when you become involved with a character, it’s a sense of being. There’s not a time lag between what you are and what that character is.
JM I don’t know that I do that. I like to keep a perspective on what this character objectively would and would not do. Especially, say with the John Lennon play where there are nine characters on stage, all with different points of view and opinions. It’s very important that you don’t load the deck on one character so much so that his or her opinions seem to hold sway or take precedence. There is one character in the play who is closest to me in opinions—the 30-ish woman who went to Woodstock—but even some of her opinions and perspectives are wrong or misguided.
BS And yours are always right.
BS Do you think your plays are moral? Do you want morality in your plays?
JM Yes, they are always striving towards how to behave more decently as human beings—if that’s moral.
BS Do you think the act of writing gave you that objectivity you talk about because most actors do let themselves become obsessed with the character?
JM Maybe. I don’t know. Actors—they are not all like this but a huge number of modern actors feel that unless they become obsessed or so emotionally involved with their character that they almost drive themselves nuts, that somehow they aren’t doing their job.
BS That’s a pretty American way of acting.
JM Whereas, Stanislavsky always said that you have got to see where your character fits in the play and you are not doing your job as an actor unless you understand the entire play. You have to do that before you can figure out what you as one element of the ensemble are trying to do.
BS There is something that happens in acting. When you start rehearsing you’re blocking and reading the lines, but you are also acting them out and then comes that moment when everyone has just gotten their lines memorized … and all of a sudden you lose everything, and you have to start all over again which is actually where the actor is playing from moment to moment.
JM That’s a rather good way of putting it. When the play becomes a play rather than a series of individual performances or individuals worried about how am I doing, rather than how are we doing.
BS But that only comes when it’s been committed to memory. What part does memory play in your writing?
JM I don’t know. I keep a notebook and go back to look at things jotted down six months ago and realize, Yeah, that worked its way into the writing but I wasn’t conscious of that … You write down a dream that you had or something that you saw on the street and then you forget about it but it surfaces maybe six months or a year later in the work and it’s interesting to see where those echoes come from.
BS What about notebooks from six or seven years ago?
JM I’ve lost most of them. I have one from high school and another from college but those are so long ago it’s like going back to a different country or reading a different author.
BS You didn’t abruptly stop acting and start writing.
JM Three years ago I was out there on stage at The Public theater in a play called New Jerusalem by Len Jenkins, a great play and I had a great role in it but concurrently Lonestar was opening in Louisville (at The Actor’s Theater) and it got great reviews and national press, so one day I flew down to see it and I was coming back through Pittsburgh and the plane got held up and I was 30 minutes late for curtain. I got to see my show that afternoon and be in a show that night but that was the last play that I acted in. After that Lonestar came to New York and I’ve just been concentrating on my writing.
JM I’ve only directed two of my plays. The other productions were strictly myself as playwright. There was a director there and his interpretation is the one that goes on the stage, not mine. I can make suggestions, but it’s ultimately the director that makes the play work—in his or her way, not mine.
BS And you must accept this, be able to let go.
JM That’s how the theater works, yeah. I mean, I could direct my own plays.
BS Do you want to?
JM Yes, on certain productions I do. I would kind of like to direct this production of Thanksgiving , but I didn’t want to direct the John Lennon play. It needed a lot of re-writing during rehearsals so it would have been too much work and also it’s a very directorial piece because there are nine characters on stage who, for a great deal of the play, don’t react. You need a very strong director to make that kind of static material work.
JM No, duologues and groups of characters talking—basically strangers who meet in a crowd and show a series of emotional connections through this event. I began to write the play because I was living across the street from The Dakota when Lennon was shot and for the next week I spent a lot of time in the park, observing and listening to people talking. Some of the dialogue that you heard among strangers was incredible. I saw a guy and a girl talking. They were about 31, early thirties, and the dialogue went something like this: the guy said, “Isn’t this terrible?” And she said, “Awful. Just disgusting. I couldn’t go to work today.” “Oh, me either.” “Where do you work?” “Oh, I work on the East Side.” “Oh, yeah? Really? So do I.” “I work in the seventies.” “Oh! I work in the eighties.” And suddenly it became a conversation about let’s go and have a drink.
BS Do you eavesdrop as a matter of course?
JM I think all writers do. You have to. It’s the only way you can really hear.
BS That’s what I was talking about when I brought up memory … how much you use.
JM Well, that’s the other thing. As a former actor, you’re trained to observe people and real human behavior so you can use that in your stage work.
BS It allows you to have compassion for just about anyone.
JM That’s the thing, because you realize there isn’t any character you can play without sympathy—all characters are sympathetic. There is a humanity in everybody that is important as an actor to discover. If you’re playing Richard III, even though he was a mass murderer, there’s still something very human about him and it’s your obligation to discover that. Playwrights of ideas are a problem for me because ideas are only interesting in so far as they come out of character, a character I can believe in or care about. For instance, I can think of a great piece of dramatic writing like Schulberg’s On the Waterfront but it could have been just a sociological text about corruption.
BS Do you have another play in mind now?
JM There’s one play that I’ve always wanted to write about Joe McCarthy. I don’t know that I’ll ever get around to it, it’s a very ambitious play. The trouble with McCarthy is that there are no sympathetic characters involved. They are a very shady lot. There are two characters in the John Lennon piece, two Vietnam veterans, who first appeared in my play Private Wars , and then they reappear a few plays later.
JM In Private Wars , the time span is about 1974 or ’75, two years after the Vietnam War, and they are still recuperating in the VA Hospital; but now, in the John Lennon piece, we’re catching up with them in 1980. So it might be interesting to write a play about them in another three years. To see where it goes from there.
BS There are political connotations. Do you know many people who went to Vietnam?
JM One of them has. And he deals with it with a great deal of black humor. I mean, he’s really a funny guy—he can tell horrendous combat stories and make them funny.
BS Do you find yourself torn between cynicism and humanism?
BS Have you ever had a situation where an actor has changed your lines substantially during a performance?
BS Have you ever wanted to act in your own plays?
JM Only in readings. There’s an advantage to that and a disadvantage. You cut yourself off from the possibilities of better choices. I know the line readings the way I would read them, but there might be better line readings, other things that I can’t hear. So the advantage of having another actor read them is that you have an option, you get two voices rather than one. Many times I’ll prefer the rhythm in my head but until I hear another actor work with it, I’m not sure.
BS There is a difference between writing for text, which is to be read, and writing to be performed.
JM Yeah, if you’re a good playwright you’re writing instinctively for the stage performance. You’re just going to have to rewrite it if you don’t. And again, it’s that old phrase of having a good ear. If you have a good ear for dialogue you’re way ahead of the game. Some playwrights draft—you were talking about someone like Mamet. Mamet is experimenting with language. Some of his plays, like Sexual Perversity in Chicago —he obviously used his good ear to recreate speech in a poetic way, but still very much in the sense of “this is the way people talk.” Whereas you come to something like The Woods , and nobody talks like that, but he was doing some special thing with language, and it spurred the poetic possibilities of speech.
BS Do you have a preference? Which are you most interested in doing? Are you interested in both?
Betsy Sussler is Editor of BOMB and a filmmaker and actress in Off-Off Broadway.
|
# ----------------------------------------------------------------------------
# Copyright 2016 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
import ctypes as ct
import logging
import numpy as np
import os
import atexit
from neon.util.persist import get_data_cache_dir
from .media import MediaParams
from .indexer import Indexer
from .dataiterator import NervanaDataIterator
logger = logging.getLogger(__name__)
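# Each stream gets a pair of buffers so the loader can fill the next minibatch
# on a background thread while the current one is being consumed (double
# buffering); buffer_id below selects which half is active.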
BufferPair = (ct.c_void_p) * 2
class DeviceParams(ct.Structure):
_fields_ = [('type', ct.c_int),
('id', ct.c_int),
('data', BufferPair),
('targets', BufferPair),
('meta', BufferPair)]
class DataLoader(NervanaDataIterator):
"""
Encapsulates the data loader library and exposes an API to iterate over
generic data (images, video or audio given in compressed form). An index
file that maps the data examples to their targets is expected to be provided
in CSV format.
Arguments:
set_name (str):
Name of this dataset partition. This is used as prefix for
directories and index files that may be created while ingesting.
repo_dir (str):
Directory to find the data. This may also be used as the output
directory to store ingested data (in case archive_dir is not
specified).
media_params (MediaParams):
Parameters specific to the media type of the input data.
target_size (int):
The size of the targets. For example: if the target is a class
label, set this parameter to 1, indicating a single integer. If
the target is a mask image, the number of pixels in that image
should be specified.
archive_dir (str):
Directory to store ingested data. If this directory does not exist,
it will be created.
target_conversion (str, optional):
Specifies the method to be used for converting the targets that are
provided in the index file. The options are "no_conversion",
"ascii_to_binary", "char_to_index" and "read_contents". If this
parameter is set to "read_contents", the targets given in the index
file are treated as pathnames and their contents read in. Defaults
to "ascii_to_binary".
index_file (str, optional):
CSV formatted index file that defines the mapping between each
example and its target. The first line in the index file is
assumed to be a header and is ignored. Two columns are expected in
the index. The first column should be the file system path to
individual data examples. The second column may contain the actual
label or the pathname of a file that contains the labels (e.g. a
mask image). If this parameter is not specified, creation of an
            index file is attempted. Automatic index generation can only be
performed if the dataset is organized into subdirectories, which
also represent labels.
shuffle (boolean, optional):
Whether to shuffle the order of data examples as the data is
ingested.
reshuffle (boolean, optional):
Whether to reshuffle the order of data examples as they are loaded.
If this is set to True, the order is reshuffled for each epoch.
Useful for batch normalization. Defaults to False.
        datum_dtype (data-type, optional):
            Data type of input data. Defaults to np.uint8.
        target_dtype (data-type, optional):
            Data type of targets. Defaults to np.int32.
onehot (boolean, optional):
If the targets are categorical and have to be converted to a one-hot
representation.
nclasses (int, optional):
Number of classes, if this dataset is intended for a classification
problem.
subset_percent (int, optional):
Value between 0 and 100 indicating what percentage of the dataset
partition to use. Defaults to 100.
ingest_params (IngestParams):
Parameters to specify special handling for ingesting data.
alphabet (str, optional):
Alphabet to use for converting string labels. This is only
applicable if target_conversion is set to "char_to_index".
"""
_converters_ = {'no_conversion': 0,
'ascii_to_binary': 1,
'char_to_index': 2,
'read_contents': 3}
def __init__(self, set_name, repo_dir,
media_params, target_size,
archive_dir=None,
target_conversion='ascii_to_binary',
index_file=None,
shuffle=False, reshuffle=False,
datum_dtype=np.uint8, target_dtype=np.int32,
onehot=True, nclasses=None, subset_percent=100,
ingest_params=None,
alphabet=None):
if onehot is True and nclasses is None:
raise ValueError('nclasses must be specified for one-hot labels')
if target_conversion not in self._converters_:
raise ValueError('Unknown target type %s' % target_conversion)
self.set_name = set_name
repo_dir = os.path.expandvars(os.path.expanduser(repo_dir))
if not os.path.exists(repo_dir):
raise IOError('Directory not found: %s' % repo_dir)
self.macro_start = 0
self.repo_dir = repo_dir
parent_dir = os.path.split(repo_dir)[0]
self.archive_prefix = 'archive-'
if archive_dir is None:
self.archive_dir = get_data_cache_dir(parent_dir, set_name + '-ingested')
else:
self.archive_dir = os.path.expandvars(os.path.expanduser(archive_dir))
self.item_count = ct.c_int(0)
self.bsz = self.be.bsz
self.buffer_id = 0
self.start_idx = 0
self.media_params = media_params
self.shape = media_params.get_shape()
self.datum_size = media_params.datum_size()
self.target_size = target_size
self.target_conversion = self._converters_[target_conversion]
if index_file is None:
self.index_file = os.path.join(parent_dir, set_name + '-index.csv')
else:
self.index_file = index_file
self.shuffle = shuffle
self.reshuffle = reshuffle
self.datum_dtype = datum_dtype
self.target_dtype = target_dtype
self.onehot = onehot
self.nclasses = nclasses
self.subset_percent = int(subset_percent)
self.ingest_params = ingest_params
if alphabet is None:
self.alphabet = None
else:
self.alphabet = ct.c_char_p(alphabet)
self.load_library()
self.alloc()
self.start()
atexit.register(self.stop)
def load_library(self):
path = os.path.dirname(os.path.realpath(__file__))
libpath = os.path.join(path, os.pardir, os.pardir,
'loader', 'bin', 'loader.so')
self.loaderlib = ct.cdll.LoadLibrary(libpath)
self.loaderlib.start.restype = ct.c_void_p
self.loaderlib.next.argtypes = [ct.c_void_p]
self.loaderlib.stop.argtypes = [ct.c_void_p]
self.loaderlib.reset.argtypes = [ct.c_void_p]
def alloc(self):
def alloc_bufs(dim0, dtype):
return [self.be.iobuf(dim0=dim0, dtype=dtype) for _ in range(2)]
def ct_cast(buffers, idx):
return ct.cast(int(buffers[idx].raw()), ct.c_void_p)
def cast_bufs(buffers):
return BufferPair(ct_cast(buffers, 0), ct_cast(buffers, 1))
self.data = alloc_bufs(self.datum_size, self.datum_dtype)
self.targets = alloc_bufs(self.target_size, self.target_dtype)
self.meta = alloc_bufs(2, np.int32)
self.media_params.alloc(self)
self.device_params = DeviceParams(self.be.device_type,
self.be.device_id,
cast_bufs(self.data),
cast_bufs(self.targets),
cast_bufs(self.meta))
if self.onehot:
self.onehot_labels = self.be.iobuf(self.nclasses,
dtype=self.be.default_dtype)
if self.datum_dtype == self.be.default_dtype:
self.backend_data = None
else:
self.backend_data = self.be.iobuf(self.datum_size,
dtype=self.be.default_dtype)
@property
def nbatches(self):
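        # Ceiling division: -((start_idx - ndata) // bsz) equals
        # ceil((ndata - start_idx) / bsz), so a trailing partial
        # minibatch still counts as a full batch.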
return -((self.start_idx - self.ndata) // self.bsz)
def start(self):
"""
Launch background threads for loading the data.
"""
if not os.path.exists(self.archive_dir):
logger.warning('%s not found. Triggering data ingest...' % self.archive_dir)
os.makedirs(self.archive_dir)
if self.item_count.value == 0:
indexer = Indexer(self.repo_dir, self.index_file)
indexer.run()
datum_dtype_size = np.dtype(self.datum_dtype).itemsize
target_dtype_size = np.dtype(self.target_dtype).itemsize
if self.ingest_params is None:
ingest_params = ct.POINTER(MediaParams)()
else:
ingest_params = ct.POINTER(MediaParams)(self.ingest_params)
self.loader = self.loaderlib.start(
ct.byref(self.item_count), self.bsz,
ct.c_char_p(self.repo_dir.encode()),
ct.c_char_p(self.archive_dir.encode()),
ct.c_char_p(self.index_file.encode()),
ct.c_char_p(self.archive_prefix.encode()),
self.shuffle, self.reshuffle,
self.macro_start,
ct.c_int(self.datum_size), ct.c_int(datum_dtype_size),
ct.c_int(self.target_size), ct.c_int(target_dtype_size),
ct.c_int(self.target_conversion),
self.subset_percent,
ct.POINTER(MediaParams)(self.media_params),
ct.POINTER(DeviceParams)(self.device_params),
ingest_params,
self.alphabet)
self.ndata = self.item_count.value
if self.loader is None:
raise RuntimeError('Failed to start data loader.')
def stop(self):
"""
Clean up and exit background threads.
"""
self.loaderlib.stop(self.loader)
def reset(self):
"""
Restart data from index 0
"""
self.buffer_id = 0
self.start_idx = 0
self.loaderlib.reset(self.loader)
def next(self, start):
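        # Fetch one minibatch. If it would run past the end of the dataset,
        # wrap the next epoch's start index so each batch stays exactly bsz
        # items.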
end = min(start + self.bsz, self.ndata)
if end == self.ndata:
self.start_idx = self.bsz - (self.ndata - start)
self.loaderlib.next(self.loader)
if self.backend_data is None:
data = self.data[self.buffer_id]
else:
# Convert data to the required precision.
self.backend_data[:] = self.data[self.buffer_id]
data = self.backend_data
if self.onehot:
# Convert labels to one-hot encoding.
self.onehot_labels[:] = self.be.onehot(
self.targets[self.buffer_id], axis=0)
targets = self.onehot_labels
else:
targets = self.targets[self.buffer_id]
meta = self.meta[self.buffer_id]
self.buffer_id = 1 if self.buffer_id == 0 else 0
return self.media_params.process(self, data, targets, meta)
def __iter__(self):
for start in range(self.start_idx, self.ndata, self.bsz):
yield self.next(start)
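# A minimal usage sketch (hypothetical paths and parameters; assumes a neon
# backend has been generated first, e.g. with neon.backends.gen_backend, and
# that a MediaParams subclass appropriate for the media type is used):
#
#     train = DataLoader(set_name='train', repo_dir='/data/train',
#                        media_params=my_image_params, target_size=1,
#                        nclasses=10)
#     for x, t in train:
#         pass  # feed (data, targets) minibatches to a model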
|
Welcoming the 2012 Fall Events on Beech Mountain!
The mountain is showing the golden colors of fall; check the live cams. The Autumn of Oz and Woolly Worm festivals are here. Hikers and mountain bikers are enjoying temperatures in the upper 50s. Hike the great Emerald Outback trail on top of Beech Mountain, or just relax on our beautiful mountain meadow and get pampered with good food at our Alpen Restaurant & Bar or our Bullwinkels Pizzeria and Sportscafe.
|
from unittest import mock
from .common import AttrDict, BuiltinTest
from bfg9000.builtins import find, project, regenerate, version # noqa
from bfg9000.builtins.file_types import make_file_list, static_file
from bfg9000.file_types import *
from bfg9000.path import Path, Root
def srcpath(p):
return Path(p, Root.srcdir)
class TestStaticFile(BuiltinTest):
def test_basic(self):
expected = File(srcpath('file.txt'))
self.assertSameFile(static_file(self.context, File, 'file.txt'),
expected)
self.assertEqual(list(self.build.sources()), [self.bfgfile, expected])
def test_path(self):
p = srcpath('file.txt')
expected = File(p)
self.assertSameFile(static_file(self.context, File, p), expected)
self.assertEqual(list(self.build.sources()), [self.bfgfile, expected])
def test_builddir_path(self):
p = Path('file.txt', Root.builddir)
expected = File(p)
self.assertSameFile(static_file(self.context, File, p), expected)
self.assertEqual(list(self.build.sources()), [self.bfgfile])
def test_submodule(self):
with self.context.push_path(Path('dir/build.bfg', Root.srcdir)):
expected = File(srcpath('dir/file.txt'))
self.assertSameFile(static_file(self.context, File, 'file.txt'),
expected)
self.assertEqual(list(self.build.sources()),
[self.bfgfile, expected])
def test_no_dist(self):
p = srcpath('file.txt')
expected = File(p)
self.assertSameFile(static_file(self.context, File, p, dist=False),
expected)
self.assertEqual(list(self.build.sources()), [self.bfgfile])
def test_params_default(self):
expected = SourceFile(srcpath('file.txt'), 'c')
self.assertSameFile(static_file(
self.context, SourceFile, 'file.txt', params=[('lang', 'c')]
), expected)
self.assertEqual(list(self.build.sources()), [self.bfgfile, expected])
def test_params_custom(self):
expected = SourceFile(srcpath('file.txt'), 'c++')
self.assertSameFile(static_file(
self.context, SourceFile, 'file.txt', params=[('lang', 'c')],
kwargs={'lang': 'c++'}
), expected)
self.assertEqual(list(self.build.sources()), [self.bfgfile, expected])
def test_extra_kwargs(self):
self.assertRaises(TypeError, static_file, self.context,
SourceFile, 'file.txt', params=[('lang', 'c')],
kwargs={'lang': 'c++', 'extra': 'value'})
self.assertEqual(list(self.build.sources()), [self.bfgfile])
class TestFileList(BuiltinTest):
def make_file_list(self, *args):
def make_file(src, format=None):
obj = ObjectFile(src.path.stripext('.o').reroot(), format,
src.lang)
obj.creator = AttrDict(file=src)
return obj
files = [SourceFile(srcpath(i), 'c++') for i in args]
return make_file_list(self.context, make_file, files, format='elf')
def test_len(self):
self.assertEqual(len(self.make_file_list()), 0)
self.assertEqual(len(self.make_file_list('foo.cpp', 'bar.cpp')), 2)
def test_index_int(self):
f = self.make_file_list('foo.cpp', 'bar.cpp')
self.assertSameFile(f[0], ObjectFile(Path('foo.o'), 'elf', 'c++'))
def test_index_str(self):
f = self.make_file_list('foo.cpp', 'bar.cpp')
self.assertSameFile(f['foo.cpp'],
ObjectFile(Path('foo.o'), 'elf', 'c++'))
def test_index_path(self):
f = self.make_file_list('foo.cpp', 'bar.cpp')
self.assertSameFile(f[srcpath('foo.cpp')],
ObjectFile(Path('foo.o'), 'elf', 'c++'))
self.assertSameFile(f[srcpath('bar.cpp')],
ObjectFile(Path('bar.o'), 'elf', 'c++'))
def test_index_file(self):
f = self.make_file_list('foo.cpp', 'bar.cpp')
src = SourceFile(srcpath('foo.cpp'), 'c++')
self.assertEqual(f[src], ObjectFile(Path('foo.o'), 'elf', 'c++'))
def test_index_path_not_found(self):
f = self.make_file_list('foo.cpp', 'bar.cpp')
with self.assertRaises(IndexError):
f[srcpath('goofy.cpp')]
def test_submodule(self):
f = self.make_file_list('dir/foo.cpp', 'dir/bar.cpp')
obj = ObjectFile(Path('dir/foo.o'), 'elf', 'c++')
with self.context.push_path(Path('dir/build.bfg', Root.srcdir)):
self.assertSameFile(f['foo.cpp'], obj)
self.assertSameFile(f['dir/foo.cpp'], obj)
def test_eq(self):
f1 = self.make_file_list('foo.cpp', 'bar.cpp')
f2 = self.make_file_list('foo.cpp', 'bar.cpp')
f3 = self.make_file_list('baz.cpp', 'quux.cpp')
s = list(f1)
self.assertTrue(f1 == f1)
self.assertFalse(f1 != f1)
self.assertTrue(f1 == f2)
self.assertFalse(f1 != f2)
self.assertFalse(f1 == f3)
self.assertTrue(f1 != f3)
self.assertTrue(f1 == s)
self.assertFalse(f1 != s)
def test_add(self):
f = self.make_file_list('foo.cpp', 'bar.cpp')
self.assertIsInstance(f + ['blah.cpp'], list)
self.assertEqual(f + ['blah.cpp'], [
ObjectFile(Path('foo.o'), 'elf', 'c++'),
ObjectFile(Path('bar.o'), 'elf', 'c++'),
'blah.cpp'
])
def test_radd(self):
f = self.make_file_list('foo.cpp', 'bar.cpp')
self.assertIsInstance(['blah.cpp'] + f, list)
self.assertEqual(['blah.cpp'] + f, [
'blah.cpp',
ObjectFile(Path('foo.o'), 'elf', 'c++'),
ObjectFile(Path('bar.o'), 'elf', 'c++'),
])
class TestAutoFile(BuiltinTest):
def test_identity(self):
expected = File(srcpath('file.txt'))
self.assertIs(self.context['auto_file'](expected), expected)
self.assertEqual(list(self.build.sources()), [self.bfgfile])
def test_source_file(self):
expected = SourceFile(srcpath('file.cpp'), 'c++')
self.assertSameFile(self.context['auto_file']('file.cpp'),
expected)
self.assertEqual(list(self.build.sources()), [self.bfgfile, expected])
def test_header_file(self):
expected = HeaderFile(srcpath('file.hpp'), 'c++')
self.assertSameFile(self.context['auto_file']('file.hpp'),
expected)
self.assertEqual(list(self.build.sources()), [self.bfgfile, expected])
def test_other_file(self):
expected = File(srcpath('file.txt'))
self.assertSameFile(self.context['auto_file']('file.txt'),
expected)
self.assertEqual(list(self.build.sources()), [self.bfgfile, expected])
def test_directory(self):
expected = Directory(srcpath('directory/'))
self.assertSameFile(self.context['auto_file']('directory/'),
expected)
self.assertEqual(list(self.build.sources()), [self.bfgfile, expected])
def test_header_directory(self):
expected = HeaderDirectory(srcpath('directory/'), 'c++')
self.assertSameFile(self.context['auto_file']('directory/', 'c++'),
expected)
self.assertEqual(list(self.build.sources()), [self.bfgfile, expected])
def test_auxext(self):
expected = HeaderFile(srcpath('file.h'), 'c++')
self.assertSameFile(self.context['auto_file']('file.h', 'c++'),
expected)
self.assertEqual(list(self.build.sources()), [self.bfgfile, expected])
def test_src_lang(self):
expected_src = SourceFile(srcpath('file.cpp'), 'qtmoc')
self.assertSameFile(self.context['auto_file']('file.cpp', 'qtmoc'),
expected_src)
expected_hdr = HeaderFile(srcpath('file.hpp'), 'qtmoc')
self.assertSameFile(self.context['auto_file']('file.hpp', 'qtmoc'),
expected_hdr)
self.assertEqual(list(self.build.sources()), [
self.bfgfile, expected_src, expected_hdr,
])
def test_unknown_ext(self):
expected = SourceFile(srcpath('file.goofy'), 'c++')
self.assertSameFile(self.context['auto_file']('file.goofy', 'c++'),
expected)
self.assertEqual(list(self.build.sources()), [self.bfgfile, expected])
def test_unknown_lang(self):
expected = SourceFile(srcpath('file.goofy'), 'goofy')
self.assertSameFile(self.context['auto_file']('file.goofy', 'goofy'),
expected)
self.assertEqual(list(self.build.sources()), [self.bfgfile, expected])
def test_submodule(self):
with self.context.push_path(Path('dir/build.bfg', Root.srcdir)):
expected = SourceFile(srcpath('file.cpp'), 'c++')
self.assertIs(self.context['auto_file'](expected), expected)
self.assertEqual(list(self.build.sources()), [self.bfgfile])
expected = SourceFile(srcpath('dir/file.cpp'), 'c++')
self.assertSameFile(self.context['auto_file']('file.cpp'),
expected)
self.assertEqual(list(self.build.sources()),
[self.bfgfile, expected])
class TestGenericFile(BuiltinTest):
type = File
args = ()
fn = 'generic_file'
filename = 'file.txt'
def test_identity(self):
expected = self.type(srcpath(self.filename), *self.args)
self.assertIs(self.context[self.fn](expected), expected)
self.assertEqual(list(self.build.sources()), [self.bfgfile])
def test_basic(self):
expected = self.type(srcpath(self.filename), *self.args)
self.assertSameFile(self.context[self.fn](self.filename),
expected)
self.assertEqual(list(self.build.sources()), [self.bfgfile, expected])
def test_no_dist(self):
expected = self.type(srcpath(self.filename), *self.args)
self.assertSameFile(
self.context[self.fn](self.filename, dist=False), expected
)
self.assertEqual(list(self.build.sources()), [self.bfgfile])
def test_path(self):
path = srcpath(self.filename)
expected = self.type(path, *self.args)
self.assertSameFile(self.context[self.fn](path), expected)
self.assertEqual(list(self.build.sources()), [self.bfgfile, expected])
def test_submodule(self):
with self.context.push_path(Path('dir/build.bfg', Root.srcdir)):
expected = self.type(srcpath(self.filename), *self.args)
self.assertIs(self.context[self.fn](expected), expected)
self.assertEqual(list(self.build.sources()), [self.bfgfile])
expected = self.type(srcpath('dir/' + self.filename), *self.args)
self.assertSameFile(self.context[self.fn](self.filename),
expected)
self.assertEqual(list(self.build.sources()),
[self.bfgfile, expected])
class TestModuleDefFile(TestGenericFile):
type = ModuleDefFile
fn = 'module_def_file'
filename = 'file.def'
class TestSourceFile(TestGenericFile):
type = SourceFile
args = ('c++',)
fn = 'source_file'
filename = 'file.cpp'
lang = 'c++'
def test_lang(self):
expected = self.type(srcpath('file.goofy'), self.lang)
self.assertSameFile(self.context[self.fn](
'file.goofy', lang=self.lang
), expected)
self.assertEqual(list(self.build.sources()), [self.bfgfile, expected])
class TestHeaderFile(TestSourceFile):
type = HeaderFile
fn = 'header_file'
filename = 'file.hpp'
class TestDirectory(TestGenericFile):
type = Directory
fn = 'directory'
filename = 'dir'
def test_include(self):
def mock_walk(path, variables=None):
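            # Stand-in for bfg9000.builtins.find.walk: yields (dirpath,
            # subdirs, files) triples rooted at the source directory.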
p = srcpath
return [
(p('dir'), [p('dir/sub')], [p('dir/file.txt')]),
(p('dir/sub'), [], [p('dir/sub/file2.txt')]),
]
expected = self.type(srcpath(self.filename), [
File(srcpath('dir/file.txt')),
File(srcpath('dir/sub/file2.txt')),
])
with mock.patch('bfg9000.builtins.find.walk', mock_walk):
self.assertSameFile(
self.context[self.fn](self.filename, include='**/*.txt'),
expected
)
self.assertEqual(list(self.build.sources()),
[self.bfgfile] + expected.files + [expected])
class TestHeaderDirectory(TestDirectory):
type = HeaderDirectory
fn = 'header_directory'
filename = 'include'
def test_include(self):
def mock_walk(path, variables=None):
p = srcpath
return [
(p('include'), [p('include/sub')], [p('include/file.hpp')]),
(p('include/sub'), [], [p('include/sub/file2.hpp')]),
]
expected = self.type(srcpath(self.filename), [
HeaderFile(srcpath('include/file.hpp'), 'c++'),
HeaderFile(srcpath('include/sub/file2.hpp'), 'c++'),
], langs=['c++'])
with mock.patch('bfg9000.builtins.find.walk', mock_walk):
self.assertSameFile(
self.context[self.fn](self.filename, include='**/*.hpp'),
expected
)
self.assertEqual(list(self.build.sources()),
[self.bfgfile] + expected.files + [expected])
|
Luxury Replica Watches A luxury sports watch is a sports watch, yet it does not seem out of tune with dress wear, making it easy for the wearer to shift between different roles, which is why it is welcomed by the elite. Besides a high-grade movement, a luxury sports watch also offers sophisticated modeling and exquisite workmanship … Continue reading "Choose luxury sports replica watches"
Luxury Audemars Piguet Replica Watches The chronograph has only about 150 years of history. Audemars Piguet, founded in 1875, made 1,625 movements between 1880 and 1890; 627 of them were equipped with a timing function, 299 of those also had a split-seconds function, and six had a lightning-seconds function. In 1993, … Continue reading "A classic example of Audemars Piguet replica watches"
|
"""
Django settings for apprelease project.
Generated by 'django-admin startproject' using Django 1.8.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
ADMINS = [('Gabriel Ferreira', '[email protected]'),('admin', '[email protected]')]
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('SECRET_KEY', 'kgv63hbu@9!qo1#2k)tsp7ko5u^f82#7amz$_u@nq#@7_eayx3')
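# The hard-coded fallback above is a development default only; always provide
# SECRET_KEY through the environment in production.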
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.getenv('DEBUG', 'True') == 'True'
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django_admin_bootstrapped',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'bootstrap3',
'storages',
'release'
)
DAB_FIELD_RENDERER = 'django_admin_bootstrapped.renderers.BootstrapFieldRenderer'
from django.contrib import messages
MESSAGE_TAGS = {
messages.SUCCESS: 'alert-success success',
messages.WARNING: 'alert-warning warning',
messages.ERROR: 'alert-danger error'
}
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'apprelease.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.media',
'django.core.context_processors.static',
],
},
},
]
WSGI_APPLICATION = 'apprelease.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
if DEBUG:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
else:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': os.environ.get('DB_NAME'),
'USER': os.environ.get('DB_USER'),
'PASSWORD': os.environ.get('DB_PASSWORD'),
'HOST': os.environ.get('DB_HOST'), # Or an IP Address that your DB is hosted on
'PORT': os.environ.get('DB_PORT'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
if DEBUG:
STATIC_URL = '/static/'
MEDIA_URL = "/"
else:
AWS_HEADERS = { # see http://developer.yahoo.com/performance/rules.html#expires
'Cache-Control': 'max-age=%s' % (os.environ.get('CACHE_MAX_AGE')),
}
AWS_STORAGE_BUCKET_NAME = os.environ.get('AWS_STORAGE_BUCKET_NAME')
AWS_ACCESS_KEY_ID = os.environ.get('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = os.environ.get('AWS_SECRET_ACCESS_KEY')
AWS_MEDIA_CUSTOM_DOMAIN = os.environ.get('AWS_MEDIA_CUSTOM_DOMAIN')
AWS_STATIC_CUSTOM_DOMAIN = os.environ.get('AWS_STATIC_CUSTOM_DOMAIN')
STATICFILES_LOCATION = 'static'
STATICFILES_STORAGE = 'custom_storages.StaticStorage'
STATIC_URL = "%s/%s/" % (AWS_STATIC_CUSTOM_DOMAIN, STATICFILES_LOCATION)
MEDIAFILES_LOCATION = 'media'
MEDIA_URL = "%s/%s/" % (AWS_MEDIA_CUSTOM_DOMAIN, MEDIAFILES_LOCATION)
DEFAULT_FILE_STORAGE = 'custom_storages.MediaStorage'
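# A minimal sketch of the custom_storages module referenced above (assumed to
# live next to settings.py; it namespaces static and media files inside the
# S3 bucket using django-storages' S3BotoStorage):
#
#     # custom_storages.py
#     from django.conf import settings
#     from storages.backends.s3boto import S3BotoStorage
#
#     class StaticStorage(S3BotoStorage):
#         location = settings.STATICFILES_LOCATION
#
#     class MediaStorage(S3BotoStorage):
#         location = settings.MEDIAFILES_LOCATION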
# API config
# http://www.django-rest-framework.org/
REST_FRAMEWORK = {
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_PERMISSION_CLASSES': ('rest_framework.permissions.IsAdminUser',),
'PAGE_SIZE': 10,
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.SessionAuthentication',
)
# 'DEFAULT_PERMISSION_CLASSES': [
# 'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
# ]
}
try:
import django
django.setup()
from django.conf import settings
try:
from django.contrib.auth import get_user_model
User = get_user_model()
except ImportError:
from django.contrib.auth.models import User
if User.objects.count() == 0:
for user in settings.ADMINS:
username = user[0].replace(' ', '')
email = user[1]
password = 'admin'
print('Creating account for %s (%s)' % (username, email))
admin = User.objects.create_superuser(email=email, username=username, password=password)
admin.is_active = True
admin.is_admin = True
admin.save()
else:
print('Admin accounts can only be initialized if no Accounts exist')
except Exception:
    # Settings can be imported before the app registry or the database is
    # ready; in that case skip this admin bootstrap silently.
    pass
|
Cher wasn’t happy with her Vegas shows because her audience was old and sickly. For some reason, the majority of ticket buyers were elderly citizens. Why the audience was so much older than the audiences for other shows in the same venue is unknown, but one might guess it had something to do with a lack of talent.
While Elton John was enjoying high-energy crowds at the same venue, Cher wished she could get the same audience. Maybe the fact that Elton John never lip-synced once in his life had something to do with it. If anyone could take the top prize for the most lip-syncing in a career, it would have to be Cher. Audiences will not put up with Auto-Tune and lip-syncing anymore; there are too many choices in shows, especially in Vegas.
Take note, Britney Spears: keep your promise not to lip-sync, or you may end up complaining about your lame audiences.
Even the Legends in Concert show doesn’t lip-sync when they impersonate Cher. It’s pretty lame when you have to see a Cher impersonator to avoid lip-syncing. Cher had the best venue in all of Vegas, Caesar’s Colosseum, and it is just disrespectful not to use your voice when you perform there.
|
from git import Repo
import os
import shutil
import stat
import utils
import tempfile
def on_rm_error(func, path, exc_info):
# path contains the path of the file that couldn't be removed
# let's just assume that it's read-only and unlink it.
os.chmod(path, stat.S_IWRITE)
os.unlink(path)
class StrykuBot:
def __init__(self):
self.password = ''
self.gh_url = 'https://github.com/stryku/'
self.repo_name = ''
self.name = 'stryku-bot'
self.email = '[email protected]'
self.repo_dir = ''
self.tmp_repo_dir = None
self.repo = None
self.git = None
self.commit_prefix = '[stryku-bot]: '
self.last_branch = ''
        with open('strykubot.password') as password_file:
            self.password = password_file.read().strip()
def clone_repo(self, repo_name, dest='build', rm_old=True):
if rm_old:
if os.path.exists(dest):
shutil.rmtree(dest, onerror=on_rm_error)
self.repo_dir = dest
self.repo_name = repo_name
self.repo = Repo.clone_from(self.gh_url + self.repo_name, dest)
self.git = self.repo.git
self.git.checkout('dev')
writer = self.repo.config_writer()
writer.set_value('user', 'name', self.name)
writer.set_value('user', 'email', self.email)
writer.write()
def clone_tmp_repo(self, repo_name):
self.tmp_repo_dir = tempfile.TemporaryDirectory()
self.clone_repo(repo_name, self.tmp_repo_dir.name, False)
def add_all(self):
self.git.add('--all')
def checkout_branch(self, branch):
self.last_branch = branch
try:
self.git.checkout('HEAD', b=branch)
except Exception:
print("Branch already exist. Remove and create a new one")
self.git.branch('-D', branch)
self.git.checkout('HEAD', b=branch)
def commit(self, msg):
self.git.commit(m=self.commit_prefix + msg)
def push_last_branch(self):
command = ('git push https://stryku-bot:%[email protected]/stryku/%s %s' % (self.password, self.repo_name, self.last_branch))
print(utils.run_process_split(command, cwd=self.repo_dir))
def get_repo_dir(self):
return self.repo_dir
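# A minimal usage sketch (repository name and commit message are hypothetical):
#
#     bot = StrykuBot()
#     bot.clone_repo('some-repo')          # clones github.com/stryku/some-repo
#     bot.checkout_branch('auto-update')
#     # ... edit files under bot.get_repo_dir() ...
#     bot.add_all()
#     bot.commit('update generated files')
#     bot.push_last_branch()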
|
#!/usr/bin/env python
# Copyright (c) 2014 Eugene Zhuk.
# Use of this source code is governed by the MIT license that can be found
# in the LICENSE file.
"""Checks AWS EBS Snapshot status.
This script displays the current status of one or more AWS EBS snapshots.
Usage:
./check_snapshot_status.py [options]
"""
import boto.ec2
import optparse
import sys
import time
class Error(Exception):
pass
def main():
parser = optparse.OptionParser('Usage: %prog [options]')
parser.add_option('-s', '--snapshot', dest='snapshots', action='append',
help='The snapshot ID(s) to check status for. This option is required.')
(opts, args) = parser.parse_args()
if 0 != len(args) or opts.snapshots is None:
parser.print_help()
return 1
try:
c = boto.connect_ec2()
while True:
snapshots = c.get_all_snapshots(snapshot_ids=opts.snapshots)
if not snapshots:
raise Error('could not find \'{0}\''.format(opts.snapshots))
for snap in snapshots:
print '{0}: [{1}{2}] {3}'.format(snap.id,
'#' * 4 * (int(snap.progress.strip('%')) / 10),
' ' * 4 * ((100 - int(snap.progress.strip('%'))) / 10),
snap.progress)
if all(snap.status != 'pending' for snap in snapshots):
break
size = len(snapshots)
if (1 < size):
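                # Move the cursor up one line per snapshot (ANSI escape
                # ESC[1A) so the progress bars redraw in place.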
sys.stdout.write('\x1b[1A' * size)
time.sleep(3)
except (Error, Exception), err:
sys.stderr.write('[ERROR] {0}\n'.format(err))
return 1
return 0
if __name__ == '__main__':
sys.exit(main())
|
Egypt will try 20 Al Jazeera journalists — including four foreigners — on terrorism charges.
Egypt's military-backed government says it will bring terrorism charges against 20 Al Jazeera journalists, including four foreigners. The announcement, widely seen as part of the interim government's crackdown on the Muslim Brotherhood in the country, accuses the journalists of endangering national security and of joining or assisting a terrorist group. Last month, the Egyptian government designated the Muslim Brotherhood as a "terrorist" organization.
The defendants are accused of "manipulat[ing] pictures" and creating "unreal scenes to give the impression to the outside world that there is a civil war that threatens to bring down the state," and of helping "the terrorist group in achieving its goals and influencing the public opinion," according to the AP's translation of the statement from Egyptian officials.
The current government has long been critical of the way in which foreign media covers their actions, particularly as their attempts to halt street protests in support of deposed president Mohammed Morsi became increasingly violent. In October, the government warned foreign journalists against producing coverage that is "biased to the Muslim Brotherhood," Morsi's political affiliation. But the government has been particularly critical of Al Jazeera's coverage, in part because the news organization has openly called the July 3rd military ouster of Morsi's elected government a "coup," and is seen as sympathetic to the deposed government. Authorities have virtually shut down the station's Cairo facilities, and five Al Jazeera journalists are currently detained by the government.
Those facing charges include Peter Greste of Australia, Canadian-Egyptian Mohammed Fahmy, a Dutch citizen, and two Britons, according to the AP. The Egyptian statement doesn't name the others accused of terrorism charges.
|
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:fdm=marker:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import zipfile
from functools import partial
from PyQt4.Qt import (QFont, QVariant, QDialog, Qt, QColor, QColorDialog,
QMenu, QInputDialog)
from calibre.constants import iswindows, isxp
from calibre.utils.config import Config, StringConfig, JSONConfig
from calibre.gui2 import min_available_height
from calibre.gui2.shortcuts import ShortcutConfig
from calibre.gui2.viewer.config_ui import Ui_Dialog
from calibre.utils.localization import get_language
def config(defaults=None):
desc = _('Options to customize the ebook viewer')
if defaults is None:
c = Config('viewer', desc)
else:
c = StringConfig(defaults, desc)
c.add_opt('remember_window_size', default=False,
help=_('Remember last used window size'))
c.add_opt('user_css', default='',
help=_('Set the user CSS stylesheet. This can be used to customize the look of all books.'))
c.add_opt('max_fs_width', default=800,
help=_("Set the maximum width that the book's text and pictures will take"
" when in fullscreen mode. This allows you to read the book text"
" without it becoming too wide."))
c.add_opt('max_fs_height', default=-1,
help=_("Set the maximum height that the book's text and pictures will take"
" when in fullscreen mode. This allows you to read the book text"
" without it becoming too tall. Note that this setting only takes effect in paged mode (which is the default mode)."))
c.add_opt('fit_images', default=True,
help=_('Resize images larger than the viewer window to fit inside it'))
c.add_opt('hyphenate', default=False, help=_('Hyphenate text'))
c.add_opt('hyphenate_default_lang', default='en',
help=_('Default language for hyphenation rules'))
c.add_opt('remember_current_page', default=True,
help=_('Save the current position in the document, when quitting'))
c.add_opt('wheel_flips_pages', default=False,
help=_('Have the mouse wheel turn pages'))
c.add_opt('line_scrolling_stops_on_pagebreaks', default=False,
help=_('Prevent the up and down arrow keys from scrolling past '
'page breaks'))
c.add_opt('page_flip_duration', default=0.5,
help=_('The time, in seconds, for the page flip animation. Default'
' is half a second.'))
c.add_opt('font_magnification_step', default=0.2,
help=_('The amount by which to change the font size when clicking'
' the font larger/smaller buttons. Should be a number between '
'0 and 1.'))
c.add_opt('fullscreen_clock', default=False, action='store_true',
help=_('Show a clock in fullscreen mode.'))
c.add_opt('fullscreen_pos', default=False, action='store_true',
help=_('Show reading position in fullscreen mode.'))
c.add_opt('fullscreen_scrollbar', default=True, action='store_false',
help=_('Show the scrollbar in fullscreen mode.'))
c.add_opt('start_in_fullscreen', default=False, action='store_true',
help=_('Start viewer in full screen mode'))
c.add_opt('show_fullscreen_help', default=True, action='store_false',
help=_('Show full screen usage help'))
c.add_opt('cols_per_screen', default=1)
c.add_opt('use_book_margins', default=False, action='store_true')
c.add_opt('top_margin', default=20)
c.add_opt('side_margin', default=40)
c.add_opt('bottom_margin', default=20)
c.add_opt('text_color', default=None)
c.add_opt('background_color', default=None)
c.add_opt('show_controls', default=True)
fonts = c.add_group('FONTS', _('Font options'))
fonts('serif_family', default='Times New Roman' if iswindows else 'Liberation Serif',
help=_('The serif font family'))
fonts('sans_family', default='Verdana' if iswindows else 'Liberation Sans',
help=_('The sans-serif font family'))
fonts('mono_family', default='Courier New' if iswindows else 'Liberation Mono',
help=_('The monospaced font family'))
fonts('default_font_size', default=20, help=_('The standard font size in px'))
fonts('mono_font_size', default=16, help=_('The monospaced font size in px'))
fonts('standard_font', default='serif', help=_('The standard font type'))
fonts('minimum_font_size', default=8, help=_('The minimum font size in px'))
return c
def load_themes():
return JSONConfig('viewer_themes')
class ConfigDialog(QDialog, Ui_Dialog):
def __init__(self, shortcuts, parent=None):
QDialog.__init__(self, parent)
self.setupUi(self)
for x in ('text', 'background'):
getattr(self, 'change_%s_color_button'%x).clicked.connect(
partial(self.change_color, x, reset=False))
getattr(self, 'reset_%s_color_button'%x).clicked.connect(
partial(self.change_color, x, reset=True))
self.css.setToolTip(_('Set the user CSS stylesheet. This can be used to customize the look of all books.'))
self.shortcuts = shortcuts
self.shortcut_config = ShortcutConfig(shortcuts, parent=self)
bb = self.buttonBox
bb.button(bb.RestoreDefaults).clicked.connect(self.restore_defaults)
with zipfile.ZipFile(P('viewer/hyphenate/patterns.zip',
allow_user_override=False), 'r') as zf:
pats = [x.split('.')[0].replace('-', '_') for x in zf.namelist()]
names = list(map(get_language, pats))
        pmap = dict(zip(names, pats))
for x in sorted(names):
self.hyphenate_default_lang.addItem(x, QVariant(pmap[x]))
self.hyphenate_pats = pats
self.hyphenate_names = names
p = self.tabs.widget(1)
p.layout().addWidget(self.shortcut_config)
if isxp:
self.hyphenate.setVisible(False)
self.hyphenate_default_lang.setVisible(False)
self.hyphenate_label.setVisible(False)
self.themes = load_themes()
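        # self.themes maps 'theme_<name>' -> raw config source string (see save_theme).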
self.save_theme_button.clicked.connect(self.save_theme)
self.load_theme_button.m = m = QMenu()
self.load_theme_button.setMenu(m)
m.triggered.connect(self.load_theme)
self.delete_theme_button.m = m = QMenu()
self.delete_theme_button.setMenu(m)
m.triggered.connect(self.delete_theme)
opts = config().parse()
self.load_options(opts)
self.init_load_themes()
self.clear_search_history_button.clicked.connect(self.clear_search_history)
self.resize(self.width(), min(self.height(), max(575, min_available_height()-25)))
def clear_search_history(self):
from calibre.gui2 import config
config['viewer_search_history'] = []
def save_theme(self):
themename, ok = QInputDialog.getText(self, _('Theme name'),
_('Choose a name for this theme'))
if not ok:
return
themename = unicode(themename).strip()
if not themename:
return
c = config('')
c.add_opt('theme_name_xxx', default=themename)
self.save_options(c)
self.themes['theme_'+themename] = c.src
self.init_load_themes()
self.theming_message.setText(_('Saved settings as the theme named: %s')%
themename)
def init_load_themes(self):
for x in ('load', 'delete'):
m = getattr(self, '%s_theme_button'%x).menu()
m.clear()
for x in self.themes.iterkeys():
title = x[len('theme_'):]
ac = m.addAction(title)
ac.theme_id = x
def load_theme(self, ac):
theme = ac.theme_id
raw = self.themes[theme]
self.load_options(config(raw).parse())
self.theming_message.setText(_('Loaded settings from the theme %s')%
theme[len('theme_'):])
def delete_theme(self, ac):
theme = ac.theme_id
del self.themes[theme]
self.init_load_themes()
self.theming_message.setText(_('Deleted the theme named: %s')%
theme[len('theme_'):])
def restore_defaults(self):
opts = config('').parse()
self.load_options(opts)
def load_options(self, opts):
self.opt_remember_window_size.setChecked(opts.remember_window_size)
self.opt_remember_current_page.setChecked(opts.remember_current_page)
self.opt_wheel_flips_pages.setChecked(opts.wheel_flips_pages)
self.opt_page_flip_duration.setValue(opts.page_flip_duration)
fms = opts.font_magnification_step
if fms < 0.01 or fms > 1:
fms = 0.2
self.opt_font_mag_step.setValue(int(fms*100))
self.opt_line_scrolling_stops_on_pagebreaks.setChecked(
opts.line_scrolling_stops_on_pagebreaks)
self.serif_family.setCurrentFont(QFont(opts.serif_family))
self.sans_family.setCurrentFont(QFont(opts.sans_family))
self.mono_family.setCurrentFont(QFont(opts.mono_family))
self.default_font_size.setValue(opts.default_font_size)
self.minimum_font_size.setValue(opts.minimum_font_size)
self.mono_font_size.setValue(opts.mono_font_size)
self.standard_font.setCurrentIndex(
{'serif':0, 'sans':1, 'mono':2}[opts.standard_font])
self.css.setPlainText(opts.user_css)
self.max_fs_width.setValue(opts.max_fs_width)
self.max_fs_height.setValue(opts.max_fs_height)
pats, names = self.hyphenate_pats, self.hyphenate_names
try:
idx = pats.index(opts.hyphenate_default_lang)
except ValueError:
idx = pats.index('en_us')
idx = self.hyphenate_default_lang.findText(names[idx])
self.hyphenate_default_lang.setCurrentIndex(idx)
self.hyphenate.setChecked(opts.hyphenate)
self.hyphenate_default_lang.setEnabled(opts.hyphenate)
self.opt_fit_images.setChecked(opts.fit_images)
self.opt_fullscreen_clock.setChecked(opts.fullscreen_clock)
self.opt_fullscreen_scrollbar.setChecked(opts.fullscreen_scrollbar)
self.opt_start_in_fullscreen.setChecked(opts.start_in_fullscreen)
self.opt_show_fullscreen_help.setChecked(opts.show_fullscreen_help)
self.opt_fullscreen_pos.setChecked(opts.fullscreen_pos)
self.opt_cols_per_screen.setValue(opts.cols_per_screen)
self.opt_override_book_margins.setChecked(not opts.use_book_margins)
for x in ('top', 'bottom', 'side'):
getattr(self, 'opt_%s_margin'%x).setValue(getattr(opts,
x+'_margin'))
for x in ('text', 'background'):
setattr(self, 'current_%s_color'%x, getattr(opts, '%s_color'%x))
self.update_sample_colors()
self.opt_show_controls.setChecked(opts.show_controls)
def change_color(self, which, reset=False):
if reset:
setattr(self, 'current_%s_color'%which, None)
else:
initial = getattr(self, 'current_%s_color'%which)
if initial:
initial = QColor(initial)
else:
initial = Qt.black if which == 'text' else Qt.white
title = (_('Choose text color') if which == 'text' else
_('Choose background color'))
col = QColorDialog.getColor(initial, self,
title, QColorDialog.ShowAlphaChannel)
if col.isValid():
name = unicode(col.name())
setattr(self, 'current_%s_color'%which, name)
self.update_sample_colors()
def update_sample_colors(self):
for x in ('text', 'background'):
val = getattr(self, 'current_%s_color'%x)
if not val:
val = 'inherit' if x == 'text' else 'transparent'
ss = 'QLabel { %s: %s }'%('background-color' if x == 'background'
else 'color', val)
getattr(self, '%s_color_sample'%x).setStyleSheet(ss)
def accept(self, *args):
if self.shortcut_config.is_editing:
from calibre.gui2 import info_dialog
            info_dialog(self, _('Still editing'),
                _('You are in the middle of editing a keyboard shortcut.'
                  ' First complete that by clicking outside the'
                  ' shortcut editing box.'), show=True)
return
self.save_options(config())
return QDialog.accept(self, *args)
def save_options(self, c):
c.set('serif_family', unicode(self.serif_family.currentFont().family()))
c.set('sans_family', unicode(self.sans_family.currentFont().family()))
c.set('mono_family', unicode(self.mono_family.currentFont().family()))
c.set('default_font_size', self.default_font_size.value())
c.set('minimum_font_size', self.minimum_font_size.value())
c.set('mono_font_size', self.mono_font_size.value())
c.set('standard_font', {0:'serif', 1:'sans', 2:'mono'}[
self.standard_font.currentIndex()])
c.set('user_css', unicode(self.css.toPlainText()))
c.set('remember_window_size', self.opt_remember_window_size.isChecked())
c.set('fit_images', self.opt_fit_images.isChecked())
c.set('max_fs_width', int(self.max_fs_width.value()))
max_fs_height = self.max_fs_height.value()
if max_fs_height <= self.max_fs_height.minimum():
max_fs_height = -1
c.set('max_fs_height', max_fs_height)
c.set('hyphenate', self.hyphenate.isChecked())
c.set('remember_current_page', self.opt_remember_current_page.isChecked())
c.set('wheel_flips_pages', self.opt_wheel_flips_pages.isChecked())
c.set('page_flip_duration', self.opt_page_flip_duration.value())
c.set('font_magnification_step',
float(self.opt_font_mag_step.value())/100.)
idx = self.hyphenate_default_lang.currentIndex()
c.set('hyphenate_default_lang',
str(self.hyphenate_default_lang.itemData(idx).toString()))
c.set('line_scrolling_stops_on_pagebreaks',
self.opt_line_scrolling_stops_on_pagebreaks.isChecked())
c.set('fullscreen_clock', self.opt_fullscreen_clock.isChecked())
c.set('fullscreen_pos', self.opt_fullscreen_pos.isChecked())
c.set('fullscreen_scrollbar', self.opt_fullscreen_scrollbar.isChecked())
c.set('show_fullscreen_help', self.opt_show_fullscreen_help.isChecked())
c.set('cols_per_screen', int(self.opt_cols_per_screen.value()))
c.set('start_in_fullscreen', self.opt_start_in_fullscreen.isChecked())
c.set('use_book_margins', not
self.opt_override_book_margins.isChecked())
c.set('text_color', self.current_text_color)
c.set('background_color', self.current_background_color)
c.set('show_controls', self.opt_show_controls.isChecked())
for x in ('top', 'bottom', 'side'):
c.set(x+'_margin', int(getattr(self, 'opt_%s_margin'%x).value()))
|
Did you lose a TECTUS cover plate? We have most finishes in stock and ready to ship.
NOTE: Square cover plates *may* be available but require custom milling. Please give us a call for more information.
|
#!/usr/bin/env python
import string, sys, getopt, codecs
DEFAULT_MAP_FILE = "../kw_maps/kw_map_kannada.txt"
def getKeywordMapFromFile(fileName):
_k_map = {}
with codecs.open(fileName, "r", "utf-8") as fh:
        for line in filter(lambda ln: len(ln) != 0 and not ln.startswith("#"), map(string.strip, fh.readlines())):
(k, _, v) = map(string.strip, line.partition("="))
if not k or not v:
continue
_k_map[unicode(k)] = v
return _k_map
def is_ascii(in_str):
return all(ord(c) < 128 for c in in_str)
def kasi(kw_map_file, target_file):
kw_map = getKeywordMapFromFile(kw_map_file)
buffer_str = u""
quote_seen = False
line_no = 0
need_to_buffer = False
with codecs.open(target_file, "r", "utf-8") as rh, open("%s.C" % target_file, "w") as wh:
# yes, single pass :-)
for ln in rh:
line_no += 1
for ch in ln:
# handling string literals
if '"' == ch:
# Yes toggle
quote_seen = not quote_seen
# if inside the code just write it
if quote_seen:
wh.write(ch)
continue
                # in the state of handling foreign keywords
if need_to_buffer:
# is_ascii will change the state
if is_ascii(ch):
c_kw = kw_map.get(buffer_str, None)
                        # error out for an unmapped keyword
                        if None == c_kw:
                            raise RuntimeError("no such keyword '%s' @ line_no %d" % (buffer_str, line_no))
# write the map and current ascii char
wh.write(c_kw)
wh.write(ch)
# reset the state
buffer_str = u''
need_to_buffer = False
continue
else:
# else append to unicode buffer
buffer_str += ch
continue
# not ascii, drama starts
if not is_ascii(ch):
need_to_buffer = True
buffer_str += ch
continue
# don't care, just stream
wh.write(ch)
def usage():
sys.stderr.write('''usage: %s [-h] [-k <keyword_map_file>] [-v] <file>
-h, --help: show this help and exit.
-k, --kw_map: key word map file (default: %s).
-v, --verbose: enable verbose.
''' % (sys.argv[0], DEFAULT_MAP_FILE))
# start here
def main():
    if len(sys.argv) < 2:
usage()
sys.exit(2)
try:
        opts, args = getopt.getopt(sys.argv[1:], "hk:v", ["help", "kw_map=", "verbose"])
except getopt.GetoptError as err:
sys.stderr.write("Error: %s\n" % str(err))
usage()
sys.exit(2)
kw_map_file = DEFAULT_MAP_FILE
verbose = False
for o, a in opts:
if o in ('-v', '--verbose'):
verbose = True
        elif o in ('-k', '--kw_map'):
kw_map_file = a
elif o in ('-h', '--help'):
usage()
sys.exit()
kasi(kw_map_file, sys.argv[-1])
# let's start
if __name__ == '__main__':
main()
|
AMG all music guide series.
A comprehensive fan resource provides reviews and includes tips on how to obtain key recordings as well as biographies, essays, and a history of country music's evolution from American southern folk music to its many modern derivatives.
xi, 963 pages : illustrations ; 24 cm.
edited by Vladimir Bogdanov, Chris Woodstra, and Stephen Thomas Erlewine.
"A definite must for any serious music collector."
|
## @package uniform_sampling
# Module caffe2.python.layers.uniform_sampling
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from caffe2.python import core, schema
from caffe2.python.layers.layers import LayerParameter, ModelLayer
class UniformSampling(ModelLayer):
"""
    Uniformly samples `num_samples - len(input_record)` unique elements from the
    range [0, num_elements). `samples` is the concatenation of input_record and
    the newly drawn samples. input_record is expected to contain unique elements.
"""
def __init__(
self,
model,
input_record,
num_samples,
num_elements,
name='uniform_sampling',
**kwargs
):
super(UniformSampling, self).__init__(
model, name, input_record, **kwargs
)
assert num_elements > 0
assert isinstance(input_record, schema.Scalar)
self.num_elements = num_elements
self.num_samples = model.net.NextScopedBlob(name + "_num_samples")
self.params.append(
LayerParameter(
parameter=self.num_samples,
initializer=core.CreateOperator(
"GivenTensorInt64Fill",
[],
self.num_samples,
shape=(1, ),
values=[num_samples],
),
optimizer=model.NoOptim,
)
)
self.sampling_prob = model.net.NextScopedBlob(name + "_prob")
self.params.append(
LayerParameter(
parameter=self.sampling_prob,
initializer=core.CreateOperator(
"ConstantFill",
[],
self.sampling_prob,
shape=(num_samples, ),
value=float(num_samples) / num_elements,
dtype=core.DataType.FLOAT
),
optimizer=model.NoOptim,
)
)
self.output_schema = schema.Struct(
(
'samples', schema.Scalar(
np.int32, model.net.NextScopedBlob(name + "_samples")
)
),
('sampling_prob', schema.Scalar(np.float32, self.sampling_prob)),
)
def add_ops(self, net):
net.StopGradient(self.sampling_prob, self.sampling_prob)
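        # shape = num_samples - len(input_record): how many new unique samples
        # still need to be drawn.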
shape = net.Shape([self.input_record()], net.NextScopedBlob("shape"))
shape = net.Sub([self.num_samples, shape], shape)
samples = net.UniqueUniformFill(
[shape, self.input_record()],
net.NextScopedBlob("samples"),
min=0,
max=self.num_elements - 1,
input_as_shape=True
)
net.Concat(
[self.input_record(), samples],
[self.output_schema.samples(), net.NextScopedBlob("split_info")],
axis=0
)
net.StopGradient(
self.output_schema.samples(), self.output_schema.samples()
)
|
There are many more children in China that need our help and your love.
How did the adoptive mother locate the birth family? That should have been included in the story.
I’d like to know more about how this happened. Since child abandonment is illegal in China, it is usually impossible to find out who the child’s biological parents were. Also, SZ is a migrant town, with people coming and going from all over the country. I expect that more children will be able to find their biological parents in the future thanks to genetic testing, but still, this isn’t very common yet. Any information about how this reunion was made possible could help more adoptees find their biological families.
|
"""
Artifacts can be used to represent data created as a side-effect of running a Jenkins build.
Artifacts are files which are associated with a single build. A build can have any number of
artifacts associated with it.
This module provides a class called Artifact which allows you to download objects from the server
and also access them as a stream.
"""
from __future__ import with_statement
import urllib
import os
import logging
import hashlib
from jenkinsapi.exceptions import ArtifactBroken
from jenkinsapi.fingerprint import Fingerprint
log = logging.getLogger(__name__)
class Artifact(object):
"""
Represents a single Jenkins artifact, usually some kind of file
generated as a by-product of executing a Jenkins build.
"""
def __init__(self, filename, url, build=None):
self.filename = filename
self.url = url
self.build = build
def save(self, fspath):
"""
Save the artifact to an explicit path. The containing directory must exist.
        Returns a reference to the file which has just been written.
:param fspath: full pathname including the filename, str
:return: filepath
"""
log.info("Saving artifact @ %s to %s" % (self.url, fspath))
if not fspath.endswith(self.filename):
log.warn("Attempt to change the filename of artifact %s on save." % self.filename)
if os.path.exists(fspath):
if self.build:
try:
if self._verify_download(fspath):
log.info("Local copy of %s is already up to date." % self.filename)
return fspath
except ArtifactBroken:
log.info("Jenkins artifact could not be identified.")
else:
log.info("This file did not originate from Jenkins, so cannot check.")
else:
log.info("Local file is missing, downloading new.")
filename = self._do_download(fspath)
try:
self._verify_download(filename)
except ArtifactBroken:
log.warning("fingerprint of the downloaded artifact could not be verified")
return filename
def _do_download(self, fspath):
"""
        Download the artifact to a path.
"""
filename, _ = urllib.urlretrieve(self.url, filename=fspath)
return filename
def _verify_download(self, fspath):
"""
Verify that a downloaded object has a valid fingerprint.
"""
local_md5 = self._md5sum(fspath)
fp = Fingerprint(self.build.job.jenkins.baseurl, local_md5, self.build.job.jenkins)
return fp.validate_for_build(os.path.basename(fspath), self.build.job.name, self.build.buildno)
def _md5sum(self, fspath, chunksize=2**20):
"""
A MD5 hashing function intended to produce the same results as that used by
Jenkins.
"""
md5 = hashlib.md5()
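        # Hash in 1 MiB chunks so large artifacts need not fit in memory.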
        with open(fspath, 'rb') as f:
            for chunk in iter(lambda: f.read(chunksize), ''):
                md5.update(chunk)
return md5.hexdigest()
def savetodir(self, dirpath):
"""
        Save the artifact to a folder, using the artifact's default filename.
        The containing directory must already exist.
"""
assert os.path.exists(dirpath)
assert os.path.isdir(dirpath)
outputfilepath = os.path.join(dirpath, self.filename)
return self.save(outputfilepath)
def __repr__(self):
"""
Produce a handy repr-string.
"""
return """<%s.%s %s>""" % (self.__class__.__module__,
self.__class__.__name__,
self.url)
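# Illustrative usage (a sketch, not part of jenkinsapi; the URL and paths are
# placeholder assumptions):
#
#   art = Artifact('build.log', 'http://jenkins.example.com/job/demo/1/artifact/build.log')
#   art.savetodir('/tmp')  # saves /tmp/build.log, verifying the fingerprint when possible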
|
The owners of this kitchen in a summer cottage are keen fans of folklore, old traditions and especially folk songs. The latter can be heard in their house almost every day. The only thing that didn’t answer the needs and mood of this cheerful family was the room, where they cook and eat their food. That is how these people became participants of a TV show, in which families can get interior designs of their dreams absolutely for free. Let’s have a look at what they got!
When the author of the project asked the hosts how their dream kitchen should look, they said they had only two requirements: the kitchen had to be simple and rural, and made from eco-friendly finishing materials. That is why it was decided to base this interior on wood. For a start the walls were finished with pine boards that were processed with a smoothing plane, which made them smooth like silk, and additionally coated with a layer of lacquer. The door was decorated with American walnut veneer, and the kitchen cabinets and worktop were also made in light wood. Thanks to this trick the kitchen suite doesn't stand out against the background of the walls, and the pretty tight room looks more spacious and airy.
Especially for the kitchen backsplash, 6-mm-thick tempered glass panels with a digitally printed image were made by hand. Such a solution enabled the designer to place the cooker close to the wooden wall. And a radiator was concealed behind a decorative perforated cover of wenge color.
Glazed handmade ceramic tiles are traditionally used for facing the stoves, but here these gorgeous pieces adorn the doorway between the kitchen and dining room. Red-and-green checkwork was digitally printed on a piece of paper and used for decorating the bottom part of the kitchen wall. The same tartan was printed on a self-adhesive film and can be now found on the refrigerator. The top parts of the walls were finished with neutral wallpaper. And the role of a decorative border between them is played by a cellulose strip featuring coffee pots, cups and other kitchenware, which visually connects this part of the room with the preparation area (the backsplash print). The floor is finished with HPL laminate with a special undercoat that absorbs noise. This was especially relevant for this family with three little kids.
But the piece of this interior that deserves a special mention is an original designer dining table. For a start the decorator removed the old lacquer with the help of abrasive paper. After that he sawed out a new cellular framework for the table top from veneer, painted the entire construction green and added a touch of golden plates on the sides to bring some elegance to the composition. Finally, various nuts and dried beans filled all the cells, and the table was covered with a piece of thick glass.
|
from PyQt4.QtGui import *
from PyQt4.QtCore import Qt, pyqtSlot, QThread
from ui_roope import *
import sys
import struct
import serial.tools.list_ports
import serial
import math
import time
import datetime
DOWN=180
UP=0
#COMMAND:
# DSSAAPB; (D)rive (S)teps (A)ngle 2bytes, 0-360 (P)en 1byte 0-255 (B)ackwards 1byte 0/1
class DrawThread(QThread):
def __init__(self,roope):
QThread.__init__(self)
self.roope=roope
def run(self):
#self.roope.calibrate_sidestep(120)
#self.roope.calibrate_vertical(120*150,1)
#self.roope.calibrate_pen()
self.roope.drawing_started=datetime.datetime.now()
self.roope.draw_fig()
#self.roope.two_lines(40)
self.terminate()
class Roope(QMainWindow):
def __init__(self,pixel_v_steps,pixel_h_steps):
QMainWindow.__init__(self)
self.gui=Ui_MainWindow()
self.gui.setupUi(self)
self.scene=QGraphicsScene()
self.gui.g_view.setScene(self.scene)
self.port=None
self.connect_to_port()
self.pixel_v_steps=pixel_v_steps
self.pixel_h_steps=pixel_h_steps
self.gui.verticalCorrection.setValue(93.0)
self.gui.sideStepCorrection.setValue(90.0)
self.draw_t=DrawThread(self)
self.gui.position_label.setText("")
self.progress=0
self.total_pixels=0 #draw_fig will set this
self.drawing_started=None #The draw process will fill this
self.v_to_h_ratio=1.28 #Multiplier for the vertical step to get a square
# def refreshSerialPorts(self):
# self.gui.portList.clear()
# for path,comment,HWID in serial.tools.list_ports.comports():
# if not "USB" in path:
# continue
# self.gui.portList.addItem(path)
def connect_to_port(self,port=None):
if self.port!=None:
self.port.close()
self.port=None
if port==None: #Try to auto-detect
for path,comment,HWID in serial.tools.list_ports.comports():
if "ttyUSB" in path:
port=path
break
if port:
self.port=serial.Serial(port,9600,bytesize=serial.EIGHTBITS,parity=serial.PARITY_NONE,stopbits=serial.STOPBITS_ONE)
time.sleep(3)
print >> sys.stderr, "Connected to", port
else:
print >> sys.stderr, "Couldn't find any port to connect to"
def load(self,fig_file,height=50):
img=QImage(fig_file)
img2=img.scaledToHeight(height,Qt.SmoothTransformation)
self.image=img2
img_vis=self.BW2(img2)
pix_map=QPixmap(img_vis)
pix_map_item=self.scene.addPixmap(pix_map)
scale= min(self.scene.itemsBoundingRect().height()/float(self.gui.g_view.height()),self.scene.itemsBoundingRect().width()/float(self.gui.g_view.width()))
self.gui.g_view.scale(scale,scale)
#self.gui.g_view.fitInView(pix_map_item,Qt.KeepAspectRatio)
def BW2(self,img,white_level=100):
img2=img.copy()
for x in xrange(img2.width()):
for y in xrange(img2.height()):
g=qGray(img2.pixel(x,y))
g=255-g
if g<=white_level:
g=0
else:
g=(g//51)*51
img2.setPixel(x,y,qRgb(255-g,255-g,255-g))
return img2
def BW(self,img):
img2=img.scaled(img.width()*5,img.height()*5)
for x2 in xrange(img2.width()):
for y2 in xrange(img2.height()):
img2.setPixel(x2,y2,qRgb(255,255,255))
for x in xrange(img.width()):
for y in xrange(img.height()):
p=img.pixel(x,y)
gray=5-(qGray(p)//51)
assert gray<=6
for x2 in xrange(x*5,x*5+gray):
for y2 in xrange(y*5,y*5+5):
img2.setPixel(x2,y2,qRgb(0,0,0))
return img2
def comm(self,s):
if not self.port:
time.sleep(0.005)
return
self.port.write(s)
#Now wait for "OK"
while True:
b=self.port.read()
if b!=";":
sys.stderr.write(b)
sys.stderr.flush()
else:
break
def move_pixel(self,steps,angle,pen,backwards):
print "S=", steps, "A=", angle, "P=", pen, "B=", backwards
command=struct.pack("<cHHBBc","D",steps,angle,pen,backwards,";") #c character, H unsigned 2B int, B unsigned byte "<" little endian
self.comm(command)
def calibrate_sidestep(self,pixel_h_steps):
        counter=1
        for _ in range(150):
            print counter
            self.side_step(UP,pixel_h_steps,20,101)
            counter+=1
def calibrate_pen(self):
while True:
self.move_pixel(200,0,0,False)
self.move_pixel(200,0,255,True)
def two_lines(self,pixel_v_steps):
while True:
self.move_pixel(int(pixel_v_steps*self.gui.verticalCorrection.value()/100.0),0,250,True)
for _ in range(50):
self.move_pixel(int(pixel_v_steps*self.gui.verticalCorrection.value()/100.0),0,0,True)
self.move_pixel(int(pixel_v_steps*self.gui.verticalCorrection.value()/100.0),0,250,True)
self.side_step(UP,pixel_v_steps,20)
self.move_pixel(pixel_v_steps,0,250,False)
for _ in range(50):
self.move_pixel(pixel_v_steps,0,0,False)
self.move_pixel(pixel_v_steps,0,250,False)
self.side_step(UP,pixel_v_steps,20)
def calibrate_vertical(self,pixel_v_steps,reps):
        counter=1
        while True:
            print counter
            for _ in range(reps):
                self.move_pixel(int(pixel_v_steps*self.gui.verticalCorrection.value()/100.0*self.v_to_h_ratio),0,0,True)
            time.sleep(10)
            for _ in range(reps):
                self.move_pixel(int(pixel_v_steps*self.v_to_h_ratio),0,0,False)
            counter+=1
def gohome(self):
print "GO HOME"
command=struct.pack("<cHHBBc","H",0,0,0,False,";") #c character, H unsigned 2B int, B unsigned byte "<" little endian
self.comm(command)
for _ in range(10): #how many pixels to back?
self.move_pixel(self.pixel_v_steps,0,0,True) #backs to position
def draw_fig(self,from_x=0,from_y=0,direction=DOWN):
self.total_pixels=(self.image.width()-from_x)*self.image.height()-from_y
self.gohome() #start by finding home
xs=range(from_x,self.image.width(),self.pixel_h_steps//self.pixel_v_steps)
for x in xs:
# print "X=",x, "Image width:", self.image.width(), "Image height:", self.image.height()
if x==from_x:
y=from_y
else:
if direction==DOWN:
y=0
else:
y=self.image.height()-1
self.follow_line(x,y,direction)
if direction==DOWN:
self.side_step(UP,steps=self.pixel_h_steps,angle=20)
direction=UP
else:
self.side_step(DOWN,steps=self.pixel_h_steps,angle=20)
self.gohome()
direction=DOWN
def follow_line(self,x=0,from_y=0,direction=DOWN):
if direction==DOWN:
ys=xrange(from_y,self.image.height())
backwards=1
elif direction==UP:
ys=xrange(from_y,-1,-1)
backwards=0
for y in ys:
if direction==DOWN:
step=int(self.pixel_v_steps*self.gui.verticalCorrection.value()/100.0)
elif direction==UP:
step=int(self.pixel_v_steps)
step=int(step*self.v_to_h_ratio)
color2=self.image.pixel(x,y)
print "x,y=",x,y
self.move_pixel(step,0,255-qGray(color2),backwards)
self.progress+=1
time_progressed=(datetime.datetime.now()-self.drawing_started).total_seconds()
portion_done=float(self.progress)/self.total_pixels
eta=self.drawing_started+datetime.timedelta(seconds=float(time_progressed)/portion_done)
self.gui.position_label.setText("X=%03d Y=%03d Done: %.2f%% ETA: %02d:%02d:%02d"%(x,y,100.0*portion_done,eta.hour,eta.minute,eta.second))
def side_step(self,direction,steps,angle,pen=0):
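        # Shift one pixel column sideways: drive the hypotenuse under `angle`,
        # then drive straight back so only the sideways displacement remains
        # (sideStepCorrection compensates for mechanical slack).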
angleRAD=math.radians(90-abs(angle))
traverse=int(steps/math.cos(angleRAD)) #How many steps to travel under angle?
back=int(steps*math.tan(angleRAD))
if direction==DOWN:
self.move_pixel(traverse,360-angle,pen,True)
self.move_pixel(int(back*self.gui.sideStepCorrection.value()/100.0),0,pen,False) #maybe less of a correction needed here?
elif direction==UP:
self.move_pixel(traverse,angle,pen,False)
self.move_pixel(int(back*self.gui.sideStepCorrection.value()/100.0),0,pen,True)
def main(app):
global draw_t
roope=Roope(pixel_v_steps=240,pixel_h_steps=240)
#roope.load("20140617_010845.jpg",height=30)
roope.load("spiral.png",height=50)
roope.show()
#roope.draw_fig()
roope.draw_t.start()
#roope.move_pixel(100,0,1,0)
#roope.side_step(UP,100)
#roope.move_pixel(100,0,1,1)
#roope.move_pixel(20,0,101,0)
#roope.move_pixel(200,0,255,0)
#roope.move_pixel(2000,0,180,0)
#roope.follow_line()
#roope.load("photo.jpg")
return app.exec_()
if __name__ == "__main__":
app = QApplication(sys.argv)
sys.exit(main(app))
|
Celebrate Chinese New Year with us!
For any questions, please email [email protected] or call Joanna Lin at 626-428-5082.
Feel free to make a selection for one or both days!
We kindly ask each guest (adult) to sign in upon arrival, so that we can do a headcount.
Lunch meals are served with multiple courses.
Several dishes are traditional vegetarian dishes (no garlic, no onion, no leek, & no green onion). Some are vegan dishes (no dairy products). Some qualify as both vegetarian and vegan. If you like, kindly let us know your preference, so that we can tell you which dish belongs to each category.
All are welcome. Invite your family and friends.
If for any reason you can't come, please kindly let us know so that we don't waste food.
Event is sponsored by World I-Kuan Tao.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This is a skeleton file that can serve as a starting point for a Python
console script. To run this script uncomment the following line in the
entry_points section in setup.cfg:
console_scripts =
fibonacci = siftsite.skeleton:run
Then run `python setup.py install` which will install the command `fibonacci`
inside your current environment.
Besides console scripts, the header (i.e. until _logger...) of this file can
also be used as template for Python modules.
Note: This skeleton file can be safely removed if not needed!
"""
from __future__ import division, print_function, absolute_import
import argparse
import sys
import os
import logging
import requests
from siftsite import __version__
__author__ = "Sam Beck"
__copyright__ = "Sam Beck"
__license__ = "mit"
_logger = logging.getLogger(__name__)
def upload_dir(filepath, label, source):
'''upload whole directory (calls upload on all .png)'''
base_dir = os.path.expanduser(os.path.dirname(filepath))
files = os.listdir(base_dir)
input('will upload {} files, continue or ctrl-c'.format(len(files)))
for f in files:
print(f[-4:])
if f[-4:] == '.png':
upload(os.path.join(base_dir, f), label, source)
def upload(filepath, label, source):
'''POST request to your API with "files" key in requests data dict'''
base_dir = os.path.expanduser(os.path.dirname(filepath))
# url = 'http://localhost:8000/api/'
url = 'https://still-taiga-56301.herokuapp.com/api/'
file_name = os.path.basename(filepath)
with open(os.path.join(base_dir, file_name), 'rb') as fin:
print('file:', base_dir, '/', file_name)
POST_data = {'correct_label': label, 'source': source,
'filename': file_name}
files = {'filename': (file_name, fin), 'file': file_name}
resp = requests.post(url, data=POST_data, files=files)
print(resp)
def setup_logging(loglevel):
"""Setup basic logging
Args:
loglevel (int): minimum loglevel for emitting messages
"""
logformat = "[%(asctime)s] %(levelname)s:%(name)s:%(message)s"
logging.basicConfig(level=loglevel, stream=sys.stdout,
format=logformat, datefmt="%Y-%m-%d %H:%M:%S")
def parse_args(args):
"""Parse command line parameters
Args:
args ([str]): command line parameters as list of strings
Returns:
:obj:`argparse.Namespace`: command line parameters namespace
"""
parser = argparse.ArgumentParser()
parser.add_argument(
'--version',
action='version',
version='siftsite {ver}'.format(ver=__version__))
    parser.add_argument(
        '--upload',
        dest="upload",
        help="Path to image file for upload to labeler API",
        type=str)
    # '--label' and '--source' are added so the upload() call in main() receives
    # all of its required arguments; the defaults are illustrative assumptions.
    parser.add_argument(
        '--label',
        dest="label",
        default='unlabeled',
        help="Correct label to send with the uploaded image",
        type=str)
    parser.add_argument(
        '--source',
        dest="source",
        default='cli',
        help="Source tag to send with the uploaded image",
        type=str)
parser.add_argument(
'-v',
'--verbose',
dest="loglevel",
help="set loglevel to INFO",
action='store_const',
const=logging.INFO)
parser.add_argument(
'-vv',
'--very-verbose',
dest="loglevel",
help="set loglevel to DEBUG",
action='store_const',
const=logging.DEBUG)
return parser.parse_known_args(args)
def main(args):
"""Main entry point allowing external calls
Args:
args ([str]): command line parameter list
"""
args, unknown = parse_args(args)
if args.upload:
        upload(args.upload, args.label, args.source)
else:
print('yeah cool sure')
def run():
"""Entry point for console_scripts
"""
main(sys.argv[1:])
if __name__ == "__main__":
run()
|
Discussion in 'Linux and Unix' started by DuO, Aug 6, 2006.
I need to know how to dual boot XP and Linux. I know how to partition a hard drive but am unsure of which distribution and which dual boot program would be best for me (a Linux beginner). I currently have just XP installed. Thanks in advance.
If you install Linux, just about any distro, after having a working XP in the same computer, the Linux installer will check every partition of every hard disk and will include it as a boot choice if a boot loader is detected inside.
Every MS system has a boot loader installed at the beginning of the partition it resides in. You would have to point a gun at the Linux installer's head in order not to end up with a dual boot.
What the Linux installer can't do is tell whether a partition holds DOS, Win98, Win2k or XP, because MS systems share a common header, generally called the MBR, in their boot loaders.
It is necessary for a Linux, or any other non-MS system you install, to replace the Windows MBR with its own version.
Never be afraid of losing the MBR of any system, as you can restore any of them. The last link of my signature documents all the commands and methods you will ever need.
In a normal Linux installation, with XP pre-installed, you should end up with a dual boot without lifting a finger. If that isn't the case I would throw away the distro and try another one.
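As a concrete illustration of that restore advice (my own sketch, not from the thread; the device and backup path are assumptions), the MBR is simply the first 512 bytes of the disk, so it can be saved and put back with dd, here wrapped in Python:

import subprocess
# Back up the MBR of /dev/sda: 446 bytes of boot code, a 64-byte partition table
# and a 2-byte signature.
subprocess.check_call(['dd', 'if=/dev/sda', 'of=/root/mbr.backup', 'bs=512', 'count=1'])
# Restore only the boot code later, preserving the current partition table:
# subprocess.check_call(['dd', 'if=/root/mbr.backup', 'of=/dev/sda', 'bs=446', 'count=1'])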
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# wrapper.py
#
# Copyright © 2013-2018 Antergos
#
# This file is part of Cnchi.
#
# Cnchi is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Cnchi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# The following additional terms are in effect as per Section 7 of the license:
#
# The preservation of all legal notices and author attributions in
# the material or in the Appropriate Legal Notices displayed
# by works containing it is required.
#
# You should have received a copy of the GNU General Public License
# along with Cnchi; If not, see <http://www.gnu.org/licenses/>.
""" Helper module to run some disk/partition related utilities """
import subprocess
import logging
from misc.extra import InstallError
from misc.run_cmd import call
# When testing, no _() is available
try:
_("")
except NameError as err:
def _(message):
return message
def wipefs(device, fatal=True):
""" Wipe fs from device """
err_msg = "Cannot wipe the filesystem of device {0}".format(device)
cmd = ["wipefs", "-a", device]
call(cmd, msg=err_msg, fatal=fatal)
def run_dd(input_device, output_device, bytes_block=512, count=2048, seek=0):
""" Helper function to call dd
Copy a file, converting and formatting according to the operands."""
cmd = [
'dd',
'if={}'.format(input_device),
'of={}'.format(output_device),
'bs={}'.format(bytes_block),
'count={}'.format(count),
'seek={}'.format(seek),
'status=noxfer']
try:
subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
logging.warning("Command %s failed: %s", err.cmd, err.output)
def partprobe():
""" Runs partprobe """
try:
subprocess.check_output('/usr/bin/partprobe', stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
logging.error("Command %s failed: %s", err.cmd, err.output.decode())
def sgdisk(command, device):
""" Helper function to call sgdisk (GPT) """
if command == 'zap-all':
# this will be the first sgdisk command. Try to run partprobe first
# so previous changes are communicated to the kernel
partprobe()
cmd = ['sgdisk', "--{0}".format(command), device]
try:
subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
logging.error("Command %s failed: %s", err.cmd, err.output.decode())
txt = _("Command {0} failed: {1}").format(err.cmd, err.output.decode())
raise InstallError(txt)
def sgdisk_new(device, part_num, label, size, hex_code):
""" Helper function to call sgdisk --new (GPT) """
# --new: Create a new partition, numbered partnum, starting at sector start
# and ending at sector end.
# Parameters: partnum:start:end (zero in start or end means using default
# value)
# --typecode: Change a partition's GUID type code to the one specified by
# hexcode. Note that hexcode is a gdisk/sgdisk internal
# two-byte hexadecimal code.
# You can obtain a list of codes with the -L option.
# Parameters: partnum:hexcode
# --change-name: Change the name of the specified partition.
# Parameters: partnum:name
cmd = [
'sgdisk',
'--new={0}:0:+{1}M'.format(part_num, size),
'--typecode={0}:{1}'.format(part_num, hex_code),
'--change-name={0}:{1}'.format(part_num, label),
device]
_create_partition_cmd(device, cmd)
def parted_set(device, number, flag, state):
""" Helper function to call set parted command """
cmd = [
'parted', '--align', 'optimal', '--script', device,
'set', number, flag, state]
try:
subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
txt = "Cannot set flag {0} on device {1}. Command {2} has failed: {3}"
txt = txt.format(flag, device, err.cmd, err.output.decode())
logging.error(txt)
def parted_mkpart(device, ptype, start, end, filesystem=""):
""" Helper function to call mkpart parted command """
# If start is < 0 we assume we want to mkpart at the start of the disk
if start < 0:
start_str = "1"
else:
start_str = "{0}MiB".format(start)
# -1s means "end of disk"
if end == "-1s":
end_str = end
else:
end_str = "{0}MiB".format(end)
cmd = [
'parted', '--align', 'optimal', '--script', device,
'--',
'mkpart', ptype, filesystem, start_str, end_str]
_create_partition_cmd(device, cmd)
def _create_partition_cmd(device, cmd):
""" Runs cmd command that tries to create a new partition in device """
try:
subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
txt = "Cannot create a new partition on device {0}. Command {1} has failed: {2}"
txt = txt.format(device, err.cmd, err.output.decode())
logging.error(txt)
txt = _(
"Cannot create a new partition on device {0}. Command {1} has failed: {2}")
txt = txt.format(device, err.cmd, err.output.decode())
raise InstallError(txt)
def parted_mklabel(device, label_type="msdos"):
""" Helper function to call mktable parted command """
cmd = [
"parted", "--align", "optimal", "--script", device,
"mklabel", label_type]
try:
subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
txt = ("Cannot create a new partition table on device {0}. "
"Command {1} failed: {2}")
txt = txt.format(device, err.cmd, err.output.decode())
logging.error(txt)
txt = _("Cannot create a new partition table on device {0}. "
"Command {1} failed: {2}")
txt = txt.format(device, err.cmd, err.output.decode())
raise InstallError(txt)
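# Illustrative example (a sketch, not from Cnchi; '/dev/sdx' and the sizes are
# placeholder assumptions) of combining these helpers to lay out a GPT disk:
#
#   wipefs('/dev/sdx', fatal=False)                   # clear old filesystem signatures
#   sgdisk('zap-all', '/dev/sdx')                     # destroy existing GPT/MBR structures
#   sgdisk_new('/dev/sdx', 1, 'EFI', 512, 'EF00')     # 512 MiB EFI system partition
#   sgdisk_new('/dev/sdx', 2, 'ROOT', 30720, '8300')  # 30 GiB Linux root partition
#   partprobe()                                       # tell the kernel about the new table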
|
This message appears when a significant part of the software cannot be reached. Clicking Abort terminates the operation. Notify GPD Global if this message appears.
As a workaround, you can see what to do when FLOware doesn't load.
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import functools
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
import swiftclient
try: # Python3
from urllib.parse import quote_plus
except ImportError: # Python2
from urllib import quote_plus
from zaqar import storage
from zaqar.storage import errors
from zaqar.storage.swift import utils
class SubscriptionController(storage.Subscription):
"""Implements subscription resource operations with swift backend.
Subscriptions are scoped by queue and project.
subscription -> Swift mapping:
+----------------+---------------------------------------+
| Attribute | Storage location |
+----------------+---------------------------------------+
| Sub UUID | Object name |
+----------------+---------------------------------------+
| Queue Name | Container name prefix |
+----------------+---------------------------------------+
| Project name | Container name prefix |
+----------------+---------------------------------------+
| Created time | Object Creation Time |
+----------------+---------------------------------------+
| Sub options | Object content |
+----------------+---------------------------------------+
"""
def __init__(self, *args, **kwargs):
super(SubscriptionController, self).__init__(*args, **kwargs)
self._client = self.driver.connection
def list(self, queue, project=None, marker=None,
limit=storage.DEFAULT_SUBSCRIPTIONS_PER_PAGE):
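        # Two-step generator: the first yield produces the page of subscriptions,
        # the second yields the marker for the next page (filled in by the cursor
        # as the caller iterates).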
container = utils._subscription_container(queue, project)
try:
_, objects = self._client.get_container(container,
limit=limit,
marker=marker)
except swiftclient.ClientException as exc:
if exc.http_status == 404:
objects = []
else:
raise
marker_next = {}
yield utils.SubscriptionListCursor(
objects, marker_next,
functools.partial(self._client.get_object, container))
yield marker_next and marker_next['next']
def get(self, queue, subscription_id, project=None):
container = utils._subscription_container(queue, project)
try:
headers, data = self._client.get_object(container, subscription_id)
except swiftclient.ClientException as exc:
if exc.http_status == 404:
raise errors.SubscriptionDoesNotExist(subscription_id)
raise
return utils._subscription_to_json(data, headers)
def create(self, queue, subscriber, ttl, options, project=None):
sub_container = utils._subscriber_container(queue, project)
slug = uuidutils.generate_uuid()
try:
utils._put_or_create_container(
self._client,
sub_container,
quote_plus(subscriber),
contents=slug,
headers={'x-delete-after': ttl, 'if-none-match': '*'})
except swiftclient.ClientException as exc:
if exc.http_status == 412:
return
raise
container = utils._subscription_container(queue, project)
data = {'id': slug,
'source': queue,
'subscriber': subscriber,
'options': options,
'ttl': ttl,
'confirmed': False}
utils._put_or_create_container(
self._client, container, slug, contents=jsonutils.dumps(data),
content_type='application/json', headers={'x-delete-after': ttl})
return slug
def update(self, queue, subscription_id, project=None, **kwargs):
container = utils._subscription_container(queue, project)
data = self.get(queue, subscription_id, project)
data.pop('age')
ttl = data['ttl']
if 'subscriber' in kwargs:
sub_container = utils._subscriber_container(queue, project)
try:
self._client.put_object(
sub_container,
quote_plus(kwargs['subscriber']),
contents=subscription_id,
headers={'x-delete-after': ttl, 'if-none-match': '*'})
except swiftclient.ClientException as exc:
if exc.http_status == 412:
raise errors.SubscriptionAlreadyExists()
raise
self._client.delete_object(sub_container,
quote_plus(data['subscriber']))
data.update(kwargs)
self._client.put_object(container,
subscription_id,
contents=jsonutils.dumps(data),
content_type='application/json',
headers={'x-delete-after': ttl})
def exists(self, queue, subscription_id, project=None):
container = utils._subscription_container(queue, project)
return self._client.head_object(container, subscription_id)
def delete(self, queue, subscription_id, project=None):
try:
data = self.get(queue, subscription_id, project)
except errors.SubscriptionDoesNotExist:
return
sub_container = utils._subscriber_container(queue, project)
try:
self._client.delete_object(sub_container,
quote_plus(data['subscriber']))
except swiftclient.ClientException as exc:
if exc.http_status != 404:
raise
container = utils._subscription_container(queue, project)
try:
self._client.delete_object(container, subscription_id)
except swiftclient.ClientException as exc:
if exc.http_status != 404:
raise
def get_with_subscriber(self, queue, subscriber, project=None):
sub_container = utils._subscriber_container(queue, project)
headers, obj = self._client.get_object(sub_container,
quote_plus(subscriber))
return self.get(queue, obj, project)
def confirm(self, queue, subscription_id, project=None, confirmed=True):
self.update(queue, subscription_id, project, confirmed=confirmed)
|
Are you watching the news closely? Then you must have noticed that cybercrime is regularly the news of the day. From hacking Netflix to disrupting parking garages in the Netherlands. Cybercrime is the new way of working among criminals these days. Where they used to break in physically, now they break in remotely into the most advanced systems. Not only multinationals and government agencies, but also small businesses are targets. Is your security up to par?
Security is more than locks on server cabinets, changing passwords and installing anti-virus protection. It goes a lot further. We are dealing with cryptolockers and ransomware, DDoS attacks and identity fraud. A lock on the front door is great, but what about the back door?
It is important to be aware that cybercrime is everyday business. Be reluctant to provide (online) information to third parties. A bank or government will by no means ask you by mail or phone to provide sensitive information. If this happens, you may be in the crosshairs of cybercriminals. Do not pay anything and never provide your information.
In order to keep your working environment as secure and user-friendly as possible, we are happy to help you with a thorough battle plan and design of your ICT environment. You are dependent on functioning and secure ICT. Without ICT, you are likely to be unable to perform your work.
In addition, we offer many additional options, such as anti-virus and anti-malware software for workstations, and the ability to use tokens for two-factor authentication.
Not only security of your ICT, but also of your property?
In addition to protecting your ICT environment, it is also possible to secure your property, warehouse or other space. Both camera images and alarms can now be securely connected via standard Internet connections. How great would it be if you could also include the cameras and maybe the alarm in the service agreement? And of course we also provide support.
Is your security up to par?
|
import sys
import re
from email.utils import parseaddr
from sqlalchemy import not_, func, case, and_, or_, desc, extract
from sqlalchemy.orm import aliased
from sqlalchemy.sql.expression import literal_column, column
from datetime import datetime
from time import gmtime, strftime
from pyramid.view import (
view_config,
)
from pyramid.httpexceptions import (
HTTPFound,
)
import colander
from esipkd.views.base_view import BaseViews
from pyjasper import (JasperGenerator)
from pyjasper import (JasperGeneratorWithSubreport)
import xml.etree.ElementTree as ET
from pyramid.path import AssetResolver
from ..models import (
DBSession,
UserResourcePermission,
Resource,
User,
Group,
)
from ..models.isipkd import *
from ..security import group_in
"""import sys
import unittest
import os.path
import uuid
from osipkd.tools import row2dict, xls_reader
from datetime import datetime
#from sqlalchemy import not_, func
from sqlalchemy import *
from pyramid.view import (view_config,)
from pyramid.httpexceptions import ( HTTPFound, )
import colander
from deform import (Form, widget, ValidationFailure, )
from osipkd.models import DBSession, User, Group, Route, GroupRoutePermission
from osipkd.models.apbd_anggaran import Kegiatan, KegiatanSub, KegiatanItem
from datatables import ColumnDT, DataTables
from osipkd.views.base_view import BaseViews
from pyjasper import (JasperGenerator)
from pyjasper import (JasperGeneratorWithSubreport)
import xml.etree.ElementTree as ET
from pyramid.path import AssetResolver
from osipkd.models.base_model import *
from osipkd.models.pemda_model import *
from osipkd.models.apbd import *
from osipkd.models.apbd_anggaran import *
"""
def get_rpath(filename):
a = AssetResolver('esipkd')
resolver = a.resolve(''.join(['reports/',filename]))
return resolver.abspath()
angka = {1:'satu',2:'dua',3:'tiga',4:'empat',5:'lima',6:'enam',7:'tujuh',\
8:'delapan',9:'sembilan'}
b = ' puluh '
c = ' ratus '
d = ' ribu '
e = ' juta '
f = ' milyar '
g = ' triliun '
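# Terbilang spells out a non-negative integer in Indonesian words, e.g.
# Terbilang(21) -> 'dua puluh satu'. The single-letter globals above are the
# scale words: puluh (tens), ratus (hundreds), ribu (thousands), juta (millions),
# milyar (billions), triliun (trillions).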
def Terbilang(x):
y = str(x)
n = len(y)
if n <= 3 :
if n == 1 :
if y == '0' :
return ''
else :
return angka[int(y)]
elif n == 2 :
if y[0] == '1' :
if y[1] == '1' :
return 'sebelas'
elif y[0] == '0':
x = y[1]
return Terbilang(x)
elif y[1] == '0' :
return 'sepuluh'
else :
return angka[int(y[1])] + ' belas'
elif y[0] == '0' :
x = y[1]
return Terbilang(x)
else :
x = y[1]
return angka[int(y[0])] + b + Terbilang(x)
else :
if y[0] == '1' :
x = y[1:]
return 'seratus ' + Terbilang(x)
elif y[0] == '0' :
x = y[1:]
return Terbilang(x)
else :
x = y[1:]
return angka[int(y[0])] + c + Terbilang(x)
elif 3< n <=6 :
p = y[-3:]
q = y[:-3]
if q == '1' :
return 'seribu' + Terbilang(p)
elif q == '000' :
return Terbilang(p)
else:
return Terbilang(q) + d + Terbilang(p)
elif 6 < n <= 9 :
r = y[-6:]
s = y[:-6]
return Terbilang(s) + e + Terbilang(r)
elif 9 < n <= 12 :
t = y[-9:]
u = y[:-9]
return Terbilang(u) + f + Terbilang(t)
else:
v = y[-12:]
w = y[:-12]
return Terbilang(w) + g + Terbilang(v)
class ViewLaporan(BaseViews):
def __init__(self, context, request):
global logo
global logo_pemda
BaseViews.__init__(self, context, request)
logo = self.request.static_url('esipkd:static/img/logo.png')
logo_pemda = self.request.static_url('esipkd:static/img/logo-pemda.png')
"""BaseViews.__init__(self, context, request)
self.app = 'anggaran'
row = DBSession.query(Tahun.status_apbd).filter(Tahun.tahun==self.tahun).first()
self.session['status_apbd'] = row and row[0] or 0
self.status_apbd = 'status_apbd' in self.session and self.session['status_apbd'] or 0
#self.status_apbd_nm = status_apbd[str(self.status_apbd)]
self.all_unit = 'all_unit' in self.session and self.session['all_unit'] or 0
self.unit_id = 'unit_id' in self.session and self.session['unit_id'] or 0
self.unit_kd = 'unit_kd' in self.session and self.session['unit_kd'] or "X.XX.XX"
self.unit_nm = 'unit_nm' in self.session and self.session['unit_nm'] or "Pilih Unit"
self.keg_id = 'keg_id' in self.session and self.session['keg_id'] or 0
self.datas['status_apbd'] = self.status_apbd
#self.datas['status_apbd_nm'] = self.status_apbd_nm
self.datas['all_unit'] = self.all_unit
self.datas['unit_kd'] = self.unit_kd
self.datas['unit_nm'] = self.unit_nm
self.datas['unit_id'] = self.unit_id
self.cust_nm = 'cust_nm' in self.session and self.session['cust_nm'] or 'PEMERINTAH KABUPATEN TANGERANG'
customer = self.cust_nm
logo = self.request.static_url('osipkd:static/img/logo.png')
"""
    # RECEIPTS REPORT (LAPORAN PENERIMAAN)
@view_config(route_name="report-sspd", renderer="templates/report/report_sspd.pt", permission="read")
def report_sspd(self):
params = self.request.params
return dict()
    # REPORTS (LAPORAN)
@view_config(route_name="report", renderer="templates/report/report.pt", permission="read")
def report(self):
params = self.request.params
return dict()
@view_config(route_name="reports_act")
def reports_act(self):
global awal, akhir, tgl_awal, tgl_akhir, u, unit_kd, unit_nm, unit_al, now, thn,bulan,bulan2
req = self.request
params = req.params
url_dict = req.matchdict
u = req.user.id
now = datetime.now().strftime('%Y-%m-%d')
thn = datetime.now().strftime('%Y')
id = 'id' in params and params['id'] and int(params['id']) or 0
        #---------------------- Report parameters ------------------------------------#
jenis = 'jenis' in params and params['jenis'] and str(params['jenis']) or ''
bayar = 'bayar' in params and params['bayar'] and str(params['bayar']) or ''
rek = 'rek' in params and params['rek'] and str(params['rek']) or ''
h2h = 'h2h' in params and params['h2h'] and str(params['h2h']) or ''
unit = 'unit' in params and params['unit'] and str(params['unit']) or ''
bulan = 'bulan' in params and params['bulan'] and str(params['bulan']) or ''
        # guard against a stale module-level bulan2 from a previous request
        bulan2 = int(bulan) - 1 if bulan != '' else 0
sptpd_id = 'sptpd_id' in params and params['sptpd_id'] and str(params['sptpd_id']) or ''
        if group_in(req, 'bendahara') or group_in(req, 'wp'):
            unit_id = DBSession.query(UserUnit.unit_id
                      ).filter(UserUnit.user_id==u
                      ).first()
            # one-column row; '%s' formatting yields the bare value
            unit_id = int('%s' % unit_id)
            unit = DBSession.query(Unit.kode.label('kd'),
                                   Unit.nama.label('nm'),
                                   Unit.alamat.label('al')
                      ).filter(UserUnit.unit_id==unit_id,
                               Unit.id==unit_id
                      ).first()
            unit_kd = '%s' % unit.kd
            unit_nm = '%s' % unit.nm
            unit_al = '%s' % unit.al
else:
unit_kd = "1.20.05."
unit_nm = "BADAN PENDAPATAN DAERAH"
unit_al = "Jl. Soekarno Hatta, No. 528, Bandung"
#-----------------------------------------------------------------------------#
tgl_awal = 'tgl_awal' in params and params['tgl_awal'] and str(params['tgl_awal']) or 0
tgl_akhir = 'tgl_akhir' in params and params['tgl_akhir'] and str(params['tgl_akhir']) or 0
awal = 'awal' in params and params['awal'] and str(params['awal']) or datetime.now().strftime('%Y-%m-%d')
akhir = 'akhir' in params and params['akhir'] and str(params['akhir']) or datetime.now().strftime('%Y-%m-%d')
        ##----------------------- Report queries ------------------------------------##
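        # Every branch below follows the same recipe: build a SQLAlchemy query,
        # narrow it by the request filters (bayar / h2h / unit), then render the
        # rows to PDF through the matching Jasper generator class defined below.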
if url_dict['act']=='Laporan_1' :
query = DBSession.query(ARInvoice.unit_id.label('un_id'),
ARInvoice.unit_kode.label('un_kd'),
ARInvoice.unit_nama.label('un_nm'),
ARInvoice.rekening_id.label('rek_id'),
ARInvoice.rek_kode.label('rek_kd'),
ARInvoice.rek_nama.label('rek_nm'),
func.sum(ARInvoice.dasar).label('dasar'),
func.sum(ARInvoice.pokok).label('pokok'),
func.sum(ARInvoice.denda).label('denda'),
func.sum(ARInvoice.bunga).label('bunga'),
func.sum(ARInvoice.jumlah).label('jumlah')
).order_by(ARInvoice.unit_kode)
if bayar == '1':
query = query.filter(ARInvoice.tgl_tetap.between(awal,akhir),
ARInvoice.is_tbp==0,
ARInvoice.status_bayar==0,
ARInvoice.rekening_id==rek
).group_by(ARInvoice.unit_id,
ARInvoice.unit_kode,
ARInvoice.unit_nama,
ARInvoice.rekening_id,
ARInvoice.rek_kode,
ARInvoice.rek_nama)
if group_in(req, 'bendahara'):
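                    # Restrict to the bendahara's own unit: the one-column row
                    # is stringified and cast to get the bare integer id (the
                    # same idiom recurs throughout the branches below).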
x = DBSession.query(UserUnit.unit_id).filter(UserUnit.user_id==u).first()
y = '%s' % x
z = int(y)
query = query.filter(ARInvoice.unit_id==z)
elif bayar == '2':
query = query.filter(ARInvoice.tgl_tetap.between(awal,akhir),
or_(ARInvoice.status_bayar==1,
ARInvoice.is_tbp==1),
ARInvoice.rekening_id==rek
).group_by(ARInvoice.unit_id,
ARInvoice.unit_kode,
ARInvoice.unit_nama,
ARInvoice.rekening_id,
ARInvoice.rek_kode,
ARInvoice.rek_nama)
if group_in(req, 'bendahara'):
x = DBSession.query(UserUnit.unit_id).filter(UserUnit.user_id==u).first()
y = '%s' % x
z = int(y)
query = query.filter(ARInvoice.unit_id==z)
elif bayar == '3':
query = query.filter(ARInvoice.tgl_tetap.between(awal,akhir),
ARInvoice.rekening_id==rek
).group_by(ARInvoice.unit_id,
ARInvoice.unit_kode,
ARInvoice.unit_nama,
ARInvoice.rekening_id,
ARInvoice.rek_kode,
ARInvoice.rek_nama)
if group_in(req, 'bendahara'):
x = DBSession.query(UserUnit.unit_id).filter(UserUnit.user_id==u).first()
y = '%s' % x
z = int(y)
query = query.filter(ARInvoice.unit_id==z)
generator = lap1Generator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
elif url_dict['act']=='Laporan_2' :
query = DBSession.query(ARInvoice.unit_id.label('un_id'),
ARInvoice.unit_kode.label('un_kd'),
ARInvoice.unit_nama.label('un_nm'),
ARInvoice.rekening_id.label('rek_id'),
ARInvoice.rek_kode.label('rek_kd'),
ARInvoice.rek_nama.label('rek_nm'),
ARInvoice.kode.label('kd'),
ARInvoice.wp_nama.label('wp_nm'),
ARInvoice.dasar.label('dasar'),
ARInvoice.pokok.label('pokok'),
ARInvoice.denda.label('denda'),
ARInvoice.bunga.label('bunga'),
ARInvoice.jumlah.label('jumlah')
).order_by(ARInvoice.unit_kode,
ARInvoice.kode,
ARInvoice.jumlah)
if bayar == '1':
query = query.filter(ARInvoice.tgl_tetap.between(awal,akhir),
ARInvoice.is_tbp==0,
ARInvoice.status_bayar==0,
ARInvoice.rekening_id==rek)
if group_in(req, 'bendahara'):
x = DBSession.query(UserUnit.unit_id).filter(UserUnit.user_id==u).first()
y = '%s' % x
z = int(y)
query = query.filter(ARInvoice.unit_id==z)
elif bayar == '2':
query = query.filter(ARInvoice.tgl_tetap.between(awal,akhir),
or_(ARInvoice.status_bayar==1,
ARInvoice.is_tbp==1),
ARInvoice.rekening_id==rek)
if group_in(req, 'bendahara'):
x = DBSession.query(UserUnit.unit_id).filter(UserUnit.user_id==u).first()
y = '%s' % x
z = int(y)
query = query.filter(ARInvoice.unit_id==z)
elif bayar == '3':
query = query.filter(ARInvoice.tgl_tetap.between(awal,akhir),
ARInvoice.rekening_id==rek)
if group_in(req, 'bendahara'):
x = DBSession.query(UserUnit.unit_id).filter(UserUnit.user_id==u).first()
y = '%s' % x
z = int(y)
query = query.filter(ARInvoice.unit_id==z)
generator = lap2Generator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
elif url_dict['act']=='Laporan_3' :
query = DBSession.query(ARInvoice.unit_id.label('un_id'),
ARInvoice.unit_kode.label('un_kd'),
ARInvoice.unit_nama.label('un_nm'),
ARInvoice.rekening_id.label('rek_id'),
ARInvoice.rek_kode.label('rek_kd'),
ARInvoice.rek_nama.label('rek_nm'),
func.sum(ARInvoice.dasar).label('dasar'),
func.sum(ARInvoice.pokok).label('pokok'),
func.sum(ARInvoice.denda).label('denda'),
func.sum(ARInvoice.bunga).label('bunga'),
func.sum(ARInvoice.jumlah).label('jumlah')
).order_by(ARInvoice.rek_kode)
if bayar == '1':
query = query.filter(ARInvoice.tgl_tetap.between(awal,akhir),
ARInvoice.is_tbp==0,
ARInvoice.status_bayar==0,
ARInvoice.unit_id==unit
).group_by(ARInvoice.unit_id,
ARInvoice.unit_kode,
ARInvoice.unit_nama,
ARInvoice.rekening_id,
ARInvoice.rek_kode,
ARInvoice.rek_nama)
if group_in(req, 'bendahara'):
x = DBSession.query(UserUnit.unit_id).filter(UserUnit.user_id==u).first()
y = '%s' % x
z = int(y)
query = query.filter(ARInvoice.unit_id==z)
elif bayar == '2':
query = query.filter(ARInvoice.tgl_tetap.between(awal,akhir),
or_(ARInvoice.status_bayar==1,
ARInvoice.is_tbp==1),
ARInvoice.unit_id==unit
).group_by(ARInvoice.unit_id,
ARInvoice.unit_kode,
ARInvoice.unit_nama,
ARInvoice.rekening_id,
ARInvoice.rek_kode,
ARInvoice.rek_nama)
if group_in(req, 'bendahara'):
x = DBSession.query(UserUnit.unit_id).filter(UserUnit.user_id==u).first()
y = '%s' % x
z = int(y)
query = query.filter(ARInvoice.unit_id==z)
elif bayar == '3':
query = query.filter(ARInvoice.tgl_tetap.between(awal,akhir),
ARInvoice.unit_id==unit
).group_by(ARInvoice.unit_id,
ARInvoice.unit_kode,
ARInvoice.unit_nama,
ARInvoice.rekening_id,
ARInvoice.rek_kode,
ARInvoice.rek_nama)
if group_in(req, 'bendahara'):
x = DBSession.query(UserUnit.unit_id).filter(UserUnit.user_id==u).first()
y = '%s' % x
z = int(y)
query = query.filter(ARInvoice.unit_id==z)
generator = lap3Generator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
elif url_dict['act']=='Laporan_4' :
query = DBSession.query(ARInvoice.unit_id.label('un_id'),
ARInvoice.unit_kode.label('un_kd'),
ARInvoice.unit_nama.label('un_nm'),
ARInvoice.rekening_id.label('rek_id'),
ARInvoice.rek_kode.label('rek_kd'),
ARInvoice.rek_nama.label('rek_nm'),
ARInvoice.kode.label('kd'),
ARInvoice.wp_nama.label('wp_nm'),
ARInvoice.dasar.label('dasar'),
ARInvoice.pokok.label('pokok'),
ARInvoice.denda.label('denda'),
ARInvoice.bunga.label('bunga'),
ARInvoice.jumlah.label('jumlah')
).order_by(ARInvoice.rek_kode,
ARInvoice.kode,
ARInvoice.jumlah)
if bayar == '1':
query = query.filter(ARInvoice.tgl_tetap.between(awal,akhir),
ARInvoice.is_tbp==0,
ARInvoice.status_bayar==0,
ARInvoice.unit_id==unit)
if group_in(req, 'bendahara'):
x = DBSession.query(UserUnit.unit_id).filter(UserUnit.user_id==u).first()
y = '%s' % x
z = int(y)
query = query.filter(ARInvoice.unit_id==z)
elif bayar == '2':
query = query.filter(ARInvoice.tgl_tetap.between(awal,akhir),
or_(ARInvoice.status_bayar==1,
ARInvoice.is_tbp==1),
ARInvoice.unit_id==unit)
if group_in(req, 'bendahara'):
x = DBSession.query(UserUnit.unit_id).filter(UserUnit.user_id==u).first()
y = '%s' % x
z = int(y)
query = query.filter(ARInvoice.unit_id==z)
elif bayar == '3':
query = query.filter(ARInvoice.tgl_tetap.between(awal,akhir),
ARInvoice.unit_id==unit)
if group_in(req, 'bendahara'):
x = DBSession.query(UserUnit.unit_id).filter(UserUnit.user_id==u).first()
y = '%s' % x
z = int(y)
query = query.filter(ARInvoice.unit_id==z)
generator = lap4Generator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
elif url_dict['act']=='Laporan_5' :
query = DBSession.query(ARInvoice.unit_id.label('un_id'),
ARInvoice.unit_kode.label('un_kd'),
ARInvoice.unit_nama.label('un_nm'),
ARInvoice.rekening_id.label('rek_id'),
ARInvoice.rek_kode.label('rek_kd'),
ARInvoice.rek_nama.label('rek_nm'),
func.sum(ARInvoice.dasar).label('dasar'),
func.sum(ARInvoice.pokok).label('pokok'),
func.sum(ARInvoice.denda).label('denda'),
func.sum(ARInvoice.bunga).label('bunga'),
func.sum(ARInvoice.jumlah).label('jumlah')
).order_by(ARInvoice.unit_kode,
ARInvoice.rek_kode)
if bayar == '1':
query = query.filter(ARInvoice.tgl_tetap.between(awal,akhir),
ARInvoice.is_tbp==0,
ARInvoice.status_bayar==0
).group_by(ARInvoice.unit_id,
ARInvoice.unit_kode,
ARInvoice.unit_nama,
ARInvoice.rekening_id,
ARInvoice.rek_kode,
ARInvoice.rek_nama)
if group_in(req, 'bendahara'):
x = DBSession.query(UserUnit.unit_id).filter(UserUnit.user_id==u).first()
y = '%s' % x
z = int(y)
query = query.filter(ARInvoice.unit_id==z)
elif bayar == '2':
query = query.filter(ARInvoice.tgl_tetap.between(awal,akhir),
or_(ARInvoice.status_bayar==1,
ARInvoice.is_tbp==1)
).group_by(ARInvoice.unit_id,
ARInvoice.unit_kode,
ARInvoice.unit_nama,
ARInvoice.rekening_id,
ARInvoice.rek_kode,
ARInvoice.rek_nama)
if group_in(req, 'bendahara'):
x = DBSession.query(UserUnit.unit_id).filter(UserUnit.user_id==u).first()
y = '%s' % x
z = int(y)
query = query.filter(ARInvoice.unit_id==z)
elif bayar == '3':
query = query.filter(ARInvoice.tgl_tetap.between(awal,akhir)
).group_by(ARInvoice.unit_id,
ARInvoice.unit_kode,
ARInvoice.unit_nama,
ARInvoice.rekening_id,
ARInvoice.rek_kode,
ARInvoice.rek_nama)
if group_in(req, 'bendahara'):
x = DBSession.query(UserUnit.unit_id).filter(UserUnit.user_id==u).first()
y = '%s' % x
z = int(y)
query = query.filter(ARInvoice.unit_id==z)
generator = lap5Generator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
elif url_dict['act']=='Laporan_6' :
query = DBSession.query(ARInvoice.rekening_id.label('rek_id'),
ARInvoice.rek_kode.label('rek_kd'),
ARInvoice.rek_nama.label('rek_nm'),
ARInvoice.unit_id.label('un_id'),
ARInvoice.unit_kode.label('un_kd'),
ARInvoice.unit_nama.label('un_nm'),
func.sum(ARInvoice.dasar).label('dasar'),
func.sum(ARInvoice.pokok).label('pokok'),
func.sum(ARInvoice.denda).label('denda'),
func.sum(ARInvoice.bunga).label('bunga'),
func.sum(ARInvoice.jumlah).label('jumlah')
).order_by(ARInvoice.rek_kode,
ARInvoice.unit_kode
)
if bayar == '1':
query = query.filter(ARInvoice.tgl_tetap.between(awal,akhir),
ARInvoice.is_tbp==0,
ARInvoice.status_bayar==0
).group_by(ARInvoice.rekening_id,
ARInvoice.rek_kode,
ARInvoice.rek_nama,
ARInvoice.unit_id,
ARInvoice.unit_kode,
ARInvoice.unit_nama)
if group_in(req, 'bendahara'):
x = DBSession.query(UserUnit.unit_id).filter(UserUnit.user_id==u).first()
y = '%s' % x
z = int(y)
query = query.filter(ARInvoice.unit_id==z)
elif bayar == '2':
query = query.filter(ARInvoice.tgl_tetap.between(awal,akhir),
or_(ARInvoice.status_bayar==1,
ARInvoice.is_tbp==1)
).group_by(ARInvoice.rekening_id,
ARInvoice.rek_kode,
ARInvoice.rek_nama,
ARInvoice.unit_id,
ARInvoice.unit_kode,
ARInvoice.unit_nama)
if group_in(req, 'bendahara'):
x = DBSession.query(UserUnit.unit_id).filter(UserUnit.user_id==u).first()
y = '%s' % x
z = int(y)
query = query.filter(ARInvoice.unit_id==z)
elif bayar == '3':
query = query.filter(ARInvoice.tgl_tetap.between(awal,akhir)
).group_by(ARInvoice.rekening_id,
ARInvoice.rek_kode,
ARInvoice.rek_nama,
ARInvoice.unit_id,
ARInvoice.unit_kode,
ARInvoice.unit_nama)
if group_in(req, 'bendahara'):
x = DBSession.query(UserUnit.unit_id).filter(UserUnit.user_id==u).first()
y = '%s' % x
z = int(y)
query = query.filter(ARInvoice.unit_id==z)
generator = lap6Generator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
elif url_dict['act']=='Laporan_8' :
if group_in(req, 'bendahara'):
query = DBSession.query(ARTbp.kode.label('kd'),
ARTbp.wp_nama.label('wp_nm'),
ARTbp.rekening_id.label('rek_id'),
ARTbp.rek_kode.label('rek_kd'),
ARTbp.rek_nama.label('rek_nm'),
ARTbp.unit_id.label('un_id'),
ARTbp.unit_kode.label('un_kd'),
ARTbp.unit_nama.label('un_nm'),
ARTbp.dasar.label('dasar'),
ARTbp.pokok.label('pokok'),
ARTbp.denda.label('denda'),
ARTbp.bunga.label('bunga'),
ARTbp.jumlah.label('jumlah')
).filter(ARTbp.tgl_terima.between(awal,akhir)
).order_by(desc(ARTbp.tgl_terima),
desc(ARTbp.kode)
)
x = DBSession.query(UserUnit.unit_id).filter(UserUnit.user_id==u).first()
y = '%s' % x
z = int(y)
query = query.filter(ARTbp.unit_id==z)
generator = lap8benGenerator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
else:
query = DBSession.query(ARTbp.unit_id.label('un_id'),
ARTbp.unit_kode.label('un_kd'),
ARTbp.unit_nama.label('un_nm'),
ARTbp.rekening_id.label('rek_id'),
ARTbp.rek_kode.label('rek_kd'),
ARTbp.rek_nama.label('rek_nm'),
ARTbp.kode.label('kd'),
ARTbp.wp_nama.label('wp_nm'),
ARTbp.dasar.label('dasar'),
ARTbp.pokok.label('pokok'),
ARTbp.denda.label('denda'),
ARTbp.bunga.label('bunga'),
ARTbp.jumlah.label('jumlah')
).filter(ARTbp.tgl_terima.between(awal,akhir)
).order_by(ARTbp.unit_kode,
desc(ARTbp.tgl_terima),
desc(ARTbp.kode)
)
generator = lap8Generator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
elif url_dict['act']=='Laporan_9' :
if group_in(req, 'bendahara'):
query = DBSession.query(ARInvoice.kode.label('kd'),
ARInvoice.wp_nama.label('wp_nm'),
ARInvoice.rekening_id.label('rek_id'),
ARInvoice.rek_kode.label('rek_kd'),
ARInvoice.rek_nama.label('rek_nm'),
ARInvoice.unit_id.label('un_id'),
ARInvoice.unit_kode.label('un_kd'),
ARInvoice.unit_nama.label('un_nm'),
ARInvoice.dasar.label('dasar'),
ARInvoice.pokok.label('pokok'),
ARInvoice.denda.label('denda'),
ARInvoice.bunga.label('bunga'),
ARInvoice.jumlah.label('jumlah')
).filter(ARInvoice.tgl_tetap.between(awal,akhir)
).order_by(desc(ARInvoice.tgl_tetap),
desc(ARInvoice.kode)
)
x = DBSession.query(UserUnit.unit_id).filter(UserUnit.user_id==u).first()
y = '%s' % x
z = int(y)
query = query.filter(ARInvoice.unit_id==z)
generator = lap9benGenerator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
else:
query = DBSession.query(ARInvoice.unit_id.label('un_id'),
ARInvoice.unit_kode.label('un_kd'),
ARInvoice.unit_nama.label('un_nm'),
ARInvoice.rekening_id.label('rek_id'),
ARInvoice.rek_kode.label('rek_kd'),
ARInvoice.rek_nama.label('rek_nm'),
ARInvoice.kode.label('kd'),
ARInvoice.wp_nama.label('wp_nm'),
ARInvoice.dasar.label('dasar'),
ARInvoice.pokok.label('pokok'),
ARInvoice.denda.label('denda'),
ARInvoice.bunga.label('bunga'),
ARInvoice.jumlah.label('jumlah')
).filter(ARInvoice.tgl_tetap.between(awal,akhir)
).order_by(ARInvoice.unit_kode,
desc(ARInvoice.tgl_tetap),
desc(ARInvoice.kode)
)
generator = lap9Generator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
elif url_dict['act']=='Laporan_7' :
query = DBSession.query(ARSspd.bayar.label('bayar'),
ARSspd.bunga.label('bunga'),
ARSspd.tgl_bayar.label('tgl'),
ARInvoice.kode.label('kd'),
ARInvoice.wp_nama.label('wp_nm'),
ARInvoice.rekening_id.label('rek_id'),
ARInvoice.rek_kode.label('rek_kd'),
ARInvoice.rek_nama.label('rek_nm'),
ARInvoice.unit_id.label('un_id'),
ARInvoice.unit_kode.label('un_kd'),
ARInvoice.unit_nama.label('un_nm'),
ARInvoice.jumlah.label('jumlah')
).join(ARInvoice)
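            # h2h selects the payment channel: '1' keeps host-to-host (bank)
            # payments (bank_id set), '2' keeps manual payments (bank_id is
            # NULL), anything else keeps both.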
if group_in(req, 'bendahara'):
if h2h=='1':
query = query.filter(ARSspd.tgl_bayar.between(awal,akhir),
ARSspd.bayar!=0,
ARSspd.bank_id!=0
).order_by(desc(ARSspd.tgl_bayar),
ARInvoice.kode
)
elif h2h=='2':
query = query.filter(ARSspd.tgl_bayar.between(awal,akhir),
ARSspd.bayar!=0,
ARSspd.bank_id==None
).order_by(desc(ARSspd.tgl_bayar),
ARInvoice.kode
)
else:
query = query.filter(ARSspd.tgl_bayar.between(awal,akhir),
ARSspd.bayar!=0,
).order_by(desc(ARSspd.tgl_bayar),
ARInvoice.kode
)
x = DBSession.query(UserUnit.unit_id).filter(UserUnit.user_id==u).first()
y = '%s' % x
z = int(y)
query = query.filter(ARInvoice.unit_id==z)
generator = lap7benGenerator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
else:
if h2h=='1':
query = query.filter(ARSspd.tgl_bayar.between(awal,akhir),
ARSspd.bayar!=0,
ARSspd.bank_id!=0
).order_by(ARInvoice.unit_kode,
desc(ARSspd.tgl_bayar),
ARInvoice.kode,
ARInvoice.rek_kode
)
elif h2h == '2':
query = query.filter(ARSspd.tgl_bayar.between(awal,akhir),
ARSspd.bayar!=0,
ARSspd.bank_id == None
).order_by(ARInvoice.unit_kode,
desc(ARSspd.tgl_bayar),
ARInvoice.kode,
ARInvoice.rek_kode
)
else:
query = query.filter(ARSspd.tgl_bayar.between(awal,akhir),
ARSspd.bayar!=0
).order_by(ARInvoice.unit_kode,
desc(ARSspd.tgl_bayar),
ARInvoice.kode,
ARInvoice.rek_kode
)
generator = lap7Generator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
elif url_dict['act']=='Laporan_10' :
query = DBSession.query(ARTbp.unit_id.label('un_id'),
ARTbp.unit_kode.label('un_kd'),
ARTbp.unit_nama.label('un_nm'),
ARTbp.rekening_id.label('rek_id'),
ARTbp.rek_kode.label('rek_kd'),
ARTbp.rek_nama.label('rek_nm'),
ARTbp.kode.label('kd'),
ARTbp.invoice_kode.label('invoice_kode'),
ARTbp.tgl_terima.label('tgl_terima'),
ARTbp.periode_1.label('periode_1'),
ARTbp.periode_2.label('periode_2'),
ARTbp.jatuh_tempo.label('jatuh_tempo'),
ARTbp.wp_nama.label('wp_nm'),
ARTbp.op_nama.label('op_nm'),
ARTbp.dasar.label('dasar'),
ARTbp.pokok.label('pokok'),
ARTbp.denda.label('denda'),
ARTbp.bunga.label('bunga'),
ARTbp.jumlah.label('jumlah')
).order_by(ARTbp.unit_kode,
desc(ARTbp.tgl_terima),
desc(ARTbp.kode),
ARTbp.rek_kode,
)
if group_in(req, 'bendahara'):
query = query.filter(ARTbp.tgl_terima.between(awal,akhir),
ARTbp.unit_kode.ilike('%%%s%%' % unit_kd))
generator = lap10Generator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
else:
query = query.filter(ARTbp.tgl_terima.between(awal,akhir))
generator = lap10budGenerator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
elif url_dict['act']=='Laporan_11' :
query = DBSession.query(ARInvoice.unit_id.label('un_id'),
ARInvoice.unit_kode.label('un_kd'),
ARInvoice.unit_nama.label('un_nm'),
ARInvoice.rekening_id.label('rek_id'),
ARInvoice.rek_kode.label('rek_kd'),
ARInvoice.rek_nama.label('rek_nm'),
ARInvoice.kode.label('kd'),
ARInvoice.wp_nama.label('wp_nm'),
ARInvoice.dasar.label('dasar'),
ARInvoice.pokok.label('pokok'),
ARInvoice.denda.label('denda'),
ARInvoice.bunga.label('bunga'),
ARInvoice.jumlah.label('jumlah'),
case([(ARInvoice.status_bayar==0,"Belum")],
else_="Sudah").label('status'))
if group_in(req, 'bendahara'):
                ## payment status flag: map the request value to status_bayar ##
                cek_bayar = bayar
                if cek_bayar == '1':
                    bayar = 0
                elif cek_bayar == '2':
                    bayar = 1
                if cek_bayar == '1':
query = query.filter(ARInvoice.tgl_tetap.between(awal,akhir),
#ARInvoice.is_tbp==0,
ARInvoice.status_bayar==bayar,
ARInvoice.unit_kode.ilike('%%%s%%' % unit_kd)
).order_by(ARInvoice.unit_kode,
ARInvoice.rek_kode,
desc(ARInvoice.tgl_tetap),
desc(ARInvoice.kode))
                elif cek_bayar == '2':
query = query.filter(ARInvoice.tgl_tetap.between(awal,akhir),
#or_(ARInvoice.status_bayar==bayar,
# ARInvoice.is_tbp==1),
ARInvoice.status_bayar==bayar,
ARInvoice.unit_kode.ilike('%%%s%%' % unit_kd)
).order_by(ARInvoice.unit_kode,
ARInvoice.rek_kode,
desc(ARInvoice.tgl_tetap),
desc(ARInvoice.kode))
                elif cek_bayar == '3':
query = query.filter(ARInvoice.tgl_tetap.between(awal,akhir),
ARInvoice.unit_kode.ilike('%%%s%%' % unit_kd)
).order_by(ARInvoice.unit_kode,
ARInvoice.rek_kode,
desc(ARInvoice.tgl_tetap),
desc(ARInvoice.kode))
generator = lap11Generator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
else:
                ## payment status flag: map the request value to status_bayar ##
                cek_bayar = bayar
                if cek_bayar == '1':
                    bayar = 0
                elif cek_bayar == '2':
                    bayar = 1
                if cek_bayar == '1':
query = query.filter(ARInvoice.tgl_tetap.between(awal,akhir),
#ARInvoice.is_tbp==0,
ARInvoice.status_bayar==bayar,
#ARInvoice.unit_kode.ilike('%%%s%%' % unit_kd)
).order_by(ARInvoice.unit_kode,
ARInvoice.rek_kode,
desc(ARInvoice.tgl_tetap),
desc(ARInvoice.kode))
                elif cek_bayar == '2':
query = query.filter(ARInvoice.tgl_tetap.between(awal,akhir),
#or_(ARInvoice.status_bayar==bayar,
# ARInvoice.is_tbp==1),
ARInvoice.status_bayar==bayar,
#ARInvoice.unit_kode.ilike('%%%s%%' % unit_kd)
).order_by(ARInvoice.unit_kode,
ARInvoice.rek_kode,
desc(ARInvoice.tgl_tetap),
desc(ARInvoice.kode))
                elif cek_bayar == '3':
query = query.filter(ARInvoice.tgl_tetap.between(awal,akhir),
#ARInvoice.unit_kode.ilike('%%%s%%' % unit_kd)
).order_by(ARInvoice.unit_kode,
ARInvoice.rek_kode,
desc(ARInvoice.tgl_tetap),
desc(ARInvoice.kode))
generator = lap11budGenerator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
elif url_dict['act']=='Laporan_12' :
if group_in(req, 'bendahara'):
                ## payment status flag: map the request value to status_bayar ##
                cek_bayar = bayar
                if cek_bayar == '1':
                    bayar = 0
                elif cek_bayar == '2':
                    bayar = 1
query = DBSession.query(ARInvoice.unit_id.label('un_id'),
ARInvoice.unit_kode.label('un_kd'),
ARInvoice.unit_nama.label('un_nm'),
ARInvoice.rekening_id.label('rek_id'),
ARInvoice.rek_kode.label('rek_kd'),
ARInvoice.rek_nama.label('rek_nm'),
ARInvoice.kode.label('kd'),
ARInvoice.wp_nama.label('wp_nm'),
ARInvoice.dasar.label('dasar'),
ARInvoice.pokok.label('pokok'),
ARInvoice.denda.label('denda'),
ARInvoice.bunga.label('bunga'),
ARInvoice.jumlah.label('jumlah'),
case([(ARInvoice.status_bayar==0,"Belum")],
else_="Sudah").label('status')
).order_by(ARInvoice.rek_kode,
ARInvoice.unit_kode,
desc(ARInvoice.tgl_tetap),
desc(ARInvoice.kode))
                if cek_bayar == '1':
query = query.filter(ARInvoice.tgl_tetap.between(awal,akhir),
#ARInvoice.is_tbp==0,
ARInvoice.status_bayar==bayar,
ARInvoice.unit_kode.ilike('%%%s%%' % unit_kd))
                elif cek_bayar == '2':
query = query.filter(ARInvoice.tgl_tetap.between(awal,akhir),
#or_(ARInvoice.status_bayar==bayar,
# ARInvoice.is_tbp==1),
ARInvoice.status_bayar==bayar,
ARInvoice.unit_kode.ilike('%%%s%%' % unit_kd))
                elif cek_bayar == '3':
query = query.filter(ARInvoice.tgl_tetap.between(awal,akhir),
ARInvoice.unit_kode.ilike('%%%s%%' % unit_kd))
generator = lap12Generator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
else:
                ## payment status flag: map the request value to status_bayar ##
                cek_bayar = bayar
                if cek_bayar == '1':
                    bayar = 0
                elif cek_bayar == '2':
                    bayar = 1
query = DBSession.query(ARInvoice.unit_id.label('un_id'),
ARInvoice.unit_kode.label('un_kd'),
ARInvoice.unit_nama.label('un_nm'),
ARInvoice.rekening_id.label('rek_id'),
ARInvoice.rek_kode.label('rek_kd'),
ARInvoice.rek_nama.label('rek_nm'),
ARInvoice.kode.label('kd'),
ARInvoice.wp_nama.label('wp_nm'),
ARInvoice.dasar.label('dasar'),
ARInvoice.pokok.label('pokok'),
ARInvoice.denda.label('denda'),
ARInvoice.bunga.label('bunga'),
ARInvoice.jumlah.label('jumlah'),
case([(ARInvoice.status_bayar==0,"Belum")],
else_="Sudah").label('status')
).order_by(ARInvoice.unit_kode,
ARInvoice.rek_kode,
desc(ARInvoice.tgl_tetap),
desc(ARInvoice.kode))
                if cek_bayar == '1':
query = query.filter(ARInvoice.tgl_tetap.between(awal,akhir),
#ARInvoice.is_tbp==0,
#ARInvoice.unit_kode.ilike('%%%s%%' % unit_kd),
ARInvoice.status_bayar==bayar)
                elif cek_bayar == '2':
query = query.filter(ARInvoice.tgl_tetap.between(awal,akhir),
#or_(ARInvoice.status_bayar==bayar,
# ARInvoice.is_tbp==1),
#ARInvoice.unit_kode.ilike('%%%s%%' % unit_kd),
ARInvoice.status_bayar==bayar)
                elif cek_bayar == '3':
query = query.filter(ARInvoice.tgl_tetap.between(awal,akhir))
generator = lap12budGenerator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
elif url_dict['act']=='Laporan_13' :
subq = DBSession.query(Rekening.kode.label('rek_kd'),
Rekening.nama.label('rek_nm'),
func.sum(case([(func.coalesce(Anggaran.perubahan,0)>0,func.coalesce(Anggaran.perubahan,0))],
else_=func.coalesce(Anggaran.murni,0))).label('target')
).select_from(Rekening
).filter(Anggaran.tahun==thn,
Anggaran.kode.ilike(func.concat(Rekening.kode,'%'))
).group_by(Rekening.kode,
Rekening.nama
).order_by(Rekening.kode
).subquery()
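            # subq: the budget target per account for the chosen year,
            # preferring the revised figure (perubahan) over the original
            # one (murni) whenever a revision exists.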
query = DBSession.query(subq.c.rek_kd,
subq.c.rek_nm,
subq.c.target,
func.sum(case([(func.extract('month', ARSspd.tgl_bayar)<=bulan2,func.coalesce(ARSspd.bayar,0))],
else_=0)).label('bayar1'),
func.sum(case([(func.extract('month', ARSspd.tgl_bayar)==bulan,func.coalesce(ARSspd.bayar,0))],
else_=0)).label('bayar2')
).select_from(ARSspd
).join(ARInvoice
).filter(ARSspd.tahun_id==ARInvoice.tahun_id,
ARInvoice.rek_kode.ilike(func.concat(subq.c.rek_kd,'%')),
ARInvoice.tahun_id==thn
).group_by(subq.c.rek_kd,
subq.c.rek_nm,
subq.c.target
).order_by(subq.c.rek_kd
)
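            # bayar1 accumulates payments through the previous month (bulan2),
            # bayar2 the selected month, both measured against the subq target.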
            ##-------- Filter by payment channel ----------##
if h2h=='1':
query = query.filter(ARSspd.bayar!=0,
ARSspd.bank_id!=0)
elif h2h=='2':
query = query.filter(ARSspd.bayar!=0,
ARSspd.bank_id==None)
else:
query = query.filter(ARSspd.bayar!=0)
            ##---------------- Filter by unit --------------##
if group_in(req, 'bendahara'):
x = DBSession.query(UserUnit.unit_id
).filter(UserUnit.user_id==u
).first()
y = '%s' % x
z = int(y)
query = query.filter(ARInvoice.unit_id==z)
generator = lap13Generator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
elif url_dict['act']=='Laporan_14' :
query = DBSession.query(ARSspd.bayar.label('bayar'),
ARSspd.tgl_bayar.label('tgl'),
ARInvoice.rekening_id.label('rek_id'),
ARInvoice.rek_kode.label('rek_kd'),
ARInvoice.rek_nama.label('rek_nm'),
ARInvoice.kode.label('kd'),
ARInvoice.tgl_tetap.label('tgl_ttp'),
ARInvoice.jumlah.label('jumlah'),
Wilayah.kode.label('wil_kd'),
Wilayah.nama.label('wil_nm')
).join(ARInvoice
).outerjoin(Wilayah
).order_by(Wilayah.kode,
ARInvoice.rek_kode,
ARSspd.tgl_bayar,
ARInvoice.kode)
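            # Per-region (Wilayah) payment listing; the outer join keeps
            # invoices that have no region attached.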
            ##-------- Filter by payment channel ----------##
if h2h=='1':
query = query.filter(extract('year', ARSspd.tgl_bayar)==thn,
extract('month', ARSspd.tgl_bayar)==bulan,
ARSspd.bayar!=0,
ARSspd.bank_id!=0)
elif h2h=='2':
query = query.filter(extract('year', ARSspd.tgl_bayar)==thn,
extract('month', ARSspd.tgl_bayar)==bulan,
ARSspd.bayar!=0,
ARSspd.bank_id==None)
else:
query = query.filter(extract('year', ARSspd.tgl_bayar)==thn,
extract('month', ARSspd.tgl_bayar)==bulan,
ARSspd.bayar!=0)
            ##---------------- Filter by unit --------------##
if group_in(req, 'bendahara'):
x = DBSession.query(UserUnit.unit_id
).filter(UserUnit.user_id==u
).first()
y = '%s' % x
z = int(y)
query = query.filter(ARInvoice.unit_id==z)
generator = lap14Generator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
elif url_dict['act']=='Laporan_15' :
query = DBSession.query(func.sum(ARSspd.bayar).label('bayar'),
ARInvoice.rek_kode.label('rek_kd'),
ARInvoice.rek_nama.label('rek_nm'),
func.extract('month', ARSspd.tgl_bayar).label("bln"),
case([(func.extract('month', ARSspd.tgl_bayar)==1,"1"),
(func.extract('month', ARSspd.tgl_bayar)==2,"1"),
(func.extract('month', ARSspd.tgl_bayar)==3,"1"),
(func.extract('month', ARSspd.tgl_bayar)==4,"2"),
(func.extract('month', ARSspd.tgl_bayar)==5,"2"),
(func.extract('month', ARSspd.tgl_bayar)==6,"2"),
(func.extract('month', ARSspd.tgl_bayar)==7,"3"),
(func.extract('month', ARSspd.tgl_bayar)==8,"3"),
(func.extract('month', ARSspd.tgl_bayar)==9,"3"),
(func.extract('month', ARSspd.tgl_bayar)==10,"4"),
(func.extract('month', ARSspd.tgl_bayar)==11,"4"),
(func.extract('month', ARSspd.tgl_bayar)==12,"4")],
else_="4").label("triwulan"),
).join(ARInvoice
).order_by(func.extract('month', ARSspd.tgl_bayar),
ARInvoice.rek_kode
).group_by(func.extract('month', ARSspd.tgl_bayar),
ARInvoice.rek_kode,
ARInvoice.rek_nama
)
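            # 'triwulan' buckets the payment month into its quarter; months
            # 10-12 would fall through to the else_ branch anyway.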
            ##-------- Filter by payment channel ----------##
if h2h=='1':
query = query.filter(extract('year', ARSspd.tgl_bayar)==thn,
ARSspd.bayar!=0,
ARSspd.bank_id!=0
)
elif h2h=='2':
query = query.filter(extract('year', ARSspd.tgl_bayar)==thn,
ARSspd.bayar!=0,
ARSspd.bank_id==None
)
else:
query = query.filter(extract('year', ARSspd.tgl_bayar)==thn,
ARSspd.bayar!=0
)
            ##---------------- Filter by unit --------------##
if group_in(req, 'bendahara'):
x = DBSession.query(UserUnit.unit_id
).filter(UserUnit.user_id==u
).first()
y = '%s' % x
z = int(y)
query = query.filter(ARInvoice.unit_id==z)
generator = lap15Generator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
        ##----------------------------- End of reports -----------------##
###################### USER
elif url_dict['act']=='r001' :
            # SQLAlchemy CASE WHEN: case([(User.status==1,"Aktif")], else_="Tidak Aktif").label("status")
            # iWan Mampir
query = DBSession.query(User.user_name.label('username'), User.email, case([(User.status==1,"Aktif"),],else_="Tidak Aktif").label("status"), User.last_login_date.label('last_login'), User.registered_date).\
order_by(User.user_name).all()
generator = r001Generator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
###################### GROUP
elif url_dict['act']=='r002' :
query = DBSession.query(Group.group_name.label('kode'), Group.description.label('nama')).order_by(Group.group_name).all()
generator = r002Generator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
###################### SKPD/UNIT
elif url_dict['act']=='r003' :
query = DBSession.query(Unit.kode, Unit.nama, Unit.level_id, Unit.is_summary).order_by(Unit.kode).all()
generator = r003Generator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
###################### JABATAN
elif url_dict['act']=='r004' :
query = DBSession.query(Jabatan.kode, Jabatan.nama, Jabatan.status).order_by(Jabatan.kode).all()
generator = r004Generator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
###################### PEGAWAI
elif url_dict['act']=='r005' :
query = DBSession.query(Pegawai.kode, Pegawai.nama).order_by(Pegawai.kode).all()
generator = r005Generator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
###################### REKENING
elif url_dict['act']=='r006' :
query = DBSession.query(Rekening.kode, Rekening.nama, Rekening.level_id, Rekening.is_summary).order_by(Rekening.kode).all()
generator = r006Generator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
###################### PAJAK DAN TARIF
elif url_dict['act']=='r007' :
query = DBSession.query(Pajak.kode, Pajak.nama, Rekening.nama.label('rek_nm'), Pajak.tahun, Pajak.tarif
).filter(Pajak.rekening_id==Rekening.id
).order_by(Pajak.kode).all()
generator = r007Generator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
###################### WILAYAH
elif url_dict['act']=='r008' :
query = DBSession.query(Wilayah.kode, Wilayah.nama, Wilayah.level_id).order_by(Wilayah.kode).all()
generator = r008Generator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
###################### JENISPAJAK
        # SQLAlchemy CASE WHEN: case([(JnsPajak.status==1,"Aktif")], else_="Tidak Aktif").label("status")
        # iWan Mampir
elif url_dict['act']=='semua_sektor' :
query = DBSession.query(JnsPajak.kode, JnsPajak.nama, case([(JnsPajak.status==1,"Aktif"),],else_="Tidak Aktif").label("status")).order_by(JnsPajak.kode).all()
generator = semua_sektorGenerator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
### --------- SUBJEK PAJAK --------- ###
elif url_dict['act']=='rSubjekPajak' :
query = DBSession.query(SubjekPajak.kode,
SubjekPajak.nama,
SubjekPajak.alamat_1,
SubjekPajak.kelurahan,
SubjekPajak.kecamatan,
SubjekPajak.kota,
SubjekPajak.email,
case([(SubjekPajak.status==1,"Aktif")],
else_="Tidak Aktif").label("status"),
Unit.nama.label('unit')
).join(Unit
).filter(SubjekPajak.status_grid==0
).order_by(Unit.kode,
desc(SubjekPajak.kode))
if group_in(req, 'bendahara'):
query = query.join(UserUnit
).filter(UserUnit.user_id==u)
generator = rSubjekPajakGenerator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
### --------- OBJEK PAJAK --------- ###
elif url_dict['act']=='r010' :
query = DBSession.query(ObjekPajak.nama.label('op_nm'),
SubjekPajak.kode.label('sp_kd'),
SubjekPajak.nama.label('sp_nm'),
Pajak.kode.label('p_kd'),
Wilayah.nama.label('w_nm'),
case([(SubjekPajak.status==1,"Aktif")],
else_="Tidak Aktif").label("status"),
Unit.nama.label('unit')
).join(SubjekPajak
).join(Unit
).outerjoin(Pajak
).outerjoin(Wilayah
).filter(ObjekPajak.status_grid==0,
ObjekPajak.status==1
).order_by(Unit.kode,
SubjekPajak.kode,
ObjekPajak.kode
)
if group_in(req, 'wp'):
query = query.filter(SubjekPajak.email==req.user.email)
elif group_in(req, 'bendahara'):
query = query.filter(SubjekPajak.unit_id==Unit.id)
query = query.join(UserUnit).filter(UserUnit.unit_id==Unit.id,
UserUnit.user_id==u)
generator = r010Generator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
###################### ARINVOICE FAST PAY
elif url_dict['act']=='r101' :
query = DBSession.query(ARInvoice
).filter(ARInvoice.id==id,
ARInvoice.status_grid==1,
#ARInvoice.tgl_tetap.between(awal,akhir)
).order_by(ARInvoice.kode)
if u != 1:
query = query.filter(ARInvoice.owner_id==u)
generator = r101Generator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
###################### ARINVOICE
elif url_dict['act']=='r100' :
query = DBSession.query(ARInvoice
).filter(ARInvoice.id==id
).order_by(ARInvoice.kode).all()
generator = r100Generator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
###################### ARSSPD
        # SQLAlchemy TRIM(TO_CHAR(...)) idiom: func.trim(func.to_char(ARInvoice.tarif,'999,999,999,990')).label('tarif')
        # iWan Mampir
elif url_dict['act']=='r200' :
query = DBSession.query(ARSspd.id,
ARInvoice.kode,
ARInvoice.wp_kode,
ARInvoice.wp_nama,
ARInvoice.op_kode,
ARInvoice.op_nama,
ARInvoice.rek_kode,
ARInvoice.rek_nama,
func.trim(func.to_char(ARSspd.bayar,'999,999,999,990')).label('bayar'),
ARSspd.tgl_bayar,
).join(ARInvoice
).filter(and_(ARSspd.tgl_bayar>=tgl_awal, ARSspd.tgl_bayar<=tgl_akhir)
).order_by(ARSspd.id).all()
generator = r200Generator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
elif url_dict['act']=='r200frm' :
query = DBSession.query(ARSspd.id,
ARSspd.tgl_bayar,
ARInvoice.wp_kode,
ARInvoice.wp_nama,
ARInvoice.op_kode,
ARInvoice.op_nama,
ARInvoice.rek_kode,
ARInvoice.rek_nama,
ARInvoice.unit_kode,
ARInvoice.unit_nama,
ARInvoice.kode,
func.trim(func.to_char(ARInvoice.tarif,'999,999,999,990')).label('tarif'),
func.trim(func.to_char(ARInvoice.dasar,'999,999,999,990')).label('dasar'),
func.trim(func.to_char(ARInvoice.pokok,'999,999,999,990')).label('pokok'),
func.trim(func.to_char(ARInvoice.bunga,'999,999,999,990')).label('bunga'),
func.trim(func.to_char(ARInvoice.denda,'999,999,999,990')).label('denda'),
func.trim(func.to_char(ARSspd.bayar,'999,999,999,990')).label('bayar'),
).join(ARInvoice
                ).filter(ARSspd.id==id)
generator = r200frmGenerator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
###################### ARSTS
elif url_dict['act']=='r300' :
query = DBSession.query(ARSts.id,
ARSts.kode,
ARSts.nama,
ARSts.tgl_sts,
Unit.kode.label('unit_kd'),
Unit.nama.label('unit_nm'),
Unit.alamat.label('unit_al'),
ARStsItem.rek_kode.label('rek_kd'),
ARStsItem.rek_nama.label('rek_nm'),
# ARStsItem.jumlah,
func.trim(func.to_char(ARStsItem.jumlah,'999,999,999,990')).label('jumlah'),
func.trim(func.to_char(ARSts.jumlah,'999,999,999,990')).label('jumlah_sts'),
ARStsItem.kode.label('no_bayar')
).filter(ARSts.id==id,
ARSts.unit_id==Unit.id,
ARStsItem.sts_id==ARSts.id,
ARStsItem.invoice_id==ARInvoice.id,
ARStsItem.rekening_id==Rekening.id,
).order_by(ARStsItem.rek_kode).all()
generator = r300Generator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
### SPTPD RINCIAN ###
elif url_dict['act']=='sptpd_rincian' :
query = DBSession.query(Sptpd.id,
Sptpd.kode,
Sptpd.wp_nama.label('nama'),
InvoiceDet.sektor_nm,
Sptpd.tgl_sptpd,
Sptpd.periode_1,
Sptpd.periode_2,
InvoiceDet.wilayah_nm,
InvoiceDet.peruntukan_nm,
InvoiceDet.produk_nm,
InvoiceDet.nama.label('wp'),
InvoiceDet.volume,
InvoiceDet.dpp,
InvoiceDet.tarif,
InvoiceDet.total_pajak,
).filter(Sptpd.id==req.params['sptpd_id'],
InvoiceDet.sptpd_id==Sptpd.id
).order_by(Sptpd.kode,
InvoiceDet.sektor_nm,
InvoiceDet.wilayah_nm,
InvoiceDet.nama,
InvoiceDet.peruntukan_nm,
InvoiceDet.produk_nm
).all()
generator = rSptpdRincianGenerator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
### SPTPD SSPD ###
elif url_dict['act']=='sptpd_sspd' :
query = DBSession.query(Sptpd.id,
Sptpd.wp_nama.label('nama'),
Sptpd.wp_alamat_1,
InvoiceDet.produk_nm,
Sptpd.periode_1,
Sptpd.periode_2,
Sptpd.tgl_sptpd,
SubjekPajak.kode,
func.sum(InvoiceDet.total_pajak).label('total_pajak')
).filter(Sptpd.id==req.params['sptpd_id'],
InvoiceDet.sptpd_id==Sptpd.id,
SubjekPajak.id==Sptpd.subjek_pajak_id
).group_by(Sptpd.id,
SubjekPajak.kode,
Sptpd.nama,
Sptpd.wp_alamat_1,
InvoiceDet.produk_nm,
Sptpd.periode_1,
Sptpd.periode_2,
Sptpd.tgl_sptpd,
).order_by(Sptpd.kode,
InvoiceDet.produk_nm
).all()
generator = rSptpdSspdGenerator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
### SPTPD Lampiran ###
elif url_dict['act']=='sptpd_lampiran' :
query = DBSession.query(Sptpd.id,
Sptpd.kode,
Sptpd.nama,
Sptpd.wp_kode,
Sptpd.wp_nama,
Sptpd.wp_alamat_1,
Sptpd.tgl_sptpd,
Sptpd.periode_1,
Sptpd.periode_2,
Sptpd.tahun_id,
InvoiceDet.sektor_id,
InvoiceDet.sektor_nm,
InvoiceDet.produk_nm,
func.sum(InvoiceDet.volume).label('volume'),
func.sum(InvoiceDet.dpp).label('dpp'),
func.sum(InvoiceDet.total_pajak).label('total_pajak'),
).filter(Sptpd.id==req.params['sptpd_id'],
InvoiceDet.sptpd_id==Sptpd.id
).group_by(Sptpd.id,
Sptpd.kode,
Sptpd.nama,
Sptpd.wp_kode,
Sptpd.wp_nama,
Sptpd.wp_alamat_1,
Sptpd.tgl_sptpd,
Sptpd.periode_1,
Sptpd.periode_2,
Sptpd.tahun_id,
InvoiceDet.sektor_id,
InvoiceDet.sektor_nm,
InvoiceDet.produk_nm,
).order_by(Sptpd.kode,
InvoiceDet.sektor_id,
InvoiceDet.produk_nm,
).all()
generator = rSptpdLampiranGenerator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
### SPTPD ###
elif url_dict['act']=='sptpd' :
query = DBSession.query(Sptpd.id,
Sptpd.kode,
Sptpd.nama,
Sptpd.wp_kode,
Sptpd.wp_nama,
Sptpd.wp_alamat_1,
Sptpd.tgl_sptpd,
Sptpd.periode_1,
Sptpd.periode_2,
Sptpd.tahun_id,
InvoiceDet.sektor_id,
InvoiceDet.sektor_nm,
InvoiceDet.produk_nm,
func.sum(InvoiceDet.volume).label('volume'),
func.sum(InvoiceDet.dpp).label('dpp'),
func.sum(InvoiceDet.total_pajak).label('total_pajak'),
).filter(Sptpd.id==req.params['sptpd_id'],
InvoiceDet.sptpd_id==Sptpd.id
).group_by(Sptpd.id,
Sptpd.kode,
Sptpd.nama,
Sptpd.wp_kode,
Sptpd.wp_nama,
Sptpd.wp_alamat_1,
Sptpd.tgl_sptpd,
Sptpd.periode_1,
Sptpd.periode_2,
Sptpd.tahun_id,
InvoiceDet.sektor_id,
InvoiceDet.sektor_nm,
InvoiceDet.produk_nm,
).order_by(Sptpd.kode,
InvoiceDet.sektor_id,
InvoiceDet.produk_nm,
).all()
generator = rSptpdGenerator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
###################### E-SAMSAT
elif url_dict['act']=='esamsat' :
query = self.request.params
generator = r400Generator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
###################### E-PAP
elif url_dict['act']=='epap' :
query = self.request.params
generator = r500Generator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
else:
            return HTTPNotFound()  # TODO: warn the user about missing access rights (hak akses)
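## ------------------------------------------------------------------------ ##
## The generator classes below all follow the same JasperGenerator recipe:
## point reportname at a .jrxml template, declare the row element's XPath,
## then emit one XML sub-element per result row in generate_xml().
##
## Minimal refactoring sketch (hypothetical helpers, not wired into the
## handlers above) for the two idioms this module repeats throughout:
def _pdf_response(req, generator, query, filename='output.pdf'):
    # render a query through a Jasper generator and stream it back as PDF
    pdf = generator.generate(query)
    response = req.response
    response.content_type = "application/pdf"
    response.content_disposition = 'filename=%s' % filename
    response.write(pdf)
    return response

def _user_unit_id(user_id):
    # the current user's unit id; the query yields a one-column row, which
    # '%s' formatting reduces to its bare value before the int() cast
    row = DBSession.query(UserUnit.unit_id
          ).filter(UserUnit.user_id==user_id
          ).first()
    return int('%s' % row)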
class rSptpdRincianGenerator(JasperGenerator):
def __init__(self):
super(rSptpdRincianGenerator, self).__init__()
self.reportname = get_rpath('sptpd_rincian.jrxml')
self.xpath = '/webr/sptpd_rincian'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'sptpd_rincian')
ET.SubElement(xml_greeting, "id").text = unicode(row.id)
ET.SubElement(xml_greeting, "kode").text = row.kode
ET.SubElement(xml_greeting, "nama").text = row.nama
ET.SubElement(xml_greeting, "sektor_nm").text = row.sektor_nm
ET.SubElement(xml_greeting, "produk_nm").text = row.produk_nm
ET.SubElement(xml_greeting, "wilayah_nm").text = row.wilayah_nm
ET.SubElement(xml_greeting, "peruntukan_nm").text = row.peruntukan_nm
ET.SubElement(xml_greeting, "wp").text = row.wp
ET.SubElement(xml_greeting, "tgl_sptpd").text = unicode(row.tgl_sptpd)
ET.SubElement(xml_greeting, "periode_1").text = unicode(row.periode_1)
ET.SubElement(xml_greeting, "periode_2").text = unicode(row.periode_2)
ET.SubElement(xml_greeting, "volume").text = unicode(row.volume)
ET.SubElement(xml_greeting, "dpp").text = unicode(row.dpp)
ET.SubElement(xml_greeting, "tarif").text = unicode(row.tarif)
ET.SubElement(xml_greeting, "total_pajak").text = unicode(row.total_pajak)
ET.SubElement(xml_greeting, "logo").text = logo_pemda
return self.root
class rSptpdSspdGenerator(JasperGenerator):
def __init__(self):
super(rSptpdSspdGenerator, self).__init__()
self.reportname = get_rpath('sptpd_sspd.jrxml')
self.xpath = '/webr/sptpd_sspd'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'sptpd_sspd')
ET.SubElement(xml_greeting, "id").text = unicode(row.id)
ET.SubElement(xml_greeting, "kode").text = row.kode
ET.SubElement(xml_greeting, "nama").text = row.nama
ET.SubElement(xml_greeting, "alamat_1").text = row.wp_alamat_1
ET.SubElement(xml_greeting, "produk_nm").text = row.produk_nm
ET.SubElement(xml_greeting, "periode_1").text = unicode(row.periode_1)
ET.SubElement(xml_greeting, "periode_2").text = unicode(row.periode_2)
ET.SubElement(xml_greeting, "tgl_sptpd").text = unicode(row.tgl_sptpd)
ET.SubElement(xml_greeting, "total_pajak").text = unicode(row.total_pajak)
ET.SubElement(xml_greeting, "logo").text = logo_pemda
ET.SubElement(xml_greeting, "now").text = now
return self.root
class rSptpdLampiranGenerator(JasperGenerator):
def __init__(self):
super(rSptpdLampiranGenerator, self).__init__()
self.reportname = get_rpath('sptpd_lampiran.jrxml')
self.xpath = '/webr/sptpd_lampiran'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'sptpd_lampiran')
ET.SubElement(xml_greeting, "id").text = unicode(row.id)
ET.SubElement(xml_greeting, "kode").text = row.kode
ET.SubElement(xml_greeting, "nama").text = row.nama
ET.SubElement(xml_greeting, "wp_kode").text = row.wp_kode
ET.SubElement(xml_greeting, "wp_nama").text = row.wp_nama
ET.SubElement(xml_greeting, "wp_alamat_1").text = row.wp_alamat_1
ET.SubElement(xml_greeting, "tgl_sptpd").text = unicode(row.tgl_sptpd)
ET.SubElement(xml_greeting, "periode_1").text = unicode(row.periode_1)
ET.SubElement(xml_greeting, "periode_2").text = unicode(row.periode_2)
ET.SubElement(xml_greeting, "tahun_id").text = unicode(row.tahun_id)
ET.SubElement(xml_greeting, "sektor_id").text = unicode(row.sektor_id)
ET.SubElement(xml_greeting, "sektor_nm").text = row.sektor_nm
ET.SubElement(xml_greeting, "produk_nm").text = row.produk_nm
ET.SubElement(xml_greeting, "volume").text = unicode(row.volume)
ET.SubElement(xml_greeting, "dpp").text = unicode(row.dpp)
ET.SubElement(xml_greeting, "total_pajak").text = unicode(row.total_pajak)
ET.SubElement(xml_greeting, "logo").text = logo_pemda
ET.SubElement(xml_greeting, "now").text = now
return self.root
class rSptpdGenerator(JasperGenerator):
def __init__(self):
super(rSptpdGenerator, self).__init__()
self.reportname = get_rpath('sptpd.jrxml')
self.xpath = '/webr/sptpd'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'sptpd')
ET.SubElement(xml_greeting, "id").text = unicode(row.id)
ET.SubElement(xml_greeting, "kode").text = row.kode
ET.SubElement(xml_greeting, "nama").text = row.nama
ET.SubElement(xml_greeting, "wp_kode").text = row.wp_kode
ET.SubElement(xml_greeting, "wp_nama").text = row.wp_nama
ET.SubElement(xml_greeting, "wp_alamat_1").text = row.wp_alamat_1
ET.SubElement(xml_greeting, "tgl_sptpd").text = unicode(row.tgl_sptpd)
ET.SubElement(xml_greeting, "periode_1").text = unicode(row.periode_1)
ET.SubElement(xml_greeting, "periode_2").text = unicode(row.periode_2)
ET.SubElement(xml_greeting, "tahun_id").text = unicode(row.tahun_id)
ET.SubElement(xml_greeting, "sektor_id").text = unicode(row.sektor_id)
ET.SubElement(xml_greeting, "sektor_nm").text = row.sektor_nm
ET.SubElement(xml_greeting, "produk_nm").text = row.produk_nm
ET.SubElement(xml_greeting, "volume").text = unicode(row.volume)
ET.SubElement(xml_greeting, "dpp").text = unicode(row.dpp)
ET.SubElement(xml_greeting, "total_pajak").text = unicode(row.total_pajak)
ET.SubElement(xml_greeting, "logo").text = logo_pemda
ET.SubElement(xml_greeting, "now").text = now
return self.root
## ----------------- LAPORAN -------------------------------------------##
class lap1Generator(JasperGenerator):
def __init__(self):
super(lap1Generator, self).__init__()
self.reportname = get_rpath('Lap1.jrxml')
self.xpath = '/webr/lap1'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'lap1')
ET.SubElement(xml_greeting, "un_id").text = unicode(row.un_id)
ET.SubElement(xml_greeting, "un_kd").text = row.un_kd
ET.SubElement(xml_greeting, "un_nm").text = row.un_nm
ET.SubElement(xml_greeting, "rek_id").text = unicode(row.rek_id)
ET.SubElement(xml_greeting, "rek_kd").text = row.rek_kd
ET.SubElement(xml_greeting, "rek_nm").text = row.rek_nm
ET.SubElement(xml_greeting, "dasar").text = unicode(row.dasar)
ET.SubElement(xml_greeting, "pokok").text = unicode(row.pokok)
ET.SubElement(xml_greeting, "denda").text = unicode(row.denda)
ET.SubElement(xml_greeting, "bunga").text = unicode(row.bunga)
ET.SubElement(xml_greeting, "jumlah").text = unicode(row.jumlah)
ET.SubElement(xml_greeting, "logo").text = logo_pemda
return self.root
class lap2Generator(JasperGenerator):
def __init__(self):
super(lap2Generator, self).__init__()
self.reportname = get_rpath('Lap2.jrxml')
self.xpath = '/webr/lap2'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'lap2')
ET.SubElement(xml_greeting, "un_id").text = unicode(row.un_id)
ET.SubElement(xml_greeting, "un_kd").text = row.un_kd
ET.SubElement(xml_greeting, "un_nm").text = row.un_nm
ET.SubElement(xml_greeting, "rek_id").text = unicode(row.rek_id)
ET.SubElement(xml_greeting, "rek_kd").text = row.rek_kd
ET.SubElement(xml_greeting, "rek_nm").text = row.rek_nm
ET.SubElement(xml_greeting, "kd").text = row.kd
ET.SubElement(xml_greeting, "wp_nm").text = row.wp_nm
ET.SubElement(xml_greeting, "dasar").text = unicode(row.dasar)
ET.SubElement(xml_greeting, "pokok").text = unicode(row.pokok)
ET.SubElement(xml_greeting, "denda").text = unicode(row.denda)
ET.SubElement(xml_greeting, "bunga").text = unicode(row.bunga)
ET.SubElement(xml_greeting, "jumlah").text = unicode(row.jumlah)
ET.SubElement(xml_greeting, "logo").text = logo_pemda
return self.root
class lap3Generator(JasperGenerator):
def __init__(self):
super(lap3Generator, self).__init__()
self.reportname = get_rpath('Lap3.jrxml')
self.xpath = '/webr/lap3'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'lap3')
ET.SubElement(xml_greeting, "un_id").text = unicode(row.un_id)
ET.SubElement(xml_greeting, "un_kd").text = row.un_kd
ET.SubElement(xml_greeting, "un_nm").text = row.un_nm
ET.SubElement(xml_greeting, "rek_id").text = unicode(row.rek_id)
ET.SubElement(xml_greeting, "rek_kd").text = row.rek_kd
ET.SubElement(xml_greeting, "rek_nm").text = row.rek_nm
ET.SubElement(xml_greeting, "dasar").text = unicode(row.dasar)
ET.SubElement(xml_greeting, "pokok").text = unicode(row.pokok)
ET.SubElement(xml_greeting, "denda").text = unicode(row.denda)
ET.SubElement(xml_greeting, "bunga").text = unicode(row.bunga)
ET.SubElement(xml_greeting, "jumlah").text = unicode(row.jumlah)
ET.SubElement(xml_greeting, "logo").text = logo_pemda
return self.root
class lap4Generator(JasperGenerator):
def __init__(self):
super(lap4Generator, self).__init__()
self.reportname = get_rpath('Lap4.jrxml')
self.xpath = '/webr/lap4'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'lap4')
ET.SubElement(xml_greeting, "un_id").text = unicode(row.un_id)
ET.SubElement(xml_greeting, "un_kd").text = row.un_kd
ET.SubElement(xml_greeting, "un_nm").text = row.un_nm
ET.SubElement(xml_greeting, "rek_id").text = unicode(row.rek_id)
ET.SubElement(xml_greeting, "rek_kd").text = row.rek_kd
ET.SubElement(xml_greeting, "rek_nm").text = row.rek_nm
ET.SubElement(xml_greeting, "kd").text = row.kd
ET.SubElement(xml_greeting, "wp_nm").text = row.wp_nm
ET.SubElement(xml_greeting, "dasar").text = unicode(row.dasar)
ET.SubElement(xml_greeting, "pokok").text = unicode(row.pokok)
ET.SubElement(xml_greeting, "denda").text = unicode(row.denda)
ET.SubElement(xml_greeting, "bunga").text = unicode(row.bunga)
ET.SubElement(xml_greeting, "jumlah").text = unicode(row.jumlah)
ET.SubElement(xml_greeting, "logo").text = logo_pemda
return self.root
class lap5Generator(JasperGenerator):
def __init__(self):
super(lap5Generator, self).__init__()
self.reportname = get_rpath('Lap5.jrxml')
self.xpath = '/webr/lap5'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'lap5')
ET.SubElement(xml_greeting, "un_id").text = unicode(row.un_id)
ET.SubElement(xml_greeting, "un_kd").text = row.un_kd
ET.SubElement(xml_greeting, "un_nm").text = row.un_nm
ET.SubElement(xml_greeting, "rek_id").text = unicode(row.rek_id)
ET.SubElement(xml_greeting, "rek_kd").text = row.rek_kd
ET.SubElement(xml_greeting, "rek_nm").text = row.rek_nm
ET.SubElement(xml_greeting, "dasar").text = unicode(row.dasar)
ET.SubElement(xml_greeting, "pokok").text = unicode(row.pokok)
ET.SubElement(xml_greeting, "denda").text = unicode(row.denda)
ET.SubElement(xml_greeting, "bunga").text = unicode(row.bunga)
ET.SubElement(xml_greeting, "jumlah").text = unicode(row.jumlah)
ET.SubElement(xml_greeting, "logo").text = logo_pemda
return self.root
class lap6Generator(JasperGenerator):
def __init__(self):
super(lap6Generator, self).__init__()
self.reportname = get_rpath('Lap6.jrxml')
self.xpath = '/webr/lap6'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'lap6')
ET.SubElement(xml_greeting, "rek_id").text = unicode(row.rek_id)
ET.SubElement(xml_greeting, "rek_kd").text = row.rek_kd
ET.SubElement(xml_greeting, "rek_nm").text = row.rek_nm
ET.SubElement(xml_greeting, "un_id").text = unicode(row.un_id)
ET.SubElement(xml_greeting, "un_kd").text = row.un_kd
ET.SubElement(xml_greeting, "un_nm").text = row.un_nm
ET.SubElement(xml_greeting, "dasar").text = unicode(row.dasar)
ET.SubElement(xml_greeting, "pokok").text = unicode(row.pokok)
ET.SubElement(xml_greeting, "denda").text = unicode(row.denda)
ET.SubElement(xml_greeting, "bunga").text = unicode(row.bunga)
ET.SubElement(xml_greeting, "jumlah").text = unicode(row.jumlah)
ET.SubElement(xml_greeting, "logo").text = logo_pemda
return self.root
class lap7Generator(JasperGenerator):
def __init__(self):
super(lap7Generator, self).__init__()
self.reportname = get_rpath('Lap7.jrxml')
self.xpath = '/webr/lap7'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
ttd=DBSession.query(Pegawai.kode.label('pg_kd'),
Pegawai.nama.label('pg_nm')
).filter(Pegawai.user_id==u
).first()
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'lap7')
ET.SubElement(xml_greeting, "un_id").text = unicode(row.un_id)
ET.SubElement(xml_greeting, "un_kd").text = row.un_kd
ET.SubElement(xml_greeting, "un_nm").text = row.un_nm
ET.SubElement(xml_greeting, "rek_id").text = unicode(row.rek_id)
ET.SubElement(xml_greeting, "rek_kd").text = row.rek_kd
ET.SubElement(xml_greeting, "rek_nm").text = row.rek_nm
ET.SubElement(xml_greeting, "kd").text = row.kd
ET.SubElement(xml_greeting, "wp_nm").text = row.wp_nm
ET.SubElement(xml_greeting, "bunga").text = unicode(row.bunga)
ET.SubElement(xml_greeting, "bayar").text = unicode(row.bayar)
ET.SubElement(xml_greeting, "tgl").text = unicode(row.tgl)
ET.SubElement(xml_greeting, "jumlah").text = unicode(row.jumlah)
ET.SubElement(xml_greeting, "logo").text = logo_pemda
ET.SubElement(xml_greeting, "awal").text = awal
ET.SubElement(xml_greeting, "akhir").text = akhir
#ET.SubElement(xml_greeting, "pg_kd").text = ttd.pg_kd
#ET.SubElement(xml_greeting, "pg_nm").text = ttd.pg_nm
return self.root
class lap7benGenerator(JasperGenerator):
def __init__(self):
super(lap7benGenerator, self).__init__()
self.reportname = get_rpath('Lap7bendahara.jrxml')
self.xpath = '/webr/lap7ben'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
        ttd = DBSession.query(
            Pegawai.kode.label('pg_kd'),
            Pegawai.nama.label('pg_nm'),
        ).filter(Pegawai.user_id == u).first()
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'lap7ben')
ET.SubElement(xml_greeting, "kd").text = row.kd
ET.SubElement(xml_greeting, "wp_nm").text = row.wp_nm
ET.SubElement(xml_greeting, "rek_id").text = unicode(row.rek_id)
ET.SubElement(xml_greeting, "rek_kd").text = row.rek_kd
ET.SubElement(xml_greeting, "rek_nm").text = row.rek_nm
ET.SubElement(xml_greeting, "un_id").text = unicode(row.un_id)
ET.SubElement(xml_greeting, "un_kd").text = row.un_kd
ET.SubElement(xml_greeting, "un_nm").text = row.un_nm
ET.SubElement(xml_greeting, "bunga").text = unicode(row.bunga)
ET.SubElement(xml_greeting, "bayar").text = unicode(row.bayar)
ET.SubElement(xml_greeting, "tgl").text = unicode(row.tgl)
ET.SubElement(xml_greeting, "jumlah").text = unicode(row.jumlah)
ET.SubElement(xml_greeting, "logo").text = logo_pemda
ET.SubElement(xml_greeting, "awal").text = awal
ET.SubElement(xml_greeting, "akhir").text = akhir
ET.SubElement(xml_greeting, "pg_kd").text = ttd.pg_kd
ET.SubElement(xml_greeting, "pg_nm").text = ttd.pg_nm
ET.SubElement(xml_greeting, "un_al").text = unit_al
return self.root
class lap8Generator(JasperGenerator):
def __init__(self):
super(lap8Generator, self).__init__()
self.reportname = get_rpath('Lap8.jrxml')
self.xpath = '/webr/lap8'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
        ttd = DBSession.query(
            Pegawai.kode.label('pg_kd'),
            Pegawai.nama.label('pg_nm'),
        ).filter(Pegawai.user_id == u).first()
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'lap8')
ET.SubElement(xml_greeting, "un_id").text = unicode(row.un_id)
ET.SubElement(xml_greeting, "un_kd").text = row.un_kd
ET.SubElement(xml_greeting, "un_nm").text = row.un_nm
ET.SubElement(xml_greeting, "rek_id").text = unicode(row.rek_id)
ET.SubElement(xml_greeting, "rek_kd").text = row.rek_kd
ET.SubElement(xml_greeting, "rek_nm").text = row.rek_nm
ET.SubElement(xml_greeting, "kd").text = row.kd
ET.SubElement(xml_greeting, "wp_nm").text = row.wp_nm
ET.SubElement(xml_greeting, "dasar").text = unicode(row.dasar)
ET.SubElement(xml_greeting, "pokok").text = unicode(row.pokok)
ET.SubElement(xml_greeting, "denda").text = unicode(row.denda)
ET.SubElement(xml_greeting, "bunga").text = unicode(row.bunga)
ET.SubElement(xml_greeting, "jumlah").text = unicode(row.jumlah)
ET.SubElement(xml_greeting, "logo").text = logo_pemda
ET.SubElement(xml_greeting, "awal").text = awal
ET.SubElement(xml_greeting, "akhir").text = akhir
#ET.SubElement(xml_greeting, "pg_kd").text = ttd.pg_kd
#ET.SubElement(xml_greeting, "pg_nm").text = ttd.pg_nm
return self.root
class lap8benGenerator(JasperGenerator):
def __init__(self):
super(lap8benGenerator, self).__init__()
self.reportname = get_rpath('Lap8bendahara.jrxml')
self.xpath = '/webr/lap8ben'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
        ttd = DBSession.query(
            Pegawai.kode.label('pg_kd'),
            Pegawai.nama.label('pg_nm'),
        ).filter(Pegawai.user_id == u).first()
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'lap8ben')
ET.SubElement(xml_greeting, "kd").text = row.kd
ET.SubElement(xml_greeting, "wp_nm").text = row.wp_nm
ET.SubElement(xml_greeting, "rek_id").text = unicode(row.rek_id)
ET.SubElement(xml_greeting, "rek_kd").text = row.rek_kd
ET.SubElement(xml_greeting, "rek_nm").text = row.rek_nm
ET.SubElement(xml_greeting, "un_id").text = unicode(row.un_id)
ET.SubElement(xml_greeting, "un_kd").text = row.un_kd
ET.SubElement(xml_greeting, "un_nm").text = row.un_nm
ET.SubElement(xml_greeting, "dasar").text = unicode(row.dasar)
ET.SubElement(xml_greeting, "pokok").text = unicode(row.pokok)
ET.SubElement(xml_greeting, "denda").text = unicode(row.denda)
ET.SubElement(xml_greeting, "bunga").text = unicode(row.bunga)
ET.SubElement(xml_greeting, "jumlah").text = unicode(row.jumlah)
ET.SubElement(xml_greeting, "logo").text = logo_pemda
ET.SubElement(xml_greeting, "awal").text = awal
ET.SubElement(xml_greeting, "akhir").text = akhir
ET.SubElement(xml_greeting, "pg_kd").text = ttd.pg_kd
ET.SubElement(xml_greeting, "pg_nm").text = ttd.pg_nm
ET.SubElement(xml_greeting, "un_al").text = unit_al
return self.root
class lap9Generator(JasperGenerator):
def __init__(self):
super(lap9Generator, self).__init__()
self.reportname = get_rpath('Lap9.jrxml')
self.xpath = '/webr/lap9'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
        ttd = DBSession.query(
            Pegawai.kode.label('pg_kd'),
            Pegawai.nama.label('pg_nm'),
        ).filter(Pegawai.user_id == u).first()
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'lap9')
ET.SubElement(xml_greeting, "un_id").text = unicode(row.un_id)
ET.SubElement(xml_greeting, "un_kd").text = row.un_kd
ET.SubElement(xml_greeting, "un_nm").text = row.un_nm
ET.SubElement(xml_greeting, "rek_id").text = unicode(row.rek_id)
ET.SubElement(xml_greeting, "rek_kd").text = row.rek_kd
ET.SubElement(xml_greeting, "rek_nm").text = row.rek_nm
ET.SubElement(xml_greeting, "kd").text = row.kd
ET.SubElement(xml_greeting, "wp_nm").text = row.wp_nm
ET.SubElement(xml_greeting, "dasar").text = unicode(row.dasar)
ET.SubElement(xml_greeting, "pokok").text = unicode(row.pokok)
ET.SubElement(xml_greeting, "denda").text = unicode(row.denda)
ET.SubElement(xml_greeting, "bunga").text = unicode(row.bunga)
ET.SubElement(xml_greeting, "jumlah").text = unicode(row.jumlah)
ET.SubElement(xml_greeting, "logo").text = logo_pemda
ET.SubElement(xml_greeting, "awal").text = awal
ET.SubElement(xml_greeting, "akhir").text = akhir
#ET.SubElement(xml_greeting, "pg_kd").text = ttd.pg_kd
#ET.SubElement(xml_greeting, "pg_nm").text = ttd.pg_nm
return self.root
class lap9benGenerator(JasperGenerator):
def __init__(self):
super(lap9benGenerator, self).__init__()
self.reportname = get_rpath('Lap9bendahara.jrxml')
self.xpath = '/webr/lap9ben'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
        ttd = DBSession.query(
            Pegawai.kode.label('pg_kd'),
            Pegawai.nama.label('pg_nm'),
        ).filter(Pegawai.user_id == u).first()
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'lap9ben')
ET.SubElement(xml_greeting, "kd").text = row.kd
ET.SubElement(xml_greeting, "wp_nm").text = row.wp_nm
ET.SubElement(xml_greeting, "rek_id").text = unicode(row.rek_id)
ET.SubElement(xml_greeting, "rek_kd").text = row.rek_kd
ET.SubElement(xml_greeting, "rek_nm").text = row.rek_nm
ET.SubElement(xml_greeting, "un_id").text = unicode(row.un_id)
ET.SubElement(xml_greeting, "un_kd").text = row.un_kd
ET.SubElement(xml_greeting, "un_nm").text = row.un_nm
ET.SubElement(xml_greeting, "dasar").text = unicode(row.dasar)
ET.SubElement(xml_greeting, "pokok").text = unicode(row.pokok)
ET.SubElement(xml_greeting, "denda").text = unicode(row.denda)
ET.SubElement(xml_greeting, "bunga").text = unicode(row.bunga)
ET.SubElement(xml_greeting, "jumlah").text = unicode(row.jumlah)
ET.SubElement(xml_greeting, "logo").text = logo_pemda
ET.SubElement(xml_greeting, "awal").text = awal
ET.SubElement(xml_greeting, "akhir").text = akhir
ET.SubElement(xml_greeting, "un_al").text = unit_al
ET.SubElement(xml_greeting, "pg_kd").text = ttd.pg_kd
ET.SubElement(xml_greeting, "pg_nm").text = ttd.pg_nm
return self.root
## iWan dropped by
class lap10Generator(JasperGenerator):
def __init__(self):
super(lap10Generator, self).__init__()
self.reportname = get_rpath('Lap10.jrxml')
self.xpath = '/webr/lap10'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
        ttd = DBSession.query(
            Pegawai.kode.label('pg_kd'),
            Pegawai.nama.label('pg_nm'),
        ).filter(Pegawai.user_id == u).first()
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'lap10')
ET.SubElement(xml_greeting, "un_id").text = unicode(row.un_id)
ET.SubElement(xml_greeting, "un_kd").text = row.un_kd
ET.SubElement(xml_greeting, "un_nm").text = row.un_nm
ET.SubElement(xml_greeting, "rek_id").text = unicode(row.rek_id)
ET.SubElement(xml_greeting, "rek_kd").text = row.rek_kd
ET.SubElement(xml_greeting, "rek_nm").text = row.rek_nm
ET.SubElement(xml_greeting, "kd").text = row.kd
ET.SubElement(xml_greeting, "invoice_kode").text = row.invoice_kode
ET.SubElement(xml_greeting, "tgl_terima").text = unicode(row.tgl_terima)
ET.SubElement(xml_greeting, "periode_1").text = unicode(row.periode_1)
ET.SubElement(xml_greeting, "periode_2").text = unicode(row.periode_2)
ET.SubElement(xml_greeting, "jatuh_tempo").text = unicode(row.jatuh_tempo)
ET.SubElement(xml_greeting, "wp_nm").text = row.wp_nm
ET.SubElement(xml_greeting, "op_nm").text = row.op_nm
ET.SubElement(xml_greeting, "dasar").text = unicode(row.dasar)
ET.SubElement(xml_greeting, "pokok").text = unicode(row.pokok)
ET.SubElement(xml_greeting, "denda").text = unicode(row.denda)
ET.SubElement(xml_greeting, "bunga").text = unicode(row.bunga)
ET.SubElement(xml_greeting, "jumlah").text = unicode(row.jumlah)
ET.SubElement(xml_greeting, "logo").text = logo_pemda
ET.SubElement(xml_greeting, "unit_kd").text = unit_kd
ET.SubElement(xml_greeting, "unit_nm").text = unit_nm
ET.SubElement(xml_greeting, "awal").text = awal
ET.SubElement(xml_greeting, "akhir").text = akhir
ET.SubElement(xml_greeting, "un_al").text = unit_al
ET.SubElement(xml_greeting, "pg_kd").text = ttd.pg_kd
ET.SubElement(xml_greeting, "pg_nm").text = ttd.pg_nm
return self.root
class lap10budGenerator(JasperGenerator):
def __init__(self):
super(lap10budGenerator, self).__init__()
self.reportname = get_rpath('Lap10bud.jrxml')
self.xpath = '/webr/lap10bud'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'lap10bud')
ET.SubElement(xml_greeting, "un_id").text = unicode(row.un_id)
ET.SubElement(xml_greeting, "un_kd").text = row.un_kd
ET.SubElement(xml_greeting, "un_nm").text = row.un_nm
ET.SubElement(xml_greeting, "rek_id").text = unicode(row.rek_id)
ET.SubElement(xml_greeting, "rek_kd").text = row.rek_kd
ET.SubElement(xml_greeting, "rek_nm").text = row.rek_nm
ET.SubElement(xml_greeting, "kd").text = row.kd
ET.SubElement(xml_greeting, "invoice_kode").text = row.invoice_kode
ET.SubElement(xml_greeting, "tgl_terima").text = unicode(row.tgl_terima)
ET.SubElement(xml_greeting, "periode_1").text = unicode(row.periode_1)
ET.SubElement(xml_greeting, "periode_2").text = unicode(row.periode_2)
ET.SubElement(xml_greeting, "jatuh_tempo").text = unicode(row.jatuh_tempo)
ET.SubElement(xml_greeting, "wp_nm").text = row.wp_nm
ET.SubElement(xml_greeting, "op_nm").text = row.op_nm
ET.SubElement(xml_greeting, "dasar").text = unicode(row.dasar)
ET.SubElement(xml_greeting, "pokok").text = unicode(row.pokok)
ET.SubElement(xml_greeting, "denda").text = unicode(row.denda)
ET.SubElement(xml_greeting, "bunga").text = unicode(row.bunga)
ET.SubElement(xml_greeting, "jumlah").text = unicode(row.jumlah)
ET.SubElement(xml_greeting, "logo").text = logo_pemda
ET.SubElement(xml_greeting, "awal").text = awal
ET.SubElement(xml_greeting, "akhir").text = akhir
return self.root
class lap11Generator(JasperGenerator):
def __init__(self):
super(lap11Generator, self).__init__()
self.reportname = get_rpath('Lap11.jrxml')
self.xpath = '/webr/lap11'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
        ttd = DBSession.query(
            Pegawai.kode.label('pg_kd'),
            Pegawai.nama.label('pg_nm'),
        ).filter(Pegawai.user_id == u).first()
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'lap11')
ET.SubElement(xml_greeting, "un_id").text = unicode(row.un_id)
ET.SubElement(xml_greeting, "un_kd").text = row.un_kd
ET.SubElement(xml_greeting, "un_nm").text = row.un_nm
ET.SubElement(xml_greeting, "rek_id").text = unicode(row.rek_id)
ET.SubElement(xml_greeting, "rek_kd").text = row.rek_kd
ET.SubElement(xml_greeting, "rek_nm").text = row.rek_nm
ET.SubElement(xml_greeting, "kd").text = row.kd
ET.SubElement(xml_greeting, "wp_nm").text = row.wp_nm
ET.SubElement(xml_greeting, "dasar").text = unicode(row.dasar)
ET.SubElement(xml_greeting, "pokok").text = unicode(row.pokok)
ET.SubElement(xml_greeting, "denda").text = unicode(row.denda)
ET.SubElement(xml_greeting, "bunga").text = unicode(row.bunga)
ET.SubElement(xml_greeting, "jumlah").text = unicode(row.jumlah)
ET.SubElement(xml_greeting, "logo").text = logo_pemda
ET.SubElement(xml_greeting, "unit_kd").text = unit_kd
ET.SubElement(xml_greeting, "unit_nm").text = unit_nm
ET.SubElement(xml_greeting, "awal").text = awal
ET.SubElement(xml_greeting, "akhir").text = akhir
ET.SubElement(xml_greeting, "un_al").text = unit_al
ET.SubElement(xml_greeting, "pg_kd").text = ttd.pg_kd
ET.SubElement(xml_greeting, "pg_nm").text = ttd.pg_nm
ET.SubElement(xml_greeting, "status").text = row.status
return self.root
class lap11budGenerator(JasperGenerator):
def __init__(self):
super(lap11budGenerator, self).__init__()
self.reportname = get_rpath('Lap11bud.jrxml')
self.xpath = '/webr/lap11bud'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'lap11bud')
ET.SubElement(xml_greeting, "un_id").text = unicode(row.un_id)
ET.SubElement(xml_greeting, "un_kd").text = row.un_kd
ET.SubElement(xml_greeting, "un_nm").text = row.un_nm
ET.SubElement(xml_greeting, "rek_id").text = unicode(row.rek_id)
ET.SubElement(xml_greeting, "rek_kd").text = row.rek_kd
ET.SubElement(xml_greeting, "rek_nm").text = row.rek_nm
ET.SubElement(xml_greeting, "kd").text = row.kd
ET.SubElement(xml_greeting, "wp_nm").text = row.wp_nm
ET.SubElement(xml_greeting, "dasar").text = unicode(row.dasar)
ET.SubElement(xml_greeting, "pokok").text = unicode(row.pokok)
ET.SubElement(xml_greeting, "denda").text = unicode(row.denda)
ET.SubElement(xml_greeting, "bunga").text = unicode(row.bunga)
ET.SubElement(xml_greeting, "jumlah").text = unicode(row.jumlah)
ET.SubElement(xml_greeting, "logo").text = logo_pemda
ET.SubElement(xml_greeting, "awal").text = awal
ET.SubElement(xml_greeting, "akhir").text = akhir
ET.SubElement(xml_greeting, "status").text = row.status
return self.root
class lap12Generator(JasperGenerator):
def __init__(self):
super(lap12Generator, self).__init__()
self.reportname = get_rpath('Lap12.jrxml')
self.xpath = '/webr/lap12'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
        ttd = DBSession.query(
            Pegawai.kode.label('pg_kd'),
            Pegawai.nama.label('pg_nm'),
        ).filter(Pegawai.user_id == u).first()
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'lap12')
ET.SubElement(xml_greeting, "un_id").text = unicode(row.un_id)
ET.SubElement(xml_greeting, "un_kd").text = row.un_kd
ET.SubElement(xml_greeting, "un_nm").text = row.un_nm
ET.SubElement(xml_greeting, "rek_id").text = unicode(row.rek_id)
ET.SubElement(xml_greeting, "rek_kd").text = row.rek_kd
ET.SubElement(xml_greeting, "rek_nm").text = row.rek_nm
ET.SubElement(xml_greeting, "kd").text = row.kd
ET.SubElement(xml_greeting, "wp_nm").text = row.wp_nm
ET.SubElement(xml_greeting, "dasar").text = unicode(row.dasar)
ET.SubElement(xml_greeting, "pokok").text = unicode(row.pokok)
ET.SubElement(xml_greeting, "denda").text = unicode(row.denda)
ET.SubElement(xml_greeting, "bunga").text = unicode(row.bunga)
ET.SubElement(xml_greeting, "jumlah").text = unicode(row.jumlah)
ET.SubElement(xml_greeting, "logo").text = logo_pemda
ET.SubElement(xml_greeting, "unit_kd").text = unit_kd
ET.SubElement(xml_greeting, "unit_nm").text = unit_nm
ET.SubElement(xml_greeting, "awal").text = awal
ET.SubElement(xml_greeting, "akhir").text = akhir
ET.SubElement(xml_greeting, "un_al").text = unit_al
ET.SubElement(xml_greeting, "pg_kd").text = ttd.pg_kd
ET.SubElement(xml_greeting, "pg_nm").text = ttd.pg_nm
ET.SubElement(xml_greeting, "status").text = row.status
return self.root
class lap12budGenerator(JasperGenerator):
def __init__(self):
super(lap12budGenerator, self).__init__()
self.reportname = get_rpath('Lap12bud.jrxml')
self.xpath = '/webr/lap12bud'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'lap12bud')
ET.SubElement(xml_greeting, "un_id").text = unicode(row.un_id)
ET.SubElement(xml_greeting, "un_kd").text = row.un_kd
ET.SubElement(xml_greeting, "un_nm").text = row.un_nm
ET.SubElement(xml_greeting, "rek_id").text = unicode(row.rek_id)
ET.SubElement(xml_greeting, "rek_kd").text = row.rek_kd
ET.SubElement(xml_greeting, "rek_nm").text = row.rek_nm
ET.SubElement(xml_greeting, "kd").text = row.kd
ET.SubElement(xml_greeting, "wp_nm").text = row.wp_nm
ET.SubElement(xml_greeting, "dasar").text = unicode(row.dasar)
ET.SubElement(xml_greeting, "pokok").text = unicode(row.pokok)
ET.SubElement(xml_greeting, "denda").text = unicode(row.denda)
ET.SubElement(xml_greeting, "bunga").text = unicode(row.bunga)
ET.SubElement(xml_greeting, "jumlah").text = unicode(row.jumlah)
ET.SubElement(xml_greeting, "logo").text = logo_pemda
ET.SubElement(xml_greeting, "awal").text = awal
ET.SubElement(xml_greeting, "akhir").text = akhir
ET.SubElement(xml_greeting, "status").text = row.status
return self.root
class lap14Generator(JasperGenerator):
def __init__(self):
super(lap14Generator, self).__init__()
self.reportname = get_rpath('Lap14.jrxml')
self.xpath = '/webr/lap14'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'lap14')
ET.SubElement(xml_greeting, "rek_id").text = unicode(row.rek_id)
ET.SubElement(xml_greeting, "rek_kd").text = row.rek_kd
ET.SubElement(xml_greeting, "rek_nm").text = row.rek_nm
ET.SubElement(xml_greeting, "kd").text = row.kd
ET.SubElement(xml_greeting, "tgl_ttp").text = unicode(row.tgl_ttp)
ET.SubElement(xml_greeting, "bayar").text = unicode(row.bayar)
ET.SubElement(xml_greeting, "tgl").text = unicode(row.tgl)
ET.SubElement(xml_greeting, "jumlah").text = unicode(row.jumlah)
ET.SubElement(xml_greeting, "wil_kd").text = row.wil_kd
ET.SubElement(xml_greeting, "wil_nm").text = row.wil_nm
ET.SubElement(xml_greeting, "logo").text = logo_pemda
ET.SubElement(xml_greeting, "unit_kd").text = unit_kd
ET.SubElement(xml_greeting, "unit_nm").text = unit_nm
ET.SubElement(xml_greeting, "unit_al").text = unit_al
ET.SubElement(xml_greeting, "now").text = now
ET.SubElement(xml_greeting, "bulan").text = bulan
ET.SubElement(xml_greeting, "thn").text = thn
return self.root
class lap13Generator(JasperGenerator):
def __init__(self):
super(lap13Generator, self).__init__()
self.reportname = get_rpath('Lap13.jrxml')
self.xpath = '/webr/lap13'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'lap13')
ET.SubElement(xml_greeting, "rek_kd").text = row.rek_kd
ET.SubElement(xml_greeting, "rek_nm").text = row.rek_nm
ET.SubElement(xml_greeting, "ag_m").text = unicode(row.target)
ET.SubElement(xml_greeting, "byr1").text = unicode(row.bayar1)
ET.SubElement(xml_greeting, "byr2").text = unicode(row.bayar2)
            x = row.bayar1 + row.bayar2
ET.SubElement(xml_greeting, "byr3").text = unicode(x)
ET.SubElement(xml_greeting, "logo").text = logo_pemda
ET.SubElement(xml_greeting, "unit_kd").text = unit_kd
ET.SubElement(xml_greeting, "unit_nm").text = unit_nm
ET.SubElement(xml_greeting, "unit_al").text = unit_al
ET.SubElement(xml_greeting, "now").text = now
ET.SubElement(xml_greeting, "bulan").text = bulan
ET.SubElement(xml_greeting, "thn").text = thn
return self.root
class lap15Generator(JasperGenerator):
def __init__(self):
super(lap15Generator, self).__init__()
self.reportname = get_rpath('Lap15.jrxml')
self.xpath = '/webr/lap15'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'lap15')
ET.SubElement(xml_greeting, "rek_kd").text = row.rek_kd
ET.SubElement(xml_greeting, "rek_nm").text = row.rek_nm
ET.SubElement(xml_greeting, "bayar").text = unicode(row.bayar)
ET.SubElement(xml_greeting, "logo").text = logo_pemda
ET.SubElement(xml_greeting, "unit_kd").text = unit_kd
ET.SubElement(xml_greeting, "unit_nm").text = unit_nm
ET.SubElement(xml_greeting, "unit_al").text = unit_al
ET.SubElement(xml_greeting, "now").text = now
ET.SubElement(xml_greeting, "thn").text = thn
ET.SubElement(xml_greeting, "bln").text = unicode(row.bln)
ET.SubElement(xml_greeting, "triwulan").text = row.triwulan
return self.root
## ----------------------------- End Laporan ----------------------------------------##
#User
class r001Generator(JasperGenerator):
def __init__(self):
super(r001Generator, self).__init__()
self.reportname = get_rpath('R0001.jrxml')
self.xpath = '/webr/user'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'user')
ET.SubElement(xml_greeting, "username").text = row.username
ET.SubElement(xml_greeting, "email").text = row.email
ET.SubElement(xml_greeting, "status").text = unicode(row.status)
ET.SubElement(xml_greeting, "last_login").text = unicode(row.last_login)
ET.SubElement(xml_greeting, "registered_date").text = unicode(row.registered_date)
ET.SubElement(xml_greeting, "logo").text = logo_pemda
return self.root
#Grup
class r002Generator(JasperGenerator):
def __init__(self):
super(r002Generator, self).__init__()
self.reportname = get_rpath('R0002.jrxml')
self.xpath = '/webr/grup'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'grup')
ET.SubElement(xml_greeting, "kode").text = row.kode
ET.SubElement(xml_greeting, "nama").text = row.nama
ET.SubElement(xml_greeting, "logo").text = logo_pemda
return self.root
#Unit
class r003Generator(JasperGenerator):
def __init__(self):
super(r003Generator, self).__init__()
self.reportname = get_rpath('R0003.jrxml')
self.xpath = '/webr/unit'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'unit')
ET.SubElement(xml_greeting, "kode").text = row.kode
ET.SubElement(xml_greeting, "nama").text = row.nama
ET.SubElement(xml_greeting, "level_id").text = unicode(row.level_id)
ET.SubElement(xml_greeting, "is_summary").text = unicode(row.is_summary)
ET.SubElement(xml_greeting, "logo").text = logo_pemda
return self.root
#Jabatan
class r004Generator(JasperGenerator):
def __init__(self):
super(r004Generator, self).__init__()
self.reportname = get_rpath('R0004.jrxml')
self.xpath = '/webr/jabatan'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'jabatan')
ET.SubElement(xml_greeting, "kode").text = row.kode
ET.SubElement(xml_greeting, "nama").text = row.nama
ET.SubElement(xml_greeting, "status").text = unicode(row.status)
ET.SubElement(xml_greeting, "logo").text = logo_pemda
return self.root
#Pegawai
class r005Generator(JasperGenerator):
def __init__(self):
super(r005Generator, self).__init__()
self.reportname = get_rpath('R0005.jrxml')
self.xpath = '/webr/pegawai'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'pegawai')
ET.SubElement(xml_greeting, "kode").text = row.kode
ET.SubElement(xml_greeting, "nama").text = row.nama
ET.SubElement(xml_greeting, "logo").text = logo_pemda
return self.root
#Rekening
class r006Generator(JasperGenerator):
def __init__(self):
super(r006Generator, self).__init__()
self.reportname = get_rpath('R0006.jrxml')
self.xpath = '/webr/rekening'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'rekening')
ET.SubElement(xml_greeting, "kode").text = row.kode
ET.SubElement(xml_greeting, "nama").text = row.nama
ET.SubElement(xml_greeting, "level_id").text = unicode(row.level_id)
ET.SubElement(xml_greeting, "is_summary").text = unicode(row.is_summary)
ET.SubElement(xml_greeting, "logo").text = logo_pemda
return self.root
#Pajak dan Tarif
class r007Generator(JasperGenerator):
def __init__(self):
super(r007Generator, self).__init__()
self.reportname = get_rpath('R0007.jrxml')
self.xpath = '/webr/pajak'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'pajak')
ET.SubElement(xml_greeting, "kode").text = row.kode
ET.SubElement(xml_greeting, "nama").text = row.nama
ET.SubElement(xml_greeting, "rek_nm").text = row.rek_nm
ET.SubElement(xml_greeting, "tahun").text = unicode(row.tahun)
ET.SubElement(xml_greeting, "tarif").text = unicode(row.tarif)
ET.SubElement(xml_greeting, "logo").text = logo_pemda
return self.root
#Wilayah
class r008Generator(JasperGenerator):
def __init__(self):
super(r008Generator, self).__init__()
self.reportname = get_rpath('R0008.jrxml')
self.xpath = '/webr/wilayah'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'wilayah')
ET.SubElement(xml_greeting, "kode").text = row.kode
ET.SubElement(xml_greeting, "nama").text = row.nama
ET.SubElement(xml_greeting, "level_id").text = unicode(row.level_id)
ET.SubElement(xml_greeting, "logo").text = logo_pemda
return self.root
#JnsPajak
class semua_sektorGenerator(JasperGenerator):
def __init__(self):
super(semua_sektorGenerator, self).__init__()
self.reportname = get_rpath('semua_sektor.jrxml')
self.xpath = '/webr/semua_sektor'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'semua_sektor')
ET.SubElement(xml_greeting, "kode").text = row.kode
ET.SubElement(xml_greeting, "nama").text = row.nama
ET.SubElement(xml_greeting, "status").text = row.status
ET.SubElement(xml_greeting, "logo").text = logo_pemda
return self.root
### ----------- Subjek Pajak ----------- ###
class rSubjekPajakGenerator(JasperGenerator):
def __init__(self):
super(rSubjekPajakGenerator, self).__init__()
self.reportname = get_rpath('R0009.jrxml')
self.xpath = '/webr/subjekpajak'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'subjekpajak')
ET.SubElement(xml_greeting, "kode").text = row.kode
ET.SubElement(xml_greeting, "nama").text = row.nama
ET.SubElement(xml_greeting, "alamat_1").text = row.alamat_1
ET.SubElement(xml_greeting, "kelurahan").text = row.kelurahan
ET.SubElement(xml_greeting, "kecamatan").text = row.kecamatan
ET.SubElement(xml_greeting, "kota").text = row.kota
ET.SubElement(xml_greeting, "email").text = row.email
ET.SubElement(xml_greeting, "status").text = unicode(row.status)
ET.SubElement(xml_greeting, "unit").text = row.unit
ET.SubElement(xml_greeting, "logo").text = logo_pemda
ET.SubElement(xml_greeting, "un_nm").text = unit_nm
ET.SubElement(xml_greeting, "un_al").text = unit_al
#ET.SubElement(xml_greeting, "alamat_2").text = row.alamat_2
return self.root
#ObjekPajak
class r010Generator(JasperGenerator):
def __init__(self):
super(r010Generator, self).__init__()
self.reportname = get_rpath('R0010.jrxml')
self.xpath = '/webr/op'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'op')
ET.SubElement(xml_greeting, "op_nm").text = row.op_nm
ET.SubElement(xml_greeting, "sp_kd").text = row.sp_kd
ET.SubElement(xml_greeting, "sp_nm").text = row.sp_nm
ET.SubElement(xml_greeting, "p_kd").text = row.p_kd
ET.SubElement(xml_greeting, "w_nm").text = row.w_nm
ET.SubElement(xml_greeting, "status").text = unicode(row.status)
ET.SubElement(xml_greeting, "unit").text = row.unit
ET.SubElement(xml_greeting, "logo").text = logo_pemda
ET.SubElement(xml_greeting, "un_nm").text = unit_nm
ET.SubElement(xml_greeting, "un_al").text = unit_al
return self.root
#ARINVOICE FAST PAY
class r101Generator(JasperGenerator):
def __init__(self):
super(r101Generator, self).__init__()
self.reportname = get_rpath('epayment.jrxml')
self.xpath = '/webr/epayment'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'epayment')
ET.SubElement(xml_greeting, "kd_bayar").text = row.kode
ET.SubElement(xml_greeting, "wp_nama").text = row.wp_nama
ET.SubElement(xml_greeting, "op_nama").text = row.op_nama
ET.SubElement(xml_greeting, "unit_kd").text = row.unit_kode
ET.SubElement(xml_greeting, "unit_nm").text = row.unit_nama
ET.SubElement(xml_greeting, "rek_kd").text = row.rek_kode
ET.SubElement(xml_greeting, "rek_nm").text = row.rek_nama
ET.SubElement(xml_greeting, "periode1").text = unicode(row.periode_1)
ET.SubElement(xml_greeting, "periode2").text = unicode(row.periode_2)
ET.SubElement(xml_greeting, "tgl_tetap").text = unicode(row.tgl_tetap)
ET.SubElement(xml_greeting, "tgl_jt_tempo").text = unicode(row.jatuh_tempo)
ET.SubElement(xml_greeting, "dasar").text = unicode(row.dasar)
ET.SubElement(xml_greeting, "tarif").text = unicode(row.tarif)
ET.SubElement(xml_greeting, "pokok").text = unicode(row.pokok)
ET.SubElement(xml_greeting, "denda").text = unicode(row.denda)
ET.SubElement(xml_greeting, "bunga").text = unicode(row.bunga)
ET.SubElement(xml_greeting, "jumlah").text = unicode(row.jumlah)
ET.SubElement(xml_greeting, "logo").text = logo_pemda
return self.root
#ARINVOICE
class r100Generator(JasperGenerator):
def __init__(self):
super(r100Generator, self).__init__()
self.reportname = get_rpath('epayment.jrxml')
self.xpath = '/webr/epayment'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'epayment')
ET.SubElement(xml_greeting, "kd_bayar").text = row.kode
ET.SubElement(xml_greeting, "wp_nama").text = row.wp_nama
ET.SubElement(xml_greeting, "op_nama").text = row.op_nama
ET.SubElement(xml_greeting, "unit_kd").text = row.unit_kode
ET.SubElement(xml_greeting, "unit_nm").text = row.unit_nama
ET.SubElement(xml_greeting, "rek_kd").text = row.rek_kode
ET.SubElement(xml_greeting, "rek_nm").text = row.rek_nama
ET.SubElement(xml_greeting, "periode1").text = unicode(row.periode_1)
ET.SubElement(xml_greeting, "periode2").text = unicode(row.periode_2)
ET.SubElement(xml_greeting, "tgl_tetap").text = unicode(row.tgl_tetap)
ET.SubElement(xml_greeting, "tgl_jt_tempo").text = unicode(row.jatuh_tempo)
ET.SubElement(xml_greeting, "dasar").text = unicode(row.dasar)
ET.SubElement(xml_greeting, "tarif").text = unicode(row.tarif)
ET.SubElement(xml_greeting, "pokok").text = unicode(row.pokok)
ET.SubElement(xml_greeting, "denda").text = unicode(row.denda)
ET.SubElement(xml_greeting, "bunga").text = unicode(row.bunga)
ET.SubElement(xml_greeting, "jumlah").text = unicode(row.jumlah)
ET.SubElement(xml_greeting, "logo").text = logo_pemda
return self.root
#ARSSPD
class r200Generator(JasperGenerator):
def __init__(self):
super(r200Generator, self).__init__()
self.reportname = get_rpath('R2000.jrxml')
self.xpath = '/webr/arsspd'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'arsspd')
ET.SubElement(xml_greeting, "id").text = unicode(row.id)
ET.SubElement(xml_greeting, "kode").text = row.kode
ET.SubElement(xml_greeting, "wp_kode").text = row.wp_kode
ET.SubElement(xml_greeting, "wp_nama").text = row.wp_nama
ET.SubElement(xml_greeting, "op_kode").text = row.op_kode
ET.SubElement(xml_greeting, "op_nama").text = row.op_nama
ET.SubElement(xml_greeting, "rek_nama").text = row.rek_nama
ET.SubElement(xml_greeting, "jumlah").text = unicode(row.bayar)
ET.SubElement(xml_greeting, "tgl_bayar").text = unicode(row.tgl_bayar)
ET.SubElement(xml_greeting, "logo").text = logo_pemda
return self.root
class r200frmGenerator(JasperGenerator):
def __init__(self):
super(r200frmGenerator, self).__init__()
self.reportname = get_rpath('R2000FRM.jrxml')
self.xpath = '/webr/arsspdfrm'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'arsspdfrm')
ET.SubElement(xml_greeting, "id").text = unicode(row.id)
ET.SubElement(xml_greeting, "kode").text = row.kode
ET.SubElement(xml_greeting, "wp_kode").text = row.wp_kode
ET.SubElement(xml_greeting, "wp_nama").text = row.wp_nama
ET.SubElement(xml_greeting, "op_kode").text = row.op_kode
ET.SubElement(xml_greeting, "op_nama").text = row.op_nama
ET.SubElement(xml_greeting, "rek_kode").text = row.rek_kode
ET.SubElement(xml_greeting, "rek_nama").text = row.rek_nama
ET.SubElement(xml_greeting, "unit_kode").text = row.unit_kode
ET.SubElement(xml_greeting, "unit_nama").text = row.unit_nama
ET.SubElement(xml_greeting, "tarif").text = unicode(row.tarif)
ET.SubElement(xml_greeting, "dasar").text = unicode(row.dasar)
ET.SubElement(xml_greeting, "pokok").text = unicode(row.pokok)
ET.SubElement(xml_greeting, "denda").text = unicode(row.denda)
ET.SubElement(xml_greeting, "bunga").text = unicode(row.bunga)
ET.SubElement(xml_greeting, "jumlah").text = unicode(row.bayar)
ET.SubElement(xml_greeting, "tgl_bayar").text = unicode(row.tgl_bayar)
ET.SubElement(xml_greeting, "logo").text = logo_pemda
return self.root
#ARSTS
class r300Generator(JasperGenerator):
def __init__(self):
super(r300Generator, self).__init__()
self.reportname = get_rpath('R3000.jrxml')
self.xpath = '/webr/arsts'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'arsts')
ET.SubElement(xml_greeting, "id").text = unicode(row.id)
ET.SubElement(xml_greeting, "kode").text = row.kode
ET.SubElement(xml_greeting, "nama").text = row.nama
ET.SubElement(xml_greeting, "tgl_sts").text = unicode(row.tgl_sts)
ET.SubElement(xml_greeting, "unit_kd").text = row.unit_kd
ET.SubElement(xml_greeting, "unit_nm").text = row.unit_nm
ET.SubElement(xml_greeting, "unit_al").text = row.unit_al
ET.SubElement(xml_greeting, "rek_kd").text = row.rek_kd
ET.SubElement(xml_greeting, "rek_nm").text = row.rek_nm
ET.SubElement(xml_greeting, "jumlah").text = unicode(row.jumlah)
ET.SubElement(xml_greeting, "jumlah_sts").text = unicode(row.jumlah_sts)
# ET.SubElement(xml_greeting, "jumlah").text = row.jumlah
ET.SubElement(xml_greeting, "no_bayar").text = row.no_bayar
ET.SubElement(xml_greeting, "logo").text = logo_pemda
return self.root
#E-SAMSAT
class r400Generator(JasperGenerator):
def __init__(self):
super(r400Generator, self).__init__()
self.reportname = get_rpath('esamsat.jrxml')
self.xpath = '/webr/esamsat'
self.root = ET.Element('webr')
def generate_xml(self, row):
#for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'esamsat')
ET.SubElement(xml_greeting, "logo").text = logo
ET.SubElement(xml_greeting, "customer").text = 'AAAA'
ET.SubElement(xml_greeting, "kd_bayar").text = row['kd_bayar']
ET.SubElement(xml_greeting, "no_rangka").text = row['no_rangka1']
ET.SubElement(xml_greeting, "no_polisi").text = row['no_polisi']
ET.SubElement(xml_greeting, "no_identitas").text = row['no_ktp1']
ET.SubElement(xml_greeting, "nm_pemilik").text = row['nm_pemilik']
ET.SubElement(xml_greeting, "warna").text = row['warna_tnkb']
ET.SubElement(xml_greeting, "merk").text = row['nm_merek_kb']
ET.SubElement(xml_greeting, "model").text = row['nm_model_kb']
ET.SubElement(xml_greeting, "tahun").text = row['th_buatan']
ET.SubElement(xml_greeting, "tgl_pjk_lama").text = row['tg_akhir_pjklm']
ET.SubElement(xml_greeting, "tgl_pjk_baru").text = row['tg_akhir_pjkbr']
ET.SubElement(xml_greeting, "pokok_bbn").text = row['bbn_pok']
ET.SubElement(xml_greeting, "denda_bbn").text = row['bbn_den']
ET.SubElement(xml_greeting, "pokok_swdkllj").text = row['swd_pok']
ET.SubElement(xml_greeting, "denda_swdkllj").text = row['swd_den']
ET.SubElement(xml_greeting, "adm_stnk").text = row['adm_stnk']
ET.SubElement(xml_greeting, "adm_tnkb").text = row['adm_tnkb']
ET.SubElement(xml_greeting, "jumlah").text = row['jumlah']
ET.SubElement(xml_greeting, "status_byr").text = row['kd_status']
ET.SubElement(xml_greeting, "keterangan").text = row['ket']
return self.root
#E-PAP
class r500Generator(JasperGenerator):
def __init__(self):
super(r500Generator, self).__init__()
self.reportname = get_rpath('epap.jrxml')
self.xpath = '/webr/epap'
self.root = ET.Element('webr')
def generate_xml(self, row):
#for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'epap')
ET.SubElement(xml_greeting, "logo").text = logo_pemda
ET.SubElement(xml_greeting, "kd_bayar").text = row['kd_bayar']
ET.SubElement(xml_greeting, "npwpd").text = row['npwpd']
ET.SubElement(xml_greeting, "nm_perus").text = row['nm_perus']
ET.SubElement(xml_greeting, "al_perus").text = row['al_perus']
ET.SubElement(xml_greeting, "vol_air").text = row['vol_air']
ET.SubElement(xml_greeting, "npa").text = row['npa']
ET.SubElement(xml_greeting, "m_pjk_thn").text = row['m_pjk_thn']
ET.SubElement(xml_greeting, "m_pjk_bln").text = row['m_pjk_bln']
ET.SubElement(xml_greeting, "bea_pok_pjk").text = row['bea_pok_pjk']
ET.SubElement(xml_greeting, "bea_den_pjk").text = row['bea_den_pjk']
ET.SubElement(xml_greeting, "tgl_tetap").text = row['tgl_tetap']
ET.SubElement(xml_greeting, "tgl_jt_tempo").text = row['tgl_jt_tempo']
ET.SubElement(xml_greeting, "keterangan").text = row['keterangan']
return self.root
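# A minimal usage sketch for these generators (illustrative only; the
# JasperGenerator base class and the report-driving code live elsewhere in
# this module, and the row shape is assumed from the fields each generator
# reads):
#   rows = DBSession.query(...).all()
#   gen = r001Generator()
#   xml_root = gen.generate_xml(rows)
#   # xml_root is then serialized and fed to the Jasper report in gen.reportname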
|
Wow. Sometimes we fail to realize how comfortable we’ve become in a certain set of circumstances until they are pulled out from under our feet like a rug. I’ve been struggling these past few months with the feeling of displacement and what it means to immerse yourself in a season of transition, when everything feels transient, impermanent and uncertain.
A month and a half ago, I packed up my college apartment and left the city I’ve called home for four years to move to Clarksville until I start my new job at the end of June. I’ve been spending my time nearly equally between my Toyota Camry, Green Hills Starbucks, and friends’ couches (Shoutout to everyone who has been letting me crash with you these past few weeks–I’m forever grateful!), and while it has been a grand adventure in learning to adapt to changing circumstances, I’d be lying if I said it didn’t feel strange not to have my own space anymore. I don’t think I gave much thought to the meaning of a home until I was without one of my own, and the realization that I will be traveling for the next year with nothing more than a few suitcases is thrilling and deeply frightening all at the same time.
How can we conquer the feeling of displacement without having a physical place to claim as ours? I think the most meaningful lesson I’m learning here is that place is a complex idea that encompasses more than a “You Are Here” dot on a map. It has to do with our entire set of surroundings and perspectives, and whenever those are shifted, it leaves us reeling without a sense of direction. Transitions make life richer but also profoundly uncomfortable and unsettling.
I think it’s important to acknowledge that displacement brings about its own unique set of challenges, but it also holds promises of growth and contentment if we allow ourselves to settle into the discomfort of the present moment. I’ve been asking myself to be fully present in this time of transition without immersing my thoughts too deeply in the past or future. It can be far too easy to fall into the trap of “what if” and “I wish,” trying to plan or change circumstances that are altogether outside of our control. This month of couch-surfing and eating burritos out of my car is teaching me a powerful lesson about letting go of expectation and allowing life to run its course. I am sitting here (at Starbucks–who’s surprised?) without a home, without a concrete plan, and without a semblance of control over what the next year holds. And most of all, I’m rejoicing in the uncertainty that comes along with this changing season.
Bring on June 28! Well, not until I pack my suitcases and clean out my car, but you get the picture. I am so excited for all this new chapter of life has to teach!
|
import argparse
import httplib2
import os
import sys
import json
import io
import os.path
from os import listdir
from os.path import isfile,join
# simulate the 'Cloud' in local storage
ROOT_DIR = '/home/ubuntu/tmp/'
def upload_file(service, from_file_name, to_file_name):
    # Delete any existing copy first; ignore the error if it does not exist.
    try:
        delete_file(service, to_file_name)
    except Exception:
        pass
    # The BytesIO object may be replaced with any io.Base instance.
    f = open(from_file_name, 'r')
    out_folder_name = ROOT_DIR + service['folder'] + '/'
    out_f = open(out_folder_name + to_file_name, 'w')
    out_f.write(f.read())
    f.close()
    out_f.close()
def upload_string(service, str_to_upload, to_file_name):
    # The BytesIO object may be replaced with any io.Base instance.
    out_folder_name = ROOT_DIR + service['folder'] + '/'
    out_f = open(out_folder_name + to_file_name, 'w')
    out_f.write(str_to_upload)
    out_f.close()
def delete_file(service, object_name):
    out_folder_name = ROOT_DIR + service['folder'] + '/'
    os.remove(out_folder_name + object_name)
def download_file(service, object_name, to_file_name):
    in_folder_name = ROOT_DIR + service['folder'] + '/'
    f_in = open(in_folder_name + object_name, 'r')
    f_out = open(to_file_name, 'w')
    f_out.write(f_in.read())
    f_in.close()
    f_out.close()
    return None
def get_all_file_names(service):
    folder_name = ROOT_DIR + service['folder'] + '/'
    file_names = [(f, os.stat(join(folder_name, f)).st_size)
                  for f in os.listdir(folder_name)
                  if isfile(join(folder_name, f))]
    return file_names
def create_service_object(extra_info):
service = {'folder':extra_info}
return service
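# A minimal round-trip sketch (assumes the folder ROOT_DIR + 'demo' exists):
#   svc = create_service_object('demo')
#   upload_string(svc, 'hello world', 'greeting.txt')
#   download_file(svc, 'greeting.txt', '/tmp/greeting.txt')
#   delete_file(svc, 'greeting.txt')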
if __name__ == "__main__":
s = create_service_object()
print get_all_file_names(s)
|
North Wembley Man Vans offer a comprehensive removals service for private individuals and moving companies: furniture transport, disassembly and assembly of interior furnishings, and packing and unpacking of boxes and property.
North Wembley Man & Van offer boxes, bubble wrap and other materials needed for your move.
North Wembley Man and Van is very flexible and can provide service on very short notice, so if you need a Man & Van service today, we can help.
North Wembley Man with Van always gets the job done quickly, professionally, efficiently and with a smile on our faces!
|
########################################################################
#
# jpath-py
# An XPath-like querying interface for JSON objects
#
# author: Vasileios Mitrousis
# email: [email protected]
#
# The software is given as is, no guarantees from the author
# Licensed under the Apache 2.0 licence
#
########################################################################
debug = False
# This function accepts a JSON document and a path like /x/y/z[4]/*
# and returns the actual value of the key(s)
def get_dict_value(doc, path, leaf=None):
if len(path.strip()) == 0:
return doc
path_splits = path.split('/')
for i, key in enumerate(path_splits):
if debug: print 'key processing: ' + key
if not doc:
return None
if '[' in key and ']' in key and i != len(path_splits)-1:
# array element
if debug: print 'array element'
idx = int(key[key.index('[')+1:key.index(']')])
key = key[:key.index('[')]
if debug: print 'key stripped: ' + key
if not doc.get(key):
return None
if isinstance(doc[key], list):
if debug: print 'is an array'
if idx >= len(doc[key]):
# index out of bounds
if debug: print 'out of bounds'
return None
doc = doc[key][idx]
else:
# single object, accept 0 index only
if debug: print 'single object'
if idx > 0:
return None
doc = doc[key]
elif key == '*':
# '*' has 2 meanings. The whole array,
# or the whole object if it is the last key
if debug: print 'wildcard key'
if i == len(path_splits) - 1:
# it is the last element, push the whole object
if debug: print 'last element'
else:
# '*' requires the whole array in this case
if debug: print 'getting the whole array'
if isinstance(doc, list):
if debug: print 'is type of array'
else:
if debug: print 'is not type of array, constructing it manually'
doc = [doc]
idx = -1
item_arr = []
recon_path = '/'.join(path_splits[i+1:])
if ']' == recon_path[-1]:
# we need indexed result
if debug: print 'getting indexed result'
idx = int(recon_path[recon_path.index('[')+1:recon_path.index(']')])
recon_path = recon_path[:recon_path.index('[')]
for k, item in enumerate(doc):
val = get_dict_value(item, recon_path, leaf)
if val:
item_arr.append(val)
if idx != -1:
if idx < len(item_arr):
return item_arr[idx]
return None
return item_arr
else:
if debug: print 'normal key: ' + key
if isinstance(doc, list):
if debug: print 'pointing to an array'
print "Warning: '%s' array was detected but not expected. Returning first item." % path_splits[i-1]
if len(doc) > 0:
doc = doc[0].get(key)
else:
if debug: print 'getting object normaly'
doc = doc.get(key)
if i == len(path_splits) - 1:
if debug: print 'it is the last component'
if isinstance(doc, list):
if debug: print 'it is a list, generate a @Val array'
try:
doc = [d[leaf] for d in doc if d]
except:
if debug: print "1,", path, doc
#raw_input()
else:
if debug: print 'final object @Val'
if doc and leaf:
try:
doc = doc[leaf]
                    except Exception as e:
                        print 'jpath_error:', e
#raw_input()
return doc
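# A small usage sketch (hypothetical document; `leaf` left as None so raw
# values are returned):
#   doc = {'store': {'books': [{'title': 'A'}, {'title': 'B'}]}}
#   get_dict_value(doc, 'store/books[1]/title')    # -> 'B'
#   get_dict_value(doc, 'store/books/*/title')     # -> ['A', 'B']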
|
We Christians, at least here in the U.S., have an autonomy problem. We want to figure everything out on our own, and we think we know better than anyone else. It seems suspiciously similar to the root of sin found in the opening verses of Genesis 3.
It shows in the way we think about our relationship with Christ and his church. We think of the two as completely separate from one another, but this is not what the Bible presents. The Bible tells us that believers are baptized into the church and that the church is God’s ordained instrument for both evangelism and discipleship of the world. We need the church because we cannot do it on our own – nor were we meant to. And if we cannot do it on our own, we need to be humble enough to admit we do not have all of the answers ourselves and allow others to help us.
But this brings up another interesting issue: how we think of the church. Many of us are willing to admit, or at least acknowledge, that we need to be a part of a local church. We see the value in that. But what about the church worldwide? Do we see a need to think of ourselves as connected to a body that is larger than our local church? Can we learn from other brothers and sisters in Christ who live down the road or across the globe in completely different cultures than our own? I think we too often neglect how the larger body of Christ can help us.
Speaking of looking to believers around the world, what about believers across time? Do we think that those believers who walked before us and faced the challenges of their day have anything to teach us? Or have we got it all figured out on our own? How much value do we place in knowing the events and history of believers who came before us?
Christianity was never intended to be lived autonomously. We are called to be a part of something bigger than ourselves; something that transcends culture and time: Christ’s body, the church.
|
#!/usr/bin/env python3
"""
PyDC - configs
==============
:copyleft: 2013 by Jens Diemer
:license: GNU GPL v3 or above, see LICENSE for more details.
"""
import inspect
from MC6809.utils.humanize import byte2bit_string
class BaseConfig:
""" shared config values """
# For writing WAVE files:
FRAMERATE = 22050
SAMPLEWIDTH = 2 # 1 for 8-bit, 2 for 16-bit, 4 for 32-bit samples
VOLUME_RATIO = 90 # "Loundness" in percent of the created wave file
def print_debug_info(self):
print(f"Config: '{self.__class__.__name__}'")
for name, value in inspect.getmembers(self): # , inspect.isdatadescriptor):
if name.startswith("_"):
continue
# print name, type(value)
if not isinstance(value, (int, str, list, tuple, dict)):
continue
if isinstance(value, int):
bit_string = byte2bit_string(value)
print("{:>20} = {:<4} (in hex: {:>7} - binary: {})".format(
name, value, repr(hex(value)), bit_string
))
else:
print(f"{name:>20} = {value}")
class Dragon32Config(BaseConfig):
"""
Dragon 32 specific config values
>> d32cfg = Dragon32Config()
>> d32cfg.print_debug_info() # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
Config: 'Dragon32Config'
AVG_COUNT = 0 (in hex: '0x0' - binary: 00000000)
BASIC_ASCII = 255 (in hex: '0xff' - binary: 11111111)
BASIC_CODE_END = [0, 0]
BASIC_TOKENIZED = 0 (in hex: '0x0' - binary: 00000000)
BASIC_TYPE_DICT = {0: 'tokenized BASIC (0x00)', 255: 'ASCII BASIC (0xff)'}
    BIT_NUL_HZ = 1100 (in hex: '0x44c' - binary: 00110010001)
    BIT_ONE_HZ = 2100 (in hex: '0x834' - binary: 001011000001)
BLOCK_TYPE_DICT = {0: 'filename block (0x00)', 1: 'data block (0x01)', 255: 'end-of-file block (0xff)'}
DATA_BLOCK = 1 (in hex: '0x1' - binary: 10000000)
END_COUNT = 2 (in hex: '0x2' - binary: 01000000)
EOF_BLOCK = 255 (in hex: '0xff' - binary: 11111111)
FILENAME_BLOCK = 0 (in hex: '0x0' - binary: 00000000)
    FILETYPE_DICT = {0: 'BASIC program (0x00)', 1: 'Data file (0x01)', 2: 'Binary machine code file (0x02)'}
FTYPE_BASIC = 0 (in hex: '0x0' - binary: 00000000)
    FTYPE_BIN = 2 (in hex: '0x2' - binary: 01000000)
FTYPE_DATA = 1 (in hex: '0x1' - binary: 10000000)
HZ_VARIATION = 450 (in hex: '0x1c2' - binary: 010000111)
LEAD_BYTE_CODEPOINT = 85 (in hex: '0x55' - binary: 10101010)
LEAD_BYTE_LEN = 255 (in hex: '0xff' - binary: 11111111)
MAX_SYNC_BYTE_SEARCH = 600 (in hex: '0x258' - binary: 0001101001)
MID_COUNT = 1 (in hex: '0x1' - binary: 10000000)
MIN_VOLUME_RATIO = 5 (in hex: '0x5' - binary: 10100000)
SYNC_BYTE_CODEPOINT = 60 (in hex: '0x3c' - binary: 00111100)
"""
# For reading WAVE files:
BIT_NUL_HZ = 1100 # Spec says: 1200Hz - Bit "0" is a single cycle at 1200 Hz
BIT_ONE_HZ = 2100 # Spec says: 2400Hz - Bit "1" is a single cycle at 2400 Hz
# see: http://five.pairlist.net/pipermail/coco/2013-August/070879.html
HZ_VARIATION = 450 # How much Hz can signal scatter to match 1 or 0 bit ?
MIN_VOLUME_RATIO = 5 # percent volume to ignore sample
AVG_COUNT = 0 # How many samples should be merged into a average value?
END_COUNT = 2 # Sample count that must be pos/neg at once
MID_COUNT = 1 # Sample count that can be around null
# Format values:
LEAD_BYTE_CODEPOINT = 0x55 # 10101010
LEAD_BYTE_LEN = 255
SYNC_BYTE_CODEPOINT = 0x3C # 00111100
MAX_SYNC_BYTE_SEARCH = 600 # search size in **Bytes**
# Block types:
FILENAME_BLOCK = 0x00
DATA_BLOCK = 0x01
EOF_BLOCK = 0xff
BLOCK_TYPE_DICT = {
FILENAME_BLOCK: "filename block (0x00)",
DATA_BLOCK: "data block (0x01)",
EOF_BLOCK: "end-of-file block (0xff)",
}
# File types:
FTYPE_BASIC = 0x00
FTYPE_DATA = 0x01
FTYPE_BIN = 0x02
FILETYPE_DICT = {
FTYPE_BASIC: "BASIC programm (0x00)",
FTYPE_DATA: "Data file (0x01)",
FTYPE_BIN: "Binary machine code file (0x02)",
}
# Basic format types:
BASIC_TOKENIZED = 0x00
BASIC_ASCII = 0xff
BASIC_TYPE_DICT = {
BASIC_TOKENIZED: "tokenized BASIC (0x00)",
BASIC_ASCII: "ASCII BASIC (0xff)",
}
# The gap flag
NO_GAPS = 0x00
GAPS = 0xff
# Convert to uppercase if source is .bas and to lowercase if destination is .bas
case_convert = False
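# A quick way to inspect the effective Dragon 32 settings (sketch):
#   Dragon32Config().print_debug_info()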
if __name__ == "__main__":
import doctest
print(doctest.testmod(
verbose=False
# verbose=True
))
|
The Maison Belfort Möbel Neu Xxl Möbel Line article is intended as inspiration for home interiors. It was posted as a source of ideas for remodeling your accessories, and it can serve as a reference when you are unsure how to choose the right decoration for your home. It may be your best option for decorating, because having a home with our own design is everyone's dream.
We hope that by posting these Maison Belfort Möbel Neu Xxl Möbel Line ideas, we can meet your need for inspiration in designing your accessories. If you need more ideas for designing home and interior accessories, you can check the collection right below this post. Also, don't forget to visit 21stcenturyfluency.com to find fresh new posts about this and other inspiration every day.
|
"""
Generalized Recommender models and utility classes.
This module contains basic memory recommender interfaces used throughout
the whole scikit-crab package, as well as utility classes.
The interfaces are realized as abstract base classes (i.e., some optional
functionality is provided in the interface itself, so that the interfaces
can be subclassed).
"""
# Author: Marcel Caraciolo <[email protected]>
#
# License: BSD Style.
from ..base import MemoryBasedRecommender
#===========================
#Item-based Recommender Interface
class ItemRecommender(MemoryBasedRecommender):
def most_similar_items(self, item_id, how_many=None):
'''
Return the most similar items to the given item, ordered
from most similar to least.
Parameters
-----------
item_id: int or string
ID of item for which to find most similar other items
how_many: int
Desired number of most similar items to find
'''
raise NotImplementedError("ItemRecommender is an abstract class.")
def recommended_because(self, user_id, item_id, how_many, **params):
'''
Returns the items that were most influential in recommending a given item
to a given user. In most implementations, this method will return items
that the user prefers and that are similar to the given item.
Parameters
-----------
user_id : int or string
ID of the user who was recommended the item
item_id: int or string
ID of item that was recommended
how_many: int
Maximum number of items to return.
Returns
----------
        The list of items ordered from most influential in recommending the given item to least.
'''
raise NotImplementedError("ItemRecommender is an abstract class.")
#===========================
#User-based Recommender Interface
class UserRecommender(MemoryBasedRecommender):
def most_similar_users(self, user_id, how_many=None):
'''
Return the most similar users to the given user, ordered
from most similar to least.
Parameters
-----------
user_id: int or string
ID of user for which to find most similar other users
how_many: int
Desired number of most similar users to find
'''
raise NotImplementedError("UserRecommender is an abstract class.")
def recommended_because(self, user_id, item_id, how_many, **params):
'''
Returns the users that were most influential in recommending a given item
to a given user. In most implementations, this method will return users
that prefers the recommended item and that are similar to the given user.
Parameters
-----------
user_id : int or string
ID of the user who was recommended the item
item_id: int or string
ID of item that was recommended
how_many: int
Maximum number of items to return.
Returns
----------
        The list of users ordered from most influential in recommending the given item to least.
'''
raise NotImplementedError("UserRecommender is an abstract class.")
#===========================
# Base Item Candidate Strategy
class BaseCandidateItemsStrategy(object):
'''
Base implementation for retrieving
all items that could possibly be recommended to the user
'''
def candidate_items(self, user_id, data_model, **params):
'''
Return the candidate items that could possibly be recommended to the user
Parameters
-----------
user_id: int or string
ID of user for which to find most similar other users
data_model: The data model that will be the source for the possible
candidates
'''
raise NotImplementedError("BaseCandidateItemsStrategy is an abstract class.")
#===========================
# Base User Candidates Strategies
class BaseUserNeighborhoodStrategy(object):
'''
Base implementation for retrieving
all users that could possibly be select as part of the neighborhood.
'''
def user_neighborhood(self, user_id, data_model, n_similarity='user_similarity',
distance=None, n_users=None, **params):
'''
Computes a neighborhood consisting of the n users most similar to a given
user, based on the strategy implemented in this method.
Parameters
-----------
user_id: int or string
ID of user for which to find most similar other users
data_model: DataModel instance
The data model that will be the source for the possible
candidates
n_similarity: string
The similarity to compute the neighborhood (default = user_similarity)
distance: function
Pairwise metric to compute the similarity between the users.
n_users: int
The neighborhood size (default = None, meaning all users)
'''
raise NotImplementedError("BaseCandidateItemsStrategy is an abstract class.")
|
Pascack Brook County Park Volunteer Opportunity!!!!
A unique opportunity will open up to the general public to help plant native plants and remove invasive plants around the pond at Pascack Brook County Park in Westwood NJ on the weekend of May 15th and 16th. There will be a training session, and breakfast/lunch will be served. Click here for more information.
|
# coding: utf-8
#
# Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python file with invalid syntax, used by scripts/linters/
python_linter_test. This file uses the __metaclass__ tag, which is not allowed.
"""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import python_utils
class FakeClass(python_utils.OBJECT):
"""This is a fake docstring for invalid syntax purposes."""
def __init__(self, fake_arg):
self.fake_arg = fake_arg
def fake_method(self, name):
"""This doesn't do anything.
Args:
name: str. Means nothing.
Yields:
tuple(str, str). The argument passed in but twice in a tuple.
"""
yield (name, name)
class MyObject:
"""This is a fake docstring."""
__metaclass__ = FakeClass # Use of __metaclass__ is not allowed
def __init__(self, fake_arg):
self.fake_arg = fake_arg
|
SALISBURY, MD– February, 2015– Apple Discount Drugs Diabetes Center has renewed its accredited diabetes education program with the American Association of Diabetes Educators (AADE). Local community members will continue to have access to quality diabetes education services, including one-on-one and group counseling, workshops and diabetes product education and training. This current certification assures quality education through the year 2019.
Diabetes self-management education is a process led by a certified diabetes educator that helps people with pre-diabetes or diabetes learn how to self-manage their condition. The goal is to equip each person with the necessary knowledge and confidence to make informed decisions about their diabetes. Accreditation ensures that each program meets the National Standards for Diabetes Self-Management Education and Support.
Through the Apple Drugs Diabetes Center, John Motsko and Geoff Twigg offer one-on-one and group counseling sessions to educate, provide resources and support diabetic patients in managing the disease. During each counseling session, John and Geoff provide information on nutrition, lifestyle changes, stress and coping strategies and also discuss the correct use of diabetes medicines. They teach customers about the different types of blood glucose meters and discuss the importance of testing each meter to make sure it is functioning properly. The Center also offers blood glucose meter data analysis and download service to allow patients to “see” their blood sugar readings more effectively. Patients are encouraged to call for an appointment for their consultation.
Motsko is a registered pharmacist and a Certified Diabetes Educator. He has been working in the diabetes field for over 4 decades. Twigg is a PharmD, a Certified Diabetes Educator and is also Board Certified in Ambulatory Care Pharmacy. Geoff’s specialty is in Medication Therapy Management. Together they combine to bring almost 50 years of diabetes experience to their patients.
|
#
# Substitute one filename for another in open and stat64 calls
#
# Copyright 2003 Jiri Dobes <[email protected]>
# Can be freely distributed and used under the terms of the GNU GPL.
#
# Version 0.0.1
from Trick import Trick
from Memory import *
class File(Trick):
def usage(self):
return """
This trick substitute one filename by another in calling open or state64.
Usefull to change hardcoded filenames.
Example:
sf --trick=File:\'file=[[\"a\",\"c\"],[\"b\",\"d\"]]\' cp a b
instead of copying \"a\" to \"b\" it copy file \"c\" to file \"d\"
(altrough some warining message is issued).
"""
def __init__(self, options):
if options.has_key('file'):
self.options = options[ 'file' ]
else:
self.options = [["",""]] #do nothing by default
def callmask( self ):
return { 'open' : 1, 'stat64' : 1 }
def callbefore(self, pid, call, args):
"Perform argument substitution"
#both calls have same argument structure
if( call == "open" or call == "stat64" ):
list = self.options
m = getMemory(pid)
address = args[0]
filename = m.get_string(address)
for x in list:
if( filename == x[0] ):
area, asize = m.areas()[0]
m.poke( area , x[1] + '%c' % 0 , self )
newargs = ( area , args[ 1 ], args[ 2 ] )
return (None, None, None , newargs )
if( call == "stat" ): #maybe include above?
print "stat is not yet implemented!"
|
Wasserhahn Für Küche. Did you know that Wasserhahn Für Küche is most likely one of the hottest topics in this category? That's why we're showing this content at this time. We took this image from the internet and consider it one of the most representative pictures of Wasserhahn Für Küche.
We all know everyone's judgment will differ from each other's. Likewise with this picture: in our opinion, this really is one of the greatest images. Now, what's your opinion?
This knowledge about Wasserhahn Für Küche has been submitted by admin in this category section. Please leave a comment here. Thank you.
|
import operator as op
from functools import reduce
def ncr(n, r):  # binomial coefficient: n choose r
r = min(r, n-r)
if r == 0: return 1
numer = reduce(op.mul, range(n, n-r, -1))
denom = reduce(op.mul, range(1, r+1))
return numer//denom
def p(n, r):  # probability of exactly r successes in n independent fair (p = 1/2) trials
return ncr(n, r) / 2**n
f8 = 0  # base case of the recurrence: nothing left, zero expected rounds
# f7 = 1 + p(1, 1)*f8 + p(1, 0)*f7, so (1 - p(1, 0))*f7 = 1 + p(1, 1)*f8, giving:
f7 = (1 + p(1, 1)*f8) / (1 - p(1, 0))
f6 = (1 + p(2, 1)*f7 + p(2, 2)*f8) / (1 - p(2, 0))
f5 = (1 + p(3, 1)*f6 + p(3, 2)*f7 + p(3, 3)*f8) / (1 - p(3, 0))
f4 = (1 + p(4, 1)*f5 + p(4, 2)*f6 + p(4, 3)*f7 + p(4, 4)*f8) / (1 - p(4, 0))
f3 = (1 + p(5, 1)*f4 + p(5, 2)*f5 + p(5, 3)*f6 + p(5, 4)*f7 + p(5, 5)*f8) / (1 - p(5, 0))
f2 = (1 + p(6, 1)*f3 + p(6, 2)*f4 + p(6, 3)*f5 + p(6, 4)*f6 + p(6, 5)*f7 + p(6, 6)*f8) / (1 - p(6, 0))
f1 = (1 + p(7, 1)*f2 + p(7, 2)*f3 + p(7, 3)*f4 + p(7, 4)*f5 + p(7, 5)*f6 + p(7, 6)*f7 + p(7, 7)*f8) / (1 - p(7, 0))
f0 = (1 + p(8, 1)*f1 + p(8, 2)*f2 + p(8, 3)*f3 + p(8, 4)*f4 + p(8, 5)*f5 + p(8, 6)*f6 + p(8, 7)*f7 + p(8, 8)*f8) / (1 - p(8, 0))
print(f8)
print(f7)
print(f6)
print(f5)
print(f4)
print(f3)
print(f2)
print(f1)
print(round(f0, 10))
f = {32: 0}  # the same recurrence, generalized from 8 to 32
for n in reversed(range(32)):
remaining = 32 - n
fn = (1 + sum(p(remaining, i)*f[n + i] for i in range(1, remaining + 1))) / (1 - p(remaining, 0))
f[n] = fn
print(f)
print(round(f[0], 10))
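# Sanity-check sketch (added): the hand-unrolled f0..f8 block above is the
# size-8 instance of the loop's recurrence, so recomputing with the general
# form should reproduce f0 up to floating-point noise.
def expected_rounds(size):
    f = {size: 0}
    for n in reversed(range(size)):
        remaining = size - n
        f[n] = (1 + sum(p(remaining, i) * f[n + i]
                        for i in range(1, remaining + 1))) / (1 - p(remaining, 0))
    return f[0]

assert abs(expected_rounds(8) - f0) < 1e-9
print(round(expected_rounds(8), 10))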
|
It has Ponderosa Pine, so that eliminates 98% of the province... I will go with King Edward Lake rd. coming down into Coldstream.
So far dirtybiker is the closest, though I have had a correct answer via PM from the sidelines.
Coming down Princeton rd in Peachland?
I'll hazard another guess, I'm kinda torn on what to name next, process of elimination.
I have travelled on most all of the main access roads to our back-country.
No. This is facing roughly south west looking down a very popular forest service road quite near to Kelowna. On this particular trip I was coming home from Ice Fishing. I didn't catch anything, and if I did it would have been small.
relatively level at that spot, fairly well maintained, wide.
Overlooking rounded highlands, not rugged mtn. tops.
Pretty much has to be Beaver Lake FSR.
Thank you for passing the torch. I'll get something posted asap.
Queen K to see if she would post in place of me.
If not, I'll hand it off elsewhere.
Okedoky everyone, please name this local park.
What can I say, I'm a fast typer nowadays.
Smoke, show us what you got.
What took you so long?????????
|
#!/usr/bin/env python3
import sys
import numpy as np
import sympy as sp
from selfdrive.locationd.models.constants import ObservationKind
from rednose.helpers.ekf_sym import EKF_sym, gen_code
from rednose.helpers.sympy_helpers import euler_rotate, quat_matrix_r, quat_rotate
EARTH_GM = 3.986005e14 # m^3/s^2 (gravitational constant * mass of earth)
class States():
ECEF_POS = slice(0, 3) # x, y and z in ECEF in meters
ECEF_ORIENTATION = slice(3, 7) # quat for pose of phone in ecef
ECEF_VELOCITY = slice(7, 10) # ecef velocity in m/s
ANGULAR_VELOCITY = slice(10, 13) # roll, pitch and yaw rates in device frame in radians/s
GYRO_BIAS = slice(13, 16) # roll, pitch and yaw biases
ODO_SCALE = slice(16, 17) # odometer scale
ACCELERATION = slice(17, 20) # Acceleration in device frame in m/s**2
IMU_OFFSET = slice(20, 23) # imu offset angles in radians
# Error-state has different slices because it is an ESKF
ECEF_POS_ERR = slice(0, 3)
ECEF_ORIENTATION_ERR = slice(3, 6) # euler angles for orientation error
ECEF_VELOCITY_ERR = slice(6, 9)
ANGULAR_VELOCITY_ERR = slice(9, 12)
GYRO_BIAS_ERR = slice(12, 15)
ODO_SCALE_ERR = slice(15, 16)
ACCELERATION_ERR = slice(16, 19)
IMU_OFFSET_ERR = slice(19, 22)
class LiveKalman():
name = 'live'
initial_x = np.array([-2.7e6, 4.2e6, 3.8e6,
1, 0, 0, 0,
0, 0, 0,
0, 0, 0,
0, 0, 0,
1,
0, 0, 0,
0, 0, 0])
# state covariance
initial_P_diag = np.array([1e16, 1e16, 1e16,
1e6, 1e6, 1e6,
1e4, 1e4, 1e4,
1**2, 1**2, 1**2,
0.05**2, 0.05**2, 0.05**2,
0.02**2,
1**2, 1**2, 1**2,
(0.01)**2, (0.01)**2, (0.01)**2])
# process noise
Q = np.diag([0.03**2, 0.03**2, 0.03**2,
0.001**2, 0.001**2, 0.001**2,
0.01**2, 0.01**2, 0.01**2,
0.1**2, 0.1**2, 0.1**2,
(0.005 / 100)**2, (0.005 / 100)**2, (0.005 / 100)**2,
(0.02 / 100)**2,
3**2, 3**2, 3**2,
(0.05 / 60)**2, (0.05 / 60)**2, (0.05 / 60)**2])
@staticmethod
def generate_code(generated_dir):
name = LiveKalman.name
dim_state = LiveKalman.initial_x.shape[0]
dim_state_err = LiveKalman.initial_P_diag.shape[0]
state_sym = sp.MatrixSymbol('state', dim_state, 1)
state = sp.Matrix(state_sym)
x, y, z = state[States.ECEF_POS, :]
q = state[States.ECEF_ORIENTATION, :]
v = state[States.ECEF_VELOCITY, :]
vx, vy, vz = v
omega = state[States.ANGULAR_VELOCITY, :]
vroll, vpitch, vyaw = omega
roll_bias, pitch_bias, yaw_bias = state[States.GYRO_BIAS, :]
odo_scale = state[States.ODO_SCALE, :][0, :]
acceleration = state[States.ACCELERATION, :]
imu_angles = state[States.IMU_OFFSET, :]
dt = sp.Symbol('dt')
# calibration and attitude rotation matrices
quat_rot = quat_rotate(*q)
# Got the quat predict equations from here
# A New Quaternion-Based Kalman Filter for
# Real-Time Attitude Estimation Using the Two-Step
# Geometrically-Intuitive Correction Algorithm
A = 0.5 * sp.Matrix([[0, -vroll, -vpitch, -vyaw],
[vroll, 0, vyaw, -vpitch],
[vpitch, -vyaw, 0, vroll],
[vyaw, vpitch, -vroll, 0]])
q_dot = A * q
# Time derivative of the state as a function of state
state_dot = sp.Matrix(np.zeros((dim_state, 1)))
state_dot[States.ECEF_POS, :] = v
state_dot[States.ECEF_ORIENTATION, :] = q_dot
state_dot[States.ECEF_VELOCITY, 0] = quat_rot * acceleration
# Basic discretization, 1st-order integrator
# Can be pretty bad if dt is big
f_sym = state + dt * state_dot
state_err_sym = sp.MatrixSymbol('state_err', dim_state_err, 1)
state_err = sp.Matrix(state_err_sym)
quat_err = state_err[States.ECEF_ORIENTATION_ERR, :]
v_err = state_err[States.ECEF_VELOCITY_ERR, :]
omega_err = state_err[States.ANGULAR_VELOCITY_ERR, :]
acceleration_err = state_err[States.ACCELERATION_ERR, :]
# Time derivative of the state error as a function of state error and state
quat_err_matrix = euler_rotate(quat_err[0], quat_err[1], quat_err[2])
q_err_dot = quat_err_matrix * quat_rot * (omega + omega_err)
state_err_dot = sp.Matrix(np.zeros((dim_state_err, 1)))
state_err_dot[States.ECEF_POS_ERR, :] = v_err
state_err_dot[States.ECEF_ORIENTATION_ERR, :] = q_err_dot
state_err_dot[States.ECEF_VELOCITY_ERR, :] = quat_err_matrix * quat_rot * (acceleration + acceleration_err)
f_err_sym = state_err + dt * state_err_dot
# Observation matrix modifier
H_mod_sym = sp.Matrix(np.zeros((dim_state, dim_state_err)))
H_mod_sym[States.ECEF_POS, States.ECEF_POS_ERR] = np.eye(States.ECEF_POS.stop - States.ECEF_POS.start)
H_mod_sym[States.ECEF_ORIENTATION, States.ECEF_ORIENTATION_ERR] = 0.5 * quat_matrix_r(state[3:7])[:, 1:]
H_mod_sym[States.ECEF_ORIENTATION.stop:, States.ECEF_ORIENTATION_ERR.stop:] = np.eye(dim_state - States.ECEF_ORIENTATION.stop)
# these error functions are defined so that say there
# is a nominal x and true x:
# true x = err_function(nominal x, delta x)
# delta x = inv_err_function(nominal x, true x)
nom_x = sp.MatrixSymbol('nom_x', dim_state, 1)
true_x = sp.MatrixSymbol('true_x', dim_state, 1)
delta_x = sp.MatrixSymbol('delta_x', dim_state_err, 1)
err_function_sym = sp.Matrix(np.zeros((dim_state, 1)))
delta_quat = sp.Matrix(np.ones((4)))
delta_quat[1:, :] = sp.Matrix(0.5 * delta_x[States.ECEF_ORIENTATION_ERR, :])
err_function_sym[States.ECEF_POS, :] = sp.Matrix(nom_x[States.ECEF_POS, :] + delta_x[States.ECEF_POS_ERR, :])
err_function_sym[States.ECEF_ORIENTATION, 0] = quat_matrix_r(nom_x[States.ECEF_ORIENTATION, 0]) * delta_quat
err_function_sym[States.ECEF_ORIENTATION.stop:, :] = sp.Matrix(nom_x[States.ECEF_ORIENTATION.stop:, :] + delta_x[States.ECEF_ORIENTATION_ERR.stop:, :])
inv_err_function_sym = sp.Matrix(np.zeros((dim_state_err, 1)))
inv_err_function_sym[States.ECEF_POS_ERR, 0] = sp.Matrix(-nom_x[States.ECEF_POS, 0] + true_x[States.ECEF_POS, 0])
delta_quat = quat_matrix_r(nom_x[States.ECEF_ORIENTATION, 0]).T * true_x[States.ECEF_ORIENTATION, 0]
inv_err_function_sym[States.ECEF_ORIENTATION_ERR, 0] = sp.Matrix(2 * delta_quat[1:])
inv_err_function_sym[States.ECEF_ORIENTATION_ERR.stop:, 0] = sp.Matrix(-nom_x[States.ECEF_ORIENTATION.stop:, 0] + true_x[States.ECEF_ORIENTATION.stop:, 0])
eskf_params = [[err_function_sym, nom_x, delta_x],
[inv_err_function_sym, nom_x, true_x],
H_mod_sym, f_err_sym, state_err_sym]
#
# Observation functions
#
#imu_rot = euler_rotate(*imu_angles)
h_gyro_sym = sp.Matrix([vroll + roll_bias,
vpitch + pitch_bias,
vyaw + yaw_bias])
pos = sp.Matrix([x, y, z])
gravity = quat_rot.T * ((EARTH_GM / ((x**2 + y**2 + z**2)**(3.0 / 2.0))) * pos)
h_acc_sym = (gravity + acceleration)
h_phone_rot_sym = sp.Matrix([vroll, vpitch, vyaw])
speed = sp.sqrt(vx**2 + vy**2 + vz**2 + 1e-6)
h_speed_sym = sp.Matrix([speed * odo_scale])
h_pos_sym = sp.Matrix([x, y, z])
h_vel_sym = sp.Matrix([vx, vy, vz])
h_orientation_sym = q
h_imu_frame_sym = sp.Matrix(imu_angles)
h_relative_motion = sp.Matrix(quat_rot.T * v)
obs_eqs = [[h_speed_sym, ObservationKind.ODOMETRIC_SPEED, None],
[h_gyro_sym, ObservationKind.PHONE_GYRO, None],
[h_phone_rot_sym, ObservationKind.NO_ROT, None],
[h_acc_sym, ObservationKind.PHONE_ACCEL, None],
[h_pos_sym, ObservationKind.ECEF_POS, None],
[h_vel_sym, ObservationKind.ECEF_VEL, None],
[h_orientation_sym, ObservationKind.ECEF_ORIENTATION_FROM_GPS, None],
[h_relative_motion, ObservationKind.CAMERA_ODO_TRANSLATION, None],
[h_phone_rot_sym, ObservationKind.CAMERA_ODO_ROTATION, None],
[h_imu_frame_sym, ObservationKind.IMU_FRAME, None]]
gen_code(generated_dir, name, f_sym, dt, state_sym, obs_eqs, dim_state, dim_state_err, eskf_params)
def __init__(self, generated_dir):
self.dim_state = self.initial_x.shape[0]
self.dim_state_err = self.initial_P_diag.shape[0]
self.obs_noise = {ObservationKind.ODOMETRIC_SPEED: np.atleast_2d(0.2**2),
ObservationKind.PHONE_GYRO: np.diag([0.025**2, 0.025**2, 0.025**2]),
ObservationKind.PHONE_ACCEL: np.diag([.5**2, .5**2, .5**2]),
ObservationKind.CAMERA_ODO_ROTATION: np.diag([0.05**2, 0.05**2, 0.05**2]),
ObservationKind.IMU_FRAME: np.diag([0.05**2, 0.05**2, 0.05**2]),
ObservationKind.NO_ROT: np.diag([0.00025**2, 0.00025**2, 0.00025**2]),
ObservationKind.ECEF_POS: np.diag([5**2, 5**2, 5**2]),
ObservationKind.ECEF_VEL: np.diag([.5**2, .5**2, .5**2]),
ObservationKind.ECEF_ORIENTATION_FROM_GPS: np.diag([.2**2, .2**2, .2**2, .2**2])}
# init filter
self.filter = EKF_sym(generated_dir, self.name, self.Q, self.initial_x, np.diag(self.initial_P_diag), self.dim_state, self.dim_state_err, max_rewind_age=0.2)
@property
def x(self):
return self.filter.state()
@property
def t(self):
return self.filter.filter_time
@property
def P(self):
return self.filter.covs()
def rts_smooth(self, estimates):
return self.filter.rts_smooth(estimates, norm_quats=True)
def init_state(self, state, covs_diag=None, covs=None, filter_time=None):
if covs_diag is not None:
P = np.diag(covs_diag)
elif covs is not None:
P = covs
else:
P = self.filter.covs()
self.filter.init_state(state, P, filter_time)
def predict_and_observe(self, t, kind, meas, R=None):
if len(meas) > 0:
meas = np.atleast_2d(meas)
if kind == ObservationKind.CAMERA_ODO_TRANSLATION:
r = self.predict_and_update_odo_trans(meas, t, kind)
elif kind == ObservationKind.CAMERA_ODO_ROTATION:
r = self.predict_and_update_odo_rot(meas, t, kind)
elif kind == ObservationKind.ODOMETRIC_SPEED:
r = self.predict_and_update_odo_speed(meas, t, kind)
else:
if R is None:
R = self.get_R(kind, len(meas))
elif len(R.shape) == 2:
R = R[None]
r = self.filter.predict_and_update_batch(t, kind, meas, R)
# Normalize quats
quat_norm = np.linalg.norm(self.filter.x[3:7, 0])
self.filter.x[States.ECEF_ORIENTATION, 0] = self.filter.x[States.ECEF_ORIENTATION, 0] / quat_norm
return r
def get_R(self, kind, n):
obs_noise = self.obs_noise[kind]
dim = obs_noise.shape[0]
R = np.zeros((n, dim, dim))
for i in range(n):
R[i, :, :] = obs_noise
return R
def predict_and_update_odo_speed(self, speed, t, kind):
z = np.array(speed)
R = np.zeros((len(speed), 1, 1))
for i, _ in enumerate(z):
R[i, :, :] = np.diag([0.2**2])
return self.filter.predict_and_update_batch(t, kind, z, R)
def predict_and_update_odo_trans(self, trans, t, kind):
z = trans[:, :3]
R = np.zeros((len(trans), 3, 3))
for i, _ in enumerate(z):
R[i, :, :] = np.diag(trans[i, 3:]**2)
return self.filter.predict_and_update_batch(t, kind, z, R)
def predict_and_update_odo_rot(self, rot, t, kind):
z = rot[:, :3]
R = np.zeros((len(rot), 3, 3))
for i, _ in enumerate(z):
R[i, :, :] = np.diag(rot[i, 3:]**2)
return self.filter.predict_and_update_batch(t, kind, z, R)
if __name__ == "__main__":
generated_dir = sys.argv[2]
LiveKalman.generate_code(generated_dir)
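# Usage sketch (an assumption, not part of the original file): once the
# generated filter code has been built for `generated_dir`, the filter can
# be driven with timestamped observations, e.g.:
#
#   kf = LiveKalman(generated_dir)
#   kf.init_state(LiveKalman.initial_x,
#                 covs_diag=LiveKalman.initial_P_diag, filter_time=0.0)
#   kf.predict_and_observe(0.01, ObservationKind.PHONE_GYRO,
#                          np.array([[0.0, 0.0, 0.0]]))
#   print(kf.x[States.ECEF_POS])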
|
DUE TO THE THANKSGIVING HOLIDAY, GARBAGE/TRASH WILL BE PICKED UP ON SATURDAY, NOVEMBER 26TH INSTEAD OF FRIDAY.
The registration for our semi-annual 3v3 tournament has been released. Click HERE for a copy to submit to city hall or visit the parks and rec page for instructions to register online!
Meet K-9 Caesar at “Yappy Hour” on November 15th.
Meet HSPD K-9 Caesar who will be wearing his new Bullet Resistant Vest which was donated by Clare Nobles.
|
# -*- coding: utf-8 -*-
#
# test_labeled_synapses.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Test setting and getting labels on synapses.
"""
import unittest
import nest
HAVE_GSL = nest.sli_func("statusdict/have_gsl ::")
@nest.check_stack
@unittest.skipIf(not HAVE_GSL, 'GSL is not available')
class LabeledSynapsesTestCase(unittest.TestCase):
"""Test labeled synapses"""
def default_network(self):
nest.ResetKernel()
# set volume transmitter for stdp_dopamine_synapse_lbl
vol = nest.Create('volume_transmitter', 3)
nest.SetDefaults('stdp_dopamine_synapse', {'vt': vol[0]})
nest.SetDefaults('stdp_dopamine_synapse_lbl', {'vt': vol[1]})
nest.SetDefaults('stdp_dopamine_synapse_hpc', {'vt': vol[2]})
# create neurons that accept all synapse connections (especially gap
# junctions)... hh_psc_alpha_gap is only available with GSL, hence the
# skipIf above
return nest.Create("hh_psc_alpha_gap", 5)
def test_SetLabelToSynapseOnConnect(self):
"""Set a label to a labeled synapse on connect."""
labeled_synapse_models = [s for s in nest.Models(
mtype='synapses') if s.endswith("_lbl")]
for syn in labeled_synapse_models:
a = self.default_network()
# set a label during connection
nest.Connect(a, a, {"rule": "one_to_one"}, {
"model": syn, "synapse_label": 123})
c = nest.GetConnections(a, a)
self.assertTrue(
all([
status['synapse_label'] == 123
for status in nest.GetStatus(c)
])
)
def test_SetLabelToSynapseSetStatus(self):
"""Set a label to a labeled synapse on SetStatus."""
labeled_synapse_models = [s for s in nest.Models(
mtype='synapses') if s.endswith("_lbl")]
for syn in labeled_synapse_models:
a = self.default_network()
# set no label during connection
nest.Connect(a, a, {"rule": "one_to_one"}, {"model": syn})
c = nest.GetConnections(a, a)
# still unlabeled
self.assertTrue(
all([
status['synapse_label'] == -1
for status in nest.GetStatus(c)
])
)
# set a label
nest.SetStatus(c, {'synapse_label': 123})
self.assertTrue(
all([
status['synapse_label'] == 123
for status in nest.GetStatus(c)
])
)
def test_SetLabelToSynapseSetDefaults(self):
"""Set a label to a labeled synapse on SetDefaults."""
labeled_synapse_models = [s for s in nest.Models(
mtype='synapses') if s.endswith("_lbl")]
for syn in labeled_synapse_models:
a = self.default_network()
# set a label during SetDefaults
nest.SetDefaults(syn, {'synapse_label': 123})
nest.Connect(a, a, {"rule": "one_to_one"}, {"model": syn})
c = nest.GetConnections(a, a)
self.assertTrue(
all([
status['synapse_label'] == 123
for status in nest.GetStatus(c)
])
)
def test_GetLabeledSynapses(self):
"""Get labeled synapses with GetConnections."""
labeled_synapse_models = [s for s in nest.Models(
mtype='synapses') if s.endswith("_lbl")]
for syn in labeled_synapse_models:
a = self.default_network()
# some more connections
nest.Connect(a, a, {"rule": "one_to_one"},
{"model": "static_synapse"})
# set a label during connection
nest.Connect(a, a, {"rule": "one_to_one"}, {
"model": syn, "synapse_label": 123})
c = nest.GetConnections(a, a, synapse_label=123)
self.assertTrue(
all([
status['synapse_label'] == 123
for status in nest.GetStatus(c)
])
)
def test_SetLabelToNotLabeledSynapse(self):
"""Try set a label to an 'un-label-able' synapse."""
labeled_synapse_models = [s for s in nest.Models(
mtype='synapses') if not s.endswith("_lbl")]
for syn in labeled_synapse_models:
a = self.default_network()
# try to set a label during SetDefaults
with self.assertRaises(nest.NESTError):
nest.SetDefaults(syn, {'synapse_label': 123})
# try to set on connect
with self.assertRaises(nest.NESTError):
nest.Connect(a, a, {"rule": "one_to_one"}, {
"model": syn, "synapse_label": 123})
# plain connection
nest.Connect(a, a, {"rule": "one_to_one"}, {"model": syn})
# try to set on SetStatus
c = nest.GetConnections(a, a)
with self.assertRaises(nest.NESTError):
nest.SetStatus(c, {'synapse_label': 123})
def suite():
suite = unittest.makeSuite(LabeledSynapsesTestCase, 'test')
return suite
def run():
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite())
if __name__ == "__main__":
run()
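# Minimal interactive sketch (an addition; it assumes the same NEST 2.x API
# used by the tests above) showing a labeled connection outside the test
# harness:
#
#   import nest
#   nest.ResetKernel()
#   neurons = nest.Create("iaf_psc_alpha", 2)
#   nest.Connect(neurons, neurons, {"rule": "one_to_one"},
#                {"model": "static_synapse_lbl", "synapse_label": 42})
#   conns = nest.GetConnections(synapse_label=42)
#   print(nest.GetStatus(conns, "synapse_label"))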
|
This free Franklin county rx card program is being produced to help all Americans cut their prescription drug costs. Simply download your FREE Franklin county rx card and receive savings of up to 75% at more than 57,000 national, regional, and local pharmacies. Start Saving Today!
CountyRxCard.com has produced a short informational video commercial. County Commissioners are helping all patients understand how easy it is to use this Franklin county rx card.
|
#####################################################################
##
## gradefiles-push.py
##
## Script to post grade files to the gsubmit directories of
## enrolled students; the input grade file names should correspond
## to the user names of the students.
##
##
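## Example (hypothetical course and file names): running
##
##   % python gradefiles-push.py 320 Fall 2016 ps3
##
## would post ./data/alice.txt to
##
##   /cs/course/cs320/Fall-2016/homework/spool/alice/grade.ps3.txt
##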
import sys # For command line arguments.
import os # For commands and file manipulation (walk, path, system).
#####################################################################
## ASCII escape sequence macros for color output on the terminal.
##
class bcolors:
PURPLE = '\033[95m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
ENDC = '\033[0m'
def printred(s): print(bcolors.RED + s + bcolors.ENDC)
def printblue(s): print(bcolors.BLUE + s + bcolors.ENDC)
def printyellow(s): print(bcolors.YELLOW + s + bcolors.ENDC)
def printpurple(s): print(bcolors.PURPLE + s + bcolors.ENDC)
#####################################################################
## Process the command line parameters.
##
if len(sys.argv) == 5\
and int(sys.argv[1]) in range(100,1000)\
and sys.argv[2] in ['Fall', 'Spring']\
and int(sys.argv[3]) in range(2000,2100):
courseNumber = sys.argv[1]
season = sys.argv[2]
year = sys.argv[3]
path = sys.argv[4]
task = path  # argv[4] is the task name; the 'path' variable is reused below
course = 'cs' + courseNumber + '/' + season + '-' + year
else:
print('\n Usage:\n\n % python gradefiles-push.py <###> <Fall|Spring> <YYYY> <task>\n')
exit()
#####################################################################
## Check for list of files.
##
if not os.path.exists('./data'):
print('No folder "data" containing grade files found. Exiting.')
exit()
#####################################################################
## Post the grade files.
##
for curdir, dirs, files in os.walk('./data/'):
for file in files:
txt = open('./data/'+file, 'r').read()
name = file.split('.')[0]
path = '/cs/course/' + course + '/homework/spool/'+name
target = path+'/grade.' + task + '.txt'
if os.path.exists(path):
open(target, 'w').write(txt)
print('Wrote file: ' + target)
else:
printred('Path '+path+' does not exist!')
#####################################################################
## Adjust grade file permissions so that students can read them.
##
os.system('chmod 0664 /cs/course/' + course + '/homework/spool/*/grade.' + task + '.txt')
#eof
|
Topic : "Sijun.com Forums End"
It's been a long time coming, and I'm not sure anyone is even active here anymore, but it didn't seem right to just close up without a word.
20 years ago this place started as one of the only, if not the only, places to share and talk about digital art. A lot of talent made its way through here, along with a ton of learning and feedback and discussion that made it a joy for me to host. I believe at one point I was getting 50,000 people through here a day. A number that is not very impressive today, but in 1999, quite a feat. The more I got caught up in my work, the less time I had to spend here, but this forum thrived for many years after I'd stopped frequenting it thanks to all your contributions. Quite frankly I am as impressed as I am surprised that it went on for as long as it did. A huge huge thank you to Rowan 'Sumaleth' Crawford for really being the groundskeeper here in my stead. Without him, I have no doubt this place would have died many years ago.
After discussing it with Rowan, rather than completely shut it down, I will be locking the forums sometime in the next week or two so that people can still come here for the history of it in the future. Unfortunately many of the image links are dead and not much can be seen by way of images, but I guess it feels wrong to wipe it from existence.
It's been my pleasure to be part of Sijun all this time, as both an occasional digital art practitioner but mostly, I guess, as just a big fan of digital art.
It's been a huge joy to see names of artists I remember from these forums from 15-20 years ago making waves in film, television, videogames, and education in the years since. I suspect a full list of Sijun alumni achievements would be mind boggling.
You were the last frequent poster man.
Wow, it's been 16 years for me, I joined in 2002. I did post under some different aliases during the years but my first one was Matthew and this was my last one.
I learned a lot from this place and not only artwise.
Nowadays I don't do art at all; I mostly play games in my spare time and work in the IT support field. I got my education as an Energy Technician after many years studying art, so I made a 180 and then a 720 and then a 180 and then a 180 again, I guess. I never got anywhere with my art and it made me start to hate everything related to creating. Maybe I will be back, we'll see. Anyway, gaming can be creative too. I am currently building a badass Dojo in Warframe lol, so I guess I still do something creative as long as it is within a game.
durgldeep, where are you man? We had a lot of fun with 3d.
Thank you very much for all the times and inspiration..
The love to make something and develop yourself as well as to give a nod when someone was pushing themselves always felt so authentic and inspiring here.
One of a kind place.
Thanks Dhabih, for making the place.
It was always a highlight of my day to see the amazing stuff people made.
Thanks to Sumaleth for keeping it alive for so long.
Thanks for all the great memories Dhabih!
What a fantastic creation! Sorry to see you close up shop.
You and everyone on this forum sculpted my path to becoming an Industrial designer.
I utilise the skills I learnt here on a daily basis.
Thanks also to Sumaleth, great job mate!
Thanks for your admin help too Freddio!! Glad to hear you are doing well and able to use your skills for work, that's the best.
Thank you Sijuners! You are all class and in a league of your own. Never have I come across a nicer and a friendlier place on the internet. I learnt so darn much here that it's hard to put into words.
So glad I was able to participate and be part of this.
What an awesome community of geeky digital artists it was.
PS: Thanx Sumaleth. QuakeRally 4 ever!!!
something (at least) they will choose to do - nothing.
Haha yea man I am making a statement here.
I do miss our 3d sessions though man, gave you something to strive for, for sure.
Thx man I remember we had a lot of fun.
I think it's only us, the last posters, who answered in this thread too.
It's amazing how much the field of digital painting has grown in the time that this forum was active. It was extremely important to my development as an artist, and I still remember a lot of the helpful critiques and paint-overs I received. You all rock!
This place was so important to me.
It's the reason I've been working as a concept artist now for 12 years.
I used to post here back in the early 2000s (I think I was 19 when I joined). Art was always a hobby, but I never had the discipline of practice and I always considered myself a doodler (especially compared to some of the art gods here). It really meant a lot to me to have a community like this back in those days - I was a new immigrant at that time and drawing was a bit of therapy I guess.
I'm 36 now and work in a completely unrelated field (marketing) but have started getting back into drawing and digital painting as a hobby (I even bought an old Cintiq, and my wife has the iPad Pro with the Pencil - in my days on Sijun I only ever used a Wacom Graphire tablet) and have started Googling to see if I can find an online community of artists like the old Sijun was. Unfortunately, nothing even remotely close to it exists today, so I'm trying to download various bits of art, tips and tutorials that still exist here so I don't lose them to the ravages of time.
I was sad to hear that Sijun will be closing, but I guess people like me who abandoned it are at least partly to blame. Looking back on things, I wish I could've kept my art habit going and had more discipline to practice and learn more effectively (maybe I would've even been doing something more artistic career-wise). Life has a funny way of getting in the way of things you actually enjoy the most. I guess that's my little rambling goodbye. With a little regret and a lot of good memories, so long Sijun, and thanks for all the art.
I started on here back in '98 when I was a little studio junior at an advertising agency, and the talent, advice, connections and community of constructive, helpful criticism of my work really helped me get to where I am today.
I learned a lot here. Thank you.
|
# -*- coding: utf-8 -*-
"""
Client Plaintext Upgrade
~~~~~~~~~~~~~~~~~~~~~~~~
This example code fragment demonstrates how to set up an HTTP/2 client that uses
the plaintext HTTP Upgrade mechanism to negotiate HTTP/2 connectivity. For
maximum explanatory value it uses the synchronous socket API that comes with
the Python standard library. In production code you will want to use an actual
HTTP/1.1 client if possible.
This code requires Python 3.5 or later.
"""
import h2.connection
import socket
def establish_tcp_connection():
"""
This function establishes a client-side TCP connection. How it works isn't
very important to this example. For the purpose of this example we connect
to localhost.
"""
return socket.create_connection(('localhost', 80))
def send_initial_request(connection, settings):
"""
For the sake of this upgrade demonstration, we're going to issue a GET
request against the root of the site. In principle the best request to
issue for an upgrade is actually ``OPTIONS *``, but this is remarkably
poorly supported and can break in weird ways.
"""
# Craft our initial request per RFC 7540 Section 3.2. This requires two
# special header fields: the Upgrade header, and the HTTP2-Settings header.
# The value of the HTTP2-Settings header field comes from h2.
request = (
b"GET / HTTP/1.1\r\n" +
b"Host: localhost\r\n" +
b"Upgrade: h2c\r\n" +
b"HTTP2-Settings: " + settings + "\r\n"
b"\r\n"
)
connection.sendall(request)
def get_upgrade_response(connection):
"""
This function reads from the socket until the HTTP/1.1 end-of-headers
sequence (CRLFCRLF) is received. It then checks what the status code of the
response is.
This is not a substitute for proper HTTP/1.1 parsing, but it's good enough
for example purposes.
"""
data = b''
while b'\r\n\r\n' not in data:
data += connection.recv(8192)
headers, rest = data.split(b'\r\n\r\n', 1)
# An upgrade response begins with HTTP/1.1 101 Switching Protocols. Look for the
# code. In production code you should also check that the upgrade is to
# h2c, but here we know we only offered one upgrade so there's only one
# possible upgrade in use.
split_headers = headers.split()
if split_headers[1] != b'101':
raise RuntimeError("Not upgrading!")
# We don't care about the HTTP/1.1 data anymore, but we do care about
# any other data we read from the socket: this is going to be HTTP/2 data
# that must be passed to the H2Connection.
return rest
def main():
"""
The client upgrade flow.
"""
# Step 1: Establish the TCP connection.
connection = establish_tcp_connection()
# Step 2: Create H2 Connection object, put it in upgrade mode, and get the
# value of the HTTP2-Settings header we want to use.
h2_connection = h2.connection.H2Connection()
settings_header_value = h2_connection.initiate_upgrade_connection()
# Step 3: Send the initial HTTP/1.1 request with the upgrade fields.
send_initial_request(connection, settings_header_value)
# Step 4: Read the HTTP/1.1 response, look for 101 response.
extra_data = get_upgrade_response(connection)
# Step 5: Immediately send the pending HTTP/2 data.
connection.sendall(h2_connection.data_to_send())
# Step 6: Feed the body data to the connection.
events = h2_connection.receive_data(extra_data)
# Now you can enter your main loop, beginning by processing the first set
# of events above. These events may include ResponseReceived, which will
# contain the response to the request we made in Step 3.
main_loop(events)
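# The function below is a sketch added for illustration; the original example
# leaves main_loop undefined. It only prints the first batch of events. A
# real client would keep reading from the socket and feeding the bytes to
# h2_connection.receive_data().
def main_loop(events):
    import h2.events

    for event in events:
        if isinstance(event, h2.events.ResponseReceived):
            print("Response headers:", event.headers)
        elif isinstance(event, h2.events.DataReceived):
            print("Received %d bytes of data" % len(event.data))
        elif isinstance(event, h2.events.StreamEnded):
            print("Stream %d ended" % event.stream_id)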
|
We are delighted that you have decided to include Lazy E Ranch in your breeding program and we look forward to having the opportunity to serve you. Please keep in mind that a stallion's book is filled on the basis of those booking fees that are received first. Returning your contract immediately, with your booking fee, will help ensure your place in a stallion's book.
ALL CONTRACTS NOT RETURNED WITHIN THIRTY (30) DAYS OF ISSUANCE ARE CONSIDERED NULL AND VOID.
If the mare remains in residence at the Lazy E Ranch to be bred, the fee will be billed when the mare is 30 days in foal. Or, if she departs prior to being confirmed in foal, the fee is due on departure, unless she is pronounced not in foal by our veterinarian prior to departure. If the mare is to be left at the Lazy E Ranch to breed through her heat cycle and then picked up following ovulation, the fee is due when the mare departs. If the mare is to be hauled in, bred and leave the same day, the fee is due before she is bred.
|