# Copyright (c) 2016, Xilinx, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION). HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
__author__ = "Yun Rock Qu"
__copyright__ = "Copyright 2016, Xilinx"
__email__ = "[email protected]"
import os
# Microblaze constants
BIN_LOCATION = os.path.dirname(os.path.realpath(__file__))+"/"
MAILBOX_PROGRAM = 'mailbox.bin'
IOP_FREQUENCY = 100000000
# IOP mailbox constants
MAILBOX_OFFSET = 0xF000
MAILBOX_SIZE = 0x1000
MAILBOX_PY2IOP_CMD_OFFSET = 0xffc
MAILBOX_PY2IOP_ADDR_OFFSET = 0xff8
MAILBOX_PY2IOP_DATA_OFFSET = 0xf00
# IOP mailbox commands
WRITE_CMD = 0
READ_CMD = 1
IOP_MMIO_REGSIZE = 0x10000
# IOP Switch Register Map
PMOD_SWITCHCONFIG_BASEADDR = 0x44A00000
PMOD_SWITCHCONFIG_NUMREGS = 8
# Each Pmod pin can be tied to digital IO, SPI, or IIC
PMOD_SWCFG_DIO0 = 0
PMOD_SWCFG_DIO1 = 1
PMOD_SWCFG_DIO2 = 2
PMOD_SWCFG_DIO3 = 3
PMOD_SWCFG_DIO4 = 4
PMOD_SWCFG_DIO5 = 5
PMOD_SWCFG_DIO6 = 6
PMOD_SWCFG_DIO7 = 7
PMOD_SWCFG_IIC0_SCL = 8
PMOD_SWCFG_IIC0_SDA = 9
# Switch config - all digital IOs
PMOD_SWCFG_DIOALL = [ PMOD_SWCFG_DIO0, PMOD_SWCFG_DIO1,
PMOD_SWCFG_DIO2, PMOD_SWCFG_DIO3,
PMOD_SWCFG_DIO4, PMOD_SWCFG_DIO5,
PMOD_SWCFG_DIO6, PMOD_SWCFG_DIO7]
# Switch config - IIC0, top row
PMOD_SWCFG_IIC0_TOPROW = [ PMOD_SWCFG_DIO0, PMOD_SWCFG_DIO1,
PMOD_SWCFG_IIC0_SCL, PMOD_SWCFG_IIC0_SDA,
PMOD_SWCFG_DIO2, PMOD_SWCFG_DIO3,
PMOD_SWCFG_DIO4, PMOD_SWCFG_DIO5]
# Switch config - IIC0, bottom row
PMOD_SWCFG_IIC0_BOTROW = [ PMOD_SWCFG_DIO0, PMOD_SWCFG_DIO1,
PMOD_SWCFG_DIO2, PMOD_SWCFG_DIO3,
PMOD_SWCFG_DIO4, PMOD_SWCFG_DIO5,
PMOD_SWCFG_IIC0_SCL, PMOD_SWCFG_IIC0_SDA]
# IIC register map
PMOD_XIIC_0_BASEADDR = 0x40800000
PMOD_XIIC_DGIER_OFFSET = 0x1C
PMOD_XIIC_IISR_OFFSET = 0x20
PMOD_XIIC_IIER_OFFSET = 0x28
PMOD_XIIC_RESETR_OFFSET = 0x40
PMOD_XIIC_CR_REG_OFFSET = 0x100
PMOD_XIIC_SR_REG_OFFSET = 0x104
PMOD_XIIC_DTR_REG_OFFSET = 0x108
PMOD_XIIC_DRR_REG_OFFSET = 0x10C
PMOD_XIIC_ADR_REG_OFFSET = 0x110
PMOD_XIIC_TFO_REG_OFFSET = 0x114
PMOD_XIIC_RFO_REG_OFFSET = 0x118
PMOD_XIIC_TBA_REG_OFFSET = 0x11C
PMOD_XIIC_RFD_REG_OFFSET = 0x120
PMOD_XIIC_GPO_REG_OFFSET = 0x124
# SPI register map
PMOD_SPI_0_BASEADDR = 0x44A10000
PMOD_XSP_DGIER_OFFSET = 0x1C
PMOD_XSP_IISR_OFFSET = 0x20
PMOD_XSP_IIER_OFFSET = 0x28
PMOD_XSP_SRR_OFFSET = 0x40
PMOD_XSP_CR_OFFSET = 0x60
PMOD_XSP_SR_OFFSET = 0x64
PMOD_XSP_DTR_OFFSET = 0x68
PMOD_XSP_DRR_OFFSET = 0x6C
PMOD_XSP_SSR_OFFSET = 0x70
PMOD_XSP_TFO_OFFSET = 0x74
PMOD_XSP_RFO_OFFSET = 0x78
# IO register map
PMOD_DIO_BASEADDR = 0x40000000
PMOD_DIO_DATA_OFFSET = 0x0
PMOD_DIO_TRI_OFFSET = 0x4
PMOD_DIO_DATA2_OFFSET = 0x8
PMOD_DIO_TRI2_OFFSET = 0xC
PMOD_DIO_GIE_OFFSET = 0x11C
PMOD_DIO_ISR_OFFSET = 0x120
PMOD_DIO_IER_OFFSET = 0x128
# AXI IO direction constants
PMOD_CFG_DIO_ALLOUTPUT = 0x0
PMOD_CFG_DIO_ALLINPUT = 0xff
# IOP switch register map
ARDUINO_SWITCHCONFIG_BASEADDR = 0x44A20000
ARDUINO_SWITCHCONFIG_NUMREGS = 19
# Each Arduino pin can be tied to analog/digital IO, interrupts, UART, PWM, timers, SPI, or IIC
ARDUINO_SWCFG_AIO = 0x0
ARDUINO_SWCFG_AINT = 0x0
ARDUINO_SWCFG_SDA = 0x2
ARDUINO_SWCFG_SCL = 0x3
ARDUINO_SWCFG_DIO = 0x0
ARDUINO_SWCFG_DUART = 0x1
ARDUINO_SWCFG_DINT = 0x1
ARDUINO_SWCFG_DPWM = 0x2
ARDUINO_SWCFG_DTIMERG = 0x3
ARDUINO_SWCFG_DSPICLK = 0x4
ARDUINO_SWCFG_DMISO = 0x5
ARDUINO_SWCFG_DMOSI = 0x6
ARDUINO_SWCFG_DSS = 0x7
ARDUINO_SWCFG_DTIMERIC = 0xB
# Switch config - all digital IOs
ARDUINO_SWCFG_DIOALL = [ ARDUINO_SWCFG_AIO, ARDUINO_SWCFG_AIO,
ARDUINO_SWCFG_AIO, ARDUINO_SWCFG_AIO,
ARDUINO_SWCFG_AIO, ARDUINO_SWCFG_AIO,
ARDUINO_SWCFG_DIO, ARDUINO_SWCFG_DIO,
ARDUINO_SWCFG_DIO, ARDUINO_SWCFG_DIO,
ARDUINO_SWCFG_DIO, ARDUINO_SWCFG_DIO,
ARDUINO_SWCFG_DIO, ARDUINO_SWCFG_DIO,
ARDUINO_SWCFG_DIO, ARDUINO_SWCFG_DIO,
ARDUINO_SWCFG_DIO, ARDUINO_SWCFG_DIO,
ARDUINO_SWCFG_DIO]
# IO register map
ARDUINO_AIO_BASEADDR = 0x40020000
ARDUINO_AIO_DATA_OFFSET = 0x8
ARDUINO_AIO_TRI_OFFSET = 0xc
ARDUINO_DIO_BASEADDR = 0x40020000
ARDUINO_DIO_DATA_OFFSET = 0x0
ARDUINO_DIO_TRI_OFFSET = 0x4
ARDUINO_UART_BASEADDR = 0x40600000
ARDUINO_UART_DATA_OFFSET = 0x0
ARDUINO_UART_TRI_OFFSET = 0x4
# AXI IO direction constants
ARDUINO_CFG_AIO_ALLOUTPUT = 0x0
ARDUINO_CFG_AIO_ALLINPUT = 0xffffffff
ARDUINO_CFG_DIO_ALLOUTPUT = 0x0
ARDUINO_CFG_DIO_ALLINPUT = 0xffffffff
ARDUINO_CFG_UART_ALLOUTPUT = 0x0
ARDUINO_CFG_UART_ALLINPUT = 0xffffffff
# IOP mapping
PMODA = 1
PMODB = 2
ARDUINO = 3
# Stickit Pmod to grove pin mapping
PMOD_GROVE_G1 = [0,4]
PMOD_GROVE_G2 = [1,5]
PMOD_GROVE_G3 = [7,3]
PMOD_GROVE_G4 = [6,2]
# Arduino shield to grove pin mapping
ARDUINO_GROVE_A1 = [0,1]
ARDUINO_GROVE_A2 = [2,3]
ARDUINO_GROVE_A3 = [3,4]
ARDUINO_GROVE_A4 = [4,5]
ARDUINO_GROVE_I2C = []
ARDUINO_GROVE_UART = [0,1]
ARDUINO_GROVE_G1 = [2,3]
ARDUINO_GROVE_G2 = [3,4]
ARDUINO_GROVE_G3 = [4,5]
ARDUINO_GROVE_G4 = [6,7]
ARDUINO_GROVE_G5 = [8,9]
ARDUINO_GROVE_G6 = [10,11]
ARDUINO_GROVE_G7 = [12,13]
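# A hedged sketch (not part of this constants file) of how the mailbox
# constants above combine with PYNQ's MMIO class. The `iop_base` value below
# is a placeholder -- the real base address comes from the overlay describing
# the Microblaze subsystem -- and the write ordering shown (payload, address,
# then command word as the trigger) is a plausible host-side protocol, not
# one confirmed here.
#
#     from pynq import MMIO
#
#     iop_base = 0x40000000                               # hypothetical IOP base
#     mailbox = MMIO(iop_base + MAILBOX_OFFSET, MAILBOX_SIZE)
#     mailbox.write(MAILBOX_PY2IOP_DATA_OFFSET, 0xAB)     # payload word
#     mailbox.write(MAILBOX_PY2IOP_ADDR_OFFSET, 0x0)      # device-side address
#     mailbox.write(MAILBOX_PY2IOP_CMD_OFFSET, WRITE_CMD) # trigger the command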
Physicians and other health professionals who have undergone training in mindful practice experience significant improvements in measures of personal well-being, resilience, and the quality of interpersonal care they provide to patients and their families (see key references). A growing number of medical institutions and healthcare systems use Mindful Practice interventions to enhance resilience among physicians and other health professionals.
In 2009, we published a report showing that practitioners participating in Mindful Practice programs show significant improvements in mindfulness, reductions in burnout and negative mood states, and enhanced well-being and resilience (Krasner MS et al., JAMA 2009). These changes are associated with an increased ability to experience and communicate empathy. Participants tend to adopt a more psychosocial orientation to care, in which their relationships with patients are richer, deeper, and more effective. They report a greater sense of purpose and community, and greater skill in navigating the most challenging aspects of their practice (Beckman HB et al., Acad Med 2012).
from django.http import HttpRequest
try:
from django.http import JsonResponse
native_json_response = True
except ImportError:
from easy_response.http import JsonResponse
native_json_response = False
from django.shortcuts import get_object_or_404
from django.forms import ModelForm
from functools import wraps
class validate(object):
def __init__(self, form_class, add_instance_using='pk'):#, extra=None):
self.form_class = form_class
self.add_instance_using = add_instance_using
# self.extra = extra
def __call__(self, view):
@wraps(view)
def wrapper(*args, **kwargs):
def get_form_kwargs(request):
data = request.GET if request.method=='GET' else request.POST
form_kwargs = {'data': data}
if hasattr(request, 'FILES') and request.FILES:
    form_kwargs['files'] = request.FILES
if issubclass(self.form_class, ModelForm) and kwargs:
value = kwargs.get(self.add_instance_using, None)
if value is not None:
model = self.form_class.Meta.model
instance = get_object_or_404(model, pk=value)
form_kwargs['instance'] = instance
# if self.extra != None:
# form_kwargs.update(self.extra(request))
return form_kwargs
def error_response(form):
content = {'Errors': form.errors}
if native_json_response:
return JsonResponse(content, status=400, safe=False)
else:
return JsonResponse(content, status=400)
def validate(request):
    request.form = self.form_class(**get_form_kwargs(request))
    if request.form.is_valid():
        return view(*args, **kwargs)
    return error_response(request.form)
for each in args:
    if isinstance(each, HttpRequest):
        return validate(each)
return validate(args[0])
return wrapper
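# A hypothetical usage sketch (PersonForm, Person, and person_update are
# illustrative names, not part of this module). The decorator binds and
# validates the form, attaches it to the request as `request.form`, and
# answers invalid input with a 400 JSON response before the view runs:
#
#     class PersonForm(ModelForm):
#         class Meta:
#             model = Person
#             fields = ['name', 'age']
#
#     @validate(PersonForm, add_instance_using='pk')
#     def person_update(request, pk):
#         person = request.form.save()
#         return JsonResponse({'id': person.pk})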
Boat tail hunting bullets designed for a combination of penetration and expansion in medium and heavy game.
7mm Sierra Bullet Heads .284 175gr SBT pkt100. For over 50 years, shooters have associated accuracy and superior ballistic performance with one name more than any other: Sierra. On the range or in the field, Sierra bullets offer that extra margin of performance that shooters can depend on.
#!/usr/bin/env python
# -*- Mode: Python; py-indent-offset: 4 -*-
# vim: tabstop=4 shiftwidth=4 expandtab
#
# Copyright (C) 2010 Red Hat, Inc., John (J5) Palmieri <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
# USA
title = "Links"
description = """
GtkLabel can show hyperlinks. The default action is to call gtk_show_uri() on
their URI, but it is possible to override this with a custom handler.
"""
from gi.repository import Gtk
class LinksApp:
def __init__(self):
self.window = Gtk.Window()
self.window.set_title('Links')
self.window.set_border_width(12)
self.window.connect('destroy', Gtk.main_quit)
label = Gtk.Label(label="""Some <a href="http://en.wikipedia.org/wiki/Text"
title="plain text">text</a> may be marked up
as hyperlinks, which can be clicked
or activated via <a href="keynav">keynav</a>""")
label.set_use_markup(True)
label.connect("activate-link", self.activate_link)
self.window.add(label)
label.show()
self.window.show()
def activate_link(self, label, uri):
if uri == 'keynav':
parent = label.get_toplevel()
markup = """The term <i>keynav</i> is a shorthand for
keyboard navigation and refers to the process of using
a program (exclusively) via keyboard input."""
dialog = Gtk.MessageDialog(transient_for=parent,
destroy_with_parent=True,
message_type=Gtk.MessageType.INFO,
buttons=Gtk.ButtonsType.OK,
text=markup,
use_markup=True)
dialog.present()
dialog.connect('response', self.response_cb)
return True
def response_cb(self, dialog, response_id):
dialog.destroy()
def main(demoapp=None):
LinksApp()
Gtk.main()
if __name__ == '__main__':
main()
Samples of Lower Devonian vertebrate-bearing placoderm sandstones, collected in a quarry at Podłazie Hill in the Holy Cross Mountains, central Poland, were found to contain numerous white and brownish aggregates of unknown composition. A powder X-ray diffraction study has shown them to comprise plumbogummite-group minerals (PGM). Gorceixite is the most common, usually forming compact/porous aggregates. These are found either in voids within the quartz-rich, zircon-, muscovite- and biotite-bearing matrix, or as complete replacements of bone remnants. Goyazite aggregates are similar but rarer. Strontian crandallite occurs as tiny zoned crystals closely associated with compact gorceixite (in the sandstone matrix) or as cores of fine-grained gorceixite aggregates (within the bones). All of the PGM are enriched in Ce and La, with two analyses falling marginally within the compositional field of florencite-(Ce). Ca enrichment, elevated F content, and the abundance of goyazite within the bone replacements suggest the primary bone apatite as the source of these elements. Tuffites and claystones associated with the sandstones are the probable source of Ba and Sr, while the Pb is possibly derived from local Devonian mineralisation.
import os
import robot
from robot.errors import DataError
from selenium import webdriver
from Selenium2Library import webdrivermonkeypatches
from Selenium2Library.utils import BrowserCache
from Selenium2Library.locators import WindowManager
from keywordgroup import KeywordGroup
ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
FIREFOX_PROFILE_DIR = os.path.join(ROOT_DIR, 'resources', 'firefoxprofile')
BROWSER_NAMES = {'ff': "_make_ff",
'firefox': "_make_ff",
'ie': "_make_ie",
'internetexplorer': "_make_ie",
'googlechrome': "_make_chrome",
'gc': "_make_chrome",
'chrome': "_make_chrome",
'opera' : "_make_opera",
'htmlunit' : "_make_htmlunit",
'htmlunitwithjs' : "_make_htmlunitwithjs"
}
class _BrowserManagementKeywords(KeywordGroup):
def __init__(self):
self._cache = BrowserCache()
self._window_manager = WindowManager()
self._speed_in_secs = float(0)
self._timeout_in_secs = float(5)
self._implicit_wait_in_secs = float(0)
# Public, open and close
def close_all_browsers(self):
"""Closes all open browsers and resets the browser cache.
After this keyword new indexes returned from `Open Browser` keyword
are reset to 1.
This keyword should be used in test or suite teardown to make sure
all browsers are closed.
"""
self._debug('Closing all browsers')
self._cache.close_all()
def close_browser(self):
"""Closes the current browser."""
if self._cache.current:
self._debug('Closing browser with session id %s'
% self._cache.current.session_id)
self._cache.close()
def open_browser(self, url, browser='firefox', alias=None, remote_url=False,
                 desired_capabilities=None, ff_profile_dir=None):
"""Opens a new browser instance to given URL.
Returns the index of this browser instance which can be used later to
switch back to it. Index starts from 1 and is reset back to it when
`Close All Browsers` keyword is used. See `Switch Browser` for
example.
Optional alias is an alias for the browser instance and it can be used
for switching between browsers (just as index can be used). See `Switch
Browser` for more details.
Possible values for `browser` are as follows:
| firefox | FireFox |
| ff | FireFox |
| internetexplorer | Internet Explorer |
| ie | Internet Explorer |
| googlechrome | Google Chrome |
| gc | Google Chrome |
| chrome | Google Chrome |
| opera | Opera |
| htmlunit | HTMLUnit |
| htmlunitwithjs | HTMLUnit with JavaScript support |
Note that you may encounter strange behavior if you open
multiple Internet Explorer browser instances. That is also why
`Switch Browser` only works with one IE browser at most.
For more information see:
http://selenium-grid.seleniumhq.org/faq.html#i_get_some_strange_errors_when_i_run_multiple_internet_explorer_instances_on_the_same_machine
Optional 'remote_url' is the URL of a remote Selenium server, for example
http://127.0.0.1/wd/hub. If you specify a value for remote you can
also specify 'desired_capabilities', which is a string in the form
key1:val1,key2:val2 that will be used to pass desired_capabilities
to the remote server. This is useful for things like specifying a
proxy server for Internet Explorer, or specifying the browser and OS
if you are using saucelabs.com.
Optional 'ff_profile_dir' is the path to the firefox profile dir if you
wish to overwrite the default.
"""
if remote_url:
self._info("Opening browser '%s' to base url '%s' through remote server at '%s'"
% (browser, url, remote_url))
else:
self._info("Opening browser '%s' to base url '%s'" % (browser, url))
browser_name = browser
browser = self._make_browser(browser_name, desired_capabilities, ff_profile_dir, remote_url)
browser.get(url)
self._debug('Opened browser with session id %s'
% browser.session_id)
return self._cache.register(browser, alias)
def switch_browser(self, index_or_alias):
"""Switches between active browsers using index or alias.
Index is returned from `Open Browser` and alias can be given to it.
Example:
| Open Browser | http://google.com | ff |
| Location Should Be | http://google.com | |
| Open Browser | http://yahoo.com | ie | 2nd conn |
| Location Should Be | http://yahoo.com | |
| Switch Browser | 1 | # index |
| Page Should Contain | I'm feeling lucky | |
| Switch Browser | 2nd conn | # alias |
| Page Should Contain | More Yahoo! | |
| Close All Browsers | | |
Above example expects that there was no other open browsers when
opening the first one because it used index '1' when switching to it
later. If you aren't sure about that you can store the index into
a variable as below.
| ${id} = | Open Browser | http://google.com | *firefox |
| # Do something ... |
| Switch Browser | ${id} | | |
"""
try:
self._cache.switch(index_or_alias)
self._debug('Switched to browser with Selenium session id %s'
% self._cache.current.session_id)
except (RuntimeError, DataError): # RF 2.6 uses RE, earlier DE
raise RuntimeError("No browser with index or alias '%s' found."
% index_or_alias)
# Public, window management
def close_window(self):
"""Closes currently opened pop-up window."""
self._current_browser().close()
def get_window_identifiers(self):
"""Returns and logs id attributes of all windows known to the browser."""
return self._log_list(self._window_manager.get_window_ids(self._current_browser()))
def get_window_names(self):
"""Returns and logs names of all windows known to the browser."""
values = self._window_manager.get_window_names(self._current_browser())
# for backward compatibility, since Selenium 1 would always
# return this constant value for the main window
if len(values) and values[0] == 'undefined':
values[0] = 'selenium_main_app_window'
return self._log_list(values)
def get_window_titles(self):
"""Returns and logs titles of all windows known to the browser."""
return self._log_list(self._window_manager.get_window_titles(self._current_browser()))
def maximize_browser_window(self):
"""Maximizes current browser window."""
self._current_browser().maximize_window()
def select_frame(self, locator):
"""Sets frame identified by `locator` as current frame.
Key attributes for frames are `id` and `name`. See `introduction` for
details about locating elements.
"""
self._info("Selecting frame '%s'." % locator)
element = self._element_find(locator, True, True)
self._current_browser().switch_to_frame(element)
def select_window(self, locator=None):
"""Selects the window found with `locator` as the context of actions.
If the window is found, all subsequent commands use that window, until
this keyword is used again. If the window is not found, this keyword fails.
By default, when a locator value is provided,
it is matched against the title of the window and the
javascript name of the window. If multiple windows with
same identifier are found, the first one is selected.
Special locator `main` (default) can be used to select the main window.
It is also possible to specify the approach Selenium2Library should take
to find a window by specifying a locator strategy:
| *Strategy* | *Example* | *Description* |
| title | Select Window `|` title=My Document | Matches by window title |
| name | Select Window `|` name=${name} | Matches by window javascript name |
| url | Select Window `|` url=http://google.com | Matches by window's current URL |
Example:
| Click Link | popup_link | # opens new window |
| Select Window | popupName |
| Title Should Be | Popup Title |
| Select Window | | | # Chooses the main window again |
"""
self._window_manager.select(self._current_browser(), locator)
def unselect_frame(self):
"""Sets the top frame as the current frame."""
self._current_browser().switch_to_default_content()
# Public, browser/current page properties
def get_location(self):
"""Returns the current location."""
return self._current_browser().get_current_url()
def get_source(self):
"""Returns the entire html source of the current page or frame."""
return self._current_browser().get_page_source()
def get_title(self):
"""Returns title of current page."""
return self._current_browser().get_title()
def location_should_be(self, url):
"""Verifies that current URL is exactly `url`."""
actual = self.get_location()
if actual != url:
raise AssertionError("Location should have been '%s' but was '%s'"
% (url, actual))
self._info("Current location is '%s'." % url)
def location_should_contain(self, expected):
"""Verifies that current URL contains `expected`."""
actual = self.get_location()
if expected not in actual:
raise AssertionError("Location should have contained '%s' "
"but it was '%s'." % (expected, actual))
self._info("Current location contains '%s'." % expected)
def log_location(self):
"""Logs and returns the current location."""
url = self.get_location()
self._info(url)
return url
def log_source(self, loglevel='INFO'):
"""Logs and returns the entire html source of the current page or frame.
The `loglevel` argument defines the used log level. Valid log levels are
`WARN`, `INFO` (default), `DEBUG`, `TRACE` and `NONE` (no logging).
"""
source = self.get_source()
self._log(source, loglevel.upper())
return source
def log_title(self):
"""Logs and returns the title of current page."""
title = self.get_title()
self._info(title)
return title
def title_should_be(self, title):
"""Verifies that current page title equals `title`."""
actual = self.get_title()
if actual != title:
raise AssertionError("Title should have been '%s' but was '%s'"
% (title, actual))
self._info("Page title is '%s'." % title)
# Public, navigation
def go_back(self):
"""Simulates the user clicking the "back" button on their browser."""
self._current_browser().back()
def go_to(self, url):
"""Navigates the active browser instance to the provided URL."""
self._info("Opening url '%s'" % url)
self._current_browser().get(url)
def reload_page(self):
"""Simulates user reloading page."""
self._current_browser().refresh()
# Public, execution properties
def get_selenium_speed(self):
"""Gets the delay in seconds that is waited after each Selenium command.
See `Set Selenium Speed` for an explanation."""
return robot.utils.secs_to_timestr(self._speed_in_secs)
def get_selenium_timeout(self):
"""Gets the timeout in seconds that is used by various keywords.
See `Set Selenium Timeout` for an explanation."""
return robot.utils.secs_to_timestr(self._timeout_in_secs)
def get_selenium_implicit_wait(self):
"""Gets the wait in seconds that is waited by Selenium.
See `Set Selenium Implicit Wait` for an explanation."""
return robot.utils.secs_to_timestr(self._implicit_wait_in_secs)
def set_selenium_speed(self, seconds):
"""Sets the delay in seconds that is waited after each Selenium command.
This is useful mainly in slowing down the test execution to be able to
view the execution. `seconds` may be given in Robot Framework time
format. Returns the previous speed value.
Example:
| Set Selenium Speed | .5 seconds |
"""
old_speed = self.get_selenium_speed()
self._speed_in_secs = robot.utils.timestr_to_secs(seconds)
for browser in self._cache.browsers:
browser.set_speed(self._speed_in_secs)
return old_speed
def set_selenium_timeout(self, seconds):
"""Sets the timeout in seconds used by various keywords.
There are several `Wait ...` keywords that take timeout as an
argument. All of these timeout arguments are optional. The timeout
used by all of them can be set globally using this keyword.
See `introduction` for more information about timeouts.
The previous timeout value is returned by this keyword and can
be used to set the old value back later. The default timeout
is 5 seconds, but it can be altered in `importing`.
Example:
| ${orig timeout} = | Set Selenium Timeout | 15 seconds |
| Open page that loads slowly |
| Set Selenium Timeout | ${orig timeout} |
"""
old_timeout = self.get_selenium_timeout()
self._timeout_in_secs = robot.utils.timestr_to_secs(seconds)
for browser in self._cache.get_open_browsers():
browser.set_script_timeout(self._timeout_in_secs)
return old_timeout
def set_selenium_implicit_wait(self, seconds):
"""Sets Selenium 2's default implicit wait in seconds and
sets the implicit wait for all open browsers.
From selenium 2 function 'Sets a sticky timeout to implicitly
wait for an element to be found, or a command to complete.
This method only needs to be called one time per session.'
Example:
| ${orig wait} = | Set Selenium Implicit Wait | 10 seconds |
| Perform AJAX call that is slow |
| Set Selenium Implicit Wait | ${orig wait} |
"""
old_wait = self.get_selenium_implicit_wait()
self._implicit_wait_in_secs = robot.utils.timestr_to_secs(seconds)
for browser in self._cache.get_open_browsers():
browser.implicitly_wait(self._implicit_wait_in_secs)
return old_wait
def set_browser_implicit_wait(self, seconds):
"""Sets current browser's implicit wait in seconds.
From selenium 2 function 'Sets a sticky timeout to implicitly
wait for an element to be found, or a command to complete.
This method only needs to be called one time per session.'
Example:
| Set Browser Implicit Wait | 10 seconds |
See also `Set Selenium Implicit Wait`.
"""
implicit_wait_in_secs = robot.utils.timestr_to_secs(seconds)
self._current_browser().implicitly_wait(implicit_wait_in_secs)
# Private
def _current_browser(self):
if not self._cache.current:
raise RuntimeError('No browser is open')
return self._cache.current
def _get_browser_token(self, browser_name):
return BROWSER_NAMES.get(browser_name.lower().replace(' ', ''), browser_name)
def _get_browser_creation_function(self,browser_name):
return BROWSER_NAMES.get(browser_name.lower().replace(' ', ''), browser_name)
def _make_browser(self, browser_name, desired_capabilities=None, profile_dir=None,
                  remote=None):
    creation_func = self._get_browser_creation_function(browser_name)
    browser = getattr(self, creation_func)(remote, desired_capabilities, profile_dir)
if browser is None:
raise ValueError(browser_name + " is not a supported browser.")
browser.set_speed(self._speed_in_secs)
browser.set_script_timeout(self._timeout_in_secs)
browser.implicitly_wait(self._implicit_wait_in_secs)
return browser
def _make_ff(self, remote, desired_capabilities, profile_dir):
    if not profile_dir:
        profile_dir = FIREFOX_PROFILE_DIR
    profile = webdriver.FirefoxProfile(profile_dir)
    if remote:
        browser = self._create_remote_web_driver(webdriver.DesiredCapabilities.FIREFOX,
                                                 remote, desired_capabilities, profile)
else:
browser = webdriver.Firefox(firefox_profile=profile)
return browser
def _make_ie(self, remote, desired_capabilities, profile_dir):
    return self._generic_make_browser(webdriver.Ie,
        webdriver.DesiredCapabilities.INTERNETEXPLORER, remote, desired_capabilities)
def _make_chrome(self, remote, desired_capabilities, profile_dir):
    return self._generic_make_browser(webdriver.Chrome,
        webdriver.DesiredCapabilities.CHROME, remote, desired_capabilities)
def _make_opera(self, remote, desired_capabilities, profile_dir):
    return self._generic_make_browser(webdriver.Opera,
        webdriver.DesiredCapabilities.OPERA, remote, desired_capabilities)
def _make_htmlunit(self, remote, desired_capabilities, profile_dir):
    return self._generic_make_browser(webdriver.Remote,
        webdriver.DesiredCapabilities.HTMLUNIT, remote, desired_capabilities)
def _make_htmlunitwithjs(self, remote, desired_capabilities, profile_dir):
    return self._generic_make_browser(webdriver.Remote,
        webdriver.DesiredCapabilities.HTMLUNITWITHJS, remote, desired_capabilities)
def _generic_make_browser(self, webdriver_type, desired_cap_type, remote_url, desired_caps):
    '''Most of the make-browser functions just call this function, which creates
    the appropriate web driver.'''
    if not remote_url:
        browser = webdriver_type()
    else:
        browser = self._create_remote_web_driver(desired_cap_type, remote_url, desired_caps)
    return browser
def _create_remote_web_driver(self, capabilities_type, remote_url, desired_capabilities=None, profile=None):
    '''Parses the string-based desired_capabilities, which should be in the form
    key1:val1,key2:val2, and creates the associated remote web driver.'''
    desired_cap = self._create_desired_capabilities(capabilities_type, desired_capabilities)
    return webdriver.Remote(desired_capabilities=desired_cap,
                            command_executor=str(remote_url),
                            browser_profile=profile)
def _create_desired_capabilities(self, capabilities_type, capabilities_string):
desired_capabilities = capabilities_type
if capabilities_string:
for cap in capabilities_string.split(","):
(key, value) = cap.split(":")
desired_capabilities[key.strip()] = value.strip()
return desired_capabilities
Air gun bottles made from high-quality chrome-molybdenum steel are much more durable than aluminum tanks. New manufacturing technology makes it possible to reduce the weight of air gun bottles without reducing safety, so you can easily handle your air gun and take down your targets. Our air gun bottles are filled with high-pressure air. The advantages of compressed air over CO2 are accuracy, efficiency, safety, and a longer working life. Our refill air gun bottles have ample volume for filling your primary air gun bottle multiple times.
Why a steel air gun bottle?
Because steel is stronger and more ductile than aluminum, constructing a cylinder requires less steel than the equivalent in aluminum. This usually means that, for an equal gas capacity, a steel air gun bottle will weigh less overall than an aluminum one.
The service life of a properly cared-for steel air gun bottle is widely considered to be at least 40 years. The service life of a properly cared-for aluminum air gun bottle is more controversial, but some shops no longer fill an aluminum tank over 15 years old.
# Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: [email protected]
# Maintained By: [email protected]
from ggrc import db
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.ext.declarative import declared_attr
from .mixins import deferred, Mapping, Timeboxed
from .reflection import PublishOnly
class ObjectControl(Timeboxed, Mapping, db.Model):
__tablename__ = 'object_controls'
role = deferred(db.Column(db.String), 'ObjectControl')
notes = deferred(db.Column(db.Text), 'ObjectControl')
control_id = db.Column(
db.Integer, db.ForeignKey('controls.id'), nullable=False)
controllable_id = db.Column(db.Integer, nullable=False)
controllable_type = db.Column(db.String, nullable=False)
@property
def controllable_attr(self):
return '{0}_controllable'.format(self.controllable_type)
@property
def controllable(self):
return getattr(self, self.controllable_attr)
@controllable.setter
def controllable(self, value):
self.controllable_id = value.id if value is not None else None
self.controllable_type = value.__class__.__name__ if value is not None \
else None
return setattr(self, self.controllable_attr, value)
@staticmethod
def _extra_table_args(cls):
return (
db.UniqueConstraint(
'control_id', 'controllable_id', 'controllable_type'),
db.Index('ix_control_id', 'control_id'),
)
_publish_attrs = [
'role',
'notes',
'control',
'controllable',
]
_sanitize_html = [
'notes',
]
@classmethod
def eager_query(cls):
from sqlalchemy import orm
query = super(ObjectControl, cls).eager_query()
return query.options(
orm.subqueryload('control'))
def _display_name(self):
return self.controllable.display_name + '<->' + self.control.display_name
class Controllable(object):
@declared_attr
def object_controls(cls):
cls.controls = association_proxy(
'object_controls', 'control',
creator=lambda control: ObjectControl(
control=control,
controllable_type=cls.__name__,
)
)
joinstr = 'and_(foreign(ObjectControl.controllable_id) == {type}.id, '\
'foreign(ObjectControl.controllable_type) == "{type}")'
joinstr = joinstr.format(type=cls.__name__)
return db.relationship(
'ObjectControl',
primaryjoin=joinstr,
backref='{0}_controllable'.format(cls.__name__),
cascade='all, delete-orphan',
)
_publish_attrs = [
PublishOnly('controls'),
'object_controls',
]
_include_links = [
#'object_controls',
]
@classmethod
def eager_query(cls):
from sqlalchemy import orm
query = super(Controllable, cls).eager_query()
return cls.eager_inclusions(query, Controllable._include_links).options(
orm.subqueryload('object_controls'))
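# A hypothetical mixin usage (Policy is an illustrative model name, not part
# of this module): inheriting Controllable gives the model an
# `object_controls` relationship plus a `controls` association proxy.
#
#     class Policy(Controllable, db.Model):
#         __tablename__ = 'policies'
#         id = db.Column(db.Integer, primary_key=True)
#
#     policy.controls.append(some_control)  # creates an ObjectControl row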
How to Find Dividend Income Stocks?
I am in the 10% income slab. I also do part-time consulting and, apart from that, work as a real estate agent. My career growth is slow as of now. My investments are in FDs, ETFs, and PPF for now. I get about 5-6K every alternate month, which I would like to invest in dividend-based stocks. I don't prefer trading every day because of the limited time and the risk associated with it.
So my current focus is to find stocks that are good at paying dividends for regular income. I am not sure which resources can guide me properly in searching for such stocks. Is there any site that maintains such a list? Any newspaper or magazine that keeps track of such stocks?
What would you advise a person who is looking for dividend-income stocks?
Dear Mahesh, in my personal opinion, please invest the money in MFs of your choice and let it grow into a big corpus over a period of time.
Dear Mahesh, a regular dividend looks useful, but consider what you actually earn from your stocks in the name of dividends. Remember that a dividend is declared on the face value of a stock, not on its market price. So your whole plan of earning a good passive income may require a really large portfolio.
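To illustrate with hypothetical numbers: a 150% dividend on a stock with a Rs 10 face value pays Rs 15 per share. If that stock trades at Rs 500, the yield on your purchase price is only 3%, so generating Rs 60,000 a year in dividends would need a portfolio of roughly Rs 20 lakh.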
I agree that I would have to hold a lot of shares in order to earn a decent passive income, but I am not sure how to proceed in that case.
I didn't consider this chart while creating my stock portfolio. I have my own criteria for selecting stocks, like valuations, consistency, sector outlook, long-term prospects, and my personal logic/choices. Maybe 2/3 of my stocks are from this list by chance, but I didn't think about regular dividends as a criterion while selecting stocks.
Thanks a lot. I have one more question. How do you select which stock to go with when it comes to this chart?
See Mahesh, Coal India announced a special dividend of Rs 29/share. Had you invested in Coal India, you would have received that significant amount without worrying about when to sell, and the share price is more than 10% up from that valuation.
I have Coal India in my portfolio, and it pays regular dividends. Currently it is at good valuations to buy. But do your own research before you take any decision.
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.conf import settings
from django.core.urlresolvers import reverse
from django.db.models import signals
from geonode.base.models import Link
from geonode.layers.models import Layer
from geonode.documents.models import Document
ISO_XSL_NAME = 'ISO with XSL'
settings.DOWNLOAD_FORMATS_METADATA.append(ISO_XSL_NAME)
def xsl_post_save(instance, sender, **kwargs):
"""Add a link to the enriched ISO metadata
"""
add_xsl_link(instance.resourcebase_ptr)
def add_xsl_link(resourcebase):
"""Add a link to the enriched ISO metadata
"""
urlpath = reverse('prefix_xsl_line', args=[resourcebase.id])
url = '{}{}'.format(settings.SITEURL, urlpath)
link, created = Link.objects.get_or_create(
resource=resourcebase,
url=url,
defaults=dict(name=ISO_XSL_NAME,
extension='xml',
mime='text/xml',
link_type='metadata'))
return created
if 'geonode.catalogue' in settings.INSTALLED_APPS:
signals.post_save.connect(xsl_post_save, sender=Layer)
signals.post_save.connect(xsl_post_save, sender=Document)
# TODO: maps as well?
Despite challenges of new FDA regulations and the attacks of critics, 2016 was a pretty good year for vapers. Check out our Vaping News 2016 year in review.
Applying Ohm's Law can tell you what your battery's limits are, how low you can go with the resistance and how much power your battery will support.
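As a hedged illustration of that arithmetic (the 3.7 V and 0.5 ohm figures below are example values, not recommendations): current is voltage divided by resistance, and power is voltage squared divided by resistance, so a lower-resistance coil pulls more amps from the cell.
voltage = 3.7                      # a fully charged single cell, in volts
resistance = 0.5                   # coil resistance, in ohms
current = voltage / resistance     # I = V / R -> 7.4 A drawn from the cell
power = voltage ** 2 / resistance  # P = V^2 / R -> about 27.4 W at the coil
print("Draw: %.1f A, %.1f W" % (current, power))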
Just when you thought you had seen it all, here comes Vape Master. The first vaping video game is now available for the iOS and Android mobile platforms.
Is Apple developing an iVape? In late January 2017, news leaked that Apple filed a patent for a device called a Sublimator/Vaporizer back in July of 2016.
We have all had those dreaded vaping debates with people unfamiliar with electronic cigarettes. Use these vaping rebuttals to win any vaping argument.
Big Tobacco may be taking a Trojan Horse approach in the conflict with the vaping industry by manufacturing vaping devices and opening its own vape shops.
In stark contrast with the Surgeon General's claims on teen vaping, studies show a decrease in the number of young people smoking and a decrease in the number of young vapers.
Can you vape on a plane? The short answer is no. But here are some really helpful suggestions on how to transport your vape without leaks or battery issues.
Throughout the United States, there are areas which stand tall above the rest as hubs of vaping culture. Here are some vape friendly regions in our country.
A Billion Lives documentary advances a theory that big tobacco companies will go to great lengths to preserve their industry.
A new study shows newer vaping devices such as the KangerTech Top EVOD provides a higher nicotine absorption than first generation electronic cigarettes.
Vaper's Tongue, sometimes referred to as Vaper's Fatigue, occurs when the olfactory sense (your ability to smell) is overwhelmed by a specific aroma.
# -*- encoding: utf-8 -*-
################################################################################
# TuiterPy - A Python Command Line Social Network Application
# Copyright (C) 2016 Sergio Trujillo ([email protected])
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
################################################################################
class CommandLine(object):
def __init__(self, view, processor):
self.view = view
self.processor = processor
self.exit = False
def start(self):
self.view.show_welcome()
def resume(self):
self.view.show_prompt()
command_line = self.view.read_command_line()
self.process(command_line)
def process(self, command):
if self.processor.exit(command):
self.view.show_farewell()
self.exit = True
else:
printer = self.processor.process(command)
printer.print_command()
def is_exit(self):
return self.exit
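# A hypothetical wiring of this class (ConsoleView and CommandProcessor are
# illustrative stand-ins for the project's real view and processor):
#
#     command_line = CommandLine(ConsoleView(), CommandProcessor())
#     command_line.start()
#     while not command_line.is_exit():
#         command_line.resume()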
A dynamite combination of rich color and incredible sparkle! The Geode Style story is a gorgeous display of natural wonder and creativity. We gathered just a few of the many fabulous ideas out there to create this wedding look, and you won’t be disappointed.
Geodes (and agates – similar to the geode) often have absolutely mesmerizing layers of color. Polished geodes with gold writing become stunning name cards for your guests. The wedding cake shown here also highlights the layered geode look and features a bit of gold sparkle. You’ll even find wedding cakes designed to look like geodes with the cavity and crystals built into the cake!
Geodes and agates come in all kinds of colors but blue is a very common color that happens to work beautifully for weddings. Dress the bridal party in vibrant shades of blue and make sure the ladies are wearing jewelry with some serious sparkle (this wedding theme works best with a little bling). Use your wedding napkins to incorporate some color and style, and watercolor wedding invitations are a great choice for bringing the geode look into your stationery. We chose the Mysterious Love Invitation with real foil to capture a little sparkle.
import sys
import csv
class SimulationSettings:
Softening = 10.0 # KM added to every link distance to eliminate needless distinction between very short routes.
#TurnBackAllowed = True # feature disabled for now.
AgentLogLevel = 0 # set to 1 for basic agent information.
CampLogLevel = 0 # set to 1 to obtain average times for agents to reach camps at any time step (aggregate info).
InitLogLevel = 0 # set to 1 for basic information on locations added and conflict zones assigned.
TakeRefugeesFromPopulation = True
sqrt_ten = 3.16227766017 # square root of ten (10^0.5).
CampWeight = sqrt_ten # attraction factor for camps.
ConflictWeight = 1.0 / sqrt_ten # reduction factor for refugees entering conflict zones.
MaxMoveSpeed = 360 # most number of km that we expect refugees to traverse per time step (30 km/h * 12 hours).
MaxWalkSpeed = 35 # most number of km that we expect refugees to traverse per time step on foot (3.5 km/h * 10 hours).
MaxCrossingSpeed = 20 # most number of km that we expect refugees to traverse per time step on boat/walk to cross river (2 km/h * 10 hours).
StartOnFoot = True # Agents walk on foot when they traverse their very first link.
CapacityBuffer = 1.0
# default move chance
ConflictMoveChance = 1.0
CampMoveChance = 0.001
DefaultMoveChance = 0.3
# Specific enhancements for the 2.0 ruleset.
# This includes a movespeed of 420 and a walk speed of 42.
AvoidShortStints = True # Displaced people will not take a break unless they at least travelled for a full day's distance in the last two days.
FlareConflictInputFile = ""
AwarenessLevel = 1 #-1, no weighting at all, 0 = road only, 1 = location, 2 = neighbours, 3 = region.
#NumProcs = 1 #This is not supported at the moment.
UseV1Rules = False
if UseV1Rules == True:
MaxMoveSpeed = 200
StartOnFoot = False
AvoidShortStints = False # Displaced people will not take a break unless they at least travelled for a full day's distance in the last two days.
CampWeight = 2.0 # attraction factor for camps.
ConflictWeight = 0.25 # reduction factor for refugees entering conflict zones.
@staticmethod
def ReadFromCSV(csv_name):
"""
Reads simulation settings from CSV
"""
number_of_steps = -1
with open(csv_name, newline='') as csvfile:
values = csv.reader(csvfile)
for row in values:
if row[0][0] == "#":
pass
elif row[0].lower() == "agentloglevel":
SimulationSettings.AgentLogLevel = int(row[1])
elif row[0].lower() == "camploglevel":
SimulationSettings.CampLogLevel = int(row[1])
elif row[0].lower() == "initloglevel":
SimulationSettings.InitLogLevel = int(row[1])
elif row[0].lower() == "minmovespeed":
SimulationSettings.MinMoveSpeed = float(row[1])
elif row[0].lower() == "maxmovespeed":
SimulationSettings.MaxMoveSpeed = float(row[1])
elif row[0].lower() == "numberofsteps":
number_of_steps = int(row[1])
elif row[0].lower() == "campweight":
SimulationSettings.CampWeight = float(row[1])
elif row[0].lower() == "conflictweight":
SimulationSettings.ConflictWeight = float(row[1])
elif row[0].lower() == "conflictmovechance":
SimulationSettings.ConflictMoveChance = float(row[1])
elif row[0].lower() == "campmovechance":
SimulationSettings.CampMoveChance = float(row[1])
elif row[0].lower() == "defaultmovechance":
SimulationSettings.DefaultMoveChance = float(row[1])
elif row[0].lower() == "awarenesslevel":
SimulationSettings.AwarenessLevel = int(row[1])
elif row[0].lower() == "flareconflictinputfile":
SimulationSettings.FlareConflictInputFile = row[1]
elif row[0].lower() == "usev1rules":
SimulationSettings.UseV1Rules = (row[1].lower() == "true")
elif row[0].lower() == "startonfoot":
SimulationSettings.StartOnFoot = (row[1].lower() == "true")
elif row[0].lower() == "avoidshortstints":
SimulationSettings.AvoidShortStints = (row[1].lower() == "true")
else:
print("FLEE Initialization Error: unrecognized simulation parameter:",row[0])
sys.exit()
return number_of_steps
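# A hypothetical settings file for ReadFromCSV (parameter names mirror the
# branches above; the values are illustrative only):
#
#     #parameter,value
#     AgentLogLevel,1
#     CampMoveChance,0.001
#     NumberOfSteps,300
#
# which a simulation driver would load as:
#
#     steps = SimulationSettings.ReadFromCSV("simsetting.csv")  # returns 300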
Our glaziers covering Hatfield Peverel and the local area are on call 24/7*. We offer an emergency response service when your window needs boarding or you require a glass replacement service. We provide free quotes and estimates, and our pricing is very competitive with local glazing companies in Hatfield Peverel.
Our fully trained engineers cover Hatfield Peverel and a 30 mile radius from CM3 2LE.
# coding=utf-8
import logging
from django.shortcuts import render, redirect
from news.models import Feed
from news.tasks import twitter_task
from news.utils.common import get_active_tasks
from newsproject import celery_app
__author__ = 'ilov3'
logger = logging.getLogger(__name__)
def settings(request):
feeds = Feed.objects.all()
tasks_list = get_active_tasks(name='news.tasks.twitter_task.twitter_task')
return render(request, 'settings/settings.html', {'streams': tasks_list, 'feeds': feeds})
def stop_stream(request):
if request.method == 'POST':
task_id = request.POST.get('task_id')
if task_id:
celery_app.control.revoke(task_id=task_id, terminate=True)
return redirect('settings')
def new_tweet_stream(request):
if request.method == "POST":
keyword = request.POST.get('keyword')
location = request.POST.get('location')
if location:
try:
location = [float(coordinate) for coordinate in location.split(',')]
except Exception as e:
logger.error('Could not convert location string (%s) into coordinates. Error: %s' % (location, e))
twitter_task.delay(keyword=keyword, location=location)
return redirect('settings')
return redirect('settings')
Our particular circumstance of having a living and immersive eco-village campus provides a very distinct advantage in the Physical Education department. Students are provided a range of activities for physical development and frequently engage in hands-on training in the maintenance of a real ranch. Sports and competitive athletics are arranged in our pastures, from capture the flag and football to soccer and volleyball, while laps and other aquatic skills can be practiced in our swimming pool. The physical aptitude and potential of each individual are tested and pushed so that optimal physical health can be enjoyed. Physical education also covers knowledge of the body, the relationship of its systems to one another, and their roles in a balanced lifestyle. These studies can include proper diet, cardiovascular exercise, isolated and group muscular development, and stretching.
"""
IMPORTANT: DO NOT USE THIS SCRIPT TO DOS THE SCHOOL SERVER. THIS IS
INTENDED TO MAKE IT EASIER TO CHECK WHICH COURSES ARE OPEN WITHOUT
HAVING TO LOG IN AND MANUALLY GO THROUGH EACH STEP TO SELECT THE
TERM AND DEPARTMENT. USE IT ONCE AND THEN REGISTER. AT MOST USE IT
ONCE EVERY 15 MINUTES, ABOUT THE RATE AT WHICH A HUMAN USING A MOUSE
WOULD REFRESH THEIR OWN SEARCHES.
This is a single-module script to scrape OSCAR for course openings.
It works for Summer and Fall of 2016. There is no guarantee it will
work for future terms unless you change the CRNs and term dates.
Additionally, if you are using this in the future, you will want to
ensure that the OSCAR API has not changed and this url structure still
works. I will likely maintain this myself until I have graduated and
no longer.
(c) Adam Acosta 2016
"""
from __future__ import print_function
import re
import sys
import argparse
from urllib2 import urlopen
from bs4 import BeautifulSoup
class TermDependent(argparse.Action):
"""Custom Action to ensure user selects a term if specifying crn."""
def __call__(self, parser, namespace, values, option_string=None):
term = getattr(namespace, 'term')
# User tried to specify a crn without specifying a term
if term == 'all':
parser.error("must specify term to use crn")
else:
setattr(namespace, self.dest, values)
parser = argparse.ArgumentParser()
parser.add_argument('--term', type=str, default='all',
help='the term you wish to check')
parser.add_argument('--crn', type=str, default=None,
action=TermDependent,
help='use this if you only want to check one CRN')
crns = {
'summer':
{'intro to info security': '56393',
'software dev process': '55424',
'software arch and design': '56394',
'software analysis and test': '56395',
'comp photography': '55805',
'knowledge-based ai': '55806',
'artificial intelligence for robotics': '55426',
'intro to operating systems': '55804',
'reinforcement learning': '56396',
'embedded software': '56397'},
'fall':
{'high performance computing': '89826',
'data and visual analytics': '91202',
'big data for health': '91201',
'intro to info security': '89823',
'adv operating systems': '88770',
'computer networks': '88771',
'network security': '91203',
'high performance computer arch': '88775',
'software dev process': '88772',
'software arch and design': '88776',
'software analysis and test': '91197',
'db sys concepts and design': '91198',
'intro health informatics': '88777',
'educ tech foundations': '90228',
'comp photography': '89821',
'computer vision': '90192',
'computability and algorithms': '88778',
'artificial intelligence': '91199',
'knowledge-based ai': '88779',
'machine learning': '88773',
'mach learning for trading': '89824',
'artificial intelligence for robotics': '88774',
'intro to operating systems': '89822',
'reinforcement learning': '89825',
'embedded software': '91200',
'cyber-physical systems': '91581'},
}
terms = {'summer': '201605', 'fall': '201608'}
def get_seats(term, crn):
"""Enter the term and crn and return the number of open seats."""
# This is the API as of April 2016
url = "https://oscar.gatech.edu/pls/bprod/bwckschd" + \
".p_disp_detail_sched?term_in={}&crn_in={}".format(term, crn)
html = urlopen(url).read()
soup = BeautifulSoup(html, 'html.parser')
text = soup.get_text()
# Uncomment one of these to test
# return url
# return soup
# return text
# Available seats is the third column in the table
seats = re.search('(?<=Seats\n[0-9]{3}\n[0-9]{3}\n)[0-9]{1,3}', text)
if seats is not None:
return seats.group(0)
# In this case, the course only has double-digit enrollment.
# Try again because a lookbehind assertion must be fixed-width.
seats = re.search('(?<=Seats\n[0-9]{3}\n[0-9]{2}\n)[0-9]{1,3}', text)
return seats.group(0)
if __name__ == '__main__':
args = parser.parse_args()
# I am double-checking here that you are not DOSing the server
ans = raw_input("Have you checked in the last 15 minutes? (y/n): ")
if str(ans).lower() != 'n':
print("Please wait at least 15 minutes.")
sys.exit(0)
# Single CRN
if args.crn:
print(get_seats(terms[args.term], args.crn))
sys.exit(0)
# Single term
if args.term != 'all':
for course in crns[args.term]:
print(course, get_seats(terms[args.term], crns[args.term][course]))
sys.exit(0)
# Go ahead and check
for term in terms:
for course in crns[term]:
print(term, course, get_seats(terms[term], crns[term][course]))
sys.exit(0)
I am a freelance artist, illustrator, and graphic designer, with several years of experience in all three fields. My focus lies in book illustration, for adults and for children.
# Copyright 2012-2013 Ravello Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, print_function
# These inflection functions are trivial and only work for regular nouns
# and verbs. Special cases can be added as and when needed.
def plural_noun(noun, count=2):
"""Takes a singular English noun ``noun`` and returns a plural version,
depending on ``count``."""
if count == 1:
return noun
else:
return noun + 's'
def plural_verb(verb, count=2):
"""Return the plural conjugation of the English verb ``verb``, depending
on ``count``."""
if count == 1:
if verb == 'was':
return 'was'
return verb + 's'
else:
if verb == 'was':
return 'were'
return verb
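# A usage sketch for the helpers above:
#
#     count = 3
#     print('%d %s %s found' % (count, plural_noun('file', count),
#                               plural_verb('was', count)))
#     # -> "3 files were found"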
Crisp Champagne with notes of sparkling Brut Champagne mingles with hints of Vanilla and Barrel Oak.
Voluspa Crisp Champagne 2 Wick Tin Candle is delicious sparkling brut champagne mingled with hints of vanilla and barrel oak.
Packaged in a beautiful gilded tin, this exquisite 2 wick candle gives off a rich glow and easily scents a large room. Maison Noir Candles by Voluspa exude the luxe sophistication that loyal fans have come to expect from Voluspa, featuring fragrances that are at once unique and familiar.
This scent combines Sparkling Brut Champagne, Vanilla, and Barrel Oak for the nostalgic, romantic aroma of a wedding. The candle is hand-poured and comes in a stunning tin. Made of apricot and coconut waxes, with about 50 hours of burn time.
# coding=utf-8
"""
toolkit_library.input_util
~~~~~~~~~~~~~~~~~~~~~~~~~~
Get inputs from user and validate them
"""
import re
class InputUtil(object):
"""get inputs from user and validate them"""
@staticmethod
def get_input(name, default = None, pattern = None):
"""get inputs from user and validate them
If user enters empty and default value is not None, default value will be returned.
if user enters non-empty and pattern is not None, user input should match the regex pattern.
Otherwise user will be prompt to enter again.
"""
assert isinstance(name, str) and len(name) > 0
prompt = name
if pattern is not None:
prompt = '{0} ({1})'.format(prompt, pattern)
if default is not None:
prompt = '{0} [{1}]'.format(prompt, default)
prompt = 'Please enter {0}: '.format(prompt)
while True:
result = raw_input(prompt)
if not result:
if default is not None:
return default
else:
print 'Please enter something, as there is no default value available.'
else:
if pattern is None:
return result
else:
if re.match(pattern, result):
return result
else:
print 'What you just entered is not valid, please try again.'
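# A hypothetical invocation: prompt for a port, defaulting to 8080 and
# accepting digits only.
#
#     port = InputUtil.get_input('port', default='8080', pattern=r'^\d+$')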
A man lives on the twelfth floor of an apartment building. Every morning he takes the elevator down to the lobby and leaves the building. In the evening, he gets into the elevator, and, if there is someone else in the elevator — or if it was raining that day — he goes back to his floor directly. Otherwise, he goes to the tenth floor and walks up two flights of stairs to his apartment.
The man is a dwarf. He can’t reach the upper elevator buttons, but he can ask people to push them for him. He can also push them with his umbrella.
from pandac.PandaModules import *
from direct.showbase.DirectObject import DirectObject
from otp.otpbase import OTPGlobals
class PositionExaminer(DirectObject, NodePath):
def __init__(self):
try:
self.__initialized
return
except:
self.__initialized = 1
NodePath.__init__(self, hidden.attachNewNode('PositionExaminer'))
self.cRay = CollisionRay(0.0, 0.0, 6.0, 0.0, 0.0, -1.0)
self.cRayNode = CollisionNode('cRayNode')
self.cRayNode.addSolid(self.cRay)
self.cRayNodePath = self.attachNewNode(self.cRayNode)
self.cRayNodePath.hide()
self.cRayBitMask = OTPGlobals.FloorBitmask
self.cRayNode.setFromCollideMask(self.cRayBitMask)
self.cRayNode.setIntoCollideMask(BitMask32.allOff())
self.cSphere = CollisionSphere(0.0, 0.0, 0.0, 1.5)
self.cSphereNode = CollisionNode('cSphereNode')
self.cSphereNode.addSolid(self.cSphere)
self.cSphereNodePath = self.attachNewNode(self.cSphereNode)
self.cSphereNodePath.hide()
self.cSphereBitMask = OTPGlobals.WallBitmask
self.cSphereNode.setFromCollideMask(self.cSphereBitMask)
self.cSphereNode.setIntoCollideMask(BitMask32.allOff())
self.ccLine = CollisionSegment(0.0, 0.0, 0.0, 1.0, 0.0, 0.0)
self.ccLineNode = CollisionNode('ccLineNode')
self.ccLineNode.addSolid(self.ccLine)
self.ccLineNodePath = self.attachNewNode(self.ccLineNode)
self.ccLineNodePath.hide()
self.ccLineBitMask = OTPGlobals.CameraBitmask
self.ccLineNode.setFromCollideMask(self.ccLineBitMask)
self.ccLineNode.setIntoCollideMask(BitMask32.allOff())
self.cRayTrav = CollisionTraverser('PositionExaminer.cRayTrav')
self.cRayTrav.setRespectPrevTransform(False)
self.cRayQueue = CollisionHandlerQueue()
self.cRayTrav.addCollider(self.cRayNodePath, self.cRayQueue)
self.cSphereTrav = CollisionTraverser('PositionExaminer.cSphereTrav')
self.cSphereTrav.setRespectPrevTransform(False)
self.cSphereQueue = CollisionHandlerQueue()
self.cSphereTrav.addCollider(self.cSphereNodePath, self.cSphereQueue)
self.ccLineTrav = CollisionTraverser('PositionExaminer.ccLineTrav')
self.ccLineTrav.setRespectPrevTransform(False)
self.ccLineQueue = CollisionHandlerQueue()
self.ccLineTrav.addCollider(self.ccLineNodePath, self.ccLineQueue)
def delete(self):
del self.cRay
del self.cRayNode
self.cRayNodePath.removeNode()
del self.cRayNodePath
del self.cSphere
del self.cSphereNode
self.cSphereNodePath.removeNode()
del self.cSphereNodePath
del self.ccLine
del self.ccLineNode
self.ccLineNodePath.removeNode()
del self.ccLineNodePath
del self.cRayTrav
del self.cRayQueue
del self.cSphereTrav
del self.cSphereQueue
del self.ccLineTrav
del self.ccLineQueue
def consider(self, node, pos, eyeHeight):
self.reparentTo(node)
self.setPos(pos)
result = None
self.cRayTrav.traverse(render)
if self.cRayQueue.getNumEntries() != 0:
self.cRayQueue.sortEntries()
floorPoint = self.cRayQueue.getEntry(0).getSurfacePoint(self.cRayNodePath)
if abs(floorPoint[2]) <= 4.0:
pos += floorPoint
self.setPos(pos)
self.cSphereTrav.traverse(render)
if self.cSphereQueue.getNumEntries() == 0:
self.ccLine.setPointA(0, 0, eyeHeight)
self.ccLine.setPointB(-pos[0], -pos[1], eyeHeight)
self.ccLineTrav.traverse(render)
if self.ccLineQueue.getNumEntries() == 0:
result = pos
self.reparentTo(hidden)
self.cRayQueue.clearEntries()
self.cSphereQueue.clearEntries()
self.ccLineQueue.clearEntries()
return result
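# Usage sketch (assumes a running Panda3D ShowBase so that `render` and
# `hidden` exist; the avatar node and numbers below are hypothetical):
#
#   examiner = PositionExaminer()
#   spot = examiner.consider(avatarNode, Point3(2, 0, 0), eyeHeight=4.0)
#   if spot is not None:
#       avatarNode.setPos(avatarNode, spot)  # passed floor/wall/camera checks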
|
If you have a motor vehicle and want to drive it yourself, you should learn the safety skills of driving. Understanding the basic rules is necessary to become an efficient and confident driver. With the guidance of a skilled and experienced instructor, it will take only a few days before you can drive confidently on busy roads. Finding the right person is a must if you want to learn everything about driving in a legitimate manner. You should choose the right course according to your precise needs. When you are with a renowned and trusted school, it is easy to select a course that is designed just for you.
There are many ways to find a good school. You can use the internet and browse websites to choose the perfect one. Read about the courses, their duration, and anything else you need to know. Today, most organizations have websites to guide interested people. You will also find a contact number so you can call or visit. You can clear up all your doubts once you meet an experienced executive, who will guide you to the courses you need to attend.
The process described above is a time-consuming way to find support: you need to make a shortlist first and then visit each school personally. If you are in a hurry, it is better to ask your friends and neighbors to recommend a name; that way you almost always get a satisfactory outcome. Once you have a recommendation, just visit the school, state what you need, and get the right guidance. It is the easiest way to get satisfactory service, and you don't need to visit many places to find the best driving school.
It is better to choose a local school so that you can save time and get a clear picture of the services. There is less chance of being cheated when you have a recommendation from someone close to you, but ask everything you want to know before you finalize the deal. If you are in Sydney and looking for a reputable Driving School Western Sydney, ask your neighbors and local friends to guide you in this matter.
A good school offers a variety of courses, and you can also choose a package according to your needs. A reputed company designs cheap, comprehensive lessons so that learners properly absorb the safety aspects of driving.
Always judge the efficiency of the instructors when evaluating an Affordable Driving School Blacktown. Your success depends on their skill and experience; you will learn a great deal with a skilled and experienced trainer. |
#
# Copyright 2008 The ndb Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A tasklet decorator.
Tasklets are a way to write concurrently running functions without
threads; tasklets are executed by an event loop and can suspend
themselves blocking for I/O or some other operation using a yield
statement. The notion of a blocking operation is abstracted into the
Future class, but a tasklet may also yield an RPC in order to wait for
that RPC to complete.
The @tasklet decorator wraps a generator function so that when it is
called, a Future is returned while the generator is executed by the
event loop. Within the tasklet, any yield of a Future waits for and
returns the Future's result. For example:
@tasklet
def foo():
a = yield <some Future>
b = yield <another Future>
raise Return(a + b)
def main():
f = foo()
x = f.get_result()
print x
Note that blocking until the Future's result is available using
get_result() is somewhat inefficient (though not vastly -- it is not
busy-waiting). In most cases such code should be rewritten as a tasklet
instead:
@tasklet
def main_tasklet():
f = foo()
x = yield f
print x
Calling a tasklet automatically schedules it with the event loop:
def main():
f = main_tasklet()
eventloop.run() # Run until no tasklets left to do
f.done() # Returns True
As a special feature, if the wrapped function is not a generator
function, its return value is returned via the Future. This makes the
following two equivalent:
@tasklet
def foo():
return 42
@tasklet
def foo():
if False: yield # The presence of 'yield' makes foo a generator
raise Return(42) # Or, after PEP 380, return 42
This feature (inspired by Monocle) is handy in case you are
implementing an interface that expects tasklets but you have no need to
suspend -- there's no need to insert a dummy yield in order to make
the tasklet into a generator.
"""
import collections
import logging
import os
import sys
import types
from .google_imports import apiproxy_stub_map
from .google_imports import apiproxy_rpc
from .google_imports import datastore
from .google_imports import datastore_errors
from .google_imports import datastore_pbs
from .google_imports import datastore_rpc
from .google_imports import namespace_manager
from . import eventloop
from . import utils
__all__ = ['Return', 'tasklet', 'synctasklet', 'toplevel', 'sleep',
'add_flow_exception', 'get_return_value',
'get_context', 'set_context',
'make_default_context', 'make_context',
'Future', 'MultiFuture', 'QueueFuture', 'SerialQueueFuture',
'ReducingFuture',
]
_logging_debug = utils.logging_debug
def _is_generator(obj):
"""Helper to test for a generator object.
NOTE: This tests for the (iterable) object returned by calling a
generator function, not for a generator function.
"""
return isinstance(obj, types.GeneratorType)
class _State(utils.threading_local):
"""Hold thread-local state."""
current_context = None
def __init__(self):
super(_State, self).__init__()
self.all_pending = set()
def add_pending(self, fut):
_logging_debug('all_pending: add %s', fut)
self.all_pending.add(fut)
def remove_pending(self, fut, status='success'):
if fut in self.all_pending:
_logging_debug('all_pending: %s: remove %s', status, fut)
self.all_pending.remove(fut)
else:
_logging_debug('all_pending: %s: not found %s', status, fut)
def clear_all_pending(self):
if self.all_pending:
logging.info('all_pending: clear %s', self.all_pending)
self.all_pending.clear()
else:
_logging_debug('all_pending: clear no-op')
def dump_all_pending(self, verbose=False):
pending = []
for fut in self.all_pending:
if verbose:
line = fut.dump() + ('\n' + '-' * 40)
else:
line = fut.dump_stack()
pending.append(line)
return '\n'.join(pending)
_state = _State()
# Tuple of exceptions that should not be logged (except in debug mode).
_flow_exceptions = ()
def add_flow_exception(exc):
"""Add an exception that should not be logged.
The argument must be a subclass of Exception.
"""
global _flow_exceptions
if not isinstance(exc, type) or not issubclass(exc, Exception):
raise TypeError('Expected an Exception subclass, got %r' % (exc,))
as_set = set(_flow_exceptions)
as_set.add(exc)
_flow_exceptions = tuple(as_set)
def _init_flow_exceptions():
"""Internal helper to initialize _flow_exceptions.
This automatically adds webob.exc.HTTPException, if it can be imported.
"""
global _flow_exceptions
_flow_exceptions = ()
add_flow_exception(datastore_errors.Rollback)
try:
from webob import exc
except ImportError:
pass
else:
add_flow_exception(exc.HTTPException)
_init_flow_exceptions()
class Future(object):
"""A Future has 0 or more callbacks.
The callbacks will be called when the result is ready.
  NOTE: This is somewhat inspired by, but not conformant to, the Future
  interface defined by PEP 3148. It is also inspired by (and tries to be
  somewhat compatible with) the App Engine specific UserRPC and MultiRpc classes.
"""
# TODO: Trim the API; there are too many ways to do the same thing.
# TODO: Compare to Monocle's much simpler Callback class.
# Constants for state property.
IDLE = apiproxy_rpc.RPC.IDLE # Not yet running (unused)
RUNNING = apiproxy_rpc.RPC.RUNNING # Not yet completed.
FINISHING = apiproxy_rpc.RPC.FINISHING # Completed.
# XXX Add docstrings to all methods. Separate PEP 3148 API from RPC API.
_geninfo = None # Extra info about suspended generator.
def __init__(self, info=None):
# TODO: Make done a method, to match PEP 3148?
# pylint: disable=invalid-name
__ndb_debug__ = 'SKIP' # Hide this frame from self._where
self._info = info # Info from the caller about this Future's purpose.
self._where = utils.get_stack()
self._context = None
self._reset()
def _reset(self):
self._done = False
self._result = None
self._exception = None
self._traceback = None
self._callbacks = []
self._immediate_callbacks = []
_state.add_pending(self)
self._next = None # Links suspended Futures together in a stack.
# TODO: Add a __del__ that complains if neither get_exception() nor
# check_success() was ever called? What if it's not even done?
def __repr__(self):
if self._done:
if self._exception is not None:
state = 'exception %s: %s' % (self._exception.__class__.__name__,
self._exception)
else:
state = 'result %r' % (self._result,)
else:
state = 'pending'
line = '?'
for line in self._where:
if 'tasklets.py' not in line:
break
if self._info:
line += ' for %s' % self._info
if self._geninfo:
line += ' %s' % self._geninfo
return '<%s %x created by %s; %s>' % (
self.__class__.__name__, id(self), line, state)
def dump(self):
return '%s\nCreated by %s' % (self.dump_stack(),
'\n called by '.join(self._where))
def dump_stack(self):
lines = []
fut = self
while fut is not None:
lines.append(str(fut))
fut = fut._next
return '\n waiting for '.join(lines)
def add_callback(self, callback, *args, **kwds):
if self._done:
eventloop.queue_call(None, callback, *args, **kwds)
else:
self._callbacks.append((callback, args, kwds))
def add_immediate_callback(self, callback, *args, **kwds):
if self._done:
callback(*args, **kwds)
else:
self._immediate_callbacks.append((callback, args, kwds))
def set_result(self, result):
if self._done:
raise RuntimeError('Result cannot be set twice.')
self._result = result
self._done = True
_state.remove_pending(self)
for callback, args, kwds in self._immediate_callbacks:
callback(*args, **kwds)
for callback, args, kwds in self._callbacks:
eventloop.queue_call(None, callback, *args, **kwds)
def set_exception(self, exc, tb=None):
if not isinstance(exc, BaseException):
      raise TypeError('exc must be a BaseException; received %r' % exc)
if self._done:
raise RuntimeError('Exception cannot be set twice.')
self._exception = exc
self._traceback = tb
self._done = True
_state.remove_pending(self, status='fail')
for callback, args, kwds in self._immediate_callbacks:
callback(*args, **kwds)
for callback, args, kwds in self._callbacks:
eventloop.queue_call(None, callback, *args, **kwds)
def done(self):
return self._done
@property
def state(self):
# This is just for compatibility with UserRPC and MultiRpc.
# A Future is considered running as soon as it is created.
if self._done:
return self.FINISHING
else:
return self.RUNNING
def wait(self):
if self._done:
return
ev = eventloop.get_event_loop()
while not self._done:
if not ev.run1():
logging.info('Deadlock in %s', self)
logging.info('All pending Futures:\n%s', _state.dump_all_pending())
_logging_debug('All pending Futures (verbose):\n%s',
_state.dump_all_pending(verbose=True))
self.set_exception(RuntimeError('Deadlock waiting for %s' % self))
def get_exception(self):
self.wait()
return self._exception
def get_traceback(self):
self.wait()
return self._traceback
def check_success(self):
self.wait()
if self._exception is not None:
raise self._exception.__class__, self._exception, self._traceback
def get_result(self):
self.check_success()
return self._result
# TODO: Have a tasklet that does this
@classmethod
def wait_any(cls, futures):
# TODO: Flatten MultiRpcs.
waiting_on = set(futures)
ev = eventloop.get_event_loop()
while waiting_on:
for f in waiting_on:
if f.state == cls.FINISHING:
return f
ev.run1()
return None
# TODO: Have a tasklet that does this
@classmethod
def wait_all(cls, futures):
# TODO: Flatten MultiRpcs.
waiting_on = set(futures)
ev = eventloop.get_event_loop()
while waiting_on:
waiting_on = set(f for f in waiting_on if f.state == cls.RUNNING)
ev.run1()
def _help_tasklet_along(self, ns, ds_conn, gen, val=None, exc=None, tb=None):
# XXX Docstring
info = utils.gen_info(gen)
# pylint: disable=invalid-name
__ndb_debug__ = info
try:
save_context = get_context()
save_namespace = namespace_manager.get_namespace()
save_ds_connection = datastore._GetConnection()
try:
set_context(self._context)
if ns != save_namespace:
namespace_manager.set_namespace(ns)
if ds_conn is not save_ds_connection:
datastore._SetConnection(ds_conn)
if exc is not None:
_logging_debug('Throwing %s(%s) into %s',
exc.__class__.__name__, exc, info)
value = gen.throw(exc.__class__, exc, tb)
else:
_logging_debug('Sending %r to %s', val, info)
value = gen.send(val)
self._context = get_context()
finally:
ns = namespace_manager.get_namespace()
ds_conn = datastore._GetConnection()
set_context(save_context)
if save_namespace != ns:
namespace_manager.set_namespace(save_namespace)
if save_ds_connection is not ds_conn:
datastore._SetConnection(save_ds_connection)
except StopIteration, err:
result = get_return_value(err)
_logging_debug('%s returned %r', info, result)
self.set_result(result)
return
except GeneratorExit:
# In Python 2.5, this derives from Exception, but we don't want
# to handle it like other Exception instances. So we catch and
# re-raise it immediately. See issue 127. http://goo.gl/2p5Pn
# TODO: Remove when Python 2.5 is no longer supported.
raise
except Exception, err:
_, _, tb = sys.exc_info()
if isinstance(err, _flow_exceptions):
# Flow exceptions aren't logged except in "heavy debug" mode,
# and then only at DEBUG level, without a traceback.
_logging_debug('%s raised %s(%s)',
info, err.__class__.__name__, err)
elif utils.DEBUG and logging.getLogger().level < logging.DEBUG:
# In "heavy debug" mode, log a warning with traceback.
# (This is the same condition as used in utils.logging_debug().)
logging.warning('%s raised %s(%s)',
info, err.__class__.__name__, err, exc_info=True)
else:
# Otherwise, log a warning without a traceback.
logging.warning('%s raised %s(%s)', info, err.__class__.__name__, err)
self.set_exception(err, tb)
return
else:
_logging_debug('%s yielded %r', info, value)
if isinstance(value, (apiproxy_stub_map.UserRPC,
datastore_rpc.MultiRpc)):
# TODO: Tail recursion if the RPC is already complete.
eventloop.queue_rpc(value, self._on_rpc_completion,
value, ns, ds_conn, gen)
return
if isinstance(value, Future):
# TODO: Tail recursion if the Future is already done.
if self._next:
raise RuntimeError('Future has already completed yet next is %r' %
self._next)
self._next = value
self._geninfo = utils.gen_info(gen)
_logging_debug('%s is now blocked waiting for %s', self, value)
value.add_callback(self._on_future_completion, value, ns, ds_conn, gen)
return
if isinstance(value, (tuple, list)):
# Arrange for yield to return a list of results (not Futures).
info = 'multi-yield from %s' % utils.gen_info(gen)
mfut = MultiFuture(info)
try:
for subfuture in value:
mfut.add_dependent(subfuture)
mfut.complete()
except GeneratorExit:
raise
except Exception, err:
_, _, tb = sys.exc_info()
mfut.set_exception(err, tb)
mfut.add_callback(self._on_future_completion, mfut, ns, ds_conn, gen)
return
if _is_generator(value):
# TODO: emulate PEP 380 here?
raise NotImplementedError('Cannot defer to another generator.')
raise RuntimeError('A tasklet should not yield a plain value: '
'%.200s yielded %.200r' % (info, value))
def _on_rpc_completion(self, rpc, ns, ds_conn, gen):
try:
result = rpc.get_result()
except GeneratorExit:
raise
except Exception, err:
_, _, tb = sys.exc_info()
self._help_tasklet_along(ns, ds_conn, gen, exc=err, tb=tb)
else:
self._help_tasklet_along(ns, ds_conn, gen, result)
def _on_future_completion(self, future, ns, ds_conn, gen):
if self._next is future:
self._next = None
self._geninfo = None
_logging_debug('%s is no longer blocked waiting for %s', self, future)
exc = future.get_exception()
if exc is not None:
self._help_tasklet_along(ns, ds_conn, gen,
exc=exc, tb=future.get_traceback())
else:
val = future.get_result() # This won't raise an exception.
self._help_tasklet_along(ns, ds_conn, gen, val)
def sleep(dt):
"""Public function to sleep some time.
Example:
yield tasklets.sleep(0.5) # Sleep for half a sec.
"""
fut = Future('sleep(%.3f)' % dt)
eventloop.queue_call(dt, fut.set_result, None)
return fut
class MultiFuture(Future):
"""A Future that depends on multiple other Futures.
This is used internally by 'v1, v2, ... = yield f1, f2, ...'; the
semantics (e.g. error handling) are constrained by that use case.
The protocol from the caller's POV is:
mf = MultiFuture()
mf.add_dependent(<some other Future>) -OR- mf.putq(<some value>)
mf.add_dependent(<some other Future>) -OR- mf.putq(<some value>)
.
. (More mf.add_dependent() and/or mf.putq() calls)
.
mf.complete() # No more dependents will be added.
.
. (Time passes)
.
results = mf.get_result()
Now, results is a list of results from all dependent Futures in
the order in which they were added.
It is legal to add the same dependent multiple times.
Callbacks can be added at any point.
From a dependent Future POV, there's nothing to be done: a callback
is automatically added to each dependent Future which will signal
its completion to the MultiFuture.
Error handling: if any dependent future raises an error, it is
propagated to mf. To force an early error, you can call
mf.set_exception() instead of mf.complete(). After this you can't
call mf.add_dependent() or mf.putq() any more.
"""
def __init__(self, info=None):
# pylint: disable=invalid-name
__ndb_debug__ = 'SKIP' # Hide this frame from self._where
self._full = False
self._dependents = set()
self._results = []
super(MultiFuture, self).__init__(info=info)
def __repr__(self):
# TODO: This may be invoked before __init__() returns,
# from Future.__init__(). Beware.
line = super(MultiFuture, self).__repr__()
lines = [line]
for fut in self._results:
lines.append(fut.dump_stack().replace('\n', '\n '))
return '\n waiting for '.join(lines)
# TODO: Maybe rename this method, since completion of a Future/RPC
# already means something else. But to what?
def complete(self):
if self._full:
raise RuntimeError('MultiFuture cannot complete twice.')
self._full = True
if not self._dependents:
self._finish()
# TODO: Maybe don't overload set_exception() with this?
def set_exception(self, exc, tb=None):
self._full = True
super(MultiFuture, self).set_exception(exc, tb)
def _finish(self):
if not self._full:
raise RuntimeError('MultiFuture cannot finish until completed.')
if self._dependents:
raise RuntimeError('MultiFuture cannot finish whilst waiting for '
'dependents %r' % self._dependents)
if self._done:
raise RuntimeError('MultiFuture done before finishing.')
try:
result = [r.get_result() for r in self._results]
except GeneratorExit:
raise
except Exception, err:
_, _, tb = sys.exc_info()
self.set_exception(err, tb)
else:
self.set_result(result)
def putq(self, value):
if isinstance(value, Future):
fut = value
else:
fut = Future()
fut.set_result(value)
self.add_dependent(fut)
def add_dependent(self, fut):
if isinstance(fut, list):
mfut = MultiFuture()
map(mfut.add_dependent, fut)
mfut.complete()
fut = mfut
elif not isinstance(fut, Future):
raise TypeError('Expected Future, received %s: %r' % (type(fut), fut))
if self._full:
raise RuntimeError('MultiFuture cannot add a dependent once complete.')
self._results.append(fut)
if fut not in self._dependents:
self._dependents.add(fut)
fut.add_callback(self._signal_dependent_done, fut)
def _signal_dependent_done(self, fut):
self._dependents.remove(fut)
if self._full and not self._dependents and not self._done:
self._finish()
class QueueFuture(Future):
"""A Queue following the same protocol as MultiFuture.
However, instead of returning results as a list, it lets you
retrieve results as soon as they are ready, one at a time, using
getq(). The Future itself finishes with a result of None when the
last result is ready (regardless of whether it was retrieved).
The getq() method returns a Future which blocks until the next
result is ready, and then returns that result. Each getq() call
retrieves one unique result. Extra getq() calls after the last
result is already returned return EOFError as their Future's
  exception. (I.e., q.getq() returns a Future as always, but yielding
that Future raises EOFError.)
NOTE: Values can also be pushed directly via .putq(value). However
there is no flow control -- if the producer is faster than the
consumer, the queue will grow unbounded.
"""
# TODO: Refactor to share code with MultiFuture.
def __init__(self, info=None):
self._full = False
self._dependents = set()
self._completed = collections.deque()
self._waiting = collections.deque()
# Invariant: at least one of _completed and _waiting is empty.
# Also: _full and not _dependents <==> _done.
super(QueueFuture, self).__init__(info=info)
# TODO: __repr__
def complete(self):
if self._full:
      raise RuntimeError('QueueFuture cannot complete twice.')
self._full = True
if not self._dependents:
self.set_result(None)
self._mark_finished()
def set_exception(self, exc, tb=None):
self._full = True
super(QueueFuture, self).set_exception(exc, tb)
if not self._dependents:
self._mark_finished()
def putq(self, value):
if isinstance(value, Future):
fut = value
else:
fut = Future()
fut.set_result(value)
self.add_dependent(fut)
def add_dependent(self, fut):
if not isinstance(fut, Future):
raise TypeError('fut must be a Future instance; received %r' % fut)
if self._full:
      raise RuntimeError('QueueFuture cannot add a dependent once complete.')
if fut not in self._dependents:
self._dependents.add(fut)
fut.add_callback(self._signal_dependent_done, fut)
def _signal_dependent_done(self, fut):
if not fut.done():
      raise RuntimeError('Future not done before signalling dependent done.')
self._dependents.remove(fut)
exc = fut.get_exception()
tb = fut.get_traceback()
val = None
if exc is None:
val = fut.get_result()
if self._waiting:
waiter = self._waiting.popleft()
self._pass_result(waiter, exc, tb, val)
else:
self._completed.append((exc, tb, val))
if self._full and not self._dependents and not self._done:
self.set_result(None)
self._mark_finished()
def _mark_finished(self):
if not self.done():
raise RuntimeError('Future not done before marking as finished.')
while self._waiting:
waiter = self._waiting.popleft()
self._pass_eof(waiter)
def getq(self):
fut = Future()
if self._completed:
exc, tb, val = self._completed.popleft()
self._pass_result(fut, exc, tb, val)
elif self._full and not self._dependents:
self._pass_eof(fut)
else:
self._waiting.append(fut)
return fut
def _pass_eof(self, fut):
if not self._done:
raise RuntimeError('QueueFuture cannot pass EOF until done.')
exc = self.get_exception()
if exc is not None:
tb = self.get_traceback()
else:
exc = EOFError('Queue is empty')
tb = None
self._pass_result(fut, exc, tb, None)
def _pass_result(self, fut, exc, tb, val):
if exc is not None:
fut.set_exception(exc, tb)
else:
fut.set_result(val)
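# Illustrative sketch of the QueueFuture protocol (inside a tasklet; the
# values are hypothetical):
#
#   q = QueueFuture()
#   q.putq('a'); q.putq('b'); q.complete()
#   first = yield q.getq()    # -> 'a' (results arrive as they complete)
#   second = yield q.getq()   # -> 'b'
#   # one more `yield q.getq()` would raise EOFError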
class SerialQueueFuture(Future):
"""Like QueueFuture but maintains the order of insertion.
This class is used by Query operations.
Invariants:
- At least one of _queue and _waiting is empty.
- The Futures in _waiting are always pending.
(The Futures in _queue may be pending or completed.)
In the discussion below, add_dependent() is treated the same way as
putq().
If putq() is ahead of getq(), the situation is like this:
putq()
v
_queue: [f1, f2, ...]; _waiting: []
^
getq()
Here, putq() appends a Future to the right of _queue, and getq()
removes one from the left.
If getq() is ahead of putq(), it's like this:
putq()
v
_queue: []; _waiting: [f1, f2, ...]
^
getq()
Here, putq() removes a Future from the left of _waiting, and getq()
appends one to the right.
When both are empty, putq() appends a Future to the right of _queue,
while getq() appends one to the right of _waiting.
The _full flag means that no more calls to putq() will be made; it
is set by calling either complete() or set_exception().
Calling complete() signals that no more putq() calls will be made.
If getq() is behind, subsequent getq() calls will eat up _queue
until it is empty, and after that will return a Future that passes
EOFError (note that getq() itself never raises EOFError). If getq()
is ahead when complete() is called, the Futures in _waiting are all
passed an EOFError exception (thereby eating up _waiting).
If, instead of complete(), set_exception() is called, the exception
and traceback set there will be used instead of EOFError.
"""
def __init__(self, info=None):
self._queue = collections.deque()
self._waiting = collections.deque()
super(SerialQueueFuture, self).__init__(info=info)
# TODO: __repr__
def complete(self):
while self._waiting:
waiter = self._waiting.popleft()
waiter.set_exception(EOFError('Queue is empty'))
# When the writer is complete the future will also complete. If there are
# still pending queued futures, these futures are themselves in the pending
# list, so they will eventually be executed.
self.set_result(None)
def set_exception(self, exc, tb=None):
super(SerialQueueFuture, self).set_exception(exc, tb)
while self._waiting:
waiter = self._waiting.popleft()
waiter.set_exception(exc, tb)
def putq(self, value):
if isinstance(value, Future):
fut = value
else:
if self._waiting:
waiter = self._waiting.popleft()
waiter.set_result(value)
return
fut = Future()
fut.set_result(value)
self.add_dependent(fut)
def add_dependent(self, fut):
if not isinstance(fut, Future):
raise TypeError('fut must be a Future instance; received %r' % fut)
if self._done:
raise RuntimeError('SerialQueueFuture cannot add dependent '
'once complete.')
if self._waiting:
waiter = self._waiting.popleft()
fut.add_callback(_transfer_result, fut, waiter)
else:
self._queue.append(fut)
def getq(self):
if self._queue:
fut = self._queue.popleft()
else:
fut = Future()
if self._done:
err = self.get_exception()
if err is not None:
tb = self.get_traceback()
else:
err = EOFError('Queue is empty')
tb = None
fut.set_exception(err, tb)
else:
self._waiting.append(fut)
return fut
def _transfer_result(fut1, fut2):
"""Helper to transfer result or errors from one Future to another."""
exc = fut1.get_exception()
if exc is not None:
tb = fut1.get_traceback()
fut2.set_exception(exc, tb)
else:
val = fut1.get_result()
fut2.set_result(val)
class ReducingFuture(Future):
"""A Queue following the same protocol as MultiFuture.
However the result, instead of being a list of results of dependent
Futures, is computed by calling a 'reducer' tasklet. The reducer tasklet
takes a list of values and returns a single value. It may be called
multiple times on sublists of values and should behave like
e.g. sum().
NOTE: The reducer input values may be reordered compared to the
order in which they were added to the queue.
"""
# TODO: Refactor to reuse some code with MultiFuture.
def __init__(self, reducer, info=None, batch_size=20):
self._reducer = reducer
self._batch_size = batch_size
self._full = False
self._dependents = set()
self._completed = collections.deque()
self._queue = collections.deque()
super(ReducingFuture, self).__init__(info=info)
# TODO: __repr__
def complete(self):
if self._full:
raise RuntimeError('ReducingFuture cannot complete twice.')
self._full = True
if not self._dependents:
self._mark_finished()
def set_exception(self, exc, tb=None):
self._full = True
self._queue.clear()
super(ReducingFuture, self).set_exception(exc, tb)
def putq(self, value):
if isinstance(value, Future):
fut = value
else:
fut = Future()
fut.set_result(value)
self.add_dependent(fut)
def add_dependent(self, fut):
if self._full:
raise RuntimeError('ReducingFuture cannot add dependent once complete.')
self._internal_add_dependent(fut)
def _internal_add_dependent(self, fut):
if not isinstance(fut, Future):
raise TypeError('fut must be a Future; received %r' % fut)
if fut not in self._dependents:
self._dependents.add(fut)
fut.add_callback(self._signal_dependent_done, fut)
def _signal_dependent_done(self, fut):
if not fut.done():
      raise RuntimeError('Future not done before signalling dependent done.')
self._dependents.remove(fut)
if self._done:
return # Already done.
try:
val = fut.get_result()
except GeneratorExit:
raise
except Exception, err:
_, _, tb = sys.exc_info()
self.set_exception(err, tb)
return
self._queue.append(val)
if len(self._queue) >= self._batch_size:
todo = list(self._queue)
self._queue.clear()
try:
nval = self._reducer(todo)
except GeneratorExit:
raise
except Exception, err:
_, _, tb = sys.exc_info()
self.set_exception(err, tb)
return
if isinstance(nval, Future):
self._internal_add_dependent(nval)
else:
self._queue.append(nval)
if self._full and not self._dependents:
self._mark_finished()
def _mark_finished(self):
if not self._queue:
self.set_result(None)
elif len(self._queue) == 1:
self.set_result(self._queue.pop())
else:
todo = list(self._queue)
self._queue.clear()
try:
nval = self._reducer(todo)
except GeneratorExit:
raise
except Exception, err:
_, _, tb = sys.exc_info()
self.set_exception(err, tb)
return
if isinstance(nval, Future):
self._internal_add_dependent(nval)
else:
self.set_result(nval)
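# Illustrative sketch (hypothetical reducer; sum() maps a list of values to
# a single value, matching the reducer contract):
#
#   rf = ReducingFuture(sum, batch_size=2)
#   for i in range(5):
#     rf.putq(i)
#   rf.complete()
#   total = rf.get_result()   # -> 10, regardless of internal batching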
# Alias for StopIteration used to mark return values.
# To use this, raise Return(<your return value>). The semantics
# are exactly the same as raise StopIteration(<your return value>)
# but using Return clarifies that you are intending this to be the
# return value of a tasklet.
# TODO: According to Monocle authors Steve and Greg Hazel, Twisted
# used an exception to signal a return value from a generator early
# on, and they found out it was error-prone. Should I worry?
Return = StopIteration
def get_return_value(err):
# XXX Docstring
if not err.args:
result = None
elif len(err.args) == 1:
result = err.args[0]
else:
result = err.args
return result
def tasklet(func):
# XXX Docstring
@utils.wrapping(func)
def tasklet_wrapper(*args, **kwds):
# XXX Docstring
# TODO: make most of this a public function so you can take a bare
# generator and turn it into a tasklet dynamically. (Monocle has
# this I believe.)
# pylint: disable=invalid-name
__ndb_debug__ = utils.func_info(func)
fut = Future('tasklet %s' % utils.func_info(func))
fut._context = get_context()
try:
result = func(*args, **kwds)
except StopIteration, err:
# Just in case the function is not a generator but still uses
# the "raise Return(...)" idiom, we'll extract the return value.
result = get_return_value(err)
if _is_generator(result):
ns = namespace_manager.get_namespace()
ds_conn = datastore._GetConnection()
eventloop.queue_call(None, fut._help_tasklet_along, ns, ds_conn, result)
else:
fut.set_result(result)
return fut
return tasklet_wrapper
def synctasklet(func):
"""Decorator to run a function as a tasklet when called.
Use this to wrap a request handler function that will be called by
some web application framework (e.g. a Django view function or a
webapp.RequestHandler.get method).
"""
taskletfunc = tasklet(func) # wrap at declaration time.
@utils.wrapping(func)
def synctasklet_wrapper(*args, **kwds):
# pylint: disable=invalid-name
__ndb_debug__ = utils.func_info(func)
return taskletfunc(*args, **kwds).get_result()
return synctasklet_wrapper
def toplevel(func):
"""A sync tasklet that sets a fresh default Context.
Use this for toplevel view functions such as
webapp.RequestHandler.get() or Django view functions.
"""
synctaskletfunc = synctasklet(func) # wrap at declaration time.
@utils.wrapping(func)
def add_context_wrapper(*args, **kwds):
# pylint: disable=invalid-name
__ndb_debug__ = utils.func_info(func)
_state.clear_all_pending()
# Create and install a new context.
ctx = make_default_context()
try:
set_context(ctx)
return synctaskletfunc(*args, **kwds)
finally:
set_context(None)
ctx.flush().check_success()
eventloop.run() # Ensure writes are flushed, etc.
return add_context_wrapper
_CONTEXT_KEY = '__CONTEXT__'
_DATASTORE_APP_ID_ENV = 'DATASTORE_APP_ID'
_DATASTORE_PROJECT_ID_ENV = 'DATASTORE_PROJECT_ID'
_DATASTORE_ADDITIONAL_APP_IDS_ENV = 'DATASTORE_ADDITIONAL_APP_IDS'
_DATASTORE_USE_PROJECT_ID_AS_APP_ID_ENV = 'DATASTORE_USE_PROJECT_ID_AS_APP_ID'
def get_context():
# XXX Docstring
ctx = None
if os.getenv(_CONTEXT_KEY):
ctx = _state.current_context
if ctx is None:
ctx = make_default_context()
set_context(ctx)
return ctx
def make_default_context():
# XXX Docstring
datastore_app_id = os.environ.get(_DATASTORE_APP_ID_ENV, None)
datastore_project_id = os.environ.get(_DATASTORE_PROJECT_ID_ENV, None)
if datastore_app_id or datastore_project_id:
# We will create a Cloud Datastore context.
app_id_override = bool(os.environ.get(
_DATASTORE_USE_PROJECT_ID_AS_APP_ID_ENV, False))
if not datastore_app_id and not app_id_override:
raise ValueError('Could not determine app id. To use project id (%s) '
'instead, set %s=true. This will affect the '
'serialized form of entities and should not be used '
'if serialized entities will be shared between '
'code running on App Engine and code running off '
'App Engine. Alternatively, set %s=<app id>.'
% (datastore_project_id,
_DATASTORE_USE_PROJECT_ID_AS_APP_ID_ENV,
_DATASTORE_APP_ID_ENV))
elif datastore_app_id:
if app_id_override:
raise ValueError('App id was provided (%s) but %s was set to true. '
'Please unset either %s or %s.' %
(datastore_app_id,
_DATASTORE_USE_PROJECT_ID_AS_APP_ID_ENV,
_DATASTORE_APP_ID_ENV,
_DATASTORE_USE_PROJECT_ID_AS_APP_ID_ENV))
elif datastore_project_id:
# Project id and app id provided, make sure they are the same.
id_resolver = datastore_pbs.IdResolver([datastore_app_id])
if (datastore_project_id !=
id_resolver.resolve_project_id(datastore_app_id)):
raise ValueError('App id "%s" does not match project id "%s".'
% (datastore_app_id, datastore_project_id))
datastore_app_id = datastore_project_id or datastore_app_id
additional_app_str = os.environ.get(_DATASTORE_ADDITIONAL_APP_IDS_ENV, '')
additional_apps = (app.strip() for app in additional_app_str.split(','))
return _make_cloud_datastore_context(datastore_app_id, additional_apps)
return make_context()
@utils.positional(0)
def make_context(conn=None, config=None):
# XXX Docstring
from . import context # Late import to deal with circular imports.
return context.Context(conn=conn, config=config)
def _make_cloud_datastore_context(app_id, external_app_ids=()):
"""Creates a new context to connect to a remote Cloud Datastore instance.
This should only be used outside of Google App Engine.
Args:
app_id: The application id to connect to. This differs from the project
id as it may have an additional prefix, e.g. "s~" or "e~".
external_app_ids: A list of apps that may be referenced by data in your
application. For example, if you are connected to s~my-app and store keys
for s~my-other-app, you should include s~my-other-app in the external_apps
list.
Returns:
An ndb.Context that can connect to a Remote Cloud Datastore. You can use
this context by passing it to ndb.set_context.
"""
from . import model # Late import to deal with circular imports.
# Late import since it might not exist.
if not datastore_pbs._CLOUD_DATASTORE_ENABLED:
raise datastore_errors.BadArgumentError(
datastore_pbs.MISSING_CLOUD_DATASTORE_MESSAGE)
import googledatastore
try:
from google.appengine.datastore import cloud_datastore_v1_remote_stub
except ImportError:
from google3.apphosting.datastore import cloud_datastore_v1_remote_stub
current_app_id = os.environ.get('APPLICATION_ID', None)
if current_app_id and current_app_id != app_id:
# TODO(pcostello): We should support this so users can connect to different
# applications.
raise ValueError('Cannot create a Cloud Datastore context that connects '
'to an application (%s) that differs from the application '
'already connected to (%s).' % (app_id, current_app_id))
os.environ['APPLICATION_ID'] = app_id
id_resolver = datastore_pbs.IdResolver((app_id,) + tuple(external_app_ids))
project_id = id_resolver.resolve_project_id(app_id)
endpoint = googledatastore.helper.get_project_endpoint_from_env(project_id)
datastore = googledatastore.Datastore(
project_endpoint=endpoint,
credentials=googledatastore.helper.get_credentials_from_env())
conn = model.make_connection(_api_version=datastore_rpc._CLOUD_DATASTORE_V1,
_id_resolver=id_resolver)
# If necessary, install the stubs
try:
stub = cloud_datastore_v1_remote_stub.CloudDatastoreV1RemoteStub(datastore)
apiproxy_stub_map.apiproxy.RegisterStub(datastore_rpc._CLOUD_DATASTORE_V1,
stub)
except:
pass # The stub is already installed.
# TODO(pcostello): Ensure the current stub is connected to the right project.
return make_context(conn=conn)
def set_context(new_context):
# XXX Docstring
os.environ[_CONTEXT_KEY] = '1'
_state.current_context = new_context
# TODO: Rework the following into documentation.
# A tasklet/coroutine/generator can yield the following things:
# - Another tasklet/coroutine/generator; this is entirely equivalent to
# "for x in g: yield x"; this is handled entirely by the @tasklet wrapper.
# (Actually, not. @tasklet returns a function that when called returns
# a Future. You can use the pep380 module's @gwrap decorator to support
# yielding bare generators though.)
# - An RPC (or MultiRpc); the tasklet will be resumed when this completes.
# This does not use the RPC's callback mechanism.
# - A Future; the tasklet will be resumed when the Future is done.
# This uses the Future's callback mechanism.
# A Future can be used in several ways:
# - Yield it from a tasklet; see above.
# - Check (poll) its status via f.done().
# - Call its wait() method, perhaps indirectly via check_success()
# or get_result(). This invokes the event loop.
# - Call the Future.wait_any() or Future.wait_all() method.
#   This waits for any or all Futures and RPCs in the argument list.
# XXX HIRO XXX
# - A tasklet is a (generator) function decorated with @tasklet.
# - Calling a tasklet schedules the function for execution and returns a Future.
# - A function implementing a tasklet may:
# = yield a Future; this waits for the Future which returns f.get_result();
# = yield an RPC; this waits for the RPC and then returns rpc.get_result();
# = raise Return(result); this sets the outer Future's result;
# = raise StopIteration or return; this sets the outer Future's result;
# = raise another exception: this sets the outer Future's exception.
# - If a function implementing a tasklet is not a generator it will be
# immediately executed to completion and the tasklet wrapper will
# return a Future that is already done. (XXX Alternative behavior:
# it schedules the call to be run by the event loop.)
# - Code not running in a tasklet can call f.get_result() or f.wait() on
# a future. This is implemented by a simple loop like the following:
# while not self._done:
# eventloop.run1()
# - Here eventloop.run1() runs one "atomic" part of the event loop:
# = either it calls one immediately ready callback;
# = or it waits for the first RPC to complete;
# = or it sleeps until the first callback should be ready;
# = or it raises an exception indicating all queues are empty.
# - It is possible but suboptimal to call rpc.get_result() or
# rpc.wait() directly on an RPC object since this will not allow
# other callbacks to run as they become ready. Wrapping an RPC in a
# Future will take care of this issue.
# - The important insight is that when a generator function
# implementing a tasklet yields, raises or returns, there is always a
# wrapper that catches this event and either turns it into a
# callback sent to the event loop, or sets the result or exception
# for the tasklet's Future.
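# A minimal illustrative sketch of the above, using this module's own names:
#
#   @tasklet
#   def pause_and_add(x):
#     yield sleep(0.1)        # yield a Future; resumed when it fires
#     raise Return(x + 1)     # sets the outer Future's result
#
#   fut = pause_and_add(41)   # returns a Future; runs via the event loop
#   print fut.get_result()    # runs the loop until done; prints 42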
|
It’s never fun losing things. My personal bane is my car keys. No matter how many holders or key rings I hang up by the doorway, I always end up losing my keys, and when I need to leave for work, I’m scrambling to find them!
And that’s what I want to talk about today: losing things! Or, at least the words that we use to describe losing things.
If you lose your car keys (like I do), are they lost or loss?
How should I use lost? In most cases, lost is the past tense and past participle of the verb to lose. In other words, you should be using lost as a verb in your writing.
There are many different uses and meanings of lost, but they can be broken down into a few main categories.
Unable to find something > I lost my keys.
Fail to retain > I lost my job.
Fail to win > We lost the game.
There are other figurative uses of lost, too.
In addition to its use as a verb, lost can also function as an adjective where it means unable to find one’s way or unable to be found.
Sally solved the mystery of the lost TV remote.
Lost can be found in a number of common phrases that you are likely to hear in speech or read in books. Let’s go over a few.
Get lost! > Meaning: Informal and rude way to tell someone to leave.
All is not lost > Meaning: There is still hope. Don’t give up.
A lost cause > Meaning: No hope.
How should I use loss? Unlike lost, which can function as multiple parts of speech, loss is only ever used as a noun, so its meaning is a little more restricted.
The fact or process of losing > The team had three losses last week.
An amount of money lost by a business or organization > I had to sell these products at a loss.
Loss is also used in a few common phrases. Let’s go over them, so you’re familiar with them.
At a loss > Meaning: to be puzzled or confused.
At a loss for words > Meaning: Not sure what to say from surprise or shock.
Since these are different parts of speech, you’ll want to know how these words differ from each other and how to use them correctly. They are never interchangeable. |
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 11 15:31:29 2015
@author: Zahari Kassabov
"""
import inspect
import difflib
import logging
import functools
import yaml
from reportengine import namespaces
from reportengine.utils import ChainMap
log = logging.getLogger(__name__)
_config_token = 'parse_'
class ConfigError(Exception):
alternatives_header="Instead of '%s', did you mean one of the following?"
    def __init__(self, message, bad_item=None, alternatives=None, *,
display_alternatives='best'):
super().__init__(message)
self.bad_item = bad_item
if alternatives:
alternatives = list(alternatives)
self.alternatives = alternatives
self.display_alternatives = display_alternatives
def alternatives_text(self):
if (self.display_alternatives=='none' or not self.display_alternatives
or not self.alternatives):
return ''
if self.display_alternatives == 'best':
alternatives = difflib.get_close_matches(self.bad_item,
self.alternatives)
elif self.display_alternatives == 'all':
alternatives = self.alternatives
else:
raise ValueError("Unrecognized display_alternatives option. "
"Must be one of: 'all', 'best' or 'none'.")
if not alternatives:
return ''
head = (self.alternatives_header
% (self.bad_item,))
txts = [' - {}'.format(alt) for alt in alternatives]
return '\n'.join((head, *txts))
class BadInputType(ConfigError, TypeError):
def __init__(self, param, val, input_type):
msg = ("Bad input type for parameter '{param}': Value '{val}' "
"is not of type {input_type}.").format(**locals())
super().__init__(msg)
class InputNotFoundError(ConfigError, KeyError):
alternatives_header = "Maybe you mistyped %s in one of the following keys?"
def element_of(paramname, elementname=None):
def inner(f):
nonlocal elementname
if elementname is None:
if f.__name__.startswith(_config_token):
elementname = f.__name__[len(_config_token):]
f._element_of = paramname
f._elementname = elementname
return f
return inner
def named_element_of(paramname, elementname=None):
def inner(f):
element_of(paramname, elementname)(f)
f._named = True
return f
return inner
def _make_element_of(f):
if getattr(f, '_named', False):
def parse_func(self, param:dict, **kwargs):
            d = {k: f(self, v, **kwargs) for k, v in param.items()}
return namespaces.NSItemsDict(d, nskey=f._elementname)
else:
def parse_func(self, param:list, **kwargs):
l = [f(self, elem, **kwargs) for elem in param]
return namespaces.NSList(l, nskey=f._elementname)
#We replicate the same signature for the kwarg parameters, so that we can
#use that to build the graph.
list_params = list(inspect.signature(parse_func).parameters.values())[0:2]
kwarg_params = list(inspect.signature(f).parameters.values())[2:]
params = [*list_params, *kwarg_params]
parse_func.__signature__ = inspect.Signature(parameters=params)
return parse_func
def _parse_func(f):
"""Check that the function has at least one argument, and check that the
argument corresponds the type declared in the annotation id any."""
sig = inspect.signature(f)
try:
first_param = list(sig.parameters.values())[1]
except IndexError:
raise TypeError(("Parser functiom must have at least one "
"parameter: %s")
% f.__qualname__)
input_type = first_param.annotation
@functools.wraps(f)
def f_(self, val, *args, **kwargs):
if input_type is not sig.empty:
if not isinstance(val, input_type):
raise BadInputType(f.__name__, val, input_type)
return f(self, val, *args, **kwargs)
return f_
class ElementOfResolver(type):
"""Generate a parsing function for collections of each 'atomic' parsing
function found in the class, and marked with the relevant decorator."""
def __new__(cls, name, bases, attrs):
newattrs = {}
_list_keys = {}
for attr, f in attrs.items():
if hasattr(f, '_element_of'):
newattr = _config_token + f._element_of
if newattr in attrs:
raise ValueError("Cannot construct {newattr} from "
"'_element_of' {attr} because it is "
"already declared.")
#We have to apply parse func in here as well.
newattrs[newattr] = _make_element_of(_parse_func(f))
_list_keys[f._element_of] = f._elementname
newattrs['_list_keys'] = _list_keys
attrs = {**newattrs, **attrs}
return super().__new__(cls, name, bases, attrs)
class AutoTypeCheck(type):
"""Apply automatically the _parse_func decorator
to every parsing method fouds in the class."""
def __new__(cls, name, bases, attrs):
for k,v in attrs.items():
if k.startswith(_config_token):
attrs[k] = _parse_func(v)
return super().__new__(cls, name, bases, attrs)
class ConfigMetaClass(ElementOfResolver, AutoTypeCheck):
pass
class Config(metaclass=ConfigMetaClass):
def __init__(self, input_params, environment=None):
self.environment = environment
self.input_params = input_params
#self.params = self.process_params(input_params)
def get_parse_func(self, param):
func_name = _config_token + param
try:
return getattr(self, func_name)
except AttributeError:
            return lambda x: x
def resolve_key(self, key, ns, input_params=None, parents=None):
if key in ns:
return ns.get_where(key)
if parents is None:
parents = []
if input_params is None:
input_params = self.input_params
        if key not in input_params:
msg = "A parameter is required: {key}.".format(key=key)
if parents:
msg += "\nThis is needed to process:\n"
                msg += '\nthrough:\n'.join(' - ' + str(p) for
p in reversed(parents))
#alternatives_text = "Note: The following similarly spelled "
# "params exist in the input:"
raise InputNotFoundError(msg, key, alternatives=input_params.keys())
input_val = input_params[key]
f = self.get_parse_func(key)
        max_index = len(ns.maps) - 1
put_index = max_index
sig = inspect.signature(f)
kwargs = {}
for pname, param in list(sig.parameters.items())[1:]:
if pname in ns:
index, pval = ns.get_where(pname)
else:
try:
index, pval = self.resolve_key(pname, ns, parents=[*parents, key])
except KeyError:
if param.default is not sig.empty:
pval = param.default
index = max_index
else:
raise
if index < put_index:
put_index = index
kwargs[pname] = pval
val = f(input_val, **kwargs)
ns.maps[put_index][key] = val
return put_index, val
def process_fuzzyspec(self, fuzzy, ns, parents=None):
if not parents:
parents = []
gen = namespaces.expand_fuzzyspec_partial(fuzzy, ns)
while True:
try:
key, currspec, currns = next(gen)
except StopIteration as e:
return e.value
else:
self.resolve_key(key, currns, parents=[*parents, currspec])
def process_all_params(self, input_params=None):
"""Simple shortcut to process all paams in a simple namespace, if
possible."""
if input_params is None:
input_params = self.input_params
ns = ChainMap()
for param in input_params:
if param not in ns:
self.resolve_key(param, ns, input_params=input_params)
return ns
def _parse_actions_gen(self, actions, currspec=()):
if isinstance(actions, dict):
for k,v in actions.items():
yield from self._parse_actions_gen(v, (*currspec, k))
elif isinstance(actions, list):
for v in actions:
if isinstance(v, dict):
if len(v) != 1:
raise ConfigError(("Invalid action specification %s. "
"Must be a scalar or a mapping with exactly one key") % v)
k = next(iter(v.keys()))
args = v[k]
if not isinstance(args, dict):
raise ConfigError("Action arguments must be "
"a mapping if present" % k)
yield k, currspec, tuple(args.items())
elif isinstance(v, str):
yield v, currspec, ()
else:
raise ConfigError("Unrecognized format for actions. "
"Must be a string or mapping, not '%s'" %v)
else:
raise ConfigError("Unrecognized format for actions")
def parse_actions_(self, actions):
return list(self._parse_actions_gen(actions))
def __getitem__(self, item):
return self.input_params[item]
def __iter__(self):
return iter(self.input_params)
def __len__(self):
return len(self.input_params)
def __contains__(self, item):
return item in self.input_params
@classmethod
def from_yaml(cls, o, *args, **kwargs):
try:
return cls(yaml.load(o), *args, **kwargs)
except yaml.error.YAMLError as e:
raise ConfigError("Failed to parse yaml file: %s" % e)
|
If you’ve ever experienced tooth sensitivity, you’re well aware of just how uncomfortable it can be. Something as simple as drinking a cold beverage or trying to enjoy a bowl of ice cream can send you into a fury of pain. When faced with the discomfort of sensitive teeth, your dentist in West Chester is the first place you should turn to for help.
The most common cause of tooth sensitivity is part of the tooth’s root becoming exposed. These roots are packed with tons of nerves that can send pain signals soaring into your brain when they come in contact with heat or cold. Oftentimes root exposure happens as a result of gum recession or worn enamel, which can be caused by a number of things including chronic grinding or clenching, brushing too hard, or consuming a lot of acidic foods or drinks.
Choosing the Right Toothpaste. Selecting a toothpaste that’s specifically designed to ease sensitivity and using it regularly can help reduce the severity of the sensitivity and give you some relief. Look for an option that’s formulated for those with sensitive teeth and avoid using toothpaste that contains sodium pyrophosphate, which is found in many whitening and tartar-control pastes.
Using a Softer Toothbrush. Using the right toothpaste and also a soft-bristled toothbrush can double the sensitivity-fighting effects. Toothbrushes with soft bristles are more gentle on both the gums and tooth enamel, yet are still very effective at removing bacteria and plaque buildup. Harder bristles, on the other hand, can scratch enamel and even cause it to erode. This will increase the risk of roots becoming exposed and teeth becoming more sensitive.
Taking it Easy While Brushing. It may seem that the harder you brush, the cleaner your teeth will be. However, quite the opposite is true. Brushing with too much pressure can easily cause gums to recede and enamel to erode, again leaving your roots at risk of being exposed.
Making some adjustments to your oral hygiene routine can help reduce tooth sensitivity, but if the pain continues to bother you and keeps you from enjoying your favorite foods, schedule an appointment with your West Chester dentist. There are many treatments available such as fluoride, bonding, or a root canal and dental crown.
Halitosis, or more commonly referred to as bad breath, can happen to anyone. It can be uncomfortable and embarrassing. But what’s more concerning for the team at our dental office in Kettering is that bad breath can be a sign of something more serious. Let’s take a look at what causes bad breath and some ways you can get rid of it.
Bad breath can be caused by something temporary such as fragrant food or coffee. This type of bad breath usually resolves itself and is probably nothing to worry about. However, when bad breath becomes an ongoing occurrence, you should consider calling your dentist in Kettering.
See Your Dentist. Maintaining regular visits to your Kettering dentist can not only help protect your smile from cavities, it can also help catch any potential problems before they have a chance to become serious… including gum disease. If you’re worried about your bad breath, or if it’s been longer than six months since your last dental appointment, give us a call today.
Wisdom teeth are often a topic of concern for many patients, and we get asked a lot of questions about these sometimes pesky teeth. How do you know if you need to have your wisdom teeth removed? What happens if you don’t extract wisdom teeth? Does the procedure hurt? We understand that wisdom teeth can be confusing and perhaps even scary, but don’t worry, our dental office in West Chester is here to help.
Wisdom teeth are usually first seen on dental x-rays during routine appointments with your dentist in West Chester. Typically your dental team can see them before you even know that they’re starting to work their way to the surface. This is also when we can most likely tell if they’re going to need to be removed. Most of the time, they will need to come out. In fact, 90% of Americans have their wisdom teeth removed. But what happens if you don’t have your wisdom teeth extracted?
Maintaining regular visits to our West Chester dental office not only helps protect your existing teeth from decay, but these appointments can also help us identify whether your wisdom teeth should be removed before any problems arise. If your wisdom teeth do need to be removed, don’t worry. The procedure is incredibly common and your dental team will do everything they can to keep you comfortable during and after treatment.
If you have questions about dentistry, you’re not alone. At our dental office in Middletown, we love answering questions from our patients and neighbors in order to help them get a strong, healthy smile. Let’s take a look at four of the most common questions we get asked.
Dental x-rays are extremely safe, but they do still require radiation. However, the radiation exposure from dental x-rays is super small and so are the potential side effects. The use of protective equipment such as a leaded apron further minimizes risk. Dental x-rays are incredibly helpful when looking at teeth as they allow us to see things that may not yet be visible to the naked eye, as well as into the jaw bone. The images produced from x-rays help us catch decay or other problems early, when treatment is easier.
It’s usually recommended that patients get a dental checkup every six months. But unfortunately, many people only go to the dentist when they have a problem. The truth is, your dentist in Middletown suggests visiting regularly to prevent just that. Bi-annual dental checkups can prevent potentially painful problems from ever occurring, and if there does happen to be a concern, catching it early often means easier and cheaper treatment.
Even if you brush your teeth twice a day every day, you’re still missing out on a thorough cleaning. While brushing does a good job of removing plaque and bacteria from your teeth and mouth, most toothbrushes just can’t reach the tiny spaces between each tooth. This is where bacteria, plaque, and food particles love to hang out. If this buildup isn’t removed regularly, the chance of decay greatly increases. But cavities aren’t your only concern. Failing to remove buildup from around and under the gum line can also put you at risk for gum disease.
There are many smile whitening options available to us today. From over-the-counter strips to whitening toothpaste, professional in-office whitening to take-home trays from your dentist, how do you know which whitening option is best for you? Each whitening method has its own set of pros and cons. The fastest way to get whiter teeth is usually a professional in-office whitening treatment. But it’s not the right choice for everyone. If you’re looking to get a whiter smile, start by talking with your dentist to find the solution that fits your needs.
As always, if you have questions about dentistry, the team at our Middletown dental office is here to help. Simply call and talk with us today!
We often hear of patients who don’t go to the dentist unless they have a problem. In fact, when it comes to dental care, the best way to prevent a problem from occurring in the first place is to visit your dentist in West Chester at least twice a year. This can save you from experiencing the pain of many dental problems, as well as the cost of extensive treatment.
Maintaining a good oral health routine of brushing and flossing daily is only half of what it takes to keep your smile healthy for life. Make sure you’re using proper brushing and flossing techniques, eating a well-balanced diet packed with vegetables and fruits, and seeing your dentist every six months. We’re always welcoming new patients at our West Chester dental office. Call to schedule an appointment today.
We have three dental offices in Middletown, Kettering, and West Chester to best serve our patients.
It all started with a slight tingling sensation in your mouth. Then all of a sudden, up pops a canker sore. Now you’re stuck with this uncomfortable and painful blister-like sore. What can you do to get rid of it? Even though there’s no official cure for canker sores, there are some things you can do to help reduce discomfort. Join our dental office in Kettering as we cover some common canker sore treatments and talk about how you may be able to prevent one in the future.
While there isn’t any cure that will quickly and easily get rid of canker sores, you don’t have to stay feeling uncomfortable while the sore runs its course. Your first go-to treatment option can be buying one of the several over-the-counter products designed to numb and ease the pain associated with canker sores. If that doesn’t quite cut it, you can schedule an appointment with your Kettering dentist. They may be able to use a laser to speed up healing time or may even suggest a corticosteroid or prescription-strength antimicrobial rinse.
Unlike cold sores, which are very similar to canker sores but affect the outside of the mouth, canker sores are not contagious. They’re mostly just annoying. But if you’ve been dealing with a canker sore for longer than three weeks or you notice any other changes to your mouth, don’t hesitate to call our Kettering dental office today.
Garland & Johnson Dental has three dental offices in Middletown, Kettering, and West Chester.
Tooth pain is a pain like no other, and when you have it you’re willing to do just about anything to get rid of it. This includes perhaps not thinking twice when your dentist recommends extracting the tooth that’s causing you pain. But our dental office in Middletown wants you to know that you may have another option that will both get you out of pain and save your tooth.
Essentially, a root canal is a dental treatment that involves removing infection from inside the pulp of a tooth. We understand that may sound scary, but the treatment is incredibly common and many patients report little to no pain. Your dentist in Middletown will start by thoroughly numbing the area. After the numbing agent takes effect, a tiny hole is made in the tooth and the infected material causing all your pain is removed. Your tooth may then be capped with a dental crown to keep it protected.
Maintaining regular dental appointments at our Middletown dental office can help you protect your smile and save your teeth from needing a root canal or an extraction. At these visits, decay can be caught and treated early before it has a chance to affect the roots and cause pain. Call to schedule an appointment today.
Even though acid reflux is a condition that originates in the stomach, it can affect other areas of the body, including the mouth. The truth is, people who suffer from acid reflux can be at greater risk for oral health concerns than those who don’t. Our dental office in West Chester is here to help anyone dealing with acid reflux understand how it can negatively affect dental health and what you can do to reduce your risk.
See your dentist in West Chester every six months to catch any problems early.
If you suffer from acid reflux and are worried about your dental health, we welcome you to call our West Chester dental office to schedule an appointment today. We will take a close look at your overall oral health and talk with you about the best way to protect your teeth against the dangers of acid reflux.
When it comes to all of the health complications that can go hand-in-hand with diabetes, oral health is often overlooked. At our Kettering dental office, we want our patients and neighbors to know just how drastically diabetes can affect oral health, and the precautions those with diabetes should take to keep their mouths healthy.
Everyone, whether diabetic or not, should do everything they can to eat a well-balanced diet. Fueling your body with fruits, vegetables, grains, dairy, and proteins can do wonders in protecting overall health and keeping glucose levels in check. Limiting sugary foods and drinks is great for managing your diabetes and is something your dentist in Kettering recommends.
At our dental office in Kettering, we’re here to help our community get healthy and stay healthy, even when patients have health concerns that may not appear at first to have any effect on oral health. The truth is that many diseases, including diabetes, have a connection to the mouth. If you have diabetes and notice anything unusual about your oral health, do not hesitate to give us a call. We’re always happy to help or answer any questions you may have.
With Halloween right around the corner, our dental office in West Chester wants to share a secret with our patients and neighbors. Did you know that there are snacks out there that are worse for your teeth than candy? You heard us right. Candy may not be the scariest thing for your oral health. It’s no trick. Just the truth.
While we’re here to talk about surprising snacks that are dangerous to oral health, it is worth mentioning that candy is still a concern for your dentist in West Chester. But it’s not really the sugar itself that’s the problem. It’s what happens to the sugar when you eat it. Bacteria that live in the mouth love sugar and will feed on it every chance they get. This keeps the bacteria full and healthy. But what’s more concerning is what happens when these bacteria digest sugars. Like all living things, bacteria have to release waste. They just so happen to release an acid that wears away tooth enamel and increases the likelihood of cavities. Because of this, it’s still important to enjoy sugary foods in moderation.
Happy Halloween from our West Chester, Kettering, and Middletown dental offices!
from annoying.decorators import render_to
from annoying.functions import get_object_or_None
from blogs.views import blog_list
from django.conf import settings
from django.contrib.auth.models import User
from django.core.mail import send_mail
from django.core.paginator import Paginator, InvalidPage
from django.db.models import Count
from django.db.models import Q
from django.http import Http404, HttpResponse, HttpResponseRedirect, HttpResponseBadRequest
from django.shortcuts import get_object_or_404, render_to_response
from django.template import RequestContext
from django.views.decorators.cache import never_cache
from haystack.forms import ModelSearchForm
from haystack.query import EmptySearchQuerySet, SearchQuerySet
from pygments.lexers import get_lexer_by_name
from snipts.models import Favorite, Snipt, SniptSecureView
from taggit.models import Tag
from teams.models import Team
RESULTS_PER_PAGE = getattr(settings, 'HAYSTACK_SEARCH_RESULTS_PER_PAGE', 20)
@render_to('snipts/detail.html')
def detail(request, username, snipt_slug):
snipt = get_object_or_404(Snipt, user__username=username, slug=snipt_slug)
user = snipt.user
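    # Re-save non-markdown snipts whose cached stylized output predates
    # line-number support, so it gets regenerated (inferred from the check below).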
if snipt.lexer != 'markdown':
if 'linenos' not in snipt.stylized:
snipt.save()
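    # Access control: non-owners may view only public snipts, or private ones
    # when the correct ?key= parameter is supplied; secure snipts additionally
    # require an authenticated user.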
if user != request.user:
if not snipt.public:
if 'key' not in request.GET:
raise Http404
else:
if request.GET.get('key') != snipt.key:
raise Http404
if snipt.secure and not request.user.is_authenticated():
raise Http404
snipt.views = snipt.views + 1
snipt.save()
if snipt.secure:
secure_view = SniptSecureView(user=request.user, snipt=snipt)
secure_view.save()
tags = Tag.objects
if user == request.user:
tags = tags.filter(snipt__user=user)
public = False
else:
tags = tags.filter(snipt__user=user, snipt__public=True)
public = True
tags = tags.annotate(count=Count('taggit_taggeditem_items__id'))
tags = tags.order_by('-count', 'name')
return {
'detail': True,
'has_snipts': True,
'public': public,
'snipt': snipt,
'tags': tags,
'user': user,
}
def download(request, snipt_key):
snipt = get_object_or_404(Snipt, key=snipt_key)
return HttpResponse(snipt.code, content_type='application/x-download')
def embed(request, snipt_key):
snipt = get_object_or_404(Snipt, key=snipt_key)
lines = snipt.embedded.split('\n')
return render_to_response('snipts/embed.html',
{'lines': lines, 'snipt': snipt},
context_instance=RequestContext(request),
content_type='application/javascript')
def report_spam(request, snipt_id):
if not request.user.is_authenticated():
return HttpResponseBadRequest()
snipt = get_object_or_404(Snipt, pk=snipt_id)
send_mail('[Snipt] Spam reported',
"""
Snipt: https://snipt.net/admin/snipts/snipt/{}/
User: https://snipt.net/admin/auth/user/{}/delete/
Reporter: https://snipt.net/{}/
""".format(snipt.id, snipt.user.id, request.user.username),
'[email protected]',
['[email protected]'],
fail_silently=False)
return HttpResponse("""Thanks! Your report has been
submitted to the site admins.""")
@render_to('snipts/list-user.html')
def blog_posts(request, username):
if request.blog_user:
raise Http404
if request.user.username == username:
public = False
public_user = False
user = request.user
snipts = Snipt.objects.filter(user=request.user, blog_post=True)
tags = Tag.objects.filter(snipt__user=request.user).distinct()
else:
public = True
public_user = True
user = get_object_or_404(User, username=username)
snipts = Snipt.objects.filter(blog_post=True, user=user, public=True)
tags = Tag.objects.filter(snipt__user=user,
snipt__public=True).distinct()
tags = tags.order_by('name')
snipts = snipts.order_by('-created')
context = {
'has_snipts': True,
'public': public,
'public_user': public_user,
'snipts': snipts,
'tags': tags,
'user': user,
}
if 'rss' in request.GET:
context['snipts'] = context['snipts'][:20]
return rss(request, context)
return context
@render_to('snipts/list-user.html')
def favorites(request, username):
if request.user.username != username:
raise Http404
if request.blog_user:
raise Http404
public = False
favorites = Favorite.objects.filter(user=request.user).values('snipt')
favorites = [f['snipt'] for f in favorites]
snipts = Snipt.objects.filter(Q(pk__in=favorites))
tags = Tag.objects.filter(snipt__user=request.user).distinct()
tags = tags.order_by('name')
snipts = snipts.order_by('-created')
context = {
'favorites': favorites,
'has_snipts': True,
'public': public,
'public_user': False,
'snipts': snipts,
'tags': tags,
'user': request.user,
}
if 'rss' in request.GET:
context['snipts'] = context['snipts'][:20]
return rss(request, context)
return context
@render_to('snipts/list-public.html')
def list_public(request, tag_slug=None):
if request.blog_user:
return blog_list(request)
snipts = Snipt.objects.filter(public=True).order_by('-created')
if tag_slug:
snipts = snipts.filter(tags__slug__in=[tag_slug])
tag = get_object_or_404(Tag, slug=tag_slug)
else:
tag = None
context = {
'has_snipts': True,
'public': True,
'snipts': snipts,
'tag': tag,
}
if 'rss' in request.GET:
context['snipts'] = context['snipts'][:20]
return rss(request, context)
return context
@render_to('snipts/list-user.html')
def list_user(request, username_or_custom_slug, tag_slug=None):
if request.blog_user:
return blog_list(request, username_or_custom_slug)
user = get_object_or_None(User, username=username_or_custom_slug)
if user is None:
snipt = get_object_or_404(Snipt, custom_slug=username_or_custom_slug)
return detail(request, snipt.user, snipt.slug)
tags = Tag.objects
snipts = Snipt.objects
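    # The owner, a caller presenting the owner's API key, or a member of the
    # owning team sees private snipts plus favorites; everyone else sees
    # public snipts only.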
if user == request.user or \
(request.GET.get('api_key') == user.api_key.key) or \
(user.profile.is_a_team and
user.team.user_is_member(request.user)):
public = False
favorites = Favorite.objects.filter(user=user).values('snipt')
favorites = [f['snipt'] for f in favorites]
snipts = snipts.filter(Q(user=user) | Q(pk__in=favorites))
tags = tags.filter(snipt__user=user).distinct()
else:
tags = tags.filter(snipt__user=user, snipt__public=True).distinct()
snipts = snipts.filter(user=user, public=True)
public = True
tags = tags.order_by('name')
snipts = snipts.order_by('-created')
if tag_slug:
snipts = snipts.filter(tags__slug__in=[tag_slug])
tag = get_object_or_404(Tag, slug=tag_slug)
else:
tag = None
if tag is None:
snipts = snipts.exclude(tags__name__in=['tmp'])
context = {
'has_snipts': True,
'public': public,
'public_user': (public and user),
'snipts': snipts,
'tags': tags,
'tag': tag,
'user': user,
}
if 'rss' in request.GET:
context['snipts'] = context['snipts'][:20]
return rss(request, context)
return context
def raw(request, snipt_key, lexer=None):
snipt = get_object_or_404(Snipt, key=snipt_key)
if request.user == snipt.user:
if lexer:
lexer = lexer.strip('/')
if lexer != snipt.lexer:
try:
lexer_obj = get_lexer_by_name(lexer)
                except Exception:  # get_lexer_by_name raises if the lexer name is unknown
lexer_obj = None
if lexer_obj:
snipt.lexer = lexer
snipt.save()
content_type = 'text/plain'
if 'nice' in request.GET:
content_type = 'text/html'
return render_to_response('snipts/raw.html',
{'snipt': snipt},
context_instance=RequestContext(request),
content_type=content_type)
def rss(request, context):
return render_to_response('rss.xml',
context,
context_instance=RequestContext(request),
content_type="application/rss+xml")
@never_cache
def search(request, template='search/search.html', load_all=True,
form_class=ModelSearchForm, searchqueryset=None,
context_class=RequestContext, extra_context=None,
results_per_page=None):
query = ''
results = EmptySearchQuerySet()
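    # Scope the search index to public snipts plus the requester's own;
    # the 'mine-only' and 'author' parameters narrow it further below.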
if request.GET.get('q'):
searchqueryset = SearchQuerySet() \
.filter(Q(public=True) | Q(author=request.user)) \
.order_by('-pub_date')
if request.user.is_authenticated() and \
'mine-only' in request.GET:
searchqueryset = SearchQuerySet().filter(author=request.user) \
.order_by('-pub_date')
elif request.user.is_authenticated() and \
('author' in request.GET and
request.GET.get('author')):
author = request.GET.get('author')
if author == request.user.username:
searchqueryset = SearchQuerySet().filter(author=request.user) \
.order_by('-pub_date')
else:
team = get_object_or_None(Team, slug=author)
if team and team.user_is_member(request.user):
searchqueryset = SearchQuerySet().filter(author=team) \
.order_by('-pub_date')
form = ModelSearchForm(request.GET,
searchqueryset=searchqueryset,
load_all=load_all)
if form.is_valid():
query = form.cleaned_data['q']
results = form.search()
else:
form = form_class(searchqueryset=searchqueryset, load_all=load_all)
paginator = Paginator(results, results_per_page or RESULTS_PER_PAGE)
try:
page = paginator.page(int(request.GET.get('page', 1)))
except InvalidPage:
raise Http404("No such page of results!")
context = {
'form': form,
'has_snipts': True,
'page': page,
'paginator': paginator,
'query': query,
'suggestion': None,
}
if results.query.backend.include_spelling:
context['suggestion'] = form.get_suggestion()
if extra_context:
context.update(extra_context)
return render_to_response(template,
context,
context_instance=context_class(request))
def redirect_snipt(request, snipt_key, lexer=None):
snipt = get_object_or_404(Snipt, key=snipt_key)
return HttpResponseRedirect(snipt.get_absolute_url())
def redirect_public_tag_feed(request, tag_slug):
return HttpResponseRedirect('/public/tag/{}/?rss'.format(tag_slug))
def redirect_user_feed(request, username):
user = get_object_or_404(User, username=username)
return HttpResponseRedirect(user.get_absolute_url() + '?rss')
def redirect_user_tag_feed(request, username, tag_slug):
return HttpResponseRedirect(u'/{}/tag/{}/?rss'.format(username, tag_slug))
New iPhone 6 Cases are Here!
We are excited to announce that we have personalized iPhone 6 cases. If you were lucky enough to get the newest iPhone, it’s time to protect it with a personalized case. These also make great Holiday gifts (hint, hint … start your shopping now to get a jump start!). Here are a few of the fun designs from our collection.
The Stationery Studio is excited to announce their new leather gift collection! We have added an exclusive collection of beautiful leather gifts for those who appreciate quality craftsmanship. Gorgeous new additions include padlock diaries, tech accessories and luggage tags. Why not check out the new line for yourself?
Campuses across the country are buzzing about our Sorority Tech Accessories. These chic tech skins, covers and cases are the latest trend to hit college campuses across the country. TheStationeryStudio.com features a Sorority Tech section with officially-licensed sorority products with a twist: personalization.
Whether you are looking to show your sorority spirit with your cell phone case, laptop bag, iPad sleeve or tech skin, we’ve got you covered! For these creative ideas and more, check out our Studio Notes Sorority Pinterest Boards. Our pins are spreading like wildfire – with sorority members sharing these fun and creative ways to dress up their tech and show off their sorority pride.
Personalized Sorority Custom Tech Skins from TheStationeryStudio.com have been designed to perfectly fit the unique contour of your device. These items, which were featured as part of a Back-to-School segment on NBC-TV’s The TODAY Show, are ultra low-profile decorative stickers that will not interfere with docks, chargers or other accessories. The tech skins use durable, automotive-grade 3M™ vinyl for easy application and residue-free removal.
Sorority Cell phone Cases: This one-piece ultra-lightweight snap-on hard shell covers the back and the sides of your phone, leaving access to all ports and functions. It will protect against impact and falls. The smooth finish makes it simple to slide your phone in and out of pockets. Available for iPhone, Blackberry, Samsung Galaxy S and iPod Touch. In addition we offer a special “tough case” option for iPhones that adds an extra level of protection.
Personalized designer sleeves will protect your iPad in style. This Neoprene sleeve cushions your iPad for maximum protection. Its thick yet lightweight construction provides natural moisture, shock and scratch protection for your iPad (with or without an Apple Smart Cover). Our covers fit all iPad models. The sleeve has a zipper which opens for easy access to your device. The design and personalization are printed on both sides of the sleeve.
This personalized designer laptop bag is sure to protect your technology in style. Carry your laptop by the handle or the convenient strap. The bag has a zipper which opens for easy access to your laptop. The design and personalization are printed on both sides of the bag. This Neoprene bag cushions your laptop for maximum protection.
Be the first on campus to start this new trend of personalized sorority tech accessories. Whether it’s a birthday gift, initiation gift for a new member – or a gift for that hard-to-shop-for relative, we’ve definitely got you covered! These fashion statements make great gifts for new initiates, members and alumnae who want to show off their letters with style.
import json
import sys
import urllib2
import re
import os
from HTMLParser import HTMLParser
class htmltagparser(HTMLParser):
def __init__(self):
self.reset()
self.NEWATTRS = []
def handle_starttag(self, tag, attrs):
self.NEWATTRS = attrs
def clean(self):
self.NEWATTRS = []
class Cloner(object):
def __init__(self, url, path, maxdepth=3):
self.start_url = url
self.path = os.getcwd() + "/" + path
self.maxdepth = maxdepth
self.seenurls = []
self.user_agent="Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; Trident/4.0)"
    # ######################################
    # Utility Functions
    # ######################################
# http get request
def get_url(self, url):
headers = { 'User-Agent' : self.user_agent }
try:
req = urllib2.Request(url, None, headers)
return urllib2.urlopen(req).read()
except urllib2.HTTPError, e:
print 'We failed with error code - %s.' % e.code
if e.code == 404:
return ""
else:
return ""
# download a binary file
def download_binary(self, url):
filename = ""
if url.startswith(self.start_url):
filename = url[len(self.start_url):]
else:
return
data = self.get_url(url)
if (data == ""):
return
self.write_outfile(data, filename)
return
# writeout a file
def write_outfile(self, data, filename):
if filename.startswith("/"):
filename = filename[1:]
fullfilename = self.path + "/" + filename
if not os.path.exists(os.path.dirname(fullfilename)):
os.makedirs(os.path.dirname(fullfilename))
print "WRITING OUT FILE [%s]" % (filename)
f = open(fullfilename, 'a')
f.write(data)
f.close()
# unique a list
def unique_list(self, old_list):
new_list = []
if old_list != []:
for x in old_list:
if x not in new_list:
new_list.append(x)
return new_list
    # ######################################
    # html and link processing functions
    # ######################################
def find_forms(self, html):
form_regex = re.compile('<form[^>]+>')
return self.unique_list(form_regex.findall(html))
# convert all forms to contain hooks
def process_forms(self, html, method="get", action="index"):
# find all forms in page
forms = self.find_forms(html)
parser = htmltagparser()
# loop over each form
for form in forms:
print "FOUND A FORM [%s]" % (form)
# parse out parts of old form tag
parser.feed(form)
attrs = parser.NEWATTRS
parser.clean()
# build new form
new_form = "<form method=\"%s\" action=\"%s\"" % (method, action)
for (name, value) in attrs:
if ((name.lower() != "method") and (name.lower() != "action")):
new_form += " %s=\"%s\"" % (name, value)
new_form += ">"
print "REWROTE FORM TO BE [%s]" % (new_form)
# rewrite html with new form
html = html.replace(form, new_form)
return html
# build new list of only the link types we are interested in
def process_links(self, links):
new_links = []
for link in links:
link = link.lower()
if (link.endswith(".css") or
link.endswith(".html") or
link.endswith(".php") or
link.endswith(".asp") or
link.endswith(".aspx") or
link.endswith(".js") or
link.endswith(".ico") or
link.endswith(".png") or
link.endswith(".jpg") or
link.endswith(".jpeg") or
link.endswith(".bmp") or
link.endswith(".gif")
# ("." not in os.path.basename(link))
):
new_links.append(link)
return new_links
    # primary recursive function used to clone and crawl the site
def clone(self, depth=0, url="", base="", method="get", action="index"):
# early out if max depth is reached
if (depth > self.maxdepth):
print "MAX URL DEPTH [%s]" % (url)
return
# if no url is specified, then assume the starting url
if (url == ""):
url = self.start_url
# if no base is specified, then assume the starting url
if (base == ""):
base = self.start_url
# check to see if we have processed this url before
if (url in self.seenurls):
print "ALREADY SEEN URL [%s]" % (url)
return
else:
self.seenurls.append(url)
# get the url and return if nothing was returned
html = self.get_url(url)
if (html == ""):
return
# determine the websites script/filename
filename = ""
# we are only interested in urls on the same site
if url.startswith(base):
filename = url[len(base):]
# if filename is blank, assume index.html
if (filename == ""):
filename = "index.html"
else:
print "BAD URL [%s]" % (url)
return
print "CLONING URL [%s]" % (url)
# find links
links = re.findall(r"<link.*?\s*href=\"(.*?)\".*?>", html)
links += re.findall(r"<script.*?\s*src=\"(.*?)\".*?>", html)
links += re.findall(r"<img.*?\s*src=\"(.*?)\".*?>", html)
links += re.findall(r"\"(.*?)\"", html)
links += re.findall(r"url\(\"?(.*?)\"?\);", html)
links = self.process_links(self.unique_list(links))
# loop over the links
for link in links:
link = link.lower()
new_link = link
if link.startswith("http"):
new_link = link
elif link.startswith("//"):
new_link = "http:" + link
elif link.startswith("/"):
new_link = base + link
elif link.startswith("../"):
new_link = base + "/" + link[3:]
else:
new_link = base + "/" + link
good_link = new_link
if (new_link.startswith(self.start_url)):
good_link = new_link[len(self.start_url):]
print "FOUND A NEW LINK [%s]" % (new_link)
print "FOUND A NEW LINK * [%s]" % (good_link)
# switch out new_link for link
html = html.replace("\"" + link + "\"", "\"" + good_link + "\"")
            # determine if we need to call clone recursively
if (link.endswith(".css") or
link.endswith(".html") or
link.endswith(".php") or
link.endswith(".asp") or
link.endswith(".aspx") or
link.endswith(".js")
# ("." not in os.path.basename(link))
):
# recursively call process_html on each non-image link
if base != self.start_url:
self.clone(url=new_link, base=os.path.dirname(url), depth=depth+1)
else:
self.clone(url=new_link, depth=depth+1)
else:
# must be a binary file, so just download it
self.download_binary(new_link)
# update any forms within the page
html = self.process_forms(html, action=action)
# write out the html for the page we have been processing
self.write_outfile(html, filename)
return
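# Example invocation (script name and URL are placeholders):
#   python cloner.py http://www.example.com cloned_site
# An optional third argument rewrites every cloned form's action, e.g.:
#   python cloner.py http://www.example.com cloned_site login.php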
if __name__ == "__main__":
def usage():
print "%s <URL> <outdirectory> (optional <form action>)" % (sys.argv[0])
if ((len(sys.argv) < 3) or (len(sys.argv) > 4)):
usage()
sys.exit(0)
c = Cloner(sys.argv[1], sys.argv[2])
if len(sys.argv) == 4:
c.clone(action=sys.argv[3])
else:
c.clone()
Mahajan Imaging has a history of bringing new technologies into the private sector in the country, for example standing weight-bearing MRI, Cone Beam Dental CT, functional MRI, MR tractography, MR mammography, CT coronary angiography, 4D ultrasound with tomography, PET-CT, CT-guided robotic biopsy, digital mammography and DEXA bone densitometry. Continuing this trend, they have recently installed Asia’s first Silent MRI technology and India’s first Dual Energy Spectral CT Scanner with the most advanced cardiac imaging capabilities in the world. There are currently seven centres in New Delhi, with standalone centres in Hauz Khas and Defence Colony. The other centres are located in prestigious hospitals including Sir Ganga Ram Hospital, PSRI Hospital, Fortis Hospital, BLK Super Speciality Hospital and Safdarjung Hospital. Mahajan Imaging’s latest centre in Gurugram has been set up with a vision of blending the best technology in the world, under the guidance of some of India’s best doctors, with a calm and relaxing environment that takes you far away from the stress and anxiety of illness. Over the last 25 years, we have learnt that feeling well is the first step one can take towards a truly healthier lifestyle.
import sublime, sublime_plugin
import re
# to find on which indentation level we currently are
current_indentation_re = re.compile("^\s*")
# to leave additional new lines as is
additional_new_lines_re = re.compile("^\s*\n+\s*\n+\s*$")
# no indentation
no_indent = re.compile("^</?(head|body)[>| ]", re.I)
# possible self closing tags: XML-------HTML------------------------------------------------HTML5----------------
self_closing_tags = re.compile("^<(\?|\!|%|#|area|base|br|col|frame|hr|img|input|link|meta|param|command|embed|source)", re.I)
skip_content_of_this_tags_re = re.compile("^<(script|style|pre|code)(>| )", re.I)
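# inline tags whose outer (surrounding) or inner whitespace is collapsed
# so they stay attached to the neighbouring text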
trim_outter_left = "abbr|acronym|dfn|em|strong|b|i|u|font|del|ins|sub|sup".split('|')
trim_outter_right = "".split('|')
trim_inner_left = "abbr|acronym|dfn|em|strong|b|i|u|font|del|ins|sub|sup|title".split('|')
trim_inner_right = "abbr|acronym|dfn|em|strong|b|i|u|font|del|ins|sub|sup|title".split('|')
def TagIndentBlock(data, view):
# User settings
settings = sublime.load_settings('Tag Package.sublime-settings')
preserve_additional_new_lines = bool(settings.get('preserve_additional_new_lines', True))
num_chars_considered_little_content = str(int(settings.get('little_content_means_this_number_of_characters', 60)))
# the indent character
if view.settings().get('translate_tabs_to_spaces') :
indent_character = ' '*int(view.settings().get('tab_size', 4))
else:
indent_character = '\t'
# on which indentation level we currently are?
indentation_level = (current_indentation_re.search(data).group(0)).split("\n")
current_indentation = indentation_level.pop()
if len(indentation_level) == 1:
beauty = "\n"+indentation_level[0]
elif len(indentation_level) > 1:
beauty = "\n".join(indentation_level)
else:
beauty = ''
# pre processing
if preserve_additional_new_lines == False:
#fix comments
data = re.sub(r'(\n\s*<\!--)', '\n\t\n\\1', data)
# first newline should be skipped
starting = True
# inspiration from http://jyro.blogspot.com/2009/08/makeshift-xml-beautifier-in-python.html
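    # re.split with a capturing group keeps the delimiters, so 'tags'
    # alternates between plain-text chunks and '<...>' tag tokens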
level = 0
tags = re.split('(<[^>]+>)',data)
    length = len(tags)
    i = 0
    while i < length:
f = tags[i]
no_indent_match = no_indent.match(f[:20])
if f.strip() == '':
            if preserve_additional_new_lines and additional_new_lines_re.match(f):
beauty += '\n'
elif f[0]=='<' and f[1] != '/':
# beauty += '1'
if starting == False:
beauty += '\n'
starting = False
beauty += current_indentation
if not no_indent_match:
beauty += indent_character*level
if skip_content_of_this_tags_re.match(f[:20]):
tag_is = re.sub(r'<([^ ]+)(>| ).*', '\\1', f[:20], 1)
tag_is = re.compile("/"+tag_is+">$", re.I)
beauty += f
i = i+1
                while i < length:
f = tags[i]
if not tag_is.search(f[-20:]):
beauty += f
i = i+1
else:
beauty += f
break
else:
beauty += f.strip()
if not no_indent_match:
level = level + 1
#self closing tag
if f[-2:] == '/>' or self_closing_tags.match(f):
#beauty += '2'
beauty += current_indentation
if not no_indent_match:
level = level - 1
elif f[:2]=='</':
if not no_indent_match:
level = level - 1
#beauty += '3'
if starting == False:
beauty += '\n'
starting = False
beauty += current_indentation
if not no_indent_match:
beauty += indent_character*level
beauty += f.strip()
else:
#beauty += '4'
if starting == False:
beauty += '\n'
starting = False
beauty += current_indentation
if not no_indent_match:
beauty += indent_character*level
beauty += f.strip()
i = i+1
if bool(settings.get('empty_tags_close_on_same_line', True)):
# put empty tags on same line
beauty = re.sub(r'<([^/!][^>]*[^/])>\s+</', '<\\1></', beauty)
# put empty tags on same line for tags with one character
beauty = re.sub(r'<([^/!])>\s+</', '<\\1></', beauty)
if bool(settings.get('tags_with_little_content_on_same_line', True)):
# put tags with little content on same line
beauty = re.sub(r'<([^/][^>]*[^/])>\s*([^<\t\n]{1,'+num_chars_considered_little_content+'})\s*</', '<\\1>\\2</', beauty)
# put tags with little content on same line for tags with one character
beauty = re.sub(r'<([^/])>\s*([^<\t\n]{1,'+num_chars_considered_little_content+'})\s*</', '<\\1>\\2</', beauty)
    # case-insensitive substitutions: re.I must be passed as 'flags',
    # since the fourth positional argument of re.sub is 'count'
    for tag in trim_outter_left:
        beauty = re.sub(r'\s+<'+tag+'(>| )', ' <'+tag+'\\1', beauty, flags=re.I)
    for tag in trim_outter_right:
        beauty = re.sub(r'</'+tag+'>\s+([^\s])', '</'+tag+'> \\1', beauty, flags=re.I)
    for tag in trim_inner_left:
        beauty = re.sub(r'<'+tag+'(>| [^>]*>)\s+([^\s])', '<'+tag+'\\1\\2', beauty, flags=re.I)
    for tag in trim_inner_right:
        beauty = re.sub(r'\s+</'+tag+'>', '</'+tag+'> ', beauty, flags=re.I)
return beauty
class TagIndentCommand(sublime_plugin.TextCommand):
def run(self, edit):
for region in self.view.sel():
if region.empty():
continue
if self.view.score_selector(region.a, 'text.html | text.xml') <= 0:
dataRegion = region
else:
dataRegion = sublime.Region(self.view.line(region.begin()).begin(), region.end())
data = TagIndentBlock(self.view.substr(dataRegion), self.view)
            self.view.replace(edit, dataRegion, data)
def is_visible(self):
for region in self.view.sel():
if not region.empty():
return True
return False
class TagIndentDocumentCommand(sublime_plugin.TextCommand):
def run(self, edit):
dataRegion = sublime.Region(0, self.view.size())
data = TagIndentBlock(self.view.substr(dataRegion).strip(), self.view)
        self.view.replace(edit, dataRegion, data)
def is_visible(self):
value = False
for region in self.view.sel():
if region.empty():
continue
if self.view.score_selector(region.a, 'text.html | text.xml') <= 0:
return False
else:
value = True
return value or self.view.score_selector(0, 'text.html | text.xml') > 0 |
TEC Accessories specializes in keychain gadgets. These Isotope fobs, available in multiple shapes and sizes, are a great way to add some flair to your keychain, knife, flashlight, or pack—and they’ll make them far easier to see in the dark. That’s thanks to the fobs’ ability to hold vials of tritium: a self-powered isotope of hydrogen that glows in the dark. Crafted from bead-blasted stainless steel, each model offered is strong and stable, with skeletonized holes to show off the bright glow. They come with a split ring for attachment and rubber plugs and washers to keep the tritium tubes (sold separately) nice and tight on the go. If you're looking for uncompromising performance, opt for the Chain Reaction. It retains a tritium vial and includes an Embrite glow-in-the-dark pellet for double the glow power.
Note: The base price is for the S311T Tandem Fob. At checkout, you can upgrade to the S360 (+ $10), the S323 (+ $10), and the Chain Reaction (+ $12) with aqua or green glow. This product is designed to accept a self-luminous glass tritium vial. Due to US restrictions, tritium is considered a controlled substance (NRC regulations) and cannot be sold within the United States for this application. However, tritium is easily obtained from sources outside the United States and can be legally purchased, imported and owned by US consumers.
# coding=utf-8
import os
import re
import unicodedata
from django.utils import six
from .settings import (MEDIA_IMAGE_EXTENSION, MEDIA_IMAGE_FORMAT, MEDIA_IMAGE_QUALITY,
MEDIA_NORMALIZE_FILENAME, MEDIA_CONVERT_FILENAME)
def thumbnail_path(path, size, method):
"""
Returns the path for the resized image.
"""
directory, name = os.path.split(path)
image_name, ext = name.rsplit('.', 1)
return os.path.join(directory, '%s_%s_%s.%s' % (image_name, method, size, MEDIA_IMAGE_EXTENSION))
def generate_thumbnail(path, size, method):
try:
import Image
except ImportError:
try:
from PIL import Image
except ImportError:
raise ImportError('Cannot import the Python Image Library.')
image = Image.open(path)
# normalize image mode
if image.mode != 'RGB':
image = image.convert('RGB')
# parse size string 'WIDTHxHEIGHT'
width, height = [int(i) for i in size.split('x')]
# use PIL methods to edit images
if method == 'scale':
image.thumbnail((width, height), Image.ANTIALIAS)
image.save(thumbnail_path(path, size, method), MEDIA_IMAGE_FORMAT, quality=MEDIA_IMAGE_QUALITY)
elif method == 'crop':
try:
import ImageOps
except ImportError:
from PIL import ImageOps
ImageOps.fit(
image, (width, height), Image.ANTIALIAS
).save(thumbnail_path(path, size, method), MEDIA_IMAGE_FORMAT, quality=MEDIA_IMAGE_QUALITY)
def process_filename(value):
"""
Convert Filename. # from django-filebrowser
"""
if MEDIA_NORMALIZE_FILENAME:
chunks = value.split(os.extsep)
normalized = []
for v in chunks:
v = unicodedata.normalize('NFKD', six.text_type(v)).encode('ascii', 'ignore').decode('ascii')
v = re.sub(r'[^\w\s-]', '', v).strip()
normalized.append(v)
value = '.'.join(normalized) if len(normalized) > 1 else normalized[0]
return value.replace(" ", "_").lower() if MEDIA_CONVERT_FILENAME else value
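# Minimal usage sketch (hypothetical path), assuming PIL/Pillow is installed
# and the media settings above are configured:
#   generate_thumbnail('media/photos/cat.jpg', '200x200', 'crop')
#   thumbnail_path('media/photos/cat.jpg', '200x200', 'crop')
#   # -> 'media/photos/cat_crop_200x200.' + MEDIA_IMAGE_EXTENSION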
We will have decent to good availability next week on most of our J6 Pansies with the exception of Matrix Clear Yellow. As usual, Murphy says that your most popular color of pansy will be the one variety that needs one more week to get ready. The clear yellow is small and needs to fill out and bloom.
Several orders got moved up a week and have taken up a large chunk of our #4 inventory. Our next crop should be ready the week of October 6.
There still seems to be a lot of you interested in more information on Cora Vinca. Here is a flier from Goldsmith Seeds, the breeder of Cora Vinca.
Also here is more on Cora Vinca from Alecia’s comments on the Cora Vinca blog post that I posted on July 25.
Well, after reading your comments I felt I could add a little something. Let me tell you straight-up, I work for Goldsmith Seeds, the breeder of Cora Vinca! I know I am a little biased, but I can tell you we have heard incredibly positive feedback from folks that have grown and planted Cora Vinca.
Let me give you a little background on Cora. Our founder, Glenn Goldsmith, went on a plant collecting expedition over 18 years ago and found a wild Vinca that seemed to be very disease resistant. It wasn’t a very pretty plant; however, it had the potential to be something quite special! He and other breeders worked for over 18 years on incorporating its disease resistance into a more traditional, garden-friendly plant and, voilà, Cora was born!
We’ve heard from trial grounds across the country, when other Vinca succumb to AP, Cora survives. Cora really is a breeding breakthrough and now landscaper and gardeners can enjoy a Vinca that will thrive all season long.
Here’s a little tidbit that’s interesting — they used Cora Vinca to decorate the grounds around the Beijing Olympic Stadium and Water Cube. They tested hundreds of flower varieties for three years to see which performed the best in China’s hot and humid climate. Cora made the cut and they used it extensively throughout the display.
Thanks Alecia for your comments!
Lastly, here are some pictures of Cora Vinca we planted here at the farm between a building and a retaining wall. These Cora Vinca were planted in early June and have not been touched since. The only water they received was when it rained.
Here are some pictures of some of the current crops we are shipping from as well as some upcoming crops that will be ready next week. I will have a new ready list posted tomorrow and probably some more pictures to go with it.
We added in about half a planting of #4 pansies, which helped some, but overall pansy availability is less than stellar. Next week is looking good, though.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
import subprocess
import sys
import design
class StartScanThread(QThread):
def __init__(self, process_list):
QThread.__init__(self)
self.process_list = process_list
def start_scan(self):
print(self.process_list) # for debugging
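        # check_output blocks until the external script finishes; running it
        # inside this QThread keeps the main GUI thread responsive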
subprocess.check_output(self.process_list)
def run(self):
self.start_scan()
class DiskImageProcessorApp(QMainWindow, design.Ui_DiskImageProcessor):
def __init__(self, parent=None):
super(DiskImageProcessorApp, self).__init__(parent)
self.setupUi(self)
# build browse functionality buttons
self.analysisSourceBtn.clicked.connect(self.browse_analysis_source)
self.procSourceBtn.clicked.connect(self.browse_processing_source)
self.analysisDestBtn.clicked.connect(self.browse_analysis_dest)
self.procDestBtn.clicked.connect(self.browse_processing_dest)
# build start functionality
self.analysisStartBtn.clicked.connect(self.start_analysis)
self.procStartBtn.clicked.connect(self.start_processing)
# about dialog
self.actionAbout.triggered.connect(self.about_dialog)
def about_dialog(self):
QMessageBox.information(
self,
"About",
"Disk Image Processor v1.0.0\nCanadian Centre for Architecture\nDeveloper: Tessa Walsh\n2018\nMIT License\nhttps://github.com/CCA-Public/cca-diskimageprocessor",
)
def browse_analysis_source(self):
self.analysisSource.clear() # clear directory source text
directory = QFileDialog.getExistingDirectory(self, "Select folder")
if directory: # if user didn't pick directory don't continue
self.analysisSource.setText(directory)
def browse_processing_source(self):
self.procSource.clear() # clear directory source text
directory = QFileDialog.getExistingDirectory(self, "Select folder")
if directory: # if user didn't pick directory don't continue
self.procSource.setText(directory)
def browse_analysis_dest(self):
self.analysisDest.clear() # clear directory source text
directory = QFileDialog.getExistingDirectory(self, "Select folder")
if directory: # if user didn't pick directory don't continue
self.analysisDest.setText(directory)
def browse_processing_dest(self):
self.procDest.clear() # clear directory source text
directory = QFileDialog.getExistingDirectory(self, "Select folder")
if directory: # if user didn't pick directory don't continue
self.procDest.setText(directory)
def done_analysis(self):
self.analysisCancelBtn.setEnabled(False)
self.analysisStartBtn.setEnabled(True)
QMessageBox.information(self, "Finished", "Analysis complete.")
self.analysisStatus.setText("Completed")
def done_processing(self):
self.procCancelBtn.setEnabled(False)
self.procStartBtn.setEnabled(True)
QMessageBox.information(self, "Finished", "Processing complete.")
self.procStatus.setText("Completed")
def start_analysis(self):
# clear status
self.analysisStatus.clear()
# create list for process
self.process_list = list()
self.process_list.append("python3")
self.process_list.append(
"/usr/share/ccatools/diskimageprocessor/diskimageanalyzer.py"
)
# give indication process has started
self.analysisStatus.setText("Processing. Please be patient.")
# option handling
if self.quietLogBtn.isChecked():
self.process_list.append("--quiet")
if self.retainFilesBtn.isChecked():
self.process_list.append("-k")
if self.unallocBtn.isChecked():
self.process_list.append("-e")
if self.resForksBtn.isChecked():
self.process_list.append("-r")
# add source and dest
self.process_list.append(self.analysisSource.text())
self.process_list.append(self.analysisDest.text())
# process
self.get_thread = StartScanThread(self.process_list)
self.get_thread.finished.connect(self.done_analysis)
self.get_thread.start()
self.analysisCancelBtn.setEnabled(True)
self.analysisCancelBtn.clicked.connect(self.get_thread.terminate)
self.analysisStartBtn.setEnabled(False)
def start_processing(self):
# clear status
self.procStatus.clear()
# create list for process
self.process_list = list()
self.process_list.append("python3")
self.process_list.append(
"/usr/share/ccatools/diskimageprocessor/diskimageprocessor.py"
)
# give indication process has started
self.procStatus.setText("Processing. Please be patient.")
# option handling
if self.quietLogBtn.isChecked():
self.process_list.append("--quiet")
if self.unallocBtn.isChecked():
self.process_list.append("-e")
if self.resForksBtn.isChecked():
self.process_list.append("-r")
if self.bagBtn.isChecked():
self.process_list.append("-b")
if self.logicalFilesOnlyBtn.isChecked():
self.process_list.append("-f")
if self.bulkExtBtn.isChecked():
self.process_list.append("-p")
# add source and dest
self.process_list.append(self.procSource.text())
self.process_list.append(self.procDest.text())
# process
self.get_thread = StartScanThread(self.process_list)
self.get_thread.finished.connect(self.done_processing)
self.get_thread.start()
self.procCancelBtn.setEnabled(True)
self.procCancelBtn.clicked.connect(self.get_thread.terminate)
self.procStartBtn.setEnabled(False)
def main():
app = QApplication(sys.argv)
form = DiskImageProcessorApp()
form.show()
app.exec_()
if __name__ == "__main__":
main()
The final and most crucial steps in any jewelry design are the final polish and finish. It says the most about the metalsmith, his attention to detail and commitment to his craft. Being patient and persnickety pays the bills!
Visit our Ask the Expert section for lots of advice on polishing and finishing, which wheels to use and when. Don't miss the info about flexshafts too, since they make tedious jobs go quickly!
Finishing also includes patina on jewelry pieces, darkening low areas to highlight the raised portions. Liver of sulfur is one of the most common patinas available, with many options.
# ===============================================================================
# Copyright 2011 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
# from traits.api import HasTraits
# from traitsui.api import View,Item,Group,HGroup,VGroup
# ============= standard library imports ========================
from __future__ import absolute_import
import time
from .monitor import Monitor
# from threading import Thread
# ============= local library imports ==========================
cnt = 0
class PumpingMonitor(Monitor):
'''
G{classtree}
'''
gauge_manager = None
tank_gauge_name = 'gauge1'
pump_gauge_name = 'gauge2'
# pumping_duration=Float
# idle_duration=Float
name = 'AnalyticalPumpingMonitor'
def _monitor_(self):
'''
'''
pump_start = 0
idle_start = 0
def get_time(_start_):
ct = time.time()
_dur_ = 0
if _start_ == 0:
_start_ = ct
else:
_dur_ = ct - _start_
return _start_, _dur_
while self.gauge_manager is not None:
state = self._get_pumping_state()
if state == 'pumping':
idle_start = 0
pump_start, pump_duration = get_time(pump_start)
self.parent.update_pumping_duration(self.name, pump_duration)
else: # state=='idle'
pump_start = 0
idle_start, idle_duration = get_time(idle_start)
# print 'idle duir',idle_duration
self.parent.update_idle_duration(self.name, idle_duration)
time.sleep(1)
def _get_pumping_state(self):
'''
'''
state = 'idle'
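        # NOTE: real gauge polling is commented out below; a module-level
        # counter temporarily simulates a brief 'pumping' phase for testing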
# gm=self.gauge_manager
global cnt
if cnt >= 5 and cnt < 10:
state = 'pumping'
# tankgauge=gm.get_gauge_by_name(self.tank_gauge_name)
# if tankgauge.pressure<1:
# state='pumping'
cnt += 1
return state
# ============= EOF ====================================
For those clients that require a focus on paid leads, I always recommend giving Bing a try, along with Google, as not doing so is lazy and could mean you are missing a valuable opportunity.
And Bing knows this. That’s why Bing is making every effort to streamline the process of importing your Google AdWords campaigns. And it is super simple. A few clicks and you’re done. But it’s not without its issues. The more complex an account you have in Google, the more you have to watch out for.
But this article isn’t about preaching the benefits of incremental conversions from Bing.
This is likely to change, but at the moment Bing Ads does not support display campaigns in the UK. This is only available in the US. This might sound simple, but it also means that image ads and CPM (cost-per-thousand) bidding will not be imported. So unless you want a large error log, it’d be wise to exclude these campaigns. If you want a full breakdown of where your ads are likely to appear, there’s a useful article about where Bing will place your ads.
In AdWords you can target loads of languages but not in Bing Ads. You can choose a maximum of one target language per ad group. If you’ve imported an Adwords account and have multiple languages targeted, Bing Ads will just select the language with the highest rank (based on ad impressions). If your chosen targeted language isn’t in the list, the campaign will not be imported.
There are some differences between location targeting with Bing Ads compared to AdWords. If Bing Ads cannot match the targeted city location (Bing gives the example of Cafelandia), it will match the parent location instead, which in this case is the State of Parana, Brazil. So check to make sure you’re okay with a broader target.
On a second but related note, you will need to double-check that your targeting preferences have transferred over correctly. Even though Bing supports targeting “People in your targeted location”, it will default to “People in, searching for, or showing interest in your targeted location (recommended)”, which may mean you pay for clicks in another country, which may not be what you want.
These don’t get transferred across but are supported. Simply download your lists from AdWords or Editor, whichever you prefer, and create a list in the Bing shared library. Bear in mind that Bing does not allow broad match negatives. If you do have any broad match negatives in your list, they will be converted to phrase match, so it’s worth doing a little more investigation to make sure phrase match will still block your unwanted search terms.
Standard text ads will be imported as normal. Bing will automatically merge the AdWords description lines 1 and 2 onto one line, as the Bing Ads character limits differ from those in AdWords.
The final thing that’s of interest is bidding types, as Bing Ads has Manual CPC (cost-per-click) and Enhanced CPC but not CPA (cost-per-acquisition). When a CPA bid is imported, Bing Ads will change it to a CPC bid. So make sure you are happy using the CPC figure, otherwise you may get a nasty surprise!
I think more businesses will start using Bing Ads, and since AdWords tends to be the go-to platform initially, knowing the pitfalls of transferring AdWords over to Bing Ads can be useful. However, both platforms are always making changes, so make sure you keep on top of these too.
# coding: utf-8
from __future__ import absolute_import
from google.appengine.ext import ndb
#from api import fields
import model
import util
import config
from .counter import CountableLazy
from .collection import Collection, AddCollection
import cloudstorage as gcs
from google.appengine.api import images
from google.appengine.ext import blobstore
"""
An icon consists of two model classes:
IconStructure: holds all icon-specific data but no additional information.
Icon: The Icon model contains an IconStructure as an icon and additional information
like a counter and collection.
For each icon exists a toplevel icon which can have children grouped by collection.
Once an icon is created it should not be changed anymore.
If one of the children's counters is updated, the toplevel icon's counter is updated
as well.
The highest toplevel has the default collection Collection.top_key().
"""
class IconValidator(model.BaseValidator):
name = [2,30]
class Icon(CountableLazy, AddCollection, model.Base):
name = ndb.StringProperty(required=True,\
validator=IconValidator.create('name'))
#icon = ndb.StructuredProperty(IconStructure)
icon = ndb.BlobProperty(required=True)
icon_url = ndb.StringProperty(required=True,default="",indexed=False)
private = ndb.BooleanProperty(required=True,default=False) # not shown for others
# private means inside its collection
replaced_by = ndb.KeyProperty(kind='Icon') # if the icon should not be used anymore
fallback = ndb.KeyProperty(kind='Icon') # fallback icon, for example a png for a svg
external_source = ndb.StringProperty(indexed=False) # not recommended
filetype = ndb.StringProperty(choices=['svg','pixel','external'],indexed=True,
default='svg', required=True)
#see: http://support.flaticon.com/hc/en-us/articles/202798381-How-to-attribute-the-icons-to-their-authors
# this would be the author link
author_html = ndb.StringProperty()
comment = ndb.TextProperty()
# take as keywords the tags from flaticon
keywords = ndb.StringProperty(indexed=True,repeated=True)
@classmethod
def create(cls,icon,name,icon_url=None,collection=Collection.top_key(),\
toplevel=None, private=False, author_html=None,\
fallback=None, external_source=None, \
filetype=None, keywords=None, comment=None, auto=True):
""" Creates and puts a new icon to the database.
As icon is the source code expected (svg or image).
Keywords should be a list.
Returns Icon key"""
new_icon = Icon(icon = icon,
name=name,
collection=collection,
private=private,
icon_url=icon_url)
if toplevel:
new_icon.toplevel = toplevel
        if fallback:
            new_icon.fallback = fallback
if author_html:
new_icon.author_html = author_html
if external_source:
new_icon.external_source = external_source
if filetype:
new_icon.filetype = filetype
if comment:
new_icon.comment = comment
if keywords:
# TODO check keywords (tag validator) and make list unique
new_icon.keywords = model.TagValidator.name(keywords)
# SAVE TO CLOUD STORAGE
adr = "{}/{}/{}/{}".format(config.BUCKET, collection.urlsafe(), 'icons', name)
write_retry_params = gcs.RetryParams(backoff_factor=1.1)
gcs_file = gcs.open(adr, 'w',
content_type="image/svg+xml",
options={
'x-goog-meta-name': name
},
retry_params=write_retry_params)
gcs_file.write(icon) # saves file to cloud storage
gcs_file.close()
blob_key = blobstore.create_gs_key('/gs' + adr)
img_url = images.get_serving_url(blob_key=blob_key)
if not icon_url:
new_icon.icon_url = img_url
if not external_source:
new_icon.external_source = img_url
key = new_icon._add_and_put(auto=auto)
return key
@classmethod
def add(cls,key,collection=None, as_child=False):
""" Add a icon which already exists by key.
If no collection or the same belonging to the key is given the icon
counter is increased by one.
If the collection is different two things can happen:
1. If the key's collection is Collection.top_key() (no toplevel) or 'as_child' is true:
The key is assigned as toplevel.
('as_child' means the icon is added with 'key' as 'toplevel')
2. It is not a toplevel key:
The property 'toplevel' is assigned as key.
In both cases a toplevel is set. The next step is to look for a icon with
the same toplevel and collection, if one exists its counter is increased.
If none exists a new one is created.
"""
icon_db = key.get()
if icon_db.collection == collection or not collection:
icon_db.incr()
icon_db.put()
return key
else:
if collection == Collection.top_key():
                return cls.add(icon_db.toplevel, collection)
elif icon_db.collection == Collection.top_key() or as_child:
toplevel = key
else:
toplevel = icon_db.toplevel
## Look for icons with same toplevel and collection
keys = Icon.get_by_toplevel(toplevel, collection=collection, keys_only=True, limit=1)
            if keys:
                key = keys[0]
                return Icon.add(key, collection)
else:
return Icon.create(icon_db.icon,icon_db.name,collection=collection,toplevel=toplevel)
@classmethod
def remove(cls,id):
"""Removes a icon by its key
Remove means its counter is decreased by one"""
key = cls.id_to_key(id)
icon_db = key.get()
icon_db.decr()
icon_db.put()
def get_tags(self,limit=10):
"""Fetches tags which are used together with this icon
returns a tag dbs and a variable more if more tags are available."""
#TODO write test
dbs = model.Tag.query(model.Tag.icon_id==self.key.id())\
.order(-model.Tag.cnt).fetch(limit+1)
if len(dbs) > limit:
more = True
else:
more = False
return dbs, more
@classmethod
def qry(cls, toplevel=None, name=None, collection=None, private=False,
replaced_by=None, order_by_count=True, **kwargs):
"""Query for the icon model"""
        qry = cls.query(**kwargs)
        if toplevel:
            qry = qry.filter(cls.toplevel == toplevel)
        if name:
            qry = qry.filter(cls.name == name)
        if collection:
            qry = qry.filter(cls.collection == collection)
        if not private:
            qry = qry.filter(cls.private == False)
        # else: include both private and non-private icons in the results
        if order_by_count:
            qry = qry.order(-cls.cnt)
        return qry
@classmethod
def get_by_toplevel(cls, toplevel=None, collection=None, private=False,
keys_only=False, limit=100):
"""Returns icon dbs or keys defined by its toplevel and some addition parameters"""
return cls.qry(toplevel=toplevel,collection=collection,private=private).\
fetch(keys_only=keys_only, limit=limit)
@classmethod
def get_dbs(
cls, name=None, private=None, \
replaced_by=None, **kwargs
):
kwargs = cls.get_col_dbs(**kwargs)
kwargs = cls.get_counter_dbs(**kwargs)
return super(Icon, cls).get_dbs(
name=name or util.param('name', None),
private=private or util.param('private', bool),
replaced_by=replaced_by or util.param('replaced_by', ndb.Key),
**kwargs
)
def _add_and_put(self, auto=True):
""" Adds and puts an icon to the DB
If 'auto' is true it automatically creates a toplevel icon if none is given.
This only works for one level, if a higher hierarchy is required it needs to be
done manually.
"""
if not getattr(self,'toplevel',None) \
and self.collection != Collection.top_key() \
and auto \
and not self.private: #no toplevel if private
#top = Icon(icon=self.icon,name=self.name)
top = Icon(icon=self.icon,name=self.name,\
private=False, icon_url=self.icon_url, \
external_source=self.external_source, \
filetype=self.filetype, keywords=self.keywords)
            if getattr(self, 'fallback', None):  # TODO test fallbacks
                fallback_db = self.fallback.get()
                # take the fallback's toplevel if available
                fallback_key = getattr(fallback_db, 'toplevel', None)
                if not fallback_key:
                    fallback_key = self.fallback
                top.fallback = fallback_key
top_key = top.put()
self.toplevel = top_key
self.incr()
self.put()
#self.get_icon()
return self.key
class Iconize(ndb.Model):
    """Adds an icon property.
    Icons are managed in the 'Icon' model; this mixin
    adds two methods to deal with icons:
    'add_icon': if an icon already exists it can be added by its key
    'create_icon': create a new icon
    Both methods 'put' the icons automatically, so it is recommended to
    put the iconized model as well, or to remove the icon again if something
    went wrong. (See the usage sketch after this class.)
    """
#icon = ndb.StructuredProperty(IconStructure)
icon_id = ndb.IntegerProperty(indexed=True,required=True, default=0)
icon_url = ndb.StringProperty(required=True,default="",indexed=False)
def add_icon(self, key=None, id=None):
"""Adds an icon by key or id, the key is either a toplevel key or an icon key.
'id' needs to be a integer."""
if id:
key = Icon.id_to_key(id)
elif key:
id = key.id()
else:
return False
if not getattr(self,'collection',None):
col = Collection.top_key()
else:
col = self.collection
key = Icon.add(key,collection=col)
#self.icon = key.get().get_icon()
self.icon_id = key.id()
self.icon_url = key.get().icon_url
def create_icon(self,icon,name,private=False):
if not getattr(self,'collection',None):
col = Collection.top_key()
else:
col = self.collection
key = Icon.create(icon=icon,name=name,collection=col,private=private)
#icon.icon_key = key
#self.icon = icon
self.icon_id = key.id()
self.icon_url = key.get().icon_url
def remove_icon(self):
if getattr(self,'icon_id',None):
Icon.remove(self.icon_id)
self.icon_id = 0
self.icon_url = ""
    ## TODO write test
    # should not be used anymore; replaced by get_icon_id
def get_icon_key(self):
if getattr(self,'icon',None):
return self.icon.icon_key
elif getattr(self,'toplevel',None):
top_db = self.toplevel.get()
if getattr(top_db,'icon',None):
return top_db.icon.icon_key
            else:
                return None
def get_icon_id(self):
if getattr(self,'icon_id',None):
return self.icon_id
elif getattr(self,'toplevel',None):
top_db = self.toplevel.get()
if getattr(top_db,'icon_id',None):
return top_db.icon_id
            else:
                return None
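# --- Usage sketch (hypothetical 'Article' model; not part of the original code) ---
# Any model gains icon handling by inheriting the Iconize mixin; remember to
# put the iconized model itself after adding or creating an icon.
class Article(Iconize):
    title = ndb.StringProperty()
# article = Article(title='example')
# article.create_icon(icon='<svg>...</svg>', name='example-icon')
# article.put()  # put the model after the icon, as recommended above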
|
EDWARDSVILLE– Austin Peay’s Terry Taylor scored a career-best 33 points to lead the Governors past SIUE 79-71 at the Vadalabene Center.
Taylor made 12 of 24 shots, including two treys, sank 7 of 13 free throws and grabbed 12 rebounds.
While SIUE fell to 5-12 overall and 1-4 in the OVC, Austin Peay remained tied with Murray State and Jacksonville State for first place in the league. The Governors improved to 13-5 and 5-0 in defeating the Cougars for the eighth successive time.
Freshman Cameron Williams continued his solid play by scoring a team-high 16 points. He also had seven rebounds. Reserve Jaylen McCoy pitched in 10 points, while Tyresse Williford contributed 13 points and four assists. Brandon Jackson added eight points and eight rebounds.
Foul trouble also badgered the Cougars, who committed 29 fouls and had three players foul out. Austin Peay made 17 of 34 free throws and hit on 28 of 62 field-goal attempts, including 6 of 20 treys.
SIUE made 22 of 58 shots, including 9 of 20 from 3-point land. The Cougars sank 18 of 23 foul shots and snagged 41 rebounds, one more than the Governors.
“I like how we played the whole game, but we were a little passive against their zone defense,” Harris said.
The Cougars welcome OVC opponent Murray State at 7 p.m. Saturday to complete their two-game homestand. SIUE then travels to Southeast Missouri (Jan. 24) and UT Martin (Jan. 26) before returning home to face Morehead State (Jan. 31) and Eastern Kentucky (Feb. 2). Both of those home games are scheduled for 7 p.m. |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from django.http import HttpResponseRedirect, HttpResponse, HttpResponseBadRequest
from django.shortcuts import render, redirect
from django import forms
from django.template.loader import render_to_string
from django.template import Context, Template
from django.template import RequestContext
from django.conf import settings
from django.views.generic import TemplateView
from pytigon_lib.schviews.form_fun import form_with_perms
from pytigon_lib.schviews.viewtools import dict_to_template, dict_to_odf, dict_to_pdf, dict_to_json, dict_to_xml
from pytigon_lib.schviews.viewtools import render_to_response
from pytigon_lib.schdjangoext.tools import make_href
from pytigon_lib.schviews import actions
from django.utils.translation import ugettext_lazy as _
from . import models
import os
import sys
import datetime
import time
from pytigon_lib.schdjangoext.tools import import_model
from pyexcel_odsr import get_data
from pytigon_lib.schtools.schjson import json_dumps, json_loads
from pytigon_lib.schfs.vfstools import get_temp_filename
import openpyxl
import csv
PFORM = form_with_perms('schtools')
class ImportTableForm(forms.Form):
import_file = forms.FileField(label=_('File to import'), required=True, )
def view_importtableform(request, *argi, **argv):
return PFORM(request, ImportTableForm, 'schtools/formimporttableform.html', {})
def autocomplete_search(request, type):
    q = request.GET.get('query', request.POST.get('query', None))
    if not q:
        return HttpResponse(content_type='text/plain')
    limit = request.GET.get('limit', request.POST.get('limit', 15))
    try:
        limit = int(limit)
    except ValueError:
        return HttpResponseBadRequest()
    # assumption: the Autocomplete model lives in this app's models module
    if q != ' ':
        tab = models.Autocomplete.objects.filter(type=type, label__istartswith=q)[:limit]
    else:
        tab = models.Autocomplete.objects.filter(type=type)[:limit]
    out_tab = []
    for pos in tab:
        out_tab.append({'id': pos.id, 'label': pos.label, 'name': pos.label, 'value': pos.value})
    json_data = json_dumps(out_tab)
    return HttpResponse(json_data, content_type='application/x-javascript')
def set_user_param(request, **argv):
key = request.POST.get('param', None)
value = request.POST.get('value', None)
user = request.user.username
p = models.Parameter.objects.filter(type='sys_user', subtype=user, key=key)
if len(p)>0:
obj = p[0]
else:
obj = models.Parameter()
obj.type = 'sys_user'
obj.subtype = user
obj.key = key
obj.value = value
obj.save()
return HttpResponse("OK")
def get_user_param(request, **argv):
key = request.POST.get('param', None)
user = request.user.username
p = models.Parameter.objects.filter(type='sys_user', subtype=user, key=key)
if len(p)>0:
obj = p[0]
return HttpResponse(obj.value)
else:
return HttpResponse("")
@dict_to_template('schtools/v_import_table.html')
def import_table(request, app, table):
if request.FILES:
if 'import_file' in request.FILES:
data = request.FILES['import_file']
name = data.name
ext = name.split('.')[-1].lower()
model = import_model(app, table)
table = []
if ext in ('xlsx', 'xls', 'ods'):
if ext == 'ods':
d = get_data(data)
#print("F0", d)
#buf = json_dumps(d)
for key in d:
table = d[key]
break
else:
                    first_line = True
                    file_name = get_temp_filename("temp.xlsx")
                    f = open(file_name, 'wb')
                    f.write(data.read())
                    f.close()
workbook = openpyxl.load_workbook(filename=file_name, read_only=True)
worksheets = workbook.get_sheet_names()
worksheet = workbook.get_sheet_by_name(worksheets[0])
for row in list(worksheet.rows):
if first_line:
first_line = False
buf = []
                            i = 0
for pos in row:
value = pos.value
if value:
buf.append(value)
else:
break
i += 1
if len(buf)>0:
count = len(buf)
table.append(buf)
else:
break
else:
if row[0].value:
buf = []
i = 0
for pos in row:
if i >= count:
break
buf.append(pos.value)
i += 1
table.append(buf)
else:
break
os.remove(file_name)
elif ext in ('txt', 'csv'):
first_line = True
sep_list = ['\t', ';', ',', '|', ]
sep = None
txt = data.read().decode('utf-8').replace('\r','').split('\n')
for line in txt:
for pos in sep_list:
if pos in line:
sep = pos
break
break
if sep:
csv_reader = csv.reader(txt, delimiter=sep)
for row in csv_reader:
table.append(row)
if table and len(table)>1:
            header = [pos.strip() for pos in table[0] if pos]
tree = False
tmp = []
for pos in header:
if not pos in tmp:
tmp.append(pos)
else:
tree = True
id1 = tmp.index(pos)
id2 = len(tmp)
break
for row in table[1:]:
if len(row) == len(header):
x = model()
parent = None
for index, (attr_name, value) in enumerate(zip(header,row)):
if tree:
if index == id1:
if row[id2]:
objs = model.objects.filter(**{ attr_name: value })
if len(objs)==1:
parent = objs[0]
else:
setattr(x, attr_name, value)
elif index == id2:
if row[id2]:
setattr(x, attr_name, value)
if parent:
setattr(x, 'parent', parent)
else:
setattr(x, attr_name, value)
else:
setattr(x, attr_name, value)
x.save()
return { 'redirect': '/schsys/ok/' }
else:
form = ImportTableForm(request.POST, request.FILES)
else:
form = ImportTableForm()
return { 'form': form }
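# --- Expected input sketch (illustrative only; not part of the original code) ---
# import_table accepts .xlsx/.xls/.ods or .txt/.csv uploads. The first row
# holds model field names; a repeated header name switches to tree mode,
# where the first occurrence selects the parent row and the second holds
# the new row's own value, e.g. (';'-separated):
#
#   name;name;code
#   root;;R1
#   root;child;C1    -> creates 'child' with parent 'root'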
|
14-karat gold-plated brass 2.5mm x 16mm bar post wrapped with aqua thread. The ear backs have triple 1.2mm x 52mm twist-bar drops. These front-back earrings hang approximately 2.5".
import time
from itertools import chain
from .connection import Urllib3HttpConnection
from .connection_pool import ConnectionPool, DummyConnectionPool
from .serializer import JSONSerializer, Deserializer, DEFAULT_SERIALIZERS
from .exceptions import ConnectionError, TransportError, SerializationError, \
ConnectionTimeout
def get_host_info(node_info, host):
"""
    Simple callback that takes the node info from `/_cluster/nodes` and the
    parsed connection information, and returns the connection information. If
    `None` is returned this node will be skipped.
Useful for filtering nodes (by proximity for example) or if additional
information needs to be provided for the :class:`~elasticsearch.Connection`
class. By default master only nodes are filtered out since they shouldn't
typically be used for API operations.
:arg node_info: node information from `/_cluster/nodes`
:arg host: connection information (host, port) extracted from the node info
"""
# ignore master only nodes
if node_info.get('roles', []) == ['master']:
return None
return host
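# --- Example callback (illustrative sketch; not part of the original module) ---
# A host_info_callback in the same spirit as get_host_info above: keep only
# nodes that advertise the 'data' role and skip everything else.
def data_nodes_only(node_info, host):
    if 'data' not in node_info.get('roles', []):
        return None
    return host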
class Transport(object):
"""
    Encapsulation of transport-related logic. Handles instantiation of the
individual connections as well as creating a connection pool to hold them.
Main interface is the `perform_request` method.
"""
def __init__(self, hosts, connection_class=Urllib3HttpConnection,
connection_pool_class=ConnectionPool, host_info_callback=get_host_info,
sniff_on_start=False, sniffer_timeout=None, sniff_timeout=.1,
sniff_on_connection_fail=False, serializer=JSONSerializer(), serializers=None,
default_mimetype='application/json', max_retries=3, retry_on_status=(502, 503, 504, ),
retry_on_timeout=False, send_get_body_as='GET', **kwargs):
"""
:arg hosts: list of dictionaries, each containing keyword arguments to
create a `connection_class` instance
:arg connection_class: subclass of :class:`~elasticsearch.Connection` to use
:arg connection_pool_class: subclass of :class:`~elasticsearch.ConnectionPool` to use
:arg host_info_callback: callback responsible for taking the node information from
        `/_cluster/nodes`, along with already extracted information, and
producing a list of arguments (same as `hosts` parameter)
:arg sniff_on_start: flag indicating whether to obtain a list of nodes
        from the cluster at startup time
:arg sniffer_timeout: number of seconds between automatic sniffs
:arg sniff_on_connection_fail: flag controlling if connection failure triggers a sniff
:arg sniff_timeout: timeout used for the sniff request - it should be a
fast api call and we are talking potentially to more nodes so we want
to fail quickly. Not used during initial sniffing (if
``sniff_on_start`` is on) when the connection still isn't
initialized.
:arg serializer: serializer instance
:arg serializers: optional dict of serializer instances that will be
used for deserializing data coming from the server. (key is the mimetype)
:arg default_mimetype: when no mimetype is specified by the server
response assume this mimetype, defaults to `'application/json'`
:arg max_retries: maximum number of retries before an exception is propagated
:arg retry_on_status: set of HTTP status codes on which we should retry
on a different node. defaults to ``(502, 503, 504)``
:arg retry_on_timeout: should timeout trigger a retry on different
node? (default `False`)
:arg send_get_body_as: for GET requests with body this option allows
you to specify an alternate way of execution for environments that
don't support passing bodies with GET requests. If you set this to
'POST' a POST method will be used instead, if to 'source' then the body
will be serialized and passed as a query parameter `source`.
Any extra keyword arguments will be passed to the `connection_class`
        when creating an instance, unless overridden by that connection's
options provided as part of the hosts parameter.
"""
# serialization config
_serializers = DEFAULT_SERIALIZERS.copy()
# if a serializer has been specified, use it for deserialization as well
_serializers[serializer.mimetype] = serializer
# if custom serializers map has been supplied, override the defaults with it
if serializers:
_serializers.update(serializers)
# create a deserializer with our config
self.deserializer = Deserializer(_serializers, default_mimetype)
self.max_retries = max_retries
self.retry_on_timeout = retry_on_timeout
self.retry_on_status = retry_on_status
self.send_get_body_as = send_get_body_as
# data serializer
self.serializer = serializer
# store all strategies...
self.connection_pool_class = connection_pool_class
self.connection_class = connection_class
# ...save kwargs to be passed to the connections
self.kwargs = kwargs
self.hosts = hosts
# ...and instantiate them
self.set_connections(hosts)
# retain the original connection instances for sniffing
self.seed_connections = self.connection_pool.connections[:]
# sniffing data
self.sniffer_timeout = sniffer_timeout
self.sniff_on_connection_fail = sniff_on_connection_fail
self.last_sniff = time.time()
self.sniff_timeout = sniff_timeout
# callback to construct host dict from data in /_cluster/nodes
self.host_info_callback = host_info_callback
if sniff_on_start:
self.sniff_hosts(True)
def add_connection(self, host):
"""
Create a new :class:`~elasticsearch.Connection` instance and add it to the pool.
:arg host: kwargs that will be used to create the instance
"""
self.hosts.append(host)
self.set_connections(self.hosts)
def set_connections(self, hosts):
"""
Instantiate all the connections and create new connection pool to hold them.
Tries to identify unchanged hosts and re-use existing
:class:`~elasticsearch.Connection` instances.
:arg hosts: same as `__init__`
"""
# construct the connections
def _create_connection(host):
# if this is not the initial setup look at the existing connection
# options and identify connections that haven't changed and can be
# kept around.
if hasattr(self, 'connection_pool'):
for (connection, old_host) in self.connection_pool.connection_opts:
if old_host == host:
return connection
# previously unseen params, create new connection
kwargs = self.kwargs.copy()
kwargs.update(host)
return self.connection_class(**kwargs)
connections = map(_create_connection, hosts)
connections = list(zip(connections, hosts))
if len(connections) == 1:
self.connection_pool = DummyConnectionPool(connections)
else:
# pass the hosts dicts to the connection pool to optionally extract parameters from
self.connection_pool = self.connection_pool_class(connections, **self.kwargs)
def get_connection(self):
"""
        Retrieve a :class:`~elasticsearch.Connection` instance from the
:class:`~elasticsearch.ConnectionPool` instance.
"""
if self.sniffer_timeout:
if time.time() >= self.last_sniff + self.sniffer_timeout:
self.sniff_hosts()
return self.connection_pool.get_connection()
def _get_sniff_data(self, initial=False):
"""
        Perform the request to get sniffing information. Returns a list of
dictionaries (one per node) containing all the information from the
cluster.
It also sets the last_sniff attribute in case of a successful attempt.
        In rare cases it might be possible to override this method in your
        custom Transport class to serve data from an alternative source, such
        as configuration management.
"""
previous_sniff = self.last_sniff
try:
# reset last_sniff timestamp
self.last_sniff = time.time()
# go through all current connections as well as the
# seed_connections for good measure
for c in chain(self.connection_pool.connections, self.seed_connections):
try:
# use small timeout for the sniffing request, should be a fast api call
_, headers, node_info = c.perform_request(
'GET', '/_nodes/_all/http',
timeout=self.sniff_timeout if not initial else None)
node_info = self.deserializer.loads(node_info, headers.get('content-type'))
break
except (ConnectionError, SerializationError):
pass
else:
raise TransportError("N/A", "Unable to sniff hosts.")
except:
# keep the previous value on error
self.last_sniff = previous_sniff
raise
return list(node_info['nodes'].values())
def _get_host_info(self, host_info):
host = {}
address = host_info.get('http', {}).get('publish_address')
# malformed or no address given
if not address or ':' not in address:
return None
host['host'], host['port'] = address.rsplit(':', 1)
host['port'] = int(host['port'])
return self.host_info_callback(host_info, host)
def sniff_hosts(self, initial=False):
"""
Obtain a list of nodes from the cluster and create a new connection
pool using the information retrieved.
To extract the node connection parameters use the ``nodes_to_host_callback``.
:arg initial: flag indicating if this is during startup
(``sniff_on_start``), ignore the ``sniff_timeout`` if ``True``
"""
node_info = self._get_sniff_data(initial)
hosts = list(filter(None, (self._get_host_info(n) for n in node_info)))
# we weren't able to get any nodes or host_info_callback blocked all -
# raise error.
if not hosts:
raise TransportError("N/A", "Unable to sniff hosts - no viable hosts found.")
self.set_connections(hosts)
def mark_dead(self, connection):
"""
Mark a connection as dead (failed) in the connection pool. If sniffing
on failure is enabled this will initiate the sniffing process.
:arg connection: instance of :class:`~elasticsearch.Connection` that failed
"""
# mark as dead even when sniffing to avoid hitting this host during the sniff process
self.connection_pool.mark_dead(connection)
if self.sniff_on_connection_fail:
self.sniff_hosts()
def perform_request(self, method, url, headers=None, params=None, body=None):
"""
Perform the actual request. Retrieve a connection from the connection
        pool, pass all the information to its perform_request method and
return the data.
If an exception was raised, mark the connection as failed and retry (up
to `max_retries` times).
        If the operation was successful and the connection used was previously
        marked as dead, mark it as live, resetting its failure count.
:arg method: HTTP method to use
:arg url: absolute url (without host) to target
:arg headers: dictionary of headers, will be handed over to the
underlying :class:`~elasticsearch.Connection` class
:arg params: dictionary of query parameters, will be handed over to the
underlying :class:`~elasticsearch.Connection` class for serialization
        :arg body: body of the request; it will be serialized using the serializer and
passed to the connection
"""
if body is not None:
body = self.serializer.dumps(body)
# some clients or environments don't support sending GET with body
if method in ('HEAD', 'GET') and self.send_get_body_as != 'GET':
# send it as post instead
if self.send_get_body_as == 'POST':
method = 'POST'
# or as source parameter
elif self.send_get_body_as == 'source':
if params is None:
params = {}
params['source'] = body
body = None
if body is not None:
try:
body = body.encode('utf-8', 'surrogatepass')
except (UnicodeDecodeError, AttributeError):
# bytes/str - no need to re-encode
pass
ignore = ()
timeout = None
if params:
timeout = params.pop('request_timeout', None)
ignore = params.pop('ignore', ())
if isinstance(ignore, int):
ignore = (ignore, )
for attempt in range(self.max_retries + 1):
connection = self.get_connection()
try:
# add a delay before attempting the next retry
# 0, 1, 3, 7, etc...
delay = 2**attempt - 1
time.sleep(delay)
status, headers_response, data = connection.perform_request(method, url, params, body, headers=headers, ignore=ignore, timeout=timeout)
except TransportError as e:
if method == 'HEAD' and e.status_code == 404:
return False
retry = False
if isinstance(e, ConnectionTimeout):
retry = self.retry_on_timeout
elif isinstance(e, ConnectionError):
retry = True
elif e.status_code in self.retry_on_status:
retry = True
if retry:
# only mark as dead if we are retrying
self.mark_dead(connection)
# raise exception on last retry
if attempt == self.max_retries:
raise
else:
raise
else:
# connection didn't fail, confirm it's live status
self.connection_pool.mark_live(connection)
if method == 'HEAD':
return 200 <= status < 300
if data:
data = self.deserializer.loads(data, headers_response.get('content-type'))
return data
def close(self):
"""
Explicitly closes connections
"""
self.connection_pool.close()
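# --- Usage sketch (assumption: an Elasticsearch node reachable at localhost:9200) ---
# Typical lifecycle: construct a Transport, perform a request, close the pool.
if __name__ == '__main__':
    transport = Transport([{'host': 'localhost', 'port': 9200}])
    try:
        # '/' returns basic cluster information as a deserialized dict
        print(transport.perform_request('GET', '/'))
    finally:
        transport.close()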
|
McLaren driver Fernando Alonso posted the seventh-fastest time in practice for the Mexican Grand Prix, making him the quickest of the midfield group. In the race the Spaniard will take a penalty for an engine change. Fernando Alonso: "I'd say today was a very positive and productive Friday. We covered a lot of laps, had a lot of things to test and got positive results. We were only seven tenths off the fastest time and two tenths clear of Bottas, which looks surprisingly competitive on such a difficult track. We know it will be a difficult race in any case. Starting from the back of the field is hard here, and being overtaken is also pretty easy. To some extent we have to sacrifice this race to have a fresh engine in Brazil and Abu Dhabi. We still don't know where we'll be after qualifying, so let's see which decision gives us the least painful result. It was great to see so many people in the stands in the Stadium section. It's great that we have so much support here."
# -*- coding: utf-8 -*-
# Copyright 2012 Viewfinder Inc. All Rights Reserved.
"""SMS utilities.
Helper methods used when sending SMS messages.
"""
__author__ = '[email protected] (Andy Kimball)'
import re
from tornado import escape
# Regular expression used to identify valid GSM characters, which is the 7-bit character set
# that is widely supported by SMS systems across the world (i.e. *not* ASCII):
# https://en.wikipedia.org/wiki/GSM_03.38
_good_gsm_chars = u'@£$¥èéùìòÇ\nØø\rÅå_ÆæßÉ !"#%&\'()*+,-./0123456789:;<=>?¡ABCDEFGHIJKLMNOPQRSTUVWXYZÄÖÑܧ¿abcdefghijklmnopqrstuvwxyzäöñüà'
assert len(_good_gsm_chars) == 116
_gsm_re = re.compile(u'^[%s]*$' % re.escape(_good_gsm_chars))
# Greek capital letters contained in the GSM character set, and the currency symbol don't get
# sent properly in the GSM encoding (they get mapped into other chars by some intermediary).
_bad_gsm_chars = u'¤ΔΦΓΛΩΠΨΣΘΞ'
assert len(escape.to_unicode(_bad_gsm_chars)) == 11
_force_unicode_re = re.compile(u'^[%s%s]*$' % (re.escape(_bad_gsm_chars), re.escape(_good_gsm_chars)))
# Maximum number of GSM encoded chars that Twilio can send.
MAX_GSM_CHARS = 160
# Maximum number of UTF-16 encoded chars that Twilio can send. The SMS spec really uses the
# UCS-2 encoding, but many/most devices allow UTF-16, which allows non-BMP chars to be used
# (such as Emoji).
MAX_UTF16_CHARS = 70
def ForceUnicode(value):
"""Returns true if the value contains only GSM chars, but also contains at least one
problematic GSM char, such as a Greek capital letter. In this case, the caller should
force the UCS-2 SMS encoding so that GSM will not be attempted.
"""
value = escape.to_unicode(value)
return _force_unicode_re.search(value) and not _gsm_re.search(value)
def IsOneSMSMessage(value):
"""Returns true if the value can be sent in a single SMS message. If the value contains
only GSM chars, then it can be up to 160 chars. Otherwise, it must be sent as Unicode and
can only be up to 70 chars.
"""
value = escape.to_unicode(value)
utf16_count = len(value.encode('utf-16-be')) / 2
if _gsm_re.search(value):
return utf16_count <= MAX_GSM_CHARS
return utf16_count <= MAX_UTF16_CHARS
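# --- Usage sketch (illustrative only; not part of the original module) ---
# Shows how a caller might choose an SMS encoding before handing the message
# to a provider; the provider call itself is out of scope here.
if __name__ == '__main__':
    msg = u'\u0394 meeting at 9'  # capital Greek Delta: a GSM char that needs UCS-2
    if ForceUnicode(msg):
        print('send with forced UCS-2 encoding')
    print('fits in a single SMS: %s' % IsOneSMSMessage(msg))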
|
Most Popular Missoni from all around the world are offered at the online optical superstore. Find Most Popular, Women's, Men's and more Most Popular Missoni online for sale in our vast selection of eyewear. Thousands of styles are available to choose from. You can find the perfect pair to complement your facial features and style. Save time and money, and order frames conveniently from your mobile device or keyboard, or give us a call any time of the day for assistance. Our live eyewear experts are here to serve you. If you have seen a cheaper price on Most Popular Missoni, please let us know using the price match request form. Free shipping, 30-day returns and doctor-verified prescription glasses are what to expect when buying your frames from the online optical superstore, in business since 1999.
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('problem', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Submission',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('datetime', models.DateTimeField(auto_now_add=True)),
('score', models.IntegerField(default=0)),
('status', models.CharField(default=b'QUE', max_length=3)),
('running_time', models.IntegerField(default=0)),
('running_memory', models.IntegerField(default=0)),
('info', models.TextField(blank=True)),
('code', models.TextField()),
('Language', models.ForeignKey(related_name='submissions', to='problem.Language')),
('problem', models.ForeignKey(to='problem.Problem')),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
),
]
|
Assessment of Student Learning Committee, "Assessment of Student Learning minutes 10/30/1997" (1997). Assessment of Student Learning Minutes. 115. |
"""
mflpf module. Contains the ModflowLpf class. Note that the user can access
the ModflowLpf class as `flopy.modflow.ModflowLpf`.
Additional information for this MODFLOW package can be found at the `Online
MODFLOW Guide
<http://water.usgs.gov/ogw/modflow/MODFLOW-2005-Guide/index.html?lpf.htm>`_.
"""
import sys
import numpy as np
from .mfpar import ModflowPar as mfpar
from ..pakbase import Package
from ..utils import Util2d, Util3d, read1d
from ..utils.flopy_io import line_parse
class ModflowLpf(Package):
"""
MODFLOW Layer Property Flow Package Class.
Parameters
----------
model : model object
The model object (of type :class:`flopy.modflow.mf.Modflow`) to which
this package will be added.
ipakcb : int
A flag that is used to determine if cell-by-cell budget data should be
saved. If ipakcb is non-zero cell-by-cell budget data will be saved.
        (default is None, in which case cell-by-cell data are not saved).
hdry : float
Is the head that is assigned to cells that are converted to dry during
a simulation. Although this value plays no role in the model
calculations, it is useful as an indicator when looking at the
resulting heads that are output from the model. HDRY is thus similar
to HNOFLO in the Basic Package, which is the value assigned to cells
that are no-flow cells at the start of a model simulation.
(default is -1.e30).
laytyp : int or array of ints (nlay)
Layer type, contains a flag for each layer that specifies the layer type.
0 confined
>0 convertible
<0 convertible unless the THICKSTRT option is in effect.
(default is 0).
layavg : int or array of ints (nlay)
Layer average
0 is harmonic mean
1 is logarithmic mean
2 is arithmetic mean of saturated thickness and logarithmic mean of
of hydraulic conductivity
(default is 0).
chani : float or array of floats (nlay)
contains a value for each layer that is a flag or the horizontal
anisotropy. If CHANI is less than or equal to 0, then variable HANI
defines horizontal anisotropy. If CHANI is greater than 0, then CHANI
is the horizontal anisotropy for the entire layer, and HANI is not
read. If any HANI parameters are used, CHANI for all layers must be
less than or equal to 0. Use as many records as needed to enter a
value of CHANI for each layer. The horizontal anisotropy is the ratio
of the hydraulic conductivity along columns (the Y direction) to the
hydraulic conductivity along rows (the X direction).
(default is 1).
layvka : float or array of floats (nlay)
a flag for each layer that indicates whether variable VKA is vertical
hydraulic conductivity or the ratio of horizontal to vertical
hydraulic conductivity.
0: VKA is vertical hydraulic conductivity
not 0: VKA is the ratio of horizontal to vertical hydraulic conductivity
(default is 0).
laywet : float or array of floats (nlay)
contains a flag for each layer that indicates if wetting is active.
0 wetting is inactive
not 0 wetting is active
(default is 0).
wetfct : float
is a factor that is included in the calculation of the head that is
initially established at a cell when it is converted from dry to wet.
(default is 0.1).
iwetit : int
is the iteration interval for attempting to wet cells. Wetting is
attempted every IWETIT iteration. If using the PCG solver
(Hill, 1990), this applies to outer iterations, not inner iterations.
If IWETIT less than or equal to 0, it is changed to 1.
(default is 1).
ihdwet : int
is a flag that determines which equation is used to define the
initial head at cells that become wet.
(default is 0)
hk : float or array of floats (nlay, nrow, ncol)
is the hydraulic conductivity along rows. HK is multiplied by
horizontal anisotropy (see CHANI and HANI) to obtain hydraulic
conductivity along columns.
(default is 1.0).
hani : float or array of floats (nlay, nrow, ncol)
is the ratio of hydraulic conductivity along columns to hydraulic
conductivity along rows, where HK of item 10 specifies the hydraulic
conductivity along rows. Thus, the hydraulic conductivity along
columns is the product of the values in HK and HANI.
(default is 1.0).
vka : float or array of floats (nlay, nrow, ncol)
is either vertical hydraulic conductivity or the ratio of horizontal
to vertical hydraulic conductivity depending on the value of LAYVKA.
(default is 1.0).
ss : float or array of floats (nlay, nrow, ncol)
is specific storage unless the STORAGECOEFFICIENT option is used.
When STORAGECOEFFICIENT is used, Ss is confined storage coefficient.
(default is 1.e-5).
sy : float or array of floats (nlay, nrow, ncol)
is specific yield.
(default is 0.15).
vkcb : float or array of floats (nlay, nrow, ncol)
is the vertical hydraulic conductivity of a Quasi-three-dimensional
confining bed below a layer. (default is 0.0). Note that if an array
is passed for vkcb it must be of size (nlay, nrow, ncol) even though
the information for the bottom layer is not needed.
wetdry : float or array of floats (nlay, nrow, ncol)
is a combination of the wetting threshold and a flag to indicate
which neighboring cells can cause a cell to become wet.
(default is -0.01).
storagecoefficient : boolean
indicates that variable Ss and SS parameters are read as storage
coefficient rather than specific storage.
(default is False).
constantcv : boolean
indicates that vertical conductance for an unconfined cell is
computed from the cell thickness rather than the saturated thickness.
The CONSTANTCV option automatically invokes the NOCVCORRECTION
option. (default is False).
thickstrt : boolean
indicates that layers having a negative LAYTYP are confined, and their
cell thickness for conductance calculations will be computed as
STRT-BOT rather than TOP-BOT. (default is False).
nocvcorrection : boolean
indicates that vertical conductance is not corrected when the vertical
flow correction is applied. (default is False).
novfc : boolean
turns off the vertical flow correction under dewatered conditions.
This option turns off the vertical flow calculation described on p.
5-8 of USGS Techniques and Methods Report 6-A16 and the vertical
conductance correction described on p. 5-18 of that report.
(default is False).
extension : string
Filename extension (default is 'lpf')
unitnumber : int
File unit number (default is None).
filenames : str or list of str
Filenames to use for the package and the output files. If
filenames=None the package name will be created using the model name
and package extension and the cbc output name will be created using
the model name and .cbc extension (for example, modflowtest.cbc),
if ipakcbc is a number greater than zero. If a single string is passed
the package will be set to the string and cbc output name will be
created using the model name and .cbc extension, if ipakcbc is a
number greater than zero. To define the names for all package files
(input and output) the length of the list of strings should be 2.
Default is None.
Attributes
----------
Methods
-------
See Also
--------
Notes
-----
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow()
>>> lpf = flopy.modflow.ModflowLpf(m)
"""
'Layer-property flow package class\n'
def __init__(self, model, laytyp=0, layavg=0, chani=1.0, layvka=0,
laywet=0, ipakcb=None, hdry=-1E+30, iwdflg=0, wetfct=0.1,
iwetit=1, ihdwet=0, hk=1.0, hani=1.0, vka=1.0, ss=1e-5,
sy=0.15, vkcb=0.0, wetdry=-0.01, storagecoefficient=False,
constantcv=False, thickstrt=False, nocvcorrection=False,
novfc=False, extension='lpf',
unitnumber=None, filenames=None):
        # set default unit number if one is not specified
if unitnumber is None:
unitnumber = ModflowLpf.defaultunit()
# set filenames
if filenames is None:
filenames = [None, None]
elif isinstance(filenames, str):
filenames = [filenames, None]
elif isinstance(filenames, list):
if len(filenames) < 2:
filenames.append(None)
# update external file information with cbc output, if necessary
if ipakcb is not None:
fname = filenames[1]
model.add_output_file(ipakcb, fname=fname,
package=ModflowLpf.ftype())
else:
ipakcb = 0
# Fill namefile items
name = [ModflowLpf.ftype()]
units = [unitnumber]
extra = ['']
# set package name
fname = [filenames[0]]
# Call ancestor's init to set self.parent, extension, name and unit number
Package.__init__(self, model, extension=extension, name=name,
unit_number=units, extra=extra, filenames=fname)
self.heading = '# {} package for '.format(self.name[0]) + \
' {}, '.format(model.version_types[model.version]) + \
'generated by Flopy.'
self.url = 'lpf.htm'
nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper
# item 1
self.ipakcb = ipakcb
self.hdry = hdry # Head in cells that are converted to dry during a simulation
self.nplpf = 0 # number of LPF parameters
self.laytyp = Util2d(model, (nlay,), np.int, laytyp, name='laytyp')
self.layavg = Util2d(model, (nlay,), np.int, layavg, name='layavg')
self.chani = Util2d(model, (nlay,), np.float32, chani, name='chani')
self.layvka = Util2d(model, (nlay,), np.int, layvka, name='layvka')
self.laywet = Util2d(model, (nlay,), np.int, laywet, name='laywet')
self.wetfct = wetfct # Factor that is included in the calculation of the head when a cell is converted from dry to wet
self.iwetit = iwetit # Iteration interval for attempting to wet cells
self.ihdwet = ihdwet # Flag that determines which equation is used to define the initial head at cells that become wet
self.options = ' '
if storagecoefficient:
self.options = self.options + 'STORAGECOEFFICIENT '
if constantcv: self.options = self.options + 'CONSTANTCV '
if thickstrt: self.options = self.options + 'THICKSTRT '
if nocvcorrection: self.options = self.options + 'NOCVCORRECTION '
if novfc: self.options = self.options + 'NOVFC '
self.hk = Util3d(model, (nlay, nrow, ncol), np.float32, hk, name='hk',
locat=self.unit_number[0])
self.hani = Util3d(model, (nlay, nrow, ncol), np.float32, hani,
name='hani', locat=self.unit_number[0])
keys = []
for k in range(nlay):
key = 'vka'
if self.layvka[k] != 0:
key = 'vani'
keys.append(key)
self.vka = Util3d(model, (nlay, nrow, ncol), np.float32, vka,
name=keys, locat=self.unit_number[0])
tag = 'ss'
if storagecoefficient:
tag = 'storage'
self.ss = Util3d(model, (nlay, nrow, ncol), np.float32, ss, name=tag,
locat=self.unit_number[0])
self.sy = Util3d(model, (nlay, nrow, ncol), np.float32, sy, name='sy',
locat=self.unit_number[0])
self.vkcb = Util3d(model, (nlay, nrow, ncol), np.float32, vkcb,
name='vkcb', locat=self.unit_number[0])
self.wetdry = Util3d(model, (nlay, nrow, ncol), np.float32, wetdry,
name='wetdry', locat=self.unit_number[0])
self.parent.add_package(self)
return
def write_file(self, check=True, f=None):
"""
Write the package file.
Parameters
----------
check : boolean
Check package data for common errors. (default True)
Returns
-------
None
"""
if check: # allows turning off package checks when writing files at model level
self.check(f='{}.chk'.format(self.name[0]),
verbose=self.parent.verbose, level=1)
# get model information
nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper
dis = self.parent.get_package('DIS')
if dis is None:
dis = self.parent.get_package('DISU')
# Open file for writing
if f is None:
f = open(self.fn_path, 'w')
# Item 0: text
f.write('{}\n'.format(self.heading))
# Item 1: IBCFCB, HDRY, NPLPF
f.write('{0:10d}{1:10.6G}{2:10d} {3:s}\n'.format(self.ipakcb,
self.hdry,
self.nplpf,
self.options))
# LAYTYP array
f.write(self.laytyp.string)
# LAYAVG array
f.write(self.layavg.string)
# CHANI array
f.write(self.chani.string)
# LAYVKA array
f.write(self.layvka.string)
# LAYWET array
f.write(self.laywet.string)
# Item 7: WETFCT, IWETIT, IHDWET
iwetdry = self.laywet.sum()
if iwetdry > 0:
f.write('{0:10f}{1:10d}{2:10d}\n'.format(self.wetfct,
self.iwetit,
self.ihdwet))
transient = not dis.steady.all()
for k in range(nlay):
f.write(self.hk[k].get_file_entry())
if self.chani[k] <= 0.:
f.write(self.hani[k].get_file_entry())
f.write(self.vka[k].get_file_entry())
            if transient:
f.write(self.ss[k].get_file_entry())
if self.laytyp[k] != 0:
f.write(self.sy[k].get_file_entry())
if dis.laycbd[k] > 0:
f.write(self.vkcb[k].get_file_entry())
if (self.laywet[k] != 0 and self.laytyp[k] != 0):
f.write(self.wetdry[k].get_file_entry())
f.close()
return
@staticmethod
def load(f, model, ext_unit_dict=None, check=True):
"""
Load an existing package.
Parameters
----------
f : filename or file handle
File to load.
model : model object
The model object (of type :class:`flopy.modflow.mf.Modflow`) to
which this package will be added.
ext_unit_dict : dictionary, optional
If the arrays in the file are specified using EXTERNAL,
or older style array control records, then `f` should be a file
handle. In this case ext_unit_dict is required, which can be
constructed using the function
:class:`flopy.utils.mfreadnam.parsenamefile`.
check : boolean
Check package data for common errors. (default True)
Returns
-------
lpf : ModflowLpf object
ModflowLpf object.
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow()
>>> lpf = flopy.modflow.ModflowLpf.load('test.lpf', m)
"""
if model.verbose:
sys.stdout.write('loading lpf package file...\n')
if not hasattr(f, 'read'):
filename = f
f = open(filename, 'r')
# dataset 0 -- header
while True:
line = f.readline()
if line[0] != '#':
break
# determine problem dimensions
nr, nc, nlay, nper = model.get_nrow_ncol_nlay_nper()
dis = model.get_package('DIS')
if dis is None:
dis = model.get_package('DISU')
# Item 1: IBCFCB, HDRY, NPLPF - line already read above
if model.verbose:
print(' loading IBCFCB, HDRY, NPLPF...')
t = line_parse(line)
ipakcb, hdry, nplpf = int(t[0]), float(t[1]), int(t[2])
#if ipakcb != 0:
# model.add_pop_key_list(ipakcb)
# ipakcb = 53
# options
storagecoefficient = False
constantcv = False
thickstrt = False
nocvcorrection = False
novfc = False
if len(t) > 3:
for k in range(3, len(t)):
if 'STORAGECOEFFICIENT' in t[k].upper():
storagecoefficient = True
elif 'CONSTANTCV' in t[k].upper():
constantcv = True
elif 'THICKSTRT' in t[k].upper():
thickstrt = True
elif 'NOCVCORRECTION' in t[k].upper():
nocvcorrection = True
elif 'NOVFC' in t[k].upper():
novfc = True
# LAYTYP array
if model.verbose:
print(' loading LAYTYP...')
laytyp = np.empty((nlay), dtype=np.int)
laytyp = read1d(f, laytyp)
# LAYAVG array
if model.verbose:
print(' loading LAYAVG...')
layavg = np.empty((nlay), dtype=np.int)
layavg = read1d(f, layavg)
# CHANI array
if model.verbose:
print(' loading CHANI...')
chani = np.empty((nlay), dtype=np.float32)
chani = read1d(f, chani)
# LAYVKA array
if model.verbose:
print(' loading LAYVKA...')
layvka = np.empty((nlay), dtype=np.float32)
layvka = read1d(f, layvka)
# LAYWET array
if model.verbose:
print(' loading LAYWET...')
laywet = np.empty((nlay), dtype=np.int)
laywet = read1d(f, laywet)
# Item 7: WETFCT, IWETIT, IHDWET
wetfct, iwetit, ihdwet = None, None, None
iwetdry = laywet.sum()
if iwetdry > 0:
if model.verbose:
print(' loading WETFCT, IWETIT, IHDWET...')
line = f.readline()
t = line.strip().split()
wetfct, iwetit, ihdwet = float(t[0]), int(t[1]), int(t[2])
# parameters data
par_types = []
if nplpf > 0:
par_types, parm_dict = mfpar.load(f, nplpf, model.verbose)
# print parm_dict
# non-parameter data
transient = not dis.steady.all()
hk = [0] * nlay
hani = [0] * nlay
vka = [0] * nlay
ss = [0] * nlay
sy = [0] * nlay
vkcb = [0] * nlay
wetdry = [0] * nlay
# load by layer
for k in range(nlay):
# allow for unstructured changing nodes per layer
if nr is None:
nrow = 1
ncol = nc[k]
else:
nrow = nr
ncol = nc
# hk
if model.verbose:
print(' loading hk layer {0:3d}...'.format(k + 1))
if 'hk' not in par_types:
t = Util2d.load(f, model, (nrow, ncol), np.float32, 'hk',
ext_unit_dict)
else:
line = f.readline()
t = mfpar.parameter_fill(model, (nrow, ncol), 'hk', parm_dict,
findlayer=k)
hk[k] = t
# hani
if chani[k] <= 0.:
if model.verbose:
print(' loading hani layer {0:3d}...'.format(k + 1))
if 'hani' not in par_types:
t = Util2d.load(f, model, (nrow, ncol), np.float32, 'hani',
ext_unit_dict)
else:
line = f.readline()
t = mfpar.parameter_fill(model, (nrow, ncol), 'hani',
parm_dict, findlayer=k)
hani[k] = t
# vka
if model.verbose:
print(' loading vka layer {0:3d}...'.format(k + 1))
key = 'vk'
if layvka[k] != 0:
key = 'vani'
if 'vk' not in par_types and 'vani' not in par_types:
t = Util2d.load(f, model, (nrow, ncol), np.float32, key,
ext_unit_dict)
else:
line = f.readline()
key = 'vk'
if 'vani' in par_types:
key = 'vani'
t = mfpar.parameter_fill(model, (nrow, ncol), key, parm_dict,
findlayer=k)
vka[k] = t
# storage properties
if transient:
# ss
if model.verbose:
print(' loading ss layer {0:3d}...'.format(k + 1))
if 'ss' not in par_types:
t = Util2d.load(f, model, (nrow, ncol), np.float32, 'ss',
ext_unit_dict)
else:
line = f.readline()
t = mfpar.parameter_fill(model, (nrow, ncol), 'ss',
parm_dict, findlayer=k)
ss[k] = t
# sy
if laytyp[k] != 0:
if model.verbose:
print(' loading sy layer {0:3d}...'.format(k + 1))
if 'sy' not in par_types:
t = Util2d.load(f, model, (nrow, ncol), np.float32,
'sy',
ext_unit_dict)
else:
line = f.readline()
t = mfpar.parameter_fill(model, (nrow, ncol), 'sy',
parm_dict, findlayer=k)
sy[k] = t
# vkcb
if dis.laycbd[k] > 0:
if model.verbose:
print(' loading vkcb layer {0:3d}...'.format(k + 1))
if 'vkcb' not in par_types:
t = Util2d.load(f, model, (nrow, ncol), np.float32, 'vkcb',
ext_unit_dict)
else:
line = f.readline()
t = mfpar.parameter_fill(model, (nrow, ncol), 'vkcb',
parm_dict, findlayer=k)
vkcb[k] = t
# wetdry
if (laywet[k] != 0 and laytyp[k] != 0):
if model.verbose:
print(' loading wetdry layer {0:3d}...'.format(k + 1))
t = Util2d.load(f, model, (nrow, ncol), np.float32, 'wetdry',
ext_unit_dict)
wetdry[k] = t
# set package unit number
unitnumber = None
filenames = [None, None]
if ext_unit_dict is not None:
unitnumber, filenames[0] = \
model.get_ext_dict_attr(ext_unit_dict,
filetype=ModflowLpf.ftype())
if ipakcb > 0:
iu, filenames[1] = \
model.get_ext_dict_attr(ext_unit_dict, unit=ipakcb)
model.add_pop_key_list(ipakcb)
# create instance of lpf class
lpf = ModflowLpf(model, ipakcb=ipakcb, laytyp=laytyp, layavg=layavg,
chani=chani, layvka=layvka, laywet=laywet, hdry=hdry,
iwdflg=iwetdry, wetfct=wetfct, iwetit=iwetit,
ihdwet=ihdwet, hk=hk, hani=hani, vka=vka, ss=ss,
sy=sy, vkcb=vkcb, wetdry=wetdry,
                         storagecoefficient=storagecoefficient,
                         constantcv=constantcv, thickstrt=thickstrt,
                         nocvcorrection=nocvcorrection, novfc=novfc,
unitnumber=unitnumber, filenames=filenames)
if check:
lpf.check(f='{}.chk'.format(lpf.name[0]),
verbose=lpf.parent.verbose, level=0)
return lpf
@staticmethod
def ftype():
return 'LPF'
@staticmethod
def defaultunit():
return 15
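# --- Round-trip sketch (assumption: 'test.lpf' exists alongside a DIS package) ---
# Mirrors the docstring examples above: load an existing LPF file into a model
# and rewrite it, skipping the validity checks.
# >>> import flopy
# >>> m = flopy.modflow.Modflow()
# >>> lpf = flopy.modflow.ModflowLpf.load('test.lpf', m)
# >>> lpf.write_file(check=False)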
|
ShopMania provides the best offers for Octotunes by Lamaze. Find the most secure online stores that deliver in London, Birmingham, Glasgow, Liverpool, Manchester and all over the UK and buy Octotunes by Lamaze toy at discount prices. Choose from hundreds of retailers and find the best deals, compare toys offers and find the best Octotunes by Lamaze prices available. Read user comments and product reviews, see our Octotunes by Lamaze photo gallery, find all its details and features only on ShopMania. |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-11-02 09:45
from __future__ import unicode_literals
from django.db import migrations
from django.db.transaction import atomic
from django.db.utils import IntegrityError
def add_subscriptions_for_memberships(apps, schema_editor):
ContentType = apps.get_model('contenttypes', 'ContentType')
Membership = apps.get_model('memberships', 'Membership')
Subscription = apps.get_model('subscriptions', 'Subscription')
for membership in Membership.objects.all():
try:
with atomic():
Subscription.objects.create(
subscribed_to_id=membership.group.id,
subscribed_to_type=ContentType.objects.get_for_model(membership.group),
subscriber=membership.member)
except IntegrityError:
pass
class Migration(migrations.Migration):
dependencies = [
('memberships', '0014_auto_20170609_1029'),
('subscriptions', '0013_auto_20170918_1340'),
]
operations = [
migrations.RunPython(add_subscriptions_for_memberships),
]
|
The holidays are about to begin and that means a lot of different things. We see our schedules ramp up with extra hours and lots of social opportunities or demands. We may find ourselves stressed and depressed by the season and changes in the weather. We might be headed out of town a few different times to visit family and celebrate with friends, or we might open up our own homes to gatherings and get together. We’ll definitely do a lot of shopping, gift buying or gift and food making.
It is estimated by the National Retail Federation that consumers are going to spend 4% more than they did last year and that means around $1,007 per person for the holidays. I know a lot of people who drop a lot more than that, and many who spend that on food and decorations alone! It means that more than $720 billion is going to be spent on the Christmas season this year.
And while this puts a toll on our wallets, our waistbands and our free time, it also interferes with our dogs’ lives. After all, if you revisit everything just outlined above, it will all have an impact on the dogs in our homes.
You need to remember that dogs can respond to their human family’s stress level, and so you have to keep yourself in check to ensure your dog is not stressed out by you! The same goes for the blues or signs of depression.
Though your schedule and routine may be different, you cannot ask a dog to change their habits, i.e. pooping and peeing MUST be allowed at the normal hours regardless of your schedule, the weather and so on. Eating has to happen at the same time, put lights on timers if you are going to be late, and consider how your schedule varies and impacts the dogs in your home.
I know this is supposed to be an article about gifts, but we can’t go anywhere with the whole gift idea until we ensure that the place where those gifts are given or received is a safe haven for both people and pets. I’ve written extensively about unsafe foods and other household items that can be harmful to pets, situations in which dogs might be scared and bite, separation anxiety and more. All of these things can hit at the holidays, and even an attentive dog parent may not realize the stage is set for disaster.
So, before you bust out the wrapping paper or start writing your “to do” lists, be sure you understand what Christmas means to your dogs. Here’s a list of “danger zones” and tips for dealing with them.
1. The tree – From the bucket of water at the bottom of a cut tree to the miles of wires inside the strands of lights, Christmas trees can be both beautiful and hazardous. To avoid the worst issues, simply train your dogs to leave the tree alone. Make the skirt around the tree a no-go zone. Don’t use negative reinforcement.
Instead, if a dog approaches the tree (or the presents or other items you don’t want them to touch), simply toss a treat in the opposite direction of the tree and say “away”. The dog leaves the tree to get the treat, and you can then reinforce the behavior by playing or letting the dog out for a quick romp. Repeat this any time the dog nears the tree and they soon associate the word “away” with a positive. After a while, you can eliminate the reward and just say “away” and the dog will leave the tree area alone.
2. Your behavior – Whether you leave a box of candy or cookies in easy reach, put out a bunch of decorations without hanging them, or leave something out that you don’t want a dog to touch – it is YOUR fault if the dog chews, eats or destroys the item.
Curb your behaviors and pay close attention to what you are taking out and displaying or leaving out when not around. I know it is hard to be on the ball when the Christmas crush is on, but just think of the vet bills and the risks to your dogs.
I’ve known dogs to demolish a collection of papier-mâché snowmen, chew up irreplaceable strands of vintage lights (unplugged, thank heavens), demolish the cookie swap cookies in their paper bags, and so on. All of these things happened because dogs found the items appealing and/or irresistible, but more importantly, because people left these things in places the dogs could reach.
Also remember that you should not be comfortable lashing out at your nearest and dearest over your holiday stress, and that includes the dogs. I once related the tale of one Christmas when I was trying to just relax with a cup of hot chocolate (after having to rush the dogs to the vet because they’d eaten a strand of lights) and snapping when Janice came to me for comfort. She tried to lay on my lap and the hot chocolate ended up everywhere. I lashed out, shouted and terrified both dogs.
Ah, the warm holiday memories!
Stressing out, doing too much, and allowing the season to run your thoughts is unhealthy, and I had just experienced the only possible outcome. I spent the rest of the evening feeling like Scrooge, and the dogs gave me those looks that only dogs seem to manage. “What is WRONG with you?” was the gentlest message those looks conveyed. I’d gotten all caught up in a perfect Christmas, flawless decorations, too many things to do, and all the rest.
Don’t ruin years of work with your dogs by behaving badly because you are stressed. It took me days to get Janice to climb into my lap, and only then did it feel like Christmas again.
The point here is to be attentive to your behaviors and don’t get so frazzled that you forget your primary job – caring for and keeping your dogs safe. This can be safe from unhealthy foods, safe from tempting but dangerous decorations, or safe from you and your stress.
3. The company – My mother, as my readers know, is a big animal advocate with loads of dogs and cats in the house at all times (along with the periodic raccoon, rehabilitating opossum, group of ducks, and so on). The holidays at her house are more amped up in terms of chaos and bedlam, and even more so when my parents “entertain”. From the crazy dog that stands on the table like he’s a cat (he weighs 60 pounds) to the free-roaming parrot who has been trained to shout “Ho, Ho, Ho!” in a Santa-jolly voice, that household is a bit much.
The dogs get over-excited when the doorbell rings or someone clomps onto the old porch, and just a few years ago one of them got so worked up that he bit someone entering the side door. This was not a quick snarl or lunge; this was a full-on chomping down on a non-stranger’s hand.
They knew the guy for years, loved him to bits whenever he arrived and did not have any sort of aggressive tendencies. However, the household had been in a bit of chaos for days, and my dad had been cooking for several hours that morning. The smells, the turmoil and the surprise entry of someone the dogs had not heard arriving caused the biggest and goofiest of the dogs to panic.
Lashing out from fear or doing the opposite and hiding for days on end, can all result from too many people coming and going. Whether it is several visits from the UPS driver, the neighbors, a group of family paying a visit for a few days, or a combination of these things, all of the new activity can lead dogs to behave badly.
The answer? Have a solution in place ahead of time. Does the dog have a crate? If so, make sure they have easy access. Can you warn those who are visiting that you’ll want to put the dog away before they arrive? Whatever it is that your dog needs to feel normal and calm, do this to ensure no surprises or unwelcome behaviors.
4. The routine – Is your day-to-day routine going to change a lot beginning around Thanksgiving and extending into the New Year? Be honest with yourself about this. For me, it is Christmas Day alone that I find different. The rest of the year I spend at least one day per weekend doing household chores and a lot of cooking. I don’t spend a lot of time outside of the house each day, and the dogs have a real routine. On Christmas Day, I am up early and visiting family and friends. Breakfast with one, late lunch with another, and later dinner with still other loved ones. I can bring the dogs to two out of three homes, so they actually enjoy themselves (though I have to keep after friends to stop giving them so many “treats”).
What’s your schedule like during the holidays? Do you go out to a lot of parties and evening events? Are you away from home for much longer stretches of time? What about your weekend routines? Are you online shopping or doing unusual things around the house?
No matter what, your pets notice. They try to accommodate their people, but as I said earlier, potty breaks, walks and meal times cannot be shifted much for days or weeks on end. Doing so will lead to accidents in the house, upset stomachs and even changes in behavior that require re-training after the holidays are over. Rather than allowing your dogs to be affected so much by the season, really look at your routine and work around the established patterns rather than changing them for those weeks. You’ll find that your dogs are much happier and that you don’t feel so chaotic or dizzied by the events. In fact, it may inspire you to turn down some activities in order to keep a healthier routine.
The season is meant to be a joyful one, but if it means you are going to rush around like the proverbial chicken that has lost its head, or you are going to be continually cranky, bloated, annoyed, sad or all of the above, why do it? Be realistic about your capabilities and embrace the ability to say “Oh, I’m so sorry but I have plans!” Saying no to too many things is a gift you can give to yourself and your dogs, who look forward to your arrival home at the end of every single day!
And now that I’ve mentioned the word “gift”, we can start to look at those excellent gifts for the dogs and dog lovers in your life! Keep in mind that I’ve gone over all of those details above because those are things YOU can give to YOUR dogs this year as the ultimate holiday treat. Rather than being stressed, upset, away too long, and all the rest – by following those tips, you and your puppos can have a fantastic holiday season.
So, here’s the thing: I’m cheap. I’m not re-gifting-the-things-you-got-from-people-last-year cheap, but I’m no big spender. I like to make things for people to enjoy, and most of what we (me, Leroy and Janice) give out at the holidays will be “consumable”. Jams, breads, cookies, homemade scarves or hats, small trinkets from my ventures in the local pottery studio, and so on…these are what we give away each year.
However, those retail figures I researched show that I may be the odd man (woman) out on this one. People love to go out and do holiday shopping, and that is just fine. While I will always say to set a budget and stick to it (so you don’t stress out later about the sums spent), I also know that it can be difficult to stick to that goal. That is why I’ve done a bit of preliminary shopping for all of you dog fans and dog owners.
1. Paw print ornaments – Whether you buy a kit for the dog lovers in your life OR you head to a craft store and buy the supplies to make your own ornaments, it is a great idea. These are remarkably affordable whether in kit form or as a DIY option. You can easily and affordably doll up your entire tree or even sneak around and get your friends’ dogs to donate a quick print and then bake them up at home to give as a sweeter than sweet holiday surprise. It is a wonderful keepsake and I’ve known lots of people who actually end up framing such gifts to protect them over the years.
2. Never lost – Even with a harness, a dog might still bust loose and get lost. That’s why this makes a great gift for the new or first-time dog owner in your circle of family or friends. Available through L.L. Bean, this harness can have the dog’s name and owner’s contact information embroidered into the material. So, even if the puppo manages to escape, they can be easily returned thanks to the bold lettering and clear information on the harness. It is also a high-quality, comfortable harness, with the company’s years of dedication to premium products offering additional benefits. They also have reflective options. Alternatively, you can give this to your dog and reward yourself with peace of mind as the ultimate holiday gift for a dog lover!
3. Bacon bubbles – Yes, it turns out that there are A) bubble machines for dogs and B) that said machines blow bacon-scented bubbles! The gadget is affordably priced as a sort of “big gift” for a dog or two (or three), and it is self-described as blowing a windstorm of bacon bubbles…who could resist that!
4. A safe and chewable wreath – I always emphasize positive reinforcement, and if you find your dogs are a bit fixated on the holiday decorations you break out after Thanksgiving, why not give them a bit of holiday cheer to safely play with? This item from Chewy.com looks like a typical Christmas wreath but is a plush dog toy that lets your pup join in on the festivities without any worries. At such an affordable price, it is a good idea to buy a few extras for the friends whose dogs might appreciate a toy from you at Christmas!
5. The Everlasting Treat – Just like Willy Wonka’s gobstopper, this remarkable Starmark Everlasting Treat Ball Dog Chew Toy is a reasonably priced solution for dogs of all sizes. Available in small, medium or large sizes, it arrives pre-filled and can be refilled with whatever kibble or treats you like. It is indestructible and designed to mentally soothe and stimulate. It supports dogs’ natural foraging instincts and eliminates the risk of destructive behaviors in easily bored dogs.
6. Puzzle it out – I love the different Nina Ottosson dog puzzle toys.
They are not slow feeders (though I do have one I recommend below) but are instead treat-based puzzle games that ask the dog to open and close the sliders to see if there are treats hidden in the puzzle. This is a great toy for a dog prone to boredom or separation anxiety but might just be a fun option for a dog that likes to figure things out and get rewarded for their efforts. These are surprisingly affordable, so you might find them a good answer for all of the dogs and/or dog owners on your gift list.
7. A window on the world – This one is guaranteed to generate lots of smiles. The PetPeek Fence Window is designed to be installed in a fence and allow your dog’s head to fit into the unbreakable bubble where they can look outside and see the world beyond the fence. It alleviates curiosity and worry, lets a naturally protective dog feel calmer because they can see outside of the perimeter, and it can even stop dogs from trying to jump over or dig under fences. It is not the least expensive item, but at less than $40 it might be a great gift for a curious or active dog who enjoys seeing and exploring the world around them.
8. Smile! – This clever gadget has captured some of the best and most expressive dog photos I have ever seen. Totally affordable, this Pooch Selfie should be on your list for every dog owner (and yourself). Just pop it on the top of your phone, put the ball in place and watch as your dog smiles for the camera in every single shot!
9. Matching collars – I love the idea of wearing a bracelet that has the same look as my dog’s collar – and Chewy.com offers up this adorable set with a red and gold holiday design. Whether you buy it for a friend who likes to dress up their puppo for the season or for yourself and your furriest pal, it is a lovely little splurge that can become a fun holiday tradition.
10. Give bonding for the holidays – Dogs love to be touched by their humans and they love massage as much as the rest of us. These unique pet massage products from Gaiam let you give your dogs or your human friends the tools they need to enjoy lots of time spent bonding through massage. Vet approved, they are sized for different breeds and make a wonderful way to soothe sore muscles or aching joints on dogs of any age or size. They are affordably priced, meaning you can get one for your dogs and give away a few over the holidays!
11. A sofa saver – Do you know someone who gives their own dog a bit of the stink eye every time they hop down from the sofa (or worse, doesn’t let them sit on the sofa at all!)? This Duck River Textiles WUBBA pet bed is an affordable gift for you, your dog or that friend who is upset by the whole dog hair situation. Large and easy to use, it is an attractively designed bed that fits on a standard sofa and offers an easy-to-use, easy-to-clean solution. A dog can feel like a king or queen as they are surrounded by a cozy foam roll that protects the sofa and keeps owners happy.
12. Have water, will travel! – You guys know that I am a huge advocate of taking your dogs out into the world, even for nights out backpacking and camping. I also go on a lot about properly packing for such trips, and whether you are just out doing errands, on a regular walk, or taking a lengthy hike, your dog needs water. Sometimes it is difficult to carry extra bottles, but this unique Dog Bowl Water Bottle solves the issue. It uses a traditional water bottle design, and with just a squeeze, the bowl-shaped top is filled and ready for your puppo to drink. It doesn’t leak, and any leftovers are drained back into the bottle! Carry it in the car in a cupholder or with you in a pack, and your dog is always well stocked with essential water. Affordable and ideal for dogs and dog lovers alike, I’ll be giving away a few of these this year!
13. A better game of fetch – You don’t need a ball-obsessed Golden Retriever. Because I have the worst throwing arm, Janice and Leroy tend to roll their eyes if I suggest they play ball with me. Sure, they humor me, but they don’t get to run far. I invested in a Chuckit! and now have to be careful if I so much as move it while getting myself out the door each day. They have become that obsessed with this thing, and at such an affordable price it is an ideal gift for the dogs I love most. Since it also helps friends bond with their dogs, it is a wonderful way to give the gift of time to dogs and owners alike.
14. I go out a walkin’ after midnight… – Do you head out after dark to take the dog for a walk? Maybe you are a pre-dawn stroller, or you just don’t get home in time to get the dogs out before sunset? No matter what, the Beacon is a high-performance safety light that works in full dark as well as low light and bad weather (which are more dangerous than plain full-dark situations). It clips easily onto a collar or harness and quickly recharges for consistent use. It features a durable LED light that cycles through a few modes to ensure you and your puppos are easily seen. These are the perfect gift for dogs and dog lovers who are often out walking before or after sunset.
15. The slow feeder – These are amazing, and I highly recommend them for those of you who work outside of your homes. They offer a dog 14 different ways to puzzle out some treats, and while keeping a dog mentally stimulated, they also slow food consumption (i.e., help a chubby dog burn off a few pounds). They are good for supporting a dog’s natural hunting instincts, too, and can help a dog with separation anxiety feel a bit better about being alone. If you don’t need this, it is still a very affordable gift for the dog in your social circle who might benefit from a bit of fun and food control.
A grooming brush is another go-to gift for all of the dog lovers and dog owners on your list, as well as a good idea for your own dogs. It is one of the most effective grooming tools and toys, and my dogs love it when they see me coming with our bright red Kong brush in hand.
18. The treat maze – With a structured maze inside, it can be filled with your dog’s preferred treats. As they roll it about with their nose or their foot, the bones or treats move through the maze and make their way to the exit hole. This is another great way to help a dog bored or anxious about time alone, and you can use larger or smaller treats in order to control food consumption. It is a fun toy when empty, too! Reasonably priced, it is a great gift for any dog in your life.
19. Noisy toys for dog girls and boys – The Pet Qwerks Animal Sound X-Tire Ball Dog Toy is a good solution for the dog who struggles with boredom or anxiety, since it is not just a fun toy but a noise-making toy with an unusual rolling pattern. Dogs cannot easily predict where it will go or what it will sound like when it is tossed, kicked or jostled. This is not the cheapest dog toy on the market, but if your dogs or the dogs owned by friends struggle with being alone during the day or simple boredom, they are sure to love this fun and long-lasting toy.
20. The fur fix – You probably already know who this one is for. I bought this as a gift for a friend recently and she misunderstood the “message”. Every time I entered her home, she’d say she was sorry about the dog fur. I would reassure her that she was in for much worse at my home, but she never seemed comfortable with the condition of the couch. So, I picked this up for her as a gift. She opened this Furminator FurVac Vacuum Accessory and gave me a sort of half-hearted, “Oh…thanks” in response. I felt like apologizing, as I felt I’d offended. However, about three hours after I left, I got a text message telling me that the attachment was one of the best gifts she’d received in years. It really does get all different kinds of fur from upholstery and attaches to almost all vacuums. Whether for your pre-holiday cleaning spree or for a dog lover on your list, it is sure to delight…eventually!
21. The flirt pole – The name is a bit odd, I know (and no, it is not what it sounds like). Rather, the Squishy Face Studio Flirt Pole is for the most active dogs you know. A perfect training and obedience tool, it is a super durable lure at the end of a stick that can help you get a dog to run, jump and burn off lots of energy. Whether you have the active dog who really, really needs to get moving or you have a friend with a dog who might enjoy such a unique way to play, this is a fun and affordable gift.
Okay, so that is 21 good ideas for gifts for the dogs and dog lovers in your life. However, I would be remiss if I didn’t also mention a few “over the top” options. For example, I have been bombarded with questions about BarkBox gifts. These are boxes of curated snacks, toys and other items that can be sent once or as a sort of subscription. This knocks the usual budget out of the park, but if you have some dogs that deserve the royal treatment, a one-time or regular delivery of gift boxes may not be a bad idea.
Another over-the-top option keeps you in touch with your dog. The PetChatz HD is the top-of-the-line model, with aromatherapy, treat dispensing and motion detection to let you know when your dog is nearby and speak to them through your smartphone from anywhere in the world. At more than $300, it is definitely far outside the budget of the other items already considered, but if you want to be in touch with your dogs when you are away from home, this is the ultimate gift to give to them and yourself!
There are also strollers made specifically for dogs or cats. Meant to keep them safely inside a spacious and cozy enclosure, the strollers feature durable construction and materials that ensure a dog is warm and dry, but able to be outdoors and in the world with their owner. One of the best models is the Pet Gear Happy Trails Stroller, sized for small to medium sized dogs.
With gift ideas from just a few dollars to a few hundred, you should be able to find something for every dog on your list. Don’t forget the myriad books about dogs, grooming products for dogs, and garments for dogs that could also make great and affordable presents. Let this Christmas be one that you keep as stress-free and happy as possible, and start it all by doing your shopping from the comfort of home, using the tips and suggestions I’ve offered up here! Happy holidays!
import scrapy
import time
import sqlite3
from datetime import datetime
from openpyxl import Workbook
# This spider prints data to standard out, creates a spreadsheet and updates an sqlite3 database
class EcocrackenbackSpider(scrapy.Spider):
name = 'Ecocrackenback Availability'
wb = Workbook()
properties = {
'33': 'Ecocrackenback 2',
'34': 'Ecocrackenback 3',
'35': 'Ecocrackenback 4',
'36': 'Ecocrackenback 5',
'37': 'Ecocrackenback 7',
'38': 'Ecocrackenback 9',
'39': 'Ecocrackenback 10',
'40': 'Ecocrackenback 11',
'41': 'Ecocrackenback 12',
'42': 'Ecocrackenback 13',
'43': 'Ecocrackenback 14',
'46': 'Ecocrackenback 15',
'44': 'Ecocrackenback 16',
'50': 'Ecocrackenback 17',
'45': 'Ecocrackenback 18',
'49': 'Ecocrackenback 19'
}
ws1 = wb.active
ws1.append(["Ecocrackenback bookings last extracted {0}".format(time.strftime("%c"))])
start_urls = [ 'http://www.jindabyneaccommodationcentre.com.au/accommodation/{0}'.format(p) for p in properties.keys() ]
    conn = sqlite3.connect('./eco.db')
    c = conn.cursor()
    # Record this execution run; parameterized to avoid quoting issues
    c.execute("insert into eco_execution_run values (NULL, ?);",
              (time.strftime("%Y-%m-%d %H:%M:%S"),))
    eid = c.lastrowid
    conn.commit()
    def parse(self, response):
        # The last path segment of the URL is the property id keyed in self.properties
        prop_id = response.url.split('/')[-1]
        prop_name = self.properties[prop_id]
        print('\n= {0} ='.format(prop_name))
        # Parameterized queries avoid SQL injection and quoting problems
        self.c.execute("insert into eco_property values (NULL, ?, ?, ?);",
                       (self.eid, prop_name, prop_id))
        pid = self.c.lastrowid
        self.conn.commit()
        ws = self.wb.create_sheet(title=prop_name)
        print('*' * 80)
        rows = response.xpath('//*[@id="ipage"]/div[4]/table/tr')
        for index, row in enumerate(rows):
            if index > 0:  # skip the table header row
                month = row.xpath('td[1]/text()').extract()[0]
                print('== {0} =='.format(month))
                self.c.execute("insert into eco_month values (NULL, ?, ?, ?);",
                               (self.eid, pid, month))
                mid = self.c.lastrowid
                self.conn.commit()
                ws.append([month])
                # AVAILABLE and BOOKED cells differ only in their CSS class
                for status, css_class in (('AVAILABLE', '.available'),
                                          ('BOOKED', '.booked')):
                    dates = row.css(css_class).xpath('@title').extract()
                    print('{0} {1}'.format(status, dates))
                    for str_date in dates:
                        # Titles look like 'Mon 01-Jan-2018'
                        date_object = datetime.strptime(str_date, '%a %d-%b-%Y')
                        self.c.execute("insert into eco_day values (NULL, ?, ?, ?, ?)",
                                       (mid, status, str_date.split(' ')[0],
                                        date_object.strftime('%Y-%m-%d')))
                        self.conn.commit()
                    ws.append([status] + dates)
ws.append([''])
def closed(self, reason):
self.wb.save(filename = "./output.xlsx")
self.conn.commit()
self.c.close()
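The spider assumes the tables in eco.db already exist. Below is a minimal sketch of a schema that would satisfy the insert statements above; only the column order is visible in the code, so the column names here are assumptions.

import sqlite3

# Hypothetical schema matching the inserts above; column names are assumed
conn = sqlite3.connect('./eco.db')
conn.executescript('''
create table if not exists eco_execution_run (
    id integer primary key autoincrement,
    run_at text
);
create table if not exists eco_property (
    id integer primary key autoincrement,
    eid integer,   -- eco_execution_run.id
    name text,
    property_id text
);
create table if not exists eco_month (
    id integer primary key autoincrement,
    eid integer,
    pid integer,   -- eco_property.id
    month text
);
create table if not exists eco_day (
    id integer primary key autoincrement,
    mid integer,   -- eco_month.id
    status text,   -- 'AVAILABLE' or 'BOOKED'
    day_name text,
    day text       -- ISO date, e.g. '2018-01-01'
);
''')
conn.commit()
conn.close()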
|
The music of Amelia Murray, who records as Fazerdaze, is on its surface bright and summery. Pick any track at random from Morningside, her excellent full-length debut for Flying Nun, and you’ll be greeted by foamy waves of guitar and Murray’s warm, sighing voice. But zero in on the lyrics, and the sun begins to fade. A portrait of romantic uncertainty, Morningside rests Murray’s emotionally candid lyrics in silvery latticeworks of guitar, and the resulting tension is one of the things that makes the record so alluring. (It was one of our Essential Releases the week it came out.) A step up from her excellent, self-recorded, self-titled 2014 EP, Morningside is the sound of a person who—as Murray herself puts it in her song “Little Uneasy”—is feeling their way through the world, in every sense of the word.
We caught up with Murray during her shift at the New Zealand record store where she works to talk about the importance of the DIY scene and the difficulty of balancing softness and strength.
I’ve read a few other interviews with you where you credit ‘the underage scene in Wellington,’ as being crucial to your development. When I read that sentence, I had absolutely no context for what you were talking about. What is ‘the underage scene in Wellington’?
Well, I haven’t been part of that scene for a while, and I’m not sure what state it’s in right now, but when I was growing up, there were gigs in the city [of Wellington] every Friday night. There were two underage venues, and I remember seeing really great bands there. The scene was very small, but it was enough to get me and a lot of my friends into playing in bands and playing shows. Before that, I didn’t even know that playing music was an option—I had been listening to a lot of Bob Dylan and a lot of Beatles, and as much as I really loved it, it all just seemed so far away. I ended up starting a band with four other girls, and that was where I learned how to write songs and how to arrange songs. Even though the music we made is a little embarrassing to me now, that experience was so essential to me, in learning the groundwork for being a musician.
Morningside, your new record, was released by Flying Nun, which is a fairly legendary label. But it seemed like every piece of press that I read about both you and another recent signee, The Courtneys, went out of their way to mention bands like The Clean—who neither you nor The Courtneys sound very much like. I was wondering if it’s frustrating to be an artist in that situation.
I think about this a lot. I’m honored to be part of this legendary label, but at the same time, it’s quite important to me to not just rehash the past. I want to make sure I’m doing music that fits into 2017. What I’m doing can only really exist in 2017—recording at home, releasing on the Internet, building my own fan base. I mean, if I’m going to be compared to any band, it’s awesome to be compared to The Clean or The Bats. But I don’t think me or The Courtneys are derivative of either.
You named the album after the town where you now live.
Yeah, it’s the name of the suburb where I live. I finished the whole album there. I remember the day I got my first letter in the mail to my new address—I just felt so happy to be there. I was just in such a bad rut, and when I moved here I finally got out of it. I finished my EP at the end of university, and I’ve been out of university for two years now. So the last two years, I’ve been drifting, figuring things out, making lots of mistakes [laughs]. And so now, I finally finished this album, which I’d been slaving over—it just felt like I’d come through something. Morningside seemed to encapsulate me moving into a new stage of my life.
Going into this record, what were some things you knew you wanted to do differently from the EP?
I sort of see this album as being an older sister to the EP. It was just diving a little bit deeper into where the EP was going. I don’t know if I changed too much about my approach, I just further explored.
You made this EP by yourself, but there were a few more people in the mix on this one. How did that change your process?
Having other people around allowed me to push myself a lot more and to experiment a lot more. I was really worried about the first track, “Last To Sleep,” and having a few people to show it to and have them encourage me made me a little bit braver. I wasn’t so alone. I could get instant feedback.
I wanted to ask about that song, actually. The lyrics to that song—you don’t really hold anything back, emotionally. You put all of your doubts and fears and uncertainties about love front-and-center. Were there ever moments that you were second-guessing that?
I definitely worry about that. It scares me. Ultimately, I write the songs for myself first, and it freaks me out a lot. But I kind of trust that if I’m being really true to myself, then hopefully other people will be able to identify it in their own way. And if people don’t get it, that’s OK too, because I can’t really change who I am or how I feel. I trust that being yourself will bring the right people into your life.
I do think there are a few songs on this album where I’m quite scared of loving someone. Really falling in love with someone makes you really vulnerable, and that freaks me out. It freaks a lot of my friends out as well. Because it puts you in this position where you could be really hurt. So that song is about that situation.
That’s a tricky type of song to write, because rather than just writing a straight love song, what you’re trying to do is put shape around these indefinite feelings. So you’re afraid, but you’re happy at the same time. Striking that balance is hard to do.
A lot of the love songs on this album—like that track, and maybe ‘Shoulders’ as well—I’m not looking at the other person so much as I’m looking in a mirror back at myself. So they’re kind of love songs, but they’re inwardly-focused. I’m trying to figure myself out rather than pushing all of the focus on the other person.
‘Shoulders’ is one of the other songs I wanted to talk about. You get at this strong emotional content in the song, but you do it through a series of images rather than through literal narration. It feels more like looking at a photograph of someone lying in bed.
When I was writing that, I was trying to describe a scene in my head, and the feeling that came with it. So the song comes from a visual place. I put that into words, and then I refined it to as few words as I could, while still getting across both that image and the internal feeling.
On the other end of the spectrum, ‘Lucky Girl’ is such a bright, buoyant song. Did you go into that song with a sense of optimism, or is there a darker subtext that I’m missing?
I think on first listen, that song does sound overtly happy. But I did try to put in an undercurrent of uncertainty. It’s the same thing—being afraid of getting close to someone. It’s almost like I’m trying to convince myself that I’m a lucky girl.
Artists often describe records as a snapshot of a moment in their life. What do you hope Morningside captures about your life right now?
I think the thing I’ll feel proud about when I look back on this in twenty years is that there is strength in the album, but there’s also sensitivity and softness. I’ve struggled with that my whole life—how do you get through life being a sensitive, soft person? Life isn’t made for people like me. And so Morningside symbolizes those two things to me—the softness and the strength.
This entry was written by Editorial, posted on May 18, 2017 at 6:59 am, filed under featured music and tagged Fazerdaze, Indiepop. Bookmark the permalink. Follow any comments here with the RSS feed for this post. Post a comment or leave a trackback: Trackback URL. |
import re
from collections import defaultdict
from livestreamer.compat import urljoin
from livestreamer.exceptions import PluginError, NoStreamsError
from livestreamer.plugin import Plugin
from livestreamer.stream import AkamaiHDStream, HLSStream
from livestreamer.utils import urlget, verifyjson, res_xml, parse_json
SWF_URL = "http://cdn.livestream.com/swf/hdplayer-2.0.swf"
class Livestream(Plugin):
@classmethod
def default_stream_types(cls, streams):
return ["akamaihd", "hls"]
    @classmethod
    def can_handle_url(cls, url):
        return "new.livestream.com" in url
def _get_stream_info(self):
res = urlget(self.url)
match = re.search("window.config = ({.+})", res.text)
if match:
config = match.group(1)
return parse_json(config, "config JSON")
def _parse_smil(self, url, swfurl):
res = urlget(url)
smil = res_xml(res, "SMIL config")
streams = {}
        # The SMIL 2.0 namespace prefixes every element we look up
        ns = "{http://www.w3.org/2001/SMIL20/Language}"
        httpbase = smil.find(ns + "head/" + ns + "meta[@name='httpBase']")
        if httpbase is None or not httpbase.attrib.get("content"):
            raise PluginError("Missing HTTP base in SMIL")
        httpbase = httpbase.attrib.get("content")
        videos = smil.findall(ns + "body/" + ns + "switch/" + ns + "video")
for video in videos:
url = urljoin(httpbase, video.attrib.get("src"))
bitrate = int(video.attrib.get("system-bitrate"))
streams[bitrate] = AkamaiHDStream(self.session, url,
swf=swfurl)
return streams
def _get_streams(self):
self.logger.debug("Fetching stream info")
info = self._get_stream_info()
if not info:
raise NoStreamsError(self.url)
event = verifyjson(info, "event")
streaminfo = verifyjson(event, "stream_info")
if not streaminfo or not streaminfo.get("is_live"):
raise NoStreamsError(self.url)
streams = defaultdict(list)
play_url = streaminfo.get("play_url")
if play_url:
swfurl = info.get("hdPlayerSwfUrl") or SWF_URL
if not swfurl.startswith("http://"):
swfurl = "http://" + swfurl
qualities = streaminfo.get("qualities", [])
smil = self._parse_smil(streaminfo["play_url"], swfurl)
for bitrate, stream in smil.items():
name = "{0}k".format(bitrate/1000)
for quality in qualities:
if quality["bitrate"] == bitrate:
name = "{0}p".format(quality["height"])
streams[name].append(stream)
m3u8_url = streaminfo.get("m3u8_url")
if m3u8_url:
hls_streams = HLSStream.parse_variant_playlist(self.session,
m3u8_url,
namekey="pixels")
for name, stream in hls_streams.items():
streams[name].append(stream)
return streams
__plugin__ = Livestream
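As a rough usage sketch, a plugin file like this is normally picked up by a Livestreamer session, which routes matching URLs to it via can_handle_url(); the event URL below is a hypothetical placeholder, not a real page.

# Minimal usage sketch; the URL is a hypothetical placeholder
from livestreamer import Livestreamer

session = Livestreamer()
streams = session.streams("http://new.livestream.com/accounts/1/events/1")
for name, stream in streams.items():
    print(name, stream)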
|
In terms of travel and tourism, China is leading the race! Over the last two decades, the volume of international trips by Chinese travelers grew from 10 million (in 2000) to 83 million (in 2012), and domestic tourism reported 3.3 billion trips in 2013, according to the United Nations World Tourism Organization (UNWTO). When you consider that travelers from China are the top source of tourism spending in the world, it’s no wonder that travel industry leaders are focused on this hotspot.
Several factors have fueled this significant growth: rapid urbanization, rising disposable incomes, and the relaxation of government restrictions on foreign travel. In fact, the Chinese government has a five-year plan focused on the travel and tourism market and aims to double leisure spending to $886 billion (5.5 trillion yuan) by 2020.
This is all excellent news for the vacation ownership industry! The percentage of qualified households that own a timeshare rests at less than 0.3 percent, making for a nearly 20 million household opportunity (ARDA’s International Foundation 2013 World Wide Shared Vacation Ownership Report). Timeshare development in the region is growing and poised for a supportive business environment—given the government’s direction to develop new business models for the travel industry.
The only challenge continues to be China’s few vacation days offered to workers. However, that, too, is slowly changing. The government adopted new regulations in 2008 offering more public holidays and paid annual leave. And, as the needs of China’s travelers are growing, the vacation ownership industry stands ready to create luxurious vacation experiences for a new class of world travelers. |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
File : extractRegionFromCoordinates.py
Author : Dominik R. Laetsch, dominik.laetsch at gmail dot com
Version : 0.1
Description :
- INPUT: fasta file, gff file (with intron features), coordinates
- OUTPUT: line for each intron
"""
from __future__ import division
import sys, time
class DataObj():
def __init__(self, filename):
self.filename = filename
self.geneObj_order = []
self.geneObj_dict = {}
def add_geneObj(self, geneObj):
if not geneObj.name in self.geneObj_dict:
self.geneObj_order.append(geneObj.name)
self.geneObj_dict[geneObj.name] = geneObj
def add_intronObj(self, intronObj):
gene_name = intronObj.name
if gene_name in self.geneObj_dict:
self.geneObj_dict[gene_name].add_intronObj(intronObj)
else:
sys.exit("ERROR1")
def yield_introns(self):
for gene_name in self.geneObj_order:
geneObj = self.geneObj_dict[gene_name]
introns = ""
if geneObj.strand == "+":
for idx, intronObj in enumerate(geneObj.introns):
introns += "%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n" % ( \
geneObj.contig, \
intronObj.start, \
intronObj.stop, \
geneObj.strand, \
geneObj.name + "_" + str(idx + 1), \
geneObj.name, \
idx + 1, \
len(geneObj.introns) \
)
elif geneObj.strand == "-":
for idx, intronObj in enumerate(reversed(geneObj.introns)):
introns += "%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n" % ( \
geneObj.contig, \
intronObj.start, \
intronObj.stop, \
geneObj.strand, \
geneObj.name + "_" + str(idx + 1), \
geneObj.name, \
idx + 1, \
len(geneObj.introns) \
)
yield introns
    def write_intron_pos(self):
        out_f = self.filename + ".intronpos.txt"
        print "Writing %s" % out_f
        with open(out_f, "w") as fh:
            # Use self here; the original referenced the global dataObj by accident
            for string in self.yield_introns():
                fh.write(string)
class GeneObj():
def __init__(self, contig, strand, name):
self.contig = contig
self.strand = strand
self.name = name
self.introns = []
def add_intronObj(self, intronObj):
self.introns.append(intronObj)
class IntronObj():
def __init__(self, name, start, stop):
self.name = name
self.start = start
self.stop = stop
def parse_gff(gff_f):
dataObj = DataObj(gff_f)
with open(gff_f) as fh:
for line in fh:
if not line.startswith("#"):
temp = line.rstrip("\n").split()
if temp[2] == "intron":
contig = temp[0]
start = int(temp[3])
stop = int(temp[4])
strand = temp[6]
name = temp[8].replace("Parent=", "")
geneObj = GeneObj(contig, strand, name)
intronObj = IntronObj(name, start, stop)
dataObj.add_geneObj(geneObj)
dataObj.add_intronObj(intronObj)
#print "%s\t%s\t%s\t%s\t%s" % (contig, start, stop, strand, name)
return dataObj
def parse_fasta(fasta_f):
    fasta_dict = {}
    header, seq = '', ''
    with open(fasta_f) as fh:
        for line in fh:
            if line.startswith(">"):
                if (seq):
                    fasta_dict[header] = seq.upper()
                    seq = ''
                header = line.rstrip("\n").lstrip(">").split(" ")[0]
            else:
                seq += line.rstrip("\n")
    # Store the last sequence, upper-cased like the others
    fasta_dict[header] = seq.upper()
    return fasta_dict
def compute_splice_sites(fasta_dict, dataObj, upstream_start, downstream_start, upstream_end, downstream_end):
'''
1234567890 2 9 + 4 7 -
-23----89- 0 1 1 0 D=2:4 A=8:10 => D=(start-1)-UP:(start-1)+DOWN A=(end-1)-UP:(end-1)
---4567--- 0 1 1 0 A=4:6 D=6:8
0123456789
AGTGATGAGG D=1:3 A=7:9 D=(start-1)-UP:(start)+DOWN A=(end-1)-UP:(end)+DOWN
GCACTACTCC A=3:5 D=5:7 A=(start-1)-UP:(start)+DOWN D=(end-1)-UP:end+DOWN
0123456789
'''
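    # Worked example (values assumed for illustration): for a '+' strand intron
    # with start=101, end=200 and the splice-site call shown in the usage text
    # (upstream_start=0, downstream_start=1, upstream_end=1, downstream_end=0),
    # the windows computed below are:
    #   donor    = seq[(101-1)-0 : 101+1] = seq[100:102] -> first two intron bases (canonically 'GT')
    #   acceptor = seq[(200-1)-1 : 200+0] = seq[198:200] -> last two intron bases (canonically 'AG')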
for introns in dataObj.yield_introns():
for line in introns.split("\n"):
if (line):
field = line.rstrip("\n").split("\t") # LOCATION\tSTART\tSTOP\tORIENTATION\tNAME
location = field[0]
start = int(field[1])
end = int(field[2])
strand = field[3]
name = field[4]
gene = field[5]
intron_pos = field[6]
intron_count = field[7]
donor_start, donor_end, acceptor_start, acceptor_end = 0,0,0,0
if location in fasta_dict:
if end - start > MIN_INTRON_LENGTH:
if strand == '+':
donor_start = (start-1)-upstream_start
donor_end = start + downstream_start
acceptor_start = (end-1)-upstream_end
acceptor_end = end + downstream_end
if donor_start < 0:
donor_start = 0
if acceptor_end > len(fasta_dict[location]):
acceptor_end = len(fasta_dict[location])
elif strand == '-':
acceptor_start = (start-1) - downstream_end
acceptor_end = start + upstream_end
donor_start = (end-1) - downstream_start
donor_end = end + upstream_start
if donor_start > len(fasta_dict[location]):
donor_start = len(fasta_dict[location])
if acceptor_end < 0:
acceptor_end = 0
else:
sys.exit("[ERROR] - strand should be +/-, not : %s" % (strand))
#print "Start", donor_start, donor_end
#print str(donor_start) + ":" + str(donor_end) + "," + str(acceptor_start) + ":" + str(acceptor_end)
#donor_header = ">donor;"+ str(start) + "|" + str(donor_start) + ":" + str(donor_end) + ":" + strand #+ " " + fasta_dict[location]
donor_seq = getRegion(fasta_dict[location], donor_start, donor_end, strand)
#acceptor_header = ">acceptor;"+ str(end) + "_" + str(acceptor_start) + ":" + str(acceptor_end) + ":" + strand #+ " " + fasta_dict[location]
acceptor_seq = getRegion(fasta_dict[location], acceptor_start, acceptor_end, strand)
print "%s\t%s\t%s" % ("\t".join(field), donor_seq, acceptor_seq)
#print ">Donor_%s\n%s\n>Acceptor_%s\n%s" % (name, donor_seq, name, acceptor_seq)
                else:
                    print "[WARN] - %s\t : Intron length is below threshold of %s " % ("\t".join(field), MIN_INTRON_LENGTH)
            else:
                print line
                print fasta_dict.keys()
                sys.exit("[ERROR] %s not found in fasta file %s" % (location, fasta))
def getRegion(seq, start, stop, strand):
    region = seq[int(start):int(stop)]
    if strand == '-':
        # Reverse-complement the region; bases other than ACGTN become 'N'
        # (the original silently dropped them, which changed the region length)
        complement = {'A':'T','C':'G','G':'C','T':'A','N':'N'}
        region = "".join([complement.get(nt.upper(), 'N') for nt in region[::-1]])
    elif strand == '+':
        pass
    else:
        sys.exit("[ERROR] - strand should be +/-, not : %s" % (strand))
    return region
if __name__ == "__main__":
MIN_INTRON_LENGTH = 4
try:
gff_f = sys.argv[1]
fasta = sys.argv[2]
upstream_start = int(sys.argv[3])
downstream_start = int(sys.argv[4])
upstream_end = int(sys.argv[5])
downstream_end = int(sys.argv[6])
    except (IndexError, ValueError):
sys.exit("Usage: ./extractRegionFromCoordinates.py [GFF] [FASTA] [US] [DS] [UE] [DE] \n\n\
[GFF] : Intron features have to be present in GFF (use Genometools)\n\
[US] : Positions upstream of start of intron feature in GFF\n\
[DS] : Positions downstream of start of intron feature in GFF\n\
[UE] : Positions upstream of end of intron feature in GFF\n\
[DE] : Positions downstream of end of intron feature in GFF\n\n - Extracting splice sites : \n\n ./extractRegionFromCoordinates.py nGr.v1.0.gff3 nGr.v1.0.fa 0 1 1 0 \n\n")
dataObj = parse_gff(gff_f)
#dataObj.write_intron_pos()
fasta_dict = parse_fasta(fasta)
compute_splice_sites(fasta_dict, dataObj, upstream_start, downstream_start, upstream_end, downstream_end)
|
"""
CLI Tools - Command Line Interface building tools
Example usage::
from clitools import CliApp
cli = CliApp()
@cli.command
def hello(args):
print("Hello, world!")
@cli.command
@cli.parser_arg('--name')
def hello2(args):
print("Hello, {0}!".format(args.name))
if __name__ == '__main__':
cli.run_from_command_line()
"""
import argparse
import logging
import sys
__version__ = '0.4a2' # sync with setup.py!
logger = logging.getLogger('clitools')
class Command(object):
def __init__(self, func, func_info):
self.func = func
self.func_info = func_info
        logger.debug('-- New Command instance')
def __call__(self, parsed_args):
"""
We need to map parsed arguments to function arguments
before calling..
"""
args = []
kwargs = {}
for argname in self.func_info['positional_args']:
args.append(getattr(parsed_args, argname))
for argname, default in self.func_info['keyword_args']:
kwargs[argname] = getattr(parsed_args, argname, default)
return self.func(*args, **kwargs)
class CliApp(object):
class arg(object):
"""Class used to wrap arguments as function defaults"""
def __init__(self, *a, **kw):
self.args = a
self.kwargs = kw
def __init__(self, prog_name='cli-app'):
self.prog_name = prog_name
self.parser = argparse.ArgumentParser(prog=prog_name)
self.subparsers = self.parser.add_subparsers(help='sub-commands')
def command(self, func=None, **kwargs):
"""
Decorator to register a command function
:param name: Name for the command
:param help: Help text for the function
"""
def decorator(func):
self._register_command(func, **kwargs)
return func
if func is None:
return decorator
return decorator(func)
def _register_command(self, func, **kwargs):
"""
Register a command function. We need to hack things a bit here:
- we need to change argument defaults in the function (copying it)
- The original function is copied, and default values changed
- The new function is copied in the subparser object
WARNING! variable arguments / keyword arguments are not supported
(yet)! They are just stripped & ignored, ATM..
"""
func_info = self._analyze_function(func)
## WARNING! We're not supporting things like this, right now:
## def func(a, ((b, c), d)=((1, 2), 3)): pass
## Maybe, we should fallback to requiring "flat" arguments,
## at least for the moment?
## Read keyword arguments
name = kwargs.get('name')
if name is None:
name = func_info['name']
## Strip the command_ prefix from function name
if name.startswith('command_'):
name = name[len('command_'):]
help_text = kwargs.get('help')
if help_text is None:
help_text = func_info['help_text']
## Create the new subparser
subparser = self.subparsers.add_parser(name, help=help_text)
## Process required positional arguments
for argname in func_info['positional_args']:
logger.debug('New argument: {0}'.format(argname))
subparser.add_argument(argname)
## Process optional keyword arguments
func_new_defaults = []
for argname, argvalue in func_info['keyword_args']:
if isinstance(argvalue, self.arg):
## We already have args / kwargs for this argument
a = (['--' + argname] + list(argvalue.args))
kw = argvalue.kwargs
func_new_defaults.append(kw.get('default'))
else:
## We need to guess args / kwargs from default value
a, kw = self._arg_from_free_value(argname, argvalue)
func_new_defaults.append(argvalue) # just use the old one
            logger.debug('New argument: {0!r} {1!r}'.format(a, kw))
subparser.add_argument(*a, **kw)
func.func_defaults = tuple(func_new_defaults)
## todo: replace defaults on the original function, to strip
## any instance of ``self.arg``?
new_function = Command(func=func, func_info=func_info)
## Positional arguments are treated as required values
subparser.set_defaults(func=new_function)
return subparser # for further analysis during tests
def _analyze_function(self, func):
"""
Extract information from a function:
- positional argument names
- optional argument names / default values
- does it accept *args?
- does it accept **kwargs?
"""
import inspect
info = {}
info['name'] = func.func_name
# todo extract arguments docs too!
info['help_text'] = inspect.getdoc(func)
argspec = inspect.getargspec(func)
is_generator = inspect.isgeneratorfunction(func)
info['accepts_varargs'] = argspec.varargs is not None
info['varargs_name'] = argspec.varargs
info['accepts_kwargs'] = argspec.keywords is not None
info['kwargs_name'] = argspec.keywords
info['is_generator'] = is_generator
arg_defaults = argspec.defaults or []
akw_limit = len(argspec.args) - len(arg_defaults)
info['positional_args'] = argspec.args[:akw_limit]
kwargs_names = argspec.args[akw_limit:]
assert len(kwargs_names) == len(arg_defaults)
info['keyword_args'] = zip(kwargs_names, arg_defaults)
return info
def _arg_from_free_value(self, name, value):
"""
Guess the correct argument type to be built for free-form
arguments (default values)
"""
logger.debug('_arg_from_free_value({0!r}, {1!r})'.format(name, value))
arg_name = '--' + name
def o(*a, **kw):
return a, kw
if value is None:
## None: this is just a generic argument, accepting any value
logger.debug('None -> generic optional argument')
return o(arg_name, default=value)
elif (value is True) or (value is False):
## Boolean value: on/off flag
logger.debug('bool -> flag')
action = 'store_false' if value else 'store_true'
return o(arg_name, action=action, default=value)
elif isinstance(value, (list, tuple)):
## List/tuple: if has at least two items, it will
## be used for a 'choice' option, else for an 'append'
## list.
            if len(value) > 1:
                ## Choices: argparse wants choices=..., not optparse's type='choice'
                logger.debug('List with length >= 2 -> choices')
                return o(arg_name, choices=value, default=value[0])
else:
## Append (of type)
type_ = None
logger.debug('List with length < 2 -> list of items')
if len(value) > 0:
## This is [<type>]
type_ = (value[0]
if isinstance(value[0], type)
else type(value[0]))
return o(arg_name, type=type_, action='append', default=[])
else:
## Anything of this type will fit..
## todo: make sure the type is a supported one?
if isinstance(value, type):
type_ = value
default = None
else:
type_ = type(value)
default = value
logger.debug('Generic object of type {0!r} (default: {1!r})'
.format(type_, default))
# import ipdb; ipdb.set_trace()
return o(arg_name, type=type_, default=default)
def run(self, args=None):
"""Handle running from the command line"""
parsed_args = self.parser.parse_args(args)
function = getattr(parsed_args, 'func', None)
if function is None:
## Emulate Python2 behavior..
self.parser.print_help(sys.stderr)
sys.exit(2)
# function = parsed_args.func
return parsed_args.func(parsed_args)
## Utility methods
##----------------------------------------
def split_function_doc(doc):
"""
Performs a very simple splitting of a function documentation:
- separate blocks starting with :name from the rest of the
function documentation
Note: this function expects the passed-in docstring
to be already cleaned, usually via pydoc.getdoc().
:yields: two-tuples (block_info, block_data).
- block info is a tuple of strings describing the workds
between the first two colons, or None
- block data is the block data without any prefix
"""
def tokenize_blocks(lines):
## We need to loop until we find a line starting with :
## or the end of the docstring.
buf = []
for line in lines:
if line.startswith(':'):
if len(buf):
yield buf
buf = []
buf.append(line)
if len(buf):
yield buf
for block in tokenize_blocks(doc.splitlines()):
block_data = '\n'.join(block).strip()
if block_data.startswith(':'):
_, args, block_data = block_data.split(':', 2)
block_info = tuple(args.split())
else:
block_info = None
yield block_info, block_data.strip()
def extract_arguments_info(doc):
"""
Extract (organized) arguments information from a docstring.
This will extract all the :param: and :type: arguments
from the function docstring and return them in a dictionary,
along with function docstring.
>>> extract_arguments_info('''
... My example function.
...
... :param spam: Some spam argument
... :type spam: str
... :param int eggs: Some eggs argument
... :param bacon: Yummy!
... ''') == {
... 'function_help': 'My example function.\\n',
... 'params_help': {
... 'spam': {'help': 'Some spam argument', 'type': 'str'},
... 'eggs': {'help': 'Some eggs argument', 'type': 'int'},
... 'bacon': {'help': 'Yummy!'}
... }
... }
True
"""
from collections import defaultdict
func_doc = []
args_doc = defaultdict(dict)
for block_info, block_data in split_function_doc(doc):
if block_info is None:
func_doc.append(block_data)
else:
block_type = block_info[0]
# :param <type> <name>: <doc>
# :param <name>: <doc>
# :type <name>: <type>
if block_type in ('param', 'type'):
if block_type == 'param' and len(block_info) == 3:
p_type, p_name = block_info[1:3]
p_help = block_data
args_doc[p_name]['type'] = p_type
args_doc[p_name]['help'] = p_help
elif block_type == 'param' and len(block_info) == 2:
p_name = block_info[1]
p_help = block_data
args_doc[p_name]['help'] = p_help
elif block_type == 'type' and len(block_info) == 2:
p_name = block_info[1]
p_type = block_data
args_doc[p_name]['type'] = p_type
else:
raise ValueError("Wrong block information")
return {
'function_help': '\n'.join(func_doc).strip() + '\n',
'params_help': dict(args_doc),
}
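## A minimal usage sketch (assumed; not part of the library itself). It shows
## how free-form defaults are mapped to CLI flags by _arg_from_free_value():
## a bool default becomes an on/off flag, an int default a typed option.
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    demo = CliApp(prog_name='demo')

    @demo.command
    def greet(name, shout=False, times=1):
        """Greet someone, optionally shouting, one or more times."""
        for _ in range(times):
            msg = 'Hello, {0}!'.format(name)
            print(msg.upper() if shout else msg)

    # 'name' -> required positional; 'shout' -> --shout flag (store_true);
    # 'times' -> --times option of type int, defaulting to 1.
    demo.run(['greet', 'world', '--shout', '--times', '2'])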
|
Assassin's Creed Odyssey: Can I Run It?
There’s been a bit of a hoo-ha since the launch of Assassin’s Creed Odyssey because it doesn’t run at all on CPUs that lack AVX extension support. In particular, this affected 1st-Gen Intel Core i3, i5, i7, Pentium G and some Intel Xeon processors.
“Thanks to your support, we were able to identify the common cause of a few instances of reported crashes: the impacted CPUs didn’t support AVX,” said Ubisoft at the time. AVX, or Advanced Vector Extensions, refers to a set of CPU instructions first introduced in 2011. Assassin’s Creed Odyssey was built to use these instructions and likely wasn’t actually tested on 1st-Gen Intel Core processors, meaning there was no compatibility.
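If you want to check whether a given CPU supports AVX, no special software is needed on Linux; the kernel lists instruction-set flags in /proc/cpuinfo (on Windows, a tool such as CPU-Z shows the same information). A minimal sketch:

# Quick AVX check on Linux: look for 'avx' among the CPU's flags
with open('/proc/cpuinfo') as f:
    for line in f:
        if line.startswith('flags'):
            print('AVX supported' if 'avx' in line.split() else 'No AVX support')
            break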
A new AC Odyssey PC patch, version 1.03, has now rolled out, and it appears to fix the problem. There is no mention of a fix in the meagre patch notes, but players with affected processors are now reporting that Assassin’s Creed Odyssey runs correctly.
The patch size on PC is about 1.9GB and it’s available right now through both Uplay and Steam.
Up For Debate - Which Game Series Have You Had Enough Of?
Up For Debate - Assassin's Creed Origins or AC Odyssey, Which is the Better Game?
Does it also fix the grind that shoves microtransactions up the players where the sun doesn't shine??
Has anyone actually noticed a performance and stability change?
Wow man, that's a big bottleneck going on there, especially for recent Assassin's Creed games xD.
i9 9900K, but it depends on its performance.
I hope you are talking about your CPU and not getting the RTX series GPU.
haha LOL. I nearly died laughing.
hey hation, do you have problems playing 1080p/60 fps?
# ============================================================================
#
# Copyright (C) 2007-2010 Conceptive Engineering bvba. All rights reserved.
# www.conceptive.be / [email protected]
#
# This file is part of the Camelot Library.
#
# This file may be used under the terms of the GNU General Public
# License version 2.0 as published by the Free Software Foundation
# and appearing in the file license.txt included in the packaging of
# this file. Please review this information to ensure GNU
# General Public Licensing requirements will be met.
#
# If you are unsure which license is appropriate for your use, please
# visit www.python-camelot.com or contact [email protected]
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#
# For use of this library in commercial applications, please contact
# [email protected]
#
# ============================================================================
"""
Created on Jul 19, 2009
@author: tw55413
"""
camelot_maxint = 2147483647
camelot_minint = -2147483648
camelot_maxfloat = 1e15
camelot_minfloat = -1e15
camelot_time_format = 'hh:mm'
strftime_time_format = '%H:%M'
camelot_datetime_format = 'dd-MM-yyyy hh:mm'
strftime_datetime_format = '%d-%m-%Y %H:%M'
camelot_date_format = 'dd-MM-yyyy'
strftime_date_format = '%d-%m-%Y'
camelot_small_icon_width = 16
camelot_small_icon_height = 16
MAXIMIZED = 'maximized'
MINIMIZED = 'minimized'
|
If you have questions, call us now 904-437-4079 and we'll answer them quickly. It is our commitment to make your dumpster rental in Jacksonville as hassle free as possible. We understand that you may have a few questions about us and we are certain that the best way to answer them is to talk to us. Call us now and a customer-friendly representative will give you clear-cut answers.
Often dumpster rental pricing in Jacksonville is not as well-defined as it should be. There are dumpster rental companies that tack on extra fees which you wouldn’t be aware of until it’s already too late. We are proud to say that we don’t practice this. It’s our policy for our dumpster rental pricing to be clear and well-defined. Included in our affordable pricing are delivery, landfill charges, a specific weight limit, and pick up. We offer well-defined and reasonable pricing with no hidden surprises.
If you’re living in Jacksonville and planning to rent a dumpster for whatever reason, such as a renovation or remodeling project, this simple guide we’ve created is perfect for you. Renting a dumpster should be easy, and knowing which questions to ask and what information to provide can make the whole process more efficient; in the end, it will cost you less money and time.
- First, before you ask for a price quote, gather the right information which you need to provide us. If you need help, just give us a call 904-437-4079.
It is worthwhile to spend some time gathering the information that Top Dog Dumpster Rental in Jacksonville will need from you. At a minimum, you will need to provide us some basic information so we can give you the right price quote.
The very first thing any dumpster rental company in Jacksonville would want to know from you is the kind of waste material and how much of it you’re planning to put in their dumpster. This helps us provide you with the right recommendation on the best dumpster size most appropriate for your job. In general, there are four rental dumpster sizes available, and these are 10, 20, 30, and 40 yards.
Once you already have the needed information to get your free price quote from Top Dog Dumpster Rental then you should give us a call at 904-437-4079. If you need help, we are here to guide you along the way.
Here at Top Dog Dumpster Rental in Jacksonville, we do our best not to charge you with additional fees. We try to be transparent and help you understand which fees will be charged depending on how you use the dumpster.
If you’re planning to place the dumpster on your property in Jacksonville, make sure not to place it on any grassy or soft ground. This is because dumpsters are very heavy, and without proper support they can do damage to your property or sink into the ground.
4669 Swilcan Bridge Ln. S. |
import datetime
import feedparser
from django.conf import settings
from django.template.loader import render_to_string
from django.utils.timezone import make_aware, utc
from boxes.models import Box
from .models import BlogEntry, Feed
def get_all_entries(feed_url):
""" Retrieve all entries from a feed URL """
d = feedparser.parse(feed_url)
entries = []
for e in d['entries']:
        # published_parsed is a time.struct_time; only its first six fields
        # (year through second) map onto datetime's positional arguments
        published = make_aware(
            datetime.datetime(*e['published_parsed'][:6]), timezone=utc
        )
entry = {
'title': e['title'],
'summary': e.get('summary', ''),
'pub_date': published,
'url': e['link'],
}
entries.append(entry)
return entries
def _render_blog_supernav(entry):
""" Utility to make testing update_blogs management command easier """
return render_to_string('blogs/supernav.html', {'entry': entry})
def update_blog_supernav():
"""Retrieve latest entry and update blog supernav item """
try:
latest_entry = BlogEntry.objects.filter(
feed=Feed.objects.get(
feed_url=settings.PYTHON_BLOG_FEED_URL,
)
).latest()
except (BlogEntry.DoesNotExist, Feed.DoesNotExist):
pass
else:
rendered_box = _render_blog_supernav(latest_entry)
box, _ = Box.objects.update_or_create(
label='supernav-python-blog',
defaults={
'content': rendered_box,
'content_markup_type': 'html',
}
)
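## A minimal usage sketch (assumed; e.g. from a management command). The
## import path 'blogs.utils' is a guess based on this module's imports:
##
##     from django.conf import settings
##     from blogs.utils import get_all_entries, update_blog_supernav
##
##     entries = get_all_entries(settings.PYTHON_BLOG_FEED_URL)
##     for entry in entries[:5]:
##         print(entry['pub_date'], entry['title'])
##     update_blog_supernav()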
|
Technology can be used to create more positive customer experiences, but many companies fall short in this regard. How can technology be used effectively to bring us closer to our customers rather than further away? Win big in Customer Relationships with the infographic of today.
Let’s stay a little realistic. Most business owners today still belong to the generation that built these industries and made them great. Technology has evolved at lightning speed over the last century, and it is not easy to stay on track. Generation Y and Millennials have only just arrived as our most prominent consumers, but we do not yet see them as business owners or business drivers in the traditional way.
But that does not mean avoiding technology. Especially when it comes to customer engagement and customer experience, tech can play a valuable role. 42% of business leaders believe automation technologies will improve the customer experience aspect of their business. 48% say they are currently using automation technology, with another 40% planning to implement some elements of it by 2020.
80% of businesses are planning to use chatbots, software platforms that behave like a human, by 2020 to sharpen Customer Relationships.
But the question still stays unanswered and valid. How can technology bring us closer to our customer? The answers and highlights we can read in the infographic prepared by digital customer experience platform CUBE.
The graphics drive home the point about authenticity, which is what our clients want. Emerging technologies like Artificial Intelligence in particular seem to fall short when it comes to authenticity. Millennials are currently the largest living generation: a group of tech-savvy, socially conscious consumers who know what they want. And since they can have whatever they want delivered in a lovely package to their door, building a relationship with them is more vital than ever.
At the same time, big data and AI are on the rise, but this hardly provides authenticity, does it? Moreover, when people think about AI, one of their first thoughts is broadly about its lack of humanity. Does this disqualify the technology for building Customer Relationships?
The infographic shows more elements which we need to care about to utilize technology in our customer relationship. Utilize your data to generate messaging that is truly relevant because we can. Use automation to your advantage and don’t permit it over-rolling you like a caterpillar of technology.
Furthermore, we can read some industry quotes from Steve Jobs, John S.Watson, and Anita Brearton.
Allover it is a crucial lesson for any marketer and business owner who wants to make technology work for them in Customer Relationships. Share your opinion in the commenting section below to emerging technologies like AI and AR. |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# __BEGIN_LICENSE__
# Copyright (c) 2009-2013, United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration. All
# rights reserved.
#
# The NGT platform is licensed under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# __END_LICENSE__
import sys
import os, glob, re, shutil, subprocess, string, time, errno, optparse
import IrgFileFunctions, IrgIsisFunctions, IrgPbsFunctions, IrgSystemFunctions
def man(option, opt, value, parser):
print >>sys.stderr, parser.usage
print >>sys.stderr, '''\
Submits parallel_stereo processing as a chain of dependent PBS jobs.
'''
sys.exit()
#------------------------------------------------------------------------------
def main(argsIn):
try:
usage = "usage: pbs_parallel_stereo.py (same inputs as parallel_stereo plus new options)"
parser = IrgSystemFunctions.PassThroughOptionParser(usage=usage) # Use parser that ignores unknown options
parser.add_option("--num-correlation-nodes", dest="numCorrelationNodes", type='int', default=1,
help="Number of nodes to use for the two correlation steps")
parser.add_option("--num-triangulation-nodes", dest="numTriangulationNodes", type='int', default=1,
help="Number of nodes to use for the triangulation steps")
parser.add_option('--node-type', dest='nodeType', default='wes',
help='Type of processing node to request (wes, san, or ivy)')
parser.add_option('--group-id', dest='groupId', default='',
help='GID to charge the hours to [REQUIRED]')
# This call handles all the specific options for this code.
(options, args) = parser.parse_args(argsIn)
# 'args' contains everything for parallel_stereo
# Check the required positional arguments.
if not options.groupId:
parser.error("Must input a group ID to charge to!")
# Any additional arguments need to be forwarded to the mapproject function
options.extraArgs = args
    except optparse.OptionError as msg:
        # No Usage exception class is defined in this script; report the
        # problem through the option parser instead.
        parser.error(str(msg))
#startTime = time.time()
# Currently all the outputs are written to the current directory!
scriptFolder = os.getcwd()
pbsPath = os.path.abspath(os.path.join(scriptFolder, 'mainPbsScript.sh'))
stdLogPaths = []
errLogPaths = []
scriptCalls = []
for i in range(4):
stdLogPaths.append(os.path.abspath(os.path.join(scriptFolder, 'stdLog' +str(i)+'.txt')))
errLogPaths.append(os.path.abspath(os.path.join(scriptFolder, 'errLog' +str(i)+'.txt')))
scriptCalls.append(os.path.abspath(os.path.join(scriptFolder, 'pbsScript'+str(i)+'.sh' )))
# Generate the core PBS string
cpusPerNode = 12
if options.nodeType == 'san':
cpusPerNode = 18
elif options.nodeType == 'ivy':
cpusPerNode = 24
# TODO: Allow users to input these times!
    stepHours = ['5:00:00', '40:00:00', '30:00:00', '40:00:00']
corePbsString = ('qsub -q long -W group_list='+ options.groupId+
' -m eb -S /bin/bash -V -j oe -C '+ scriptFolder)
# Generate all of the individual PBS calls
pbsStrings = []
# Preprocessing stage
pbsStrings.append('subjob1=$( ' + corePbsString + ' -N pbs_stereo1 -l walltime="'+stepHours[0]+'"'
+ ' -e '+ errLogPaths[0] +' -o '+ stdLogPaths[0]
+ ' -l select='+str(1)+':ncpus='+str(cpusPerNode)+':model='+options.nodeType
+ ' -- '+ scriptCalls[0] +')')
# Correlation stage
pbsStrings.append('subjob2=$( ' + corePbsString + ' -N pbs_stereo2 -l walltime="'+stepHours[1]+'"'
+ ' -e '+ errLogPaths[1] +' -o '+ stdLogPaths[1]
+ ' -l select='+str(options.numCorrelationNodes)+':ncpus='+str(cpusPerNode)+':model='+options.nodeType
+ ' -W depend=afterok:$subjob1 -- '+ scriptCalls[1] +')')
# Filtering stage
pbsStrings.append('subjob3=$( ' + corePbsString + ' -N pbs_stereo3 -l walltime="'+stepHours[2]+'"'
+ ' -e '+ errLogPaths[2] +' -o '+ stdLogPaths[2]
+ ' -l select='+str(1)+':ncpus='+str(cpusPerNode)+':model='+options.nodeType
+ ' -W depend=afterok:$subjob2 -- '+ scriptCalls[2] +')')
# Triangulation stage
pbsStrings.append(corePbsString + ' -N pbs_stereo4 -l walltime="'+stepHours[3]+'"'
+ ' -e '+ errLogPaths[3] +' -o '+ stdLogPaths[3]
+ ' -l select='+str(options.numTriangulationNodes)+':ncpus='+str(cpusPerNode)+':model='+options.nodeType
+ ' -W depend=afterok:$subjob3 -- '+ scriptCalls[3])
# Set up the command line for parallel_stereo
commandList = ['parallel_stereo', '--nodes-list', '$PBS_NODEFILE']
commandList = commandList + options.extraArgs # Append other options
commandString = IrgSystemFunctions.argListToString(commandList)
phases = [' --entry-point 0 --stop-point 1', # Init
' --entry-point 1 --stop-point 3', # Correlation
' --entry-point 3 --stop-point 4', # Filtering
' --entry-point 4 --stop-point 6'] # Triangulation
# Generate a set of four script files
for i in range(4):
print 'Writing script file ' + scriptCalls[i]
scriptFile = open(scriptCalls[i], 'w')
scriptFile.write('#!/bin/bash\n\n')
thisCommandString = commandString + phases[i]
scriptFile.write(thisCommandString)
scriptFile.close()
# Set the script file to be executable
os.system('chmod +x ' + scriptCalls[i])
# Write the PBS script
print 'Writing main PBS script ' + pbsPath
scriptFile = open(pbsPath, 'w')
scriptFile.write('#!/bin/bash\n\n\n')
scriptFile.write('# The parallel_stereo command which is implemented:\n')
scriptFile.write('# '+ commandString) # Show the implemented command in comments
for i in range(4):
scriptFile.write('\n\n\n' + pbsStrings[i])
scriptFile.close()
# Set the PBS file to be executable
os.system('chmod +x ' + pbsPath)
## Clean up temporary files
#if not options.keep:
# IrgFileFunctions.removeFolderIfExists(tempFolder)
#endTime = time.time()
#
#print "Finished in " + str(endTime - startTime) + " seconds."
print 'Finished! To run parallel stereo, run the file ' + pbsPath
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
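An example invocation might look like the following (the group ID, node counts, and input files are hypothetical; any options not listed above are passed straight through to parallel_stereo):
# pbs_parallel_stereo.py --group-id s1234 --node-type ivy \
#     --num-correlation-nodes 4 --num-triangulation-nodes 2 \
#     left.cub right.cub run/out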
|
We have carefully selected some of our most popular items to present the most stylish, comfortable gift bundle. Our signature Baby's First Jeans for girls are made of the softest knit denim engineered especially for babies. Our luxurious ombre cardigan knit sweater features a gradation of colors from light to dark with a festive tassel on the hood and can be embroidered for a personal touch to this perfect gift! Our matching blanket with tassels and cuddly unicorn toy makes this a perfect gift for any occasion! |
# Copyright (C) Linaro Limited 2015,2017
# Author: Milo Casagrande <[email protected]>
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this library; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""The model that represents a report document to store in the db.
Reports here refer to boot or build email reports as sent (or not sent).
"""
import bson
import copy
import datetime
import types
import models
import models.base as modb
# pylint: disable=too-many-instance-attributes
class ReportDocument(modb.BaseDocument):
"""A report document that should be stored in the db.
This is used to provide some historic data about email reports sent or
which error they had when sending.
"""
def __init__(self, name, version="1.1"):
self._created_on = None
self._id = None
self._name = name
self._version = version
self.errors = []
self.job = None
self.kernel = None
self.git_branch = None
# The report type.
self.report_type = None
self.status = None
self.updated_on = None
@property
def collection(self):
return models.REPORT_COLLECTION
@property
def name(self):
"""The name of the report."""
return self._name
# pylint: disable=invalid-name
@property
def id(self):
"""The ID of this object as returned by mongodb."""
return self._id
# pylint: disable=invalid-name
@id.setter
def id(self, value):
"""Set the ID of this object with the ObjectID from mongodb.
:param value: The ID of this object.
:type value: str
"""
self._id = value
@property
def version(self):
"""The schema version of this document."""
return self._version
@version.setter
def version(self, value):
"""Set the schema version of this document."""
self._version = value
@property
def created_on(self):
"""When this object was created."""
if self._created_on is None:
self._created_on = datetime.datetime.now(tz=bson.tz_util.utc)
return self._created_on
@created_on.setter
def created_on(self, value):
"""Set the creation date of this lab object.
:param value: The lab creation date, in UTC time zone.
:type value: datetime
"""
self._created_on = value
def to_dict(self):
report_dict = {
models.CREATED_KEY: self.created_on,
models.ERRORS_KEY: self.errors,
models.GIT_BRANCH_KEY: self.git_branch,
models.JOB_KEY: self.job,
models.KERNEL_KEY: self.kernel,
models.NAME_KEY: self.name,
models.STATUS_KEY: self.status,
models.TYPE_KEY: self.report_type,
models.UPDATED_KEY: self.updated_on,
models.VERSION_KEY: self.version
}
if self.id:
report_dict[models.ID_KEY] = self.id
return report_dict
@staticmethod
def from_json(json_obj):
report_doc = None
if json_obj and isinstance(json_obj, types.DictionaryType):
local_obj = copy.deepcopy(json_obj)
j_pop = local_obj.pop
report_doc = ReportDocument(
j_pop(models.NAME_KEY), version=j_pop(models.VERSION_KEY))
report_doc.report_type = j_pop(models.TYPE_KEY)
for key, val in local_obj.iteritems():
setattr(report_doc, key, val)
report_doc.updated_on = datetime.datetime.now(tz=bson.tz_util.utc)
return report_doc
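A minimal usage sketch (all field values below are hypothetical; the `models.*_KEY` constants used by `to_dict()` are defined elsewhere in the package):
report = ReportDocument("boot-report")
report.job = "mainline"
report.kernel = "v4.4-rc1"
report.git_branch = "master"
report.report_type = "boot"
report.status = "SENT"
print(report.to_dict())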
|
This week’s Northern Miner podcast sets the stage for our month-long focus on forecasts heading into 2018 (5:00). Matt tackles Scotiabank’s “Top Themes for 2018” (8:02), courtesy of portfolio strategist Vincent Delisle, which lays out the case for gold and oil heading into the new year. Scotia discusses the state of the U.S. dollar, Canadian small-cap equities, and an “equity-over-bond preference” strategy for 2018.
Meanwhile, Lesley’s back with a brand-new edition of the Geology Corner (12:05). This episode deals with Novo Resources’ (TSXV: NVO; US-OTC: NSRPF) recent woes at its high-profile 7,600-sq.-km Karratha gold project in Western Australia. Lesley examines the company’s “revised geological model” and what it means for the project moving forward.
Novo stated in a Nov. 24 press release that after “careful review of sample consistency, integrity and recovery” the company intends to review other options for collecting bulk samples from drilling.
Our Sponsor Spotlight (27:09) for the week features PwC partner Marelize Konig, who discusses the firm’s annual Art of Mining and Vision to Reality competitions. She also talks about the impact of innovation on PwC’s business.
Bonus: Matt rants about company disclosure and securities regulators (20:40).
1 Comment on "The Northern Miner podcast – episode 82: Forward-looking statements & Wits 2.(no)"
Hey all, there was a comment posted on our soundcloud website that a listener was “disappointed” I never discussed the precipitation theory, as it’s the “heart of Novo’s story.” The precipitation theory being that gold precipitated directly out of seawater and into the conglomerate horizon at Comet Wells/Purdy’s Reward.
Just to clarify (in case others are thinking the same), I never ventured into the precipitation theory in this week’s podcast because it strays away from science and into arm-waving territory, as no evidence has been presented to support it.
Novo’s geological model overwrites the precipitation theory altogether, as the gold was transported and deposited through wave action along an ancient shoreline – which probably only lasted a few hundred thousand years because the Bellary Formation (the target conglomerate horizon) is exceptionally thin, compared to the Wits Basin in which deposition took place over 200 million years.
The only mention of any “submarine biogenic activity” in the company’s presentation is written in tiny font on slide 12, suggesting the activity occurred offshore in deeper parts of the ocean basin, but no evidence is provided to suggest this is the case! In fact, from a geological perspective this might not make sense either – if the seawater was “chemically aggressive” enough to hold gold in solution (as Hennigh suggested in one of his papers) why don’t the clasts of pillow basalts show signs of intense chemical weathering? Anyway, just one of many unanswered questions I have about it, but that’s for another episode another time!
We also covered the precipitation theory in podcast episode 77, an interview with Hartwig Frimmel, one of the world’s leading experts on Wits geology and former co-author with Quinton Hennigh on the subject. If you’re interested please check it out and if you have any questions, feel free to contact me at [email protected]. |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import xml.etree.ElementTree as ET
import pickle
import os
def voc_ap(rec, prec, use_07_metric=True):
""" ap = voc_ap(rec, prec, [use_07_metric])
Compute VOC AP given precision and recall.
If use_07_metric is true, uses the
VOC 07 11 point method (default:True).
"""
if use_07_metric:
# 11 point metric
ap = 0.
for t in np.arange(0., 1.1, 0.1):
if np.sum(rec >= t) == 0:
p = 0
else:
p = np.max(prec[rec >= t])
ap = ap + p / 11.
else:
# correct AP calculation
# first append sentinel values at the end
mrec = np.concatenate(([0.], rec, [1.]))
mpre = np.concatenate(([0.], prec, [0.]))
# compute the precision envelope
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# to calculate area under PR curve, look for points
# where X axis (recall) changes value
i = np.where(mrec[1:] != mrec[:-1])[0]
# and sum (\Delta recall) * prec
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
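# Worked example (hypothetical numbers): with rec = np.array([0.2, 0.6]) and
# prec = np.array([1.0, 0.5]), the 11-point metric takes the maximum precision
# at each recall threshold t = 0.0, 0.1, ..., 1.0: precision 1.0 for the three
# thresholds t <= 0.2, precision 0.5 for the four thresholds 0.2 < t <= 0.6,
# and 0 above that, giving ap = (3 * 1.0 + 4 * 0.5) / 11. ~= 0.4545.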
def parse_rec(filename):
""" Parse a PASCAL VOC xml file """
tree = ET.parse(filename)
objects = []
for obj in tree.findall('object'):
obj_struct = {}
obj_struct['name'] = obj.find('name').text
obj_struct['pose'] = obj.find('pose').text
obj_struct['truncated'] = int(obj.find('truncated').text)
obj_struct['difficult'] = int(obj.find('difficult').text)
bbox = obj.find('bndbox')
obj_struct['bbox'] = [int(bbox.find('xmin').text) - 1,
int(bbox.find('ymin').text) - 1,
int(bbox.find('xmax').text) - 1,
int(bbox.find('ymax').text) - 1]
objects.append(obj_struct)
return objects
def voc_eval(detpath,
annopath,
imagesetfile,
classname,
cachedir,
ovthresh=0.5,
use_07_metric=True):
"""rec, prec, ap = voc_eval(detpath,
annopath,
imagesetfile,
classname,
[ovthresh],
[use_07_metric])
Top level function that does the PASCAL VOC evaluation.
detpath: Path to detections
detpath.format(classname) should produce the detection results file.
annopath: Path to annotations
annopath.format(imagename) should be the xml annotations file.
imagesetfile: Text file containing the list of images, one image per line.
classname: Category name (duh)
cachedir: Directory for caching the annotations
[ovthresh]: Overlap threshold (default = 0.5)
[use_07_metric]: Whether to use VOC07's 11 point AP computation
(default True)
"""
# assumes detections are in detpath.format(classname)
# assumes annotations are in annopath.format(imagename)
# assumes imagesetfile is a text file with each line an image name
# cachedir caches the annotations in a pickle file
# first load gt
if not os.path.isdir(cachedir):
os.mkdir(cachedir)
cachefile = os.path.join(cachedir, 'annots.pkl')
# read list of images
with open(imagesetfile, 'r') as f:
lines = f.readlines()
imagenames = [x.strip() for x in lines]
if not os.path.isfile(cachefile):
# load annots
recs = {}
for i, imagename in enumerate(imagenames):
recs[imagename] = parse_rec(annopath % (imagename))
if i % 100 == 0:
print('Reading annotation for {:d}/{:d}'.format(
i + 1, len(imagenames)))
# save
print('Saving cached annotations to {:s}'.format(cachefile))
with open(cachefile, 'wb') as f:
pickle.dump(recs, f)
else:
# load
with open(cachefile, 'rb') as f:
recs = pickle.load(f)
# extract gt objects for this class
class_recs = {}
npos = 0
for imagename in imagenames:
R = [obj for obj in recs[imagename] if obj['name'] == classname]
bbox = np.array([x['bbox'] for x in R])
difficult = np.array([x['difficult'] for x in R]).astype(np.bool)
det = [False] * len(R)
npos = npos + sum(~difficult)
class_recs[imagename] = {'bbox': bbox,
'difficult': difficult,
'det': det}
# read dets
detfile = detpath.format(classname)
with open(detfile, 'r') as f:
lines = f.readlines()
    if any(lines):
splitlines = [x.strip().split(' ') for x in lines]
image_ids = [x[0] for x in splitlines]
confidence = np.array([float(x[1]) for x in splitlines])
BB = np.array([[float(z) for z in x[2:]] for x in splitlines])
# sort by confidence
sorted_ind = np.argsort(-confidence)
sorted_scores = np.sort(-confidence)
BB = BB[sorted_ind, :]
image_ids = [image_ids[x] for x in sorted_ind]
# go down dets and mark TPs and FPs
nd = len(image_ids)
tp = np.zeros(nd)
fp = np.zeros(nd)
for d in range(nd):
R = class_recs[image_ids[d]]
bb = BB[d, :].astype(float)
ovmax = -np.inf
BBGT = R['bbox'].astype(float)
if BBGT.size > 0:
# compute overlaps
# intersection
ixmin = np.maximum(BBGT[:, 0], bb[0])
iymin = np.maximum(BBGT[:, 1], bb[1])
ixmax = np.minimum(BBGT[:, 2], bb[2])
iymax = np.minimum(BBGT[:, 3], bb[3])
iw = np.maximum(ixmax - ixmin, 0.)
ih = np.maximum(iymax - iymin, 0.)
inters = iw * ih
uni = ((bb[2] - bb[0]) * (bb[3] - bb[1]) +
(BBGT[:, 2] - BBGT[:, 0]) *
(BBGT[:, 3] - BBGT[:, 1]) - inters)
overlaps = inters / uni
ovmax = np.max(overlaps)
jmax = np.argmax(overlaps)
if ovmax > ovthresh:
if not R['difficult'][jmax]:
if not R['det'][jmax]:
tp[d] = 1.
R['det'][jmax] = 1
else:
fp[d] = 1.
else:
fp[d] = 1.
# compute precision recall
fp = np.cumsum(fp)
tp = np.cumsum(tp)
rec = tp / float(npos)
# avoid divide by zero in case the first detection matches a difficult
# ground truth
prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
ap = voc_ap(rec, prec, use_07_metric)
else:
rec = -1.
prec = -1.
ap = -1.
return rec, prec, ap
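# A minimal usage sketch (hypothetical paths, following the detpath/annopath
# conventions documented above):
#
#   rec, prec, ap = voc_eval(
#       'results/det_test_{:s}.txt',                  # formatted with classname
#       'VOCdevkit/VOC2007/Annotations/%s.xml',       # formatted with image id
#       'VOCdevkit/VOC2007/ImageSets/Main/test.txt',  # one image id per line
#       'car',
#       'annotation_cache')
#   print('AP for car: {:.4f}'.format(ap))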
|
I received so many great tutorials (especially from Good-Tutorials.com), it’s about time I start to repay my debt. Here is a tutorial for an abstract “refract” effect.
Voila! If you want, you can see a larger image. |
#coding=utf8
import tushare as ts;
import pymysql;
import time as dt
from datashape.coretypes import string
from pandas.io.sql import SQLDatabase
import sqlalchemy
import datetime
from sqlalchemy import create_engine
from pandas.io import sql
import threading
import pandas as pd;
import sys
sys.path.append('../')  # add the config module to the path
from common_function import *
def create_table(table_name):
cmd='''
create table if not exists %s
(
        opDate VARCHAR (63) comment 'margin trading date'
        ,rzye BIGINT comment 'financing balance today (CNY)'
        ,rzmre BIGINT comment 'financing purchase amount today (CNY)'
        ,rqyl BIGINT comment 'securities lending balance today (shares)'
        ,rqylje BIGINT comment 'securities lending balance value today (CNY)'
        ,rqmcl BIGINT comment 'securities lending amount sold today (shares)'
        ,rzrqjyzl BIGINT comment 'total margin trading balance today (CNY)'
,PRIMARY KEY(`opDate`)
,index(opDate)
)DEFAULT CHARSET=utf8
'''%table_name
print (cmd)
run_mysql_cmd(cmd,conn)
def load_data():
start_date=get_date_add_days(get_max_date_sh_margins(),1)
print(start_date,end_date)
rs=ts.sh_margins(start=start_date, end=end_date)
pd.DataFrame.to_sql(rs, table_name, con=conn, flavor='mysql', if_exists='append', index=False)
if __name__ == '__main__':
    #-------------------- basic configuration ---------------------------------
    print("-------------- loading daily stock K-line data -----------------------------")
startTime=dt.time()
iphost,user,passwd=get_mysql_conn()
db='ods_data'
charset='utf8'
table_name='ods_invest_refer_sh_margins'
conn = pymysql.connect(user=user, passwd=passwd,host=iphost, db=db,charset=charset)
end_date= dt.strftime('%Y-%m-%d',dt.localtime(dt.time()))
    #-------------------- script execution starts --------------------------------
create_table(table_name=table_name)
load_data()
endTime=dt.time()
    print("--------------- script finished, total elapsed time: %sS ------------------"%(endTime-startTime))
|
Fundamentally, I'm a story teller. I love coaching people to find their story and tell it powerfully. I'm a former national tv reporter turned communications coach and intuitive entertainer. My master's degree is in transpersonal psychology and I am certified with Gaia coaching, the world's premiere women's leadership program. Let's find the powerful you -- physically, mentally, emotionally & spiritually fit!
As a speaker and mystic entertainer, I help you create memorable events. Let's design a workshop or presentation that fits your needs.
What is it that is keeping you from accessing your natural intuition? Successful leaders engage their whole brain. This presentation is built on years of research and on successful methods employed by the US military and in university studies to grow intuition. I will help you and your team figure out what is coming next and what your creative options are.
I developed this interactive presentation while working with numerous clients who had tried unsuccessfully for years to meditate. Integrating ancient texts on meditation with modern scientific studies, I've developed a way to teach advanced meditation for simple application in daily life for increased performance and joy.
How can you make selling joyful for you and your customer? What are the habits and practices that only the most successful salespeople do? How can you make a sale without selling your soul? This workshop will help your sales team learn how to leverage their sales into the future and feel great about the work that they are doing, both for themselves and their clients.
I've helped top politicians, business people and civic leaders tell their story via the media, from broadcasts to blogs.
I love helping people tell their story in a way that brings crowds closer together. I enjoy coaching novices with a fear of public speaking, as well as seasoned professionals who want to "add a little magic" to their presentations.
English author Edward Bulwer-Lytton said, "The pen is mightier than the sword." Wield this weapon wisely. Whether you are writing a memoir, a book or corporate report, learn to remove creative blocks, boost your productivity and engage people's hearts and minds at a fundamental level.
Stories with happy endings are entirely possible.
Thank you Harley Davidson, Microsoft, Nordstrom, Amazon, Absolut Vodka, and others across the U.S.
Copyright © 2019 Deni Luna - All Rights Reserved. |
#!/usr/bin/env python
""" Visualization functions for connectivity analysis. """
import sys
import os.path as op
import numpy as np
import scipy as sci
import matplotlib.pyplot as pl
import mne
import yaml
import pickle
def sensor_connectivity_3d(raw, picks, con, idx, n_con=20, min_dist=0.05,
scale_factor=0.005, tube_radius=0.001):
""" Function to plot sensor connectivity showing strongest
connections(n_con) excluding sensors that are less than min_dist apart.
https://github.com/mne-tools/mne-python/blob/master/examples/connectivity/plot_sensor_connectivity.py
Parameters
----------
raw : Raw object
Instance of mne.io.Raw
picks : list
Picks to be included.
con : ndarray (n_channels, n_channels)
Connectivity matrix.
idx : list
List of indices of sensors of interest.
n_con : int
Number of connections of interest.
min_dist : float
Minimum distance between sensors allowed.
Note: Please modify scale factor and tube radius to appropriate sizes
if the plot looks scrambled.
"""
# Now, visualize the connectivity in 3D
try:
from enthought.mayavi import mlab
except:
from mayavi import mlab
mlab.figure(size=(600, 600), bgcolor=(0.5, 0.5, 0.5))
# Plot the sensor location
sens_loc = [raw.info['chs'][picks[i]]['loc'][:3] for i in idx]
sens_loc = np.array(sens_loc)
pts = mlab.points3d(sens_loc[:, 0], sens_loc[:, 1], sens_loc[:, 2],
color=(1, 1, 1), opacity=1, scale_factor=scale_factor)
# Get the strongest connections
threshold = np.sort(con, axis=None)[-n_con]
ii, jj = np.where(con >= threshold)
# Remove close connections
con_nodes = list()
con_val = list()
for i, j in zip(ii, jj):
if sci.linalg.norm(sens_loc[i] - sens_loc[j]) > min_dist:
con_nodes.append((i, j))
con_val.append(con[i, j])
con_val = np.array(con_val)
# Show the connections as tubes between sensors
vmax = np.max(con_val)
vmin = np.min(con_val)
for val, nodes in zip(con_val, con_nodes):
x1, y1, z1 = sens_loc[nodes[0]]
x2, y2, z2 = sens_loc[nodes[1]]
points = mlab.plot3d([x1, x2], [y1, y2], [z1, z2], [val, val],
vmin=vmin, vmax=vmax, tube_radius=tube_radius,
colormap='RdBu')
points.module_manager.scalar_lut_manager.reverse_lut = True
mlab.scalarbar(title='Phase Lag Index (PLI)', nb_labels=4)
# Add the sensor names for the connections shown
nodes_shown = list(set([n[0] for n in con_nodes] +
[n[1] for n in con_nodes]))
for node in nodes_shown:
x, y, z = sens_loc[node]
mlab.text3d(x, y, z, raw.ch_names[picks[node]], scale=0.005,
color=(0, 0, 0))
view = (-88.7, 40.8, 0.76, np.array([-3.9e-4, -8.5e-3, -1e-2]))
mlab.view(*view)
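# A minimal usage sketch for the function above (the file name is hypothetical;
# `con` would typically come from an MNE connectivity estimate such as
# mne.connectivity.spectral_connectivity):
#
#   raw = mne.io.Raw('sample_raw.fif')
#   picks = mne.pick_types(raw.info, meg='grad')
#   # con: (n_channels, n_channels) matrix; idx: indices of sensors of interest
#   sensor_connectivity_3d(raw, picks, con, idx=list(range(len(picks))))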
def plot_grouped_connectivity_circle(yaml_fname, con, orig_labels,
node_order_size=68, indices=None,
out_fname='circle.png', title=None,
subplot=111, include_legend=False,
n_lines=None, fig=None, show=True,
vmin=None, vmax=None, colormap='hot',
colorbar=False):
'''
Plot the connectivity circle grouped and ordered according to
groups in the yaml input file provided.
orig_labels : list of str
Label names in the order as appears in con.
'''
# read the yaml file with grouping
if op.isfile(yaml_fname):
with open(yaml_fname, 'r') as f:
labels = yaml.load(f)
else:
print '%s - File not found.' % yaml_fname
sys.exit()
cortex_colors = ['m', 'b', 'y', 'c', 'r', 'g',
'g', 'r', 'c', 'y', 'b', 'm']
# make list of label_names (without individual cortex locations)
label_names = list()
for lab in labels:
label_names.extend(labels[lab])
lh_labels = [name + '-lh' for name in label_names]
rh_labels = [name + '-rh' for name in label_names]
# Save the plot order and create a circular layout
node_order = list()
node_order.extend(lh_labels[::-1]) # reverse the order
node_order.extend(rh_labels)
    assert len(node_order) == node_order_size, 'Node order length does not match.'
# the respective no. of regions in each cortex
group_bound = [len(labels[key]) for key in labels.keys()]
group_bound = [0] + group_bound[::-1] + group_bound
group_boundaries = [sum(group_bound[:i+1]) for i in range(len(group_bound))]
# remove the first element of group_bound
# make label colours such that each cortex is of one colour
group_bound.pop(0)
label_colors = []
for ind, rep in enumerate(group_bound):
label_colors += [cortex_colors[ind]] * rep
assert len(label_colors) == len(node_order), 'Number of colours do not match'
# remove the last total sum of the list
group_boundaries.pop()
from mne.viz.circle import circular_layout
node_angles = circular_layout(orig_labels, node_order, start_pos=90,
group_boundaries=group_boundaries)
# the order of the node_colors must match that of orig_labels
# therefore below reordering is necessary
reordered_colors = [label_colors[node_order.index(orig)]
for orig in orig_labels]
# Plot the graph using node_order and colours
# orig_labels is the order of nodes in the con matrix (important)
from mne.viz import plot_connectivity_circle
plot_connectivity_circle(con, orig_labels, n_lines=n_lines,
facecolor='white', textcolor='black',
node_angles=node_angles, colormap=colormap,
node_colors=reordered_colors,
node_edgecolor='white', fig=fig,
fontsize_names=6, vmax=vmax, vmin=vmin,
colorbar_size=0.2, colorbar_pos=(-0.3, 0.1),
colorbar=colorbar, show=show, subplot=subplot,
indices=indices, title=title)
if include_legend:
import matplotlib.patches as mpatches
legend_patches = [mpatches.Patch(color=col, label=key)
for col, key in zip(['g', 'r', 'c', 'y', 'b', 'm'],
labels.keys())]
pl.legend(handles=legend_patches, loc=(0.02, 0.02), ncol=1,
mode=None, fontsize='small')
if out_fname:
pl.savefig(out_fname, facecolor='white', dpi=300)
def plot_generic_grouped_circle(yaml_fname, con, orig_labels,
node_order_size,
out_fname='circle.png', title=None,
subplot=111, include_legend=False,
n_lines=None, fig=None, show=True,
vmin=None, vmax=None,
colorbar=False):
'''
Plot the connectivity circle grouped and ordered according to
groups in the yaml input file provided. This is not specific to aparc and
does not automatically split the labels into left and right hemispheres.
orig_labels : list of str
Label names in the order as appears in con.
'''
# read the yaml file with grouping
if op.isfile(yaml_fname):
with open(yaml_fname, 'r') as f:
labels = yaml.load(f)
else:
print '%s - File not found.' % yaml_fname
sys.exit()
cortex_colors = ['m', 'b', 'y', 'c', 'r', 'g']
# make list of label_names (without individual cortex locations)
label_names = list()
for lab in labels:
label_names.extend(labels[lab])
# here label_names are the node_order
node_order = label_names
    assert len(node_order) == node_order_size, 'Node order length does not match.'
# the respective no. of regions in each cortex
group_bound = [len(labels[key]) for key in labels.keys()]
group_bound = [0] + group_bound
group_boundaries = [sum(group_bound[:i+1]) for i in range(len(group_bound))]
# remove the first element of group_bound
# make label colours such that each cortex is of one colour
group_bound.pop(0)
label_colors = []
for ind, rep in enumerate(group_bound):
label_colors += [cortex_colors[ind]] * rep
assert len(label_colors) == len(node_order), 'Number of colours do not match'
# remove the last total sum of the list
group_boundaries.pop()
from mne.viz.circle import circular_layout
node_angles = circular_layout(orig_labels, label_names, start_pos=90,
group_boundaries=group_boundaries)
# the order of the node_colors must match that of orig_labels
# therefore below reordering is necessary
reordered_colors = [label_colors[node_order.index(orig)]
for orig in orig_labels]
# Plot the graph using node_order and colours
    # orig_labels is the order of nodes in the con matrix (important)
from mne.viz import plot_connectivity_circle
plot_connectivity_circle(con, orig_labels, n_lines=n_lines,
facecolor='white', textcolor='black',
node_angles=node_angles,
node_colors=reordered_colors,
node_edgecolor='white', fig=fig,
fontsize_names=8, vmax=vmax, vmin=vmin,
colorbar_size=0.2, colorbar_pos=(-0.3, 0.1),
colorbar=colorbar, show=show, subplot=subplot,
title=title)
if include_legend:
import matplotlib.patches as mpatches
legend_patches = [mpatches.Patch(color=col, label=key)
for col, key in zip(['g', 'r', 'c', 'y', 'b', 'm'],
labels.keys())]
pl.legend(handles=legend_patches, loc=(0.02, 0.02), ncol=1,
mode=None, fontsize='small')
if out_fname:
pl.savefig(out_fname, facecolor='white', dpi=300)
|
Piglet 808 MP Pre-Starter is the ultimate in taste and digestibility for young piglets. This nutritious mix of milk products, cooked cereals, fats and highly digestible proteins ensures piglets have a smooth introduction to solid food.
Piglet 808 MP Pre-Starter is micro-pelleted for improved feed consumption and contains digestive enzymes, feed acidifiers, yeast extracts, dextrose, emulsifiers, feed flavours, zinc oxide and an extensive vitamin and trace mineral premix to enhance growth, health and improve food digestibility.
A major advantage of the micro-pelleting process compared to extruded and mash feeds is that it increases the physical density of the food, which is important as the gut capacity of a young piglet is a limiting factor in its nutrient intake.
Feed Piglet 808 MP Pre-Starter ad lib. from birth to 3 weeks of age.
From 3 to 5 weeks feed Piglet 800 MP Starter.
To maximise consumption and to help avoid digestive problems ensure that Piglet 808 MP Pre-Starter is fed fresh daily.
Piglet 808 MP Pre-Starter is formulated from a selection of the following ingredients: Cooked cereals, wheat, legumes, soyabean, canola and products derived from these ingredients. Fishmeal, whey, butter milk, skim milk, meat meal, blood meal, fat, lysine, methionine, threonine, tryptophan, dicalcium phosphate, bentonite, zinc oxide, enzymes, emulsifiers, yeast extracts, flavours, feed acidifiers and antioxidants. |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A module containing TensorFlow ops whose API may change in the future."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.framework.python.framework.checkpoint_utils import *
from tensorflow.contrib.framework.python.framework.experimental import experimental
from tensorflow.contrib.framework.python.framework.tensor_util import *
# pylint: enable=wildcard-import
from tensorflow.python.util import decorator_utils
from tensorflow.python.util import nest
from tensorflow.python.util.deprecation import deprecated
from tensorflow.python.util.deprecation import deprecated_arg_values
from tensorflow.python.util.deprecation import deprecated_args
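For instance, the re-exported `deprecated` decorator logs a warning when the wrapped function is first called; a small sketch (the function and replacement names below are hypothetical):
@deprecated('2017-01-01', 'Use `new_op` instead.')
def old_op(x):
  # behaves exactly like before, but emits a deprecation warning when called
  return x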
|
eTwinning is an initiative of the European Union. It is part of the Comenius strand of the Lifelong Learning Programme (LLP). It is steered by the European Commission and implemented by its Executive Agency for Education, Audiovisual and Culture (EACEA).
The various tools present on the eTwinning Platform allow users to submit information in order to participate in eTwinning activities. When registering online and submitting other information, the data is collected and further processed for the purposes detailed hereafter under point 2. The relevant processing operations are under the responsibility of the Data Controller, which is the head of the “Lifelong Learning: Comenius, ICT and Languages” unit at the EACEA.
(b) Other data: Registrants may also participate in project TwinSpaces, eTwinning Groups, Teachers’ Rooms and Learning Events and develop a Project Diary. In these cases, registrants can submit information for the purpose of participating in such initiatives. Access to such platforms is restricted to their registered members only.
The collected personal data and all related information are stored on secured servers of the service provider (European Schoolnet). The operations of the service provider’s computer centres contractually abide by the European Commission’s security directives and the provisions established by the Directorate of Security for these kinds of servers and services.
Data related to the profile of the user, as described in point 2, are kept for one year after the user’s last login. After one year from the last login, the user profile will automatically be set to inactive, i.e., no longer visible to other users or the outside world. A notification is sent to the user to inform her/him that her/his profile has been set to inactive and that he/she can re-activate his/her account by logging in again. A second and final reminder is sent after an additional year informing the user that, 2 years after her/his last login, his/her profile will be deactivated permanently. All personal information is then made anonymous.
Should the conflict not be resolved by the Controller or the Data Protection Officer you may lodge a complaint with the European Data Protection Supervisor at any time: website – http://www.edps.europa.eu; email – [email protected]. |
# -*- coding: utf-8 -*-
"""
utility functions to train the RNN-based VariationalAutoencoder
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import numpy as np
import tensorflow as tf
import attention
import cells
# TODO: pydocs
TRAIN_INFO_LM = {
"epoch": 0,
"best_valid_ppx": np.inf,
"best_epoch": 0,
"estop_counter": 0,
"current_cost": 0.0
}
def get_2d_tensor_shapes(tensor):
""" """
length = tensor.get_shape()[0].value
if length is None:
length = tf.shape(tensor)[0]
dim = tensor.get_shape()[1].value
return length, dim
def get_3d_tensor_shapes(tensor):
""" """
batch = tensor.get_shape()[0].value
length = tensor.get_shape()[1].value
if length is None:
length = tf.shape(tensor)[2]
dim = tensor.get_shape()[2].value
return batch, length, dim
def reshape_attention(attention_states):
""" """
_, attn_length, attn_dim = get_3d_tensor_shapes(attention_states)
# To calculate W1 * h_t we use a 1-by-1 convolution, need to reshape before.
hidden = tf.reshape(attention_states, [-1, attn_length, 1, attn_dim])
return hidden, attn_dim
def convolve(tensor, matrix):
""" """
return tf.nn.conv2d(tensor, matrix, [1, 1, 1, 1], "SAME")
def build_lm_layers(num_layers,
size,
is_training=False,
decoding_function_name=None,
keep_prob=1.0,
keep_attention_weights=False):
""" Helper to build recurrent layers for he LM. """
decoding_function = None
# building the layers
lstm_cell0 = tf.contrib.rnn.BasicLSTMCell(
size, forget_bias=1.0, reuse=not is_training)
# lstm_cell0 = tf.contrib.rnn.LSTMBlockCell(
# size, forget_bias=1.0)
lstm_cell1 = tf.contrib.rnn.DropoutWrapper(
lstm_cell0, output_keep_prob=keep_prob
) if is_training and keep_prob < 1.0 else lstm_cell0
if decoding_function_name is not None:
decoding_function = attention.get_decoder_content_function(
decoding_function_name)
lstm_cellA = cells.AttentionLSTMCell(
size, forget_bias=1.0, state_is_tuple=True,
init_constant_output_bias=False,
decoding_function=decoding_function,
keep_attention_weights=keep_attention_weights,
reuse=tf.get_variable_scope().reuse)
lstm_cellA = tf.contrib.rnn.DropoutWrapper(
lstm_cellA, output_keep_prob=keep_prob
) if is_training and keep_prob < 1.0 else lstm_cellA
# internal_cell = [lstm_cell1] * (num_layers - 1)
internal_cell = [lstm_cell1 for _ in range(num_layers - 1)]
internal_cell = internal_cell + [lstm_cellA]
else:
internal_cell = [lstm_cell1 for _ in range(num_layers)]
cell = tf.contrib.rnn.MultiRNNCell(internal_cell, state_is_tuple=True)
return cell
def create_queue(data_size,
num_steps,
capacity=128,
dtype=tf.int32):
""" Create the queue and related ops and placeholders to be used to
feed the network.
"""
# Feeds for inputs.
input_data = tf.placeholder(
dtype, shape=[data_size, num_steps], name="input_data")
targets = tf.placeholder(
dtype, shape=[data_size, num_steps], name="targets")
queue = tf.FIFOQueue(
capacity=capacity,
# min_after_dequeue=min_after_dequeue,
dtypes=[dtype, dtype],
shapes=[[num_steps]] * 2)
enqueue_op = queue.enqueue_many(
[input_data, targets])
placeholders = {
"input_data": input_data,
"targets": targets
}
return queue, enqueue_op, placeholders
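A minimal usage sketch (assuming `x` and `y` are int32 arrays of shape `[data_size, num_steps]`):
queue, enqueue_op, feeds = create_queue(data_size=32, num_steps=20)
inputs, targets = queue.dequeue()
with tf.Session() as sess:
    sess.run(enqueue_op, feed_dict={feeds["input_data"]: x,
                                    feeds["targets"]: y})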
|
My name is Leila. I am a transvestite, latex designer and fetish model.
I have a strong attraction towards latex and I am always looking for unique things. I love to experiment and I have a profound philosophy.
My latex creations are completely unconventional.
I mix extravagance, sophistication, elegance and sensuality into an unique style. But I also have a thing for roughness and shapelessness.
Buttons that are sewn on are very important to me. I hardly ever use press studs in my latex creations.
The colour, shape and size of the buttons have to fit my creations perfectly.
To achieve this, I put a lot of effort in finding exactly what I imagined.
I also like to use buttons as ornaments.
It is very important to me, that every little detail fits just right.
I prefer to use latex with at least 0.6mm thickness or more.
Furthermore, I create accessories like latex hats and headscarves to match my latex designs.
The latex creations are manufactured by www.rubberik.at to fit my size. A make-up artist styles me, and a professional photographer is in charge of taking pictures of me wearing every latex dress.
I design latex coats, latex OP coats, latex rainwear, latex skirts, latex blouses, latex suits and costumes, latex mini dresses, latex nurse uniforms, latex maid costumes, latex nun habits, latex smocks, latex aprons and latex uniforms.
Every latex creation is a customized copy made by hand by Rubberik in an extensive and dedicated manner to fit your measures perfectly.
You can also order my latex outfits in standard sizes.
Naturally, we can make some modifications to the designs for you.
My latex creations are base-models. They are designed to fit my needs.
You can choose from a variety of color and your preferred latex thickness, as well as from structured and printed latex.
If you desire so, we can have your latex garment with a chlorinated finish.
Please note: Rubberik understands English, too, but their website is only in German.
I wish you a lot of fun browsing through my latex creations.
From now on official sales and shipping of Vivishine products for your latex care. |
#
# Copyright 2014-2015 Boundary, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from boundary import AlarmModify
class AlarmCreate(AlarmModify):
def __init__(self):
AlarmModify.__init__(self, False)
self.method = "POST"
        self.cli_description = "Creates a new alarm definition in a Boundary account"
def addArguments(self):
self.parser.add_argument('-n', '--alarm-name', dest='alarm_name', action='store', required=True,
metavar='alarm_name', help='Name of the alarm')
AlarmModify.addArguments(self)
def getArguments(self):
"""
Extracts the specific arguments of this CLI
"""
AlarmModify.getArguments(self)
self.path = 'v1/alarms'
def getDescription(self):
        return 'Creates an alarm definition in a Boundary account'
|
Since the dawn of civilization, mankind has been engaged in the conception and manufacture of discrete products to serve the functional needs of local customers and to provide the tools (technology) needed by other craftsmen. In fact, much of the progress in civilization can be attributed to progress in discrete product manufacture. The functionality of a discrete object depends on two entities: form and material composition. For instance, the aesthetic appearance of a sculpture depends upon its form, whereas its durability depends upon the material composition. An ideal manufacturing process is one that is able to automatically generate any form (freeform) in any material. Unfortunately, most traditional manufacturing processes are severely constrained on all these counts. There are three basic ways of creating form: conservative, subtractive, and additive. In the first approach, we take a material and apply the needed forces to deform it to the required shape, without either adding or removing material, i.e., we conserve material. Many industrial processes such as forging, casting, sheet metal forming and extrusion emulate this approach. A problem with many of these approaches is that they focus on form generation without explicitly providing any means for controlling material composition. In fact, even form is not created directly: these processes merely duplicate the external form embedded in external tooling such as dies and molds, and the internal form embedded in cores, etc. Until recently, we have had to resort to the 'subtractive' approach to create the form of the tooling.
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import os
import shutil
import pytest
from translate.filters import checks
from django.db import IntegrityError
from pytest_pootle.factories import (
LanguageDBFactory, ProjectDBFactory, TranslationProjectFactory)
from pootle.core.plugin import getter
from pootle.core.delegate import tp_tool
from pootle_app.models import Directory
from pootle_language.models import Language
from pootle_project.models import Project
from pootle_store.models import Store
from pootle_translationproject.models import TranslationProject
from pootle_translationproject.utils import TPTool
@pytest.mark.django_db
def test_tp_create_fail(tutorial, english):
# Trying to create a TP with no Language raises a RelatedObjectDoesNotExist
# which can be caught with Language.DoesNotExist
with pytest.raises(Language.DoesNotExist):
TranslationProject.objects.create()
# TP needs a project set too...
with pytest.raises(Project.DoesNotExist):
TranslationProject.objects.create(language=english)
    # An English tutorial already exists; it was automagically set up
with pytest.raises(IntegrityError):
TranslationProject.objects.create(project=tutorial, language=english)
@pytest.mark.django_db
def test_tp_create_templates(tutorial, klingon_vpw, templates):
# As there is a tutorial template it will automatically create stores for
# our new TP
template_tp = TranslationProject.objects.get(
language=templates, project=tutorial)
tp = TranslationProject.objects.create(
project=tutorial, language=klingon_vpw)
tp.init_from_templates()
assert tp.stores.count() == template_tp.stores.count()
assert (
[(s, t)
for s, t
in template_tp.stores.first().units.values_list("source_f",
"target_f")]
== [(s, t)
for s, t
in tp.stores.first().units.values_list("source_f",
"target_f")])
@pytest.mark.django_db
def test_tp_create_with_files(tutorial, klingon, settings):
# lets add some files by hand
trans_dir = settings.POOTLE_TRANSLATION_DIRECTORY
shutil.copytree(
os.path.join(trans_dir, "tutorial/en"),
os.path.join(trans_dir, "tutorial/kl"))
TranslationProject.objects.create(project=tutorial, language=klingon)
@pytest.mark.django_db
def test_tp_empty_stats():
"""Tests if empty stats is initialized when translation project (new language)
is added for a project with existing but empty template translation project.
"""
# Create an empty template translation project for project0.
project = Project.objects.get(code="project0")
english = Language.objects.get(code="en")
TranslationProjectFactory(project=project, language=english)
# Create a new language to test.
language = LanguageDBFactory()
tp = TranslationProject.objects.create(language=language, project=project)
tp.init_from_templates()
# There are no files on disk so TP was not automagically filled.
assert list(tp.stores.all()) == []
# Check if zero stats is calculated and available.
stats = tp.get_stats()
assert stats['total'] == 0
assert stats['translated'] == 0
assert stats['fuzzy'] == 0
assert stats['suggestions'] == 0
assert stats['critical'] == 0
assert not tp.is_dirty()
@pytest.mark.django_db
def test_tp_stats_created_from_template(tutorial, templates):
language = LanguageDBFactory()
tp = TranslationProject.objects.create(language=language, project=tutorial)
tp.init_from_templates()
assert tp.stores.all().count() == 1
stats = tp.get_stats()
assert stats['total'] == 2 # there are 2 words in test template
assert stats['translated'] == 0
assert stats['fuzzy'] == 0
assert stats['suggestions'] == 0
assert stats['critical'] == 0
assert not tp.is_dirty()
@pytest.mark.django_db
def test_can_be_inited_from_templates(tutorial, templates):
language = LanguageDBFactory()
tp = TranslationProject(project=tutorial, language=language)
assert tp.can_be_inited_from_templates()
@pytest.mark.django_db
def test_cannot_be_inited_from_templates():
language = LanguageDBFactory()
project = Project.objects.get(code='project0')
tp = TranslationProject(project=project, language=language)
assert not tp.can_be_inited_from_templates()
@pytest.mark.django_db
def test_tp_checker(tp_checker_tests):
language = Language.objects.get(code="language0")
checker_name_, project = tp_checker_tests
tp = TranslationProject.objects.create(project=project, language=language)
checkerclasses = [
checks.projectcheckers.get(tp.project.checkstyle,
checks.StandardChecker)
]
assert [x.__class__ for x in tp.checker.checkers] == checkerclasses
@pytest.mark.django_db
def test_tp_create_with_none_treestyle(english, templates, settings):
project = ProjectDBFactory(
source_language=english,
treestyle="none")
language = LanguageDBFactory()
TranslationProjectFactory(
language=templates, project=project)
tp = TranslationProject.objects.create(
project=project, language=language)
assert not tp.abs_real_path
assert not os.path.exists(
os.path.join(
settings.POOTLE_TRANSLATION_DIRECTORY,
project.code))
tp.save()
assert not tp.abs_real_path
assert not os.path.exists(
os.path.join(
settings.POOTLE_TRANSLATION_DIRECTORY,
project.code))
@pytest.mark.django_db
def test_tp_tool_move(language0, project0, templates):
tp = project0.translationproject_set.get(language=language0)
original_stores = list(tp.stores.all())
TPTool(project0).move(tp, templates)
assert tp.language == templates
assert (
tp.pootle_path
== tp.directory.pootle_path
== "/%s/%s/" % (templates.code, project0.code))
assert tp.directory.parent == templates.directory
# all of the stores and their directories are updated
for store in original_stores:
store = Store.objects.get(pk=store.pk)
assert store.pootle_path.startswith(tp.pootle_path)
assert store.parent.pootle_path.startswith(tp.pootle_path)
assert not Store.objects.filter(
pootle_path__startswith="/%s/%s"
% (language0.code, project0.code))
assert not Directory.objects.filter(
pootle_path__startswith="/%s/%s/"
% (language0.code, project0.code))
# calling with already set language does nothing
assert TPTool(project0).move(tp, templates) is None
@pytest.mark.django_db
def test_tp_tool_bad(tp0, templates, english):
other_project = ProjectDBFactory(source_language=english)
other_tp = TranslationProjectFactory(
project=other_project,
language=LanguageDBFactory())
tp_tool = TPTool(tp0.project)
with pytest.raises(ValueError):
tp_tool.check_tp(other_tp)
with pytest.raises(ValueError):
tp_tool.set_parents(tp0.directory, other_tp.directory)
with pytest.raises(ValueError):
tp_tool.set_parents(other_tp.directory, tp0.directory)
with pytest.raises(ValueError):
tp_tool.move(other_tp, templates)
with pytest.raises(ValueError):
tp_tool.clone(other_tp, templates)
with pytest.raises(ValueError):
# cant set tp to a language if a tp already exists
tp_tool.move(
tp0, Language.objects.get(code="language1"))
with pytest.raises(ValueError):
# cant clone tp to a language if a tp already exists
tp_tool.clone(
tp0, Language.objects.get(code="language1"))
def _test_tp_match(source_tp, target_tp):
source_stores = []
for store in source_tp.stores.live():
source_stores.append(store.pootle_path)
update_path = (
"/%s/%s"
% (target_tp.language.code,
store.pootle_path[(len(source_tp.language.code) + 2):]))
updated = Store.objects.get(pootle_path=update_path)
assert store.state == updated.state
updated_units = updated.units
for i, unit in enumerate(store.units):
updated_unit = updated_units[i]
assert unit.source == updated_unit.source
assert unit.target == updated_unit.target
assert unit.state == updated_unit.state
for store in target_tp.stores.live():
source_path = (
"/%s/%s"
% (source_tp.language.code,
store.pootle_path[(len(target_tp.language.code) + 2):]))
assert source_path in source_stores
@pytest.mark.django_db
def test_tp_tool_clone(tp0, templates):
new_lang = LanguageDBFactory()
tp_tool = TPTool(tp0.project)
_test_tp_match(tp0, tp_tool.clone(tp0, new_lang))
@pytest.mark.django_db
def test_tp_tool_update(tp0, templates):
new_lang = LanguageDBFactory()
tp0_tool = TPTool(tp0.project)
new_tp = tp0.project.translationproject_set.create(
language=new_lang)
# this will clone stores/directories as new_tp is empty
tp0_tool.update_from_tp(tp0, new_tp)
_test_tp_match(tp0, new_tp)
tp0_tool.update_from_tp(tp0, new_tp)
tp0.stores.first().delete()
tp0.stores.first().units.first().delete()
unit = tp0.stores.first().units.first()
unit.target = "NEW TARGET"
unit.save()
tp0_tool.update_from_tp(tp0, new_tp)
_test_tp_match(tp0, new_tp)
# doing another update does nothing
tp0_tool.update_from_tp(tp0, new_tp)
_test_tp_match(tp0, new_tp)
@pytest.mark.django_db
def test_tp_tool_getter(project0):
assert tp_tool.get(Project) is TPTool
assert isinstance(project0.tp_tool, TPTool)
@pytest.mark.django_db
def test_tp_tool_custom_getter(project0, no_tp_tool_):
class CustomTPTool(TPTool):
pass
@getter(tp_tool, sender=Project, weak=False)
def custom_tp_tool_getter(**kwargs_):
return CustomTPTool
assert tp_tool.get(Project) is CustomTPTool
assert isinstance(project0.tp_tool, CustomTPTool)
|
Anthony Horowitz’s Mindgame comes to the Ambassadors Theatre for 3 weeks only following a UK tour from 15th May.
Mark Styler, a writer of glossy 'true crime' paperbacks, has no idea what he’s walking into when he tries to get an interview with Easterman, a notorious serial killer.
First he has to get past Dr Farquhar, the quixotic head of Fairfields - the asylum where Easterman is kept.
Mindgame is a mind-bending psychological thriller from the pen of Anthony Horowitz, creator of Foyle’s War, the BBC’s New Blood, Alex Rider, the Sherlock Holmes novels House of Silk and Moriarty, and the James Bond novel Trigger Mortis.
Please note that the recommended age limit is 12+ for this production.
The running time is approximately 2 hours. |
from dbcommands import *
import logging
import json
from recipe import *
import os.path
import sys  # needed below for sys.exc_info()
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
datefmt='%m-%d %H:%M:%S',
filename='log',
filemode='a')
DATABASE_PATH = 'cookbook'
DB = DataBase(DATABASE_PATH)
'''
recipes = "'title':title,\\\n"
recipes = recipes + "'url':url,\\\n"
recipes = recipes + "'source':source,\\"
recipes = recipes + "'directions':directions,\\\n"
recipes = recipes + "'time':time,\\\n"
recipes = recipes + "'total_cost':total_cost,\\\n"
recipes = recipes + "'total_cost_per_serving':total_cost_per_serving,\\\n"
recipes = recipes + "'serving_size':serving_size,\\\n"
recipes = recipes + "'total_grams':total_grams,\\\n"
recipes = recipes + "'num_ingredients':num_ingredients,\\\n"
with open('list_of_nutrients.txt','r') as f:
for line in f:
recipes = recipes + "'" + formatIngredientKey(line) + "':0,\\\n"
print(recipes)
'''
logger = logging.getLogger('build_database.createTable')
recipes = 'recipes (id INTEGER PRIMARY KEY AUTOINCREMENT, '
recipes = recipes + 'title TEXT, '
recipes = recipes + 'url TEXT UNIQUE, '
recipes = recipes + 'source TEXT, '
recipes = recipes + 'directions TEXT, '
recipes = recipes + 'time TEXT, '
recipes = recipes + 'total_cost REAL, '
recipes = recipes + 'total_cost_per_serving REAL, '
recipes = recipes + 'serving_size REAL, '
recipes = recipes + 'total_grams REAL, '
recipes = recipes + 'num_ingredients INTEGER, '
with open('list_of_nutrients.txt','r') as f:
for line in f:
recipes = recipes + formatIngredientKey(line) + " REAL,"
recipes = recipes[:-1] + ')'
if not DB.tableExists('recipes'):
logger.warning('"recipes" table not found')
logger.info('Creating "recipes" table...')
DB.createTable(recipes)
else:
logger.debug('Table "recipes" found')
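# Build the CREATE TABLE statement for the ingredients table, which links each
# ingredient row back to its recipe via recipe_id.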
logger = logging.getLogger('build_database.createIngredients')
recipes = 'ingredients (id INTEGER PRIMARY KEY AUTOINCREMENT, '
recipes = recipes + 'recipe_id INTEGER, '
recipes = recipes + 'ingredient_uuid TEXT UNIQUE, '
recipes = recipes + 'actual TEXT, '
recipes = recipes + 'measurement TEXT, '
recipes = recipes + 'description TEXT, '
recipes = recipes + 'ndb_no TEXT, '
recipes = recipes + 'cost REAL, '
recipes = recipes + 'grams REAL)'
if not DB.tableExists('ingredients'):
logger.warning('"ingredients" table not found')
logger.info('Creating "ingredients" table...')
DB.createTable(recipes)
else:
logger.debug('Table "ingredients" found')
'''
def newRecipe( self,\
title,\
url,\
source,\
directions,\
time,\
total_cost,\
total_cost_per_serving,\
serving_size,\
total_grams,\
num_ingredients):
'''
'''
'recipe_id':recipe_id,\
'ingredient_uuid':ingredient_uuid,\
'actual':actual,\
'measurement':measurement,\
'description':description,\
'ndb_no':ndb_no,\
'cost':cost,\
'grams':grams\
'''
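# Walk the recipe index file and insert each recipe, its nutrient totals and
# its ingredients into the database, skipping recipes up to startNum.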
startNum = 9620
logger = logging.getLogger('build_database.building')
with open('get_recipes/recipes/index0_10.txt','r') as f:
for line in f:
        try:
            data = line.strip().split()
            recipeNum = int(data[0])
            url = data[1]
            title = ' '.join(data[2:])
        except (ValueError, IndexError):
            recipeNum = 0
        file = 'get_recipes/recipes/' + str(recipeNum//500) + '/' + str(recipeNum) + '.md'
        if recipeNum>startNum and os.path.isfile(file):
            logger.info(line)
            try:
                a = Recipe(file)
recipe = a.returnJson()
recipe['url'] = url
recipe['title'] = title
# Insert the new recipe
                try:
                    recipeID = DB.newRecipe(recipe['title'],recipe['url'],recipe['source'],recipe['directions'],recipe['time'],recipe['total_cost'],recipe['total_cost_per_serving'],recipe['serving_size'],recipe['total_grams'],len(recipe['ingredients']))
                except Exception:
                    # the url column is UNIQUE, so a recipe that already exists raises here
                    recipeID = DB.getRecipeIDfromURL(recipe['url'])
# Update the nutrients
for nutritionClass in recipe['nutrition'].keys():
for nutrient in recipe['nutrition'][nutritionClass].keys():
DB.updateIngredient(nutrient,recipe['nutrition'][nutritionClass][nutrient],recipeID)
# Insert the ingredients
for ingredient in recipe['ingredients']:
try:
actual = ingredient['actual']
ingredient_uuid = recipe['url']+ingredient['ndb_no']
measurement = ingredient['measurement']
description = ingredient['description']
ndb_no = ingredient['ndb_no']
cost = ingredient['cost']
grams = ingredient['grams']
foo = DB.addIngredient(recipeID,ingredient_uuid,actual,measurement,description,ndb_no,cost,grams)
                    except Exception:
                        logger.warning("ingredient already exists")
            except Exception:
                logger.error("Unexpected error: %s", sys.exc_info()[0])
'''
recipe = Recipe(sys.argv[1])
recipe['url']='asdlfkj'
try:
recipeID = DB.newRecipe(recipe['title'],recipe['url'],recipe['source'],recipe['directions'],recipe['time'],recipe['total_cost'],recipe['total_cost_per_serving'],recipe['serving_size'],recipe['total_grams'],len(recipe['ingredients']))
except:
recipeID = DB.getRecipeIDfromURL(recipe['url'])
for nutritionClass in recipe['nutrition'].keys():
for nutrient in recipe['nutrition'][nutritionClass].keys():
DB.updateIngredient(nutrient,recipe['nutrition'][nutritionClass][nutrient],recipeID)
for ingredient in recipe['ingredients']:
print(ingredient)
actual = ingredient['actual']
ingredient_uuid = recipe['url']+ingredient['ndb_no']
measurement = ingredient['measurement']
description = ingredient['description']
ndb_no = ingredient['ndb_no']
cost = ingredient['cost']
grams = ingredient['grams']
foo = DB.addIngredient(recipeID,ingredient_uuid,actual,measurement,description,ndb_no,cost,grams)
break
'''
|
The Lake of Bays. One of the most serene and marvellous spots for a wedding. So gorgeous it might just bowl you over. And having your wedding at Port Cunnington Lodge during the height of the fall season is enough to make you cry big fat tears of beauty overload. This couple decided that one day was just not enough to get the whole experience, and knew that a whole weekend was definitely necessary to celebrate their love with all their friends and family. The weather definitely cooperated. It rained all morning right up until I arrived, and then the sun just burst through the clouds to say hello. Boy, am I glad it did. Because Mother Nature was in a great mood that day, they were able to say their vows out on the deck, right by the water.
Muskoka is a dream location for any wedding and I am so glad I got to be there with 3Photography to shoot this wedding. Click HERE to check out their blog to see their vision of the day.
All the images and edits posted below are my own. |
#
# Copyright (C) 2012 UNINETT AS
#
# This file is part of Network Administration Visualized (NAV).
#
# NAV is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 2 as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details. You should have received a copy of the GNU General Public
# License along with NAV. If not, see <http://www.gnu.org/licenses/>.
#
"""ipdevpoll plugin to poll Cisco HSRP address information"""
from IPy import IP
from twisted.internet import defer
from nav.ipdevpoll import Plugin
from nav.ipdevpoll.shadows import GwPortPrefix
from nav.mibs.vrrp_mib import VRRPMib
from nav.mibs.cisco_hsrp_mib import CiscoHSRPMib
class VirtualRouter(Plugin):
"""ipdevpoll plugin to collect Virtual Router addresses from VRRP and
HSRP routers.
    These addresses are marked as virtual in the NAV database, and will
    ensure that networks with redundant routers aren't incorrectly
    classified as link networks.
This plugin will only update existing addresses that were collected by a
plugin that ran before this one in the same job (such as the Prefix
plugin). This is to ensure we don't create addresses that aren't active
on the router.
"""
@classmethod
def can_handle(cls, netbox):
daddy_says_ok = super(VirtualRouter, cls).can_handle(netbox)
return daddy_says_ok and netbox.category.id in ('GW', 'GSW')
def __init__(self, *args, **kwargs):
super(VirtualRouter, self).__init__(*args, **kwargs)
self.mibs = [mib(self.agent) for mib in (CiscoHSRPMib, VRRPMib)]
@defer.inlineCallbacks
def handle(self):
"""Handles address collection"""
if self.gwportprefixes_found():
mibs = []
virtual_addrs = set()
for mib in self.mibs:
addrs_from_mib = yield mib.get_virtual_addresses()
virtual_addrs.update(addrs_from_mib)
if addrs_from_mib:
mibs.append(mib.mib['moduleName'])
self.update_containers_with(virtual_addrs, mibs)
def gwportprefixes_found(self):
if GwPortPrefix not in self.containers:
self._logger.debug("there are no collected GwPortPrefixes to "
"update")
return False
else:
return True
def update_containers_with(self, addresses, from_mib=None):
if addresses:
self._logger.debug("Found virtual addresses from %s: %r",
from_mib, addresses)
for gwp_prefix in self.containers[GwPortPrefix].values():
gwp_prefix.virtual = IP(gwp_prefix.gw_ip) in addresses
|
If you are a fan of lemon desserts, then you are going to absolutely love these Easy Lemon Squares. A buttery shortbread crust with a tangy lemon topping creates this luscious citrus dessert.
These lemon squares are a real favourite in my home, and with good reason too. Not heavy or dense, these make a great dessert or treat. What I really love about this dessert is that it can be made even a day in advance. Bake and keep refrigerated, then just before serving, sift powdered sugar over the top and slice into squares. Dessert sorted!!!
Besides being so easy to make, this recipe uses minimal and basic ingredients. With just 6 ingredients, baking these lemon squares is an absolute breeze.
Made with a sweet buttery shortbread crust and topped with a tangy lemon filling, this is one amazing dessert.
Start by making the shortbread crust. In a bowl, combine the powdered / icing sugar, white granulated sugar and flour.
Using your fingertips, rub the butter into the sugar and flour mixture and mix to form a soft dough.
Bake the crust at 320°F / 160°C for 15 minutes until lightly browned. Remove from the oven and pour the lemon filling over the base.
Using an electric handheld beater or whisk, beat the eggs and sugar very well, for about 5 minutes. This will help dissolve the sugar and avoid a grainy texture once the lemon squares have been baked.
Add in the lemon juice, lemon zest and flour and beat for 1 minute or until smooth. Pour this filling over the partially baked crust. Return it immediately to the oven and bake for a further 20 to 25 minutes until golden brown and firmly set.
Allow to cool before slicing and dusting with powdered / icing sugar.
It is important to allow the lemon bars to cool completely before slicing. This ensures that the inside is well set and makes slicing into neat squares easier. To help speed the cooling process, I usually pop the tin into the refrigerator for about 30 minutes or until completely cool before slicing.
Easy, and lemon are two words I like to see when I’m looking for a dessert to make! Terrific recipe!!
The delicious lemon filling reminds me of Lemon meringue pie Ashika. That was always a favourite of mine. These look super tasty! Thanks for sharing this awesome recipe.
Lemon squares are one of my favorite desserts and these look unbeatable! I have to try them soon!
I am such a huge fan of lemon desserts! Love the smooth lemony filling in these squares and the shortbread crust looks divine! Thanks for a great recipe Ashika!
I love lemon squares, Ashika! One of my all-time favorites! I need to make some ASAP! ‘Tis the season for them, too! |
##Take the number 192 and multiply it by each of 1, 2, and 3:
##
##192 × 1 = 192
##192 × 2 = 384
##192 × 3 = 576
##
##By concatenating each product we get the 1 to 9 pandigital, 192384576.
##We will call 192384576 the concatenated product of 192 and (1,2,3)
##
##The same can be achieved by starting with 9 and multiplying by
##1, 2, 3, 4, and 5, giving the pandigital, 918273645,
##which is the concatenated product of 9 and (1,2,3,4,5).
##
##What is the largest 1 to 9 pandigital 9-digit number that can be
##formed as the concatenated product of an integer with (1,2, ... , n)
##where n > 1?
check_set = [x for x in range(1, 10)]
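# Build the concatenated product of number and (1, 2, ..., n); return it as an
# int if it is 1 to 9 pandigital, or False once the concatenation exceeds 9 digits.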
def concat(number):
result = ""
count = 1
while True:
result += str(number*count)
count += 1
if len(result) > 9:
return False
if len(result) == 9 == len(set([int(x) for x in result])) and count > 1:
if sorted([int(x) for x in list(result)]) == sorted(check_set):
return int(result)
def problem_38():
cur_max = 0
cur_value = 0
    for x in range(1, 99999):
        if x % 10000 == 0:
            print(x)
        value = concat(x)
        if value is not False:
if value > cur_max:
cur_max = value
cur_value = x
print(cur_value, cur_max)
return (cur_value, cur_max)
problem_38()
|
Vincenzo Nibali (Astana) consolidated his lead at the Giro d’Italia with victory in stage 18’s mountain time trial. In the process, the Sicilian put a huge two minutes and 36 seconds into closest rival Cadel Evans (BMC), and now holds a lead of four minutes and two seconds over him in the General Classification.
The stage win, Nibali’s first this Giro, confirms him as comfortably the strongest rider of the race, and it will take a dramatic last few days for anyone to take the pink jersey from him now.
The battle for the podium looks set for a more exciting finale, with Rigoberto Uran (Sky), who finished sixth today, in third at four minutes 12 seconds down, and Michele Scarponi (Lampre), who finished fourth, within touching distance at five minutes 14 seconds.
Another exciting battle is that in the young riders classification. It has been nip and tuck for a while now between Rafal Majka (Saxo-Tinkoff) and Carlos Betancur (Ag2r), and both put in very impressive times today, with Majka fifth and Betancur seventh. The seven seconds gained by Majka is enough to see him usurp the white jersey, and the Pole also moves to sixth overall on the GC.
Second place on the day, at nearly a minute down, went to Samuel Sanchez (Euskaltel), who looked for a while as though he might take the stage win, but once Nibali’s time checks started to come through it was clear whose hands the win was in.
Sanchez does, however, move into the top 10, and may yet improve his position further, with his form evidently improving. Ahead of him on the stage had been Damiano Caruso (Cannondale), who looked to be taking a much-needed first Giro stage win for his team, but was pushed down into third.
Stage eight time trial winner Alex Dowsett (Movistar) suffered a mechanical issue with his time trial bike shortly after his start, and was forced to swap to a road machine, losing valuable time and momentum. The Briton eventually finished in 90th place, 5-27 down on Nibali.
Fellow British rider Steve Cummings (BMC) put down an early fast time, and came 26th on the stage, just one place and three seconds behind team-mate Evans.
Tomorrow sees another potentially decisive stage in the mountains. Hopefully we will see some aggressive racing, and a route not heavily revised by the weather conditions. |
# @Author: Manuel Rodriguez <valle>
# @Date: 14-Jul-2017
# @Email: [email protected]
# @Filename: pagenavigations.py
# @Last modified by: valle
# @Last modified time: 13-Aug-2017
# @License: Apache license version 2.0
from kivy.uix.relativelayout import RelativeLayout
from kivy.uix.floatlayout import FloatLayout
from kivy.properties import (StringProperty, ListProperty, NumericProperty,
ObjectProperty, DictProperty)
from kivy.animation import Animation
from kivy.lang import Builder
import components.resources as res
Builder.load_file(res.get_kv("pagenavigations"))
class MainPage(RelativeLayout):
title = StringProperty('')
title_bgColor = StringProperty("#ffffff")
page_manager = ObjectProperty(None)
show = ObjectProperty(None)
def __init__(self, **kargs):
super(MainPage, self).__init__(**kargs)
def add_widget(self, widget):
if len(self.children) < 1:
super(MainPage, self).add_widget(widget)
else:
self.content_page.add_widget(widget)
class Page(RelativeLayout):
title = StringProperty('')
title_bgColor = StringProperty("#ffffff")
id_page = StringProperty("")
bgColor = StringProperty("#ffffff")
show = ObjectProperty(None)
def add_widget(self, widget):
if len(self.children) < 1:
super(Page, self).add_widget(widget)
else:
self.content_page.add_widget(widget)
    def collide_point(self, x, y):
        return (self.x < x < self.x + self.width) and \
               (self.y < y < self.y + self.height)
def on_touch_down(self, touch, *args):
super(Page, self).on_touch_down(touch)
if self.collide_point(touch.x, touch.y):
return True
class PageManager(FloatLayout):
pages = DictProperty({})
stack_pages = ListProperty([])
bgColor = StringProperty('#FFFFFF')
def __init__(self, **kargs):
super(PageManager, self).__init__(**kargs)
def add_widget(self, widget):
widget.page_manager = self
if self.__esPage__(widget, MainPage):
self.stack_pages.append(widget)
elif self.__esPage__(widget, Page):
widget.bind(id_page=self.on_id_pages)
super(PageManager,self).add_widget(widget)
def on_width(self, w, val):
for child in self.pages.values():
child.pos = val +10, 0
def on_id_pages(self, w, val):
self.pages[val] = w
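    # Bring the requested page to the front and slide it in from the right,
    # pushing it onto the navigation stack.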
def navigate(self, nav):
if nav in self.pages:
w = self.pages[nav]
self.stack_pages.append(self.pages[nav])
self.remove_widget(w)
self.add_widget(w)
ai = Animation(x=0, duration=.1)
ai.start(w)
if w.show:
w.show(self)
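    # Pop the top page off the stack and slide it out to the right.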
    def back_page(self):
        if not self.stack_pages:
            return
        w = self.stack_pages.pop()
        ai = Animation(x=self.width+10, duration=.1)
        ai.start(w)
def __esPage__(self, widget, clase):
esPage = type(widget) == clase
for base in widget.__class__.__bases__:
esPage = esPage or (base == clase)
return esPage
|
SUNNYVALE, CA--(Marketwire - March 8, 2011) - Juniper Networks (NYSE: JNPR) today confirmed its Intrusion Detection and Prevention (IDP) security systems and Integrated Security Gateway (ISG) firewall/virtual private network (VPN) systems with IDP offer protection for new Microsoft vulnerabilities announced today.
Additional information about Juniper Networks security solutions can be found at http://www.juniper.net/products. More detailed information about all of the month's Microsoft vulnerabilities can be obtained from the Juniper Networks J-Security Center at http://www.juniper.net/security or from Microsoft at http://www.microsoft.com/technet/security/current.aspx. |
# Copyright 2017 Xiaomi, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from . import constant
from . import util
class TrainJob(object):
"""Class for train method.
A TrainJob instance provides variables getter and setter apis. After
specifying the necessary parameters, users can call start_run func to start
the train job.
"""
def __init__(self,
job_name,
module_name,
trainer_uri,
job_args=None,
cpu_limit=None,
gpu_limit=None,
memory_limit=None,
ps_count=None,
worker_count=None,
framework=None,
framework_version=None,
docker_image=None,
docker_command=None,
volume_type=None,
volume_path=None,
mount_path=None,
mount_read_only=None,
prepare_command=None,
finish_command=None,
node_selector_key=None,
node_selector_value=None):
"""Creates a new TrainJob with given definition.
The `job_name`, `module_name` and `trainer_uri` arguments must be provided
when the object is creating.
Args:
job_name: The name of specific job.
module_name: The name of module.
trainer_uri: The uri that save the source code of job.
"""
self.job_name = job_name
self.module_name = module_name
self.trainer_uri = trainer_uri
self.job_args = job_args
self.cpu_limit = cpu_limit
self.memory_limit = memory_limit
self.gpu_limit = gpu_limit
self.ps_count = ps_count
self.worker_count = worker_count
self.framework = framework
self.framework_version = framework_version
self.docker_image = docker_image
self.docker_command = docker_command
self.volume_type = volume_type
self.volume_path = volume_path
self.mount_path = mount_path
self.mount_read_only = mount_read_only
self.prepare_command = prepare_command
self.finish_command = finish_command
self.node_selector_key = node_selector_key
self.node_selector_value = node_selector_value
@property
def job_name(self):
return self._job_name
@job_name.setter
def job_name(self, value):
"""Function for setting job_name.
Args:
value: String type value that is going to be set to job_name. Which
cannot be empty.
Raises:
ValueError: If value is not str instance or empty.
"""
if not isinstance(value, str):
raise ValueError("job_name must be a string!")
if value == "":
raise ValueError("job_name cannot be None!")
    if not util.check_kube_resource_name_regex(value):
      raise ValueError("job_name must match {}.".format(
          util.kube_resource_name_regex))
self._job_name = value
@property
def module_name(self):
return self._module_name
@module_name.setter
def module_name(self, value):
"""Function for setting module_name.
Args:
value: String type value that is going to be set to module_name. Which
cannot be empty.
Raises:
ValueError: If value is not str instance or empty.
"""
if not isinstance(value, str):
raise ValueError("module_name must be a string!")
if value == "":
raise ValueError("module_name cannot be None!")
self._module_name = value
@property
def trainer_uri(self):
return self._trainer_uri
@trainer_uri.setter
def trainer_uri(self, value):
"""Function for setting trainer_uri.
Args:
value: String type value that is going to be set to trainer_uri. Which
cannot be empty.
Raises:
      ValueError: If value is not str instance.
"""
if not isinstance(value, str):
raise ValueError("trainer_uri must be a string!")
self._trainer_uri = value
@property
def job_args(self):
return self._job_args
@job_args.setter
def job_args(self, value):
"""Function for setting job_args.
Args:
value: The job arguments.
Raises:
ValueError: If value is not string instance.
"""
if value != None:
if not isinstance(value, str):
raise ValueError("job_args must be a string!")
self._job_args = value
@property
def cpu_limit(self):
return self._cpu_limit
@cpu_limit.setter
def cpu_limit(self, value):
"""Function for setting cpu_limit.
Args:
value: Cpu limit.
Raises:
ValueError: If value is not a positive number.
"""
if value != None:
if not isinstance(value, str):
raise ValueError("cpu_limit must be a string!")
if not value.replace(".", "", 1).isdigit():
raise ValueError("cpu_limit must be a number!")
digits = value.split(".")
if len(digits) == 2 and len(digits[1]) > constant.QUOTA_ACCURACY_PLACE:
        raise ValueError(
            "The value of cpu_limit must be accurate to two decimal places, for example: {}".format(
                round(
                    float(value), constant.QUOTA_ACCURACY_PLACE)))
self._cpu_limit = value
@property
def memory_limit(self):
return self._memory_limit
@memory_limit.setter
def memory_limit(self, value):
"""Function for setting memory_limit.
Args:
value: Memory limit.
Raises:
ValueError: Doesn't end with K, M or G.
"""
if value != None:
if not isinstance(value, str):
raise ValueError("memory_limit must be a string")
unit = value[-1:]
float_value = value[:-1]
if unit not in constant.CLOUDML_MEMORY_UNITS:
raise ValueError("memory_limit unit must be one of %s!" %
constant.CLOUDML_MEMORY_UNITS)
if not float_value.replace(".", "", 1).isdigit():
raise ValueError("memory_limit must be a number!")
digits = float_value.split(".")
if len(digits) == 2 and len(digits[1]) > constant.QUOTA_ACCURACY_PLACE:
        raise ValueError(
            "The value of memory_limit must be accurate to two decimal places, for example: {}".format(
                round(
                    float(float_value), constant.QUOTA_ACCURACY_PLACE)))
self._memory_limit = value
@property
def gpu_limit(self):
return self._gpu_limit
@gpu_limit.setter
def gpu_limit(self, value):
"""Function for setting gpu_limit.
Args:
value: GPU limit.
Raises:
ValueError: If value is not a positive number.
"""
if value != None:
if not (isinstance(value, int) and value > 0):
raise ValueError("gpu_limit must be a postive integer!")
self._gpu_limit = value
@property
def ps_count(self):
return self._ps_count
@ps_count.setter
def ps_count(self, value):
"""Function for setting ps_count.
Args:
value: TensorFlow PS count.
Raises:
ValueError: If value is not a positive number.
"""
if value != None:
if not (isinstance(value, int) and value > 0):
raise ValueError("ps_count must be a positive integer!")
self._ps_count = value
@property
def worker_count(self):
return self._worker_count
@worker_count.setter
def worker_count(self, value):
"""Function for setting worker_count.
Args:
value: TensorFlow worker count.
Raises:
ValueError: If value is not a positive number.
"""
if value != None:
if not (isinstance(value, int) and value > 0):
raise ValueError("worker_count must be a positive integer!")
self._worker_count = value
@property
def framework(self):
return self._framework
@framework.setter
def framework(self, value):
"""Function for setting framework.
Args:
value: The framework.
Raises:
ValueError: If value is not string instance.
"""
if value != None:
if not isinstance(value, str):
raise ValueError("Must be a string!")
self._framework = value
@property
def framework_version(self):
return self._framework_version
@framework_version.setter
def framework_version(self, value):
"""Function for setting version of framework.
Args:
value: The version of framework.
Raises:
ValueError: If value is not string instance.
"""
if value != None:
if not isinstance(value, str):
raise ValueError("Must be a string!")
self._framework_version = value
@property
def docker_image(self):
return self._docker_image
@docker_image.setter
def docker_image(self, value):
"""Function for setting docker_image.
Args:
value: The docker_image.
Raises:
ValueError: If value is not string instance.
"""
if value != None:
if not isinstance(value, str):
raise ValueError("Must be a string!")
self._docker_image = value
@property
def docker_command(self):
return self._docker_command
@docker_command.setter
def docker_command(self, value):
"""Function for setting docker_command.
Args:
value: The docker_command.
Raises:
ValueError: If value is not string instance.
"""
if value != None:
if not isinstance(value, str):
raise ValueError("Must be a string!")
self._docker_command = value
@property
def volume_type(self):
return self._volume_type
@volume_type.setter
def volume_type(self, value):
"""Function for set.
Args:
value: String value.
Raises:
ValueError: If value is not str instance or empty.
"""
if value == "":
raise ValueError("Volume type can not be None!")
self._volume_type = value
@property
def volume_path(self):
return self._volume_path
@volume_path.setter
def volume_path(self, value):
"""Function for set.
Args:
value: String value.
Raises:
ValueError: If value is not str instance or empty.
"""
if value == "":
raise ValueError("Volume path can not be None!")
self._volume_path = value
@property
def mount_path(self):
return self._mount_path
@mount_path.setter
def mount_path(self, value):
"""Function for set.
Args:
value: String value.
Raises:
ValueError: If value is not str instance or empty.
"""
if value == "":
raise ValueError("Mount path can not be None!")
self._mount_path = value
@property
def mount_read_only(self):
return self._mount_read_only
@mount_read_only.setter
def mount_read_only(self, value):
"""Function for set.
Args:
value: Boolean value.
Raises:
ValueError: If value is not boolean instance or empty.
"""
    if value is not None and not isinstance(value, bool):
      raise ValueError("Mount read only should be boolean!")
self._mount_read_only = value
@property
def prepare_command(self):
return self._prepare_command
@prepare_command.setter
def prepare_command(self, value):
"""Function for set prepare_command.
Args:
value: String value.
Raises:
ValueError: If value is not string instance or empty.
"""
if value == "":
raise ValueError("Prepare command can not be None!")
self._prepare_command = value
@property
def finish_command(self):
return self._finish_command
@finish_command.setter
def finish_command(self, value):
"""Function for set finish_command.
Args:
value: String value.
Raises:
ValueError: If value is not string instance or empty.
"""
if value == "":
raise ValueError("Finish command can not be None!")
self._finish_command = value
@property
def node_selector_key(self):
return self._node_selector_key
@node_selector_key.setter
def node_selector_key(self, value):
"""Function for set node_selector_key.
Args:
value: String value.
Raises:
ValueError: If value is not string instance or empty.
"""
if value == "":
raise ValueError("Node selector key can not be None!")
self._node_selector_key = value
@property
def node_selector_value(self):
return self._node_selector_value
@node_selector_value.setter
def node_selector_value(self, value):
"""Function for set node_selector_value.
Args:
value: String value.
Raises:
ValueError: If value is not string instance or empty.
"""
if value == "":
raise ValueError("Node selector value can not be None!")
self._node_selector_value = value
def get_json_data(self):
"""Get the needed train job data after setting necessary varibles.
Returns:
data: The json data which is necessary for the train job.
Raises:
ValueError: If endpoint is not a string starting with `http://`.
If _job_name, _module_name or _trainer_uri is empty.
"""
data = {
"job_name": self._job_name,
"module_name": self._module_name,
"trainer_uri": self._trainer_uri,
}
if self._job_args is not None:
data["job_args"] = self._job_args
if self._cpu_limit is not None:
data["cpu_limit"] = self._cpu_limit
if self._memory_limit is not None:
data["memory_limit"] = self._memory_limit
if self._gpu_limit is not None:
data["gpu_limit"] = self._gpu_limit
if self._ps_count is not None:
data["ps_count"] = self._ps_count
if self._worker_count is not None:
data["worker_count"] = self._worker_count
if self._docker_image is not None:
data["docker_image"] = self._docker_image
if self._docker_command is not None:
data["docker_command"] = self._docker_command
if self._framework is not None:
data["framework"] = self._framework
if self._framework_version is not None:
data["framework_version"] = self._framework_version
if self._volume_type is not None:
data["volume_type"] = self._volume_type
if self._volume_path is not None:
data["volume_path"] = self._volume_path
if self._mount_path is not None:
data["mount_path"] = self._mount_path
if self._mount_read_only is not None:
data["mount_read_only"] = self._mount_read_only
if self._prepare_command:
data["prepare_command"] = self._prepare_command
if self._finish_command:
data["finish_command"] = self._finish_command
if self._node_selector_key:
data["node_selector_key"] = self._node_selector_key
if self._node_selector_value:
data["node_selector_value"] = self._node_selector_value
return json.dumps(data)
|
When you want to choose an online accounting services firm, you may find the task quite a challenge. This is mainly because you are putting your company’s financials and trust in the hands of people you have not met in person. However, there are also some benefits of hiring an online accounting firm for your bookkeeping tasks. If you have decided that an online accounting firm will be right for you, there are a number of things you can do to ensure you find the right firm. To find the right accounting services firm, you should determine your needs. From here, it will be important to evaluate a number of factors to know whether the firm you want to work with will be good for you. You can find a good online accounting services firm by following the tips below.
What Accounting Services Do You Require?
Come up with a list of the bookkeeping services you need. Most small businesses have a difficult time dealing with accounts, and hence they need bookkeeping services. The main problem these businesses face is the high turnover of in-house accountants. The bookkeeping services are required, but not on a daily basis. If you don’t have a qualified accountant on your payroll, you may also want to hire a qualified online accounting service.
Decide how often you will have to do your bookkeeping or reconciliation. For example, depending on your business, you may need the services on a monthly basis. After this, find an online accounting firm that will be ready to customize a service package based on your needs. Professional online accounting firms will provide you with a contract that guarantees the services they promise.
If you would like to meet the individuals handling your bookkeeping in person, then working with an online accounting firm is not for you. However, if you are ready to lower your costs by hiring a virtual accounting firm, carefully consider its location. For example, if you are in the US, you will want to hire a firm located in the US. The firm you choose should have qualified, trained and certified bookkeepers who understand the accounting conventions of the country. Ideally, choose an accounting firm based in the country whose laws you follow in your accounting procedures.
You will come across a number of online accounting software packages. Find out whether the online accounting firm you want to work with is proficient in the software you already use for bookkeeping. If you do not use any software, ask the firm which ones it uses. It is crucial to ensure that the software the accounting firm uses is secure.
The above are some things to consider when choosing an online account services company. |
# -*- coding: utf-8 -*-
"""Set up custom error handlers."""
# standard library imports
# third-party imports
from flask import render_template, request, Response, current_app
# application imports
def init_error_handlers():
"""Custom error pages for the app."""
current_app.logger.info('Initialize error handling')
# pylint really hates these 'unused' decorated functions.
# In reality, they are callbacks for Flask
# pylint: disable=unused-variable
@current_app.errorhandler(404)
def not_found(error):
"""Custom 404 handler to return error page."""
current_app.logger.debug(error)
if len(request.form) > 0:
# Requests with form data are likely AJAX
return Response(None, 404)
return render_template('errors/404.html', http_error=True), 404
@current_app.errorhandler(403)
def forbidden(error):
"""Custom 404 handler to return error page."""
current_app.logger.debug(error)
return render_template('errors/403.html', http_error=True), 403
@current_app.errorhandler(401)
def unauthorized(error):
"""Custom 401 handler to return error page."""
current_app.logger.debug(error)
return render_template('errors/401.html', http_error=True), 401
@current_app.errorhandler(500)
def uhoh(error):
"""Custom 500 handler to return error page."""
current_app.logger.error(error)
return render_template('errors/500.html', http_error=True), 500
|
Rowan Atkinson returns to save the world, for the third time, as secret agent buffoon Johnny English.
His licence... renewed. His intelligence... restricted.
import random
import re
import os
import traceback
import urllib.parse
import importlib
def print_notice(s):
print('\033[92m'+s+'\033[0m\n')
def path_exists(path):
if not os.path.exists(path):
answer = input("That path doesn't exist. Create it? [y/n]").strip().lower()
if answer=='y':
os.makedirs(path)
return True
else:
return False
else:
return True
class Question(object):
def __init__(self, key, question, default, validation=None):
self.key = key
self.question = question
self.default = default
self.validation = validation
def get_default(self, values):
if callable(self.default):
return self.default(values)
else:
return self.default
def validate(self, value):
return self.validation is None or self.validation(value)
class Command(object):
questions = [
Question('DEBUG', 'Is this installation for development?', False),
Question('NUMBAS_PATH', 'Path of the Numbas compiler:','/srv/numbas/compiler/', validation=path_exists),
Question('DB_ENGINE', 'Which database engine are you using? (Common options: postgres, mysql, sqlite3)', lambda v: 'sqlite3' if v['DEBUG'] else 'mysql'),
Question('STATIC_ROOT', 'Where are static files stored?','/srv/numbas/static/', validation=path_exists),
Question('MEDIA_ROOT', 'Where are uploaded files stored?','/srv/numbas/media/', validation=path_exists),
Question('PREVIEW_PATH', 'Where are preview exams stored?','/srv/numbas/previews/', validation=path_exists),
Question('PREVIEW_URL', 'Base URL of previews:','/numbas-previews/'),
Question('PYTHON_EXEC', 'Python command:','python3'),
Question('SITE_TITLE', 'Title of the site:','Numbas'),
Question('ALLOW_REGISTRATION', 'Allow new users to register themselves?', True),
Question('DEFAULT_FROM_EMAIL', 'Address to send emails from:', ''),
]
db_questions = [
Question('DB_NAME', 'Name of the database:','numbas_editor'),
Question('DB_USER', 'Database user:', 'numbas_editor'),
Question('DB_PASSWORD', 'Database password:', ''),
Question('DB_HOST', 'Database host:', 'localhost'),
]
sqlite_template = """DATABASES = {{
'default': {{
'ENGINE': 'django.db.backends.{DB_ENGINE}',
'NAME': os.path.join(BASE_DIR, '{DB_NAME}'),
}}
}}"""
other_db_template = """DATABASES = {{
'default': {{
'ENGINE': 'django.db.backends.{DB_ENGINE}',
'NAME': '{DB_NAME}',
'USER': '{DB_USER}',
'PASSWORD': '{DB_PASSWORD}',
'HOST': '{DB_HOST}',
}}
}}"""
def __init__(self):
self.written_files = []
def handle(self):
print_notice("This script will configure the Numbas editor up to a point where you can open it in a web browser, based on your answers to the following questions.")
self.get_values()
self.write_files()
import numbas.settings
importlib.reload(numbas.settings)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "numbas.settings")
print_notice("Now we'll check that everything works properly")
self.run_management_command('check')
if self.get_input('Would you like to automatically set up the database now?',True):
self.run_management_command('migrate')
import django
django.setup()
self.setup_site()
from django.contrib.auth.models import User
superusers = User.objects.filter(is_superuser=True)
if superusers.exists():
if self.get_input("There's already at least one admin user.\nWould you like to create another admin user now?",False):
self.run_management_command('createsuperuser')
else:
if self.get_input('Would you like to create an admin user now?',True):
self.run_management_command('createsuperuser')
print_notice("Done!")
if self.values['DEBUG']:
print_notice("Run\n python manage.py runserver\nto start a development server at http://localhost:8000.")
else:
self.run_management_command('collectstatic')
print_notice("The Numbas editor is now set up. Once you've configured your web server, it'll be ready to use at http://{}".format(self.domain))
def setup_site(self):
from django.contrib.sites.models import Site
        try:
            # Site.objects.first() returns None (not DoesNotExist) when no Site exists yet
            domain = Site.objects.first().domain
        except (Site.DoesNotExist, AttributeError):
            domain = 'numbas.example.com'
domain = self.get_input('What domain will the site be accessed from?', domain)
try:
url = urllib.parse.urlparse(domain)
self.domain = url.netloc if url.netloc else domain
except ValueError:
self.domain = domain
s, created = Site.objects.get_or_create(domain=self.domain)
s.name = self.values['SITE_TITLE']
self.rvalues['SITE_ID'] = str(s.id)
s.save()
self.sub_settings(confirm_overwrite=False)
import numbas.settings
importlib.reload(numbas.settings)
def get_values(self):
self.values = {}
self.values['SECRET_KEY'] =''.join(random.SystemRandom().choice('abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)') for i in range(50))
self.values['PWD'] = os.getcwd()
for question in self.questions:
self.get_value(question)
if question.key=='DB_ENGINE':
if 'sqlite' not in self.values[question.key]:
for question in self.db_questions:
self.get_value(question)
else:
self.get_value(Question('DB_NAME', 'Name of the database file:','db.sqlite3'))
def enrep(value):
rep = repr(value)
if isinstance(value,str):
rep = rep[1:-1]
return rep
self.values['SITE_ID'] = self.get_default_value(Question('SITE_ID','','1'))
self.rvalues = {key: enrep(value) for key, value in self.values.items()}
def get_default_value(self, question):
default = question.get_default(self.values)
if os.path.exists('numbas/settings.py'):
import numbas.settings
try:
if question.key=='DB_ENGINE':
default = numbas.settings.DATABASES['default']['ENGINE'].replace('django.db.backends.', '')
elif question.key[:3]=='DB_' and question.key[3:] in numbas.settings.DATABASES['default']:
default = numbas.settings.DATABASES['default'][question.key[3:]]
else:
try:
default = getattr(numbas.settings, question.key)
except AttributeError:
default = numbas.settings.GLOBAL_SETTINGS[question.key]
if isinstance(default,list):
default = default[0] if len(default)==1 else ''
except (AttributeError,KeyError):
pass
return default
def get_value(self, question):
self.values[question.key] = self.get_input(question.question, self.get_default_value(question), question.validation)
def write_files(self):
self.sub_settings()
if not self.values['DEBUG']:
self.sub_file('web/django.wsgi',[ (r"sys.path.append\('(.*?)'\)", 'PWD') ])
index_subs = [
(r"Welcome to (the Numbas editor)", 'SITE_TITLE'),
]
self.sub_file('editor/templates/index_message.html', index_subs)
self.sub_file('editor/templates/terms_of_use_content.html', [])
self.sub_file('editor/templates/privacy_policy_content.html', [])
if len(self.written_files):
print_notice("The following files have been written. You should look at them now to see if you need to make any more changes.")
for f in self.written_files:
print_notice(' * '+f)
print('')
def sub_settings(self, confirm_overwrite=True):
def set_database(m, rvalues):
template = self.sqlite_template if 'sqlite' in rvalues['DB_ENGINE'] else self.other_db_template
return template.format(**rvalues)
settings_subs = [
(r"^DEBUG = (True)", 'DEBUG'),
(r"'NUMBAS_PATH': '(.*?)',", 'NUMBAS_PATH'),
(r"^STATIC_ROOT = '(static/)'", 'STATIC_ROOT'),
(r"^MEDIA_ROOT = '(media/)'", 'MEDIA_ROOT'),
(r"'PREVIEW_PATH': '(.*?)'", 'PREVIEW_PATH'),
(r"'PREVIEW_URL': '(.*?)',", 'PREVIEW_URL'),
(r"'PYTHON_EXEC': '(.*?)',", 'PYTHON_EXEC'),
(r"^SITE_TITLE = '(.*?)'", 'SITE_TITLE'),
(r"^DATABASES = {.*?^}", set_database),
(r"^SECRET_KEY = '(.*?)'", 'SECRET_KEY'),
(r"^ALLOW_REGISTRATION = (True|False)", 'ALLOW_REGISTRATION'),
(r"^DEFAULT_FROM_EMAIL = '(.*?)'", 'DEFAULT_FROM_EMAIL'),
(r"^SITE_ID = (\d+)", 'SITE_ID'),
]
self.sub_file('numbas/settings.py', settings_subs, confirm_overwrite)
def sub_file(self, fname, subs, confirm_overwrite=True):
if os.path.exists(fname) and confirm_overwrite:
overwrite = self.get_input("{} already exists. Overwrite it?".format(fname),True)
if not overwrite:
return
self.written_files.append(fname)
with open(fname+'.dist') as f:
text = f.read()
for pattern, key in subs:
pattern = re.compile(pattern, re.MULTILINE | re.DOTALL)
if callable(key):
text = self.sub_fn(text, pattern, key)
else:
text = self.sub(text,pattern,self.rvalues[key])
with open(fname,'w') as f:
f.write(text)
print("Wrote",fname)
def sub_fn(self, source, pattern, fn):
m = pattern.search(source)
if not m:
raise Exception("Didn't find {}".format(pattern.pattern))
start, end = m.span(0)
out = fn(m, self.rvalues)
return source[:start]+out+source[end:]
def sub(self, source, pattern, value):
def fix(m):
t = m.group(0)
start, end = m.span(1)
ts,te = m.span(0)
start -= ts
end -= ts
return t[:start]+value+t[end:]
if not pattern.search(source):
raise Exception("Didn't find {}".format(pattern.pattern))
return pattern.sub(fix, source)
def run_management_command(self, *args):
from django.core.management import ManagementUtility
args = ['manage.py'] + list(args)
utility = ManagementUtility(args)
try:
utility.execute()
except SystemExit:
pass
print('')
def get_input(self, question, default, validation=None):
v = None
try:
while v is None:
if isinstance(default,bool):
if default is not None:
q = question+(' [Y/n]' if default else ' [y/N]')
else:
q = question
t = input(q+' ').strip().lower()
if t=='' and default is not None:
v = default
if t=='y':
v = True
if t=='n':
v = False
else:
if default is not None:
q = "{} ['{}']".format(question,str(default))
else:
q = question
t = input(q+' ').strip()
if t=='' and default is not None:
v = default
if t:
v = t
if validation is not None and not validation(v):
v = None
except KeyboardInterrupt:
print('')
raise SystemExit
print('')
return v
if __name__ == '__main__':
command = Command()
try:
command.handle()
except Exception as e:
traceback.print_exc()
print_notice("The setup script failed. Look at the error message above for a description of why.")
|
By Nancy Young in Blogging. Updated on July 27, 2016 .
For those who are serious about blogging, or who just cannot seem to get their blogs to take off, sometimes all you need is some first-hand tips from the experts themselves. We hear you, and since there are plenty of such ebooks lying around online, we’ve done the legwork and collected 12 eBooks packed with blogging experience, knowledge and tips: the result of years of trial and error from those who have been there and done that.
Here you’ll find free ebooks on how to start blogging, how to become a successful writer, how to make money from blogging and drive traffic, how to organize your blog and create strong visuals, and more. So, scroll down and enjoy!
Editor’s note: 7 of the books below are available absolutely free; the links you see are the direct download links. The last 5 books require some form of registration or at least an email address to download.
This book is a step-by-step guide on how to setup your blog and make it successful. The ebook contains 7 chapters to help you set up, maintain, monetise, and promote your blog along with helpful resources and tools for bloggers. Ogi Djuraskovic is behind FirstSiteGuide, and a web enthusiast who helps people start their own blog/site. Other team members of FirstSiteGuide who took part in writing the ebook are experts in hosting, marketing, SEO and design.
365 Writing Prompts was written by The Daily Post team who do all kind of jobs (from blogging to coding) and have the goal of helping people start their own blogs. Get this on your reader, tablet or laptop and get inspirational prompts on each day of the year to write. For example, for June 20, you will get, "Moment of kindness. Describe a moment of kindness, between you and someone else — loved one or complete stranger.” Also available in Spanish, French and Indonesian.
If you feel like a complete dummy in blogging, this ebook will teach you a lot of new things in simple language. This ebook is helpful for amateurs and pros who want to integrate their blogs with social media and optimize them for search engines. Susannah Gardner is a writer and editor who helps people say exactly what they want to say, and the co-author, Shane Birley, is a technologist (with a literature degree) who helps people build stuff online.
Seth Godin is an American author, marketer and speaker who doesn’t really need an introduction. Despite the title, Incomplete Guide to Blogs and the New Web, this ebook is not going to tell you how to set up your blog to sell your products. It’s more about the influence of blogging on your future, career, ideas and the whole lifestyle that comes with it.
This 10-page ebook was written by web designer and developer Heather Jones, who has been creating animated graphics for over a decade. The Blogger’s Workbook is her first ebook with first-hand tips. In it you’ll find information on how to start a blog, add proper categories, schedule your articles, handle guest posts and more.
As we live in a world of information overload, to create a really awesome blog you need to not only write good copy, but also frame your articles with proper images and other visual elements. This 60-page ebook will tell you how to create high-quality images, photos, graphs, infographics and other design elements for your blog.
Forty agency has been designing remarkable user experiences for over a decade. If you have a blog, but have no visits then this is the guide you need. This awesome 15-page Pocket Guide to SEO will tell and show you everything you need to know about SEO: proper keywords, headlines, descriptions and more.
Copyblogger is a software and training organization which provides expert advice, exceptional products and smart copywriting. This ebook is an in-depth guide on how to write killer headlines, touching on mistakes to avoid, working formulas, templates you can use and more. It is part of a 14-book how-to series on everything to do with content marketing, which you can get if you register on the Copyblogger site.
Wishpond is a company which helps people to create, publish and track their online marketing campaigns. They have worked with Facebook, Twitter, Mailchimp just to name a few brands. This ebook will help you to understand all the benefits of having great content on your site. You will be taught how to start blogging, find your target audience, write good content, promote your blog, measure results and more.
Crystal Paine is a wife, mom of three, and speaker. In 2007 she founded MoneySavingMom.com, which has over a million unique visitors and around 4 million pageviews per month. If you have a blog and want to make money with it, you’re going to need this ebook. It shares insider tips and tricks on how to monetize your blog, as well as useful resources for blogging.
Available with Noisetrade account, Facebook login or email address.
There are thousands of blogs out there around the Web, so it’s really difficult to create one that stands out from the rest. This ebook is packed with 15 years of experience and 14 sections of advice on how to find writing ideas, create competitive content, overcome writer’s block and draw visitors to your site.
Here is an ebook that will tell you how to grow your email list up to 10,000 subscribers in just 12 months. Glen Allsop moved to South Africa when he was 18, to be a social media manager for huge brands like Land Rover, Nissan and Hewlett Packard. These days, he is a successful blogger who runs his own marketing company and helps people make a living online. |