from __future__ import absolute_import

import pytest
import os.path

import jss
from jss import JSS, QuerySet
from xml.etree import ElementTree
from jss.exceptions import GetError


def mock_expanduser(path):
    return path


class TestJSS(object):
    def test_construct_without_jssprefs(self, jss_prefs_dict):
        j = JSS(url=jss_prefs_dict['jss_url'],
                user=jss_prefs_dict['jss_user'],
                password=jss_prefs_dict['jss_password'])
        assert j is not None

    def test_construct_with_jssprefs(self, jss_prefs, monkeypatch, tmpdir):
        def mock_expanduser(path):
            return tmpdir.join(path.replace('~', 'HOME'))

        monkeypatch.setattr(os.path, 'expanduser', mock_expanduser)
        # monkeypatch.setattr(os.path, 'startswith', lambda p: False)
        j = JSS(jss_prefs=jss_prefs)
        assert j is not None

    def test_trailing_slash_removed(self, jss_prefs_dict):
        j = JSS(url=jss_prefs_dict['jss_url'] + '/')
        assert j.base_url[-1] != '/'

    def test_get_packages(self, j):
        result = j.Package()
        assert result is not None
        assert isinstance(result, QuerySet)

    def test_scrape(self, j):
        # scrape_url = '/'
        scrape_url = 'legacy/packages.html?id=-1&o=c'
        r = j.scrape(scrape_url)
        assert r is not None
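These tests lean on fixtures (jss_prefs_dict, jss_prefs, j) that are not defined in the file and presumably come from a conftest.py. Below is a minimal sketch of what such a conftest might contain; the URL and credentials are placeholders and the fixture bodies are assumptions, not the project's real fixtures (the jss_prefs fixture is omitted because its construction is not visible here).

import pytest
from jss import JSS


@pytest.fixture
def jss_prefs_dict():
    # Assumed structure, inferred from the keys the tests read.
    return {
        'jss_url': 'https://jss.example.com:8443',
        'jss_user': 'api_user',
        'jss_password': 'secret',
    }


@pytest.fixture
def j(jss_prefs_dict):
    # A client wired to a test server; the values above are placeholders.
    return JSS(url=jss_prefs_dict['jss_url'],
               user=jss_prefs_dict['jss_user'],
               password=jss_prefs_dict['jss_password'])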
Egis Rail prides itself on being one of the European leaders in providing professional services in railway engineering, covering light rail, metros and railways (including high-speed, commuter and suburban railways). Egis' areas of intervention in the railway sector are multiple: high-speed lines, conventional lines, urban and peri-urban lines, tourist lines, railway nodes, railway stations and interchanges, platforms, terminal freight facilities, technical rail-road projects, and maritime and fluvial works. In addition, Egis' interventions in the railway sector cover both the creation of new infrastructure and the development of existing infrastructure (operation, safety, modernization, regeneration). Egis provides design and supervision of railroad equipment works (track, catenaries, signalling, telecommunications) for new high-speed lines and for conventional lines in operation. Our extensive experience within the French domestic market gives us a solid basis from which to export our expertise effectively throughout the world and to meet the expectations of international clients. In 50 years, Egis Rail has been involved in the design and construction of more than 750 km of metro lines in France and throughout the world, and in the engineering of more than 12,000 km of railways, including high speed. Egis has made a vital contribution to the revival and modernization of the tramway, taking part in the success of iconic projects and delivering engineering and project management for over 800 km of tramways around the world. The technical skills offered by Egis have the advantage of covering all rail business activities through a global and coherent approach to rail, allowing project optimization and better management of the many interfaces. They cover, in particular, transport planning, modeling and socio-economics, contractual engineering and project assembly, project management and construction management, the various technical trades (civil engineering, railway equipment, telecom and signaling, catenary and traction power, rolling stock, etc.), safety, maintenance and repair, and maintenance and operational planning. Egis can also support public and private clients in developing and implementing various forms of projects such as D&B, EPC, PPP and turnkey projects.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import logging

from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy

from horizon import exceptions
from horizon import tables

from openstack_dashboard import api


LOG = logging.getLogger(__name__)


class CreateNetworkProfile(tables.LinkAction):
    name = "create"
    verbose_name = _("Create Network Profile")
    url = "horizon:router:nexus1000v:create_network_profile"
    classes = ("ajax-modal",)
    icon = "plus"


class DeleteNetworkProfile(tables.DeleteAction):
    @staticmethod
    def action_present(count):
        return ungettext_lazy(
            u"Delete Network Profile",
            u"Delete Network Profiles",
            count
        )

    @staticmethod
    def action_past(count):
        return ungettext_lazy(
            u"Deleted Network Profile",
            u"Deleted Network Profiles",
            count
        )

    def delete(self, request, obj_id):
        try:
            api.neutron.profile_delete(request, obj_id)
        except Exception:
            msg = _('Failed to delete network profile (%s).') % obj_id
            LOG.info(msg)
            redirect = reverse('horizon:router:nexus1000v:index')
            exceptions.handle(request, msg, redirect=redirect)


class EditNetworkProfile(tables.LinkAction):
    name = "update"
    verbose_name = _("Edit Network Profile")
    url = "horizon:router:nexus1000v:update_network_profile"
    classes = ("ajax-modal",)
    icon = "pencil"


class NetworkProfile(tables.DataTable):
    id = tables.Column("id", hidden=True)
    name = tables.Column("name", verbose_name=_("Network Profile"))
    project = tables.Column("project_name", verbose_name=_("Project"))
    segment_type = tables.Column("segment_type",
                                 verbose_name=_("Segment Type"))
    sub_type = tables.Column("sub_type", verbose_name=_("Sub Type"))
    segment_range = tables.Column("segment_range",
                                  verbose_name=_("Segment Range"))
    multicast_ip_range = tables.Column("multicast_ip_range",
                                       verbose_name=_("Multicast IP Range"))
    physical_network = tables.Column("physical_network",
                                     verbose_name=_("Physical Network Name"))

    class Meta(object):
        name = "network_profile"
        verbose_name = _("Network Profile")
        table_actions = (CreateNetworkProfile, DeleteNetworkProfile,)
        row_actions = (EditNetworkProfile, DeleteNetworkProfile,)


class PolicyProfile(tables.DataTable):
    id = tables.Column("id", hidden=True)
    name = tables.Column("name", verbose_name=_("Policy Profile"))
    project = tables.Column("project_name", verbose_name=_("Project"))

    class Meta(object):
        name = "policy_profile"
        verbose_name = _("Policy Profile")
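For context, a DataTable like NetworkProfile is normally exposed through a Horizon view. The sketch below is a rough, hypothetical wiring: the template path and the api.neutron.profile_list call are assumptions (the module above only shows profile_delete), so treat it as an illustration rather than the actual panel code.

from django.utils.translation import ugettext_lazy as _

from horizon import exceptions
from horizon import tables

from openstack_dashboard import api


class IndexView(tables.DataTableView):
    # NetworkProfile is the DataTable defined in the module above.
    table_class = NetworkProfile
    template_name = 'router/nexus1000v/index.html'  # assumed template path

    def get_data(self):
        try:
            # Assumed list helper; only profile_delete appears above.
            return api.neutron.profile_list(self.request, 'network')
        except Exception:
            exceptions.handle(self.request,
                              _("Unable to retrieve network profiles."))
            return []

In practice the table and the view usually live in separate tables.py and views.py modules inside the panel package.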
And just like that, fall has come! Hello friends. This is my first post since the middle of the summer! I found myself so focused the last two months on 1) enjoying every bit of summer while Madison was home with me and 2) preparing her for school and a new chapter for our family. This post is all about a few quick, easy and inexpensive steps I'm using to update my home for fall, making it feel warm and cozy for the season. My favorite season!

Being a stay-at-home mom does affect how much and where we spend our money; it requires me to be a little creative when I would like to spend on things that are a want instead of a need. So while I would love to head over to Homesense (my fave!) and load up on all new fall decor, it certainly isn't realistic. As I've always said in my home decor and party planning posts, a little creativity and some DIY go a long way. We're also in the middle of some home updating, and anyone who's lived through that knows how chaotic and messy it can be. Bringing in more stuff doesn't exactly help our situation as we try to redo and reorganize parts of our home. But with just a few easy elements I can still bring fall into our home without too much planning or shopping!

Whether you like clean neutrals, warm browns with rustic vibes or classic red plaid, find the colors, vibe and theme you'd like to incorporate into your home for fall. You can focus on one room (dining or living room) or your whole main floor, whatever you like. Little details around the entryway, coffee table or mantle in your choice of fall colors easily make your home feel ready for the season.

Green eucalyptus, yellow daisies, purple dahlias, orange chrysanthemums, white pumpkins and colorful gourds: these are just some of the fall-friendly flowers and edibles that you can find everywhere. Pumpkin patches, farmers markets and grocery stores are full of them right now, so load up on some and watch how quickly your home takes on that fall feel.

The easiest way to do this is with a seasonal scented candle. Create a lush and warm environment by lighting a pumpkin spice, pumpkin pie or apple cinnamon scented candle. It fills the room with everyone's favorite fall scents and brings the calm vibes at the same time! Bonus: it acts as decor too.

Even if expenses are tight or time is of the essence, there are always little ways to ring in a new season. You have to do what's best for your family and what best fits your lifestyle, but there are always little ways and small details to make your house a home! So tell me, guys, how are you welcoming the fall season? Apple picking, pumpkin carving? Tell me below, I'd love to hear from you!
#!python # Create 3D mesh files. # Huihui Weng (Geoazur, 2018) # # ====================================================================== from __future__ import print_function import numpy import os import sys # Please set up the path for CUBIT (or Trelis) and GEOCUBIT in your system. # Instead, you could set up the path from ~/.bashrc sys.path.append('/opt/linux64/Trelis-14.0/bin/') sys.path.append('/opt/linux64/specfem3d/CUBIT_GEOCUBIT/') import cubit print("Init CUBIT...") try: # print all the information to the screen. cubit.init([""]) # stop all the outout information and warnings to the screen. #cubit.init(["-noecho","-nojournal","-information=off","-warning=off"]) except: pass from geocubitlib import absorbing_boundary from geocubitlib import save_fault_nodes_elements from geocubitlib import cubit2specfem3d #===================================== # Set up parameters === #===================================== # Please set up the mesh parametes in this section # If DEBUG is True, then this script only create CUBIT script, otherwise create CUBIT script and mesh file. # It is recommended to debug this script by GUI of CUBIT before to create Specfem3D mesh. #DEBUG = True DEBUG = False # The radius of the semi-sphere (km) R_model = 100 # The radius of the Cylinder that cut through both the free surface and fault (km) R_cylinder = 10 work_dir = os.getcwd() # If Interface is False, then use planar fault (given by the strike, dip, and dep). Otherwise run the scripts in ./Interface and give the path of the created interface (in the directory ./output) # If Topography is False, then use planar surface. Otherwise run the scripts in ./Surface and give the path of the created planarsur (in the directory ./output) Interface = False Topography = False Int_name = work_dir + "/output/interface_sigma_1_inc_12.sat" Top_name = work_dir + "/output/surface_sigma_1_inc_12.sat" Strike = 230 Dip = 70 Dep = -5.7 # Uniform material properties. vp = 5770 # P wave speed (m/s) vs = 3330 # S wave speed (m/s) rho = 2705 # density (g/m^3) Q = 13 # The mesh size (km). Smaller grid size can better sample curved geometries. fine_size = 4 coarse_size = 8 # The mesh scheme: thex # Thex: firstly create a tetrahedral unstructured mesh, then convert into a hexahedral mesh (reduce the grid size by hal). This mesh scheme have good flexibility for curved geometries. # Noted that the final mesh is hexahedral mesh mesh_scheme = "thex" # The element type for hexahedral mesh: HEX8 or HEX27 (supported by Specfem3D) # Higer order nodes can be moved to curved geometry by defaut, if set Node Constraint ON. element_type = "HEX8" #element_type = "HEX27" # Set up the lower depth of seimogenic zone. The rupture can propogate to the free surface here. Lower_cutoff = -30 # The name of CUBIT script. One can run this script under the GUI of CUBIT for debuging. This python code will run this script without GUI. journalFile = "./output/Kumamoto.jou" # The name (prefix name) of created mesh directory for Specfem3D. The full name is the prefix name + features of fault and free surface. mesh_name = "Kumamoto" #===================================== #============================== # Main code === #============================== # There is no need to change anything below. If you need to change something, # please send me an email. I will try to make it more automatic. # print("Initial check...") # Initial check if(not os.path.isfile(Int_name) and Interface): print("The interface data does not exis!!! 
Please create it in ./Interface.") exit() elif(os.path.isfile(Int_name) and Interface): print("Using interface slab: ", Int_name) else: print("Using planar fault with strike: ", Strike, " dip: ", Dip, " depth(reference point): ", Dep) if(not os.path.isfile(Top_name) and Topography): print("The topography data does not exis!!! Please create it in ./Surface.") elif(os.path.isfile(Top_name) and Topography): print("Using topography: ", Top_name) else: print("Using planar topography.") # The name of output mesh file if(Interface and Topography): output_mesh = mesh_name + "_semisphere_curvedfault_curvedtopo" elif(not Interface and Topography): output_mesh = mesh_name + "_semisphere_planarfault" + "_strike_" + str(Strike) + "_dip_" + str(Dip) + "_depth_" + str(Dep) + "_curvedtopo" elif(Interface and not Topography): output_mesh = mesh_name + "_semisphere_curvedfault_planarsur" else: output_mesh = mesh_name + "_semisphere_planarfault" + "_strike_" + str(Strike) + "_dip_" + str(Dip) + "_depth_" + str(Dep) + "_planarsur" # Add the info of mesh scheme output_mesh = output_mesh + "_" + str(fine_size) + "_" + str(coarse_size) + "_" + element_type # Vertical length of cylinder L_cylinder = abs(2 * Lower_cutoff) # Create the journal file for debuging print("Create journal file...") j = open(journalFile, 'w') j.write("# Journal file formatting, etc.\n" + \ "# ----------------------------------------------------------------------\n" + \ "# Set units to SI.\n" + \ "# ----------------------------------------------------------------------\n" \ "${Units('si')}\n" + \ "# Reset geometry.\n" + \ "# ----------------------------------------------------------------------\n" \ "reset\n") j.write("# ----------------------------------------------------------------------\n" + \ "# Create a cylinder.\n" + \ "# ----------------------------------------------------------------------\n") j.write("create cylinder height {0} radius {1}\n".format("{"+str(L_cylinder)+"*km}",\ "{"+str(R_cylinder)+"*km}")) j.write("${idVol1=Id('volume')}\n") if(Interface): j.write("# ----------------------------------------------------------------------\n" + \ "# Import interface data.\n" + \ "# ----------------------------------------------------------------------\n") j.write("import Acis '%s'\n" % Int_name) j.write("${idInt=Id('surface')}\n") else: j.write("# ----------------------------------------------------------------------\n" + \ "# Create planar interface.\n" + \ "# ----------------------------------------------------------------------\n") j.write("create planar surface zplane\n") j.write("${idInt=Id('surface')}\n") j.write("rotate surface {idInt} about Y angle %f\n" % Dip) if(Strike != 0): j.write("rotate surface {idInt} about Z angle %f\n" % -Strike) j.write("surface {idInt} move z {%f*km}\n" % Dep) if(Topography): j.write("# ----------------------------------------------------------------------\n" + \ "# Import topography data\n" + \ "# ----------------------------------------------------------------------\n") j.write("import Acis '%s'\n" % Top_name) j.write("${idSur=Id('surface')}\n") else: j.write("# ----------------------------------------------------------------------\n" + \ "# Create planar free surface.\n" + \ "# ----------------------------------------------------------------------\n") j.write("create planar surface zplane\n") j.write("${idSur=Id('surface')}\n") j.write("# ----------------------------------------------------------------------\n" + \ "# Webcut blocks.\n" + \ "# 
----------------------------------------------------------------------\n") j.write("webcut volume {idVol1} with sheet extended from surface {idSur}\n") j.write("${idVol2=Id('volume')}\n") j.write("webcut volume {idVol2} with sheet extended from surface {idInt}\n") j.write("${idVol3=Id('volume')}\n") j.write("# ----------------------------------------------------------------------\n" + \ "# Find and name the fault surface.\n" + \ "# ----------------------------------------------------------------------\n") j.write("find surface overlap volume {idVol2} {idVol3}\n") j.write("${idF1=GroupMemberId('surf_overlap','surface',0)}\n") j.write("${idF2=GroupMemberId('surf_overlap','surface',1)}\n") j.write("surface {idF1} name 'fault1'\n") j.write("# ----------------------------------------------------------------------\n" + \ "# Create semi-sphere\n" + \ "# ----------------------------------------------------------------------\n") j.write("create sphere radius {%f *km}\n" % R_model) j.write("${idVol4=Id('volume')}\n") j.write("webcut volume {idVol4} with sheet extended from surface {idSur}\n") j.write("${idVol5=Id('volume')}\n") j.write("${idround=Id('surface')}\n") j.write("surface {idround} name 'spheresurf'\n") j.write("# ----------------------------------------------------------------------\n" + \ "# Substract the semi-spehere from the blocks that contain the fault\n" + \ "# ----------------------------------------------------------------------\n") j.write("subtract volume {idVol2} {idVol3} from volume {idVol5} keep\n") j.write("${idVol6=Id('volume')}\n") j.write("# ----------------------------------------------------------------------\n" + \ "# Delete unused blocks and surfaces.\n" + \ "# ----------------------------------------------------------------------\n") j.write("delete surface all\n") j.write("delete volume {idVol1} {idVol4} {idVol5} \n") j.write("# ----------------------------------------------------------------------\n" + \ "# imprint and merge.\n" + \ "# ----------------------------------------------------------------------\n") j.write("imprint all\n" + \ "merge all\n") j.write("# ----------------------------------------------------------------------\n" + \ "# Generate the mesh.\n" + \ "# ----------------------------------------------------------------------\n") if(mesh_scheme == "thex"): j.write("volume all scheme TetMesh\n") j.write("volume {idVol2} {idVol3} size {%f*km}\n" % fine_size) j.write("mesh volume {idVol2} \n") j.write("mesh volume {idVol3}\n") j.write("volume {idVol6} size {%f*km}\n" % coarse_size) j.write("mesh volume {idVol6} \n") j.write("THex Volume all\n") else: print("Error mesh scheme!") exit() j.write("# ----------------------------------------------------------------------\n" + \ "# Smooth mesh to improve quality.\n" + \ "# ----------------------------------------------------------------------\n") j.write("volume all smooth scheme condition number beta 2.0 cpu 4\n" + \ "smooth volume all\n") j.write("set unmerge Duplicate_mesh on\n") j.write("unmerge surface fault1 only\n") j.write("surface {idF2} name 'fault2'\n") j.write("# ----------------------------------------------------------------------\n" + \ "# Seperate nodes on fault.\n" + \ "# ----------------------------------------------------------------------\n") j.write("set node constraint off\n") j.write("node in surface fault1 move normal to surface fault1 distance {-0.01*m}\n") j.write("node in surface fault2 move normal to surface fault2 distance {-0.01*m}\n") j.write("compress all\n") j.write("set node 
constraint on\n") j.write("# End of file\n") j.close() if(DEBUG): exit() # ================================================== # Read the CUBIT journal and playback it. # ================================================== print("Playback journal file...") with open(journalFile) as f: content = f.readlines() for line in content: cubit.cmd(line) # ================================================== # Save the mesh to txt files # This part is revised from the code of Specfem3D # ================================================== print("") print("Convert mesh to Specfem-format...") os.system('mkdir -p MESH') ## fault surfaces (up/down) Au = [cubit.get_id_from_name("fault1")] Ad = [cubit.get_id_from_name("fault2")] ### Obtain the id of boundaries # I define the original sphere surface as spheresurf. After webcut, CUBIT renames the new-cutted surface by adding @A, @B ... SpheresurfID = [cubit.get_id_from_name("spheresurf@A")] # Find the surface ID for the free surface freesur_tolerance = 3e3 FreesurfID = [] list_surf=cubit.parse_cubit_list("surface","all") for k in list_surf: center_point = cubit.get_center_point("surface", k) if abs(center_point[2]) <= freesur_tolerance: FreesurfID.append(k) print(SpheresurfID,FreesurfID) # define blocks Vol_num = cubit.get_volume_count() for i in range(Vol_num): cubit.cmd('block {0} hex in vol {0}'.format(i+1)) cubit.cmd('block 1000 face in surface ' + str(list(SpheresurfID)).replace("["," ").replace("]"," ")) cubit.cmd('block 1000 name "face_semisphere"') cubit.cmd('block 1001 face in surface ' + str(list(FreesurfID)).replace("["," ").replace("]"," ")) cubit.cmd('block 1001 name "face_topo"') #### Define material properties for the 4 volumes ################ cubit.cmd('#### DEFINE MATERIAL PROPERTIES #######################') for i in range(Vol_num): cubit.cmd('block {0} name "elastic {0}" '.format(i+1)) # material region cubit.cmd('block {0} attribute count {1}'.format(i+1,6)) cubit.cmd('block {0} attribute index 1 1'.format(i+1)) cubit.cmd('block {0} attribute index 2 {1}'.format(i+1,vp)) # vp cubit.cmd('block {0} attribute index 3 {1}'.format(i+1,vs)) # vs cubit.cmd('block {0} attribute index 4 {1}'.format(i+1,rho)) # rho cubit.cmd('block {0} attribute index 5 {1}'.format(i+1,Q)) # Q flag (see constants.h: #IATTENUATION_ ... ) cubit.cmd('block {0} attribute index 6 0'.format(i+1)) # q flag (see constants.h: iattenuation_ ... ) #### Export to SPECFEM3D format using cubit2specfem3d.py of GEOCUBIT if(element_type == "HEX27"): cubit2specfem3d.export2SPECFEM3D('MESH',hex27=True) else: cubit2specfem3d.export2SPECFEM3D('MESH') # You need to create fault mesh file in the last, if using hex27. faultA = save_fault_nodes_elements.fault_input(1,Au,Ad) print("Save created mesh...") # Save create directory as given name os.system('rm -rf output/' + output_mesh) os.system('mv MESH output/' + output_mesh) # End of script
The primary functions of this job include, but are not limited to, inspecting and receiving diagnostic products into various WMS (Warehouse Management System) environments and putting away diagnostic products into designated locations. Storage areas include a -20°C freezer, a 2°C to 8°C cooler and 15°C to 30°C ambient storage. The high bay area requires stocking and picking from equipment that reaches 40' in height. Failure to properly receive and process orders could result in fines to the company, product loss, or a negative impact on compliance or regulatory status. Improperly processed orders could result in product shortages, preventing customer or manufacturing orders from being fulfilled. Failure to perform system transactions accurately could result in inventory discrepancies and inadequate tracing and destruction of product.

Albertsons Safeway is working to become the favorite food and drug retailer in every market it serves. The organization includes 2,230 stores, 27 distribution facilities and 19 manufacturing plants with over 250,000 employees across 34 states and the District of Columbia. The company is privately owned, with supermarkets across the country that include Albertsons, Safeway, Vons, Pavilions, Randalls, Tom Thumb, Carrs, ACME, Jewel-Osco, Lucky, Shaw's, Star Market, Super Saver, United Supermarkets, Market Street and Amigos.

The Supply Chain Department has an opening for a Battery Servicer. This position is located in Irvine, CA. Under the direct supervision of the shop or service manager, the servicer is responsible for performing periodic preventative maintenance on material handling equipment and must be proficient in completing all required paperwork needed to perform all job requirements. Duties include ensuring that batteries are watered and equipment is charged to support operations, cleaning the battery rooms and emptying trash in the shop. The role requires the ability to lift up to 75 pounds and to stand, walk, bend, stoop, twist and turn frequently; the ability to perform the repetitious arm, wrist and hand movements required for maintenance and service procedures; and the ability to work with hands and arms overhead and to work in or under the equipment. It also requires manual dexterity, overall coordination and good balance to work both at ground level and in high places, the ability to operate work-related equipment such as maintenance-related power equipment and hand tools, forklifts and pallet jacks, and the mental alertness necessary to ensure safe and accurate completion of work activities. Candidates must be able to learn and follow Company and Distribution Center policies regarding safe operation of forklifts and pallet jacks, maintenance-related power equipment and hand tools; learn and follow Distribution Center paperwork policies and procedures; and read and comprehend all labeling on hazardous materials and equipment in the facility.

Most work is performed under typical warehouse conditions. Exposure to potential hazards exists with respect to the equipment and materials necessary to perform job functions, including potential exposure to high-voltage current and to vehicle and equipment lubricants and solvents. Associates work in departments that may range in temperature from 80°F to -20°F; in cases of temperatures outside the norm, Personal Protective Equipment is provided. Noise exposure is within normal warehouse expectations.

Diversity is fundamental at Safeway. We foster an inclusive working environment where the different strengths and perspectives of each employee are both recognized and valued.
We believe that building successful relationships with our customers and our communities is only possible through the diversity of our people, and a diverse workforce leads to better teamwork and creative thinking, as well as mutual understanding and respect.

Sears Home Services, the home solutions division of Sears Holdings Corporation and the nation's largest product repair service provider, delivering more than 52 million solutions for homeowners annually, is looking for an experienced In-Home Service Technician with a great attitude and the ability to help customers with their appliance repairs.

This role is responsible for the daily activities associated with servicing commercial loan portfolios and for the accurate completion of transactions and maintenance of the loan. Daily transactions include responding to client queries, processing advances and payments, updating rates, reconciliation, updating payment methods, preparing payoff letters, calculating prepayment penalties, preparing satisfactions, working with external counsel on preparation of assignments, responding to billing and payment inquiries, and booking all loan types. In addition, several exception reports are monitored and actioned to proactively manage the risk of the portfolio. A successful loan servicer works closely with the Closing Team, Real Estate Administration Team, Managing Directors and their teams, Credit, Asset Recovery, Collateral Management, Deposit Operations, Legal, Finance, Quality Control and Compliance.

Responsibilities include ensuring compliance with regulatory requirements and with the bank's policies and procedures; preparing payoff letters and satisfactions for customers and attorneys accurately and on time; and ensuring that financial transactions that financially impact the Bank are accurately reflected on the bank's books. The servicer handles and books more straightforward extensions and renewals, monitors and clears exceptions in a timely fashion, and responds to Service Requests and emails relating to commercial queries in a courteous manner and within established SLAs. Queries include billing, loan maintenance, advance requests and payment processing. The servicer also participates in special projects and performs additional duties as required, works under the supervision of the loan servicing supervisor to service the commercial loan portfolio, assists in maintaining the rate index tables across multiple systems, and ensures that all suspense items are posted accurately and that borrowers receive credit for their payments within the established SLA. The role handles client escalations with supervision, identifies and escalates risks, and actively works to identify areas of process improvement.

Candidates with advanced degrees (Associate's, Bachelor's, and/or Master's) are preferred, but a High School diploma or GED is required. Candidates with some combination of coursework and experience, or extensive related professional experience, are eligible for consideration. The role requires effective time management, organizational and prioritization skills, and a working knowledge of the Microsoft Office product suite, plus certifications as applicable to the position.
#!/usr/bin/python # -*- coding: utf-8 -*- # standard lib import re import json import datetime # The custom modules from . import message # imports in the same folder (module) from . import emojis class MessagePreProcessor(object): """ This class is used as the user message preanalyser. This class will primary be used so that the code will be easily reusable. The MessageObject will only contains a single message object, so that this class will be thread save and so that we can run multiple instances per unit. The message object will contain all the following parts.\n .. code-block:: python\n { 'message': { 'date': 1439471738, 'text': '/start', 'from': { 'id': 3xxxxxx6, 'last_name': 'Sample', 'first_name': 'Max', 'username': 'TheUserName' }, 'message_id': 111, 'chat': { 'id': -xxxxxxx, 'title': 'Drive' } }, 'update_id': 469262057 } } """ def __init__(self, MessageObject, OutputQueue, SqlObject, Cursor, LanguageObject, LoggingObject, ConfigurationObject,): """ Variables: MessageObject ``object`` the message to be analysed message """ self.LastSendCommand = None self.LastSendCommand = None self.LastUsedId = None # This variable will stop the system if the message was send during process self.MessageSend = False # Predefining attributes so that it later can be used for evil. self.LoggingObject = None self.ConfigurationObject = None # output queue self._OutputQueue_ = OutputQueue # SqlObjects self.SqlObject = SqlObject self.SqlCursor = Cursor self.LoggingObject = LoggingObject self.ConfigurationObject = ConfigurationObject # This variable is needed for the logger so that the log end up # getting printed in the correct language. self.M_ = LanguageObject.CreateTranslationObject().gettext if "update_id" in MessageObject: # The update‘s unique identifier. Update identifiers start from a # certain positive number and increase sequentially. This ID # becomes especially handy if you’re using web hooks, since it # allows you to ignore repeated updates or to restore the correct # update sequence, should they get out of order. self.UpdateId = MessageObject["update_id"] if "message_id" in MessageObject["message"]: # Unique message identifier self.MessageID = MessageObject["message"]["message_id"] # get the user of the message # get user data from the message if "first_name" in MessageObject["message"]["from"]: # User‘s or bot’s first name self.UserFirstName = MessageObject["message"]["from"]["first_name"] else: self.UserFirstName = "" if "last_name" in MessageObject["message"]["from"]: # Optional. User‘s or bot’s last name self.UserLastName = MessageObject["message"]["from"]["last_name"] else: self.UserLastName = "" if "username" in MessageObject["message"]["from"]: # Optional. User‘s or bot’s username self.UserName = MessageObject["message"]["from"]["username"] else: self.UserName = "" if "id" in MessageObject["message"]["from"]: # Unique identifier for this user or bot self.UserId = MessageObject["message"]["from"]["id"] # Add user to the system if not exists if self.UserExists() is False: self.AddUser() # Get the Internal user id self.InternalUserId, self.IsAdmin = self.GetUserData() # Here we are initialising the function for the translations. 
# Get the user settings from the user that has send the message Query = ("SELECT User_Setting_Table.User_String FROM " "User_Setting_Table INNER JOIN Setting_Table ON " "User_Setting_Table.Master_Setting_Id=" "Setting_Table.Id WHERE Setting_Table.Setting_Name=%s" " AND User_Setting_Table.Set_By_User=%s;" ) Data = ("Language", self.InternalUserId) self.LanguageName = ( self.SqlObject.ExecuteTrueQuery( self.SqlCursor, Query, Data )[0]["User_String"]) self.LanguageObject = LanguageObject Language = self.LanguageObject.CreateTranslationObject( Languages=[self.LanguageName] ) # create the translator self._ = Language.gettext # Get the text message with the command if "text" in MessageObject["message"]: self.Text = MessageObject["message"]["text"] else: self.Text = None # where was the message send from the user or the group # Get the chat id if "id" in MessageObject["message"]["chat"]: # Unique identifier for this group chat self.ChatId = MessageObject["message"]["chat"]["id"] # Check if message is from a group or not. if self.ChatId == self.UserId: self.InGroup = False else: self.InGroup = True self.GroupName = MessageObject["message"]["chat"]["title"] # Check if group exists if self.GroupExists() is False: self.AddGroup() self.InternalGroupId = self.GetInternalGroupId() if "date" in MessageObject["message"]: # changing the arrival time to a python understandable time # as well as a MySql understandable format self.MessageDate = datetime.datetime.fromtimestamp( int(MessageObject["message"]["date"]) ).strftime('%Y-%m-%d %H:%M:%S') if "forward_from" in MessageObject["message"]: self.ForwardedFrom = MessageObject["message"]["forward_from"] if "forward_date" in MessageObject["message"]: # Optional. For forwarded messages, date the original # message was sent in Unix time self.forward_date = MessageObject["message"]["forward_from"] if "reply_to_message" in MessageObject["message"]: # Optional. For replies, the original message. Note that # the Message object in this field will not contain further # reply_to_message fields even if it itself is a reply. self.ReplyToMessage = MessageObject["message"]["reply_to_message"] if "audio" in MessageObject["message"]: # Optional. Message is an audio file, information about the file self.MessageAudio = MessageObject["message"]["audio"] if "document" in MessageObject["message"]: # Optional. Message is a general file, information about the file self.MEssageDocument = MessageObject["message"]["document"] if "photo" in MessageObject["message"]: # Optional. Message is a photo, available sizes of the photo self.MessagePhoto = MessageObject["message"]["photo"] if "sticker" in MessageObject["message"]: # Optional. Message is a sticker, information about the sticker self.MessageSticker = MessageObject["message"]["sticker"] if "video" in MessageObject["message"]: # Optional. Message is a video, information about the video self.MessageVideo = MessageObject["message"]["video"] if "caption" in MessageObject["message"]: # Optional. Caption for the photo or video self.MessageCaption = MessageObject["message"]["caption"] if "contact" in MessageObject["message"]: # Optional. Message is a shared contact, information about # the contact self.MessageContact = MessageObject["message"]["contact"] if "location" in MessageObject["message"]: # Optional. Message is a shared location, information about # the location self.MessageLocation = MessageObject["message"]["location"] if "venue" in MessageObject["message"]: # Optional. 
Message is a venue, information about the venue self.Venue = MessageObject["message"]["venue"] if "new_chat_participant" in MessageObject["message"]: # Optional. A new member was added to the group, information # about them (this member may be bot itself) self.MessageNewChatParticipant = ( MessageObject["message"]["new_chat_participant"] ) if "left_chat_participant" in MessageObject["message"]: # Optional. A member was removed from the group, information # about them (this member may be bot itself) self.MessageLeftChatParticipant = ( MessageObject["message"]["left_chat_participant"] ) if "new_chat_title" in MessageObject["message"]: # Optional. A group title was changed to this value self.MessageNewChatTitle = ( MessageObject["message"]["new_chat_title"] ) if "new_chat_photo" in MessageObject["message"]: # Optional. A group photo was change to this value self.MessageNewChatPhoto = ( MessageObject["message"]["new_chat_photo"] ) if "delete_chat_photo" in MessageObject["message"]: # Optional. Informs that the group photo was deleted self.MessageDeleteChatPhoto = ( MessageObject["message"]["delete_chat_photo"] ) if "group_chat_created" in MessageObject["message"]: # Optional. Informs that the group has been created self.MessageGroupChatCreated = ( MessageObject["message"]["group_chat_created"] ) if "supergroup_chat_created" in MessageObject["message"]: # Optional. Service message: the supergroup has been created self.SupergroupChatCreated = MessageObject["message"]["supergroup_chat_created"] if "channel_chat_created" in MessageObject["message"]: # Optional. Service message: the channel has been created self.ChannelChatCreated = MessageObject["message"]["channel_chat_created"] if "migrate_to_chat_id" in MessageObject["message"]: # Optional. The group has been migrated to a supergroup with # the specified identifier, not exceeding 1e13 by absolute # value self.MigrateToChatId = MessageObject["message"]["migrate_to_chat_id"] if "migrate_from_chat_id" in MessageObject["message"]: # Optional. The supergroup has been migrated from a group # with the specified identifier, not exceeding 1e13 by # absolute value self.migrate_from_chat_id = MessageObject["message"]["migrate_from_chat_id"] if "pinned_message" in MessageObject["message"]: # Optional. Specified message was pinned. Note that the # Message object in this field will not contain further # reply_to_message fields even if it is itself a reply. self.PinnedMessage = MessageObject["message"]["pinned_message"] def _SendToQueue_(self, MessageObject): """ This methode will be a private function with the task to send the finished message to the postprocessing and shipping class. Variables: - MessageObject ``object`` is the message object that has to be send """ MessageObjectList = [] if MessageObject is not None: if len(MessageObject.Text) > 4096: TemporaryObjectHolder = MessageObject for TextPart in MessageProcessor.Chunker(MessageObject.Text, 4095): TemporaryObjectHolder.Text = TextPart MessageObjectList.append(TemporaryObjectHolder) else: MessageObjectList.append(MessageObject) if self.MessageSend is False: Workload = [] if isinstance(MessageObject, list): Workload.extend(MessageObject) elif isinstance(MessageObject, dict): Workload.append(MessageObject) for Message in MessageObjectList: self._OutputQueue_.put(Message) def UserExists(self, ): """ This method will detect if the use already exists or not. The following query will return 1 if a user with the specified username exists a 0 otherwise. .. 
code-block:: sql\n SELECT EXISTS(SELECT 1 FROM mysql.user WHERE user = 'username') It will return a True if the database returns a 1 and a False if the database a 0. Variables: \- """ exists = self.SqlObject.ExecuteTrueQuery( self.SqlObject.CreateCursor(Dictionary=False), Query=("SELECT EXISTS(SELECT 1 FROM User_Table WHERE" " External_Id = %s);" ), Data=self.UserId )[0][0] if exists == 0: return False else: return True def AddUser(self, ): """ This method will add a new user to the database. Variables: \- """ # Insert into user TableName = "User_Table" Columns = { "External_Id": self.UserId, "User_Name": self.UserName, "First_Name": self.UserFirstName, "Last_Name": self.UserLastName } self.SqlObject.InsertEntry(self.SqlCursor, TableName, Columns) self.SqlObject.Commit() # insert default settings # get default values # get the default settings # get the default language FromTable = "Setting_Table" Columns = ["Id", "Default_String"] Where = [["Setting_Name", "=", "%s"]] Data = ("Language") MasterSetting = self.SqlObject.SelectEntry( self.SqlCursor, FromTable=FromTable, Columns=Columns, Where=Where, Data=Data )[0] TableName = "User_Setting_Table" self.InternalUserId = self.GetUserData()[0] Columns = { "Master_Setting_Id": MasterSetting["Id"], "Set_By_User": self.InternalUserId, "User_String": MasterSetting["Default_String"] } self.SqlObject.InsertEntry( self.SqlCursor, TableName, Columns ) self.SqlObject.Commit() def GetUserData(self): """ This method will get the internal user id and the admin state from the database. Variables: \- """ # first the internal user id FromTable = "User_Table" Columns = ["Internal_Id", "Is_Admin"] Where = [["External_Id", "=", "%s"]] Data = (self.UserId,) temp = self.SqlObject.SelectEntry( self.SqlCursor, FromTable=FromTable, Columns=Columns, Where=Where, Data=Data )[0] internalUserId = temp["Internal_Id"] Is_Admin = temp["Is_Admin"] if Is_Admin == 0: Is_Admin = False else: Is_Admin = True return internalUserId, Is_Admin def GetMessageObject(self,): """ This method will generate a default message object to work with. Variables: \- """ MessageObject = message.MessageToBeSend(ToChatId=self.ChatId) MessageObject.Text = self._("Sorry, but this command could not be" " interpreted.") return MessageObject @staticmethod def Chunker(ListOfObjects, SizeOfChunks): """ Yield successive n-sized (SizeOfChunks) chunks from the list (ListOfObjects). This methode will not return anything, but act as a generator object. Variables: - ListOfObjects ``generator, list or string`` This variable holds all the stuff to split. - SizeOfChunks ``integer`` Holds the size of the chunks to turn the ListOfObjects into. """ for i in range(0, len(ListOfObjects), SizeOfChunks): yield ListOfObjects[i:i+SizeOfChunks] @staticmethod def SpacedChunker(String, SizeOfChunks): """ This method will split a sting by the spaces inside and will separate them correctly. Variables: - String ``string`` This variable holds all the stuff to split. SizeOfChunks ``integer`` Holds the size of the chunks to turn the ListOfObjects into. """ EndList = [] StringSize = 0 TempString = "" for i in String.split(" "): StringSize += len(i) if StringSize > SizeOfChunks: TempString += i else: EndList.append(TempString) StringSize = 0 TempString = "" StringSize += len(i) StringSize = 0 pass return EndList def GroupExists(self): """ This method checks if the group exists or not. The following query will return a 1 if a user with the specified username exists a 0 otherwise. 
From that on the system will return True if the group exists and if it doesn't False.\n .. code-block:: sql\n SELECT EXISTS(SELECT 1 FROM mysql.user WHERE user = 'username') Variables: \- """ # This method checks in the database if the group (if it is one) # exists. Exists = self.SqlObject.ExecuteTrueQuery( self.SqlObject.CreateCursor(Dictionary=False), Query="SELECT EXISTS(SELECT 1 FROM Group_Table WHERE" " External_Group_Id = %s);", Data=self.ChatId )[0][0] if Exists == True: return True else: return False def AddGroup(self): """ This method will add an not existing group to the database. Variables: \- """ # This method will add the group if it doen't exit. self.SqlObject.InsertEntry( self.SqlCursor, TableName="Group_Table", Columns={ "External_Id": self.ChatId, "Group_Name": self.GroupName }, ) self.SqlObject.Commit(self.SqlCursor) def GetInternalGroupId(self): """ This method will get the user internal group id. This method will return the the internal group id directly from the database. Variables: \- """ return self.SqlObject.SelectEntry( self.SqlCursor, FromTable="Group_Table", Columns=["Internal_Group_Id"], Where=[["External_Group_Id", "=", "%s"]], Data=self.ChatId ) def SetLastSendCommand(self, Command, LastUsedId=None, LastUsedData = None): """ This method will save the last user command into the database. The commands used can be set manually from the programmer so that it can be user for flow control. Example:\n .. code-block:: guess\n /Command option Variables: Command ``string`` This is the used command with the option, that was used. LastUsedId ``integer`` This is the last used id, it can be every id, depending the situation. """ TableName = "Session_Table" Columns = { "Command_By_User": self.InternalUserId, "Command": Command, } Duplicate = { "Command": Command, } if LastUsedId is not None: Columns["Last_Used_Id"] = LastUsedId Duplicate["Last_Used_Id"] = LastUsedId if LastUsedData is not None: Columns["Last_Used_Data"] = LastUsedData Duplicate["Last_Used_Data"] = LastUsedData SetLastSendCommand = self.SqlObject.InsertEntry( self.SqlCursor, TableName=TableName, Columns=Columns, Duplicate=Duplicate) self.SqlObject.Commit() def GetLastSendCommand(self): """ This method will get the last user command. This method will get the last user command from the database, so that the last command can be used for flow control. The command are mostly set by the system and not by the user, at least not direct. Example:\n .. code-block:: guess\n /command option Variables: \- Return: - LastSendCommand["Last_Used_Id"] - LastSendCommand["Command"] """ FromTable = "Session_Table" Columns = ["Command", "Last_Used_Id", "Last_Used_Data"] Where = [["Command_By_User", "=", "%s"]] Data = (self.InternalUserId,) LastSendCommand = self.SqlObject.SelectEntry( self.SqlCursor, FromTable=FromTable, Columns=Columns, Where=Where, Data=Data ) if len(LastSendCommand) > 0: LastSendCommand = LastSendCommand[0] else: LastSendCommand["Last_Used_Id"] = None LastSendCommand["Command"] = None LastSendCommand["Last_Used_Data"] = none return LastSendCommand def ClearLastCommand(self): """ This method clears the last set command if the process finished. Variables: \- """ self.SqlObject.UpdateEntry( Cursor=self.SqlCursor, TableName="Session_Table", Columns={ "Command": "0", "Last_Used_Id": 0 }, Where=[["Command_By_User", self.InternalUserId]], Autocommit=True ) def ChangeUserLanguage(self, Language): """ This method changes the user language. 
This method is responsible for initialising the language change, as well as activating the new language. It will return True if the new language could be initialised and False if there has been an error. Variables: Language ``string`` should be a string with the new language file """ if Language == "English": Language = "en_US" elif Language == "Deutsch": Language = "de_DE" self.SqlObject.UpdateEntry( Cursor=self.SqlCursor, TableName="User_Setting_Table", Columns={"User_String": Language}, Where=[["Master_User_Id", self.InternalUserId]], Autocommit=True ) try: self.LanguageName = Language Language = self.LanguageObject.CreateTranslationObject(self.LanguageName) self._ = self.LanguageObject.gettext if self.LanguageObject.info()["language"] != Language: raise ImportError( self.M_("Unknown language error") ) return True except ImportError as Error: self.LoggingObject.error("{} {}".format( self.M_("There has been an error with the changing of the " "language class, this error has been returned: {Error}" ).format(Error=Error), self.M_("Please, contact your administrator.") ) ) return False def InterpretMessage(self): """ This method is here to be overriden by a child class. Variables: \- """ raise NotImplementedError class MessageProcessor(MessagePreProcessor): """ This class is used as the user message analyser. It extends the MessagePreProcessor class with the needed methodes for analysing the message object. The MessageObject will only contains a single message object, so that this class will be thread save and so that we can run multiple instances per unit. The message object will contain all the following parts.\n .. code-block:: python\n { 'message': { 'date': 1439471738, 'text': '/start', 'from': { 'id': 3xxxxxx6, 'last_name': 'Sample', 'first_name': 'Max', 'username': 'TheUserName' }, 'message_id': 111, 'chat': { 'id': -xxxxxxx, 'title': 'Drive' } }, 'update_id': 469262057 } } """ def InterpretMessage(self): """ This method interprets the user text. This method is used as an pre interpreter of the user send text. It primarily chooses if the user send text is a command or not. It will choose the correct interpretation system, if the text has been send by a group or not. It returns the MessageObject after letting it get modified. Variables: \- """ MessageObject = self.GetMessageObject() # check if message is a command if self.Text is not None: # Analyse the text and do your stuff. # delete the annoying bot command from the text to analyse # If the name of the bot is used in the # command delete the @NameOfBot self.Text = re.sub(r"^(@\w+[bB]ot\s+)?", "", self.Text) if self.Text.startswith("/"): if self.InGroup is False: MessageObject = self.InterpretUserCommand(MessageObject) else: MessageObject = self.InterpretGroupCommand(MessageObject) else: # Get the last send command and the last used id LastSendCommand = self.GetLastSendCommand() self.LastUsedId = LastSendCommand["Last_Used_Id"] self.LastSendCommand = LastSendCommand["Command"] self.LastSendData = LastSendCommand["Last_Used_Data"] if self.InGroup is False: MessageObject = self.InterpretUserNonCommand(MessageObject) #else: # MessageObject = self.InterpretGroupNonCommand(MessageObject) else: MessageObject = None # checking that the lenght of the message never will be longer then # 4096 characters long self._SendToQueue_(MessageObject) def InterpretUserCommand(self, MessageObject): """ This method interprets the commands form the user text. This method is used as an interpreter of the user send commands. 
It returns the MessageObject after analysing and modifying the MessageObject to respond the user Text. Variables: - MessageObject ``object`` is the message object that has to be modified """ # register the command in the database for later use if self.Text.startswith("/start"): MessageObject.Text = self._("Welcome.\nWhat can I do for you?" "\nPress /help for all my commands" ) Markup = [ ["/help"], ["/list"] ] if self.IsAdmin is True: Markup[0].append("/admin") MessageObject.ReplyKeyboardMarkup(Markup, OneTimeKeyboard=True ) self.ClearLastCommand() # this command will list the anime content on the server elif self.Text == "/list": # this command will send the anime list MessageObject.Text = self._("Sorry\nAt the moment this command is not supported") elif self.Text == "/done": self.Text = "/start" MessageObject = self.InterpretUserCommand(MessageObject) elif self.Text == "/help": MessageObject.Text = self._( "Work in progress! @AnimeSubBot is a bot." ) elif self.Text == "/admin": # if that person is an administrator. if self.IsAdmin: self.InterpretAdminCommands(MessageObject) self.SetLastSendCommand("/admin", None) else: MessageObject.Text = self._("You don't have the right to use that command.") # the settings are right now not supported, maybe later. """elif self.Text == "/settings": # This command will send the possible setting to the user self.SetLastSendCommand("/settings", None) MessageObject.Text = self._("Please, choose the setting to change:" ) MessageObject.ReplyKeyboardMarkup( [ ["/language"], ["/comming soon"] ], OneTimeKeyboard=True ) elif self.Text == "/language": # This option will change the user language # Set the last send command self.SetLastSendCommand("/language") MessageObject.Text = self._( "Please choose your preferred language:" ) MessageObject.ReplyKeyboardMarkup([ ["English"], ["Deutsch"], ["Français"] ], OneTimeKeyboard=True ) """ else: # send that the command is unknown MessageObject.Text = self._("I apologize, but this command is not supported.\n" "Press or enter /help to get help.") return MessageObject def InterpretUserNonCommand(self, MessageObject): """ This method interprets the non commands from user text. This method is used as an interpreter of the system set commands and the user send text. It returns the MessageObject after modifying it. Variables: MessageObject ``object`` is the message object that has to be modified """ if self.LastSendCommand is None: # if there is nothing return the default. return MessageObject """ if LastSendCommand == "/language": self.ChangeUserLanguage(self.Text) MessageObject.Text = self._("Language changed successfully.") MessageObject.ReplyKeyboardHide() self.ClearLastCommand() """ if self.LastSendCommand.startswith("/admin"): # see that the admin commands are interpreted correctly MessageObject = self.InterpretAdminCommands(MessageObject) return MessageObject def InterpretGroupCommand(self, MessageObject): """ This command will interpret all the group send commands. Variables: MessageObject ``object`` is the message object that has to be modified """ if self.Text == "/help": MessageObject.Text = self._( "Work in progress! @AnimeSubBot is a bot" ) return MessageObject def InterpretAdminCommands(self, MessageObject): """ This command will interpret all the admin send commands. 
Variables: MessageObject ``object`` is the message object that has to be modified Commands: Channel - add channel - change description - send description - delete channel Anime list - publish list - add anime - configure anime - remove Anime """ if self.Text != "/admin": if self.LastSendCommand == "/admin": # the default screen if self.Text.startswith(self._("anime")): MessageObject.Text = self._("What do you want to do?") MessageObject.ReplyKeyboardMarkup( [ [self._("publish list")], [self._("add anime")], [self._("configure anime")], [self._("remove anime")], [self._("back")], ], OneTimeKeyboard=True ) self.SetLastSendCommand("/admin anime", None) elif self.Text.startswith(self._("channel")): MessageObject.Text = self._("What do you want to do?") MessageObject.ReplyKeyboardMarkup( [ [self._("add channel")], [self._("change description")], [self._("send description")], [self._("delete channel")], [self._("back")], ], OneTimeKeyboard=True ) self.SetLastSendCommand("/admin channel", None) elif self.Text == self._("back"): self.Text = "/start" MessageObject = self.InterpretUserCommand(MessageObject) elif self.LastSendCommand.startswith("/admin anime"): # the anime commands if self.Text == "publish list": # 1) publish to channel pass elif self.Text == "add anime": # Please enter the url and be patient while the program extracts the information. To cancel please write CANCEL. -- ;:; -> delimeter # 1) automatic (a) vs manual entry (b) # 2a) extract URL =?> CANCEL -> to admin # 3a) confirm Yes -> save data / No -> to admin # 4a) add telegram url # 2b) enter name # 3b) enter publish date # 4b) enter myanimelist.net url # 5b) enter telegram url pass elif self.Text == "configure anime": # 1) search by name # 2) show possible names (repeats until correct) # 3) change by data => Telegram URL; Date; Name; pass elif self.Text == "remove anime": # 1) search by name # 2) show possible names (repeats until correct) # 3) check if user is sure and then delete anime pass elif self.Text == "back": self.Text = "/admin" self.ClearLastCommand() self.InterpretUserCommand(MessageObject) elif self.LastSendCommand.startswith("/admin channel"): # the channel commands ChannelObject = Channel(self.SqlObject, self.SqlCursor) if self.LastSendCommand.startswith("/admin channel"): if self.Text == "add channel" or self.LastSendCommand.startswith("/admin channel"): # add new channel # 1) Please enter the name of the channel - enter CANSEL to exit # 1a) back to admin hub # 2) check if channel exists - save (a) or error (b) # 2a) save channel name # 2b) back to admin channnel # 3a) enter description # 3b) chancel => return to admin hub # 3ab) is the text ok Yes / No # 4a) enter buttons to use with description YES / NO # 4b) chancel => return to admin hub # 5a) success if self.Text == "add channel": MessageObject.Text = self._("Please send the name of the channel in this form @example_channel or send /done") self.SetLastSendCommand("/admin channel add", None) if self.LastSendCommand.startswith("/admin channel add"): if self.LastSendCommand == "/admin channel add": # 2) check if channel exists - save (a) or error (b) if self.Text.startswith("@"): # enter the channel name into the database if the channel doesnt't exists yet if ChannelObject.ChannelExists(self.Text) is True: # 2b) back to admin channnel MessageObject.Text = self._("The channel already exists.\nTo change the description choose \"change description\" in the options.") self.SetLastSendCommand("/admin channel") else: # 3a) enter description 
ChannelObject.AddChannel(self.Text, ByUser = self.InternalUserId) MessageObject.Text = self._("Please enter the channel description, to chancel send CANCEL") self.SetLastSendCommand("/admin channel add channel description", LastUsedData = self.Text) elif self.LastSendCommand == "/admin channel add description": if self.Text != "CANCEL": MessageObject.Text = self._("Is the description to your liking?") MessageObject.ReplyKeyboardMarkup([ [self._("YES")], [self._("NO")] ], OneTimeKeyboard=True ) # 4a) enter buttons to use with description if self.Text != "CANCEL": MessageObject.Text = self._("Do you wish to add buttons?") MessageObject.ReplyKeyboardMarkup([ [self._("YES")], [self._("NO")] ], OneTimeKeyboard=True ) # saving the description without buttons ChannelObject.ChangeDescription(self.LastSendData, self.Text, ByUser = self.InternalUserId) # saving the description without buttons self.SetLastSendCommand("/admin channel add description buttons unsure", LastUsedData = self.LastSendData) else: MessageObj.Text = self._("To change the description choose \"change description\" in the options.") self.SetLastSendCommand("/admin channel") elif self.LastSendCommand == "/admin channel add description buttons unsure": if self.Text == self._("YES"): # 4a) enter buttons to use with description YES MessageObject.Text = self._("Please send the buttons like this:\nText;Url\nText;Url") self.SetLastSendCommand("/admin channel add description buttons sure", LastUsedData = self.LastSendData) else: # 4b) no => return to admin hub self.SetLastSendCommand("/admin channel") elif self.LastSendCommand == "/admin channel add description buttons sure": ChannelObject.ChangeDescriptionButton(self.LastSendData, self.Text, self.InternalUserId) Description, Buttons = ChannelObject.GetDescription() MessageObject.Text = Description if Buttons is not None: for Line in Buttons.split("\n"): Text, Url = Line.split(";") MessageObject.AddInlineButton(Text, Url) self._SendToQueue_(MessageObject) elif self.Text == "change description": pass elif self.Text == "send description": MessageObject.Text = Description if Buttons is not None: for Line in Buttons.split("\n"): Text, Url = Line.split(";") MessageObject.AddInlineButton(Text, Url) elif self.Text == "delete channel": pass elif self.Text == "back": self.Text = "/admin" self.ClearLastCommand() self.InterpretUserCommand(MessageObject) else: MessageObject.Text = self._("How can I help you?") MessageObject.ReplyKeyboardMarkup( [ [self._("anime")], [self._("channel")], [self._("back")], ], OneTimeKeyboard=True ) self.SetLastSendCommand("/admin", None) return MessageObject class Channel(object): def __init__(self, SqlObject, Cursor): self.SqlObject = SqlObject self.Cursor = Cursor def AddChannel(self, Name, Description = None, ByUser = None): """ This methode will insert the channel into the database. Variables: - Name ``string`` the true name of the channnel, this will be used as autifications methode. - Desciption ``string`` the channnel description - ByUser ``integer`` the user by which the channel was created by """ Data = {"True_Name": Name} if Description is not None: Data["Description"] = Description if ByUser is not None: Data["By_User"] = ByUser Data["Last_Changes"] = ByUser self.SqlObject.InsertEntry(self.Cursor, "Channel_Table", Data, ) self.SqlObject.Commit() def ChangeDescription(self, Name, Description, ByUser = None): """ This methode will change the description of the channel. 
Variables: - Name ``string`` the true name of the channnel, this will be used as autifications methode. - Desciption ``string`` the channnel description - ByUser ``string`` the user that changed the value """ Data = {"Description": Description} if ByUser is not None: Data["Last_Changes"] = ByUser Where = [ [ "True_Name", "=", Name, ], ] self.SqlObject.UpdateEntry(self.Cursor, "Channel_Table", Data, Where ) self.SqlObject.Commit() def ChangeDescriptionButton(self, Name, Buttons, ByUser = None): """ This methode will change the description buttons of the channel. Variables: - Name ``string`` the true name of the channnel, this will be used as autifications methode. - Desciption ``string`` the channnel description - ByUser ``string`` the user that changed the value """ Data = {"Description_Buttons": Buttons} if ByUser is not None: Data["Last_Changes"] = ByUser Where = [ [ "True_Name", "=", Name, ], ] self.SqlObject.UpdateEntry(self.Cursor, "Channel_Table", Data, Where ) self.SqlObject.Commit() def ChannelExists(self, Name): """ This method will detect if the use already exists or not. The following query will return 1 if a user with the specified username exists a 0 otherwise. .. code-block:: sql\n SELECT EXISTS(SELECT 1 FROM mysql.user WHERE user = 'username') It will return a True if the database returns a 1 and a False if the database a 0. Variables: \- """ exists = self.SqlObject.ExecuteTrueQuery( self.SqlObject.CreateCursor(Dictionary=False), Query=("SELECT EXISTS(SELECT 1 FROM Channel_Table WHERE" " True_Name = %s);" ), Data=Name )[0][0] if exists == 0: return False else: return True def GetChannels(self): """ this method will get all the channels """ Channels = None Columns = ("True_Name",) ChannelsTemp = self.SqlObject.Select( Cursor = self.Cursor, FromTable = "Channel_Table", Columns = Columns,) return Channels def GetDescription(self, Name): pass class Anime(object): def __init__(self, SqlObject, Cursor): self.SqlObject = SqlObject self.Cursor = Cursor
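As a side note on the storage format used above: the channel description buttons are kept as newline-separated "Text;Url" pairs and later split with a bare Line.split(";") before being attached as inline buttons. The helper below is a minimal, self-contained sketch of a slightly more defensive parser for that format; the function name and the sample data are illustrative only and not part of the bot's actual code.

def parse_button_lines(buttons):
    """Split stored 'Text;Url' lines into (text, url) tuples.

    Lines without a ';' separator are skipped instead of raising,
    which avoids the unpacking error a malformed line would cause.
    """
    pairs = []
    for line in (buttons or "").splitlines():
        line = line.strip()
        if not line:
            continue
        text, sep, url = line.partition(";")
        if sep and text and url:
            pairs.append((text.strip(), url.strip()))
    return pairs


if __name__ == "__main__":
    stored = "Website;https://example.org\nDonate;https://example.org/donate"
    for text, url in parse_button_lines(stored):
        print(text, "->", url)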
Planning a fabulous wedding or a special occasion, but need that extra bit of oomph to make your guests go wow? You’ve come to the right place, we’re here to help! Heaven in a Wild Flower is dedicated to helping you create the perfect setting and occasion with our gorgeous florals, creative design and beautiful props! Who doesn’t dream of having a fairy godmother? If you want everything taken care of, this is the option for you. Like a fairy godmother, we will make your dream a reality without you having to do anything. We’ll brainstorm ideas, find inspiration, design a concept, source and produce all items, actual day set-up and styling and tear-down. Simply arrive and enjoy — you’ll look like a fabulous host and have a wonderful time, all without breaking a sweat. Don’t worry, we won’t tell. We only take a limited number of Styling Suite clients each month, so that we can give each one the full attention that it deserves. Styling Suite services include design & conceptualisation, florals, venue and area styling and decor and dessert table services.
import base64 import logging from io import StringIO try: from urllib.parse import urlparse except ImportError: from urlparse import urlparse from django.core.exceptions import ImproperlyConfigured from django.utils.encoding import smart_str from django_ses import settings logger = logging.getLogger(__name__) class BounceMessageVerifier(object): """ A utility class for validating bounce messages See: http://docs.amazonwebservices.com/sns/latest/gsg/SendMessageToHttp.verify.signature.html """ def __init__(self, bounce_dict): """ Creates a new bounce message from the given dict. """ self._data = bounce_dict self._verified = None def is_verified(self): """ Verifies an SES bounce message. """ if self._verified is None: signature = self._data.get('Signature') if not signature: self._verified = False return self._verified # Decode the signature from base64 signature = base64.b64decode(signature) # Get the message to sign sign_bytes = self._get_bytes_to_sign() if not sign_bytes: self._verified = False return self._verified if not self.certificate: self._verified = False return self._verified # Extract the public key pkey = self.certificate.get_pubkey() # Use the public key to verify the signature. pkey.verify_init() pkey.verify_update(sign_bytes) verify_result = pkey.verify_final(signature) self._verified = verify_result == 1 return self._verified @property def certificate(self): """ Retrieves the certificate used to sign the bounce message. TODO: Cache the certificate based on the cert URL so we don't have to retrieve it for each bounce message. *We would need to do it in a secure way so that the cert couldn't be overwritten in the cache* """ if not hasattr(self, '_certificate'): cert_url = self._get_cert_url() # Only load certificates from a certain domain? # Without some kind of trusted domain check, any old joe could # craft a bounce message and sign it using his own certificate # and we would happily load and verify it. if not cert_url: self._certificate = None return self._certificate try: import requests except ImportError: raise ImproperlyConfigured("requests is required for bounce message verification.") try: import M2Crypto except ImportError: raise ImproperlyConfigured("M2Crypto is required for bounce message verification.") # We use requests because it verifies the https certificate # when retrieving the signing certificate. If https was somehow # hijacked then all bets are off. response = requests.get(cert_url) if response.status_code != 200: logger.warning('Could not download certificate from %s: "%s"', cert_url, response.status_code) self._certificate = None return self._certificate # Handle errors loading the certificate. # If the certificate is invalid then return # false as we couldn't verify the message. try: self._certificate = M2Crypto.X509.load_cert_string(response.content) except M2Crypto.X509.X509Error as e: logger.warning('Could not load certificate from %s: "%s"', cert_url, e) self._certificate = None return self._certificate def _get_cert_url(self): """ Get the signing certificate URL. Only accept urls that match the domains set in the AWS_SNS_BOUNCE_CERT_TRUSTED_DOMAINS setting. Sub-domains are allowed. i.e. if amazonaws.com is in the trusted domains then sns.us-east-1.amazonaws.com will match. 
""" cert_url = self._data.get('SigningCertURL') if cert_url: if cert_url.startswith('https://'): url_obj = urlparse(cert_url) for trusted_domain in settings.BOUNCE_CERT_DOMAINS: parts = trusted_domain.split('.') if url_obj.netloc.split('.')[-len(parts):] == parts: return cert_url logger.warning('Untrusted certificate URL: "%s"', cert_url) else: logger.warning('No signing certificate URL: "%s"', cert_url) return None def _get_bytes_to_sign(self): """ Creates the message used for signing SNS notifications. This is used to verify the bounce message when it is received. """ # Depending on the message type the fields to add to the message # differ so we handle that here. msg_type = self._data.get('Type') if msg_type == 'Notification': fields_to_sign = [ 'Message', 'MessageId', 'Subject', 'Timestamp', 'TopicArn', 'Type', ] elif (msg_type == 'SubscriptionConfirmation' or msg_type == 'UnsubscribeConfirmation'): fields_to_sign = [ 'Message', 'MessageId', 'SubscribeURL', 'Timestamp', 'Token', 'TopicArn', 'Type', ] else: # Unrecognized type logger.warning('Unrecognized SNS message Type: "%s"', msg_type) return None outbytes = StringIO() for field_name in fields_to_sign: field_value = smart_str(self._data.get(field_name, ''), errors="replace") if field_value: outbytes.write(field_name) outbytes.write("\n") outbytes.write(field_value) outbytes.write("\n") return outbytes.getvalue() def verify_bounce_message(msg): """ Verify an SES/SNS bounce notification message. """ verifier = BounceMessageVerifier(msg) return verifier.is_verified()
Co-Convenor: Maria Ela L. Atienza, Ph.D. The task of the Program is to provide a platform for understanding these varied social and political challenges facing the country today. Broadly, the aim of the Program is to allow experts from a variety of disciplines in the University to develop a better understanding of past, current, and future social and political tensions that can arise and impact on modern Philippine society and polity. It is designed to produce empirical studies using a variety of methods and approaches to better understand the different social and political issues, transitions, and disruptions affecting the country and world. These studies form the basis for policy inputs and discussions at both the local, national, and international levels. The Program seeks to (a) describe and analyze the current social and political issues facing the country as well as (b) highlight and anticipate the likely shifts and transformations that might follow from such issues and (c) examine their implications on society, economy, and public policy as well as (d) communicate its findings to a wider audience of intellectuals, policy makers, and the general public. The main objective of the research is to lead to possible sets of policy options, responses, and recommendations by government as well as by other social institutions including development agencies.
#! /bin/usr/env python # D.J. Bennett # 26/05/2014 """ Test phylogeny stage. """ import unittest import os import shutil import pickle from pglt.stages import phylogeny_stage as pstage from Bio import AlignIO from Bio import Phylo from cStringIO import StringIO from pglt.tools.phylogeny_tools import raxml # DIRS working_dir = os.path.dirname(__file__) # FUNCTIONS def genPhylogeny(): treedata = "(outgroup, (B, C), (D, E))" handle = StringIO(treedata) tree = Phylo.read(handle, "newick") return tree # DUMMIES def dummyCountNPhylos(nphylos, f): return nphylos class DummyAlignmentStore(object): def __init__(self, clusters, genedict, allrankids, indir, logger): pass class DummyGenerator(object): phylogenies = [] def __init__(self, alignment_store, rttstat, outdir, maxtrys, logger, wd): pass def run(self): self.phylogenies.append(genPhylogeny()) return True # TEST DATA with open(os.path.join(working_dir, 'data', 'test_alignment_ref.faa'), 'r')\ as file: alignment = AlignIO.read(file, 'fasta') paradict = {'nphylos': '1', 'maxtrys': '1', 'rttstat': '0.5', 'constraint': '3'} genedict = {} allrankids = [] @unittest.skipIf(not raxml, "Requires RAxML") class PhylogenyStageTestSuite(unittest.TestCase): def setUp(self): # stub out self.true_AlignmentStore = pstage.ptools.AlignmentStore self.true_Generator = pstage.ptools.Generator self.true_countNPhylos = pstage.ptools.countNPhylos pstage.ptools.Generator = DummyGenerator pstage.ptools.AlignmentStore = DummyAlignmentStore pstage.ptools.countNPhylos = dummyCountNPhylos # create input data os.mkdir('tempfiles') with open(os.path.join('tempfiles', "paradict.p"), "wb") as file: pickle.dump(paradict, file) with open(os.path.join('tempfiles', "genedict.p"), "wb") as file: pickle.dump(genedict, file) with open(os.path.join('tempfiles', "allrankids.p"), "wb") as file: pickle.dump(allrankids, file) os.mkdir('3_alignment') os.mkdir('4_phylogeny') os.mkdir(os.path.join('3_alignment', 'COI')) os.mkdir(os.path.join('3_alignment', 'rbcl')) with open(os.path.join('3_alignment', 'rbcl', 'test_alignment_rbl.faa'), 'w') as file: count = AlignIO.write(alignment, file, "fasta") del count with open(os.path.join('3_alignment', 'COI', 'test_alignment_COI.faa'), 'w') as file: count = AlignIO.write(alignment, file, "fasta") del count def tearDown(self): # remove all folders potentially generated by phylogeny stage phylogeny_folders = ['3_alignment', '4_phylogeny', 'tempfiles'] while phylogeny_folders: try: phylogeny_folder = phylogeny_folders.pop() shutil.rmtree(phylogeny_folder) except OSError: pass # stub in pstage.ptools.Generator = self.true_Generator pstage.ptools.AlignmentStore = self.true_AlignmentStore pstage.ptools.countNPhylos = self.true_countNPhylos def test_phylogeny_stage(self): # run res = pstage.run() # clean dir os.remove(os.path.join('4_phylogeny', 'distribution.tre')) os.remove(os.path.join('4_phylogeny', 'distribution_unconstrained.tre')) os.remove(os.path.join('4_phylogeny', 'consensus.tre')) os.rmdir('4_phylogeny') # assert self.assertIsNone(res) if __name__ == '__main__': unittest.main()
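The setUp/tearDown pair above stubs collaborators by assigning module attributes and restoring the originals by hand. Purely as an illustration of the same idea, unittest.mock.patch.object can do the save-and-restore automatically; the toy target below (math.sqrt) is made up for the example and unrelated to pglt.

import math
import unittest
from unittest import mock  # on Python 2 this is the external 'mock' package


class ExampleStubbing(unittest.TestCase):
    def test_with_patch(self):
        with mock.patch.object(math, 'sqrt', return_value=42):
            self.assertEqual(math.sqrt(9), 42)   # stub is in effect
        self.assertEqual(math.sqrt(9), 3)        # original restored automatically


if __name__ == '__main__':
    unittest.main()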
Playsam is a haven for contemporary, cosmopolitan, classic design. Simply put, Playsam is Scandinavian design at its finest. Revisit your childhood with this nostalgic, classic piece.
import io import sys import time import wave import logging import argparse import contextlib from os import path import pyaudio import zmq from vexmessage import create_vex_message, decode_vex_message from microphone.command_manager import CommandManager PYAUDIO_BIT_MAPPING = {8: pyaudio.paInt8, 16: pyaudio.paInt16, 24: pyaudio.paInt24, 32: pyaudio.paInt32} def bits_to_samplefmt(bits): if bits in PYAUDIO_BIT_MAPPING.keys(): return PYAUDIO_BIT_MAPPING[bits] class PyAudio: def __init__(self, messaging, settings): self._pyaudio = pyaudio.PyAudio() self.messaging = messaging self.command_manager = CommandManager(self, messaging) self._logger = logging.getLogger(__name__) self._logger.info("Initializing PyAudio. ALSA/Jack error messages " + "that pop up during this process are normal and " + "can usually be safely ignored.") # NOTE: pyaudio SPAMS the terminal, this seperates everything print('\n') self._logger.info("Initialization of PyAudio engine finished") self.devices = {} self.get_devices() def __del__(self): self._pyaudio.terminate() def run(self): messaging = self.messaging # TODO: create better type here startup_frame = create_vex_message('', 'microphone', 'STATUS', status='recording') messaging.publish_socket.send_multipart(startup_frame) while True: # NOTE: `frame` is a list of byte strings # Once we recv here, MUST reply in order to loop again! try: frame = self.messaging.subscribe_socket.recv_multipart() except KeyboardInterrupt: break msg = decode_vex_message(frame) if msg.type == 'CMD': self.command_manager.handle_command(msg) def get_devices(self, device_type='all'): num_devices = self._pyaudio.get_device_count() self._logger.debug('Found %d PyAudio devices', num_devices) for i in range(num_devices): info = self._pyaudio.get_device_info_by_index(i) name = info['name'] if name in self.devices: continue else: self.devices[name] = PyAudioDevice(self, info) return self.devices """ if device_type == plugin.audioengine.DEVICE_TYPE_ALL: return devs else: return [device for device in devs if device_type in device.types] """ def invoke_device(self): pass def get_default_output_device(self): info = self._pyaudio.get_default_output_device_info() return PyAudioDevice(self, info) def get_default_device(self, type='input'): try: info = self._pyaudio.get_default_input_device_info() except IOError: devices = self.get_devices(device_type=type) if len(devices) == 0: msg = 'No %s devices available!' 
% direction self._logger.warning(msg) raise plugin.audioengine.DeviceNotFound(msg) try: device = self.devices['default'] except KeyError: self._logger.warning('default device not found') # FIXME device = None return device else: return PyAudioDevice(self, info) class PyAudioDevice: def __init__(self, engine, info, context=None, address='inproc://microphone'): super().__init__() self._logger = logging.getLogger(__name__) self._engine = engine self.info = info self._index = info['index'] self._max_output_channels = info['maxOutputChannels'] self._max_input_channels = info['maxInputChannels'] # FIXME self._sample_width = self._engine._pyaudio.get_sample_size(pyaudio.paInt16) self._default_sample_rate = int(self.info['defaultSampleRate']) res_file = path.abspath(path.join(path.dirname(__file__), 'resources')) wave_file = path.join(res_file, 'congo.wav') wf = wave.open(wave_file, 'rb') self._output_rate = wf.getframerate() self._output_format = wf.getsampwidth() self._output_channels = wf.getnchannels() self._output_file = wave_file wf.close() close_file = path.join(res_file, 'done.wav') wf = wave.open(close_file, 'rb') self._close_rate = wf.getframerate() self._close_format = wf.getsampwidth() self._close_channels = wf.getnchannels() self._close_file = close_file def supports_format(self, bits, channels, rate, output=False): req_dev_type = ('output' if output else 'input') sample_fmt = bits_to_samplefmt(bits) if not sample_fmt: return False direction = 'output' if output else 'input' fmt_info = { ('%s_device' % direction): self._index, ('%s_format' % direction): sample_fmt, ('%s_channels' % direction): channels, 'rate': rate } try: supported = self._engine._pyaudio.is_format_supported(**fmt_info) except ValueError as e: if e.args in (('Sample format not supported', -9994), ('Invalid sample rate', -9997), ('Invalid number of channels', -9998)): return False else: raise else: return supported @contextlib.contextmanager def open_stream(self, bits, channels, rate=None, chunksize=1024, output=True): if rate is None: rate = int(self.info['defaultSampleRate']) # Check if format is supported is_supported_fmt = self.supports_format(bits, channels, rate, output=output) if not is_supported_fmt: msg_fmt = ("PyAudioDevice {index} ({name}) doesn't support " + "%s format (Int{bits}, {channels}-channel at" + " {rate} Hz)") % ('output' if output else 'input') msg = msg_fmt.format(index=self.index, name=self.name, bits=bits, channels=channels, rate=rate) self._logger.critical(msg) raise plugin.audioengine.UnsupportedFormat(msg) # Everything looks fine, open the stream direction = ('output' if output else 'input') stream_kwargs = { 'format': bits_to_samplefmt(bits), 'channels': channels, 'rate': rate, 'output': output, 'input': not output, ('%s_device_index' % direction): self._index, 'frames_per_buffer': chunksize if output else chunksize*8 # Hacky } stream = self._engine._pyaudio.open(**stream_kwargs) """ self._logger.debug("%s stream opened on device '%s' (%d Hz, %d " + "channel, %d bit)", "output" if output else "input", self.slug, rate, channels, bits) """ try: yield stream finally: stream.close() """ self._logger.debug("%s stream closed on device '%s'", "output" if output else "input", self.slug) """ def play_beep(self): chunksize = 1024 f = self._engine._pyaudio.get_format_from_width(self._output_format) stream = self._engine._pyaudio.open(format=f, channels = self._output_channels, rate = self._output_rate, output=True) wf = wave.open(self._output_file) data = wf.readframes(chunksize) while len(data) > 
0:
            stream.write(data)
            data = wf.readframes(chunksize)
        stream.stop_stream()
        stream.close()

    def play_done(self):
        chunksize = 1024
        f = self._engine._pyaudio.get_format_from_width(self._close_format)
        stream = self._engine._pyaudio.open(format=f,
                                            channels=self._close_channels,
                                            # use the rate read from done.wav, not the beep's rate
                                            rate=self._close_rate,
                                            output=True)
        wf = wave.open(self._close_file)
        data = wf.readframes(chunksize)
        while len(data) > 0:
            stream.write(data)
            data = wf.readframes(chunksize)
        stream.stop_stream()
        stream.close()

    def record(self, chunksize, *args):
        channels = args[1]
        with self.open_stream(*args, chunksize=chunksize, output=False) as stream:
            record_seconds = 5
            rate = int(self.info['defaultSampleRate'])
            steps = int(rate / chunksize * record_seconds)
            data_list = io.BytesIO()
            # NOTE: need the rate info and sample width for ASR
            for _ in range(steps):
                try:
                    data_list.write(stream.read(chunksize))
                except IOError as e:
                    if type(e.errno) is not int:
                        # Simple hack to work around the fact that the
                        # errno/strerror arguments were swapped in older
                        # PyAudio versions. This was fixed in upstream
                        # commit 1783aaf9bcc6f8bffc478cb5120ccb6f5091b3fb.
                        strerror, errno = e.errno, e.strerror
                    else:
                        strerror, errno = e.strerror, e.errno
                    self._logger.warning("IO error while reading from device"
                                         " '%s': '%s' (Errno: %d)",
                                         self.slug, strerror, errno)
            return data_list.getvalue()
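record() returns headerless PCM bytes, so a caller that wants a playable file still has to wrap them in a WAV container. The sketch below shows that step with the standard wave module; the helper name, bit depth, channel count and sample rate are assumptions for illustration, not values taken from this module.

import wave


def save_wav(path, pcm_bytes, channels=1, sample_width=2, rate=16000):
    """Wrap headerless PCM data (e.g. from PyAudioDevice.record) in a WAV file."""
    with wave.open(path, 'wb') as wf:
        wf.setnchannels(channels)
        wf.setsampwidth(sample_width)  # 2 bytes per sample == 16-bit audio
        wf.setframerate(rate)
        wf.writeframes(pcm_bytes)


# Hypothetical usage:
# data = device.record(1024, 16, 1, 16000)
# save_wav('utterance.wav', data, channels=1, sample_width=2, rate=16000)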
Corn planted later will germinate and emerge more quickly, and go through crop stages faster than corn planted early. Typically, corn requires about 100 growing degree days (GDDs) to emerge and about 200 GDDs to reach the V2 growth stage. An accumulation of about 475 GDDs is required to reach V6, the stage when the growing point moves above ground. Wet soils cause nitrogen losses, and determining how much nitrogen is lost is necessary to choose the proper management options. In cases where high intensity rain results in high runoff, leaching losses will probably be low. The primary nitrogen loss mechanism in saturated soils in Kentucky is denitrification, which occurs when soil nitrate nitrogen (NO3-N) is converted to nitrogen gas by soil bacteria. Two to three days of soil saturation is required for bacteria to begin the denitrification process. Well-drained upland soils that have been wet from a series of rains probably have not experienced much denitrification. Soils in lower landscape positions that stay saturated longer will likely lose more N. Losses can be calculated by estimating 3 to 4 percent loss of fertilizer NO3-N for each day of saturation. Use the Table below to determine how much fertilizer NO3-N was in the soil. A very wet April has prohibited any early soybean planting. Little, if any, soybeans have been planted as of April 25. Wet weather forecast through the end of April indicates soybean planting may not get started until after May 1. Be patient; we still have time to get good yields on soybeans. Recent soybean planting date research in west Kentucky indicates soybean yield losses do not normally occur until after mid-May. So we still have about three weeks to plant soybeans for top yield potential. Soybeans planted after May 15 to 20 has an average yield loss of ½ bu per acre per day, and soybeans planted in early June has an average yield loss of 1 bu/acre/day. Currently, soil temperatures are adequate for planting soybeans, but wet soil conditions are not suitable. Planting into wet soil conditions can result in sidewall compaction, poor emergence, and a reduced soybean stand. According to the latest Kentucky Pest News, armyworm populations are tracking similar to the outbreak years of 2006 and 2008. Armyworms can cause problems in corn, wheat and other grass crops. Stay tuned to population counts to see if management is necessary. In the same Kentucky Pest News, Ric Bessin writes that the EPA has approved SmartStax refuge in the bag, being marketed as SmartStax Refuge Complete by Monsanto and SmartStax Refuge Advanced by Dow AgroSciences. See the latest Kentucky Pest News for more information. Common seed-treatment fungicides provide a fair amount of protection against seed and seedling diseases. It’s always a good idea to monitor corn stands for emergence and stand establishment, but I am less worried about widespread seedling problems than I am about leaf diseases. Continued sogginess could eventually result in major delays in planting. Corn crops planted in early to mid-May and beyond are at greater risk from a variety of destructive diseases, especially gray leaf spot, northern leaf blight, and southern rust. We’re not at that stage yet, but it is worth being aware of this potential risk. Tom Priddy, with the University of Kentucky Ag Weather Center sent out the following link for April rainfall history. With six days left to report, Lexington and Frankfort both have the wettest April in history. 
Louisville is less than an inch from beating its record totals for rainfall. Bowling Green is fairing much better with 'only' 6.82 inches so far. No doubt many of you are finding ways to keep busy while we all wait for fields to dry by fixing the old tractor that always leaked fluid, mending the fence that the calves always seem to break at weaning, or cleaning up the shop, or... perish the thought... catching up on the honey-do list. One more thing you can do while you wait for the weather to clear up is to take a look at the Kentucky SoyMVP report for 2009 and 2010. Be on alert for stripe rust. The disease may have overwintered in your area and there may be some hot spots that could serve as fuel for a rapidly developing problem on a field by field basis. I wrote a KY Pest News article on this a couple of issues ago. It may not be a problem, but it is easy to miss things this time of year unless active crop scouting is taking place. The disease level in this particular field really increased over the past two weeks. Phil Needham said he has seen stripe rust in other fields too, but this one in Oak Grove is the worst so far. Still, there may be fields with a similar level of disease. For information on management of stripe rust go to Chapter 7 of the Wheat Management Guide. Figure 1. Stripe rust in wheat. Figure 2. Stripe rust in wheat. Are University Corn Planting Date Recommendations any Good? The recommended window to plant corn in central Kentucky is April 15 to May 15. Yet, many producers will plant earlier than this if the weather allows. Often, we hear that the first fields planted have the best yields. But, this observation is confounded, because the earliest fields planted are often well-drained and best suited for early planting. So, were the high yields due to early planting or good soils or both? In an attempt to get at this question, data from production farms in central Kentucky were evaluated only for planting and yield. Other variables such as soil type, hybrid number, hybrid maturity, seeding rate, weed control, etc. were not considered. Each data point on each figure represents planting date and yield for a single field in a single year. Figure 1 is the combined data for all years and the x-axis is “day of year” where 105 = April 15, 120 = May 1 and 135 = May 15. Yields in 2005 were extremely low and most planting dates were between April 10 and April 25, about the earliest planting for any of the years. The the USDA rates about 81% of Kentucky wheat either good or excellent for the April 3, 2011 reporting period in the Kentucky Weekly Crop and Weather Report. Meanwhile, the USDA reported that only 37% of the wheat was either good or excellent in 18 states for the same reporting period. Considering how much of the wheat was behind just a month ago, this report is good news. While we are not out of the woods yet, this is a promising start as wheat joints and pushes toward the flag leaf. There is still a chance for a freeze event, as the 50% probability for final freeze is around April 15 for much of southern Kentucky. Also, producers need to check stands for any signs of diseases as well. Even though there are some risks to this crop, it nice to hear some good news.
import scrapy
import time
from scrapy.selector import Selector
from selenium import webdriver
import traceback


class VeloBaseScrapper(scrapy.Spider):
    name = 'velobase'
    start_urls = ['http://velobase.com/ListComponents.aspx?ClearFilter=true']

    def __init__(self):
        self.driver = webdriver.Firefox()

    def __del__(self):
        # webdriver exposes quit(), not stop()
        self.driver.quit()

    def parse(self, response):
        hxs = Selector(response)
        self.driver.get(response.url)
        while True:
            try:
                self.logger.info("=" * 60)
                self.logger.info("HXS %s ", hxs.xpath('//tr[@class="GroupHeader1"]/td/a/text()').extract()[0])
                self.logger.info("=" * 60)
                # collect the detail-page links on the current results page only,
                # instead of re-yielding the accumulated list on every page
                links = hxs.xpath(
                    '//table[@class="content"]//tr[@class="content_normal" or @class="content_alternate"]/td/a[@class=" ttiptxt"]/@href'
                ).extract()
                self.logger.info("LINKS %s ", links)
                self.logger.info("=" * 60)
                for link in links:
                    full_url = response.urljoin(link)
                    yield scrapy.Request(full_url, callback=self.parse_details)
                nextPage = self.driver.find_element_by_link_text('Next')
                nextPage.click()
                time.sleep(3)
                hxs = Selector(text=self.driver.page_source)
            except Exception:
                traceback.print_exc()
                break

    def parse_details(self, response):
        componentCategory = response.xpath('//td[@id="ctl00_ContentPlaceHolder1_GenInfo"]/table/tr[1]/td/text()').extract()[-1]
        componentName = response.xpath('//td[@id="ctl00_ContentPlaceHolder1_GenInfo"]/table/tr[2]/td/text()').extract()[-1]
        componentBrand = response.xpath('//td[@id="ctl00_ContentPlaceHolder1_GenInfo"]/table/tr[3]/td/a/text()').extract()[-1]
        componentCountry = response.xpath('//td[@id="ctl00_ContentPlaceHolder1_GenInfo"]/table/tr[7]/td/text()').extract()[-1]
        componentDescription = response.xpath('//td[@id="ctl00_ContentPlaceHolder1_GenInfo"]/table/tr/td[contains(text(),"Country:")]/following-sibling::td').extract()

        self.logger.info("-" * 70)
        self.logger.info(" COMPONENT %s ", componentName)
        self.logger.info("-" * 70)

        yield {
            'category': componentCategory,
            'name': componentName,
            'brand': componentBrand,
            'country': componentCountry,
            'description': componentDescription
        }
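For completeness, a spider like this is usually launched either with the scrapy CLI or programmatically via CrawlerProcess. The runner below is only a sketch: the module name, feed path and settings values are assumptions, and the FEEDS setting requires a reasonably recent Scrapy release.

# run_velobase.py -- illustrative runner, not part of the spider above
from scrapy.crawler import CrawlerProcess

# from velobase_spider import VeloBaseScrapper  # wherever the spider class lives

process = CrawlerProcess(settings={
    "FEEDS": {"components.json": {"format": "json"}},  # write scraped items to JSON
    "LOG_LEVEL": "INFO",
})
process.crawl(VeloBaseScrapper)
process.start()  # blocks until the crawl (and the Selenium-driven paging) finishes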
We are delighted that you are considering becoming a member of Havurah Shalom. More than 400 households in the Portland-Vancouver Metro area have made Havurah their spiritual home. You will find a wealth of information about Havurah’s activities and programs on our website, but if you have any questions, please don't hesitate to call our office at 503-248-4662 or email us. You can print and complete our membership application here. Havurah offers flexibility with regard to dues; we don't want finances to be an obstacle to membership. Havurah member Karen Westerman would be happy to answer any questions you might have. Pictured above are Havurah members playing Settlers of Catan on Game Day.
import matplotlib.pyplot as plt import cPickle as pkl import numpy as np import seaborn.apionly as sns from lab.plotting import histogram def enrichment(positions): distances = np.abs(positions[np.isfinite(positions)]) return np.mean(distances), np.std(distances) / np.sqrt(len(distances)) def calc_enrichment(pos, masks): enrich = [] for rep_positions, rep_masks in zip(pos, masks): enrich.append( [np.pi / 2 - enrichment(iter_positions[iter_mask])[0] for iter_positions, iter_mask in zip( rep_positions, rep_masks)]) return enrich def calc_final_distributions(pos, masks): final_dist = [] for rep_positions, rep_masks in zip(pos, masks): final_dist.extend(rep_positions[-1][rep_masks[-1]].tolist()) return final_dist def plot_enrichment(ax, enrichment, color, title='', rad=True): ax.plot(range(9), np.mean(enrichment, axis=0), color=color) ax.plot(range(9), np.percentile(enrichment, 5, axis=0), ls='--', color=color) ax.plot(range(9), np.percentile(enrichment, 95, axis=0), ls='--', color=color) ax.fill_between( range(9), np.percentile(enrichment, 5, axis=0), np.percentile(enrichment, 95, axis=0), facecolor=color, alpha=0.5) sns.despine(ax=ax) ax.tick_params(length=3, pad=2, direction='out') ax.set_xlim(-0.5, 8.5) if rad: ax.set_ylim(-0.15, 0.5) ax.set_ylabel('Enrichment (rad)') else: ax.set_ylim(-0.15, 0.10 * 2 * np.pi) y_ticks = np.array(['0', '0.05', '0.10']) ax.set_yticks(y_ticks.astype('float') * 2 * np.pi) ax.set_yticklabels(y_ticks) ax.set_ylabel('Enrichment (fraction of belt)') ax.set_xlabel("Iteration ('session' #)") ax.set_title(title) def plot_final_distributions( ax, final_dists, colors, labels=None, title='', rad=True): if labels is None: labels = [None] * len(final_dists) for final_dist, color, label in zip(final_dists, colors, labels): histogram( ax, final_dist, bins=50, range=(-np.pi, np.pi), color=color, filled=False, plot_mean=False, normed=True, label=label) ax.tick_params(length=3, pad=2, direction='out') ax.axvline(ls='--', color='0.3') ax.set_xlim(-np.pi, np.pi) if rad: ax.set_xlabel('Distance from reward (rad)') else: ax.set_xlabel('Distance from reward (fraction of belt)') ax.set_xticks([-np.pi, -np.pi / 2, 0, np.pi / 2, np.pi]) ax.set_xticklabels(['-0.50', '-0.25', '0', '0.25', '0.50']) ax.set_ylim(0, 0.3) ax.set_ylabel('Normalized density') ax.set_title(title) def plot_parameters(axs, model, enrich): positions = np.linspace(-np.pi, np.pi, 1000) bs, ks = model.shift_mean_var(positions) recur = model.recur_by_position(positions) axs[0].plot(positions, recur) axs[0].set_xlim(-np.pi, np.pi) axs[0].set_ylim(0., 1.) 
axs[0].set_xlabel('Position') axs[0].set_ylabel('Recurrence probability') axs[1].plot(positions, bs) axs[1].set_xlim(-np.pi, np.pi) axs[1].set_xlabel('Position') axs[1].set_ylabel('Offset') axs[2].plot(positions, 1 / ks) axs[2].set_xlim(-np.pi, np.pi) axs[2].set_xlabel('Position') axs[2].set_ylabel('Variance') axs[3].plot(range(9), np.mean(enrich, axis=0), color='b') axs[3].fill_between( range(9), np.percentile(enrich, 5, axis=0), np.percentile(enrich, 95, axis=0), facecolor='b', alpha=0.5) axs[3].set_xlabel('Iteration') axs[3].set_ylabel('Enrichment (rad)') def plot_models( models, model_labels=None, n_cells=1000, n_runs=100, n_iterations=8): if model_labels is None: model_labels = ['Model {}'.format(idx) for idx in range(len(models))] fig, axs = plt.subplots(4, len(models), figsize=(10, 10)) models[0].initialize(n_cells=n_cells) for model in models[1:]: model.initialize_like(models[0]) initial_mask = models[0].mask initial_positions = models[0].positions masks = [] positions = [] enrichment = [] for model, model_axs in zip(models, axs.T): masks.append([]) positions.append([]) for _ in range(n_runs): model.initialize( initial_mask=initial_mask, initial_positions=initial_positions) model.run(n_iterations) masks[-1].append(model._masks) positions[-1].append(model._positions) enrichment.append(calc_enrichment(positions[-1], masks[-1])) plot_parameters(model_axs, model, enrichment[-1]) for ax in axs[:, 1:].flat: ax.set_ylabel('') for ax in axs[:2, :].flat: ax.set_xlabel('') for label, ax in zip(model_labels, axs[0]): ax.set_title(label) offset_min, offset_max = np.inf, -np.inf for ax in axs[1]: offset_min = min(offset_min, ax.get_ylim()[0]) offset_max = max(offset_max, ax.get_ylim()[1]) for ax in axs[1]: ax.set_ylim(offset_min, offset_max) var_min, var_max = np.inf, -np.inf for ax in axs[2]: var_min = min(var_min, ax.get_ylim()[0]) var_max = max(var_max, ax.get_ylim()[1]) for ax in axs[2]: ax.set_ylim(var_min, var_max) enrich_min, enrich_max = np.inf, -np.inf for ax in axs[3]: enrich_min = min(enrich_min, ax.get_ylim()[0]) enrich_max = max(enrich_max, ax.get_ylim()[1]) for ax in axs[3]: ax.set_ylim(enrich_min, enrich_max) return fig if __name__ == '__main__': import enrichment_model as em import enrichment_model_theoretical as emt params_path_A = '/analysis/Jeff/Df16A/Df_remap_paper_v2/data/enrichment_model/Df_model_params_A.pkl' params_path_B = '/analysis/Jeff/Df16A/Df_remap_paper_v2/data/enrichment_model/Df_model_params_B.pkl' params_path_C = '/analysis/Jeff/Df16A/Df_remap_paper_v2/data/enrichment_model/Df_model_params_C.pkl' # # WT to theoretical # # WT_params_path = params_path_C # WT_params = pkl.load(open(WT_params_path, 'r')) # WT_model = em.EnrichmentModel2(**WT_params) # recur_model = emt.EnrichmentModel2_recur( # kappa=1, span=0.8, mean_recur=0.4, **WT_params) # offset_model = emt.EnrichmentModel2_offset(alpha=0.25, **WT_params) # var_model = emt.EnrichmentModel2_var( # kappa=1, alpha=10, mean_k=3, **WT_params) # models = [WT_model, recur_model, offset_model, var_model] # model_labels = ['WT model', 'Stable recurrence', 'Shift towards reward', # 'Stable position'] params_A = pkl.load(open(params_path_A, 'r')) params_B = pkl.load(open(params_path_B, 'r')) params_C = pkl.load(open(params_path_C, 'r')) model_A = em.EnrichmentModel2(**params_A) model_B = em.EnrichmentModel2(**params_B) model_C = em.EnrichmentModel2(**params_C) models = [model_A, model_B, model_C] model_labels = ['A', 'B', 'C'] fig = plot_models( models, model_labels, n_cells=1000, n_runs=100, n_iterations=8) 
    fig.savefig('Df_model_parameters.pdf')
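As a quick sanity check on the statistic being plotted: enrichment() averages the absolute circular distance of the finite positions from the reward at 0, and π/2 minus that mean is what the curves show. The synthetic example below (made-up data, not from the experiments) confirms the two limiting cases: cells clustered at the reward give a clearly positive value, while uniformly placed cells give roughly zero.

import numpy as np


def enrichment(positions):
    # same one-liner as in the module above, repeated so the check is self-contained
    distances = np.abs(positions[np.isfinite(positions)])
    return np.mean(distances), np.std(distances) / np.sqrt(len(distances))


near_reward = np.random.vonmises(0.0, 4.0, size=1000)   # place fields clustered at the reward
uniform = np.random.uniform(-np.pi, np.pi, size=1000)    # no enrichment

print(np.pi / 2 - enrichment(near_reward)[0])   # clearly positive
print(np.pi / 2 - enrichment(uniform)[0])       # close to zero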
Wildenvey.com - Ugg Look Alike Boots Cheap. The Ugg Boot is one of by far the most well known styles of footwear on the market right this moment. Any person who's not acquainted with them should really think about slipping one on! They're remarkably smooth and luxurious. They provide a wonderful degree of comfort and ease plus a superb fashion as well. They can be very heat and cozy. Your ft is going to be in shoe heaven! Let us have a closer glimpse at what on earth is obtainable during the Ugg line and find out if we won't locate something which you are going to love! This is relevant to Ugg Look Alike Boots Cheap. Boots are fantastic because they not simply maintain your ft heat but they cover a number of the legs, likewise supplying best warmth. Here's a look at many of the different varieties of boots you will find available for women and men, which include ankle boots, knee duration boots, Ugg boots and more. Just whenever your shoes or boots start out having comfortable you notice a split about the aspect on the sole. Or maybe a heel starts to different. You could take the culprit into a cobbler for many 'shoe restoration' to acquire the only real fixed or perhaps the boot heel glued back again or else you can do it by yourself in the couple seconds. You can find a pair of selections. Shoe Goo and a complicated or business quality cyanoacrylate. An awesome strategy to retain the lengthy long lasting glance on UGG boots. These treatment products and solutions help protect the pure look about the boots and therefore are very simple to implement. Caring for boots hasn't been less difficult. Connected to Ugg Look Alike Boots Cheap, The marketplace is flooded with different types of sneakers and new styles sign up for the catalogue each and every year. Footwear are broadly divided into handful of categories like Formal, Everyday, Social gathering, Vacation, Trend, and Sporting activities. Each classification has further sub-categories, each individual of which even have an array of footwear to provide.
import cv2
import numpy as np
import time
from matplotlib import pyplot as plt

cap = cv2.VideoCapture(1)  # open the video capture device
# timeF = 0  # timer
start = False

if cap.isOpened():  # check that the capture device opened correctly
    rval, frame = cap.read()
    start = True
else:
    rval = False

time1 = time.time()  # start the timer

while start:  # read video frames in a loop
    rval, frame = cap.read()
    cv2.imshow("capture", frame)  # live video window
    time2 = time.time()
    timeF = time2 - time1
    timeF = int(timeF)
    print(timeF)

    if (timeF % 10 == 0):  # grab and store a frame roughly every 10 seconds (timeF is elapsed seconds)
        count = '%d' % timeF
        url = '%d' % timeF + ".png"
        cv2.imwrite(url, frame)  # save the raw frame as an image

        # read the frame back and correct the perspective
        a = cv2.imread(url)
        rows, cols, channels = a.shape
        list1 = np.float32([[86, 21], [514, 12], [39, 464], [566, 462]])
        list2 = np.float32([[0, 0], [720, 0], [0, 720], [720, 720]])
        M = cv2.getPerspectiveTransform(list1, list2)
        img_perspective = cv2.warpPerspective(a, M, (720, 720))
        print('perspective:\n', M)
        cv2.imwrite(url, img_perspective)  # overwrite with the rectified image
        # cv2.imshow(url, img_perspective)
        cv2.waitKey(5)

        # save the grayscale difference image used to find the newly placed (black) stone
        if timeF != 0 and (timeF / 10) % 2 == 1:
            a = cv2.imread(url)
            src = '%d' % (timeF - 10)
            lasturl = src + '.png'
            print(lasturl)
            b = cv2.imread(lasturl)
            Graya = cv2.cvtColor(a, cv2.COLOR_BGR2GRAY)
            Grayb = cv2.cvtColor(b, cv2.COLOR_BGR2GRAY)
            c = Grayb - Graya
            Grayurl = 'sub' + count + '.png'
            cv2.imwrite(Grayurl, c)  # grayscale difference image
            # cv2.imshow(Grayurl, c)

            # template matching to locate the new stone in the difference image
            img = cv2.imread(Grayurl, 0)
            img2 = img.copy()
            template = cv2.imread('test.png', 0)
            w, h = template.shape[::-1]
            methods = ['cv2.TM_SQDIFF']
            for meth in methods:
                img = img2.copy()
                method = eval(meth)
                # Apply template Matching
                res = cv2.matchTemplate(img, template, method)
                min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
                # If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take minimum
                if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
                    top_left = min_loc
                else:
                    top_left = max_loc
                bottom_right = (top_left[0] + w, top_left[1] + h)
                print(int(top_left[0] + w / 2), int(top_left[1] + h / 2))
                cv2.rectangle(img, top_left, bottom_right, 255, 2)
                plt.figure()
                plt.subplot(121), plt.imshow(res, cmap='gray')
                plt.title('Matching Result'), plt.xticks([]), plt.yticks([])
                plt.subplot(122), plt.imshow(img, cmap='gray')
                plt.title('Detected Point'), plt.xticks([]), plt.yticks([])
                plt.suptitle(meth)
                # plt.show()  # display the template-matching figure

cap.release()
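Pulled out of the capture loop, the rectification step is just a four-point perspective warp. The helper below restates it as a standalone function; the corner coordinates are the ones hard-coded above, and the synthetic frame is only there to make the snippet runnable.

import cv2
import numpy as np


def rectify_board(image, corners, size=720):
    """Warp the quadrilateral given by corners (TL, TR, BL, BR) onto a size x size square."""
    src = np.float32(corners)
    dst = np.float32([[0, 0], [size, 0], [0, size], [size, size]])
    M = cv2.getPerspectiveTransform(src, dst)
    return cv2.warpPerspective(image, M, (size, size))


frame = np.zeros((480, 640, 3), dtype=np.uint8)  # placeholder frame
board = rectify_board(frame, [(86, 21), (514, 12), (39, 464), (566, 462)])
print(board.shape)  # (720, 720, 3)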
Rabbi Yechiel Eckstein, the president of the International Fellowship of Christians and Jews, which raises about $100 million per year in small donations from evangelical Christians and gives the money to Jewish and Zionist causes, is miffed that the Jewish people are not coming up with more money to help Jews in the former Soviet Union. He is miffed that the government of Israel is seemingly Welsching on several million dollars that it pledged to help the Heftsiba school system in the FSU. Heftsiba, which was started as a covert operation run out of the office of Israel’s prime minister during the 1960s as a way to sneak Zionist education, Hebrew education and Jewish religious supplies to Soviet Jews, became its own school system after the fall of communism. It was run out of the Israeli Education Ministry for nearly 20 years before being handed off to the Jewish Agency for Israel about two years ago, according to Eckstein. Last year the 26-school Heftsiba system looked like it was going to be a casualty of the economic downturn, the Madoff scandal and the Jewish Agency facing an $80 million budget cut. But then in February Eckstein’s IFCJ stepped in with emergency funding. Eckstein said that he would match up to $5 million in funding over the course of this year to keep the schools afloat, so long as other Jewish donors matched the funds. The Israeli government pledged to pitch in $1 million. Based on that pledge, the fellowship cut a $1 million check to Heftsiba. According to Eckstein, Israel has yet to pay up, even though it said it would do so immediately. Eckstein said that he has been pushing the government to come up with the money. But the responsibility for paying the $1 million is being passed from ministry to ministry, with no one wanting the money to come out of their own budget, Eckstein said. He had a meeting with Knesset members on June 2 to press for the money. He was unable to get an answer, but was told that the Education Ministry will take over the Heftsiba system from the Jewish Agency next fall. The money to Heftsiba was part of an $12 million pledge that the fellowship made to save three Jewish school systems in the FSU, including $6 million to keep the Chabad school system running. The Chabad system was heavily financed by Lev Leviev, the diamond mogul who saw his stock plummet by 90 percent, losing him a half a billion dollars over the past year. The fellowship also gave $1 million to the Shma Yisrael school system, which had been heavily financed by the Reichmann family of Canada. While most charities are floundering, the fellowship is on pace to raise more money this year than it ever has. Last year the organization took in roughly $90 million; this year, Eckstein said, it is already 20 percent above pace. He project that the fellowship will end up raising between $110 million and $120 million in 2009. But Eckstein wants to know why the Jewish community is not stepping up to offer more assistance to Jews in the former Soviet Union, after spending 20 years of helping to build a community there. “The Jewish community can’t come up with $2 million for future of Jewish children?” he said.
#coding: utf8 import os import shutil import subprocess import glob from distutils.core import setup def install_discount(): root = os.path.dirname(os.path.abspath(__file__)) os.chdir('_discount') subprocess.call('chmod 755 configure.sh'.split()) subprocess.call( ['./configure.sh', '--with-fenced-code', '--with-urlencoded-anchor', '--enable-all-features', '--shared', ], env=os.environ) subprocess.call(['make', 'install']) os.chdir(root) _md_path = None for path in glob.glob('_discount/libmarkdown.so*') + glob.glob('_discount/libmarkdown.dylib'): if not os.path.islink(path): _md_path = path break if not _md_path: return # ignore md_path = os.path.join(root, 'discount/markdown.so') shutil.copy(_md_path, md_path ) install_discount() setup( name='discount', license='BSD', version='0.2.1STABLE', author='Trapeze', author_email='[email protected]', url="http://github.com/trapeze/python-discount", download_url='http://pypi.python.org/pypi/discount', description='A Python interface for Discount, the C Markdown parser', long_description=open('README.rst').read(), keywords='markdown discount ctypes', packages = ['discount', ], package_data={'discount': ['markdown.so']}, classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', 'License :: OSI Approved :: BSD License', 'Programming Language :: C', 'Programming Language :: Python', 'Topic :: Text Processing :: Markup' ], )
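Because the build step copies the shared library into the package as discount/markdown.so and the bindings are ctypes-based, a quick post-install smoke test is simply to load that file with ctypes. This is an illustrative check, not part of the package; it assumes the install completed and the .so was bundled as declared in package_data.

import ctypes
import os

import discount  # assumes the package installed cleanly

lib_path = os.path.join(os.path.dirname(discount.__file__), 'markdown.so')
libmarkdown = ctypes.CDLL(lib_path)  # raises OSError if the build/copy step failed
print('loaded', lib_path)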
At West Meon Pottery we can reproduce any design of terracotta finial. Click on any photo to see an enlargement. This will be shown as a pop-up - if you have software to stop pop-ups you may need to turn it off.
# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # From "RUNGE-KUTTA METHODS FOR THE STRONG APPROXIMATION OF SOLUTIONS OF STOCHASTIC DIFFERENTIAL EQUATIONS". # For diagonal noise structure. # (ODE order, SDE strong order) = (3.0, 1.5). STAGES = 4 C0 = (0, 1, 1 / 2, 0) C1 = (0, 1 / 4, 1, 1 / 4) A0 = ( (), (1,), (1 / 4, 1 / 4), (0, 0, 0) ) A1 = ( (), (1 / 4,), (1, 0), (0, 0, 1 / 4) ) B0 = ( (), (0,), (1, 1 / 2), (0, 0, 0), ) B1 = ( (), (-1 / 2,), (1, 0), (2, -1, 1 / 2) ) alpha = (1 / 6, 1 / 6, 2 / 3, 0) beta1 = (-1, 4 / 3, 2 / 3, 0) beta2 = (1, -4 / 3, 1 / 3, 0) beta3 = (2, -4 / 3, -2 / 3, 0) beta4 = (-2, 5 / 3, -2 / 3, 1)
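A cheap sanity check on tableau constants like these is to verify the usual consistency conditions: the drift weights sum to one, and each abscissa equals the row sum of its coupling matrix. The snippet below assumes it runs in the same module (it reads the constants defined above) and is only a verification aid, not part of the published scheme.

def _row_sums(A):
    return tuple(sum(row) for row in A)


assert abs(sum(alpha) - 1.0) < 1e-12   # drift weights sum to 1
assert _row_sums(A0) == C0             # c_i^(0) equals the i-th row sum of A0
assert _row_sums(A1) == C1             # c_i^(1) equals the i-th row sum of A1
print("tableau consistency checks passed")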
It's a shame that so few people are familiar with Led Zeppelin's catalog. We wrote earlier about "things we don't like" memes, and this one is pretty related. For this particular format, memers generally google someone or something they love (very popular within the animemes community) and then their life expectancy. When it's revealed that their favorite character is near death, we're met with a weeping face. The meme can be used in the opposite way, as seen with one of the Lil Pump memes, to lament that someone will be living so long. The meme is pretty basic, but some people are having fun with it. Sure, you "accidentally" Googled that.

Fyrkantig, Sparsam, Dagstorp, Grundtal. Unless you speak Swedish, these words can only mean one thing: Ikea. But no more. Ikea, which for years has named its comfortable, affordable, and dorm-room-ready furniture by following a pretty strict system, is now in the trolling business, and business is good. This is all a part of "Ikea's Retail Therapy." Through this site, Ikea doesn't just furnish your apartment with practical and fashionable Swedish goods, but also fixes your life — or, at least, gives you something to buy, so you can forget about your actual problems. Check it out.
""" This is free and unencumbered software released into the public domain. Anyone is free to copy, modify, publish, use, compile, sell, or distribute this software, either in source code form or as a compiled binary, for any purpose, commercial or non-commercial, and by any means. In jurisdictions that recognize copyright laws, the author or authors of this software dedicate any and all copyright interest in the software to the public domain. We make this dedication for the benefit of the public at large and to the detriment of our heirs and successors. We intend this dedication to be an overt act of relinquishment in perpetuity of all present and future rights to this software under copyright law. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. For more information, please refer to <http://unlicense.org/> @author: Josiah Walker """ import numpy def BresenhamLine(A,B): """ This implements the bresenham algorithm to draw a line from A to B Returns: all x,y index pairs that are in the line """ if A[0] == B[0]: #if A and B share the same X coord, draw a straight line in Y increment = 1 if B[1] >= A[1] else -1 #decide whether to draw forwards or backwards return [(A[0],i) for i in xrange(A[1],B[1]+increment,increment)] elif A[1] == B[1]: #if A and B share the same Y coord, draw a straight line in X increment = 1 if B[0] >= A[0] else -1 #decide whether to draw forwards or backwards return [(i,A[1]) for i in xrange(A[0],B[0]+increment,increment)] else: #this draws a diagonal line incrementx = 1 if B[0] >= A[0] else -1 #set the direction of line drawing incrementy = 1 if B[1] >= A[1] else -1 result = [] yval = A[1] #set Y start slope = A-B #calculate slope - assuming A and B are numpy arrays slope = abs(slope[1]/float(slope[0])) error = 0.0 #initialise Y error counter for i in xrange(A[0],B[0]+incrementx,incrementx): #for all X values, step forward in X result.append((i,yval)) error += slope while error >= 0.5: #while the Y error is too large, step forward in Y yval += incrementy error -= 1.0 result.append((i,yval)) return result def BresenhamBorder(A,B): """ Unlike the line, this does only one pixel per Y row, so it can be used in fill algorithms efficiently Returns: all x,y index pairs that are in the border """ if A[0] == B[0]: #if A and B share the same X coord, draw a straight line in Y increment = 1 if B[1] >= A[1] else -1 return [(A[0],i) for i in xrange(A[1],B[1]+increment,increment)] elif A[1] == B[1]: #we're screwed - we can only return one Y value return [numpy.round((A+B)/2).astype(numpy.int32)] else: #work out what to do for a diagonal incrementy = 1 if B[1] >= A[1] else -1 #set the direction of line drawing slope = A-B slope = slope[0]/float(slope[1])*incrementy xvals = numpy.round(A[0] + slope*numpy.arange(0.,abs(A[1]-B[1])+1,1.)).astype(numpy.int32) return [(xvals[i], y) for i,y in enumerate(xrange(A[1],B[1]+incrementy,incrementy))] def BresenhamPolygon(vertices): """ Rasterizes a convex polygon from a list of 2d int vertices. All pixels within the polygon are returned as a list. 
""" #put the largest value at the head of the list: maxvert = 0 for i in xrange(len(vertices)): if vertices[i][1] > vertices[maxvert][1]: maxvert = i vertices = vertices[maxvert:] + vertices[:maxvert] #split the list in to two sides based on max->min paths minvert = 0 for i in xrange(len(vertices)): if vertices[i][1] < vertices[minvert][1]: minvert = i #skip everything of equal Y height on the top start = 0 while start < len(vertices)-2 and vertices[start][1] == vertices[start+1][1]: start += 1 side1 = vertices[start:minvert+1] #create the "left" border l = BresenhamBorder(side1[0],side1[1]) for i in xrange(1,len(side1)-1): l += BresenhamBorder(side1[i],side1[i+1])[1:] #skip everything of equal Y height on the bottom while minvert < len(vertices)-2 and vertices[minvert][1] == vertices[minvert+1][1]: minvert += 1 side2 = vertices[minvert:] side2.reverse() side2 = [vertices[0]] + side2 #create the "right" border r = BresenhamBorder(side2[0],side2[1]) for i in xrange(1,len(side2)-1): r += BresenhamBorder(side2[i],side2[i+1])[1:] #do horizontal scans and save all the cell locations in the triangle result = [] for i in xrange(len(l)): increment = 1 if r[i][0] >= l[i][0] else -1 result += [(j,l[i][1]) for j in xrange(l[i][0],r[i][0]+increment,increment)] return result def BresenhamTriangle(A,B,C): #this is here because not all the functions have been upgraded to polygon yet return BresenhamPolygon([A,B,C])
Toddler Collection Mini Beanz Lounge – Toddlers claim almost everything as their own. Well, now they can! Mini Beanz has created an armchair just for them! The Toddler Collection offers cute little lounges that will complement any nursery, children's bedroom, play area or lounge room. Great for your child's little reading nook too!

Newborn Collection Mini Beanz Bean Bags are exclusively designed to be used from birth through to pre-teens (approx. 30kg). They are a registered product of Mini Beanz. With flexible seating options you can interchange covers to suit your colour style and preference. Newborn Collection Mini Beanz Bean Bags are so functional they have been designed to be used indoors and outdoors with the help of waterproof material. Mini Beanz Bean Bags also help to relieve reflux and prevent flat head in newborns. Position the beans to help keep your baby upright or to mould around your baby, enveloping them in a world of safety and comfort. Extra features include an oversized shape, side pocket and carry handle. Each Mini Beanz is PACKAGED FLAT in a library-style bag with a handle, shoulder straps and a clear pocket to insert a name card. We also have toddler lounges.
# -*- coding: utf-8 -*- from datetime import datetime import mock import pytest import random from nose.tools import * # noqa: from api.base.settings.defaults import API_BASE from api.nodes.serializers import NodeContributorsCreateSerializer from framework.auth.core import Auth from osf_tests.factories import ( fake_email, AuthUserFactory, OSFGroupFactory, ProjectFactory, UnconfirmedUserFactory, UserFactory, ) from osf.utils import permissions from rest_framework import exceptions from tests.base import capture_signals, fake from website.project.signals import contributor_added, contributor_removed from api_tests.utils import disconnected_from_listeners @pytest.fixture() def user(): return AuthUserFactory() @pytest.mark.django_db @pytest.mark.enable_quickfiles_creation @pytest.mark.enable_implicit_clean class NodeCRUDTestCase: @pytest.fixture() def user(self): return AuthUserFactory() @pytest.fixture() def user_two(self): return AuthUserFactory() @pytest.fixture() def title(self): return 'Cool Project' @pytest.fixture() def title_new(self): return 'Super Cool Project' @pytest.fixture() def description(self): return 'A Properly Cool Project' @pytest.fixture() def description_new(self): return 'An even cooler project' @pytest.fixture() def category(self): return 'data' @pytest.fixture() def category_new(self): return 'project' @pytest.fixture() def project_public(self, user, title, description, category): return ProjectFactory( title=title, description=description, category=category, is_public=True, creator=user ) @pytest.fixture() def project_private(self, user, title, description, category): return ProjectFactory( title=title, description=description, category=category, is_public=False, creator=user ) @pytest.fixture() def url_public(self, project_public): return '/{}nodes/{}/'.format(API_BASE, project_public._id) @pytest.fixture() def url_private(self, project_private): return '/{}nodes/{}/'.format(API_BASE, project_private._id) @pytest.fixture() def url_fake(self): return '/{}nodes/{}/'.format(API_BASE, '12345') @pytest.fixture() def make_contrib_id(self): def contrib_id(node_id, user_id): return '{}-{}'.format(node_id, user_id) return contrib_id @pytest.mark.django_db @pytest.mark.enable_quickfiles_creation @pytest.mark.enable_implicit_clean class TestNodeContributorList(NodeCRUDTestCase): @pytest.fixture() def url_private(self, project_private): return '/{}nodes/{}/contributors/'.format( API_BASE, project_private._id) @pytest.fixture() def url_public(self, project_public): return '/{}nodes/{}/contributors/'.format(API_BASE, project_public._id) def test_concatenated_id(self, app, user, project_public, url_public): res = app.get(url_public) assert res.status_code == 200 assert res.json['data'][0]['id'].split('-')[0] == project_public._id assert res.json['data'][0]['id'] == '{}-{}'.format( project_public._id, user._id) def test_permissions_work_with_many_users( self, app, user, project_private, url_private): users = { permissions.ADMIN: [user._id], permissions.WRITE: [], permissions.READ: [] } for i in range(0, 25): perm = random.choice(list(users.keys())) user = AuthUserFactory() project_private.add_contributor(user, permissions=perm) users[perm].append(user._id) res = app.get(url_private, auth=user.auth) data = res.json['data'] for user in data: api_perm = user['attributes']['permission'] user_id = user['id'].split('-')[1] assert user_id in users[api_perm], 'Permissions incorrect for {}. 
Should not have {} permission.'.format( user_id, api_perm) def test_return( self, app, user, user_two, project_public, project_private, url_public, url_private, make_contrib_id): # test_return_public_contributor_list_logged_in res = app.get(url_public, auth=user_two.auth) assert res.status_code == 200 assert res.content_type == 'application/vnd.api+json' assert len(res.json['data']) == 1 assert res.json['data'][0]['id'] == make_contrib_id( project_public._id, user._id) # test_return_private_contributor_list_logged_out res = app.get(url_private, expect_errors=True) assert res.status_code == 401 assert 'detail' in res.json['errors'][0] # test_return_private_contributor_list_logged_in_non_contributor res = app.get(url_private, auth=user_two.auth, expect_errors=True) assert res.status_code == 403 assert 'detail' in res.json['errors'][0] # test_return_private_contributor_list_logged_in_osf_group_member res = app.get(url_private, auth=user_two.auth, expect_errors=True) osf_group = OSFGroupFactory(creator=user_two) project_private.add_osf_group(osf_group, permissions.READ) res = app.get(url_private, auth=user_two.auth) assert res.status_code == 200 assert res.content_type == 'application/vnd.api+json' assert len(res.json['data']) == 1 assert res.json['data'][0]['id'] == make_contrib_id( project_private._id, user._id) def test_return_public_contributor_list_logged_out( self, app, user, user_two, project_public, url_public, make_contrib_id): project_public.add_contributor(user_two, save=True) res = app.get(url_public) assert res.status_code == 200 assert res.content_type == 'application/vnd.api+json' assert len(res.json['data']) == 2 assert res.json['data'][0]['id'] == make_contrib_id( project_public._id, user._id) assert res.json['data'][1]['id'] == make_contrib_id( project_public._id, user_two._id) def test_return_private_contributor_list_logged_in_contributor( self, app, user, user_two, project_private, url_private, make_contrib_id): project_private.add_contributor(user_two) project_private.save() res = app.get(url_private, auth=user.auth) assert res.status_code == 200 assert res.content_type == 'application/vnd.api+json' assert len(res.json['data']) == 2 assert res.json['data'][0]['id'] == make_contrib_id( project_private._id, user._id) assert res.json['data'][1]['id'] == make_contrib_id( project_private._id, user_two._id) def test_filtering_on_obsolete_fields(self, app, user, url_public): # regression test for changes in filter fields url_fullname = '{}?filter[fullname]=foo'.format(url_public) res = app.get(url_fullname, auth=user.auth, expect_errors=True) assert res.status_code == 400 errors = res.json['errors'] assert len(errors) == 1 assert errors[0]['detail'] == '\'fullname\' is not a valid field for this endpoint.' # middle_name is now middle_names url_middle_name = '{}?filter[middle_name]=foo'.format(url_public) res = app.get(url_middle_name, auth=user.auth, expect_errors=True) assert res.status_code == 400 errors = res.json['errors'] assert len(errors) == 1 assert errors[0]['detail'] == '\'middle_name\' is not a valid field for this endpoint.' 
def test_disabled_contributors_contain_names_under_meta( self, app, user, user_two, project_public, url_public, make_contrib_id): project_public.add_contributor(user_two, save=True) user_two.is_disabled = True user_two.save() res = app.get(url_public) assert res.status_code == 200 assert res.content_type == 'application/vnd.api+json' assert len(res.json['data']) == 2 assert res.json['data'][0]['id'] == make_contrib_id( project_public._id, user._id) assert res.json['data'][1]['id'] == make_contrib_id( project_public._id, user_two._id) assert res.json['data'][1]['embeds']['users']['errors'][0]['meta']['full_name'] == user_two.fullname assert res.json['data'][1]['embeds']['users']['errors'][0]['detail'] == 'The requested user is no longer available.' def test_total_bibliographic_contributor_count_returned_in_metadata( self, app, user_two, project_public, url_public): non_bibliographic_user = UserFactory() project_public.add_contributor( non_bibliographic_user, visible=False, auth=Auth(project_public.creator)) project_public.save() res = app.get(url_public, auth=user_two.auth) assert res.status_code == 200 assert res.json['links']['meta']['total_bibliographic'] == len( project_public.visible_contributor_ids) def test_unregistered_contributor_field_is_null_if_account_claimed( self, app, user): project = ProjectFactory(creator=user, is_public=True) url = '/{}nodes/{}/contributors/'.format(API_BASE, project._id) res = app.get(url, auth=user.auth, expect_errors=True) assert res.status_code == 200 assert len(res.json['data']) == 1 assert res.json['data'][0]['attributes'].get( 'unregistered_contributor') is None def test_unregistered_contributors_show_up_as_name_associated_with_project( self, app, user): project = ProjectFactory(creator=user, is_public=True) project.add_unregistered_contributor( 'Robert Jackson', '[email protected]', auth=Auth(user), save=True) url = '/{}nodes/{}/contributors/'.format(API_BASE, project._id) res = app.get(url, auth=user.auth, expect_errors=True) assert res.status_code == 200 assert len(res.json['data']) == 2 assert res.json['data'][1]['embeds']['users']['data']['attributes']['full_name'] == 'Robert Jackson' assert res.json['data'][1]['attributes'].get( 'unregistered_contributor') == 'Robert Jackson' project_two = ProjectFactory(creator=user, is_public=True) project_two.add_unregistered_contributor( 'Bob Jackson', '[email protected]', auth=Auth(user), save=True) url = '/{}nodes/{}/contributors/'.format(API_BASE, project_two._id) res = app.get(url, auth=user.auth, expect_errors=True) assert res.status_code == 200 assert len(res.json['data']) == 2 assert res.json['data'][1]['embeds']['users']['data']['attributes']['full_name'] == 'Robert Jackson' assert res.json['data'][1]['attributes'].get( 'unregistered_contributor') == 'Bob Jackson' def test_contributors_order_is_the_same_over_multiple_requests( self, app, user, project_public, url_public): project_public.add_unregistered_contributor( 'Robert Jackson', '[email protected]', auth=Auth(user), save=True ) for i in range(0, 10): new_user = AuthUserFactory() if i % 2 == 0: visible = True else: visible = False project_public.add_contributor( new_user, visible=visible, auth=Auth(project_public.creator), save=True ) req_one = app.get( '{}?page=2'.format(url_public), auth=Auth(project_public.creator)) req_two = app.get( '{}?page=2'.format(url_public), auth=Auth(project_public.creator)) id_one = [item['id'] for item in req_one.json['data']] id_two = [item['id'] for item in req_two.json['data']] for a, b in zip(id_one, id_two): 
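            # pagination must be deterministic: the same page should return
            # contributors in the same order on repeated requests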
assert a == b @pytest.mark.django_db @pytest.mark.enable_quickfiles_creation @pytest.mark.enable_implicit_clean class TestNodeContributorAdd(NodeCRUDTestCase): @pytest.fixture() def user_three(self): return AuthUserFactory() @pytest.fixture() def url_private(self, project_private): return '/{}nodes/{}/contributors/?send_email=false'.format( API_BASE, project_private._id) @pytest.fixture() def url_public(self, project_public): return '/{}nodes/{}/contributors/?send_email=false'.format( API_BASE, project_public._id) @pytest.fixture() def data_user_two(self, user_two): return { 'data': { 'type': 'contributors', 'attributes': { 'bibliographic': True, }, 'relationships': { 'users': { 'data': { 'type': 'users', 'id': user_two._id, } } } } } @pytest.fixture() def data_user_three(self, user_three): return { 'data': { 'type': 'contributors', 'attributes': { 'bibliographic': True, }, 'relationships': { 'users': { 'data': { 'type': 'users', 'id': user_three._id, } } } } } def test_add_contributors_errors( self, app, user, user_two, user_three, url_public): # test_add_node_contributors_relationships_is_a_list data = { 'data': { 'type': 'contributors', 'attributes': { 'bibliographic': True }, 'relationships': [{'contributor_id': user_three._id}] } } res = app.post_json_api( url_public, data, auth=user.auth, expect_errors=True) assert res.status_code == 400 assert res.json['errors'][0]['detail'] == exceptions.ParseError.default_detail # test_add_contributor_no_relationships data = { 'data': { 'type': 'contributors', 'attributes': { 'bibliographic': True } } } res = app.post_json_api( url_public, data, auth=user.auth, expect_errors=True) assert res.status_code == 400 assert res.json['errors'][0]['detail'] == 'A user ID or full name must be provided to add a contributor.' # test_add_contributor_empty_relationships data = { 'data': { 'type': 'contributors', 'attributes': { 'bibliographic': True }, 'relationships': {} } } res = app.post_json_api( url_public, data, auth=user.auth, expect_errors=True) assert res.status_code == 400 assert res.json['errors'][0]['detail'] == 'A user ID or full name must be provided to add a contributor.' # test_add_contributor_no_user_key_in_relationships data = { 'data': { 'type': 'contributors', 'attributes': { 'bibliographic': True }, 'relationships': { 'id': user_two._id, 'type': 'users' } } } res = app.post_json_api( url_public, data, auth=user.auth, expect_errors=True) assert res.status_code == 400 assert res.json['errors'][0]['detail'] == exceptions.ParseError.default_detail # test_add_contributor_no_data_in_relationships data = { 'data': { 'type': 'contributors', 'attributes': { 'bibliographic': True }, 'relationships': { 'users': { 'id': user_two._id } } } } res = app.post_json_api( url_public, data, auth=user.auth, expect_errors=True) assert res.status_code == 400 assert res.json['errors'][0]['detail'] == 'Request must include /data.' # test_add_contributor_no_target_type_in_relationships data = { 'data': { 'type': 'contributors', 'attributes': { 'bibliographic': True }, 'relationships': { 'users': { 'data': { 'id': user_two._id } } } } } res = app.post_json_api( url_public, data, auth=user.auth, expect_errors=True) assert res.status_code == 400 assert res.json['errors'][0]['detail'] == 'Request must include /type.' 
# test_add_contributor_no_target_id_in_relationships data = { 'data': { 'type': 'contributors', 'attributes': { 'bibliographic': True }, 'relationships': { 'users': { 'data': { 'type': 'users' } } } } } res = app.post_json_api( url_public, data, auth=user.auth, expect_errors=True) assert res.status_code == 400 assert res.json['errors'][0]['detail'] == 'A user ID or full name must be provided to add a contributor.' # test_add_contributor_incorrect_target_id_in_relationships data = { 'data': { 'type': 'contributors', 'attributes': { 'bibliographic': True }, 'relationships': { 'users': { 'data': { 'type': 'users', 'id': '12345' } } } } } res = app.post_json_api( url_public, data, auth=user.auth, expect_errors=True) assert res.status_code == 404 # test_add_contributor_no_type data = { 'data': { 'attributes': { 'bibliographic': True }, 'relationships': { 'users': { 'data': { 'id': user_two._id, 'type': 'users' } } } } } res = app.post_json_api( url_public, data, auth=user.auth, expect_errors=True) assert res.status_code == 400 assert res.json['errors'][0]['source']['pointer'] == '/data/type' # test_add_contributor_incorrect_type data = { 'data': { 'type': 'Incorrect type', 'attributes': { 'bibliographic': True }, 'relationships': { 'users': { 'data': { 'id': user_two._id, 'type': 'users' } } } } } res = app.post_json_api( url_public, data, auth=user.auth, expect_errors=True) assert res.status_code == 409 # test_unregistered_contributor_invalid_email data = { 'data': { 'type': 'contributors', 'attributes': { 'permission': 'admin', 'email': '[email protected]', 'full_name': 'John Doe' } } } res = app.post_json_api( url_public, data, auth=user.auth, expect_errors=True) assert res.status_code == 400 assert res.json['errors'][0]['detail'] == 'Unregistered contributor email address domain is blacklisted.' 
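    # Illustrative sketch only (not used by any test): the error cases above are
    # all malformed variations of a well-formed contributor payload, which with
    # the 'contributors'/'users' JSON:API types used throughout would look
    # roughly like:
    #
    #     {
    #         'data': {
    #             'type': 'contributors',
    #             'attributes': {'bibliographic': True},
    #             'relationships': {
    #                 'users': {'data': {'type': 'users', 'id': user_two._id}}
    #             }
    #         }
    #     }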
def test_contributor_create_invalid_data( self, app, user_three, url_public): res = app.post_json_api( url_public, 'Incorrect data', auth=user_three.auth, expect_errors=True) assert res.status_code == 400 assert res.json['errors'][0]['detail'] == exceptions.ParseError.default_detail res = app.post_json_api( url_public, ['Incorrect data'], auth=user_three.auth, expect_errors=True) assert res.status_code == 400 assert res.json['errors'][0]['detail'] == exceptions.ParseError.default_detail def test_add_contributor_dont_expose_email( self, app, user, user_two, project_public, data_user_two, url_public): res = app.post_json_api( url_public, data_user_two, auth=user.auth) assert res.status_code == 201 assert res.json['data']['attributes'].get('email') is None def test_add_contributor_is_visible_by_default( self, app, user, user_two, project_public, data_user_two, url_public): del data_user_two['data']['attributes']['bibliographic'] res = app.post_json_api( url_public, data_user_two, auth=user.auth, expect_errors=True) assert res.status_code == 201 assert res.json['data']['id'] == '{}-{}'.format( project_public._id, user_two._id) project_public.reload() assert user_two in project_public.contributors assert project_public.get_visible(user_two) def test_adds_bibliographic_contributor_public_project_admin( self, app, user, user_two, project_public, data_user_two, url_public): res = app.post_json_api(url_public, data_user_two, auth=user.auth) assert res.status_code == 201 assert res.json['data']['id'] == '{}-{}'.format( project_public._id, user_two._id) project_public.reload() assert user_two in project_public.contributors def test_adds_non_bibliographic_contributor_private_project_admin( self, app, user, user_two, project_private, url_private): data = { 'data': { 'type': 'contributors', 'attributes': { 'bibliographic': False }, 'relationships': { 'users': { 'data': { 'id': user_two._id, 'type': 'users' } } } } } res = app.post_json_api( url_private, data, auth=user.auth, expect_errors=True) assert res.status_code == 201 assert res.json['data']['id'] == '{}-{}'.format( project_private._id, user_two._id) assert res.json['data']['attributes']['bibliographic'] is False project_private.reload() assert user_two in project_private.contributors assert not project_private.get_visible(user_two) def test_adds_contributor_public_project_non_admin( self, app, user, user_two, user_three, project_public, data_user_three, url_public): project_public.add_contributor( user_two, permissions=permissions.WRITE, auth=Auth(user), save=True) res = app.post_json_api(url_public, data_user_three, auth=user_two.auth, expect_errors=True) assert res.status_code == 403 project_public.reload() assert user_three not in project_public.contributors.all() def test_adds_contributor_public_project_non_admin_osf_group( self, app, user, user_two, user_three, project_public, data_user_three, url_public): group = OSFGroupFactory(creator=user_two) project_public.add_osf_group(group, permissions.WRITE) res = app.post_json_api(url_public, data_user_three, auth=user_two.auth, expect_errors=True) assert res.status_code == 403 project_public.reload() assert user_three not in project_public.contributors.all() def test_adds_contributor_public_project_non_contributor( self, app, user_two, user_three, project_public, data_user_three, url_public): res = app.post_json_api(url_public, data_user_three, auth=user_two.auth, expect_errors=True) assert res.status_code == 403 assert user_three not in project_public.contributors.all() def 
test_adds_contributor_public_project_not_logged_in( self, app, user_two, project_public, data_user_two, url_public): res = app.post_json_api(url_public, data_user_two, expect_errors=True) assert res.status_code == 401 assert user_two not in project_public.contributors.all() def test_adds_contributor_private_project_admin( self, app, user, user_two, project_private, data_user_two, url_private): res = app.post_json_api(url_private, data_user_two, auth=user.auth) assert res.status_code == 201 assert res.json['data']['id'] == '{}-{}'.format( project_private._id, user_two._id) project_private.reload() assert user_two in project_private.contributors def test_adds_contributor_private_project_osf_group_admin_perms( self, app, user, user_two, user_three, project_private, data_user_two, url_private): osf_group = OSFGroupFactory(creator=user_three) project_private.add_osf_group(osf_group, permissions.ADMIN) res = app.post_json_api(url_private, data_user_two, auth=user_three.auth) assert res.status_code == 201 assert res.json['data']['id'] == '{}-{}'.format( project_private._id, user_two._id) project_private.reload() assert user_two in project_private.contributors def test_adds_contributor_without_bibliographic_private_project_admin( self, app, user, user_two, project_private, url_private): data = { 'data': { 'type': 'contributors', 'attributes': { }, 'relationships': { 'users': { 'data': { 'id': user_two._id, 'type': 'users' } } } } } res = app.post_json_api( url_private, data, auth=user.auth, expect_errors=True) assert res.status_code == 201 project_private.reload() assert user_two in project_private.contributors def test_adds_admin_contributor_private_project_admin( self, app, user, user_two, project_private, url_private): data = { 'data': { 'type': 'contributors', 'attributes': { 'bibliographic': True, 'permission': permissions.ADMIN }, 'relationships': { 'users': { 'data': { 'id': user_two._id, 'type': 'users' } } } } } res = app.post_json_api(url_private, data, auth=user.auth) assert res.status_code == 201 assert res.json['data']['id'] == '{}-{}'.format( project_private._id, user_two._id) project_private.reload() assert user_two in project_private.contributors assert project_private.get_permissions(user_two) == [ permissions.READ, permissions.WRITE, permissions.ADMIN] def test_adds_write_contributor_private_project_admin( self, app, user, user_two, project_private, url_private): data = { 'data': { 'type': 'contributors', 'attributes': { 'bibliographic': True, 'permission': permissions.WRITE }, 'relationships': { 'users': { 'data': { 'id': user_two._id, 'type': 'users' } } } } } res = app.post_json_api(url_private, data, auth=user.auth) assert res.status_code == 201 assert res.json['data']['id'] == '{}-{}'.format( project_private._id, user_two._id) project_private.reload() assert user_two in project_private.contributors assert project_private.get_permissions( user_two) == [permissions.READ, permissions.WRITE] def test_adds_read_contributor_private_project_admin( self, app, user, user_two, project_private, url_private): data = { 'data': { 'type': 'contributors', 'attributes': { 'bibliographic': True, 'permission': permissions.READ }, 'relationships': { 'users': { 'data': { 'id': user_two._id, 'type': 'users' } } } } } res = app.post_json_api(url_private, data, auth=user.auth) assert res.status_code == 201 assert res.json['data']['id'] == '{}-{}'.format( project_private._id, user_two._id) project_private.reload() assert user_two in project_private.contributors assert 
project_private.get_permissions(user_two) == [ permissions.READ] def test_adds_invalid_permission_contributor_private_project_admin( self, app, user, user_two, project_private, url_private): data = { 'data': { 'type': 'contributors', 'attributes': { 'bibliographic': True, 'permission': 'invalid', }, 'relationships': { 'users': { 'data': { 'id': user_two._id, 'type': 'users' } } } } } res = app.post_json_api( url_private, data, auth=user.auth, expect_errors=True) assert res.status_code == 400 project_private.reload() assert user_two not in project_private.contributors.all() def test_adds_none_permission_contributor_private_project_admin_uses_default_permissions( self, app, user, user_two, project_private, url_private): data = { 'data': { 'type': 'contributors', 'attributes': { 'bibliographic': True, 'permission': None }, 'relationships': { 'users': { 'data': { 'id': user_two._id, 'type': 'users' } } } } } res = app.post_json_api(url_private, data, auth=user.auth) assert res.status_code == 201 project_private.reload() assert user_two in project_private.contributors assert project_private.has_permission(user_two, permissions.WRITE) def test_adds_already_existing_contributor_private_project_admin( self, app, user, user_two, project_private, data_user_two, url_private): project_private.add_contributor(user_two, auth=Auth(user), save=True) project_private.reload() res = app.post_json_api(url_private, data_user_two, auth=user.auth, expect_errors=True) assert res.status_code == 400 def test_adds_non_existing_user_private_project_admin( self, app, user, project_private, url_private): data = { 'data': { 'type': 'contributors', 'attributes': { 'bibliographic': True }, 'relationships': { 'users': { 'data': { 'id': 'FAKE', 'type': 'users' } } } } } res = app.post_json_api( url_private, data, auth=user.auth, expect_errors=True) assert res.status_code == 404 project_private.reload() assert len(project_private.contributors) == 1 def test_adds_contributor_private_project_non_admin( self, app, user, user_two, user_three, project_private, data_user_three, url_private): project_private.add_contributor( user_two, permissions=permissions.WRITE, auth=Auth(user)) res = app.post_json_api( url_private, data_user_three, auth=user_two.auth, expect_errors=True) assert res.status_code == 403 project_private.reload() assert user_three not in project_private.contributors.all() def test_adds_contributor_private_project_non_contributor( self, app, user_two, user_three, project_private, data_user_three, url_private): res = app.post_json_api(url_private, data_user_three, auth=user_two.auth, expect_errors=True) assert res.status_code == 403 project_private.reload() assert user_three not in project_private.contributors.all() def test_adds_contributor_private_project_not_logged_in( self, app, user_two, project_private, data_user_two, url_private): res = app.post_json_api(url_private, data_user_two, expect_errors=True) assert res.status_code == 401 project_private.reload() assert user_two not in project_private.contributors.all() def test_add_unregistered_contributor_with_fullname( self, app, user, project_public, url_public): payload = { 'data': { 'type': 'contributors', 'attributes': { 'full_name': 'John Doe', } } } res = app.post_json_api(url_public, payload, auth=user.auth) project_public.reload() assert res.status_code == 201 assert res.json['data']['attributes']['unregistered_contributor'] == 'John Doe' assert res.json['data']['attributes'].get('email') is None assert res.json['data']['embeds']['users']['data']['id'] in 
project_public.contributors.values_list( 'guids___id', flat=True) def test_add_contributor_with_fullname_and_email_unregistered_user( self, app, user, project_public, url_public): payload = { 'data': { 'type': 'contributors', 'attributes': { 'full_name': 'John Doe', 'email': '[email protected]' } } } res = app.post_json_api(url_public, payload, auth=user.auth) project_public.reload() assert res.status_code == 201 assert res.json['data']['attributes']['unregistered_contributor'] == 'John Doe' assert res.json['data']['attributes'].get('email') is None assert res.json['data']['attributes']['bibliographic'] is True assert res.json['data']['attributes']['permission'] == permissions.WRITE assert res.json['data']['embeds']['users']['data']['id'] in project_public.contributors.values_list( 'guids___id', flat=True) def test_add_contributor_with_fullname_and_email_unregistered_user_set_attributes( self, app, user, project_public, url_public): payload = { 'data': { 'type': 'contributors', 'attributes': { 'full_name': 'John Doe', 'email': '[email protected]', 'bibliographic': False, 'permission': permissions.READ } } } res = app.post_json_api(url_public, payload, auth=user.auth) project_public.reload() assert res.status_code == 201 assert res.json['data']['attributes']['unregistered_contributor'] == 'John Doe' assert res.json['data']['attributes'].get('email') is None assert res.json['data']['attributes']['bibliographic'] is False assert res.json['data']['attributes']['permission'] == permissions.READ assert res.json['data']['embeds']['users']['data']['id'] in project_public.contributors.values_list( 'guids___id', flat=True) def test_add_contributor_with_fullname_and_email_registered_user( self, app, user, project_public, url_public): user_contrib = UserFactory() payload = { 'data': { 'type': 'contributors', 'attributes': { 'full_name': user_contrib.fullname, 'email': user_contrib.username } } } res = app.post_json_api(url_public, payload, auth=user.auth) project_public.reload() assert res.status_code == 201 assert res.json['data']['attributes']['unregistered_contributor'] is None assert res.json['data']['attributes'].get('email') is None assert res.json['data']['embeds']['users']['data']['id'] in project_public.contributors.values_list( 'guids___id', flat=True) def test_add_unregistered_contributor_already_contributor( self, app, user, project_public, url_public): name, email = fake.name(), fake_email() project_public.add_unregistered_contributor( auth=Auth(user), fullname=name, email=email) payload = { 'data': { 'type': 'contributors', 'attributes': { 'full_name': 'Doesn\'t Matter', 'email': email } } } res = app.post_json_api( url_public, payload, auth=user.auth, expect_errors=True) project_public.reload() assert res.status_code == 400 assert res.json['errors'][0]['detail'] == '{} is already a contributor.'.format( name) def test_add_contributor_user_is_deactivated_registered_payload( self, app, user, url_public): user_contrib = UserFactory() user_contrib.date_disabled = datetime.utcnow() user_contrib.save() payload = { 'data': { 'type': 'contributors', 'attributes': {}, 'relationships': { 'users': { 'data': { 'type': 'users', 'id': user_contrib._id } } } } } res = app.post_json_api( url_public, payload, auth=user.auth, expect_errors=True) assert res.status_code == 400 assert res.json['errors'][0]['detail'] == 'Deactivated users cannot be added as contributors.' 
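    # The same deactivation rule is expected to apply when the contributor is
    # supplied by full_name/email instead of by user id, as checked below.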
def test_add_contributor_user_is_deactivated_unregistered_payload( self, app, user, url_public): user_contrib = UserFactory() user_contrib.date_disabled = datetime.utcnow() user_contrib.save() payload = { 'data': { 'type': 'contributors', 'attributes': { 'full_name': user_contrib.fullname, 'email': user_contrib.username }, } } res = app.post_json_api( url_public, payload, auth=user.auth, expect_errors=True) assert res.status_code == 400 assert res.json['errors'][0]['detail'] == 'Deactivated users cannot be added as contributors.' def test_add_contributor_index_returned( self, app, user, data_user_two, data_user_three, url_public): res = app.post_json_api(url_public, data_user_two, auth=user.auth) assert res.status_code == 201 assert res.json['data']['attributes']['index'] == 1 res = app.post_json_api(url_public, data_user_three, auth=user.auth) assert res.status_code == 201 assert res.json['data']['attributes']['index'] == 2 def test_add_contributor_set_index_out_of_range( self, app, user, user_two, project_public, url_public): user_contrib_one = UserFactory() project_public.add_contributor(user_contrib_one, save=True) user_contrib_two = UserFactory() project_public.add_contributor(user_contrib_two, save=True) payload = { 'data': { 'type': 'contributors', 'attributes': { 'index': 4 }, 'relationships': { 'users': { 'data': { 'type': 'users', 'id': user_two._id } } } } } res = app.post_json_api( url_public, payload, auth=user.auth, expect_errors=True) assert res.status_code == 400 assert res.json['errors'][0]['detail'] == '4 is not a valid contributor index for node with id {}'.format( project_public._id) def test_add_contributor_set_index_first( self, app, user, user_two, project_public, url_public): user_contrib_one = UserFactory() project_public.add_contributor(user_contrib_one, save=True) user_contrib_two = UserFactory() project_public.add_contributor(user_contrib_two, save=True) payload = { 'data': { 'type': 'contributors', 'attributes': { 'index': 0 }, 'relationships': { 'users': { 'data': { 'type': 'users', 'id': user_two._id } } } } } res = app.post_json_api(url_public, payload, auth=user.auth) project_public.reload() assert res.status_code == 201 contributor_obj = project_public.contributor_set.get(user=user_two) index = list( project_public.get_contributor_order() ).index(contributor_obj.pk) assert index == 0 def test_add_contributor_set_index_last( self, app, user, user_two, project_public, url_public): user_contrib_one = UserFactory() project_public.add_contributor(user_contrib_one, save=True) user_contrib_two = UserFactory() project_public.add_contributor(user_contrib_two, save=True) payload = { 'data': { 'type': 'contributors', 'attributes': { 'index': 3 }, 'relationships': { 'users': { 'data': { 'type': 'users', 'id': user_two._id } } } } } res = app.post_json_api(url_public, payload, auth=user.auth) project_public.reload() assert res.status_code == 201 contributor_obj = project_public.contributor_set.get(user=user_two) index = list( project_public.get_contributor_order() ).index(contributor_obj.pk) assert index == 3 def test_add_inactive_merged_user_as_contributor( self, app, user, url_public): primary_user = UserFactory() merged_user = UserFactory(merged_by=primary_user) payload = { 'data': { 'type': 'contributors', 'attributes': {}, 'relationships': { 'users': { 'data': { 'type': 'users', 'id': merged_user._id } } } } } res = app.post_json_api(url_public, payload, auth=user.auth) assert res.status_code == 201 contributor_added = 
res.json['data']['embeds']['users']['data']['id'] assert contributor_added == primary_user._id def test_add_unconfirmed_user_by_guid( self, app, user, project_public, url_public): unconfirmed_user = UnconfirmedUserFactory() payload = { 'data': { 'type': 'contributors', 'attributes': {}, 'relationships': { 'users': { 'data': { 'type': 'users', 'id': unconfirmed_user._id } } } } } res = app.post_json_api( url_public, payload, auth=user.auth, expect_errors=True) assert res.status_code == 404 # if adding unregistered contrib by guid, fullname must be supplied assert ( res.json['errors'][0]['detail'] == 'Cannot add unconfirmed user {} to resource {}. You need to provide a full_name.' .format(unconfirmed_user._id, project_public._id)) payload['data']['attributes']['full_name'] = 'Susan B. Anthony' res = app.post_json_api( url_public, payload, auth=user.auth, expect_errors=True) assert res.status_code == 201 assert res.json['data']['attributes']['unregistered_contributor'] == 'Susan B. Anthony' @pytest.mark.django_db @pytest.mark.enable_quickfiles_creation @pytest.mark.enable_implicit_clean class TestNodeContributorCreateValidation(NodeCRUDTestCase): @pytest.fixture() def create_serializer(self): return NodeContributorsCreateSerializer @pytest.fixture() def validate_data(self, create_serializer): return create_serializer.validate_data def test_add_contributor_validation(self, project_public, validate_data, create_serializer): # test_add_contributor_validation_user_id validate_data( create_serializer(), project_public, user_id='abcde') # test_add_contributor_validation_user_id_fullname validate_data( create_serializer(), project_public, user_id='abcde', full_name='Kanye') # test_add_contributor_validation_user_id_email with pytest.raises(exceptions.ValidationError): validate_data( create_serializer(), project_public, user_id='abcde', email='[email protected]') # test_add_contributor_validation_user_id_fullname_email with pytest.raises(exceptions.ValidationError): validate_data( create_serializer(), project_public, user_id='abcde', full_name='Kanye', email='[email protected]') # test_add_contributor_validation_fullname validate_data( create_serializer(), project_public, full_name='Kanye') # test_add_contributor_validation_email with pytest.raises(exceptions.ValidationError): validate_data( create_serializer(), project_public, email='[email protected]') # test_add_contributor_validation_fullname_email validate_data( create_serializer(), project_public, full_name='Kanye', email='[email protected]') @pytest.mark.django_db @pytest.mark.enable_bookmark_creation @pytest.mark.enable_enqueue_task class TestNodeContributorCreateEmail(NodeCRUDTestCase): @pytest.fixture() def url_project_contribs(self, project_public): return '/{}nodes/{}/contributors/'.format(API_BASE, project_public._id) @mock.patch('framework.auth.views.mails.send_mail') def test_add_contributor_no_email_if_false( self, mock_mail, app, user, url_project_contribs): url = '{}?send_email=false'.format(url_project_contribs) payload = { 'data': { 'type': 'contributors', 'attributes': { 'full_name': 'Kanye West', 'email': '[email protected]' } } } res = app.post_json_api(url, payload, auth=user.auth) assert res.status_code == 201 assert mock_mail.call_count == 0 @mock.patch('framework.auth.views.mails.send_mail') def test_add_contributor_sends_email( self, mock_mail, app, user, user_two, url_project_contribs): url = '{}?send_email=default'.format(url_project_contribs) payload = { 'data': { 'type': 'contributors', 'attributes': { }, 
'relationships': { 'users': { 'data': { 'type': 'users', 'id': user_two._id } } } } } res = app.post_json_api(url, payload, auth=user.auth) assert res.status_code == 201 assert mock_mail.call_count == 1 @mock.patch('website.project.signals.contributor_added.send') def test_add_contributor_signal_if_default( self, mock_send, app, user, user_two, url_project_contribs): url = '{}?send_email=default'.format(url_project_contribs) payload = { 'data': { 'type': 'contributors', 'attributes': { }, 'relationships': { 'users': { 'data': { 'type': 'users', 'id': user_two._id } } } } } res = app.post_json_api(url, payload, auth=user.auth) args, kwargs = mock_send.call_args assert res.status_code == 201 assert 'default' == kwargs['email_template'] def test_add_contributor_signal_preprint_email_disallowed( self, app, user, user_two, url_project_contribs): url = '{}?send_email=preprint'.format(url_project_contribs) payload = { 'data': { 'type': 'contributors', 'attributes': { }, 'relationships': { 'users': { 'data': { 'type': 'users', 'id': user_two._id } } } } } res = app.post_json_api(url, payload, auth=user.auth, expect_errors=True) assert res.status_code == 400 assert res.json['errors'][0]['detail'] == 'preprint is not a valid email preference.' @mock.patch('framework.auth.views.mails.send_mail') def test_add_unregistered_contributor_sends_email( self, mock_mail, app, user, url_project_contribs): url = '{}?send_email=default'.format(url_project_contribs) payload = { 'data': { 'type': 'contributors', 'attributes': { 'full_name': 'Kanye West', 'email': '[email protected]' } } } res = app.post_json_api(url, payload, auth=user.auth) assert res.status_code == 201 assert mock_mail.call_count == 1 @mock.patch('website.project.signals.unreg_contributor_added.send') def test_add_unregistered_contributor_signal_if_default( self, mock_send, app, user, url_project_contribs): url = '{}?send_email=default'.format(url_project_contribs) payload = { 'data': { 'type': 'contributors', 'attributes': { 'full_name': 'Kanye West', 'email': '[email protected]' } } } res = app.post_json_api(url, payload, auth=user.auth) args, kwargs = mock_send.call_args assert res.status_code == 201 assert 'default' == kwargs['email_template'] def test_add_unregistered_contributor_signal_preprint_email_disallowed( self, app, user, url_project_contribs): url = '{}?send_email=preprint'.format(url_project_contribs) payload = { 'data': { 'type': 'contributors', 'attributes': { 'full_name': 'Kanye West', 'email': '[email protected]' } } } res = app.post_json_api(url, payload, auth=user.auth, expect_errors=True) assert res.status_code == 400 assert res.json['errors'][0]['detail'] == 'preprint is not a valid email preference.' @mock.patch('framework.auth.views.mails.send_mail') def test_add_contributor_invalid_send_email_param( self, mock_mail, app, user, url_project_contribs): url = '{}?send_email=true'.format(url_project_contribs) payload = { 'data': { 'type': 'contributors', 'attributes': { 'full_name': 'Kanye West', 'email': '[email protected]' } } } res = app.post_json_api( url, payload, auth=user.auth, expect_errors=True) assert res.status_code == 400 assert res.json['errors'][0]['detail'] == 'true is not a valid email preference.' 
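        # no notification email should be sent when the send_email value is rejected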
assert mock_mail.call_count == 0 @mock.patch('framework.auth.views.mails.send_mail') def test_add_unregistered_contributor_without_email_no_email( self, mock_mail, app, user, url_project_contribs): url = '{}?send_email=default'.format(url_project_contribs) payload = { 'data': { 'type': 'contributors', 'attributes': { 'full_name': 'Kanye West', } } } with capture_signals() as mock_signal: res = app.post_json_api(url, payload, auth=user.auth) assert contributor_added in mock_signal.signals_sent() assert res.status_code == 201 assert mock_mail.call_count == 0 @pytest.mark.django_db class TestNodeContributorBulkCreate(NodeCRUDTestCase): @pytest.fixture() def user_three(self): return AuthUserFactory() @pytest.fixture() def url_public(self, project_public): return '/{}nodes/{}/contributors/?send_email=false'.format( API_BASE, project_public._id) @pytest.fixture() def url_private(self, project_private): return '/{}nodes/{}/contributors/?send_email=false'.format( API_BASE, project_private._id) @pytest.fixture() def payload_one(self, user_two): return { 'type': 'contributors', 'attributes': { 'bibliographic': True, 'permission': permissions.ADMIN }, 'relationships': { 'users': { 'data': { 'id': user_two._id, 'type': 'users' } } } } @pytest.fixture() def payload_two(self, user_three): return { 'type': 'contributors', 'attributes': { 'bibliographic': False, 'permission': permissions.READ }, 'relationships': { 'users': { 'data': { 'id': user_three._id, 'type': 'users' } } } } def test_node_contributor_bulk_create_contributor_exists( self, app, user, user_two, project_public, payload_one, payload_two, url_public): project_public.add_contributor( user_two, permissions=permissions.READ, visible=True, save=True) res = app.post_json_api( url_public, {'data': [payload_two, payload_one]}, auth=user.auth, expect_errors=True, bulk=True) assert res.status_code == 400 assert 'is already a contributor' in res.json['errors'][0]['detail'] res = app.get(url_public, auth=user.auth) assert len(res.json['data']) == 2 def test_node_contributor_bulk_create_errors( self, app, user, user_two, project_private, payload_one, payload_two, url_public, url_private): # test_bulk_create_contributors_blank_request res = app.post_json_api( url_public, auth=user.auth, expect_errors=True, bulk=True) assert res.status_code == 400 # test_node_contributor_bulk_create_logged_out_public_project res = app.post_json_api( url_public, {'data': [payload_one, payload_two]}, expect_errors=True, bulk=True) assert res.status_code == 401 res = app.get(url_public, auth=user.auth) assert len(res.json['data']) == 1 # test_node_contributor_bulk_create_logged_out_private_project res = app.post_json_api( url_private, {'data': [payload_one, payload_two]}, expect_errors=True, bulk=True) assert res.status_code == 401 res = app.get(url_private, auth=user.auth) assert len(res.json['data']) == 1 # test_node_contributor_bulk_create_logged_in_non_contrib_private_project res = app.post_json_api(url_private, {'data': [payload_one, payload_two]}, auth=user_two.auth, expect_errors=True, bulk=True) assert res.status_code == 403 res = app.get(url_public, auth=user.auth) assert len(res.json['data']) == 1 # test_node_contributor_bulk_create_logged_in_read_only_contrib_private_project project_private.add_contributor( user_two, permissions=permissions.READ, save=True) res = app.post_json_api( url_private, {'data': [payload_two]}, auth=user_two.auth, expect_errors=True, bulk=True) assert res.status_code == 403 res = app.get(url_public, auth=user.auth) assert 
len(res.json['data']) == 1 def test_node_contributor_bulk_create_logged_in_public_project_project( self, app, user, payload_one, payload_two, url_public): res = app.post_json_api( url_public, {'data': [payload_one, payload_two]}, auth=user.auth, bulk=True) assert res.status_code == 201 assert_equals([res.json['data'][0]['attributes']['bibliographic'], res.json['data'][1]['attributes']['bibliographic']], [True, False]) assert_equals([res.json['data'][0]['attributes']['permission'], res.json['data'][1]['attributes']['permission']], [permissions.ADMIN, permissions.READ]) assert res.content_type == 'application/vnd.api+json' res = app.get(url_public, auth=user.auth) assert len(res.json['data']) == 3 def test_node_contributor_bulk_create_logged_in_contrib_private_project( self, app, user, payload_one, payload_two, url_private): res = app.post_json_api(url_private, {'data': [payload_one, payload_two]}, auth=user.auth, expect_errors=True, bulk=True) assert res.status_code == 201 assert len(res.json['data']) == 2 assert_equals([res.json['data'][0]['attributes']['bibliographic'], res.json['data'][1]['attributes']['bibliographic']], [True, False]) assert_equals([res.json['data'][0]['attributes']['permission'], res.json['data'][1]['attributes']['permission']], [permissions.ADMIN, permissions.READ]) assert res.content_type == 'application/vnd.api+json' res = app.get(url_private, auth=user.auth) assert len(res.json['data']) == 3 def test_node_contributor_bulk_create_payload_errors( self, app, user, user_two, payload_one, payload_two, url_public): # test_node_contributor_bulk_create_all_or_nothing invalid_id_payload = { 'type': 'contributors', 'attributes': { 'bibliographic': True }, 'relationships': { 'users': { 'data': { 'type': 'users', 'id': '12345' } } } } res = app.post_json_api( url_public, {'data': [payload_one, invalid_id_payload]}, auth=user.auth, expect_errors=True, bulk=True) assert res.status_code == 404 res = app.get(url_public, auth=user.auth) assert len(res.json['data']) == 1 # test_node_contributor_bulk_create_limits node_contrib_create_list = {'data': [payload_one] * 101} res = app.post_json_api(url_public, node_contrib_create_list, auth=user.auth, expect_errors=True, bulk=True) assert res.json['errors'][0]['detail'] == 'Bulk operation limit is 100, got 101.' 
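        # the bulk-limit error should point at the top-level /data array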
assert res.json['errors'][0]['source']['pointer'] == '/data' # test_node_contributor_ugly_payload payload = 'sdf;jlasfd' res = app.post_json_api( url_public, payload, auth=user.auth, expect_errors=True, bulk=True) assert res.status_code == 400 assert res.json['errors'][0]['detail'] == exceptions.ParseError.default_detail # test_node_contributor_bulk_create_invalid_permissions_all_or_nothing payload = { 'type': 'contributors', 'attributes': { 'permission': 'super-user', 'bibliographic': True }, 'relationships': { 'users': { 'data': { 'type': 'users', 'id': user_two._id } } } } payload = {'data': [payload_two, payload]} res = app.post_json_api( url_public, payload, auth=user.auth, expect_errors=True, bulk=True) assert res.status_code == 400 res = app.get(url_public, auth=user.auth) assert len(res.json['data']) == 1 @pytest.mark.django_db class TestNodeContributorBulkUpdate(NodeCRUDTestCase): @pytest.fixture() def user_three(self): return AuthUserFactory() @pytest.fixture() def user_four(self): return AuthUserFactory() @pytest.fixture() def project_public( self, user, user_two, user_three, title, description, category): project_public = ProjectFactory( title=title, description=description, category=category, is_public=True, creator=user ) project_public.add_contributor( user_two, permissions=permissions.READ, visible=True, save=True) project_public.add_contributor( user_three, permissions=permissions.READ, visible=True, save=True) return project_public @pytest.fixture() def project_private( self, user, user_two, user_three, title, description, category): project_private = ProjectFactory( title=title, description=description, category=category, is_public=False, creator=user ) project_private.add_contributor( user_two, permissions=permissions.READ, visible=True, save=True) project_private.add_contributor( user_three, permissions=permissions.READ, visible=True, save=True) return project_private @pytest.fixture() def url_public(self, project_public): return '/{}nodes/{}/contributors/'.format(API_BASE, project_public._id) @pytest.fixture() def url_private(self, project_private): return '/{}nodes/{}/contributors/'.format( API_BASE, project_private._id) @pytest.fixture() def payload_public_one(self, user_two, project_public, make_contrib_id): return { 'id': make_contrib_id(project_public._id, user_two._id), 'type': 'contributors', 'attributes': { 'bibliographic': True, 'permission': permissions.ADMIN } } @pytest.fixture() def payload_private_one(self, user_two, project_private, make_contrib_id): return { 'id': make_contrib_id(project_private._id, user_two._id), 'type': 'contributors', 'attributes': { 'bibliographic': True, 'permission': permissions.ADMIN } } @pytest.fixture() def payload_public_two(self, user_three, project_public, make_contrib_id): return { 'id': make_contrib_id(project_public._id, user_three._id), 'type': 'contributors', 'attributes': { 'bibliographic': False, 'permission': permissions.WRITE } } @pytest.fixture() def payload_private_two( self, user_three, project_private, make_contrib_id): return { 'id': make_contrib_id(project_private._id, user_three._id), 'type': 'contributors', 'attributes': { 'bibliographic': False, 'permission': permissions.WRITE } } def test_bulk_update_contributors_errors( self, app, user, user_two, user_four, project_public, payload_public_one, payload_public_two, payload_private_one, payload_private_two, url_public, url_private, make_contrib_id): # test_bulk_update_contributors_blank_request res = app.patch_json_api( url_public, auth=user.auth, 
expect_errors=True, bulk=True) assert res.status_code == 400 # test_bulk_update_contributors_dict_instead_of_list res = app.put_json_api( url_public, {'data': payload_public_one}, auth=user.auth, expect_errors=True, bulk=True) assert res.status_code == 400 # test_bulk_update_contributors_public_project_one_not_found invalid_id = { 'id': '12345-abcde', 'type': 'contributors', 'attributes': {} } empty_payload = {'data': [invalid_id, payload_public_one]} res = app.put_json_api( url_public, empty_payload, auth=user.auth, expect_errors=True, bulk=True) assert res.status_code == 400 assert res.json['errors'][0]['detail'] == 'Could not find all objects to update.' res = app.get(url_public, auth=user.auth) data = res.json['data'] assert_equals( [data[0]['attributes']['permission'], data[1]['attributes']['permission'], data[2]['attributes']['permission']], [permissions.ADMIN, permissions.READ, permissions.READ] ) # test_bulk_update_contributors_public_projects_logged_out res = app.put_json_api( url_public, { 'data': [payload_public_one, payload_public_two] }, expect_errors=True, bulk=True) assert res.status_code == 401 res = app.get(url_public, auth=user.auth) data = res.json['data'] assert_equals( [data[0]['attributes']['permission'], data[1]['attributes']['permission'], data[2]['attributes']['permission']], [permissions.ADMIN, permissions.READ, permissions.READ] ) # test_bulk_update_contributors_private_projects_logged_out res = app.put_json_api( url_private, { 'data': [payload_private_one, payload_private_two] }, expect_errors=True, bulk=True) assert res.status_code == 401 res = app.get(url_private, auth=user.auth) data = res.json['data'] assert_equals( [data[0]['attributes']['permission'], data[1]['attributes']['permission'], data[2]['attributes']['permission']], [permissions.ADMIN, permissions.READ, permissions.READ]) # test_bulk_update_contributors_private_projects_logged_in_non_contrib res = app.put_json_api( url_private, { 'data': [payload_private_one, payload_private_two] }, auth=user_four.auth, expect_errors=True, bulk=True) assert res.status_code == 403 res = app.get(url_private, auth=user.auth) data = res.json['data'] assert_equals( [data[0]['attributes']['permission'], data[1]['attributes']['permission'], data[2]['attributes']['permission']], [permissions.ADMIN, permissions.READ, permissions.READ]) # test_bulk_update_contributors_private_projects_logged_in_read_only_contrib res = app.put_json_api( url_private, { 'data': [payload_private_one, payload_private_two] }, auth=user_two.auth, expect_errors=True, bulk=True) assert res.status_code == 403 res = app.get(url_private, auth=user.auth) data = res.json['data'] assert_equals( [data[0]['attributes']['permission'], data[1]['attributes']['permission'], data[2]['attributes']['permission']], [permissions.ADMIN, permissions.READ, permissions.READ] ) # test_bulk_update_contributors_projects_send_dictionary_not_list res = app.put_json_api( url_public, {'data': payload_public_one}, auth=user.auth, expect_errors=True, bulk=True) assert res.status_code == 400 assert res.json['errors'][0]['detail'] == 'Expected a list of items but got type "dict".' # test_bulk_update_contributors_id_not_supplied res = app.put_json_api( url_public, {'data': [{ 'type': 'contributors', 'attributes': {} }]}, auth=user.auth, expect_errors=True, bulk=True) assert res.status_code == 400 assert len(res.json['errors']) == 1 assert res.json['errors'][0]['detail'] == 'Contributor identifier not provided.' 
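        # Contributor identifiers are compound ('<node_id>-<user_id>', see the
        # make_contrib_id fixture); the cases below cover missing or malformed
        # ids and types.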
# test_bulk_update_contributors_type_not_supplied res = app.put_json_api( url_public, {'data': [{ 'id': make_contrib_id( project_public._id, user_two._id ), 'attributes': {} }]}, auth=user.auth, expect_errors=True, bulk=True) assert res.status_code == 400 assert len(res.json['errors']) == 1 assert res.json['errors'][0]['source']['pointer'] == '/data/0/type' assert res.json['errors'][0]['detail'] == 'This field may not be null.' # test_bulk_update_contributors_wrong_type invalid_type = { 'id': make_contrib_id(project_public._id, user_two._id), 'type': 'Wrong type.', 'attributes': {} } res = app.put_json_api(url_public, {'data': [invalid_type]}, auth=user.auth, expect_errors=True, bulk=True) assert res.status_code == 409 # test_bulk_update_contributors_invalid_id_format invalid_id = { 'id': '12345', 'type': 'contributors', 'attributes': {} } res = app.put_json_api(url_public, {'data': [invalid_id]}, auth=user.auth, expect_errors=True, bulk=True) assert res.status_code == 400 assert res.json['errors'][0]['detail'] == 'Contributor identifier incorrectly formatted.' # test_bulk_update_contributors_wrong_id invalid_id = { 'id': '12345-abcde', 'type': 'contributors', 'attributes': {} } res = app.put_json_api( url_public, {'data': [invalid_id]}, auth=user.auth, expect_errors=True, bulk=True) assert res.status_code == 400 assert res.json['errors'][0]['detail'] == 'Could not find all objects to update.' # test_bulk_update_contributors_limits contrib_update_list = {'data': [payload_public_one] * 101} res = app.put_json_api( url_public, contrib_update_list, auth=user.auth, expect_errors=True, bulk=True) assert res.json['errors'][0]['detail'] == 'Bulk operation limit is 100, got 101.' assert res.json['errors'][0]['source']['pointer'] == '/data' # test_bulk_update_contributors_invalid_permissions res = app.put_json_api( url_public, { 'data': [ payload_public_two, { 'id': make_contrib_id( project_public._id, user_two._id ), 'type': 'contributors', 'attributes': { 'permission': 'super-user'} } ] }, auth=user.auth, expect_errors=True, bulk=True) assert res.status_code == 400 assert res.json['errors'][0]['detail'] == '"super-user" is not a valid choice.' res = app.get(url_public, auth=user.auth) data = res.json['data'] assert_equals( [data[0]['attributes']['permission'], data[1]['attributes']['permission'], data[2]['attributes']['permission']], [permissions.ADMIN, permissions.READ, permissions.READ] ) # test_bulk_update_contributors_invalid_bibliographic res = app.put_json_api( url_public, { 'data': [ payload_public_two, { 'id': make_contrib_id( project_public._id, user_two._id ), 'type': 'contributors', 'attributes': { 'bibliographic': 'true and false' } } ] }, auth=user.auth, expect_errors=True, bulk=True) assert res.status_code == 400 assert res.json['errors'][0]['detail'] == '"true and false" is not a valid boolean.' 
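        # the rejected bulk update must leave the original permissions unchanged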
res = app.get(url_public, auth=user.auth) data = res.json['data'] assert_equals( [data[0]['attributes']['permission'], data[1]['attributes']['permission'], data[2]['attributes']['permission']], [permissions.ADMIN, permissions.READ, permissions.READ] ) # test_bulk_update_contributors_must_have_at_least_one_bibliographic_contributor res = app.put_json_api( url_public, { 'data': [ payload_public_two, { 'id': make_contrib_id( project_public._id, user._id ), 'type': 'contributors', 'attributes': { 'permission': permissions.ADMIN, 'bibliographic': False } }, { 'id': make_contrib_id( project_public._id, user_two._id ), 'type': 'contributors', 'attributes': { 'bibliographic': False } } ] }, auth=user.auth, expect_errors=True, bulk=True) assert res.status_code == 400 assert res.json['errors'][0]['detail'] == 'Must have at least one visible contributor' # test_bulk_update_contributors_must_have_at_least_one_admin res = app.put_json_api( url_public, {'data': [ payload_public_two, { 'id': make_contrib_id( project_public._id, user._id ), 'type': 'contributors', 'attributes': { 'permission': permissions.READ } } ]}, auth=user.auth, expect_errors=True, bulk=True) assert res.status_code == 400 assert res.json['errors'][0]['detail'] == '{} is the only admin.'.format( user.fullname) def test_bulk_update_contributors_public_projects_logged_in( self, app, user, payload_public_one, payload_public_two, url_public): res = app.put_json_api( url_public, {'data': [payload_public_one, payload_public_two]}, auth=user.auth, bulk=True ) assert res.status_code == 200 data = res.json['data'] assert_equals( [data[0]['attributes']['permission'], data[1]['attributes']['permission']], [permissions.ADMIN, permissions.WRITE] ) def test_bulk_update_contributors_private_projects_logged_in_contrib( self, app, user, payload_private_one, payload_private_two, url_private): res = app.put_json_api( url_private, {'data': [payload_private_one, payload_private_two]}, auth=user.auth, bulk=True ) assert res.status_code == 200 data = res.json['data'] assert_equals( [data[0]['attributes']['permission'], data[1]['attributes']['permission']], [permissions.ADMIN, permissions.WRITE] ) @pytest.mark.django_db class TestNodeContributorBulkPartialUpdate(NodeCRUDTestCase): @pytest.fixture() def user_three(self): return AuthUserFactory() @pytest.fixture() def user_four(self): return AuthUserFactory() @pytest.fixture() def project_public( self, user, user_two, user_three, title, description, category): project_public = ProjectFactory( title=title, description=description, category=category, is_public=True, creator=user ) project_public.add_contributor( user_two, permissions=permissions.READ, visible=True, save=True ) project_public.add_contributor( user_three, permissions=permissions.READ, visible=True, save=True ) return project_public @pytest.fixture() def project_private( self, user, user_two, user_three, title, description, category): project_private = ProjectFactory( title=title, description=description, category=category, is_public=False, creator=user ) project_private.add_contributor( user_two, permissions=permissions.READ, visible=True, save=True) project_private.add_contributor( user_three, permissions=permissions.READ, visible=True, save=True) return project_private @pytest.fixture() def url_public(self, project_public): return '/{}nodes/{}/contributors/'.format(API_BASE, project_public._id) @pytest.fixture() def url_private(self, project_private): return '/{}nodes/{}/contributors/'.format( API_BASE, project_private._id) @pytest.fixture() def 
payload_public_one(self, user_two, project_public, make_contrib_id): return { 'id': make_contrib_id(project_public._id, user_two._id), 'type': 'contributors', 'attributes': { 'bibliographic': True, 'permission': permissions.ADMIN } } @pytest.fixture() def payload_public_two(self, user_three, project_public, make_contrib_id): return { 'id': make_contrib_id(project_public._id, user_three._id), 'type': 'contributors', 'attributes': { 'bibliographic': False, 'permission': permissions.WRITE } } @pytest.fixture() def payload_private_one(self, user_two, project_private, make_contrib_id): return { 'id': make_contrib_id(project_private._id, user_two._id), 'type': 'contributors', 'attributes': { 'bibliographic': True, 'permission': permissions.ADMIN } } @pytest.fixture() def payload_private_two( self, user_three, project_private, make_contrib_id): return { 'id': make_contrib_id(project_private._id, user_three._id), 'type': 'contributors', 'attributes': { 'bibliographic': False, 'permission': permissions.WRITE } } def test_bulk_partial_update_errors( self, app, user, user_two, user_four, project_public, payload_public_one, payload_public_two, payload_private_one, payload_private_two, url_public, url_private, make_contrib_id): # test_bulk_partial_update_contributors_blank_request res = app.patch_json_api( url_public, auth=user.auth, expect_errors=True, bulk=True) assert res.status_code == 400 # test_bulk_partial_update_contributors_public_project_one_not_found invalid_id = { 'id': '12345-abcde', 'type': 'contributors', 'attributes': {} } empty_payload = {'data': [invalid_id, payload_public_one]} res = app.patch_json_api( url_public, empty_payload, auth=user.auth, expect_errors=True, bulk=True) assert res.status_code == 400 assert res.json['errors'][0]['detail'] == 'Could not find all objects to update.' 
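        # a failed bulk PATCH should not modify any of the listed contributors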
res = app.get(url_public, auth=user.auth) data = res.json['data'] assert_equals( [data[0]['attributes']['permission'], data[1]['attributes']['permission'], data[2]['attributes']['permission']], [permissions.ADMIN, permissions.READ, permissions.READ] ) # test_bulk_partial_update_contributors_public_projects_logged_out res = app.patch_json_api( url_public, {'data': [payload_public_one, payload_public_two]}, bulk=True, expect_errors=True) assert res.status_code == 401 res = app.get(url_public, auth=user.auth) data = res.json['data'] assert_equals( [data[0]['attributes']['permission'], data[1]['attributes']['permission'], data[2]['attributes']['permission']], [permissions.ADMIN, permissions.READ, permissions.READ] ) # test_bulk_partial_update_contributors_private_projects_logged_out res = app.patch_json_api( url_private, {'data': [payload_private_one, payload_private_two]}, expect_errors=True, bulk=True ) assert res.status_code == 401 res = app.get(url_private, auth=user.auth) data = res.json['data'] assert_equals( [data[0]['attributes']['permission'], data[1]['attributes']['permission'], data[2]['attributes']['permission']], [permissions.ADMIN, permissions.READ, permissions.READ]) # test_bulk_partial_update_contributors_private_projects_logged_in_non_contrib res = app.patch_json_api( url_private, {'data': [payload_private_one, payload_private_two]}, auth=user_four.auth, expect_errors=True, bulk=True) assert res.status_code == 403 res = app.get(url_private, auth=user.auth) data = res.json['data'] assert_equals( [data[0]['attributes']['permission'], data[1]['attributes']['permission'], data[2]['attributes']['permission']], [permissions.ADMIN, permissions.READ, permissions.READ] ) # test_bulk_partial_update_contributors_private_projects_logged_in_read_only_contrib res = app.patch_json_api( url_private, {'data': [payload_private_one, payload_private_two]}, auth=user_two.auth, expect_errors=True, bulk=True) assert res.status_code == 403 res = app.get(url_private, auth=user.auth) data = res.json['data'] assert_equals( [data[0]['attributes']['permission'], data[1]['attributes']['permission'], data[2]['attributes']['permission']], [permissions.ADMIN, permissions.READ, permissions.READ]) # test_bulk_partial_update_contributors_projects_send_dictionary_not_list res = app.patch_json_api( url_public, {'data': payload_public_one}, auth=user.auth, expect_errors=True, bulk=True) assert res.status_code == 400 assert res.json['errors'][0]['detail'] == 'Expected a list of items but got type "dict".' # test_bulk_partial_update_contributors_id_not_supplied res = app.patch_json_api( url_public, {'data': [{ 'type': 'contributors', 'attributes': {} }]}, auth=user.auth, expect_errors=True, bulk=True) assert res.status_code == 400 assert len(res.json['errors']) == 1 assert res.json['errors'][0]['detail'] == 'Contributor identifier not provided.' # test_bulk_partial_update_contributors_type_not_supplied res = app.patch_json_api( url_public, {'data': [{ 'id': make_contrib_id( project_public._id, user_two._id ), 'attributes': {} }]}, auth=user.auth, expect_errors=True, bulk=True) assert res.status_code == 400 assert len(res.json['errors']) == 1 assert res.json['errors'][0]['source']['pointer'] == '/data/0/type' assert res.json['errors'][0]['detail'] == 'This field may not be null.' 
# test_bulk_partial_update_contributors_wrong_type invalid_type = { 'id': make_contrib_id(project_public._id, user_two._id), 'type': 'Wrong type.', 'attributes': {} } res = app.patch_json_api( url_public, {'data': [invalid_type]}, auth=user.auth, expect_errors=True, bulk=True) assert res.status_code == 409 # test_bulk_partial_update_contributors_wrong_id invalid_id = { 'id': '12345-abcde', 'type': 'contributors', 'attributes': {} } res = app.patch_json_api( url_public, {'data': [invalid_id]}, auth=user.auth, expect_errors=True, bulk=True) assert res.status_code == 400 assert res.json['errors'][0]['detail'] == 'Could not find all objects to update.' # test_bulk_partial_update_contributors_limits contrib_update_list = {'data': [payload_public_one] * 101} res = app.patch_json_api( url_public, contrib_update_list, auth=user.auth, expect_errors=True, bulk=True) assert res.json['errors'][0]['detail'] == 'Bulk operation limit is 100, got 101.' assert res.json['errors'][0]['source']['pointer'] == '/data' # test_bulk_partial_update_invalid_permissions res = app.patch_json_api( url_public, { 'data': [ payload_public_two, { 'id': make_contrib_id( project_public._id, user_two._id ), 'type': 'contributors', 'attributes': {'permission': 'super-user'} }] }, auth=user.auth, expect_errors=True, bulk=True) assert res.status_code == 400 assert res.json['errors'][0]['detail'] == '"super-user" is not a valid choice.' res = app.get(url_public, auth=user.auth) data = res.json['data'] assert_equals( [data[0]['attributes']['permission'], data[1]['attributes']['permission'], data[2]['attributes']['permission']], [permissions.ADMIN, permissions.READ, permissions.READ]) # test_bulk_partial_update_invalid_bibliographic res = app.patch_json_api( url_public, { 'data': [ payload_public_two, { 'id': make_contrib_id( project_public._id, user_two._id), 'type': 'contributors', 'attributes': {'bibliographic': 'true and false'} } ] }, auth=user.auth, expect_errors=True, bulk=True) assert res.status_code == 400 assert res.json['errors'][0]['detail'] == '"true and false" is not a valid boolean.' 
res = app.get(url_public, auth=user.auth) data = res.json['data'] assert_equals( [data[0]['attributes']['permission'], data[1]['attributes']['permission'], data[2]['attributes']['permission']], [permissions.ADMIN, permissions.READ, permissions.READ]) def test_bulk_partial_update_contributors_public_projects_logged_in( self, app, user, payload_public_one, payload_public_two, url_public): res = app.patch_json_api( url_public, {'data': [payload_public_one, payload_public_two]}, auth=user.auth, bulk=True) assert res.status_code == 200 data = res.json['data'] assert_equals( [data[0]['attributes']['permission'], data[1]['attributes']['permission']], [permissions.ADMIN, permissions.WRITE]) def test_bulk_partial_update_contributors_private_projects_logged_in_contrib( self, app, user, payload_private_one, payload_private_two, url_private): res = app.patch_json_api( url_private, {'data': [payload_private_one, payload_private_two]}, auth=user.auth, bulk=True) assert res.status_code == 200 data = res.json['data'] assert_equals( [data[0]['attributes']['permission'], data[1]['attributes']['permission']], [permissions.ADMIN, permissions.WRITE]) class TestNodeContributorBulkDelete(NodeCRUDTestCase): @pytest.fixture() def user_three(self): return AuthUserFactory() @pytest.fixture() def user_four(self): return AuthUserFactory() @pytest.fixture() def project_public( self, user, user_two, user_three, title, description, category): project_public = ProjectFactory( title=title, description=description, category=category, is_public=True, creator=user ) project_public.add_contributor( user_two, permissions=permissions.READ, visible=True, save=True) project_public.add_contributor( user_three, permissions=permissions.READ, visible=True, save=True) return project_public @pytest.fixture() def project_private( self, user, user_two, user_three, title, description, category): project_private = ProjectFactory( title=title, description=description, category=category, is_public=False, creator=user ) project_private.add_contributor( user_two, permissions=permissions.READ, visible=True, save=True) project_private.add_contributor( user_three, permissions=permissions.READ, visible=True, save=True) return project_private @pytest.fixture() def url_public(self, project_public): return '/{}nodes/{}/contributors/'.format(API_BASE, project_public._id) @pytest.fixture() def url_private(self, project_private): return '/{}nodes/{}/contributors/'.format( API_BASE, project_private._id) @pytest.fixture() def payload_public_one(self, user_two, project_public, make_contrib_id): return { 'id': make_contrib_id(project_public._id, user_two._id), 'type': 'contributors' } @pytest.fixture() def payload_public_two(self, user_three, project_public, make_contrib_id): return { 'id': make_contrib_id(project_public._id, user_three._id), 'type': 'contributors' } @pytest.fixture() def payload_private_one(self, user_two, project_private, make_contrib_id): return { 'id': make_contrib_id(project_private._id, user_two._id), 'type': 'contributors', } @pytest.fixture() def payload_private_two( self, user_three, project_private, make_contrib_id): return { 'id': make_contrib_id(project_private._id, user_three._id), 'type': 'contributors', } def test_bulk_delete_contributors_errors( self, app, user, user_two, user_four, project_public, payload_public_one, payload_public_two, payload_private_one, payload_private_two, url_public, url_private, make_contrib_id): # test_bulk_delete_contributors_blank_request res = app.delete_json_api( url_public, auth=user.auth, 
expect_errors=True, bulk=True) assert res.status_code == 400 # test_bulk_delete_invalid_id_format res = app.delete_json_api( url_public, {'data': [{ 'id': '12345', 'type': 'contributors' }]}, auth=user.auth, expect_errors=True, bulk=True) assert res.status_code == 400 assert res.json['errors'][0]['detail'] == 'Contributor identifier incorrectly formatted.' # test_bulk_delete_invalid_id res = app.delete_json_api( url_public, {'data': [{ 'id': '12345-abcde', 'type': 'contributors' }]}, auth=user.auth, expect_errors=True, bulk=True) assert res.status_code == 400 assert res.json['errors'][0]['detail'] == 'Could not find all objects to delete.' # test_bulk_delete_non_contributor res = app.delete_json_api( url_public, {'data': [{ 'id': make_contrib_id( project_public._id, user_four._id ), 'type': 'contributors' }]}, auth=user.auth, expect_errors=True, bulk=True) assert res.status_code == 404 # test_bulk_delete_all_contributors res = app.delete_json_api( url_public, {'data': [ payload_public_one, payload_public_two, { 'id': make_contrib_id( project_public._id, user._id ), 'type': 'contributors' } ]}, auth=user.auth, expect_errors=True, bulk=True) assert res.status_code == 400 assert res.json['errors'][0]['detail'] in [ 'Must have at least one registered admin contributor', 'Must have at least one visible contributor'] project_public.reload() assert len(project_public.contributors) == 3 # test_bulk_delete_contributors_no_id res = app.delete_json_api( url_public, {'data': [{'type': 'contributors'}]}, auth=user.auth, expect_errors=True, bulk=True) assert res.status_code == 400 assert res.json['errors'][0]['detail'] == 'Request must include /data/id.' # test_bulk_delete_contributors_no_type res = app.delete_json_api( url_public, {'data': [{'id': make_contrib_id( project_public._id, user_two._id )}]}, auth=user.auth, expect_errors=True, bulk=True) assert res.status_code == 400 assert res.json['errors'][0]['detail'] == 'Request must include /type.' # test_bulk_delete_contributors_invalid_type res = app.delete_json_api( url_public, {'data': [{ 'type': 'Wrong type', 'id': make_contrib_id( project_public._id, user_two._id) }]}, auth=user.auth, expect_errors=True, bulk=True) assert res.status_code == 409 # test_bulk_delete_dict_inside_data res = app.delete_json_api( url_public, { 'data': { 'id': make_contrib_id( project_public._id, user_two._id), 'type': 'contributors'}}, auth=user.auth, expect_errors=True, bulk=True) assert res.status_code == 400 assert res.json['errors'][0]['detail'] == 'Expected a list of items but got type "dict".' 
# test_bulk_delete_contributors_public_projects_logged_out res = app.get(url_public, auth=user.auth) assert len(res.json['data']) == 3 res = app.delete_json_api( url_public, {'data': [payload_public_one, payload_public_two]}, expect_errors=True, bulk=True) assert res.status_code == 401 res = app.get(url_public, auth=user.auth) assert len(res.json['data']) == 3 # test_bulk_delete_contributors_private_projects_logged_out res = app.get(url_private, auth=user.auth) assert len(res.json['data']) == 3 res = app.delete_json_api( url_private, {'data': [payload_private_one, payload_private_two]}, expect_errors=True, bulk=True) assert res.status_code == 401 res = app.get(url_private, auth=user.auth) assert len(res.json['data']) == 3 # test_bulk_delete_contributors_private_projects_logged_in_non_contributor res = app.get(url_private, auth=user.auth) assert len(res.json['data']) == 3 res = app.delete_json_api( url_private, {'data': [payload_private_one, payload_private_two]}, auth=user_four.auth, expect_errors=True, bulk=True) assert res.status_code == 403 res = app.get(url_private, auth=user.auth) assert len(res.json['data']) == 3 # test_bulk_delete_contributors_private_projects_logged_in_read_only_contributor res = app.get(url_private, auth=user.auth) assert len(res.json['data']) == 3 res = app.delete_json_api( url_private, {'data': [payload_private_one, payload_private_two]}, auth=user_two.auth, expect_errors=True, bulk=True) assert res.status_code == 403 res = app.get(url_private, auth=user.auth) assert len(res.json['data']) == 3 # test_bulk_delete_contributors_all_or_nothing res = app.get(url_public, auth=user.auth) assert len(res.json['data']) == 3 invalid_id = { 'id': '12345-abcde', 'type': 'contributors', } new_payload = {'data': [payload_public_one, invalid_id]} res = app.delete_json_api( url_public, new_payload, auth=user.auth, expect_errors=True, bulk=True) assert res.status_code == 400 assert res.json['errors'][0]['detail'] == 'Could not find all objects to delete.' res = app.get(url_public, auth=user.auth) assert len(res.json['data']) == 3 # test_bulk_delete_contributors_limits new_payload = {'data': [payload_public_one] * 101} res = app.delete_json_api( url_public, new_payload, auth=user.auth, expect_errors=True, bulk=True) assert res.status_code == 400 assert res.json['errors'][0]['detail'] == 'Bulk operation limit is 100, got 101.' 
assert res.json['errors'][0]['source']['pointer'] == '/data' # test_bulk_delete_contributors_no_payload res = app.delete_json_api( url_public, auth=user.auth, expect_errors=True, bulk=True) assert res.status_code == 400 def test_bulk_delete_contributors_public_project_logged_in( self, app, user, payload_public_one, payload_public_two, url_public): res = app.get(url_public, auth=user.auth) assert len(res.json['data']) == 3 # Disconnect contributor_removed so that we don't check in files # We can remove this when StoredFileNode is implemented in osf-models with disconnected_from_listeners(contributor_removed): res = app.delete_json_api( url_public, {'data': [payload_public_one, payload_public_two]}, auth=user.auth, bulk=True) assert res.status_code == 204 res = app.get(url_public, auth=user.auth) assert len(res.json['data']) == 1 def test_bulk_delete_contributors_private_projects_logged_in_contributor( self, app, user, payload_private_one, payload_private_two, url_private): res = app.get(url_private, auth=user.auth) assert len(res.json['data']) == 3 # Disconnect contributor_removed so that we don't check in files # We can remove this when StoredFileNode is implemented in osf-models with disconnected_from_listeners(contributor_removed): res = app.delete_json_api( url_private, {'data': [payload_private_one, payload_private_two]}, auth=user.auth, bulk=True) assert res.status_code == 204 res = app.get(url_private, auth=user.auth) assert len(res.json['data']) == 1 @pytest.mark.django_db @pytest.mark.enable_quickfiles_creation @pytest.mark.enable_implicit_clean class TestNodeContributorFiltering: @pytest.fixture() def project(self, user): return ProjectFactory(creator=user) @pytest.fixture() def url(self, project): return '/{}nodes/{}/contributors/'.format( API_BASE, project._id) def test_filtering(self, app, user, url, project): # test_filtering_full_name_field filter_url = '{}?filter[full_name]=Freddie'.format(url) res = app.get(filter_url, auth=user.auth, expect_errors=True) assert res.status_code == 400 errors = res.json['errors'] assert len(errors) == 1 assert errors[0]['detail'] == '\'full_name\' is not a valid field for this endpoint.' 
user_two = AuthUserFactory() user_three = AuthUserFactory() project.add_contributor(user_two, permissions.WRITE) project.add_contributor(user_three, permissions.READ, visible=False) # test_filtering_permission_field_admin filter_url = '{}?filter[permission]=admin'.format(url, project._id) res = app.get(filter_url, auth=user.auth, expect_errors=True) assert res.status_code == 200 assert len(res.json['data']) == 1 assert res.json['data'][0]['attributes'].get('permission') == permissions.ADMIN # test_filtering_permission_field_write filter_url = '{}?filter[permission]=write'.format(url, project._id) res = app.get(filter_url, auth=user.auth, expect_errors=True) assert res.status_code == 200 assert len(res.json['data']) == 2 # test_filtering_permission_field_read filter_url = '{}?filter[permission]=read'.format(url, project._id) res = app.get(filter_url, auth=user.auth, expect_errors=True) assert res.status_code == 200 assert len(res.json['data']) == 3 # test_filtering_node_with_only_bibliographic_contributors # no filter res = app.get(url, auth=user.auth) assert res.status_code == 200 assert len(res.json['data']) == 3 # filter for bibliographic contributors filter_url = url + '?filter[bibliographic]=True' res = app.get(filter_url, auth=user.auth) assert res.status_code == 200 assert len(res.json['data']) == 2 assert res.json['data'][0]['attributes'].get('bibliographic', None) # filter for non-bibliographic contributors filter_url = url + '?filter[bibliographic]=False' res = app.get(filter_url, auth=user.auth) assert len(res.json['data']) == 1 # test_filtering_on_invalid_field filter_url = '{}?filter[invalid]=foo'.format(url, project._id) res = app.get(filter_url, auth=user.auth, expect_errors=True) assert res.status_code == 400 errors = res.json['errors'] assert len(errors) == 1 assert errors[0]['detail'] == '\'invalid\' is not a valid field for this endpoint.' def test_filtering_node_with_non_bibliographic_contributor( self, app, user, project, url): non_bibliographic_contrib = UserFactory() project.add_contributor(non_bibliographic_contrib, visible=False) project.save() # no filter res = app.get(url, auth=user.auth) assert res.status_code == 200 assert len(res.json['data']) == 2 # filter for bibliographic contributors filter_url = url + '?filter[bibliographic]=True' res = app.get(filter_url, auth=user.auth) assert len(res.json['data']) == 1 assert res.json['data'][0]['attributes'].get('bibliographic', None) # filter for non-bibliographic contributors filter_url = url + '?filter[bibliographic]=False' res = app.get(filter_url, auth=user.auth) assert len(res.json['data']) == 1 assert not res.json['data'][0]['attributes'].get('bibliographic', None)
Let us host all your BIG events at the range! Tired of the same boring company retreats? Try something different: nothing quite compares to the adrenaline rush you receive from holding cold steel in your hands and shooting rounds down range. Private events are a “blast” on the range. In addition to relieving stress and having fun, shooting activities and events teach concentration, discipline and team building. H&H can provide a unique and fun opportunity for your next company event. Book your corporate group, executive outing or team building events at H&H Shooting Sports. It’s a great way to reward your employees and say “thank you”. Let us take care of it 4U! 4U Catering offers an exquisite menu built by our head chef, Yvonne. We have multiple meeting rooms available, which can be combined into one massive room for big events. We focus on the entire event, creating the mood, atmosphere and surroundings you’ll remember long after the plates are cleared. With us, you’ll see that service is every bit as important as food. Our catering selection is perfect for private parties, business lunches, banquets, and much more. Let H&H host an event you’ll never forget. We are the perfect site for a birthday or bachelor/bachelorette party, team-building exercises, corporate retreats and more. Depending on how many guests you’re inviting, we can work with you to customize the ultimate party package. We’ll handle all the details, so you can focus on having fun! Use the form below to request information; email [email protected]; or give us a shout at (405) 947-3888 to begin customizing your private event to your exact needs.
#!/usr/bin/env python3 """Plots the evolution of an algorithm(s) by defining the relationships between them. """ import os from collections import defaultdict from functools import partial import argparse from textwrap import wrap import yaml from graphviz import Digraph # TODO: remove from pprint import pprint __location__ = os.path.realpath( os.path.join(os.getcwd(), os.path.dirname(__file__))) STYLE_FILE = os.path.join(__location__, 'format.yml') def compose(*a): def composed(f, g, *args, **kwargs): return f(g(*args, **kwargs)) try: return partial(composed, a[0], compose(*a[1:])) except: return a[0] def load_data(file): with open(file, encoding='utf-8') as f: return yaml.safe_load(f) def make_multi_font_label(labels, attributes, widths): def ensure_string(maybe_string): return ' ' if not maybe_string else str(maybe_string) labels = map(ensure_string, labels) return '< {} >'.format('<BR/>'.join( '<FONT {}>{} </FONT>'.format( ' '.join('{}="{}"'.format(k, v) for k, v in attr.items()), '<BR/>'.join(wrap(label, width))) for label, attr, width in zip(labels, attributes, widths))) def by_year_subgraph_constructor(): subgraph = Digraph() subgraph.body.append('rank=same') return subgraph def add_edges(g, node, relation, styles): if relation in node and node[relation]: name = node['short name'] for link_obj in node[relation]: # link might be listed as string or as only key of a dict try: link = ''.join(list(link_obj.keys())) # if dict except: link = link_obj # link name may or may not be defined try: link_name = data[link]['short name'] except: link_name = link g.node(link_name, **styles['unknown nodes']) g.edge(link_name, name, **styles[relation]) def generate_evolution_plot(data): g = Digraph(format='png') styles = load_data(STYLE_FILE) # apply global graph styles g.body.extend('{}={}'.format(k, v) for k, v in styles['graph'].items()) # plot nodes subgraphs = defaultdict(by_year_subgraph_constructor) for node in data.values(): name = node['short name'] label = make_multi_font_label(*zip(*( (name, styles['node name font'], styles['node name width']), (node['title'], styles['node title font'], styles['node title width']), (node['authors'], styles['node authors font'], styles['node authors width']), (node['description'], styles['node description font'], styles['node description width'])))) subgraphs[node['year']].node(name, label, **styles['nodes']) # plot edges for id, node in data.items(): name = node['short name'] add_edges(g, node, 'develops on', styles) add_edges(g, node, 'similar to', styles) # plot year legend years = sorted(list(subgraphs.keys())) for year, graph in subgraphs.items(): graph.node(str(year), **styles['year nodes']) for first, second in zip(years, years[1:]): g.edge(str(first), str(second), **styles['year edges']) for graph in subgraphs.values(): g.subgraph(graph) g.render('img') return g if __name__ == '__main__': parser = argparse.ArgumentParser( description='Plot algorithm evolution.') parser.add_argument('data', help='Yaml file containing data.') args = parser.parse_args() data_file = args.data data = load_data(data_file) graph = generate_evolution_plot(data) print(str(str(graph).encode( 'ascii', errors='backslashreplace').decode()))
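# --- Illustrative input data (not from the original project): the plotter above
# expects a YAML file whose top-level keys are algorithm ids and whose fields
# match the accesses in generate_evolution_plot()/add_edges(). Shown here as the
# Python structure yaml.safe_load() would return; the entries, the output
# filename, and the invocation below are made-up examples.
import yaml

example_data = {
    'alg-a': {
        'short name': 'Alg-A',
        'title': 'An early formulation of the method',
        'authors': 'A. Author',
        'description': 'Baseline version of the algorithm.',
        'year': 1975,
    },
    'alg-b': {
        'short name': 'Alg-B',
        'title': 'A refined variant with adaptive parameters',
        'authors': 'B. Author, C. Author',
        'description': 'Extends Alg-A with self-adaptation.',
        'year': 2001,
        'develops on': ['alg-a'],   # entries may be plain ids or one-key dicts
        'similar to': [],           # optional; omit if there are no such links
    },
}

# Dumping it produces a file the script can be pointed at, e.g.:
#   python plot_evolution.py algorithms.yml   (hypothetical filenames)
with open('algorithms.yml', 'w') as f:
    yaml.safe_dump(example_data, f, default_flow_style=False)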
This is an unusual Brass Weighted earring for a 4mm (6 gauge) stretched ear piercing. It is a Mandala design featuring a natural shell, which looks stunning. It weighs roughly 12 grams and the external diameter measures about 55mm. The price is for 1 ear weight, so please buy 2 if you would like a pair; they are available in left and right.
from skidl import * @SubCircuit def osc(osc1, osc2, gnd, crystal = Part("Device", 'Crystal', footprint='Crystal:Crystal_HC49-U_Vertical', dest=TEMPLATE), cap = Part("Device", 'C', value='10pf', footprint='Capacitors_SMD:C_0603', dest=TEMPLATE) ): '''Attach a crystal and two caps to the osc1 and osc2 nets.''' xtal = crystal(1) # Instantiate the crystal from the template. num_xtal_pins = len(xtal['.*']) # Get the number of pins on the crystal. if num_xtal_pins == 4: # This handles a 4-pin crystal... xtal[2, 4] += gnd # Connect the crystal ground pins. xtal[3, 1] += osc1, osc2 # Connect the crystal pins to the oscillator nets. else: # Otherwise assume it's a 2-pin crystal... xtal[1,2] += osc1, osc2 # Using a two-pin crystal. trim_cap = cap(2) # Instantiate some trimmer caps. trim_cap[0][1, 2] += osc1, gnd # Connect the trimmer caps to the crystal. trim_cap[1][1, 2] += osc2, gnd # Libraries. xess_lib = r'C:\xesscorp\KiCad\libraries\xess.lib' pic32_lib = r'C:\xesscorp\KiCad\libraries\pic32.lib' pickit3_lib = r'C:\xesscorp\KiCad\libraries\pickit3.lib' # Global nets. gnd = Net('GND') gnd.drive = POWER vusb = Net('VUSB') vusb.drive = POWER vdd = Net('+3.3V') # Some common parts used as templates. cap = Part("Device", 'C', footprint='Capacitors_SMD:C_0603', dest=TEMPLATE) res = Part("Device", 'R', footprint='Resistors_SMD:R_0603', dest=TEMPLATE) # Regulate +5V VUSB down to +3.3V for VDD. vreg = Part(xess_lib, 'TPS793XX', footprint='TO_SOT_Packages_SMD:SOT-23-5') noise_cap = cap(value='0.01uf') vreg['IN, EN'] += vusb vreg['GND'] += gnd vreg['OUT'] += vdd vreg['NR'] += noise_cap[1] noise_cap[2] += gnd # Microcontroller. pic32 = Part(pic32_lib, 'pic32MX2\*0F\*\*\*B-QFN28', footprint='Housings_DFN_QFN:QFN-28-1EP_6x6mm_Pitch0.65mm') pic32['VSS'] += gnd pic32['VDD'] += vdd # Main CPU power. pic32['VUSB3V3'] += vdd # Power to USB transceiver. pic32['^VBUS$'] += vusb # Monitor power pin of USB connector. pic32['PAD'] += gnd # Power pad on bottom attached to ground. # Bypass capacitors for microcontroller. bypass = cap(3, value='0.1uf') bypass[0][1, 2] += vdd, gnd bypass[1][1, 2] += vdd, gnd bypass[2][1, 2] += pic32['VCAP'], gnd # Microcontroller MCLR circuitry: # Pull-up resistor to VDD. # Filter capacitor to delay exit of reset or eliminate glitches. # Series resistor to isolate capacitor from device programmer. r_pullup = res(value='10K') r_series = res(value='1K') filter_cap = cap(value='0.1uf') r_series[1, 2] += r_pullup[1], pic32['MCLR'] r_pullup[2] += vdd filter_cap[1, 2] += r_series[1], gnd # USB connector. usb_conn = Part(xess_lib, 'USB-MicroB', footprint='XESS:USB-microB-1') usb_conn['D\+, D-, VBUS, GND, NC'] += pic32['D\+, D-'], vusb, gnd, NC # Noise filtering/isolation on the USB connector shield. shld_cap = cap(value='4.7nf') shld_res = res(value='1M') shld_cap[1] += usb_conn['shield'] shld_res[1] += usb_conn['shield'] gnd += shld_cap[2], shld_res[2] # LED with current-limiting resistor driven by microcontroller pin. led = Part("Device", 'led', footprint='Diodes_SMD:D_0603') led_curr_limit = res(value='1K') led_curr_limit[1, 2] += pic32['RB4'], led['A'] led['K'] += gnd # Crystal and trim capacitors. # crystal = Part(xess_lib, 'XTAL4', footprint='XESS:32x25-4', dest=TEMPLATE) # osc(pic32['OSC1'], pic32['OSC2'], gnd, crystal, cap) osc(pic32['OSC1'], pic32['OSC2'], gnd) # Use default crystal and trim caps. # Port for attachment of device programmer. 
prg_hdr = Part(pickit3_lib, 'pickit3_hdr', footprint='Pin_Headers:Pin_Header_Straight_1x06') prg_hdr.ref = 'PRG' prg_hdr['MCLR'] += pic32['MCLR'] prg_hdr['VDD'] += vdd prg_hdr['GND'] += gnd prg_hdr['PGC'] += pic32['PGEC1'] prg_hdr['PGD'] += pic32['PGED1'] # Port for attachment of FPGA programming pins. port = Part('conn', 'CONN_01x06', footprint='Pin_Headers:Pin_Header_Straight_1x06') port.ref = 'JTAG' port[1, 2] += vusb, gnd port[3] += pic32['SCK1'] # SCK1 output. port[5] += pic32['RB5'] # PPS: SDI1 input. port[4] += pic32['RB15'] # PPS: SS1 output. port[6] += pic32['RA4'] # PPS: SDO1 output. ERC() generate_netlist()
The International Committee of the Red Cross has made a fresh appeal to Boko Haram to release the two remaining hostages in their custody: Alice Loksha, who was working for UNICEF, and Leah Sharibu, the Dapchi schoolgirl kidnapped February 19.

ICRC Regional Director for Africa, Patricia Danzi, made the appeal Tuesday, more than 24 hours after the terrorists killed the second Red Cross worker in their custody, Hauwa Mohammed Liman, in what the ICRC described as “an act of despicable cruelty”.

On September 26, the terrorists had killed their first Red Cross hostage, Saifura Hussaini Ahmed Khorsa, drawing national and global condemnation.

On Sunday, the ICRC in a video made an 11th-hour appeal to the Nigerian government and religious leaders to speedily intervene to save Hauwa, as the deadline for her execution by her Boko Haram abductors loomed. The appeal did not stop the terrorists from carrying out their threat.

“Hauwa and Saifura’s deaths are not only a tragedy for their families, but also to thousands of people in Rann and other conflict-affected areas of north-east Nigeria where accessing health care remains a challenge. We urge the group holding Alice and Leah to release them safely,” said Danzi.

Hauwa, together with Saifura Hussaini Ahmed Khorsa and Alice Loksha, was abducted by Boko Haram from an IDP camp in the north-eastern town of Rann on March 1. Alice, who worked in a centre supported by UNICEF, remains in captivity, along with Leah Sharibu, the 15-year-old student abducted by the group, together with 109 schoolmates, from Government Girls’ Science and Technical College in Dapchi, Yobe state. While Boko Haram freed the rest of the girls in March, they held on to Leah because she refused to renounce her Christian faith.

“The news of Hauwa’s death has broken our hearts,” said Danzi. “We appealed for mercy and an end to such senseless murders. How can it be that two female health care workers were killed back-to-back? Nothing can justify this.

“Hauwa, 24, was full of life, became a midwife at an early age, and people who knew her described her as a sociable, dynamic and enthusiastic woman who was loved by family and friends. She was truly dedicated to her work helping vulnerable women in her family’s home area,” she added.
import socket, time, os, pprint, pickle, sys from time import mktime from datetime import datetime, timedelta # import os.path from libs.dbconnect import dbConnector import libs.utils global cnf, db cnf = "door.cnf" # outpath = "%s/output" % os.path.dirname(__file__) outpath = "%s/output" % sys.path[0] def main(): global options, db options = libs.utils.load_options() db = dbConnector() print "OK" if len(sys.argv)>1: if sys.argv[1] == '--test': sendToLogger() sys.exit() if sys.argv[1] == '--lock': updateDoor('locked') sys.exit() if sys.argv[1] == '--unlock': updateDoor('unlocked') sys.exit() if sys.argv[1] == '--state': print db.getState() sys.exit() if sys.argv[1] == '--sparks': sendSparklines() sendToServer() sys.exit() if sys.argv[1] == '--health': checkHealth() sys.exit() if sys.argv[1] == '--log': sendToLogger() sys.exit() if sys.argv[1] == '--tweet': sendTweets() sys.exit() if sys.argv[1] == '--test-tweet': from libs.services._twitter import Tweet tweet = Tweet() msg = "d queenvictoria %s" % (db.getState()) if len(sys.argv) == 3: msg = "%s %s" % (msg, sys.argv[2]) tweet.setMessage(msg) tweet.send() sys.exit() sendUpdates() def sendUpdates(): global options, db state = db.getState() if libs.utils.get_option('state') != state: libs.utils.set_option('state', state) stateChanged(state) sendToLogger() sendTweets() sendRSS(state) sendSparklines() sendToServer() def sendToLogger(): global options, db from libs.services._pachube import Logger from libs.services._health import Health if db.getState().lower() == "unlocked": state = 0 else: state = 1 anxiety = db.getCurrentScore(3) health = Health() data = { "Anxiety" : anxiety, "Temperature" : get_int(health.data['temp']), "Signal" : get_int(health.data['wifi']), "State" : state, "Transmit" : int(health.data['traffic']['transmit']), "Receive" : int(health.data['traffic']['receive']), } logger = Logger(data) def checkHealth(): from libs.services._health import Health health = Health() output = [] for k, v in health.data.items(): output.append("%s %s" % (k,v)) output = ', '.join(output) print "Sending %s" % output # send a state changed message from libs.services._twitter import Tweet tweet = Tweet() msg = "d queenvictoria %s" % (output) tweet.setMessage(msg) tweet.send() def stateChanged(state): # update the twitter icon updateIcon(state) sys.stderr.write("Door state changed: %s" % state) # send a state changed message from libs.services._twitter import Tweet tweet = Tweet() msg = "d queenvictoria %s" % (state) tweet.setMessage(msg) tweet.send() # update the door last as it will die if the controller board is disconnected updateDoor(state) def updateDoor(state): from libs.services._door import Door door = Door() if state.lower() == 'locked': door.lock() elif state.lower() == 'unlocked': door.unlock() def sendSparklines(): global options, db from libs.services._sparkline import Sparkline print "sendSparklines" # FOO=`sqlite3 door_wordlist.db "SELECT r1q3 from scores WHERE date > date('$UTC') ORDER BY date DESC limit 144;"`; # smoothed results over the last day, 7 days and 3 weeks periods = [21, 1, 7, 91, 365] for p in periods: # hours ago, column scores = db.getScores(24 * p, 3) print "Number of scores: %d" % len(scores) if not len(scores): print "No results for %d" % p continue data = [] for a_score in scores: data.append(a_score[0]) # if we're doing 21 days then calculate the average so we can use it everywhere if p == 21: mean = sum(data)/len(data) print mean data.reverse() print "Data length: %d" % len(data) # every pth item (24th etc) - is 
this to shorten the spark ? # data = data[::p] # instead return an optimal sized array max_width = 240 interval = int(len(data) / 240) if ( interval > 1 ): print "Interval %d" % interval data = data[::interval] print "Data length: %d" % len(data) spark = Sparkline() spark.setFormat('png') spark.setData(data) spark.setOutfile('%s/spark_%dd.png' % (outpath,p)) spark.setXAxis(mean) if p == 1: spark.setXTicks(24) elif p == 7: spark.setXTicks(7) elif p == 21: spark.setXTicks(3) spark.setFillColour((255,0,0,25)) im = spark.getImage() # this one is all the results in order today def sendRSS(state): global options, db # a list of days # 1, 7, 91 print "sendRSS" from libs.services._rss2 import RSS2 # db = dbConnector() # happy d7_q4 = db.getStrongestResult(7, 4) # fear # TO FIX why do we have to specify 2 days here ? d1_q3 = db.getStrongestResult(2, 3) d7_q3 = db.getStrongestResult(7, 3) d21_q3 = db.getStrongestResult(21, 3) d91_q3 = db.getStrongestResults(91, 3, 3) current_state = [] current_state.append(state) current_state.append('http://door.just1.name') # def getScores(self, from_hours_ago=24, quality_column=3, limit=-1): current_score = db.getCurrentScore(3) if current_score: current_score = current_score[0] current_state.append('[Now] %d [3wk mean] %d' % (current_score, db.getMean(21))) else: current_state.append('Unknown') current_state.append(time.strftime('%Y-%m-%d %H:%M:%S', datetime.today().timetuple())) # print d7_q4 # print d7_q3 rss = RSS2() rss.appendItem("Currently", current_state) rss.appendItem("24h fear", d1_q3) rss.appendItem("7d fear", d7_q3) rss.appendItem("3wk fear", d21_q3) rss.appendItem("7d happy", d7_q4) for item in d91_q3: rss.appendItem("3mo fear", item) print rss.getXML() rss.saveRSS('%s/door.rss' % outpath) def updateTwitterFollowing(): from libs.services._twitter import Tweet tweet = Tweet() print tweet.updateFollowing() def sendTweets(): global options, db # search the db for the weeks happiest news # $UTC = getUTCByDateAndTimezone(date('Y-m-d H:i:s', strtotime('-7 days')), "Australia/Sydney"); # $SELECT = " # $S # FROM articles # LEFT JOIN ratings_1 ON articles.id = ratings_1.id # WHERE articles.date_utc > date('$UTC') # ORDER BY ratings_1.q4 DESC # LIMIT 1 # "; # $results3 = $db->query($SELECT); # the_date = datetime.today() - timedelta(days=7) # one_week_ago = getUTCDate(time.strftime('%Y-%m-%d %H:%M:%S', the_date.timetuple()), "Australia/Sydney") # query = 'SELECT articles.title, articles.link, articles.id FROM articles LEFT JOIN ratings_1 ON articles.id = ratings_1.id WHERE date(articles.date_utc) > "%s" ORDER BY ratings_1.q4 DESC LIMIT 1' % (one_week_ago) # from pysqlite2 import dbapi2 as sqlite # db = sqlite.connect('door_wordlist.db') # cursor = db.cursor() # cursor.execute(query) # item = cursor.fetchone() # d7_q4 = db.getStrongestResult(7, 4) d7_q4 = db.getStrongestMood(7, 'happy') # d7_q4 = db.getStrongestMoods(7, 'happy', 3) item = d7_q4 print item # if we've already announced the joyous news do nothing last_update = libs.utils.get_option('last_update') # last_update = '' print last_update if last_update == item[4]: print "Already sent" return else: print "New update %d" % item[4] # otherwise continue on and send updates from libs.services._twitter import Tweet tweet = Tweet() # update our followers first tweet.updateFollowing() # create a short url pointing to the original article url = tweet.ShortenUrl(item[1], 'trim').strip() print url # determine any hashtags to use title = item[0] max_length = 140 # trim the title if necessary if len(title) + 
len(url) >= max_length:
        print 'Trim required.'
        title = title[:max_length-len(url)-1]

    msg = "%s %s" % (title, url)
    print "%s [%d]" % (msg, len(msg))

    tweet.setMessage(msg)
    tweet.send()

    libs.utils.set_option('last_update', item[4])

def updateIcon(state):
#    global db
    from libs.services._twitter import Tweet
    tweet = Tweet()
#    if db.isLocked():
#        tweet.setImage('http://door.just1.name/wp-content/themes/icon-locked.png')
#        tweet.setImage('/home/rossetti/door/icon-locked.jpg')
#    else:
#        tweet.setImage('http://door.just1.name/wp-content/themes/icon-unlocked.png')
#        tweet.setImage('/home/rossetti/door/icon-unlocked.jpg')
    tweet.setImage("/home/rossetti/door/icon-%s.jpg" % state.lower())

def sendToServer():
    # paramiko has a nice sftp.put(self, localpath, remotepath, callback=None)
    # Carroll Oct 1 at 13:28
    print "Sending to the server"
    import paramiko
    import os, glob, hashlib

    host = "house.laudanum.net"
    port = 2220

    try:
        transport = paramiko.Transport((host, port))
        privatekeyfile = os.path.expanduser('~/.ssh/id_rsa')
        mykey = paramiko.RSAKey.from_private_key_file(privatekeyfile)
        username = 'rossetti'
        transport.connect(username = username, pkey = mykey)
        sftp = paramiko.SFTPClient.from_transport(transport)

        glob_pattern = "*"
        files_copied = 0
        for fname in glob.glob(outpath + os.sep + glob_pattern):
            is_up_to_date = False
            local_file = os.path.join(outpath, fname)
            remote_file = '/home/rossetti/door/waikato/output/' + os.path.basename(fname)
            try:
                if sftp.stat(remote_file):
                    local_file_data = open(local_file, "rb").read()
                    remote_file_data = sftp.open(remote_file).read()
                    # compare MD5 digests via hashlib (the deprecated md5 module is not imported)
                    md1 = hashlib.md5(local_file_data).digest()
                    md2 = hashlib.md5(remote_file_data).digest()
                    if md1 == md2:
                        is_up_to_date = True
            except:
                print "NEW: ", os.path.basename(fname),
            if not is_up_to_date:
                sftp.put(local_file, remote_file)
                files_copied += 1

        sftp.close()
        transport.close()
    except socket.error as inst:
        # socket.error: (113, 'No route to host')
        pass
    except:
        print "Couldn't send to server."

def get_int(val):
    import re
    m = re.match("^\d+", val)
    return int(m.group(0))

if __name__ == "__main__":
    main()
Been to Perth a few years back and realised that it has become so Asian. Name it and you get it at Northbridge street. But expensive for me with the currency conversion. Food was not too bad but still nothing like what we get over here. The dumpling in chilli oil is tempting. Also the rest of the Asian dishes. Great gastronomic journey I see.
# -*- coding: utf-8 -*- import datetime from django.db import models from django import forms from django.contrib.auth.models import User from incidents.models import Incident class Nugget(models.Model): date = models.DateTimeField(default=datetime.datetime.now, blank=True) raw_data = models.TextField() source = models.TextField() start_timestamp = models.DateTimeField(default=datetime.datetime.now, blank=True, null=True) end_timestamp = models.DateTimeField(blank=True, null=True) interpretation = models.TextField() incident = models.ForeignKey(Incident) found_by = models.ForeignKey(User) def __unicode__(self): return u"Nugget: {} in {} ({})".format(self.source, self.incident, self.interpretation) class NuggetForm(forms.ModelForm): class Meta: model = Nugget exclude = ('incident', 'found_by') widgets = { 'source': forms.TextInput(attrs={'placeholder': 'NTUSER, $MFT, %APPDATA%, RAM, etc...'}), 'interpretation': forms.Textarea(attrs={'cols': 100, 'rows': 5, 'placeholder': 'What the raw data means to the case.'}), 'raw_data': forms.Textarea(attrs={'placeholder': 'Raw data: log lines, directory listings, registry keys...'}), 'end_timestamp': forms.TextInput(attrs={'placeholder': 'Leave blank if atomic event'}), }
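# --- A minimal sketch (not from this app) of how a view might use NuggetForm.
# Because 'incident' and 'found_by' are excluded from the form, the view fills
# them in before saving; the URL name and template path below are hypothetical.
from django.shortcuts import get_object_or_404, redirect, render

from incidents.models import Incident
from .models import NuggetForm


def add_nugget(request, incident_id):
    incident = get_object_or_404(Incident, pk=incident_id)
    form = NuggetForm(request.POST or None)
    if request.method == 'POST' and form.is_valid():
        nugget = form.save(commit=False)   # defer saving to set the excluded fields
        nugget.incident = incident
        nugget.found_by = request.user
        nugget.save()
        return redirect('incident-details', incident_id=incident.id)  # hypothetical URL name
    return render(request, 'nuggets/add.html', {'form': form})        # hypothetical template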
But I wish there was a quick way to build a song besides recording in the sequence changes manually as they come: go back to seqs, select a new pattern to play, and deactivate it during that section. It's easier for me (selfish) to build a song within the 16 seqs and then order them into song sections. But I could be crazy, and eventually over time get used to this mode and love it.
1) Create the sequences you want.
2) Create the new section you want and select it.
3) Make sure song mode is on.
4) When stopped, switch on all the sequences you want in the new section.
5) Hold Record, then press Play.
6) Wait for the section to begin recording. (Get past the lead-in.)
You now have an 8-bar section consisting of your sequences even though you didn't let them play out. If you want something longer or shorter, select the section and press Info. Adjust the length there.
from m2x_mqtt.v2.resource import Resource class Command(Resource): """ Wrapper for AT&T M2X `Commands API <https://m2x.att.com/developer/documentation/v2/commands>`_ """ COLLECTION_PATH = 'devices/{device_id}/commands' ITEM_PATH = 'devices/{device_id}/commands/{id}' ITEMS_KEY = 'commands' def __init__(self, api, device, **data): self.device = device super(Command, self).__init__(api, **data) def subpath(self, path): return self.item_path(self.id, device_id=self.device.id) + path def process(self, **response_data): """ Method for `Device Marks a Command as Processed <https://m2x.att.com/developer/documentation/v2/commands#Device-Marks-a-Command-as-Processed>`_ endpoint. :param params: Query parameters passed as keyword arguments. View M2X API Docs for listing of available parameters. :return: The API response, see M2X API docs for details :rtype: dict """ return self.api.post(self.subpath('/process'), data=response_data) def reject(self, **response_data): """ Method for `Device Marks a Command as Rejected <https://m2x.att.com/developer/documentation/v2/commands#Device-Marks-a-Command-as-Rejected>`_ endpoint. :param params: Query parameters passed as keyword arguments. View M2X API Docs for listing of available parameters. :return: The API response, see M2X API docs for details :rtype: dict """ return self.api.post(self.subpath('/reject'), data=response_data)
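# --- A hedged usage sketch (not part of this module): acknowledging one
# already-fetched command on the device side. The api/device objects and the
# shape of command_data (it must at least include 'id') are assumptions about
# the surrounding m2x_mqtt client code, not documented API.
def acknowledge(api, device, command_data):
    """Mark a fetched command as processed, or rejected if handling fails."""
    cmd = Command(api, device, **command_data)
    try:
        # ... act on the command payload here ...
        cmd.process(result='applied')        # POST .../commands/<id>/process
    except Exception as exc:
        cmd.reject(reason=str(exc))          # POST .../commands/<id>/reject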
Our research team is at the core of all our products & services! Information Technology (IT) is the single most rapidly changing and growing industry in the world. Every day, new technology is developed while old technologies are retired or improved. Secure networks, gaming, sharing pictures with friends, national security, conducting business, electronic medical records, distance learning, and secure protection of sensitive and personal ID data are only some of the millions of uses for technology today. Add in trends such as the move to flexible work arrangements, contract work, an aging workforce, global labour sourcing, data-driven decision making and the need for both more specialization and more fundamental skills, and it becomes clear just how complex IT processes have become. That’s why it is critical to ensure that our skilled, knowledgeable and qualified professionals are ahead of the curve and on top of new IT developments. We utilize the latest information to provide our clients with the best software and services!

Currently, emerging technologies include some of the most prominent ongoing developments, advances, and innovations in various fields of modern technology. Emerging technologies are those technical innovations which represent progressive developments within a field for competitive advantage. We can help create the next generation of natural, immersive, and intuitive applications that include hand and finger tracking, facial analysis, speech recognition, augmented reality, and 3D scanning. With imagination and technology, the possibilities are endless. We believe this innovative technology will shape the future of everything from catching up with friends to enhancing our everyday lives. Evaluating future tools gives our clients the advantage of leveraging technology ahead of time in this competitive world!

Econoxy R & D Lab is a skunkworks team and in-house technology incubator. We work on shorter projects, granting project leaders only one year in which to move a project from concept to proven product. The ideal project combines technology and science, requires a certain amount of novel research, and creates a marketable product within the one-year time frame. Projects are ideally shortlisted based on the problem statements we receive from our clients. Research at our Lab tackles the most challenging problems faced by our clients in Computer Science and related fields. Being bold and taking risks is essential to what we do, and research teams are embedded throughout Econoxy, allowing our discoveries to affect many users every year.
import re from base_page import BasePage, script_returns_true from page_locators import FormPageLocators as locators from selenium.webdriver.support import expected_conditions as EC from arches.urls import uuid_regex class FormPage(BasePage): """ class to initialize the form-manager page """ def __init__(self, driver, live_server_url, graph_id): super(FormPage, self).__init__(driver, live_server_url, '/graph/' + graph_id + '/form_manager') def add_new_form(self): """ Clicks on the add new form button and returns a new form_id """ self.open() form_id = None self.wait.until( EC.element_to_be_clickable(locators.ADD_FORM_BUTTON) ).click() try: form_id = self.wait.until( script_returns_true(''' try{ var matches = window.location.pathname.match(/(''' + uuid_regex + ''')/i); console.log(window.location) if (matches && matches.length === 2){ console.log(matches) return matches[1]; }else{ return false; } }catch(err){ return false; } ''') ) except: pass return form_id def configure_form(self, form_name): self.wait.until( EC.element_to_be_clickable(locators.ADD_FORM_CARD_BUTTON) ).click() form_name_input = self.driver.find_element(*locators.FORM_NAME_INPUT) form_name_input.clear() form_name_input.send_keys(form_name) self.wait.until( EC.element_to_be_clickable(locators.SAVE_EDITS_BUTTON) ).click()
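# --- A hedged example of driving the page object above from a test; the
# webdriver choice, server URL, graph id, and form name here are illustrative
# only and not taken from the surrounding test suite.
from selenium import webdriver


def example_form_workflow(live_server_url, graph_id):
    driver = webdriver.Firefox()
    try:
        page = FormPage(driver, live_server_url, graph_id)
        form_id = page.add_new_form()        # returns the new form's UUID, or None on failure
        assert form_id is not None
        page.configure_form('Example Form')  # rename the new form and save it
    finally:
        driver.quit()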
Back in those pre-historic ages of paying per minute for internet connection. Ah, the good old days. Sorry, but I find this is total BS. Who do you think you are to just do an unread/delete to all these people? This site would only be a waste of time if this is your primary source for trying to get dates. I'm talking if you ONLY sat on here and tried to email woman after woman, after woman, and this is all you had, then yeah....You'd be wasting major time. Put it to you fellas like this....Most women on here have PAGES of emails in their inbox for the most part....so...out of those pages, they are going to only choose the top percentage of good looking men to respond to. It's THAT simple. They have a lot of options like that. I don't buy the nonsense that men feed themselves here that if you write some Shakespearean sonnet, she'll respond. That's just bullsh*t in itself completely. If you aren't attractive to the woman, then she isn't responding. Chances are, she won't even read the email. Let's keep it real, it is what it is. With that being said, this should be treated as just another way to get a date, on TOP of you having an active social life, with hobbies and interests outside of the internet. "Chances are, she won't even read the email." That is probable, but chances are they do read over the actual profile. I hear women actually do at least that, to see if the man has anything interesting to say in it or read something in it that would make them compatible with the man. I agree, it's just one way, and shouldn't be the only way. Although that's tempting because it's obviously so "easy". But nuthin's ever "free" and my own experience has been that the "quality" seems to improve more with folks I've met IRL (in real life) than online. Dunno, maybe it's the anonymous quality of internet dating and the lack of accountability that attracts a certain "type", but too many of the gals I've come across online just seem kinda skittish and flakey, sending all kinds of conflicting signals, like they're not even sure WHAT they really want, let alone WHO! But just personally, would recommend donating some time in local volunteer groups. You'll be doing some good work, and the quality of folks there is usually kinda nice too!
# -*- coding: utf-8 -*- # # Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of the copyright holder nor the names of its contributors # may be used to endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from bambou import NURESTObject class NUPGExpressionTemplate(NURESTObject): """ Represents a PGExpressionTemplate in the VSD Notes: Policy Group Expression Template is an expression consisting of policy groups defined at Domain Template or L2 Domain Template """ __rest_name__ = "pgexpressiontemplate" __resource_name__ = "pgexpressiontemplates" ## Constants CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL" CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE" def __init__(self, **kwargs): """ Initializes a PGExpressionTemplate instance Notes: You can specify all parameters while calling this methods. 
A special argument named `data` will enable you to load the object from a Python dictionary Examples: >>> pgexpressiontemplate = NUPGExpressionTemplate(id=u'xxxx-xxx-xxx-xxx', name=u'PGExpressionTemplate') >>> pgexpressiontemplate = NUPGExpressionTemplate(data=my_dict) """ super(NUPGExpressionTemplate, self).__init__() # Read/Write Attributes self._name = None self._last_updated_by = None self._last_updated_date = None self._description = None self._entity_scope = None self._creation_date = None self._owner = None self._expression = None self._external_id = None self.expose_attribute(local_name="name", remote_name="name", attribute_type=str, is_required=True, is_unique=True) self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="last_updated_date", remote_name="lastUpdatedDate", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="description", remote_name="description", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL']) self.expose_attribute(local_name="creation_date", remote_name="creationDate", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="owner", remote_name="owner", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="expression", remote_name="expression", attribute_type=str, is_required=True, is_unique=False) self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True) self._compute_args(**kwargs) # Properties @property def name(self): """ Get name value. Notes: Name of the Policy Group Expression Template """ return self._name @name.setter def name(self, value): """ Set name value. Notes: Name of the Policy Group Expression Template """ self._name = value @property def last_updated_by(self): """ Get last_updated_by value. Notes: ID of the user who last updated the object. This attribute is named `lastUpdatedBy` in VSD API. """ return self._last_updated_by @last_updated_by.setter def last_updated_by(self, value): """ Set last_updated_by value. Notes: ID of the user who last updated the object. This attribute is named `lastUpdatedBy` in VSD API. """ self._last_updated_by = value @property def last_updated_date(self): """ Get last_updated_date value. Notes: Time stamp when this object was last updated. This attribute is named `lastUpdatedDate` in VSD API. """ return self._last_updated_date @last_updated_date.setter def last_updated_date(self, value): """ Set last_updated_date value. Notes: Time stamp when this object was last updated. This attribute is named `lastUpdatedDate` in VSD API. """ self._last_updated_date = value @property def description(self): """ Get description value. Notes: Description of the Policy Group Expression Template """ return self._description @description.setter def description(self, value): """ Set description value. Notes: Description of the Policy Group Expression Template """ self._description = value @property def entity_scope(self): """ Get entity_scope value. Notes: Specify if scope of entity is Data center or Enterprise level This attribute is named `entityScope` in VSD API. """ return self._entity_scope @entity_scope.setter def entity_scope(self, value): """ Set entity_scope value. 
Notes:
                Specify if scope of entity is Data center or Enterprise level

                This attribute is named `entityScope` in VSD API.

        """
        self._entity_scope = value

    @property
    def creation_date(self):
        """ Get creation_date value.

            Notes:
                Time stamp when this object was created.

                This attribute is named `creationDate` in VSD API.

        """
        return self._creation_date

    @creation_date.setter
    def creation_date(self, value):
        """ Set creation_date value.

            Notes:
                Time stamp when this object was created.

                This attribute is named `creationDate` in VSD API.

        """
        self._creation_date = value

    @property
    def owner(self):
        """ Get owner value.

            Notes:
                Identifies the user that has created this object.

        """
        return self._owner

    @owner.setter
    def owner(self, value):
        """ Set owner value.

            Notes:
                Identifies the user that has created this object.

        """
        self._owner = value

    @property
    def expression(self):
        """ Get expression value.

            Notes:
                Actual Policy Group Expression like (PG1 || PG2) && !PG3. Allowed operators are && (AND), ! (NOT), || (OR) and ( )

        """
        return self._expression

    @expression.setter
    def expression(self, value):
        """ Set expression value.

            Notes:
                Actual Policy Group Expression like (PG1 || PG2) && !PG3. Allowed operators are && (AND), ! (NOT), || (OR) and ( )

        """
        self._expression = value

    @property
    def external_id(self):
        """ Get external_id value.

            Notes:
                External object ID. Used for integration with third party systems

                This attribute is named `externalID` in VSD API.

        """
        return self._external_id

    @external_id.setter
    def external_id(self, value):
        """ Set external_id value.

            Notes:
                External object ID. Used for integration with third party systems

                This attribute is named `externalID` in VSD API.

        """
        self._external_id = value
If you or somebody you know has a need for addiction counseling in Morris County NJ and you aren’t certain where you can turn, get in touch with us at New Pathway Counseling Services. New Pathway specializes in holistic rehab, which means that we look at the full picture of each individual we work with, in order to help them reach deeper healing and addiction recovery which lasts. Many addiction counseling programs do not deal with the deeper causes which led to addiction and just deal with the most obvious symptoms. This is not a very effective method when it comes to maintaining addiction recovery that’s going to be long term. By taking advantage of addiction group treatment in Rockaway NJ from New Pathway Counseling Services, the emotional roots of addiction can be addressed as a means of assisting individuals with long term recovery from addiction.

New Pathway is a team of committed professionals with experience helping adults, teens, and families heal from the damage that addiction has caused. There is a broad range of ways that substance abuse can take a significant toll on not only an individual’s own life, but their relationships as well. At New Pathway, we address the damage which comes as a result of addiction to alcohol, drugs, or both, together with addressing the problem itself. Dedicated to assisting teens in avoiding developing a real substance abuse disorder, New Pathway supplies proven-effective early intervention group treatment in Morris County NJ. Teens going through the early stages of addiction can meet their peers who are going through the same experience in this eight-week program consisting of group sessions.

At New Pathway, we focus on holistic outpatient rehab in Morris County NJ. These programs include Qigong, which is a mind-body therapy. Qigong is an exercise practice composed of meditative movements, which assist in improving the ‘chi’ within the body. The ‘chi’ is the body’s own inner energy that is capable of healing one from the daily stresses and deeper emotional stressors which can lead to an addiction problem. By assisting your body and restoring the balance of its chi, Qigong is essential for remaining centered and helps your body recover from stress more easily. With practices like Qigong, New Pathway helps individuals find new means of overcoming the issues which they may once have turned to alcohol or drugs to avoid. For holistic addiction rehabilitation in Rockaway NJ, get in touch with New Pathway right away.

Get in touch with New Pathway immediately when you or someone you care about is in need of a holistic addiction rehab program in Rockaway NJ. If an outpatient program in Morris County NJ is what you’re looking for, New Pathway provides outpatient services in addition to our holistic practices and early intervention program. Outpatient rehab is often the best solution for people who are dealing with addiction, because they’re able to quickly apply what they learn in counseling to their everyday real life. Outpatient treatment is able to address the requirements of each individual. Our addiction recovery rehab program includes therapies such as psycho-social treatment, and individual and group psychotherapy.

Working both individually and with a group can be extremely beneficial: it helps individuals dealing with addiction apply the coping mechanisms they’ve learned to social interactions and benefit from others’ experiences with addiction and learning to overcome it. An addiction group program can provide a safe environment where individuals can share their addiction experiences. When it comes to overcoming addiction and discovering new means of coping with depression, anxiety and other issues, this can be an extremely helpful initial step. You or someone you know will be able to obtain the support needed to heal and move on from addiction thanks to New Pathway Counseling Services. For outpatient rehab in Rockaway NJ or any of the other services we provide, give us a call at New Pathway Counseling Services.
import io
import struct
import numpy as np


class DataParseClass:
    """Sequential reader for a little-endian binary blob."""

    def __init__(self, byteData):
        self.data = io.BytesIO(byteData)

    def readUTF(self):
        # 2-byte little-endian length prefix followed by a UTF-8 encoded string.
        length = int.from_bytes(self.data.read(2), byteorder='little')
        return self.data.read(length).decode('utf-8')

    def readInt8(self):
        return int.from_bytes(self.data.read(1), byteorder='little')

    def readInt16(self):
        return int.from_bytes(self.data.read(2), byteorder='little')

    def readInt32(self):
        return int.from_bytes(self.data.read(4), byteorder='little')

    def readFloat(self):
        # '<f' keeps float reads explicitly little-endian, consistent with the integer readers.
        return struct.unpack('<f', self.data.read(4))[0]

    def readVector2D(self):
        return np.array([self.readFloat(), self.readFloat()], dtype='float')

    def readVector3D(self):
        return np.array([self.readFloat(), self.readFloat(), self.readFloat()], dtype='float')

    def readVector4D(self):
        return np.array([self.readFloat(), self.readFloat(), self.readFloat(), self.readFloat()], dtype='float')
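A minimal usage sketch (not part of the original module) showing how the reader methods consume a little-endian byte stream; the packed payload and field layout below are purely illustrative assumptions:

import struct

# Hypothetical payload: a length-prefixed UTF-8 string, an int32, and a float,
# all packed little-endian to match the reader methods above.
payload = (
    struct.pack('<H', 5) + b'hello' +  # readUTF: 2-byte length, then the string bytes
    struct.pack('<i', 42) +            # readInt32
    struct.pack('<f', 3.5)             # readFloat (3.5 is exactly representable in float32)
)

parser = DataParseClass(payload)
print(parser.readUTF())    # 'hello'
print(parser.readInt32())  # 42
print(parser.readFloat())  # 3.5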
Mass Gathering to create a Guinness World Record for the Most People Attending a Didgeridoo Sound Healing Session. We invite you to be part of a world-record-breaking event in Sound Healing for Mother Earth. Be part of the 600-plus people joining together with a raised awareness and focused intent to energetically hold the earth and all its beauty and bounty in a place of healing, wholeness and abundance. Children are welcome. Facilitated by Mark Steinward from One Tribe Healing with Lou Van Stone, Heather Jean and Kyela, this free event is one of a kind, creating a sense of community and connection. Admission to the expo is included with your registration.
#encoding=UTF-8
"""
@author [email protected]
@link http://www.ideawu.net/
"""
import new, socket
from buffer import *

LINK_ROLE_SERVER = 1
LINK_ROLE_CLIENT = 2
LINK_ROLE_ACCEPT = 3

class LinkBase:
    def __init__(self, sock=None):
        self.id = -1
        self.fd = None
        self.sock = None
        self.local_addr = ''   # ip:port
        self.remote_addr = ''  # ip:port
        self.parent = None
        self.role = None
        self.ptr = None
        self.alive = False
        self.recv_pkt = None
        self.recv_buf = Buffer()
        self.send_buf = Buffer()
        # If an already-connected socket is passed in (e.g. by accept()), adopt it.
        if sock is not None:
            self.set_sock(sock)

    def is_client(self):
        return self.role == LINK_ROLE_CLIENT

    def is_server(self):
        return self.role == LINK_ROLE_SERVER

    def is_accept(self):
        return self.role == LINK_ROLE_ACCEPT

    def listen(self, host, port, backlog=128):
        try:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            sock.bind((host, port))
            sock.listen(backlog)
        except BaseException, e:
            return False
        self.role = LINK_ROLE_SERVER
        self.set_sock(sock)
        return True

    # TODO: accept_all(self):
    def accept(self):
        sock, addr = self.sock.accept()
        link = new.instance(self.__class__)
        link.__init__(sock)
        link.role = LINK_ROLE_ACCEPT
        link.parent = self
        link.remote_addr = "%s:%d" % sock.getpeername()
        return link

    def connect(self, host, port):
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.connect((host, port))
        sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        self.role = LINK_ROLE_CLIENT
        self.set_sock(sock)
        self.remote_addr = "%s:%d" % sock.getpeername()

    def set_sock(self, sock):
        self.fd = sock.fileno()
        self.sock = sock
        self.alive = True
        self.local_addr = "%s:%d" % sock.getsockname()

    def is_alive(self):
        return self.alive

    def close(self):
        self.alive = False
        try:
            self.sock.close()
            self.sock = None
        except:
            pass

    def fileno(self):
        return self.fd

    """ Check whether a complete packet has been received and is ready to read. """
    def recv_ready(self):
        return self.recv_pkt.ready()

    """ Perform one network read operation. """
    def net_recv(self, bufsize=8192):
        try:
            data = self.sock.recv(bufsize)
            #data = self.sock.recv(3)
            #print 'link <-', repr(data)
        except BaseException, e:
            return -1
        if not data:
            return 0
        self.recv_buf.append(data)
        return len(data)

    """ Perform one network write operation.
        @return
        -1: error
        0 : the caller is advised to close the connection
    """
    def net_send(self):
        try:
            nsent = self.sock.send(self.send_buf.base)
            #nsent = self.sock.send(self.send_buf.base[0:3])
            #print 'link ->', repr(self.send_buf.base[0:nsent])
        except BaseException, e:
            return -1
        self.send_buf.consume(nsent)
        return nsent

    """ Non-blocking send (the data is copied into the send buffer). """
    def async_send(self, data):
        return self.send(data, urgent=False)

    """ Non-blocking receive. """
    def async_recv(self):
        return self.recv(block=False)

    """ See send_packet; only the packet body to send is passed in. """
    def send(self, data, urgent=True):
        packet = self.PacketClass()
        packet.set_body(data)
        ret = self.send_packet(packet, urgent)
        return ret

    """ See recv_packet; only the body part of the packet is returned. """
    def recv(self, block=True):
        ret = self.recv_packet(block)
        if ret == -1:
            return -1
        elif ret == None:
            return None
        else:
            return ret.body

    """ Non-blocking send_packet. """
    def async_send_packet(self, packet):
        return self.send_packet(packet, urgent=False)

    """ Non-blocking recv_packet. """
    def async_recv_packet(self):
        return self.recv_packet(block=False)

    """ Write the packet into the send buffer.
        @param urgent: if True, wait until the network send completes before returning (the default).
        @return
        -1: error
    """
    def send_packet(self, packet, urgent=True):
        data = packet.encode()
        self.send_buf.append(data)
        if urgent:
            while self.send_buf.len() > 0:
                if self.net_send() == -1:
                    return -1
        return len(data)
3E supported the project team (architect, building services engineers, etc.) with the aim of designing a cost-efficient energy concept for the entire project. The shops and dwellings are conditioned and form a defined, insulated heated volume. Through dynamic simulations, the dwellings were optimised to reach the passive house standard and a comfortable indoor temperature during summer. However, for a number of residential units under the current design, the heat demand either couldn’t be sufficiently reduced or the summer comfort temperature couldn’t be guaranteed. The shops were optimised to be an exemplary building, where passive measures guarantee a comfortable indoor temperature during summer. The covered food market is closed off from the external environment for hygiene reasons, but the areas accessible to visitors are not actively cooled or heated, only ventilated. The actual cooling cells are conceived as box-in-box insulated and conditioned rooms. Dynamic simulations showed that the market building does not benefit from insulating the building envelope. Efforts to maintain a certain indoor temperature in winter are limited. In summer, the indoor temperatures rise significantly but are not critical, partly due to the massive concrete structure. A combination of solar control measures and night cooling is sufficient to guarantee a comfortable indoor temperature in the food market during summer.
# -*- coding: utf-8 -*- # # Copyright 2017 Telefónica Digital España S.L. # # This file is part of URBO PGSQL connector. # # URBO PGSQL connector is free software: you can redistribute it and/or # modify it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # URBO PGSQL connector is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero # General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with URBO PGSQL connector. If not, see http://www.gnu.org/licenses/. # # For those usages not covered by this license please contact with # iot_support at tid dot es import requests import yaml import json try: from yaml import CLoader as Loader except ImportError: from yaml import Loader try: """ Remove InsecureRequestWarning for unverified HTTPS requests. For Requests library version < 2.4 an error raise in this import. """ from requests.packages.urllib3.exceptions import InsecureRequestWarning requests.packages.urllib3.disable_warnings(InsecureRequestWarning) except ImportError as err: # raise ImportError("{}\nYou need to upgrade Requests Library".format(err)) pass class GetAuthTokenException(Exception): pass class DeleteSubscriptionException(Exception): pass def getAuthToken(url_authtk, fl_fw_auth, timeout=10, ssl=False): try: headers_authtk = {'Content-Type': 'application/json'} with open(fl_fw_auth) as fw_json_auth: json_data = json.load(fw_json_auth) payload = json.dumps(json_data) resp = requests.post(url_authtk, headers=headers_authtk, data=payload, verify=ssl, timeout=timeout) if resp.ok: auth_token = resp.headers.get('x-subject-token') resp_json = resp.json().get('token') exp_date = resp_json.get('expires_at') return(auth_token, exp_date) else: raise GetAuthTokenException("Error: {}".format(resp.json())) except Exception as err: print("Error: {}".format(err)) def deleteSubscriptions(subs, url_subs, fiw_serv, fiw_subsserv, authtoken, timeout=10, ssl=False): try: headers_authtk = { 'Content-Type': 'application/json', 'Accept': 'application/json', 'Fiware-Service': fiw_serv, 'Fiware-ServicePath': fiw_subsserv, 'x-auth-token': authtoken } for subs_id in subs: json_data = { "subscriptionId": subs_id } payload = json.dumps(json_data) resp = requests.post(url_subs, headers=headers_authtk, data=payload, verify=ssl, timeout=timeout) if resp.ok: #print resp.json() print("{0}. Deleted subscription: {1}".format(resp, subs_id)) else: print(resp) raise DeleteSubscriptionException("Error: {}".format(resp.json())) except Exception as err: print("Error: {}".format(err)) def main(): fl_fw_auth = "fiware_auth.json" url_authtk = 'https://195.235.93.224:15001/v3/auth/tokens' auth_token, exp_date = getAuthToken(url_authtk, fl_fw_auth) print(auth_token) url_subs = 'https://195.235.93.224:10027/v1/unsubscribeContext' fiw_serv = 'sc_smart_region_andalucia' fiw_subsserv = '/and_sr_torrox' subs = ['580f43b9fdc8301538a65ab4'] deleteSubscriptions(subs, url_subs, fiw_serv, fiw_subsserv, auth_token) if __name__ == '__main__': main()
If you or a loved one have been a victim of sexual assault or a related crime, contact our Pittsburgh sexual assault lawyers today for a consultation about your case. Our Pittsburgh sexual assault lawyers have the resources and experience needed to represent clients in the most complicated sexual assault cases. Our Pittsburgh personal injury lawyers of Allegheny County are here to make it right. A personal injury misfortune can leave a person in a very difficult situation, and they may find themselves facing resistance when pursuing compensation for the negligence of another. Our Pittsburgh-based personal injury lawyers handle a wide range of personal injury cases, such as distressing injuries caused by a car crash, work-related injuries, property owner negligence, dangerous products, and more. Our Pittsburgh accident attorneys know how to approach all the details of a wide variety of personal injury matters. With the help of an experienced Pittsburgh personal injury lawyer, you can finally start feeling the relief you deserve in your difficult situation. When you file a lawsuit in Pennsylvania for personal injury and win, you will collect damages in the form of money from the at-fault party.
import time import pyautogui HESITATE = 3 # time to wait before starting clickstorm pyautogui.PAUSE = .06 # going faster may cause issues with placement SPEED = .1 positions = [] print("If you have a dual monitor set up, make sure the browser window is " "located on your main monitor.\n If you are using a screen and laptop, " "the laptop is your main monitor.") print("Please align your browser so you can see all category boxes. While you " "input and follow these commands, do not move or scroll your browswer " "window!") input("Please mouse over the top most draggable option in the leftmost " "category box. It should be labeled with 'Uncharacteristic'. Then press " "return.") left_position = pyautogui.position() input("Please mouse over the top most draggable option in the middle category " "box. It should be labeled with 'Neutral'. Then press return.") center_position = pyautogui.position() input("Please mouse over the top most draggable option in the rightmost " "category box. It should be labeled with 'Characteristic'. Then press " "return.") right_position = pyautogui.position() print("Now, for each of the bottom category boxes, mouse over them and press " "return, then move onto the next box. There should be 9 boxes in total.") for categorynum in range(9): input(categorynum) positions.append(pyautogui.position()) print("Please switch back to your browser window. The script will begin " "in {} seconds".format(HESITATE)) time.sleep(HESITATE) # The below counts in the array may look odd, but it was just the # easiest way to break the containers. Since the first one needs 3 in # it, second needs 6 in it etc. it adds up to 30 because at the end # of running the script for cat 3, each container will have 30 in # it. In the middle part, the 5 is there because that container has 5 # slots in it left over after 10 are placed in it from the first loop. # The general idea is, starting on the left, place cards into the # containers till full, then move onto the next container. if a # container fills up or contains no more cards, move onto the next. # The first array in the zip corresponds to the amount that needs to # go in the below containers on cat9, and adds up the 30. The range(4) # is there to grab the appropriate offset for containers 1-4 etc. for card_count, pos_count in zip([3, 6, 11, 10], range(4)): offset = (- left_position[0] + positions[pos_count][0], - left_position[1] + positions[pos_count][1]) for k in range(card_count): pyautogui.moveTo(left_position[0], left_position[1], SPEED) pyautogui.click() pyautogui.dragRel(offset[0], offset[1], SPEED, button='left') pyautogui.click() # The first array in the zip corresponds to the amount that needs to # go in the below containers on cat9, and adds up the 30. The # range(3,6) is there to grab the appropriate offset for containers # 3-6 etc. for card_count, pos_count in zip([5, 20, 5], range(3, 6)): offset = (- center_position[0] + positions[pos_count][0], - center_position[1] + positions[pos_count][1]) for k in range(card_count): pyautogui.moveTo(center_position[0], center_position[1], SPEED) pyautogui.click() pyautogui.dragRel(offset[0], offset[1], SPEED, button='left') pyautogui.click() # The first array in the zip corresponds to the amount that needs to # go in the below containers on cat9, and adds up the 30. The range(4) # is there to grab the appropriate offset for containers 5-6 etc. 
for card_count, pos_count in zip([10, 11, 6, 3], range(5, 9)): offset = (- right_position[0] + positions[pos_count][0], - right_position[1] + positions[pos_count][1]) for k in range(card_count): pyautogui.moveTo(right_position[0], right_position[1], SPEED) pyautogui.click() pyautogui.dragRel(offset[0], offset[1], SPEED, button='left') pyautogui.click()
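As a quick sanity check on the arithmetic described in the comments above (this snippet is not part of the original script), the per-container card counts used in each of the three loops should total 30 cards per column:

# Each list of card counts used in the three drag loops above should sum to 30.
assert sum([3, 6, 11, 10]) == 30   # leftmost column
assert sum([5, 20, 5]) == 30       # middle column
assert sum([10, 11, 6, 3]) == 30   # rightmost column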
The Canary Wharf Group has secured a £30m loan from Metro Bank, for a new private members club on its 97-acre site on the Isle of Dogs. The new club now plans to open in spring 2019, after securing a 60,000 sq ft site, which has been pre-let to the Arts Club Group. The club will be located on Middle Dock, and feature 17 guest rooms for club members and guests, as well as multiple restaurants and a roof terrace.
#-*- coding: utf-8 -*- from transtool.dictionary import INILoader from transtool.dictionary.models import KoreanPackage from transtool.dictionary.exc import WordNotFound, MultipleCandidates def assertlog(cond, *logitems): try: assert cond except AssertionError as e: if logitems: for item in logitems: print item, print '' def test_dictionary(): l = INILoader('test.ini') package = l.gen_package() package.build_index() w = package.get('list') assertlog(package.get('list').candidate == u'리스트', package.get('list'), u'리스트') assert package.get('tuple').candidate == u'튜플' assert package.get('list', 'python').candidate == u'리스트' try: package.get('list', 'web') except WordNotFound: pass try: package.get('form') except MultipleCandidates: pass return package def test_korean_dictionary(): l = INILoader('test.ini', Package=KoreanPackage) package = l.gen_package() package.build_index() assert package.get(u'list는').candidate == u'리스트는' assert package.get(u'list가').candidate == u'리스트가' assert package.get(u'list를').candidate == u'리스트를' assert package.get(u'tuple은').candidate == u'튜플은' assert package.get(u'tuple이').candidate == u'튜플이' assert package.get(u'tuple을').candidate == u'튜플을' assert package.get(u'list을').candidate == u'리스트를' assert package.get(u'tuple가').candidate == u'튜플이' assert package.get(u'dictionary는').candidate == u'딕셔너리는' return package if __name__ == '__main__': test_dictionary()
With maternal mortality and morbidity rates that are worse than in any other developed country in the world, America’s need for more diverse and culturally-conscious health care providers is urgent. Many nursing institutions preach “diversity,” but schools like Frontier Nursing University (FNU) are achieving it through programming, initiatives and partnerships with like-minded organizations. Each year, FNU hosts its Diversity Impact Student Conference. In its eighth year, the conference is hosted by students and faculty leaders in FNU’s PRIDE Program, which was established to promote recruitment and retention to increase diversity in Nurse-Midwifery and Nurse Practitioner education. FNU has put the “impact” in Diversity Impact, especially in this year’s event. With a theme entitled “We Are One: Uniting Dreamers with Diverse Voices,” presenters at this year’s four-day conference spoke on mental health and cultural care, transcultural nursing and the current state of mortality rates in the African American community. Each student who attended was given the opportunity not only to listen to an impactful keynote, but also to participate in a culturally eye-opening field trip, a cross-cultural communication exercise, and collaborative discussions on improving minority health among underrepresented and marginalized groups. These powerful conversations ranged in topic from environmentally-sustainable healthcare to mental health in patient and police interactions, to vulnerable populations and sexual IQ risk reduction. Each year at Diversity Impact, attendees walk away with proactive solutions to create meaningful connections and provide better care within diverse communities. In addition to its annual event, FNU partnered with the Association of Women’s Health, Obstetric and Neonatal Nurses (AWHONN) in a brand-new initiative this year to produce a video about the need for a diverse nurse-midwifery workforce to improve health outcomes across the United States. This five-minute video, filmed as part of AWHONN’s “Partners in Care” program, highlights how Frontier Nursing University students are making significant contributions to address health disparities for women who are facing language, racial, geographical and other socioeconomic barriers. “The fact that African American women are 3 to 4 times more likely to die in and around childbirth than their white counterparts - that struck a chord with me,” said Ameenah Jackson, an FNU nurse-midwifery student, in the video. Jackson, along with hundreds of other FNU students, is the future of quality care for women who, before now, have not felt heard or valued by a health care provider. A portion of the video is an interview with a new member of FNU’s Executive Leadership Team. Dr. Maria Valentin-Welch, DNP, MPH, CDP, CNM, FACNM, was brought on board in September 2017 as the inaugural Chief Diversity and Inclusion Officer (CDIO). Valentin-Welch’s position is designed to guide FNU on matters of equity, diversity and inclusion. Together with the president, dean, chief operations officer, chief advancement officer, and executive vice president for finance and facilities, the CDIO will lead the development of a vision and strategy that champions the importance of a diverse and inclusive environment that values and supports all members of the University community. With over 30 years of teaching experience, Dr. Valentin-Welch is working from the classroom outward in strategizing diversity initiatives for FNU.
One such conversation took place in June 2018 between FNU President Dr. Susan Stone, DNSc, CNM, FACNM, FAAN, a certified nurse-midwife and President of the American College of Nurse-Midwives (ACNM), and Andrew Bennie, Product Director at Springer Publishing Group and guest host of the weekly “Nursecast” podcast series. Dr. Stone and Bennie’s discussion tackled the question: “Why is Maternal Mortality Growing in the United States?” In the eighteen-minute podcast, Dr. Stone pinpoints a lack of racially-concordant care as a culprit. According to the data, 700 women across the U.S. die of pregnancy complications per year, while 50,000 cases are near misses. Many of those cases are disproportionately correlated with race. Dr. Stone explains that patients are more receptive to care from a health provider who understands their culture and socioeconomic background. Currently, only 6% of midwives in the United States are women or men of color. FNU’s initiative is to diversify not only the field of midwifery, but the healthcare workforce as a whole. “Today about 22% of FNU students are men and women of color – up from just 9% in 2010,” said Dr. Stone. Each population in America’s melting pot, formed by race, socioeconomic status, sexual orientation, language, or a combination of other factors, will see better health outcomes with culturally-concordant healthcare providers and models. Institutions like Frontier Nursing University are equipping and encouraging their students to answer the call to make these underrepresented populations feel heard, valued and served.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright INRIA
# Contributors: Nicolas P. Rougier ([email protected])
#
# DANA is a computing framework for the simulation of distributed,
# asynchronous, numerical and adaptive models.
#
# This software is governed by the CeCILL license under French law and abiding
# by the rules of distribution of free software. You can use, modify and/ or
# redistribute the software under the terms of the CeCILL license as circulated
# by CEA, CNRS and INRIA at the following URL
# http://www.cecill.info/index.en.html.
#
# As a counterpart to the access to the source code and rights to copy, modify
# and redistribute granted by the license, users are provided only with a
# limited warranty and the software's author, the holder of the economic
# rights, and the successive licensors have only limited liability.
#
# In this respect, the user's attention is drawn to the risks associated with
# loading, using, modifying and/or developing or reproducing the software by
# the user in light of its specific status of free software, that may mean that
# it is complicated to manipulate, and that also therefore means that it is
# reserved for developers and experienced professionals having in-depth
# computer knowledge. Users are therefore encouraged to load and test the
# software's suitability as regards their requirements in conditions enabling
# the security of their systems and/or data to be ensured and, more generally,
# to use and operate it in the same conditions as regards security.
#
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL license and that you accept its terms.
# -----------------------------------------------------------------------------
from dana import *

n = 50
k = 2.5
G = zeros((n,n), 'dV/dt = k*(N/4-V); N')

# 4-neighbour (north/south/east/west) kernel: each unit relaxes towards the mean of its neighbours.
K = np.zeros((3,3))*np.NaN
K[0,1] = K[1,0] = K[1,2] = K[2,1] = 1
print K
SparseConnection(G('V'), G('N'), K)

t, dt = 600.0, 0.1
for i in range(int(t/dt)):
    G.evaluate(dt=dt)
    # Boundary conditions: one edge clamped to 0, the other three to 1.
    G.V[0,:] = 0
    G.V[:,n-1] = G.V[n-1,:] = G.V[:,0] = 1

fig = plt.figure(figsize=(10,7.5))
plt.imshow(G.V, cmap=plt.cm.hot, origin='lower', interpolation='bicubic',
           vmin=0, vmax=1)
plt.colorbar()
CS = plt.contour(G.V, 10, colors='k')
plt.clabel(CS, inline=1, fontsize=16)
plt.grid(), plt.show()
After months of speculation, it's finally official: Ryan Coogler will direct Marvel's "Black Panther" movie. Marvel has just announced that Coogler will direct the standalone "Black Panther" feature, due to hit theaters on Feb. 16, 2018. Coogler joins the already-cast lead actor Chadwick Boseman, who is playing the titular character T'Challa, aka Black Panther. The superhero was previously revealed in the trailer for "Captain America: Civil War," in which Black Panther will also make his big debut. Marvel President Kevin Feige said in the announcement, "The talents Ryan showcased in his first two films easily made him our top choice to direct 'Black Panther.' Many fans have waited a long time to see Black Panther in his own film, and with Ryan we know we've found the perfect director to bring T'Challa's story to life." Our only lingering question now is whether actor Michael B. Jordan will also find a role in this feature. Jordan starred in Coogler's two other critical successes, "Creed" and "Fruitvale Station." Who knows? Either way, we are excited to see Boseman don the black suit as Black Panther in "Civil War" this May.
from time import sleep

try:
    from rrb3 import RRB3
except ImportError:
    RRB3 = None


class Motor:

    settings = {}
    uis = {}
    rr = None

    if RRB3 is not None:
        rr = RRB3(8, 6)
        settings['drive'] = 'active'
        uis['drive'] = 'active'
    else:
        print(" - No RaspiRover library available.")
        settings['drive'] = 'disabled'
        uis['drive'] = 'disabled'

    def __init__(self, start_settings):
        self.settings['motor_mode'] = 'immobile'
        self.can_rotate_in_place = False

    def test(self):
        # Briefly drive both tracks forward, then stop.
        if self.rr is None:
            print(" - Drive test skipped: no motor board available.")
            return
        track_right = 0.75
        track_left = 0.75
        self.rr.set_motors(track_right, 0.5, track_left, 0.5)
        sleep(2)
        self.rr.set_motors(0, 0.5, 0, 0.5)

    def set_setting(self, setting_name, new_value, category, specs):
        print(str(specs))
        if setting_name not in specs:
            return 0
        old_value = self.settings.get(setting_name)
        print(' ? Old value = ' + str(old_value))
        if specs[setting_name]['type'] == 'int':
            self.settings[setting_name] = int(new_value)
        elif specs[setting_name]['type'] == 'float':
            self.settings[setting_name] = float(new_value)
        elif specs[setting_name]['type'] == 'bool' and new_value.upper() == 'TRUE':
            self.settings[setting_name] = True
        elif specs[setting_name]['type'] == 'bool' and new_value.upper() == 'FALSE':
            self.settings[setting_name] = False
        else:
            self.settings[setting_name] = new_value
        # Does this change need a page redraw?
        if 'refresh' in specs[setting_name]:
            return 1
        return 0

    def get_settings(self):
        return self.settings
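A minimal usage sketch, not part of the original module; it simply constructs the class above, runs the drive test, and prints the current settings:

# Hypothetical usage of the Motor class defined above.
motor = Motor({})            # start_settings is accepted but the defaults are applied
motor.test()                 # briefly drives both tracks if an RRB3 board is present
print(motor.get_settings())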
According to Screen Daily, Tom Hardy is currently in talks to play the infamous British gangsters Ronald and Reginald Kray in a crime thriller. The working title of the movie is Legend, written and directed by Brian Helgeland, whose previous credits include the critically acclaimed pictures L.A. Confidential and Mystic River. Helgeland talked about his upcoming movie at a Writers Guild of America event in LA in October last year, after he spent time researching the project in London, where he met Krays’ associate Freddie Foreman. The 36-year-old Hardy confirmed the talks in an interview with MailOnline: 'All the plans are on the table. There's a lot of crossing the t's and dotting the i's, and there's a lot of shift and geography to work out. It would be difficult. It's quite technical and I'm a bit of an anorak'. The lives of the Kray brothers have already been on screen in the 1990 feature movie ‘The Krays’, which starred Spandau Ballet’s Martin and Gary Kemp as the gangsters. The Kray twins were involved in a series of armed robberies, arson, protection rackets, assaults, and several murders with their gang The Firm during the '50s and '60s, and were owners of the West End nightclub Esmeralda's Barn. They were arrested on 9 May 1968 and were both sentenced to life imprisonment. Ronnie remained in Broadmoor Hospital until his death on 17 March 1995, but Reggie was released from prison on compassionate grounds in August 2000, eight weeks before his death from cancer. Hardy is already set to star in the reboot of Mad Max in 2015, and he will also be playing the role of Elton John in the forthcoming biopic titled Rocketman. It was recently reported that the actor will also be joining the cast of the BBC drama Peaky Blinders for its second season.
# =============================================================================== # Copyright 2015 Jake Ross # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # =============================================================================== import hashlib import os import shutil from datetime import datetime from apptools.preferences.preference_binding import bind_preference from git.exc import GitCommandError # ============= enthought library imports ======================= from sqlalchemy.exc import OperationalError, DatabaseError from traits.api import Instance, Bool, Str from uncertainties import std_dev, nominal_value from yaml import YAMLError from pychron.core.helpers.binpack import encode_blob, pack from pychron.core.yaml import yload from pychron.dvc import dvc_dump, analysis_path, repository_path, NPATH_MODIFIERS from pychron.experiment.automated_run.persistence import BasePersister from pychron.git_archive.repo_manager import GitRepoManager from pychron.paths import paths from pychron.pychron_constants import DVC_PROTOCOL, NULL_STR, ARGON_KEYS, ARAR_MAPPING, EXTRACTION_ATTRS, \ META_ATTRS, NULL_EXTRACT_DEVICES, POSTCLEANUP, PRECLEANUP, CLEANUP, EXTRACT_UNITS, EXTRACT_VALUE, DURATION, WEIGHT, \ CRYO_TEMP def format_repository_identifier(project): return project.replace('/', '_').replace('\\', '_') def spectrometer_sha(settings, src, defl, gains): sha = hashlib.sha1() for d in settings + (src, defl, gains): for k, v in sorted(d.items()): sha.update(k.encode('utf-8')) sha.update(str(v).encode('utf-8')) return sha.hexdigest() class DVCPersister(BasePersister): active_repository = Instance(GitRepoManager) dvc = Instance(DVC_PROTOCOL) use_isotope_classifier = Bool(False) use_uuid_path_name = Bool(True) # isotope_classifier = Instance(IsotopeClassifier, ()) stage_files = Bool(True) default_principal_investigator = Str _positions = None save_log_enabled = Bool(False) arar_mapping = None def __init__(self, bind=True, *args, **kw): super(DVCPersister, self).__init__(*args, **kw) if bind: bind_preference(self, 'use_uuid_path_name', 'pychron.experiment.use_uuid_path_name') self._load_arar_mapping() def per_spec_save(self, pr, repository_identifier=None, commit=False, commit_tag=None, push=True): self.per_spec = pr if repository_identifier: self.initialize(repository_identifier, False) self.pre_extraction_save() self.pre_measurement_save() self.post_extraction_save() self.post_measurement_save(commit=commit, commit_tag=commit_tag, push=push) def push(self): # push changes self.dvc.push_repository(self.active_repository) # push commit self.dvc.meta_push() def initialize(self, repository, pull=True): """ setup git repos. repositories are guaranteed to exist. The automated run factory clones the required projects on demand. 
:return: """ self.debug('^^^^^^^^^^^^^ Initialize DVCPersister {} pull={}'.format(repository, pull)) self.dvc.initialize() repository = format_repository_identifier(repository) self.active_repository = repo = GitRepoManager() root = repository_path(repository) repo.open_repo(root) remote = 'origin' if repo.has_remote(remote) and pull: self.info('pulling changes from repo: {}'.format(repository)) try: repo.pull(remote=remote, use_progress=False, use_auto_pull=self.dvc.use_auto_pull) except GitCommandError: self.warning('failed pulling changes') self.debug_exception() def pre_extraction_save(self): pass def post_extraction_save(self): self.info('================= post extraction save started =================') per_spec = self.per_spec rblob = per_spec.response_blob # time vs measured response oblob = per_spec.output_blob # time vs %output sblob = per_spec.setpoint_blob # time vs requested gp = per_spec.grain_polygons if rblob is not None: rblob = encode_blob(rblob) if oblob is not None: oblob = encode_blob(oblob) if sblob is not None: sblob = encode_blob(sblob) if gp: gp = [encode_blob(g) for g in gp] obj = {'measured_response': rblob, 'requested_output': oblob, 'setpoint_stream': sblob, 'snapshots': per_spec.snapshots, 'videos': per_spec.videos, 'grain_polygons': gp, 'extraction_context': per_spec.extraction_context} pid = per_spec.pid if pid: obj['pid'] = pid for e in EXTRACTION_ATTRS: v = getattr(per_spec.run_spec, e) obj[e] = v if not per_spec.positions: ps = [dict()] else: ps = [] for i, pp in enumerate(per_spec.positions): pos, x, y, z = None, None, None, None if isinstance(pp, tuple): if len(pp) == 2: x, y = pp elif len(pp) == 3: x, y, z = pp else: pos = pp try: ep = per_spec.extraction_positions[i] x = ep[0] y = ep[1] if len(ep) == 3: z = ep[2] except IndexError: self.debug('no extraction position for {}'.format(pp)) except TypeError: self.debug('invalid extraction position') try: pos = int(pos) except BaseException: pos = None pd = {'x': x, 'y': y, 'z': z, 'position': pos, 'is_degas': per_spec.run_spec.identifier == 'dg'} ps.append(pd) obj['positions'] = ps self._positions = ps hexsha = self.dvc.get_meta_head() obj['commit'] = str(hexsha) path = self._make_path(modifier='extraction') dvc_dump(obj, path) self.info('================= post extraction save finished =================') def pre_measurement_save(self): pass def post_measurement_save(self, commit=True, commit_tag='COLLECTION', push=True): """ save - analysis.json - analysis.monitor.json check if unique spectrometer.json commit changes push changes :return: """ self.info('================= post measurement save started =================') ret = True ar = self.active_repository # save spectrometer spec_sha = self._get_spectrometer_sha() spec_path = os.path.join(ar.path, '{}.json'.format(spec_sha)) if not os.path.isfile(spec_path): self._save_spectrometer_file(spec_path) # self.dvc.meta_repo.save_gains(self.per_spec.run_spec.mass_spectrometer, # self.per_spec.gains) # save analysis if not self.per_spec.timestamp: timestamp = datetime.now() else: timestamp = self.per_spec.timestamp # check repository identifier before saving # will modify repository to NoRepo if repository_identifier does not exist self._check_repository_identifier() self._save_analysis(timestamp) # save monitor self._save_monitor() # save peak center self._save_peak_center(self.per_spec.peak_center) # stage files dvc = self.dvc if self.stage_files: if commit: try: ar.smart_pull(accept_their=True) paths = [spec_path, ] + [self._make_path(modifier=m) for m in 
NPATH_MODIFIERS] for p in paths: if os.path.isfile(p): ar.add(p, commit=False) else: self.debug('not at valid file {}'.format(p)) # commit files ar.commit('<{}>'.format(commit_tag)) # commit default data reduction add = False p = self._make_path('intercepts') if os.path.isfile(p): ar.add(p, commit=False) add = True p = self._make_path('baselines') if os.path.isfile(p): ar.add(p, commit=False) add = True if add: ar.commit('<ISOEVO> default collection fits') for pp, tag, msg in (('blanks', 'BLANKS', 'preceding {}'.format(self.per_spec.previous_blank_runid)), ('icfactors', 'ICFactor', 'default')): p = self._make_path(pp) if os.path.isfile(p): ar.add(p, commit=False) ar.commit('<{}> {}'.format(tag, msg)) if push: # push changes dvc.push_repository(ar) # update meta dvc.meta_pull(accept_our=True) dvc.meta_commit('repo updated for analysis {}'.format(self.per_spec.run_spec.runid)) if push: # push commit dvc.meta_push() except GitCommandError as e: self.warning(e) if self.confirmation_dialog('NON FATAL\n\n' 'DVC/Git upload of analysis not successful.' 'Do you want to CANCEL the experiment?\n', timeout_ret=False, timeout=30): ret = False with dvc.session_ctx(): try: ret = self._save_analysis_db(timestamp) and ret except DatabaseError as e: self.warning_dialog('Fatal Error. Cannot save analysis to database. Cancelling ' 'experiment. {}'.format(e)) ret = False self.info('================= post measurement save finished =================') return ret def save_run_log_file(self, path): if self.save_enabled and self.save_log_enabled: self.debug('saving run log file') npath = self._make_path('logs', '.log') shutil.copyfile(path, npath) ar = self.active_repository ar.smart_pull(accept_their=True) ar.add(npath, commit=False) ar.commit('<COLLECTION> log') self.dvc.push_repository(ar) # private def _load_arar_mapping(self): """ Isotope: IsotopeKey example arar_mapping.yaml { Ar40: 'Ar40', Ar39: 'Ar39', Ar38: 'Ar38', Ar37: 'Ar37', Ar36: 'Ar36L1' } :return: """ p = os.path.join(paths.setup_dir, 'arar_mapping.yaml') if os.path.isfile(p): self.debug('loading arar mapping from {}'.format(p)) # with open(p, 'r') as rfile: try: obj = yload(p) except YAMLError: obj = {} for k in ARGON_KEYS: if k not in obj: self.warning('Invalid arar_mapping.yaml file. required keys={}'.format(ARGON_KEYS)) return self.arar_mapping = obj def _check_repository_identifier(self): repo_id = self.per_spec.run_spec.repository_identifier db = self.dvc.db repo = db.get_repository(repo_id) if repo is None: self.warning('No repository named ="{}" changing to NoRepo'.format(repo_id)) self.per_spec.run_spec.repository_identifier = 'NoRepo' repo = db.get_repository('NoRepo') if repo is None: db.add_repository('NoRepo', self.default_principal_investigator) def _save_analysis_db(self, timestamp): ps = self.per_spec rs = ps.run_spec d = {k: getattr(rs, k) for k in ('uuid', 'analysis_type', 'aliquot', 'increment', 'mass_spectrometer', WEIGHT, CLEANUP, PRECLEANUP, POSTCLEANUP, CRYO_TEMP, DURATION, EXTRACT_VALUE, EXTRACT_UNITS)} d['comment'] = rs.comment[:200] if rs.comment else '' ed = rs.extract_device if ed in NULL_EXTRACT_DEVICES: d['extract_device'] = 'No Extract Device' else: d['extract_device'] = ed d['timestamp'] = timestamp # save script names d['measurementName'] = ps.measurement_name d['extractionName'] = ps.extraction_name d['experiment_type'] = self.per_spec.experiment_type db = self.dvc.db an = db.add_analysis(**d) if an is None: self.warning('Failed adding analysis to database. 
See full log for error') return # save currents self._save_currents(an) # for iso in ps.isotope_group.isotopes.values(): # self.add_current(iso) # db.add_analysis_result(an, iso) # save media if ps.snapshots: for p in ps.snapshots: db.add_media(p, an) if ps.videos: for p in ps.videos: db.add_media(p, an) if self._positions: if rs.load_name and rs.load_name != NULL_STR: load_name = rs.load_name load_holder = rs.load_holder db.add_load(load_name, load_holder, rs.username) db.flush() db.commit() for position in self._positions: self.debug('adding measured position {}'.format(position)) if not db.add_measured_position(an, load=load_name, **position): self.warning('failed adding position {}, load={}'.format(position, load_name)) # all associations are handled by the ExperimentExecutor._retroactive_experiment_identifiers # *** _retroactive_experiment_identifiers is currently disabled *** if ps.use_repository_association: db.add_repository_association(rs.repository_identifier, an) self.debug('get identifier "{}"'.format(rs.identifier)) pos = db.get_identifier(rs.identifier) self.debug('setting analysis irradiation position={}'.format(pos)) if pos is None: an.simple_identifier=int(rs.identifier) else: an.irradiation_position = pos t = ps.tag db.flush() change = db.add_analysis_change(tag=t) an.change = change db.commit() return True def _save_currents(self, dban): dvc = self.dvc if dvc.update_currents_enabled: ps = self.per_spec db = dvc.db for key, iso in ps.isotope_group.isotopes.items(): param = db.add_parameter('{}_intercept'.format(key)) db.add_current(dban, iso.value, iso.error, param, iso.units) param = db.add_parameter('{}_blank'.format(key), iso.blank.units) db.add_current(dban, iso.blank.value, iso.blank.error, param, iso.blank.units) param = db.add_parameter('{}_bs_corrected'.format(key)) v = iso.get_baseline_corrected_value() db.add_current(dban, nominal_value(v), std_dev(v), param, iso.units) param = db.add_parameter('{}_ic_corrected'.format(key)) v = iso.get_ic_corrected_value() db.add_current(dban, nominal_value(v), std_dev(v), param, iso.units) param = db.add_parameter(key) v = iso.get_non_detector_corrected_value() db.add_current(dban, nominal_value(v), std_dev(v), param, iso.units) param = db.add_parameter(iso.baseline.name) db.add_current(dban, iso.baseline.value, iso.baseline.error, param, iso.baseline.units) param = db.add_parameter('{}_n'.format(iso.baseline.name)) db.add_current(dban, iso.baseline.n, None, param, 'int') param = db.add_parameter('{}_n'.format(iso.name)) db.add_current(dban, iso.n, None, param, 'int') def _save_analysis(self, timestamp): isos = {} dets = {} signals = [] baselines = [] sniffs = [] blanks = {} intercepts = {} cbaselines = {} icfactors = {} endianness = '>' per_spec = self.per_spec source = {'emission': per_spec.emission, 'trap': per_spec.trap} clf = None if self.use_isotope_classifier: clf = self.application.get_service('pychron.classifier.isotope_classifier.IsotopeClassifier') for key, iso in per_spec.isotope_group.items(): sblob = encode_blob(iso.pack(endianness, as_hex=False)) snblob = encode_blob(iso.sniff.pack(endianness, as_hex=False)) for ss, blob in ((signals, sblob), (sniffs, snblob)): d = {'isotope': iso.name, 'detector': iso.detector, 'blob': blob} ss.append(d) detector = next((d for d in per_spec.active_detectors if d.name == iso.detector), None) isod = {'detector': iso.detector, 'name': iso.name, 'serial_id': detector.serial_id if detector else '00000'} if clf is not None: klass, prob = clf.predict_isotope(iso) 
isod.update(classification=klass, classification_probability=prob) isos[key] = isod if iso.detector not in dets: bblob = encode_blob(iso.baseline.pack(endianness, as_hex=False)) baselines.append({'detector': iso.detector, 'blob': bblob}) dets[iso.detector] = {'deflection': per_spec.defl_dict.get(iso.detector), 'gain': per_spec.gains.get(iso.detector)} icfactors[iso.detector] = {'value': float(nominal_value(iso.ic_factor or 1)), 'error': float(std_dev(iso.ic_factor or 0)), 'fit': 'default', 'references': []} cbaselines[iso.detector] = {'fit': iso.baseline.fit, 'error_type': iso.baseline.error_type, 'filter_outliers_dict': iso.baseline.filter_outliers_dict, 'value': float(iso.baseline.value), 'error': float(iso.baseline.error)} intercepts[key] = {'fit': iso.fit, 'error_type': iso.error_type, 'filter_outliers_dict': iso.filter_outliers_dict, 'value': float(iso.value), 'error': float(iso.error)} blanks[key] = {'fit': 'previous', 'error_type': '', 'references': [{'record_id': per_spec.previous_blank_runid, 'exclude': False}], 'value': float(iso.blank.value), 'error': float(iso.blank.error)} obj = self._make_analysis_dict() from pychron.version import __version__ as pversion from pychron.experiment import __version__ as eversion from pychron.dvc import __version__ as dversion obj['timestamp'] = timestamp.isoformat() obj['collection_version'] = '{}:{}'.format(eversion, dversion) obj['acquisition_software'] = 'pychron {}'.format(pversion) obj['data_reduction_software'] = 'pychron {}'.format(pversion) obj['environmental'] = {'lab_temperatures': per_spec.lab_temperatures, 'lab_humiditys': per_spec.lab_humiditys, 'lab_pneumatics': per_spec.lab_pneumatics} obj['laboratory'] = per_spec.laboratory obj['instrument_name'] = per_spec.instrument_name obj['analyst_name'] = per_spec.run_spec.username obj['whiff_result'] = per_spec.whiff_result obj['detectors'] = dets obj['isotopes'] = isos obj['spec_sha'] = self._get_spectrometer_sha() obj['intensity_scalar'] = per_spec.intensity_scalar obj['source'] = source # save the conditionals obj['conditionals'] = [c.to_dict() for c in per_spec.conditionals] if \ per_spec.conditionals else None obj['tripped_conditional'] = per_spec.tripped_conditional.result_dict() if \ per_spec.tripped_conditional else None # save the scripts ms = per_spec.run_spec.mass_spectrometer for si in ('measurement', 'extraction', 'post_measurement', 'post_equilibration', 'hops'): name = getattr(per_spec, '{}_name'.format(si)) blob = getattr(per_spec, '{}_blob'.format(si)) if name: self.dvc.meta_repo.update_script(ms, name, blob) obj[si] = name # save keys for the arar isotopes akeys = self.arar_mapping if akeys is None: akeys = ARAR_MAPPING obj['arar_mapping'] = akeys # save experiment self.debug('---------------- Experiment Queue saving disabled') # self.dvc.update_experiment_queue(ms, self.per_spec.experiment_queue_name, # self.per_spec.experiment_queue_blob) self._save_macrochron(obj) hexsha = str(self.dvc.get_meta_head()) obj['commit'] = hexsha # dump runid.json p = self._make_path() dvc_dump(obj, p) p = self._make_path(modifier='intercepts') dvc_dump(intercepts, p) # dump runid.blank.json p = self._make_path(modifier='blanks') dvc_dump(blanks, p) p = self._make_path(modifier='baselines') dvc_dump(cbaselines, p) p = self._make_path(modifier='icfactors') dvc_dump(icfactors, p) # dump runid.data.json p = self._make_path(modifier='.data') data = {'commit': hexsha, 'encoding': 'base64', 'format': '{}ff'.format(endianness), 'signals': signals, 'baselines': baselines, 'sniffs': sniffs} 
dvc_dump(data, p) def _save_macrochron(self, obj): pass def _save_monitor(self): if self.per_spec.monitor: p = self._make_path(modifier='monitor') checks = [] for ci in self.per_spec.monitor.checks: data = encode_blob(pack('>ff', ci.data)) params = dict(name=ci.name, parameter=ci.parameter, criterion=ci.criterion, comparator=ci.comparator, tripped=ci.tripped, data=data) checks.append(params) dvc_dump(checks, p) def _save_spectrometer_file(self, path): obj = dict(spectrometer=dict(self.per_spec.spec_dict), gains=dict(self.per_spec.gains), deflections=dict(self.per_spec.defl_dict), settings=self.per_spec.settings) # hexsha = self.dvc.get_meta_head() # obj['commit'] = str(hexsha) dvc_dump(obj, path) def _save_peak_center(self, pc): self.info('DVC saving peakcenter') p = self._make_path(modifier='peakcenter') if pc: fmt = '>ff' obj = {'reference_detector': pc.reference_detector.name, 'reference_isotope': pc.reference_isotope, 'fmt': fmt, 'interpolation': pc.interpolation_kind if pc.use_interpolation else ''} results = pc.get_results() if results: for result in results: points = encode_blob(pack(fmt, result.points)) obj[result.detector] = {'low_dac': result.low_dac, 'center_dac': result.center_dac, 'high_dac': result.high_dac, 'low_signal': result.low_signal, 'center_signal': result.center_signal, 'high_signal': result.high_signal, 'resolution': result.resolution, 'low_resolving_power': result.low_resolving_power, 'high_resolving_power': result.high_resolving_power, 'points': points} dvc_dump(obj, p) def _make_path(self, modifier=None, extension='.json'): runid = self.per_spec.run_spec.runid uuid = self.per_spec.run_spec.uuid repository_identifier = self.per_spec.run_spec.repository_identifier if self.use_uuid_path_name: name = uuid, uuid else: name = runid, runid return analysis_path(name, repository_identifier, modifier, extension, mode='w') def _make_analysis_dict(self, keys=None): if keys is None: keys = META_ATTRS def get(ki): obj = self.per_spec if not hasattr(obj, ki): obj = self.per_spec.run_spec try: return getattr(obj, ki) except AttributeError as e: self.warning('Attribute error: attr={}, error={}'.format(ki, e)) d = {k: get(k) for k in keys} return d def _get_spectrometer_sha(self): """ return a sha-1 hash. generate using spec_dict, defl_dict, and gains spec_dict: source parameters, cdd operating voltage defl_dict: detector deflections gains: detector gains make hash using for key,value in dictionary: sha1.update(key) sha1.update(value) to ensure consistence, dictionaries are sorted by key for key,value in sorted(dictionary) :return: """ return spectrometer_sha(self.per_spec.settings, self.per_spec.spec_dict, self.per_spec.defl_dict, self.per_spec.gains) # ============= EOF ============================================= # self._save_measured_positions() # # # def _save_measured_positions(self): # dvc = self.dvc # # load_name = self.per_spec.load_name # for i, pp in enumerate(self.per_spec.positions): # if isinstance(pp, tuple): # if len(pp) > 1: # if len(pp) == 3: # dvc.add_measured_position('', load_name, x=pp[0], y=pp[1], z=pp[2]) # else: # dvc.add_measured_position('', load_name, x=pp[0], y=pp[1]) # else: # dvc.add_measured_position(pp[0], load_name) # # else: # dbpos = dvc.add_measured_position(pp, load_name) # try: # ep = self.per_spec.extraction_positions[i] # dbpos.x = ep[0] # dbpos.y = ep[1] # if len(ep) == 3: # dbpos.z = ep[2] # except IndexError: # self.debug('no extraction position for {}'.format(pp))
By participating in iCount Panel for males ages 18-34, you will have the opportunity to shape decisions on some of your favorite programs and provide insight into many other TV- and media-related topics. To get started, click here. I'm Louida from Atlanta, Georgia, a mother of two daughters and a full-time blogger/influencer. I love helping others learn how to start working from home online, for free, to help supplement their current income. Hey! I'm Louida (Lou-why-da) Martin, but I like to go by Lou for short. I was born and raised in Northern California, and now reside in the Greater Atlanta, Georgia area with my two teen girls and high school sweetheart. I created this blog to show people that there are ways to make money online for free. No need to take on a second job. Disclosure: EarningFreeMoney.com earns income through affiliate marketing programs; this does not affect the views expressed in this content. Results may vary on how much you make with the programs advertised on this blog.
'''
This portion of the program sends an SMS message to the given number.  This is one way the program will communicate with the user.  Another potential way is through a push notification to a phone if this was a phone app.  I noticed that Chrome supports browser based push notifications that are really nice and non obtrusive.  Maybe use that instead.
'''
#TODO: 2 Figure out how Facebook sends push notification to the browser. This might go under web_frontend.py
#TODO: 1 Return the status of each message sent. This would be higher priority in a product sold for money.

from twilio.rest import TwilioRestClient


class send_SMS():
    """
    Uses Twilio to send an SMS to a given number
    """
    def __init__(self):
        """
        Initializes self.client which is responsible for sending and receiving SMS in Twilio
        """
        self.account_sid = "AC7162b52b47f7383aec3ad400e9cc40e4"
        self.auth_token = "c4dbfa0c3ed665fd699eac2f99d4976e"
        self.client = TwilioRestClient(self.account_sid, self.auth_token)

    def send(self, number, SMSbody):
        """
        Sends SMS to a phone number corresponding to the number variable
        takes: number in the "+1XXXXXXX" format. SMSbody which should be <140 characters
        returns: nothing
        """
        self.message = self.client.messages.create(to=number,
                                                    from_="+19378136340",
                                                    body=SMSbody)
        self.receivedSMS = self.client.messages.list(to='19378136340')
        print(self.receivedSMS)


obj = send_SMS()
obj.send('+19373295511', 'Test message')  # example body; send() requires both arguments
The next stop for partypoker MILLIONS LIVE is Sochi, Russia. It is the first partypoker MILLIONS LIVE event in the post-World Series of Poker portion of the calendar. In total, 690m RUB ($10.95m USD) is guaranteed across the series prize pools, including a 300m RUB ($4.75m USD) guaranteed Main Event. A total of 13 events are on the schedule with buy-ins ranging from 66,000 RUB up to 6 million RUB. Dmitry Chop won the first edition of the partypoker MILLIONS in Russia. The $1,100 USD event attracted 1,170 entries, breaking the $1 million USD guarantee, and Chop took home the largest share of the prize pool thanks to his $225,000 victory. partypoker’s tradition of online and live flights for its Main Events holds to form in Russia. Three live Day 1s and an online Day 1 on August 5 dot the schedule. Live Day 1A starts on August 7 and plays for eight 60-minute levels with the traditional starting stack of 1 million chips. Players are incentivized to use PP LIVE $$$ to buy in to the Main Event. Any player among the first 100 to enter using that method is entered into a last longer funded by partypoker. The final player standing among those who enter using PP LIVE $$$ wins an additional $100,000 on top of their original payout. Other primary events on the schedule include the 66,000 RUB partypoker Open, the 3 million RUB Triton High Roller and the 6 million RUB Triton Super High Roller. Triton is currently in Jeju, South Korea for the latest edition of the Triton Super High Roller Series. Short Deck poker is sweeping the poker world thanks to its use in the Triton Series. No news is available yet on whether or not the Triton events in Sochi will use the popular format. The full series runs from August 3 until August 14.
"""this module makes the connection to fenixedu information and ScheduleMaker""" import webbrowser from datetime import datetime import fenixedu class Login(): """ Login - asks authorization from user and his code""" def __init__(self): config = fenixedu.FenixEduConfiguration.fromConfigFile('fenixedu.ini') self.client = fenixedu.FenixEduClient(config) self.year = datetime.now().hour url = self.client.get_authentication_url() webbrowser.open_new(url) code = input("please insert the code that is presented to you on the browser:") self.user = self.client.get_user_by_code(code) def getClient(self): """returns login's client""" return self.client def getUser(self): """returns login's user""" return self.user def getCoursesUrls(self): """ get courses list of urls and of it's ids """ data = self.client.get_person_courses(self.user) urls = [] ids = [] for i in range(len(data['enrolments'])): urls += [data['enrolments'][i]['url']] ids += [data['enrolments'][i]['id']] return (urls, ids) def getDegrees(self): """ returns a tuple consisting of offered degrees and it's ids """ data = self.client.get_degrees() degrees = [] ids = [] for i in range(len(data)): degrees += [data[i]['name']] ids += [data[i]['id']] return (degrees, ids)
Reiki Awakening: Am I a Healer? "Any person who attempts to help another living being to transform them to a greater state of health is a healer. They don't need a medical degree because the energy used doesn't come from a pharmaceutical company or a corporation (at least not yet!). Reiki is the most well known modality of energy healing in the western world. If you ask a person what they do they will say they are a healer before anything else. If you ask a physician what they do they usually will say they are a doctor before they say they are a healer. To say a person who does Reiki is not a healer is like to saying a person who sings is not a singer or a person who drives a car is not a driver. It makes no sense." However, my opinion differs in that there is a connotation of arrogance to saying "I am a healer." It implies that those who I offer healing energy to are passive recipients, rather than participants in their own healing. I prefer to think of myself as an energy worker, one who works with subtle energies, Reiki being primary. I facilitate healing by offering one the opportunity to receive the healing that he or she is ready for. There is a dialogue, a give and take, and the recipient must be willing and open to receiving the Reiki energy. I am not "making you heal" as the word "healer" implies. I am offering healing energy to you, in the hopes that you will receive benefit from the energy. It's a little like teaching, really. Yes, I am a teacher. That word is completely acceptable by the masses as being one who offers knowledge, and who facilitates another's ability to assimilate the knowledge. As a teacher, I have this knowledge, these skills, to offer you, the learner. You can decide that you cannot or will not learn what I am trying to teach you. You can decide that I am not a teacher that you can relate to. In that case, you can choose to find another teacher or subject that best fits with your needs. In this role of teacher, therefore, I am more accurately "a facilitator of your ability to increase or assimilate new knowledge". I can't make you learn any more than I can make you heal. That part is up to you. I can do my best to make the knowledge clear, to adapt to your unique way of learning so that it makes sense to you. So as a "healer" I am attempting to offer you energy that you are able to assimilate into your own, to then benefit from this energy in the ways that make the most sense to your needs. The problem is in the connotation, rather than the definition here. It sounds arrogant to say that I am a healer. I am a healer in that I work with healing energy. I am not a guru, or one who can "make" you heal if you are not ready to heal, or willing to heal on some level. In another sense, we are all healers - people who recover from illnesses. We heal ourselves, so we are healers. But the word "healer" is taken more widely to mean one who heals other people. I don't think I do that. I present the opportunity for others to receive healing energy. That I'm comfortable saying. Do I want you to heal? Yes. I want you to feel better, I want you to enjoy and benefit from the beautiful energy that is flowing through me to you. I am deeply grateful for the ability to be connected to the Source of Reiki energy and to be able to pass that energy to you. I am also extremely grateful for the ability to pass this ability to others who wish to learn to do the same thing. I am a teacher. So you won't find me putting "healer" on my business card. It says Reiki Master Teacher. 
Reiki Master comes with its own connotations, for I am not a "Master of Reiki" (there's that guru connotation again). But it's easily explained that Reiki Master is a level designation that means one has received the proper training and attunements to enable one to teach and attune others. Like having a Master's degree - it is a definition of training. It doesn't make you any more holy or genius than anyone else. So to Debra Katz, and to my Reiki student, MD, I say, I am in the practice of working with healing energy. In that sense I may be a healer, but I will not refer to myself that way for the reasons that I don't want to be mistaken for someone who will heal you. Only you can heal you. I can offer you energy that can help you heal, and will joyfully do so. Lovely! I hear your high self speaking through the human body. Beautiful. Beautifully stated Alice. That is exactly how I feel too! I really appreciate the manner in which you honored the words healer, energy worker, teacher and master. I often struggle to find just the right way in which to talk to others about Reiki and how I envision myself a part of it. I may have to "borrow" your explanation! Your piece carries great energy! I respectfully honor your insights and thank you for sharing them with us all! I completely agree with your thoughts on what a "healer" and a "teacher" do, I have some of my own offerings though. To me anyone who holds a position in which they have had to gain knowledge and/or experience to adequately practice (and even those who have a natural knack for certain things) are facilitators of that. A facilitator can HELP you, but they never make you. It is always your conscious desires and effort that bring about the healing, the learning, etc. And even if you consciously wish to accept something, if you do not accept it on ALL levels it will impede the process. Recently I have just had a long discussion on the use of connotation vs denotation in dealing with other people. My point to my student was this, "When you use a word, you not only deal with your own definition of it, but also the definition of anyone else you are addressing." This can cause much confusion and even debate when people are not on the same page to so speak. I personally refer to myself by my title, as a Reverend & Reiki Master/Teacher, but also in general as a "Spiritual Healer". When I say these two terms I am referring to the fact that I am a spiritual person using a healing technique that is not what one might consider traditional medicine. I do energy work. But I also always sit and speak with my clients and students before embarking on the healing or learning journeys. I explain my philosophy, how I personally work, and feel them out to see if we fit together. Thank you for sharing your insight. I found it very interesting. So wonderful to see you! Thank you for your comment and your support. We are truly sisters in the light. Feel free to borrow or quote anything that resonates with you that I might offer. I'm honored. I love the point on connotation that you make here. That is the point - the connotation is the subjective value of the word, and it's at the whim of the listener. So my intended meaning of "healer" and someone's interpretation through his or her own experiential knowledge or veil of understanding, may be very different. The clearer we are, the better in cases like these. Thank you for sharing your perspectives with us - you enrich my blog. Thank you for gracing my blog with your comment. Come back again. 
Wow, I really enjoyed your blog and the distinction you make as being facilitators of healing. It is so true. As Reiki practitioners, yes, we are healers in a sense that we are conduits. I really enjoyed reading your blog and your point was well stated. As a Reiki practitioner I think of a healer as someone who facilitates healing. For me it doesn't mean that the receiver is passive; they are still active participants and they are responsible for their own healing. Thank you for sharing your thoughts, they made me think about it and clarify it for my own benefit.
# @Float(label="Pixel Size (um)", required=false, value=1) pixel_size # @Float(label="Duration between Two Frames", required=false, value=1) dt # @String(label="Time unit", required=false, value='s', choices={'sec', 'min', 'hours'}) time_unit # @Dataset data # @ImagePlus imp # @ImageJ ij import os import sys from java.io import File from ij.measure import ResultsTable from fiji.plugin.trackmate.visualization.hyperstack import HyperStackDisplayer from fiji.plugin.trackmate.io import TmXmlReader from fiji.plugin.trackmate import Logger from fiji.plugin.trackmate import Settings from fiji.plugin.trackmate import SelectionModel from fiji.plugin.trackmate.providers import DetectorProvider from fiji.plugin.trackmate.providers import TrackerProvider from fiji.plugin.trackmate.providers import SpotAnalyzerProvider from fiji.plugin.trackmate.providers import EdgeAnalyzerProvider from fiji.plugin.trackmate.providers import TrackAnalyzerProvider from fiji.plugin.trackmate.visualization import PerTrackFeatureColorGenerator logger = Logger.IJ_LOGGER ### Open and display tracks dir_path = os.path.dirname(data.getSource()) trackmate_path = os.path.join(dir_path, "Trajectories.xml") stats_path = os.path.join(dir_path, "Statistics.csv") reader = TmXmlReader(File(trackmate_path)) if not reader.isReadingOk(): sys.exit(reader.getErrorMessage()) model = reader.getModel() spots = model.getSpots() trackIDs = model.getTrackModel().trackIDs(True) settings = Settings() detectorProvider = DetectorProvider() trackerProvider = TrackerProvider() spotAnalyzerProvider = SpotAnalyzerProvider() edgeAnalyzerProvider = EdgeAnalyzerProvider() trackAnalyzerProvider = TrackAnalyzerProvider() reader.readSettings(settings, detectorProvider, trackerProvider, spotAnalyzerProvider, edgeAnalyzerProvider, trackAnalyzerProvider) logger.log(str(settings)) sm = SelectionModel(model) displayer = HyperStackDisplayer(model, sm, imp) color = PerTrackFeatureColorGenerator(model, 'TRACK_INDEX') displayer.setDisplaySettings('TrackDisplaymode', 0) displayer.setDisplaySettings('TrackDisplayDepth', 20) displayer.setDisplaySettings('TrackColoring', color) displayer.render() ### Build stats table fm = model.getFeatureModel() table = ResultsTable() for id in model.getTrackModel().trackIDs(True): table.incrementCounter() track = model.getTrackModel().trackSpots(id) table.addValue('Track ID', id) table.addValue('TRACK_DURATION (%s)' % time_unit, fm.getTrackFeature(id, 'TRACK_DURATION') * dt) table.addValue('TRACK_DISPLACEMENT (um)', fm.getTrackFeature(id, 'TRACK_DISPLACEMENT') * pixel_size) table.addValue('TRACK_MEAN_SPEED (um/%s)' % time_unit, fm.getTrackFeature(id, 'TRACK_MEAN_SPEED') * pixel_size / dt) table.addValue('TRACK_MIN_SPEED (um/%s)' % time_unit, fm.getTrackFeature(id, 'TRACK_MIN_SPEED') * pixel_size / dt) table.addValue('TRACK_MAX_SPEED (um/%s)' % time_unit, fm.getTrackFeature(id, 'TRACK_MAX_SPEED') * pixel_size / dt) table.addValue('TRACK_STD_SPEED (um/%s)' % time_unit, fm.getTrackFeature(id, 'TRACK_STD_SPEED') * pixel_size / dt) table.save(stats_path) table.show("Statistics")
JoAnn was bitten by the aviation bug on her first commercial flight at the age of three. A very quiet and shy youngster, she was interested enough to ask the flight attendant if she could see the cockpit. That tour of the cockpit was the start of a healthy obsession with all things aviation related! Always an enthusiast, some of JoAnn’s fondest memories are of taking a day off school and driving with her dad to watch the Blue Angels practice in Imperial Valley. Fast forward 25 years or so to when JoAnn began her Private Pilot training at Gillespie Field, and now she has her Instrument Rating. JoAnn is involved with a few local flying groups and loves her job at High Performance Aircraft. She has been our Treasurer for most of the past seven years and enjoys keeping the chapter on track financially!
from tigreBrowser.database import * import xdrlib class Results: """This class is responsible for getting the results from the database using functions in database.py. """ def __init__(self, db, experiment_set, regulator, target_set, experiment, filters, supplementary_options, search_genes): """Initialization. Does not, however, fetch the results. Parameters: db: database object experiment_set: name of the experiment set regulator: name of the regulator if it exists target_set: name of the target set if it exists experiment: name of the experiment, appended with '_diff' if sorting is to be done by diff. Can also be 'zscore' if sorting is done by z-scores filters: filters that will be applied when fetching and filtering results (see create_filter_query() in database.py for explanation about the parameter type) supplementary_options: highlights will be fetched according to these options (see create_filter_query() in database.py for explanation about the parameter type) search_genes: search these gene names or aliases (list of strings) """ self.__db = db self.__set_id = db.get_experiment_set_id(experiment_set) self.__reg_id = db.get_regulator_id(regulator) self.__set_experiment_ids = db.get_experiment_ids_in_set_recursively(self.__set_id) if target_set: self.__target_ids = db.get_gene_ids_dict(target_set).values() self.__experiment_id = db.get_experiment_id(experiment.replace('_diff', ''), self.__reg_id) self.__sort_by_diff = experiment.endswith('_diff') self.__sort_by_zscore = False if experiment == 'zscore': self.__experiment_id = self.__set_experiment_ids[0] self.__sort_by_zscore = True if not self.__experiment_id: raise Exception('No experiment with the current TF') self.__dataset_id = db.get_experiment_dataset_id(self.__experiment_id) self.__supplementary_annotation_ids_dict = self.__create_supplementary_annotation_ids_dict(self.__reg_id) self.__filters = filters self.__supplementary_options = supplementary_options regulator_experiment_ids = db.get_experiment_ids(self.__reg_id) # If a regulator is defined (TARGET_RANKING), # choose only the experiments in a set that have the given regulator. # Use set intersection. 
if self.__reg_id: self.__set_experiment_ids = list(set(self.__set_experiment_ids) & set(regulator_experiment_ids)) self.__search_gene_ids = None if search_genes: self.__search_gene_ids = db.get_gene_ids_by_name_in_experiments(self.__set_experiment_ids, search_genes) if not self.__set_experiment_ids: raise Exception('No results for the given selection') for (name, symbol, value) in supplementary_options: if name not in self.__supplementary_annotation_ids_dict: raise Exception('Supplementary option %s not available for the given TF' % name) self.__results_all = {} self.__aliases_all = {} self.__params_all = {} self.__supps_all = {} self.__zscores_all = {} self.__probe_names = {} self.__highlights = {} def __create_supplementary_annotation_ids_dict(self, reg_id): d = {} for (name, ann_id) in self.__db.get_supplementary_annotation_ids(reg_id): d[name] = ann_id return d def __parse_results(self, results): self.__experiment_parameter_names = self.__get_experiment_parameter_names() result_gene_ids = set() probe_names = {} results_all = {} params_all = {} for row in results: probe_name = row[0] desc = row[1] likelihood = row[2] baseline_likelihood = row[3] gene_id = row[4] param_values = row[5] exp_id = row[6] result_gene_ids.add(gene_id) results_all.setdefault(gene_id, {})[desc] = likelihood # add results with appended '_baseline' and '_diff' # if baseline is defined to show them in results listing if baseline_likelihood: results_all[gene_id][desc + '_baseline'] = baseline_likelihood results_all[gene_id][desc + '_diff'] = likelihood - baseline_likelihood probe_names[gene_id] = probe_name params_all.setdefault(gene_id, {})[exp_id] = self.__map_params_to_names(exp_id, self.__experiment_parameter_names.get(exp_id, []), param_values) return result_gene_ids, probe_names, results_all, params_all def __query_supplementary_datas(self, gene_ids, supp_ids): supps = self.__db.get_gene_supplementary_datas(gene_ids, supp_ids) d = {} for (supp_id, name, value) in supps: d.setdefault(supp_id, {})[name] = value return d def __query_gene_aliases(self, gene_ids): aliases = self.__db.get_gene_aliases(gene_ids) d = {} for (alias_id, alias_class, alias) in aliases: d.setdefault(alias_id, {}).setdefault(alias_class, []).append(alias) return d def __query_zscores(self, gene_ids, dataset_id): d = {} for (gene_id, zscore) in self.__db.get_z_scores(gene_ids, dataset_id): d[gene_id] = zscore return d def __parse_rdata_double_raw_vector(self, data_buffer): # Python 2.5 and 3.x compatibility code try: header = [ord(x) for x in [data_buffer[0], data_buffer[1]]] except TypeError: header = [data_buffer[0], data_buffer[1]] # RData binary format if header[0] != ord('X') or header[1] != ord('\n'): return None xdr = xdrlib.Unpacker(data_buffer[2:]) xdr.unpack_int() xdr.unpack_int() xdr.unpack_int() xdr.unpack_int() xdr.unpack_int() data = [] while True: try: data.append(xdr.unpack_double()) except EOFError: break return data def __map_params_to_names(self, exp_id, param_names, param_values): if not param_values: return {} param_values = self.__parse_rdata_double_raw_vector(param_values) return dict(zip(param_names, param_values)) def fetch_results(self, number_of_genes, offset): """Fetches the results from the database. 
Parameters: number_of_genes: fetch at most this many genes offset: offset in the results listing Returns: (gene ids for the result genes, total number of genes with results) """ fq, fa = self.__db.create_filter_query(self.__reg_id, self.__filters, [], self.__supplementary_annotation_ids_dict) sq, sa = self.__db.create_sort_query(fq, fa, self.__experiment_id, self.__sort_by_diff, self.__sort_by_zscore) if self.__search_gene_ids != None: gene_ids = self.__search_gene_ids else: gene_ids = self.__db.get_gene_ids_for_results(sq, sa) count = len(gene_ids) # total result count gene_ids = gene_ids[offset:(offset + number_of_genes)] # OFFSET, LIMIT results = self.__db.get_results_for_gene_ids(self.__set_experiment_ids, gene_ids) result_gene_ids, self.__probe_names, self.__results_all, self.__params_all = self.__parse_results(results) if not results: # quick fix for the result count count = 0 # remove gene ids that are in gene_ids but not in result_gene_ids l = gene_ids[:] for gene_id in l: if gene_id not in result_gene_ids: gene_ids.remove(gene_id) self.__supps_all = self.__query_supplementary_datas(gene_ids, self.__supplementary_annotation_ids_dict) self.__aliases_all = self.__query_gene_aliases(gene_ids) self.__zscores_all = self.__query_zscores(gene_ids, self.__dataset_id) self.__highlights = self.__db.get_highlights(self.__supplementary_options, self.__supplementary_annotation_ids_dict, gene_ids) return gene_ids, count def get_experiment_results(self, gene_id): """Returns a dictionary of experiment results for the given gene_id. Returns: {experiment_name, log_likelihood or baseline_log_likelihood} Example: {'GPDISIM_diff': 494.65294095332217, 'GPDISIM': 6.7597099032014309, 'GPDISIM_baseline': -487.89323105012073} """ return self.__results_all.get(gene_id) def get_zscore(self, gene_id): """Returns z-score for the given gene_id. """ return self.__zscores_all.get(gene_id) def get_supplementary_data(self, gene_id): """Returns a dictionary representing supplementary data for the given gene_id. Returns: {supplementary dataset name, value} Example: {'ischip': 1.0, 'chipdist': 1671.0, 'isinsitu': 0.0, 'hasinsitu': 0.0} """ return self.__supps_all.get(gene_id, {}) def get_parameters(self, gene_id): """Returns a dictionary of parameters in different experiments for the given gene id. Returns: {experiment id: {parameter name: parameter value}} Example: {1: {'rbf1_variance/disim1_rbf_variance': 0.59039292169555113, 'disim1_variance': -5.9057868788275316, 'disim1_decay': -3.9377851775471258, 'disim1_di_variance': 0.0, 'Basal1': -8.9106876980653453, 'disim1_di_decay': -4.0190835928767878, 'rbf1_inverseWidth/disim1_inverseWidth': -0.51096542027712455} } """ return self.__params_all.get(gene_id, {}) def get_aliases(self, gene_id): """Returns a dictionary of aliases for the given gene id. Returns: {alias class, [aliases]} Example: {'ENTREZID': [u'38211'], 'SYMBOL': [u'CG12011'], 'FLYBASE': [u'FBgn0035257'], 'GENENAME': [u'CG12011 gene product from transcript CG12011-RA']} """ return self.__aliases_all.get(gene_id, {}) def get_probe_name(self, gene_id): """Returns the probe name for the given gene id. """ return self.__probe_names.get(gene_id) def get_highlights(self): """Gets a dictionary of supplementary dataset names to gene ids that will be highlighted. Returns: {supplementary dataset name: [gene ids]} Example: {'ischip': [4632, 8354, 10609], 'isinsitu': [], 'hasinsitu': [4632, 8354, 10609]} """ return self.__highlights def get_dataset_figure(self): """Gets the template URL to the dataset figure. 
Example: http://www.something.com/something/figures/${probe_name}.png """ return self.__db.get_dataset_figure_filename(self.__dataset_id) def get_experiment_figures(self): """Gets experiment figure annotations. Returns: [(figure id, filename, name, description, priority)] """ return self.__db.get_experiment_figures(self.__set_experiment_ids) def get_alias_annotations(self): """Gets all alias annotations. Returns: [(alias annotation id, alias class, source, description)] """ return self.__db.get_alias_annotations(self.__dataset_id) def get_experiment_names(self): """Gets a dictionary mapping from experiment ids to corresponding names. Returns: {experiment id: experiment name} Example: {1: 'GPDISIM', 2: 'GPSIM'} """ exp_dict = {} results = self.__db.get_experiment_ids_names(self.__set_experiment_ids) for (exp_id, name) in results: exp_dict[exp_id] = name return exp_dict def __get_experiment_parameter_names(self): params_dict = {} for exp_id in self.__set_experiment_ids: names = self.__db.get_experiment_parameter_names(exp_id) if not names: continue names = [name.strip() for name in names.strip().split(',')] params_dict[exp_id] = names return params_dict def get_experiment_parameter_names(self): """Gets a dictionary mapping from experiment ids to a list of parameter names. Returns: {experiment id: [parameter names]} Example: {1: ['rbf1_inverseWidth/disim1_inverseWidth', 'rbf1_variance/disim1_rbf_variance', 'disim1_di_decay', 'disim1_di_variance', 'disim1_decay', 'disim1_variance', 'Basal1'] } """ return self.__experiment_parameter_names def get_all_parameter_names(self): """Gets a list of parameters names in all experiments. Returns: [parameter names] """ all_names = sum(self.__experiment_parameter_names.values(), []) # remove duplicates names = [] [names.append(name) for name in all_names if not names.count(name)] return names
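A hedged usage sketch of the Results API above; the database constructor and all names passed in are placeholders, not values taken from a real tigreBrowser installation:

# Hedged usage sketch of the Results class above; every name here is a placeholder.
from tigreBrowser.database import Database   # assumed constructor name in database.py

db = Database('results.sqlite')               # placeholder database file
results = Results(db,
                  experiment_set='example set',      # placeholder experiment set name
                  regulator=None,
                  target_set=None,
                  experiment='GPDISIM',              # sort by this experiment's likelihood
                  filters=[],
                  supplementary_options=[],
                  search_genes=None)

gene_ids, total = results.fetch_results(number_of_genes=20, offset=0)
print('%d genes in total' % total)
for gene_id in gene_ids:
    print(results.get_probe_name(gene_id), results.get_experiment_results(gene_id))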
Florida’s business-friendly tax rate and strategic location are just two reasons the state’s business climate is ideal for accountants. Not only do you get the benefit of the pro-business environment, but you have your pick of clients, too. However, each client brings risk to your business. That’s why business insurance is a smart investment for accountants. For instance, Florida law requires any non-construction business to carry workers’ compensation insurance when it has four or more employees. This protects your business by covering injured employees’ medical bills and lost wages. Florida also requires CPA firms to have public liability insurance, which is usually included in general liability insurance. Other policies aren’t required by law but they still provide important protections. For example, clients often require professional liability insurance in case an accounting error results in losses to their business. That’s the same reason many commercial landlords put general liability and property insurance requirements in their leases. Insureon makes shopping for insurance easy with industry-leading technology that gathers quotes from top U.S. carriers that fit your needs. Read more to find out which policies are most common for accountants in Florida, and start an application to get your free quotes today. The most common policies for Florida accountants vary depending on a number of factors, including revenue, client contracts, and partnerships. The table below illustrates which policies might work best for your business type. General liability insurance: General liability insurance is the foundation of an accountant’s business protection. Coverage extends to bodily injury, client property damage, advertising injury, and the cost of legal defense. Most commercial leases require you to have this coverage. Florida state law requires CPA firms to carry public liability insurance. This coverage is often included in general liability policies. Unless it’s a sole proprietorship, your firm has to be insured at a minimum of $50,000 per shareholder, officer, member, or partner and any Florida-licensed CPA up to a maximum of $2,000,000. Otherwise, you’ll have to get a signed waiver of limitation on liability. Florida workers’ compensation: In Florida, business owners with fewer than four employees are not required to have workers’ compensation. If an employee is injured on the job, workers’ comp can help pay for medical fees and lost wages. Even for a business with fewer than four employees, it’s worth considering workers’ comp; costly medical fees could quickly outweigh any savings you might have from not purchasing insurance. Accounting may not be a physically demanding job, but data from the Bureau of Labor Statistics shows accountants do periodically suffer work-related injuries. The most common are falls, slips, and trips; overexertion; and bodily reaction injuries, like pinched nerves or carpal tunnel syndrome. At just over $800 a year, workers’ compensation insurance has the highest median premium for accountants in Florida, just barely topping the national cost. General liability insurance is on the other end of the spectrum, typically costing around $500 for Florida accountants. However, bundling your general liability and commercial property coverages into a business owner’s policy can save you around $60 each year.
Insureon’s industry-leading technology helps accountants in Florida save time and money shopping for insurance by comparing policies from dozens of top U.S. carriers. Simply apply online and review quotes for the policies that best fit your business needs. Our insurance specialists are licensed in Florida and can answer your questions as you consider coverage.
#!/usr/bin/env python class Solution(object): def minDistance(self, a, b): """ Returns the edit distance between strings a and b. """ n = len(a) m = len(b) # If either string is empty, we need to add all characters from other # string. if n == 0 or m == 0: return max(n, m) # n x m matrix where each dp[i][j] represents the edit distance for # a[:i + 1] and b[:j + 1]. dp = [([0] * (m + 1)) for i in range(0, n + 1)] for i in range(0, n + 1): for j in range(0, m + 1): if i == 0: dp[i][j] = j elif j == 0: dp[i][j] = i elif a[i - 1] == b[j - 1]: # If the trailing characters are the same, we don't need to # perform an operation to bring these characters in sync. dp[i][j] = dp[i - 1][j - 1] else: dp[i][j] = 1 + \ min(dp[i - 1][j - 1], # Replace a[i] with b[j] dp[i][j - 1], # Add a[i] to b[:j] (Insert) dp[i - 1][j]) # Add b[j] to a[:i] (Delete) return dp[n][m] def main(): print('Please run this solution on LeetCode.') print('https://leetcode.com/problems/edit-distance/') if __name__ == '__main__': main()
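A quick worked check of the DP above, using the standard examples for this problem:

# Worked check of the edit-distance DP above.
s = Solution()
print(s.minDistance("horse", "ros"))            # 3: replace 'h'->'r', delete 'r', delete 'e'
print(s.minDistance("intention", "execution"))  # 5
print(s.minDistance("", "abc"))                 # 3: insert all three characters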
Most of the time we will find cat urine contamination within 3 to 4 feet from the walls. We find the urine using a special day or night light made to spot urine contamination in carpet and other surfaces, such as furniture and walls. Cat urine has a very sharp odor. The smell is usually much stronger than dog urine in most cases. We find that because cats urinate next to walls, the urine will get behind the baseboard and behind the wall, which makes it more difficult to remove the odor completely. Our vapor gassing system guarantees complete odor removal because our gas will penetrate those areas, destroying the odor completely. No other system, that we know of, can find the urine behind walls and other impossible areas like our system can. Is Cat Urine Bad To Breathe Indoors? Cat urine contains a high concentration of ammonia. The cat pee contains hydrogen and nitrogen. A small urine deposit will most likely not affect your breathing, but moderate to large pee deposits can affect your breathing. The risk occurs when either there’s a very strong odor of cat urine in the room or when someone in the household has a respiratory condition, such as asthma or COPD. The amount of ammonia in a room that reeks of cat pee can irritate and eventually harm healthy lungs. Lungs that are already compromised, such as in someone with asthma, may be irritated by even a mild odor of cat urine, especially if they’re frequently exposed to the odor. Finding Dog urine contamination is slightly different than finding cat urine in the carpet. Dog urine contamination can be found all over the carpeted area. Dogs will urinate on the edge of the carpet as well as in the middle of the carpet. We find dog urine using the same special day or night light made to spot cat urine contamination in carpet and other surfaces, such as furniture and walls.
from __future__ import print_function import email.Utils from gi.repository import Gtk import math import pychess import random import signal import subprocess from threading import Thread from pychess.compat import urlopen, urlencode from pychess.Players.PyChess import PyChess from pychess.System.prefix import addDataPrefix, isInstalled from pychess.System.repeat import repeat_sleep from pychess.System import GtkWorker, fident from pychess.System.Log import log from pychess.Utils.const import * from pychess.Utils.lutils.LBoard import LBoard from pychess.Utils.lutils.lmove import determineAlgebraicNotation, toLAN, parseSAN from pychess.Utils.lutils import lsearch from pychess.Utils.repr import reprResult_long, reprReason_long from pychess.ic.FICSConnection import FICSMainConnection class PyChessFICS(PyChess): def __init__ (self, password, from_address, to_address): PyChess.__init__(self) self.ports = (23, 5000) if not password: self.username = "guest" else: self.username = "PyChess" self.owner = "Lobais" self.password = password self.from_address = "The PyChess Bot <%s>" % from_address self.to_address = "Thomas Dybdahl Ahle <%s>" % to_address # Possible start times self.minutes = (1,2,3,4,5,6,7,8,9,10) self.gains = (0,5,10,15,20) # Possible colors. None == random self.colors = (WHITE, BLACK, None) # The amount of random challenges, that PyChess sends with each seek self.challenges = 10 enableEGTB() self.sudos = set() self.ownerOnline = False self.waitingForPassword = None self.log = [] self.acceptedTimesettings = [] self.worker = None repeat_sleep(self.sendChallenges, 60*1) def __triangular(self, low, high, mode): """Triangular distribution. Continuous distribution bounded by given lower and upper limits, and having a given mode value in-between. http://en.wikipedia.org/wiki/Triangular_distribution """ u = random.random() c = (mode - low) / (high - low) if u > c: u = 1 - u c = 1 - c low, high = high, low tri = low + (high - low) * (u * c) ** 0.5 if tri < mode: return int(tri) elif tri > mode: return int(math.ceil(tri)) return int(round(tri)) def sendChallenges(self): if self.connection.bm.isPlaying(): return True statsbased = ((0.39197722779282, 3, 0), (0.59341408108783, 5, 0), (0.77320877377846, 1, 0), (0.8246379941394, 10, 0), (0.87388717406441, 2, 12), (0.91443760169489, 15, 0), (0.9286423058163, 4, 0), (0.93891977227793, 2, 0), (0.94674539138335, 20, 0), (0.95321476842423, 2, 2), (0.9594588808257, 5, 2), (0.96564528079889, 3, 2), (0.97173859621034, 7, 0), (0.97774906636184, 3, 1), (0.98357243654425, 5, 12), (0.98881309737017, 5, 5), (0.99319644938247, 6, 0), (0.99675879556023, 3, 12), (1, 5, 3)) #n = random.random() #for culminativeChance, minute, gain in statsbased: # if n < culminativeChance: # break culminativeChance, minute, gain = random.choice(statsbased) #type = random.choice((TYPE_LIGHTNING, TYPE_BLITZ, TYPE_STANDARD)) #if type == TYPE_LIGHTNING: # minute = self.__triangular(0,2+1,1) # mingain = not minute and 1 or 0 # maxgain = int((3-minute)*3/2) # gain = random.randint(mingain, maxgain) #elif type == TYPE_BLITZ: # minute = self.__triangular(0,14+1,5) # mingain = max(int((3-minute)*3/2+1), 0) # maxgain = int((15-minute)*3/2) # gain = random.randint(mingain, maxgain) #elif type == TYPE_STANDARD: # minute = self.__triangular(0,20+1,12) # mingain = max(int((15-minute)*3/2+1), 0) # maxgain = int((20-minute)*3/2) # gain = self.__triangular(mingain, maxgain, mingain) #color = random.choice(self.colors) self.extendlog(["Seeking %d %d" % (minute, gain)]) 
self.connection.glm.seek(minute, gain, True) opps = random.sample(self.connection.players.get_online_playernames(), self.challenges) self.extendlog("Challenging %s" % op for op in opps) for player in opps: self.connection.om.challenge(player, minute, gain, True) return True def makeReady(self): signal.signal(signal.SIGINT, Gtk.main_quit) PyChess.makeReady(self) self.connection = FICSMainConnection("freechess.org", self.ports, self.username, self.password) self.connection.connect("connectingMsg", self.__showConnectLog) self.connection._connect() self.connection.glm.connect("addPlayer", self.__onAddPlayer) self.connection.glm.connect("removePlayer", self.__onRemovePlayer) self.connection.cm.connect("privateMessage", self.__onTell) self.connection.alm.connect("logOut", self.__onLogOut) self.connection.bm.connect("playGameCreated", self.__onGameCreated) self.connection.bm.connect("curGameEnded", self.__onGameEnded) self.connection.bm.connect("boardUpdate", self.__onBoardUpdate) self.connection.om.connect("onChallengeAdd", self.__onChallengeAdd) self.connection.om.connect("onOfferAdd", self.__onOfferAdd) self.connection.adm.connect("onAdjournmentsList", self.__onAdjournmentsList) self.connection.em.connect("onAmbiguousMove", self.__onAmbiguousMove) self.connection.em.connect("onIllegalMove", self.__onAmbiguousMove) self.connection.adm.queryAdjournments() self.connection.lvm.setVariable("autoflag", 1) self.connection.fm.setFingerNote(1, "PyChess is the chess engine bundled with the PyChess %s " % pychess.VERSION + "chess client. This instance is owned by %s, but acts " % self.owner + "quite autonomously.") self.connection.fm.setFingerNote(2, "PyChess is 100% Python code and is released under the terms of " + "the GPL. The evalution function is largely equal to the one of" + "GnuChess, but it plays quite differently.") self.connection.fm.setFingerNote(3, "PyChess runs on an elderly AMD Sempron(tm) Processor 3200+, 512 " + "MB DDR2 Ram, but is built to take use of 64bit calculating when " + "accessible, through the gpm library.") self.connection.fm.setFingerNote(4, "PyChess uses a small 500 KB openingbook based solely on Kasparov " + "games. The engine doesn't have much endgame knowledge, but might " + "in some cases access an online endgamedatabase.") self.connection.fm.setFingerNote(5, "PyChess will allow any pause/resume and adjourn wishes, but will " + "deny takebacks. Draw, abort and switch offers are accepted, " + "if they are found to be an advance. Flag is auto called, but " + "PyChess never resigns. 
We don't want you to forget your basic " + "mating skills.") def main(self): self.connection.run() self.extendlog([str(self.acceptedTimesettings)]) self.phoneHome("Session ended\n"+"\n".join(self.log)) print("Session ended") def run(self): t = Thread(target=self.main, name=fident(self.main)) t.daemon = True t.start() Gdk.threads_init() Gtk.main() #=========================================================================== # General #=========================================================================== def __showConnectLog (self, connection, message): print(message) def __onLogOut (self, autoLogoutManager): self.connection.close() #sys.exit() def __onAddPlayer (self, gameListManager, player): if player["name"] in self.sudos: self.sudos.remove(player["name"]) if player["name"] == self.owner: self.connection.cm.tellPlayer(self.owner, "Greetings") self.ownerOnline = True def __onRemovePlayer (self, gameListManager, playername): if playername == self.owner: self.ownerOnline = False def __onAdjournmentsList (self, adjournManager, adjournments): for adjournment in adjournments: if adjournment["online"]: adjournManager.challenge(adjournment["opponent"]) def __usage (self): return "|| PyChess bot help file || " +\ "# help 'Displays this help file' " +\ "# sudo <password> <command> 'Lets PyChess execute the given command' "+\ "# sendlog 'Makes PyChess send you its current log'" def __onTell (self, chatManager, name, title, isadmin, text): if self.waitingForPassword: if text.strip() == self.password or (not self.password and text == "none"): self.sudos.add(name) self.tellHome("%s gained sudo access" % name) self.connection.client.run_command(self.waitingForPassword) else: chatManager.tellPlayer(name, "Wrong password") self.tellHome("%s failed sudo access" % name) self.waitingForPassword = None return args = text.split() #if args == ["help"]: # chatManager.tellPlayer(name, self.__usage()) if args[0] == "sudo": command = " ".join(args[1:]) if name in self.sudos or name == self.owner: # Notice: This can be used to make nasty loops print(command, file=self.connection.client) else: print(repr(name), self.sudos) chatManager.tellPlayer(name, "Please send me the password") self.waitingForPassword = command elif args == ["sendlog"]: if self.log: # TODO: Consider email chatManager.tellPlayer(name, "\\n".join(self.log)) else: chatManager.tellPlayer(name, "The log is currently empty") else: if self.ownerOnline: self.tellHome("%s told me '%s'" % (name, text)) else: def onlineanswer (message): data = urlopen("http://www.pandorabots.com/pandora/talk?botid=8d034368fe360895", urlencode({"message":message, "botcust2":"x"}).encode("utf-8")).read().decode('utf-8') ss = "<b>DMPGirl:</b>" es = "<br>" answer = data[data.find(ss)+len(ss) : data.find(es,data.find(ss))] chatManager.tellPlayer(name, answer) t = Thread(target=onlineanswer, name=fident(onlineanswer), args=(text,)) t.daemon = True t.start() #chatManager.tellPlayer(name, "Sorry, your request was nonsense.\n"+\ # "Please read my help file for more info") #=========================================================================== # Challenges and other offers #=========================================================================== def __onChallengeAdd (self, offerManager, index, match): #match = {"tp": type, "w": fname, "rt": rating, "color": color, # "r": rated, "t": mins, "i": incr} offerManager.acceptIndex(index) def __onOfferAdd (self, offerManager, offer): if offer.type in (PAUSE_OFFER, RESUME_OFFER, ADJOURN_OFFER): offerManager.accept(offer) elif 
offer.type in (TAKEBACK_OFFER,): offerManager.decline(offer) elif offer.type in (DRAW_OFFER, ABORT_OFFER, SWITCH_OFFER): if self.__willingToDraw(): offerManager.accept(offer) else: offerManager.decline(offer) #=========================================================================== # Playing #=========================================================================== def __onGameCreated (self, boardManager, ficsgame): base = int(ficsgame.minutes)*60 inc = int(ficsgame.inc) self.clock[:] = base, base self.increment[:] = inc, inc self.gameno = ficsgame.gameno self.lastPly = -1 self.acceptedTimesettings.append((base, inc)) self.tellHome("Starting a game (%s, %s) gameno: %s" % (ficsgame.wplayer.name, ficsgame.bplayer.name, ficsgame.gameno)) if ficsgame.bplayer.name.lower() == self.connection.getUsername().lower(): self.playingAs = BLACK else: self.playingAs = WHITE self.board = LBoard(NORMALCHESS) # Now we wait until we recieve the board. def __go (self): if self.worker: self.worker.cancel() self.worker = GtkWorker(lambda worker: PyChess._PyChess__go(self, worker)) self.worker.connect("published", lambda w, msg: self.extendlog(msg)) self.worker.connect("done", self.__onMoveCalculated) self.worker.execute() def __willingToDraw (self): return self.scr <= 0 # FIXME: this misbehaves in all but the simplest use cases def __onGameEnded (self, boardManager, ficsgame): self.tellHome(reprResult_long[ficsgame.result] + " " + reprReason_long[ficsgame.reason]) lsearch.searching = False if self.worker: self.worker.cancel() self.worker = None def __onMoveCalculated (self, worker, sanmove): if worker.isCancelled() or not sanmove: return self.board.applyMove(parseSAN(self.board,sanmove)) self.connection.bm.sendMove(sanmove) self.extendlog(["Move sent %s" % sanmove]) def __onBoardUpdate (self, boardManager, gameno, ply, curcol, lastmove, fen, wname, bname, wms, bms): self.extendlog(["","I got move %d %s for gameno %s" % (ply, lastmove, gameno)]) if self.gameno != gameno: return self.board.applyFen(fen) self.clock[:] = wms/1000., bms/1000. if curcol == self.playingAs: self.__go() def __onAmbiguousMove (self, errorManager, move): # This is really a fix for fics, but sometimes it is necessary if determineAlgebraicNotation(move) == SAN: self.board.popMove() move_ = parseSAN(self.board, move) lanmove = toLAN(self.board, move_) self.board.applyMove(move_) self.connection.bm.sendMove(lanmove) else: self.connection.cm.tellOpponent( "I'm sorry, I wanted to move %s, but FICS called " % move + "it 'Ambigious'. 
I can't find another way to express it, " + "so you can win") self.connection.bm.resign() #=========================================================================== # Utils #=========================================================================== def extendlog(self, messages): [log.info(m+"\n") for m in messages] self.log.extend(messages) del self.log[:-10] def tellHome(self, message): print(message) if self.ownerOnline: self.connection.cm.tellPlayer(self.owner, message) def phoneHome(self, message): SENDMAIL = '/usr/sbin/sendmail' SUBJECT = "Besked fra botten" p = subprocess.Popen([SENDMAIL, '-f', email.Utils.parseaddr(self.from_address)[1], email.Utils.parseaddr(self.to_address)[1]], stdin=subprocess.PIPE) print("MIME-Version: 1.0", file=p.stdin) print("Content-Type: text/plain; charset=UTF-8", file=p.stdin) print("Content-Disposition: inline", file=p.stdin) print("From: %s" % self.from_address, file=p.stdin) print("To: %s" % self.to_address, file=p.stdin) print("Subject: %s" % SUBJECT, file=p.stdin) print(file=p.stdin) print(message, file=p.stdin) print("Cheers", file=p.stdin) p.stdin.close() p.wait()
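The seek times above are drawn from a triangular distribution; here is a minimal standalone sketch of the same sampler (the bounds and mode are illustrative only):

# Standalone sketch of the triangular sampler used when picking seek times above.
import math
import random

def triangular_int(low, high, mode):
    """Sample an int from a triangular distribution bounded by low/high with the given mode."""
    u = random.random()
    c = (mode - low) / float(high - low)
    if u > c:
        u, c = 1 - u, 1 - c
        low, high = high, low
    tri = low + (high - low) * (u * c) ** 0.5
    if tri < mode:
        return int(tri)
    elif tri > mode:
        return int(math.ceil(tri))
    return int(round(tri))

# e.g. pick a base time between 0 and 15 minutes, most often around 5
print(sorted(triangular_int(0, 15, 5) for _ in range(10)))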
Selection of candidates for DM/MCh courses will be made on the basis of multiple choice questions carrying 80 marks. The Theory Examination, consisting of Part I & II, is of 1½ hours' duration. N.B. There will be negative marking for wrong answers in the theory paper for Part II only. Candidates invited for the selection will be examined in the theory paper in the subject concerned. The merit list will be prepared on the basis of the marks obtained by the candidates in the theory examination. A separate merit list will be prepared for each subject and for each category (i.e. general and sponsored/deputed). There will be negative marking to the extent of 0.25 marks for each wrong answer in the theory paper. However, for the final selection the candidates are required to obtain a minimum of 60% marks for the general category and 55% marks for SC/ST. Applied Anatomy-6, Applied Biochemistry-12, Immunogenetic & Molecular Biology-10, Applied Physiology-6, Pathology-15, Pharmacology-12, Microbiology-12, Forensic Medicine-6, Social & Preventive Medicine-7, Medicine-36, Paediatrics-12, Dermatology-6, Psychiatry-6. Radiodiagnosis-6, Radiotherapy-6, Surgery-36, ENT-10, Orthopaedic Surgery-10, Ophthalmology-10, Anaesthesia-6, Obst. & Gynaecology-20. The selection will be made on the basis of a theory examination consisting of 100 objective-type questions with a maximum of 100 marks. The duration of the examination will be 1½ hours. There will be a common entrance test for MDS and House Job (Oral Health Sciences), with 0.25 negative marking for wrong answers in the theory paper. There will not be any clinical/practical examination. Selection will be made in each category, provided the candidates fulfill the cut-off point mentioned above. The interview with the selection committee is mandatory. The selection of candidates for the Master in Hospital Administration (MHA) course will be made on the basis of a theory paper carrying 80 marks and a practical examination carrying 20 marks. The theory examination, consisting of Part I and Part II, is of 1½ hours' duration. A. Age limit as on 1st July/1st January for sessions starting in July and January respectively. a) MD degree in Medicine or Paediatrics or its equivalent qualification recognised by the Medical Council of India, except as otherwise indicated. b) Must be registered with the Central/State Medical Registration Council. For the DM (Clinical Pharmacology) course: MD degree in Pharmacology or its equivalent qualification in any clinical subject recognised by the Medical Council of India. For the DM (Neuro Radiology) course: MD degree in Radiology or its equivalent qualification recognised by the Medical Council of India. For the DM (Haematopathology) course: MD degree in Pathology or equivalent degree. For the DM (Cardiac Anaesthesia) course: MD degree in Anaesthesia or equivalent degree. For the DM (Paediatric Neurology) course: MD degree in Paediatrics or equivalent degree. For MCh courses: MS degree in General Surgery or its equivalent qualification recognised by the Medical Council of India. a) MS degree in General Surgery/MS (ENT)/MS (Orthopaedics) or its equivalent qualification recognised by the Medical Council of India. MBBS with a minimum of 55% aggregate marks, or MD/MS in the subject concerned, or Diplomate of the National Board of Examinations. Candidates who have made more than one attempt during their MBBS/MD/MS course are not eligible. However, those belonging to Scheduled Castes/Tribes with up to two attempts in their MBBS/MD/MS course will be eligible.
A “Failure”, “Compartment” or “Re-appear” in the examination will constitute an attempt. Candidates who have obtained their MBBS/MD/MS degree from medical colleges which are not recognised by the Medical Council of India are not eligible to apply. b) Have qualified the NET examination (with fellowship only) conducted by the UGC, or the Junior Research Fellowship (JRF) examination conducted by ICMR/CSIR etc. (within two years only). An attested copy of the result must be attached. If the candidate fails to produce proof of his/her eligibility as per the above qualification at least one week before the entrance examination, his/her candidature will not be considered, and the entire responsibility for the same will lie with the candidate. A degree of Master of Science (MSc) or Master of Veterinary Science (M.V.Sc.) or MSc (Laboratory Technology) or MSc (Speech & Hearing) in subjects allied to Medical Sciences such as Anatomy, Physiology, Biochemistry, Biophysics, Human Biology, Molecular Biology, Microbiology, Biotechnology, Immunology, Life Sciences including Botany, Zoology, Genetics, Cell Biology, Pharmacology, Pharmacy, Organic Chemistry, Anthropology & MSc (Human Genomics). a) MBBS passed or its equivalent qualification from a University/Institute recognised by the Medical Council of India, with not more than one attempt made during the MBBS course. Candidates belonging to Scheduled Castes/Tribes with up to two attempts during their MBBS career will be eligible. b) Must have either completed, or be due to complete, one year of internship (rotatory housemanship) training by 30th June/31st December for the July and January sessions respectively. c) Must be registered with the Central/State Medical Registration Council. a) Degree of Bachelor of Dental Surgery (BDS) of Panjab University, or any other University recognised by the Syndicate of the Panjab University as equivalent thereto, with not more than one failure during the BDS career. However, candidates belonging to Scheduled Castes/Tribes who have two failures during their BDS career will be eligible. b) Must have either completed, or be due to complete, the compulsory paid rotating internship by 30th June/31st December for the July and January sessions respectively. c) Must be registered with the Central/State Dental Registration Council. 1. Candidates appointed through the entrance examination for House Job (Oral Health Sciences) are appointed for a period of six months; this period can be extended up to one year on the recommendation of the Head of the Department. 2. A candidate will be eligible for House Job within 3 years of obtaining the BDS degree. 3. House Job will be offered once in the department, for a maximum period of one year. 4. Candidates who have already done a House Job in any hospital or institute are not eligible for House Job.
#Name: Welder #Info: Mangle GCode to work on welder (2014-02-10) #Depend: GCode #Type: postprocess #Param: speed(float:10) target extruder speed (mm/s) #Param: mindist(float:1) minimum travel distance to switch off welder (mm) #Param: ON1(str:G4 P0) Command to insert after travel (line 1) #Param: ON2(str:M42 P2 Sinf) Command to insert after travel (line 2) #Param: ON3(str:) Command to insert after travel (line 3) #Param: ON4(str:) Command to insert after travel (line 4) #Param: OFF1(str:G4 P0) Command to insert before travel (line 1) #Param: OFF2(str:M42 P2 Snan) Command to insert before travel (line 2) #Param: OFF3(str:) Command to insert before travel (line 3) #Param: OFF4(str:) Command to insert before travel (line 4) import sys __author__ = 'Bas Wijnen <[email protected]>' __date__ = '2014-02-10' __license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html' try: infilename = filename outfilename = filename ON = ''.join (['%s\n' % x for x in (ON1, ON2, ON3, ON4) if x]) OFF = ''.join (['%s\n' % x for x in (OFF1, OFF2, OFF3, OFF4) if x]) except NameError: assert len (sys.argv) in (3, 5) infilename = sys.argv[1] outfilename = sys.argv[2] speed = float (sys.argv[3]) if len (sys.argv) > 3 else 40. mindist = float (sys.argv[4]) if len (sys.argv) > 4 else 1. ON = 'G4 P0\nM42 P2 Sinf\n' OFF = 'G4 P0\nM42 P2 Snan\n' extruding = False pos = [0., 0., 0., 0.] erel = None rel = False edata = [0.,0.] def parse (line): edata[0] = pos[3] global rel, erel, extruding if ';' in line: l = line[:line.find (';')] else: l = line components = l.split () if len (components) == 0: return line if components[0] == 'G90': rel = False if components[0] == 'G91': rel = True if components[0] == 'M82': erel = False if components[0] == 'M83': erel = True if components[0] == 'G92': for w in components: if w[0] in 'XYZ': wh = ord (w[0]) - ord ('X') pos[wh] = float (w[1:]) elif w[0] == 'E': pos[3] = float (w[1:]) if components[0] not in ('G0', 'G1'): return line parts = {} for p in components[1:]: if p[0] in parts or p[0] not in 'XYZEF': print 'warning: %s' % line return line parts[p[0]] = float (p[1:]) x = [] for i, c in enumerate ('XYZE'): if c in parts: x.append (parts[c] if (rel if i < 3 or erel is None else erel) else parts[c] - pos[i]) pos[i] += x[-1] else: x.append (0.) dist = sum ([t ** 2 for t in x[:3]]) ** .5 if 'E' not in parts or x[3] <= 0: if extruding and dist > mindist: extruding = False return OFF + line return line del parts['E'] t = x[3] / speed parts['F'] = dist / t * 60. ret = 'G1 ' + ' '.join (['%s%f' % (c, parts[c]) for c in parts]) if not extruding: extruding = True return ON + ret edata[1] = pos[3] return ret try: with open (infilename, "r") as f: lines = f.readlines () with open (outfilename, "w") as f: for line in lines: f.write (parse (line.strip ()) + '\n') except: print ('something was wrong:', sys.exc_value)
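The feedrate rewrite that parse() applies to extruding moves can be illustrated with a small standalone calculation (example numbers only; the welder ON/OFF command blocks themselves are inserted verbatim around travel moves):

# Standalone illustration of the feedrate rewrite done by parse() above (example numbers).
speed = 10.0                          # target extruder speed in mm/s (script parameter)
dx, dy, dz, de = 5.0, 0.0, 0.0, 2.0   # deltas of one G1 move: 5 mm travel, 2 mm of "extrusion"

dist = (dx ** 2 + dy ** 2 + dz ** 2) ** 0.5   # length of the move in mm
t = de / speed                                # time needed to feed 2 mm of wire at 10 mm/s
feedrate = dist / t * 60.0                    # F value in mm/min written into the G1 line
print(feedrate)                               # 1500.0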
'Phase-I' is located at Garui (Mouza), PO Kanyapur, PS Asansol (N), Ward No. 31, Burdwan, near Asansol Engineering College on NH-2, Asansol. 'Phase-I' is set on 9 acres of land and comprises of No Of units. The Phase-I project will offer a combination of cottages, bungalows and row houses in 2, 3, 4 and 5 BHK configurations.
# fileviewer.py # Skeleton for a simple file viewer. from wax import * from wax.tools.dirview import DirView def isimage(path): EXTENSIONS = [".jpg", ".png", ".gif", ".ico", ".bmp"] path = path.lower() for ext in EXTENSIONS: if path.endswith(ext): return True return False class MainFrame(Frame): def Body(self): self.splitter = Splitter(self) self.dirview = DirView(self.splitter) self.dirview.OnSelectionChanged = self.OnDirViewSelectionChanged self.panel = self.MakeOverlayPanel(self.splitter) self.panel.Select(0) self.splitter.Split(self.dirview, self.panel, direction='v', sashposition=200) self.AddComponent(self.splitter, expand='both') self.Pack() self.Size = 600, 400 def MakeOverlayPanel(self, parent): op = OverlayPanel(parent) # window 1: a textbox self.textbox = TextBox(op, multiline=1, wrap=0, readonly=1) self.textbox.Font = Font("Courier New", 10) op.AddComponent(self.textbox, expand='both') # image 2: a panel self.imagepanel = Panel(op) op.AddComponent(self.imagepanel, expand='both') # create Bitmap control w/ dummy image dummy = ArtProvider((16,16)).GetBitmap('error', 'other') self.bitmap = Bitmap(self.imagepanel, dummy) self.imagepanel.AddComponent(self.bitmap, expand='both') self.imagepanel.Pack() op.Pack() return op def OnDirViewSelectionChanged(self, event): path = self.dirview.GetPath() if isimage(path): self.panel.Select(1) try: bitmap = Image(path).ConvertToBitmap() except: self.ShowText("Image could not be loaded") else: self.bitmap.SetBitmap(bitmap) self.imagepanel.Repack() else: self.ShowText(path) def ShowText(self, text): self.panel.Select(0) self.textbox.Value = text app = Application(MainFrame, title='fileviewer') app.Run()
Click below to see more jobs. You can upload your resume and start applying for the positions. Our goal is to help your business effectively address the Human Resources function. The role of HR is to act as a business partner, helping to deliver the organisation's goals, drive employee performance and enhance the employee experience.
from json import dumps from django.contrib.auth.decorators import login_required, user_passes_test from django.core.exceptions import PermissionDenied from django.http import HttpResponse from django.shortcuts import render, get_object_or_404 from fir_relations.models import Relation from incidents.views import is_incident_handler, is_incident_viewer @login_required @user_passes_test(is_incident_viewer) def relations(request, content_type, object_id): references = Relation.objects.filter(src_content_type=content_type, src_object_id=object_id, active=True).as_template_objects(request, relation_type='target') referenced_by = Relation.objects.filter(tgt_content_type=content_type, tgt_object_id=object_id, active=True).as_template_objects(request, relation_type='source') return render(request, "fir_relations/relations_sidebar.html", {'references': references, 'referenced_by': referenced_by}) @login_required @user_passes_test(is_incident_handler) def remove_relation(request, relation_id): if request.method == "POST": relation = get_object_or_404(Relation, pk=relation_id) if hasattr(relation.source, 'has_perm') and \ relation.source.has_perm(request.user, 'incidents.handle_incidents'): relation.active = False relation.save() return HttpResponse(dumps({'status': 'success'}), content_type="application/json") raise PermissionDenied
When the photographer took this photograph looking east along the Portage Iron Bridge, he captured some interesting details. On the enlargement below, you can see a cleared area north of the bridge. The small white sign is the same one visible in Image 139. This is the area once known as "Portage Park" or "Portage Bridge Park". A 1902 map shows a dance pavilion in the area. This area, along with the nearby Cascade House behind the trees in the upper right-hand corner, was the scene of the annual Soldier's Picnic. The view also provides an interesting look at the north side of the Portage Bridge and the "Pennsy" tracks seen on the cliff at the bottom of the view.
# -*- coding: utf-8 -*- """ Created on Mon Feb 13 19:04:45 2017 @author: coskun Midterm Exam > Problem 6 15.0 points possible (graded) Implement a function that meets the specifications below. For example, if L = [[1, 2], [3, 4], [5, 6, 7]] then deep_reverse(L) mutates L to be [[7, 6, 5], [4, 3], [2, 1]] """ #Sample Hand Input L = [[1, 2], [3, 4], [5, 6, 7]] def deep_reverse_copy(L): """ assumes L is a list of lists whose elements are ints Mutates L such that it reverses its elements and also reverses the order of the int elements in every element of L. It does not return anything. """ # Your code here R = L[::-1] mSize = len(L) for i in range(mSize): R[i] = R[i][::-1] return R def deep_reverse(L): # This will be the correct answer for test case """ assumes L is a list of lists whose elements are ints Mutates L such that it reverses its elements and also reverses the order of the int elements in every element of L. It does not return anything. """ # Your code here L.reverse() mSize = len(L) for i in range(mSize): L[i] = L[i][::-1] print(L) deep_reverse(L) print(L)
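A short check contrasting the two variants above: deep_reverse_copy returns a new list and leaves its argument untouched, while deep_reverse mutates in place.

# Quick check of the copy-vs-mutate behaviour of the two functions above.
M = [[1, 2], [3, 4], [5, 6, 7]]
R = deep_reverse_copy(M)
print(R)   # [[7, 6, 5], [4, 3], [2, 1]]
print(M)   # [[1, 2], [3, 4], [5, 6, 7]]  -- original unchanged

deep_reverse(M)   # mutates M (and prints it, as written above)
print(M)   # [[7, 6, 5], [4, 3], [2, 1]]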
First half goals courtesy of in-form duo Harry Wilson and Jack Marriott heaped the pressure on Sheffield Wednesday boss Jos Luhukay as Derby County came from behind to beat the Owls 2-1 at Hillsborough. A quick check of respective recent fortunes would have pointed towards an away win, and so it proved on what was a bitterly cold afternoon. The home side raced into an early lead through Adam Reach, but Derby found themselves going into the break ahead after striking twice in the space of 6 minutes. Reach continued his red-hot goalscoring streak with a cool finish low beyond Scott Carson to register his 5th of the season and send the Wednesday faithful wild. Their joy was short-lived, however, young prospect Harry Wilson in the right place at the right time to level things up just shy of the half-hour mark. And the travelling support didn’t have long to wait for a second; summer acquisition Jack Marriot looks a confident player at the moment, and he capitalised on poor defending to slot home. Although a tad fortunate to be ahead, Frank Lampard’s men looked dangerous each time they came forward, with goal scorer Wilson and Florian Jozefzoon causing all kinds of problems for the Owls defence. Things may have been a lot different for the hosts had Ash Baker’s tempting cross been met by a blue and white shirt moments after the re-start, though at the other end Derby could have had a third and effectively put the result beyond all doubt but for interventions of Joey Pelupessy. The home fans were left scratching their heads when first talisman Fernando Forestieri and then Portuguese Marco Matias squandered golden opportunities by failing to steer their efforts goal bound. At the other end, Cameron Dawson had to be alert to tip over veteran Tom Huddlestone’s long range effort on 70 minutes. Tom Lawrence entered the fray with 12 minutes remaining, and the once Manchester United wide man was quick to join in with flurries forward. An ambitious Adam Reach effort would sum up Wednesday’s afternoon – full of intent yet severely lacking in the quality department. The result ensures Derby stay a place above rivals Nottingham Forest in 6th, whilst the Owls, without a home win since August, sit just three points above the drop zone. Sheffield Wednesday: Dawson; Baker, Lees ©, Hector, Fox; (Penney, 74’) Reach, Pelupessy, Bannan, Matias; (Nuhiu 73′) Forestieri, João. Subs not used: Wildsmith, Palmer, Onomah, Thorniley, Pudil. Derby County: Carson; Wisdom, Keogh ©, Tomori, (Davies 45′) Malone; Huddlestone, Holmes; Jozefzoon, (Lawrence 77′) Mount, (Bryson 70′) Wilson; Marriott. Subs not used: Roos, Waghorn, Johnson, Pearce.
# coding: utf-8 from dtest import Tester from tools import since, no_vnodes from assertions import assert_unavailable from cassandra import ConsistencyLevel, WriteTimeout from cassandra.query import SimpleStatement import time from threading import Thread from ccmlib.cluster import Cluster @since('2.0.6') class TestPaxos(Tester): def prepare(self, ordered=False, create_keyspace=True, use_cache=False, nodes=1, rf=1): cluster = self.cluster if (ordered): cluster.set_partitioner("org.apache.cassandra.dht.ByteOrderedPartitioner") if (use_cache): cluster.set_configuration_options(values={ 'row_cache_size_in_mb' : 100 }) cluster.populate(nodes).start() node1 = cluster.nodelist()[0] time.sleep(0.2) cursor = self.patient_cql_connection(node1, version="3.0.0") if create_keyspace: self.create_ks(cursor, 'ks', rf) return cursor def replica_availability_test(self): #See CASSANDRA-8640 session = self.prepare(nodes=3, rf=3) session.execute("CREATE TABLE test (k int PRIMARY KEY, v int)") session.execute("INSERT INTO test (k, v) VALUES (0, 0) IF NOT EXISTS") self.cluster.nodelist()[2].stop() session.execute("INSERT INTO test (k, v) VALUES (1, 1) IF NOT EXISTS") self.cluster.nodelist()[1].stop() assert_unavailable(session.execute, "INSERT INTO test (k, v) VALUES (2, 2) IF NOT EXISTS") self.cluster.nodelist()[1].start() session.execute("INSERT INTO test (k, v) VALUES (3, 3) IF NOT EXISTS") self.cluster.nodelist()[2].start() session.execute("INSERT INTO test (k, v) VALUES (4, 4) IF NOT EXISTS") @no_vnodes() def cluster_availability_test(self): #Warning, a change in partitioner or a change in CCM token allocation #may require the partition keys of these inserts to be changed. #This must not use vnodes as it relies on assumed token values. session = self.prepare(nodes=3) session.execute("CREATE TABLE test (k int PRIMARY KEY, v int)") session.execute("INSERT INTO test (k, v) VALUES (0, 0) IF NOT EXISTS") self.cluster.nodelist()[2].stop() session.execute("INSERT INTO test (k, v) VALUES (1, 1) IF NOT EXISTS") self.cluster.nodelist()[1].stop() session.execute("INSERT INTO test (k, v) VALUES (3, 2) IF NOT EXISTS") self.cluster.nodelist()[1].start() session.execute("INSERT INTO test (k, v) VALUES (5, 5) IF NOT EXISTS") self.cluster.nodelist()[2].start() session.execute("INSERT INTO test (k, v) VALUES (6, 6) IF NOT EXISTS") def contention_test_multi_iterations(self): self._contention_test(8, 100) ##Warning, this test will require you to raise the open ##file limit on OSX. 
Use 'ulimit -n 1000' def contention_test_many_threds(self): self._contention_test(300, 1) def _contention_test(self, threads, iterations): """ Test threads repeatedly contending on the same row """ verbose = False cursor = self.prepare(nodes=3) cursor.execute("CREATE TABLE test (k int, v int static, id int, PRIMARY KEY (k, id))") cursor.execute("INSERT INTO test(k, v) VALUES (0, 0)"); class Worker(Thread): def __init__(self, wid, cursor, iterations, query): Thread.__init__(self) self.wid = wid self.iterations = iterations self.query = query self.cursor = cursor self.errors = 0 self.retries = 0 def run(self): global worker_done i = 0 prev = 0 while i < self.iterations: done = False while not done: try: res = self.cursor.execute(self.query, (prev+1, prev, self.wid )) if verbose: print "[%3d] CAS %3d -> %3d (res: %s)" % (self.wid, prev, prev+1, str(res)) if res[0][0] is True: done = True prev = prev + 1 else: self.retries = self.retries + 1 # There is 2 conditions, so 2 reasons to fail: if we failed because the row with our # worker ID already exists, it means we timeout earlier but our update did went in, # so do consider this as a success prev = res[0][3] if res[0][2] is not None: if verbose: print "[%3d] Update was inserted on previous try (res = %s)" % (self.wid, str(res)) done = True except WriteTimeout as e: if verbose: print "[%3d] TIMEOUT (%s)" % (self.wid, str(e)) # This means a timeout: just retry, if it happens that our update was indeed persisted, # we'll figure it out on the next run. self.retries = self.retries + 1 except Exception as e: if verbose: print "[%3d] ERROR: %s" % (self.wid, str(e)) self.errors = self.errors + 1 done = True i = i + 1 # Clean up for next iteration while True: try: self.cursor.execute("DELETE FROM test WHERE k = 0 AND id = %d IF EXISTS" % self.wid) break; except WriteTimeout as e: pass nodes = self.cluster.nodelist() workers = [] c = self.patient_cql_connection(nodes[0], version="3.0.0", keyspace='ks') q = c.prepare(""" BEGIN BATCH UPDATE test SET v = ? WHERE k = 0 IF v = ?; INSERT INTO test (k, id) VALUES (0, ?) IF NOT EXISTS; APPLY BATCH """) for n in range(0, threads): workers.append(Worker(n, c, iterations, q)) start = time.time() for w in workers: w.start() for w in workers: w.join() if verbose: runtime = time.time() - start print "runtime:", runtime query = SimpleStatement("SELECT v FROM test WHERE k = 0", consistency_level=ConsistencyLevel.ALL) rows = cursor.execute(query) value = rows[0][0] errors = 0 retries = 0 for w in workers: errors = errors + w.errors retries = retries + w.retries assert (value == threads * iterations) and (errors == 0), "value=%d, errors=%d, retries=%d" % (value, errors, retries)
The MCG tops the charts again! Today we received word that we’ve topped the charts again as #1 in comedy throughout Southwest Florida on ReverbNation. Thanks to everyone who has listened, shared, and voted for our material. Please stay posted, as we’re about to release new material soon.
# PyTransit: fast and easy exoplanet transit modelling in Python. # Copyright (C) 2010-2019 Hannu Parviainen # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. from numpy import ceil, sqrt, where, inf from matplotlib.pyplot import subplots from pytransit.contamination import TabulatedFilter, Instrument, SMContamination from pytransit.contamination.filter import sdss_g, sdss_r, sdss_i, sdss_z from pytransit.lpf.cntlpf import PhysContLPF from pytransit.param import NormalPrior as NP from .mocklc import MockLC class MockLPF(PhysContLPF): def __init__(self, name: str, lc: MockLC): super().__init__(name, passbands=lc.pb_names, times=lc.npb * [lc.time], fluxes=list(lc.flux.T), pbids=list(range(lc.npb))) self._lc = lc self.know_host = lc.setup.know_host self.misidentify_host = lc.setup.misidentify_host self.hteff = lc.hteff if not self.misidentify_host else lc.cteff self.cteff = lc.cteff self.t0_bjd = 0.0 self.period = lc.p self.sma = lc.a self.inc = lc.i self.k_apparent = lc.k_apparent self.b = lc.b self.set_prior(1, NP(lc.p, 1e-7)) if lc.setup.know_orbit: self.set_prior(2, NP(5.0, 0.05)) self.set_prior(3, NP(lc.b, 0.01)) if lc.setup.know_host: if lc.setup.misidentify_host: self.set_prior(6, NP(self._lc.cteff, 10)) else: self.set_prior(6, NP(self._lc.hteff, 10)) def _init_instrument(self): """Set up the instrument and contamination model.""" qe = TabulatedFilter('MockQE', [300, 350, 500, 550, 700, 800, 1000, 1050], [0.10, 0.20, 0.90, 0.96, 0.90, 0.75, 0.11, 0.05]) self.instrument = Instrument('MockInstrument', [sdss_g, sdss_r, sdss_i, sdss_z], (qe, qe, qe, qe)) self.cm = SMContamination(self.instrument, "i'") self.lnpriors.append(lambda pv: where(pv[:, 4] < pv[:, 5], 0, -inf)) def plot_light_curves(self, ncols: int = 2, figsize: tuple = (13, 5)): nrows = int(ceil(self.nlc) / ncols) fig, axs = subplots(nrows, ncols, figsize=figsize, sharex='all', sharey='all', constrained_layout=True) fmodel = self.flux_model(self.de.population)[self.de.minimum_index] for i, ax in enumerate(axs.flat): ax.plot(self.times[i], self.fluxes[i], '.', alpha=0.25) ax.plot(self.times[i], fmodel[self.lcslices[i]], 'k') def posterior_samples(self, burn: int = 0, thin: int = 1, include_ldc: bool = False): df = super().posterior_samples(burn, thin, include_ldc) df['k_app'] = sqrt(df.k2_app) df['k_true'] = sqrt(df.k2_true) df['cnt'] = 1. - df.k2_app / df.k2_true return df
dowebsitesneedtolookexactlythesameineverybrowser.com Do websites need to look exactly the same in every browser? Enter a website address and get a browser shot quickly and for free. The browser shot service generates full-screen previews of websites.
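For readers who want to reproduce this kind of preview locally rather than through the site above, a minimal sketch with headless Chrome via Selenium follows; the target URL, window size and output filename are placeholder assumptions, and the service itself may use a completely different stack.

# Minimal sketch: capture a screenshot of a page with headless Chrome via Selenium.
# The URL, window size and output filename below are illustrative assumptions.
from selenium import webdriver

options = webdriver.ChromeOptions()
options.add_argument("--headless")
options.add_argument("--window-size=1280,1024")

driver = webdriver.Chrome(options=options)
try:
    driver.get("https://example.com")
    driver.save_screenshot("browser_shot.png")  # writes a PNG of the current viewport
finally:
    driver.quit()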
import gzip from abc import abstractmethod from types import SimpleNamespace from warnings import warn from pandas import read_table, to_numeric, DataFrame, read_csv, concat from helpers.parsers import parse_fasta_file import imports.protein_data as importers from imports.sites.site_importer import SiteImporter class UniprotToRefSeqTrait: default_mappings_path = 'data/HUMAN_9606_idmapping.dat.gz' def __init__(self, mappings_path=None): if not mappings_path: mappings_path = self.default_mappings_path self.mappings = self.load_mappings(mappings_path) @staticmethod def load_mappings(mappings_path): header = ['uniprot', 'type', 'refseq'] mappings = read_table(mappings_path, names=header, converters={ # based on observations, if an accession is primary and # there is only one splice variant, the sequence-related # mappings are identified just as ACCESSION; if there are many # splice variants, the canonical variant version is appended # after a hyphen # (e.g. ACCESSION-4). # Following converter appends '-1' to all accessions # that have no hyphen to make the mapping easier. 'uniprot': lambda u: u if '-' in u else u + '-1' }).query('type == "RefSeq_NT"') mappings = mappings[mappings.refseq.str.startswith('NM_')] # drop refseq version mappings['refseq'], _ = mappings['refseq'].str.split('.', 1).str mappings.dropna(inplace=True) mappings = mappings.drop(columns=['type']) # after removing refseq version, we might get duplicates mappings = mappings.drop_duplicates() return mappings def add_nm_refseq_identifiers(self, sites: DataFrame): return sites.merge( self.mappings, left_on='sequence_accession', right_on='uniprot' ) class UniprotIsoformsTrait: default_path_canonical = 'data/uniprot_sprot.fasta.gz' default_path_splice = 'data/uniprot_sprot_varsplic.fasta.gz' def __init__( self, sprot_canonical_path=None, sprot_splice_variants_path=None, ): self.sequences = self.load_sequences( sprot_canonical_path or self.default_path_canonical, sprot_splice_variants_path or self.default_path_splice ) @staticmethod def load_sequences(canonical_path, splice_variants_path): all_sequences = {} groups = {'canonical': canonical_path, 'splice': splice_variants_path} for isoform_group, path in groups.items(): sequences = {} def append(protein_id, line): sequences[protein_id] += line def on_header(header): protein_id = header.split('|')[1] sequences[protein_id] = '' return protein_id parse_fasta_file(path, append, on_header, file_opener=gzip.open, mode='rt') all_sequences[isoform_group] = sequences return SimpleNamespace(**all_sequences) def is_isoform_canonical(self, isoform: str) -> bool: if isoform in self.sequences.splice: return False if isoform in self.sequences.canonical or isoform.endswith('-1'): return True def get_sequence_of_protein(self, site): """Return sequence of a protein on which the site is described. Having no information describing which isoform is canonical the best way to determine which isoform to use is to check if an isoform is a splice variant; if it is not a splice variant, we know that it has to be a canonical isoform. 
""" try: return self.sequences.splice[site.sequence_accession] except KeyError: if hasattr(site, 'primary_accession'): primary_accession = site.primary_accession elif site.sequence_accession.endswith('-1'): primary_accession = site.sequence_accession[:-2] else: return try: return self.sequences.canonical[primary_accession] except KeyError: warn(f'No sequence for {site.sequence_accession} found!') class UniprotSequenceAccessionTrait: def add_sequence_accession(self, sites): self.mappings['is_canonical'] = self.mappings.uniprot.apply(self.is_isoform_canonical) canonical_mapping = self.mappings.query('is_canonical == True') canonical_mapping['protein_accession'], _ = canonical_mapping['uniprot'].str.split('-', 1).str canonical_mapping.rename(columns={'uniprot': 'sequence_accession'}, inplace=True) canonical_mapping.drop(columns=['refseq'], inplace=True) canonical_mapping = canonical_mapping.drop_duplicates() return sites.merge(canonical_mapping, on='protein_accession') class UniprotImporter(UniprotToRefSeqTrait, UniprotIsoformsTrait, SiteImporter): """UniProt/SwissProt sites importer. The data can be exported and downloaded using sparql: http://sparql.uniprot.org, but for convenience check the pre-baked URLS in `download.sh` Relevant terms definition are available at: http://www.uniprot.org/docs/ptmlist The sparql code is available in `uniprot.sparql` file. Only reviewed entries (SwissProt) are considered. Many thanks to the author of https://www.biostars.org/p/261823/ for describing how to use sparql to export PTM data from UniProt. Maps sites by isoform; fallback to gene names can be implemented by altering sparql query. """ requires = {importers.proteins_and_genes, importers.sequences} requires.update(SiteImporter.requires) source_name = 'UniProt' @property @abstractmethod def default_path(self) -> str: """Default path to the csv file with site data""" def __init__(self, sprot_canonical_path=None, sprot_splice_variants_path=None, mappings_path=None): SiteImporter.__init__(self) UniprotToRefSeqTrait.__init__(self, mappings_path) UniprotIsoformsTrait.__init__(self, sprot_canonical_path, sprot_splice_variants_path) @abstractmethod def extract_site_mod_type(self, sites: DataFrame) -> DataFrame: """Extract site type information into additional columns. Following columns have to be returned: mod_type, residue. 
""" def filter_sites(self, sites: DataFrame) -> DataFrame: # remove variant-specific modifications sites = sites[~sites['modifiers'].str.contains('in variant', na=False)] # and those which are not common sites = sites[~sites['modifiers'].str.contains('atypical', na=False)] # see: http://www.uniprot.org/help/evidences # ECO_0000269 = Experimental evidence sites = sites[sites['eco'] == 'ECO_0000269'] return sites def load_sites(self, path=None, **filters): if not path: path = self.default_path sites = read_csv(path) sites.columns = [column.strip() for column in sites.columns] sites.position = to_numeric(sites.position.str.replace(r'\^.*', '')) extracted_data = self.extract_site_mod_type(sites) if sites.source.any(): sites['pub_med_ids'] = ( sites.source.where(sites.source.str.match(r'http://purl.uniprot.org/citations/\d+$')) .str.replace('http://purl.uniprot.org/citations/', '') ) sites['pub_med_ids'] = sites['pub_med_ids'].apply(lambda x: [int(x)] if x == x else None) else: warn('No site source data') sites['pub_med_ids'] = None sites.drop(columns=['data', 'source'], inplace=True) sites = concat([sites, extracted_data], axis=1) sites = self.filter_sites(sites) # only chosen site types sites = sites[sites.mod_type.isin(self.site_types)] # map uniprot to refseq: sites = self.add_nm_refseq_identifiers(sites) mapped_sites = self.map_sites_to_isoforms(sites) return self.create_site_objects(mapped_sites, ['refseq', 'position', 'residue', 'mod_type', 'pub_med_ids']) def repr_site(self, site): return f'{site.sequence_accession}: ' + super().repr_site(site) @staticmethod def split_kinases(kinases): return kinases.str.split(' (?:and|AND|or|OR) ')
Glass table and 6 black chairs. Perfect condition. Pick-up only, St Kilda area. Message if interested.
""" Django settings for ranking project. Generated by 'django-admin startproject' using Django 1.9. For more information on this file, see https://docs.djangoproject.com/en/1.9/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.9/ref/settings/ """ import os # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = '^@-4*3*_y!92n8*hq)!ouv0!%%crivp2ko#q))#tfi&fcb-b=3' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'rest_framework' ] REST_FRAMEWORK = { 'DEFAULT_PERMISSION_CLASSES': ('rest_framework.permissions.IsAdminUser',), 'PAGE_SIZE': 100 } MIDDLEWARE_CLASSES = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'ranking.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'ranking.wsgi.application' # Database # https://docs.djangoproject.com/en/1.9/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password validation # https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/1.9/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.9/howto/static-files/ STATIC_URL = '/static/'
Hercules Productions are pleased to be funded again by the Heritage Lottery Fund, this time to tell the stories that haunt the fast-disappearing landscape of Hulme and Moss Side. Each One Teach One will reanimate the empty lots and building sites of today, telling the stories that the eldest in our communities can remember, when music and laughter, love and high drama and ‘everyday foolishness’ changed the face of the landscape forever. From the bus garage to the market hall, the shebeen to the playing field. Each One Teach One is an oral history project which will partner young people and elders in an exploration of the stories that tell the history of these sites from the early 1950s through to the present day and bring them to life on the stage and in film drama.
''' Sentence model, inspired by the Keras VAE tutorial. Reference: "Auto-Encoding Variational Bayes" https://arxiv.org/abs/1312.6114 ''' import numpy as np import tensorflow as tf from keras.layers import Input, Embedding, Dense, Lambda, LSTM, RepeatVector, TimeDistributed from keras.models import Model from keras import backend as K from keras import objectives from keras.preprocessing import text, sequence from utils import load_data, to_one_hot batch_size = 30 nb_epoch = 50 max_sequence_len = 25 # Required for tensorflow! lstm_state_dim = 256 latent_dim = 128 intermediate_dim = 256 text_input = Input(shape=(max_sequence_len,), dtype='int32', name='text_input') embedding = Embedding(1000, 64, input_length=max_sequence_len, mask_zero=True, dropout=0.3)(text_input) rnn_encoded = LSTM(lstm_state_dim, dropout_W=0.3)(embedding) h = Dense(intermediate_dim, activation='relu')(rnn_encoded) z_mean = Dense(latent_dim, name="z_mean_dense")(h) z_log_var = Dense(latent_dim, name="z_log_var_dense")(h) def sampling(args): z_mean, z_log_var = args epsilon = K.random_normal(shape=(batch_size, latent_dim), mean=0.) return z_mean + K.exp(z_log_var / 2) * epsilon z = Lambda(sampling)([z_mean, z_log_var]) h_decoded = Dense(intermediate_dim, activation='relu', name="h_decoded_dense", input_shape=(latent_dim,))(z) x_decoded_mean = Dense(lstm_state_dim, activation='relu', name="x_decoded_mean_dense")(h_decoded) rnn_decoded = LSTM(1000, return_sequences=True, dropout_W=0.3)(RepeatVector(max_sequence_len)(x_decoded_mean)) text_output = TimeDistributed(Dense(1000, activation='softmax'))(rnn_decoded) def vae_loss(text_true_onehot, text_predicted_onehot): xent_loss = K.mean(objectives.categorical_crossentropy(text_true_onehot, text_predicted_onehot), axis=-1) kl_loss = - 0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1) return xent_loss + kl_loss train, dev, scores = load_data() x_train = [text.one_hot(x, n=1000, lower=True, split=" ") for x in train[0]] x_train = sequence.pad_sequences(x_train, maxlen=max_sequence_len).astype(int) #print(x_train[:2]) x_train_one_hot = np.asarray(list(map(to_one_hot, x_train))) #print(x_train_one_hot[:2]) vae = Model(text_input, text_output) vae.compile(optimizer='adam', loss=vae_loss, metrics=['accuracy']) vae.fit(x_train, x_train_one_hot, shuffle=True, nb_epoch=nb_epoch, batch_size=batch_size) vae.save("model-latest.keras") #vae.evaluate(x_test, x_test, batch_size=256) # build a model to project inputs on the latent space #encoder = Model(text_input, z_mean)
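The commented-out last line hints at the intended follow-up: building an encoder that projects sentences into the latent space. A hedged sketch of completing it, using the same Keras 1.x-style functional API as the rest of the file, might look like this; the variable names reuse those defined above, and nothing beyond the commented hint comes from the original script.

# Sketch: finish the commented-out encoder and project the training data
# into the latent space (same Keras 1.x-style functional API as above).
encoder = Model(text_input, z_mean)
x_train_latent = encoder.predict(x_train, batch_size=batch_size)
print(x_train_latent.shape)  # expected shape: (n_samples, latent_dim)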
Wellness is not just about walking 10,000 steps a day or being physically fit. There are important spiritual and emotional components that go into feeling your best. I discovered this about 15 years ago when I started practicing yoga. I’d always been active but from my first yoga class, I started to feel its restorative value. The classes made me feel more centered and grounded. I began to change how I thought about travel. I became a certified wellness travel specialist. The more I learn about new destinations, the more I’m able to incorporate wellness into every type of travel. In every location, I look for wonderful hikes, yoga, activities that take you outside into nature or into a culture. That might mean swimming at the spectacular and varied beaches of Anguilla or Barbados or hiking in the Torres del Paine National Park in Chile. For those who are looking for a true reboot, there are some spectacular destination spas. If you’re focused on healthy living, good diet and fitness, de-stressing and detoxing, Canyon Ranch in Tucson or Lenox could be a great choice. They have great exercise classes and healthy eating is a major part of their program. Europe also has some excellent spas for those looking for a healthy reset. Miraval, with locations in Tucson and Austin, is a different type of destination spa. It is very much about healing and spirituality and my go-to spot when someone needs to come to terms with something traumatic or set in motion major life changes. I recently sent two female clients there because they were asking to go somewhere to heal after a difficult year and I knew it would be right for them. During my visit to Miraval, I did yoga, went to a variety of organized classes, took hikes in the desert, and spent time with a healer. I especially responded to their equine experience where I learned more about myself than the horse. Overall, it was an enriching experience, and one I know my clients would enjoy. Wellness is my passion, and whether you’re looking for a place to come to heal, reset your habits or simply incorporate wellness and fitness into your travels, I can help find the ideal getaway for you.
'http://fastml.com/best-buy-mobile-contest-big-data/' import sys, csv, re from collections import defaultdict def prepare( query ): query = re.sub( r'\W', '', query ) query = query.lower() return query input_file = sys.argv[1] test_file = sys.argv[2] benchmark_file = sys.argv[3] output_file = sys.argv[4] i = open( input_file ) reader = csv.reader( i ) t = open( test_file ) b = open( benchmark_file ) headers = reader.next() mapping = defaultdict( lambda: {} ) counter = 0 for line in reader: query = prepare( line[3] ) sku = line[1] # print "%s -> %s" % ( query, sku ) try: mapping[query][sku] += 1 except KeyError: mapping[query][sku] = 1 counter += 1 if counter % 100000 == 0: print counter reader = csv.reader( t ) headers = reader.next() bench_reader = csv.reader( b, delimiter = " " ) headers = bench_reader.next() o = open( output_file, 'wb' ) writer = csv.writer( o, delimiter = " " ) n = 0 m = 0 for line in reader: n += 1 query = prepare( line[2] ) popular_skus = bench_reader.next() if query in mapping: m += 1 skus = [] for sku in sorted( mapping[query], key=mapping[query].get, reverse = True ): skus.append( sku ) skus.extend( popular_skus ) skus = skus[0:5] else: skus = popular_skus writer.writerow( skus ) # counter if n % 10000 == 0: print n print "Used mapping in %s / %s (%s)" % ( m, n, 1.0 * m / n )
Damaged Shipment Label 4" x 4" Damage happens. But help the receiver document the problems so that your claim is more easily processed. • Label gives valuable details on how to inspect and handle a package that appears to be damaged.
from ophyd import (EpicsMotor, PVPositioner, Device, EpicsSignal, EpicsSignalRO,PVPositionerPC) from ophyd import (Component as Cpt, FormattedComponent, DynamicDeviceComponent as DDC) #gap #und_gap = 'SR:C11-ID:G1{IVU20:1-Mtr:2}' #SR:C11-ID:G1{IVU20:1-Mtr:2}Inp:Pos ?? class MotorCenterAndGap(Device): "Center and gap using Epics Motor records" xc = Cpt(EpicsMotor, '-Ax:XCtr}Mtr') yc = Cpt(EpicsMotor, '-Ax:YCtr}Mtr') xg = Cpt(EpicsMotor, '-Ax:XGap}Mtr') yg = Cpt(EpicsMotor, '-Ax:YGap}Mtr') @property def hints(self): fields = [] for name in self.component_names: motor = getattr(self, name) fields.extend(motor.hints['fields']) return {'fields': fields} class VirtualGap(PVPositioner): readback = Cpt(EpicsSignalRO, 't2.C') setpoint = Cpt(EpicsSignal, 'size') done = Cpt(EpicsSignalRO, 'DMOV') done_value = 1 class VirtualCenter(PVPositioner): readback = Cpt(EpicsSignalRO, 't2.D') setpoint = Cpt(EpicsSignal, 'center') done = Cpt(EpicsSignalRO, 'DMOV') done_value = 1 class VirtualMotorCenterAndGap(Device): "Center and gap with virtual motors" xc = Cpt(VirtualCenter, '-Ax:X}') yc = Cpt(VirtualCenter, '-Ax:Y}') xg = Cpt(VirtualGap, '-Ax:X}') yg = Cpt(VirtualGap, '-Ax:Y}') class Blades(Device): top = Cpt(EpicsMotor, '-Ax:T}Mtr') bottom = Cpt(EpicsMotor, '-Ax:B}Mtr') outboard = Cpt(EpicsMotor, '-Ax:O}Mtr') inboard = Cpt(EpicsMotor, '-Ax:I}Mtr') class MotorSlits(Blades, MotorCenterAndGap): "combine t b i o and xc yc xg yg" pass class VirtualMotorSlits(Blades, VirtualMotorCenterAndGap): "combine t b i o and xc yc xg yg" # def __init__(self, *args, **kwargs): # super().__init__(*args, **kwargs) # self.xc.readback.name = self.name # self.yc.readback.name = self.name # self.xg.readback.name = self.name pass class XYMotor(Device): x = Cpt(EpicsMotor, '-Ax:X}Mtr') y = Cpt(EpicsMotor, '-Ax:Y}Mtr') class XYThetaMotor(XYMotor): "used for GI mirror" th = Cpt(EpicsMotor, '-Ax:Th}Mtr') class HorizontalDiffractionMirror(XYMotor): "x and y with pitch, which has different read and write PVs" #p = FormattedComponent(EpicsSignal, read_pv='{self.prefix}-Ax:P}}E-I', write_pv='{self.prefix}-Ax:P}}E-SP', add_prefix=('read_pv', 'write_pv', 'suffix')) p = FormattedComponent(EpicsSignal, read_pv='{self.prefix}-Ax:P}}Pos-I', write_pv='{self.prefix}-Ax:P}}PID-SP', add_prefix=('read_pv', 'write_pv', 'suffix')) # for some reason we cannot scan on E-SP. This is the actual piezo voltage (max 100) while our 'usual values' are converted to urad by some other laye rof logic in the ioc # the currrent SP is the input of the PID feedback loop. 
This requitred the feedback loop to be turned ON class DCM(Device): en = Cpt(EpicsMotor, '-Ax:Energy}Mtr') b = Cpt(EpicsMotor, '-Ax:B}Mtr') r = Cpt(EpicsMotor, '-Ax:R}Mtr') x = Cpt(EpicsMotor, '-Ax:X}Mtr') fp = Cpt(EpicsMotor, '-Ax:FP}Mtr') p = Cpt(EpicsMotor, '-Ax:P}Mtr') class SAXSBeamStop( Device): x = Cpt( EpicsMotor, '-Ax:X}Mtr' ) y1 = Cpt( EpicsMotor, '-Ax:YFT}Mtr') x2 = Cpt( EpicsMotor, '-Ax:XFB}Mtr') y2 = Cpt( EpicsMotor, '-Ax:YFB}Mtr') @property def hints(self): fields = [] for name in self.component_names: motor = getattr(self, name) fields.extend(motor.hints['fields']) return {'fields': fields} class DMM(Device): # en = Cpt(EpicsMotor, '-Ax:Energy}Mtr') b = Cpt(EpicsMotor, '-Ax:B}Mtr') r = Cpt(EpicsMotor, '-Ax:R}Mtr') x = Cpt(EpicsMotor, '-Ax:X}Mtr') y = Cpt(EpicsMotor, '-Ax:Y}Mtr') fp = Cpt(EpicsMotor, '-Ax:FP}Mtr') class Transfocator(Device): crl = DDC({'num%d' % i: (EpicsMotor, '%d-Ax:X}Mtr' % i, {}) for i in range(1, 9)}) x = Cpt(EpicsMotor, 'Ves-Ax:X}Mtr') y = Cpt(EpicsMotor, 'Ves-Ax:Y}Mtr') z = Cpt(EpicsMotor, 'Ves-Ax:Z}Mtr') ph = Cpt(EpicsMotor, 'Ves-Ax:Ph}Mtr') th = Cpt(EpicsMotor, 'Ves-Ax:Th}Mtr') class Kinoform(Device): z = Cpt(EpicsMotor, '-Ax:ZB}Mtr') x = Cpt(EpicsMotor, '-Ax:XB}Mtr') y = Cpt(EpicsMotor, '-Ax:YB}Mtr') chi = Cpt(EpicsMotor, '-Ax:Ch}Mtr') theta = Cpt(EpicsMotor, '-Ax:Th}Mtr') phi = Cpt(EpicsMotor, '-Ax:Ph}Mtr') lx = Cpt(EpicsMotor, '-Ax:XT}Mtr') ly = Cpt(EpicsMotor, '-Ax:YT}Mtr') class SmarPod_x(PVPositionerPC): readback = Cpt(EpicsSignalRO, '-Ax:1}Pos-I') setpoint = Cpt(EpicsSignal, '-Ax:1}Pos-SP') actuate = Cpt(EpicsSignal, '}Move-Cmd') actuate_value = 1 smp_x = SmarPod_x('XF:11IDB-ES{SPod:1',name='smp_x') smp_x.readback.name = 'smp_x' class SmarPod_y(PVPositionerPC): readback = Cpt(EpicsSignalRO, '-Ax:3}Pos-I') setpoint = Cpt(EpicsSignal, '-Ax:3}Pos-SP') actuate = Cpt(EpicsSignal, '}Move-Cmd') actuate_value = 1 smp_y = SmarPod_y('XF:11IDB-ES{SPod:1',name='smp_y') smp_y.readback.name = 'smp_y' class SmarPod_z(PVPositionerPC): readback = Cpt(EpicsSignalRO, '-Ax:2}Pos-I') setpoint = Cpt(EpicsSignal, '-Ax:2}Pos-SP') actuate = Cpt(EpicsSignal, '}Move-Cmd') actuate_value = 1 smp_z = SmarPod_z('XF:11IDB-ES{SPod:1',name='smp_z') smp_z.readback.name = 'smp_z' class SmarPod_rx(PVPositionerPC): readback = Cpt(EpicsSignalRO, '-Ax:1}Rot-I') setpoint = Cpt(EpicsSignal, '-Ax:1}Rot-SP') actuate = Cpt(EpicsSignal, '}Move-Cmd') actuate_value = 1 smp_rx = SmarPod_rx('XF:11IDB-ES{SPod:1',name='smp_rx') smp_rx.readback.name = 'smp_rx' class SmarPod_ry(PVPositionerPC): readback = Cpt(EpicsSignalRO, '-Ax:3}Rot-I') setpoint = Cpt(EpicsSignal, '-Ax:3}Rot-SP') actuate = Cpt(EpicsSignal, '}Move-Cmd') actuate_value = 1 smp_ry = SmarPod_ry('XF:11IDB-ES{SPod:1',name='smp_ry') smp_ry.readback.name = 'smp_ry' class SmarPod_rz(PVPositionerPC): readback = Cpt(EpicsSignalRO, '-Ax:2}Rot-I') setpoint = Cpt(EpicsSignal, '-Ax:2}Rot-SP') actuate = Cpt(EpicsSignal, '}Move-Cmd') actuate_value = 1 smp_rz = SmarPod_rz('XF:11IDB-ES{SPod:1',name='smp_rz') smp_rz.readback.name = 'smp_rz' class Diffractometer(Device): Del= Cpt( EpicsMotor, '-Ax:Del}Mtr') gam = Cpt(EpicsMotor, '-Ax:Gam}Mtr') om = Cpt(EpicsMotor, '-Ax:Om}Mtr') phi = Cpt(EpicsMotor, '-Ax:Ph}Mtr') xb = Cpt(EpicsMotor, '-Ax:XB}Mtr') yb = Cpt(EpicsMotor, '-Ax:YB}Mtr') chh = Cpt(EpicsMotor, '-Ax:ChH}Mtr') thh = Cpt(EpicsMotor, '-Ax:ThH}Mtr') phh = Cpt(EpicsMotor, '-Ax:PhH}Mtr') xh = Cpt(EpicsMotor, '-Ax:XH}Mtr') yh = Cpt(EpicsMotor, '-Ax:YH2}Mtr') zh = Cpt(EpicsMotor, '-Ax:ZH}Mtr') chv = Cpt(EpicsMotor, '-Ax:ChV}Mtr') thv = 
Cpt(EpicsMotor, '-Ax:ThV}Mtr') xv = Cpt(EpicsMotor, '-Ax:XV}Mtr') yv = Cpt(EpicsMotor, '-Ax:YV}Mtr') zv = Cpt(EpicsMotor, '-Ax:ZV}Mtr') xv2 = Cpt(EpicsMotor, '-Ax:XV2}Mtr') @property def hints(self): fields = [] for name in self.component_names: motor = getattr(self, name) fields.extend(motor.hints['fields']) return {'fields': fields} class XBPM( Device): vt = Cpt( EpicsSignal, 'CtrlDAC:BLevel-SP' ) xBPM =XBPM( 'XF:11IDB-BI{XBPM:02}', name = 'xBPM' ) diff = Diffractometer('XF:11IDB-ES{Dif', name='diff') # sample beamstop #sambst = XYMotor('XF:11IDB-OP{BS:Samp', name='sambst') s1 = MotorCenterAndGap('XF:11IDB-OP{Slt:1', name='s1') k1 = Kinoform('XF:11IDB-OP{Lens:1', name='k1') # upstream k2 = Kinoform('XF:11IDB-OP{Lens:2', name='k2') # downstream gi = XYThetaMotor('XF:11IDB-OP{Mir:GI', name='gi') # GI-mirror s2 = MotorCenterAndGap('XF:11IDB-OP{Slt:2', name='s2') #Beam-defining (large JJ) slits pbs = MotorSlits('XF:11IDA-OP{Slt:PB', name='pbs') # pink beam slits flt_y = EpicsMotor('XF:11IDA-OP{Flt:1-Ax:Y}Mtr', name='flt_y') # filters dcm = DCM('XF:11IDA-OP{Mono:DCM', name='dcm') #, check position, e.g., by dcm.b.user_readback.value dmm = DMM('XF:11IDA-OP{Mono:DMM', name='dmm') mbs = VirtualMotorSlits('XF:11IDA-OP{Slt:MB', name='mbs') # Mono-beam Slits, check position, e.g., by mbs.xc.readback.value tran= Transfocator('XF:11IDA-OP{Lens:', name='tran') # Transfocator s4 = MotorCenterAndGap('XF:11IDB-ES{Slt:4', name='s4') # temp guard slits fsh_x=EpicsMotor('XF:11IDB-OP{FS:1-Ax:X}Mtr', name='fsh_x') # fast shutter positioner: X fsh_y=EpicsMotor('XF:11IDB-OP{FS:1-Ax:Y}Mtr', name='fsh_y') # fast shutter positioner: Y #smp =SmarPod('XF:11IDB-ES{SPod:1-',name='smp') # SmarPod # Diagnostic Manipulators foil_y = EpicsMotor('XF:11IDA-BI{Foil:Bpm-Ax:Y}Mtr', name='foil_y') # foil_x for DBPM (note foil_y is for a different device, perhaps we should rename ...) foil_x = EpicsMotor('XF:11IDB-OP{Mon:Foil-Ax:X}Mtr', name='foil_x') #Sample chamber smaract linear stages # Note crazy names only for Julien!!! #amp = XYMotor('XF:11IDB-OP{BS:Sam', name='amp') class amp_motor(Device): #x = EpicsMotor('XF:11IDB-OP{BS:Sam-Ax:X}Mtr') ampx = EpicsSignal('XF:11IDB-OP{BS:Samp-Ax:X}Mtr.VAL', name='ampx') ampy = EpicsSignal('XF:11IDB-OP{Stg:Samp-Ax:Phi}Mtr.VAL', name='ampy') ampz = EpicsSignal('XF:11IDB-OP{BS:Samp-Ax:Y}Mtr.VAL', name='ampz') #caput('XF:11IDB-ES{Det:Eig4M}cam1:NumImages', fnum ) # SAXS table: WAXS section rotation SAXS_x1 = EpicsMotor('XF:11IDB-ES{Tbl:SAXS-Ax:X1}Mtr',name='SAXS_x1') SAXS_x2 = EpicsMotor('XF:11IDB-ES{Tbl:SAXS-Ax:X2}Mtr',name='SAXS_x2') # Note inconsistency in capitalization of Bpm/BPM below. 
bpm1 = XYMotor('XF:11IDA-BI{Bpm:1', name='bpm1') bpm2 = XYMotor('XF:11IDB-BI{BPM:2', name='bpm2') w1 = XYMotor('XF:11IDB-OP{Win:1', name='w1') # window positioners hdm = HorizontalDiffractionMirror('XF:11IDA-OP{Mir:HDM', name='hdm') gsl = VirtualMotorCenterAndGap('XF:11IDB-OP{Slt:Guard', name='gsl') #Guard rSlits (SmarAct) #gsl = VirtualMotorSlits('XF:11IDB-OP{Slt:Guard', name='gsl') #Guard rSlits (SmarAct) #SAXS beam stop saxs_bst = SAXSBeamStop( 'XF:11IDB-ES{BS:SAXS', name = 'saxs_bst' ) #To solve the "KeyError Problem" when doing dscan and trying to save to a spec file, Y.G., 20170110 gsl.xc.readback.name = 'gsl_xc' gsl.yc.readback.name = 'gsl_yc' gsl.xg.readback.name = 'gsl_xg' gsl.yg.readback.name = 'gsl_yg' mbs.xc.readback.name = 'mbs_xc' mbs.yc.readback.name = 'mbs_yc' mbs.xg.readback.name = 'mbs_xg' mbs.yg.readback.name = 'mbs_yg' fe = VirtualMotorCenterAndGap('FE:C11A-OP{Slt:12', name='fe') # Front End Slits (Primary Slits) fe.xc.readback.name = 'fe_xc' fe.yc.readback.name = 'fe_yc' fe.xg.readback.name = 'fe_xg' fe.yg.readback.name = 'fe_yg'
BarBri, Bar Review, California Essay Exam [a Thomson business, BAR/BRI] on Amazon.com. *FREE* shipping on qualifying offers. Barbri Bar Review California Essay Exam | Jeff Adachi | ISBN: 9780314156556 | Free shipping for all books shipped and sold by Amazon. UBE/MEE Bar Exam Deluxe Package. Call us today at 818-922-5151. Learn more about earning your JD at Regent. Please review your schedule and complete the BarBri California essay exam practice review workbook assignments. BARBRI International Bar Preparation courses are offered twice a… Save $800. Creating the most licensed attorneys every single year.
import unittest import mock from flashcards.sets import StudySet from flashcards.cards import StudyCard from flashcards import study from flashcards.study import BaseStudySession from flashcards.study import ShuffledStudySession def create_study_set(): """ Create a simple study set for test purposes. """ cards = [ StudyCard('2 + 2 = ?', '4'), StudyCard('2 + 3 = ?', '5'), StudyCard('2 + 4 = ?', '6'), StudyCard('2 + 5 = ?', '7') ] study_set = StudySet('Basic Maths') study_set._cards = cards return study_set def create_cards_list(): """ Create a simple list of cards for test purposes. """ cards = [ StudyCard('2 + 2 = ?', '4'), StudyCard('2 + 3 = ?', '5'), StudyCard('2 + 4 = ?', '6'), StudyCard('2 + 5 = ?', '7') ] return cards class TestGetStudySessionTemplate(unittest.TestCase): def test_get_study_session_template_default(self): mode = 'awdiowad' # Something retarded that is not in the mode options session = study.get_study_session_template(mode) self.assertIsInstance(session, BaseStudySession) def test_get_study_session_template_None_input(self): mode = None # user did not supply any option --mode' session = study.get_study_session_template(mode) self.assertIsInstance(session, BaseStudySession) def test_get_study_session_template_basic(self): mode = 'linear' # user entered `linear` as --mode option.' session = study.get_study_session_template(mode) self.assertIsInstance(session, BaseStudySession) def test_get_study_session_template_shuffled(self): mode = 'shuffled' # user entered `shuffled` as --mode option. session = study.get_study_session_template(mode) self.assertIsInstance(session, ShuffledStudySession) class TestBasicStudyStrategy(unittest.TestCase): def test_studySession_start(self): mock_show_question = mock.Mock() mock_show_answer = mock.Mock() study_set = create_study_set() session = BaseStudySession() session.show_question = mock_show_question session.show_answer = mock_show_answer session.start(study_set) self.assertEqual(4, mock_show_question.call_count) self.assertEqual(4, mock_show_answer.call_count) class TestShuffledStudyStrategy(unittest.TestCase): @mock.patch('flashcards.study.random.shuffle') def test_cards_are_shuffled(self, mock_shuffle): mock_show_question = mock.Mock() mock_show_answer = mock.Mock() study_set = create_study_set() session = ShuffledStudySession() session.show_question = mock_show_question session.show_answer = mock_show_answer session.start(study_set) self.assertEqual(1, mock_shuffle.call_count) self.assertEqual(4, mock_show_question.call_count) self.assertEqual(4, mock_show_answer.call_count)
Fila bras are known and respected for their quality and style. Pamper yourself with a perfect underwire bra that will enhance your cleavage and provide all the necessary support. Bra4Her.com specialists have compiled the largest selection of Fila underwire models and made it easy for you to find your most wanted one by color, size or over 20 other criteria. Compare all the Fila underwire options present on the market and click to buy directly from the store – you don’t pay extra and save time on browsing dozens of separate websites! Underwire bras from Fila have never let anyone down – let them become a part of your lingerie wardrobe and diversify your style!
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # Copyright (C) 2010-2012 ChriCar Beteiligungs- und Beratungs- GmbH (<http://www.camptocamp.at>) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import fields, osv def _lang_get(self, cr, uid, context=None): lang_pool = self.pool.get('res.lang') ids = lang_pool.search(cr, uid, [], context=context) res = lang_pool.read(cr, uid, ids, ['code', 'name'], context) return [(r['code'], r['name']) for r in res] class survey_survey(osv.osv): _inherit = 'survey.survey' _columns = { 'title': fields.char('Survey Title', size=255, required=1, translate=True), 'note': fields.text('Description', translate=True), 'lang': fields.selection(_lang_get, 'Language to print', help="If the selected language is loaded in the system, the survey will be printed in this language."), } survey_survey() class survey_page(osv.osv): _inherit = 'survey.page' _columns = { 'title': fields.char('Page Title', size=255, required=1, translate=True), 'note': fields.text('Description', translate=True), } survey_page() class survey_question(osv.osv): _inherit = 'survey.question' _columns = { 'question': fields.char('Question', size=255, required=1, translate=True), 'req_error_msg': fields.text('Error Message', translate=True), 'descriptive_text': fields.text('Descriptive Text', size=255, translate=True), 'comment_label': fields.char('Field Label', size = 255, translate=True), 'comment_valid_err_msg': fields.text('Error message', translate=True), 'make_comment_field_err_msg': fields.text('Error message', translate=True), 'validation_valid_err_msg': fields.text('Error message', translate=True), 'numeric_required_sum_err_msg': fields.text('Error message', translate=True), 'column_name': fields.char('Column Name',size=256, translate=True), } survey_question() # FIXME 20140923 - class missin #class survey_question_column_heading(osv.osv): # _inherit = 'survey.question.column.heading' # # _columns = { # 'title': fields.char('Column Heading', size=128, required=1, translate=True), # 'menu_choice': fields.text('Menu Choice', translate=True), # } # #survey_question_column_heading() #class survey_answer(osv.osv): # _inherit = 'survey.answer' # # _columns = { # 'answer': fields.char('Answer', size=255, required=1, translate=True), # 'menu_choice': fields.text('Menu Choices', translate=True), # 'question_answer_int': fields.integer('Question Answer ID unique'), # } # #survey_answer() #class survey_response_line(osv.osv): # _inherit = 'survey.response.line' # # _columns = { # 'comment': fields.text('Notes', translate=True), # 'single_text': fields.char('Text', size=255, translate=True), # } # #survey_response_line()
A close reading of the Wisconsin exit polls ought to have Democrats rather worried about John Kerry’s chances of taking back the White House. In the Forward State’s open primary, Kerry got trounced by nearly 2-1 among independent voters, who preferred John Edwards. Kerry also lost the suburbs. And, in what is shaping up as another close presidential election in November, it is once again independent and suburban voters who will decide the winner. Wisconsin’s Democrats joined the lemming-like rush to hop on the bandwagon for Kerry, who won them by nearly the same numbers he lost the independents. But voters in the exit polls also showed that, while they’re prepared to acquiesce in Kerry’s coronation, they are remarkably lacking in enthusiasm for him. In fact, they don’t really like him all that much. In the category of those who preferred a candidate who "cares about me," Kerry again was pummeled by Edwards, the policy-thin populist, by 17 points. Even more telling, asked who has the "right temperament," the same folks who handed Kerry a narrow victory said they found Kerry wanting, by a whopping 55 percent for Edwards to a pitiful 14 percent for Kerry. Following the Iowa and New Hampshire primaries which made him the undisputed front-runner, Kerry—who won in Iowa by his nothing-to-lose transformation into a fiery anti-Bush populist—has increasingly retreated into his normal don’t-offend-anyone mode. He reverted to the soporific, rather pompous speaking style which held him back months ago at the same time as Dean’s unscripted perorations tapped the zeitgeist of the Democratic base. If Kerry does not use the long months between now and the Boston convention to stake out some bolder positions capable of energizing that base, and instead sits cautiously on his insurmountable lead as the winner of 15 of the 17 state contests so far as he parades through the remaining primaries, he risks arriving in Boston with support for his candidacy broad but thin, as it was in Wisconsin. Moreover, the folly of the Terry McAuliffe-Clintonista scheme to drastically alter the primary calendar and bunch the determinant contests early in the year should now be clear. As the virtual nominee, Kerry is now out there as a sitting target for the lavishly funded Bush-Rove propaganda machine for some four-and-a-half months more than the Democrats’ White House postulant would have endured under the old primary schedule. The Republicans have already released their first anti-Kerry ad, and with a projected 3-1 money advantage over the Dems, they’ve announced (and can easily afford without the slightest strain) an early TV blitz that will begin soon. With Edwards’ Wisconsin performance forcing Kerry to put on a real campaign for the next several rounds of primaries, Kerry will be forced to spend much of the money he’s raising now—which he needs for the fall campaign—to finish off Edwards, and so will be unable to directly riposte the GOP’s pre-Boston air war against him. Kerry’s eventual nomination is not placed in doubt by the Wisconsin results, of course. Edwards cannot repeat his come-from-behind Wisconsin performance—which he owes to the independents and Republicans whose crossovers accounted for a third of the total votes there—in the Northern and Western Super Tuesday states, which don’t have open primaries. And his campaign is broke, which means he can’t outspend Kerry on TV (as he did in Wisconsin by some $320,000 to JFK’s $250,000). 
In fact, it’s not until Edwards arrives in the March 9 Deep South primaries that he has even the glimmer of a hope of winning a state other than the one he was born in, his sole victory thus far. And by that time, crushing defeats at the hands of Kerry in places like California and New York—where the Democratic base is infinitely more liberal than Edwards—will considerably have dampened the North Carolinian’s brief, post-Wisconsin bubble of media momentum. Edwards may well also be hurt by the new media scrutiny to which he’ll probably be subjected post-Wisconsin—voters may not like it when they find out that the senator with the toothy grin has considerably exaggerated his poor-boy roots, and that his father was a comfortably off part of management for most of his career, not an impoverished simple mill worker as Edwards has claimed. More worries for the Democrats: Once crowned, Kerry will be face to face with the fact that the same polls which have shown recent ticks downward in Bush’s credibility and approval also show that a majority of voters still find Dubya likeable. Kerry has a likeability gap—and people want to be able to like their president. Apart from such intangibles of personality, there’s a very good reason that only three senators have ever been elected president in the history of the Republic: senators—especially two-decade veterans of the Beltway like Kerry—have voting records, which can be exploited by their opponents to great effect. There’s also a lot in Kerry’s personal history which the Republicans’ opposition research specialists are already at work exhuming—many clues to future GOP attacks can be found in The Boston Globe’s comprehensive seven-part Kerry profile, which ought to be a must-read for those Democrats who want a real understanding of what their soon-to-be nominee is all about. There is also the fact that Kerry’s wife is, well, rather special—she and the $800 million fortune she wed in her first nuptials may not wear well on the campaign trail. Jay Leno got a huge laugh from his largely Middle-American studio audience recently when he cracked that "John Kerry has found a way to solve the federal budget deficit—find a rich country, like Switzerland, and marry it! " Mrs. Heinz Kerry comes across on the tube as projecting even less warmth than her husband. This could be seen clearly on the podium where Kerry delivered his Wisconsin victory speech: she did not look like a happy camper, and—after a week of intern rumors—ostentatiously avoided the lip-kiss which Kerry tried to plant on her. Laura Bush, well-schooled in the dangers of the camera’s eye, would not have made that mistake. Doug Ireland is a New York-based media critic and commentator whose articles appear regularly in The Nation, Tom Paine.com, and In These Times among many others. This article first appeared in the Tom Paine.com.
# pylint:disable=import-outside-toplevel from typing import Optional, Union from archinfo import Arch from .optimization_pass import OptimizationPassStage from .stack_canary_simplifier import StackCanarySimplifier from .base_ptr_save_simplifier import BasePointerSaveSimplifier from .multi_simplifier import MultiSimplifier from .div_simplifier import DivSimplifier from .mod_simplifier import ModSimplifier from .eager_returns import EagerReturnsSimplifier from .const_derefs import ConstantDereferencesSimplifier from .register_save_area_simplifier import RegisterSaveAreaSimplifier _all_optimization_passes = [ (RegisterSaveAreaSimplifier, True), (StackCanarySimplifier, True), (BasePointerSaveSimplifier, True), (EagerReturnsSimplifier, True), (DivSimplifier, True), (MultiSimplifier, True), (ModSimplifier, True), (ConstantDereferencesSimplifier, True), ] def get_optimization_passes(arch, platform): if isinstance(arch, Arch): arch = arch.name if platform is not None: platform = platform.lower() passes = [ ] for pass_, _ in _all_optimization_passes: if arch in pass_.ARCHES and (platform is None or platform in pass_.PLATFORMS): passes.append(pass_) return passes def get_default_optimization_passes(arch: Union[Arch,str], platform: Optional[str]): if isinstance(arch, Arch): arch = arch.name if platform is not None: platform = platform.lower() passes = [ ] for pass_, default in _all_optimization_passes: if not default: continue if arch in pass_.ARCHES and (platform is None or platform in pass_.PLATFORMS): passes.append(pass_) return passes
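As a quick illustration of the difference between the two helpers above, here is a usage sketch; it assumes the architecture string 'AMD64' and the platform 'linux' appear in at least some passes' ARCHES and PLATFORMS declarations.

# Usage sketch; 'AMD64' and 'linux' are assumed to be covered by the passes'
# ARCHES / PLATFORMS declarations.
all_amd64 = get_optimization_passes('AMD64', 'linux')
default_amd64 = get_default_optimization_passes('AMD64', 'linux')

# The default list can only ever be a subset of the full list, since it
# additionally filters on each pass's default flag.
assert set(default_amd64) <= set(all_amd64)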
Google announced last month that it’d be putting its prototype self-driving cars on California roads this summer, and now they’re here. When they were first announced last year, the cars were shown without any driver controls at all, but these are a bit different — there will still be a qualified driver and manual override controls available at all times, just in case a car gets out of its element. (Data so far suggests the challenge might be with self-driving cars being too safe, not the other way around.) In this way, the cars are similar to the many self-driving Lexus SUVs that Google already has driving on and around its Mountain View campus. The company has also launched a website where drivers can share their experiences on driving near these little things. Was it fun? Weird? Annoying? Google wants to know. It’ll probably help that some of the cars are going to become rolling works of art — how can you hate a cute car covered in an even cuter pattern? Then again, the cars are capped at “a neighborhood-friendly 25mph,” so if you’ve ever wanted to lay on your horn at an artificially intelligent vehicle that’s going way too slow on a public street, this might be a wonderful opportunity. Google has previously said that it would like to run a number of self-driving pilot programs over the next several years, so by all indications this is just the beginning of a very long process — it’s a regulatory and technological challenge that basically every automaker (plus Uber) is trying to figure out right now.
""" This file contains a primitive cache """ from __future__ import absolute_import import threading import time lock = threading.Lock() # pylint: disable=invalid-name class CacheObject: """Object saved in cache""" def __init__(self, value, duration): self.value = value self.expire = time.time() + duration class Cache(dict): """ Primitive key/value cache. Entries are kept in a dict with an expiration. When a get of an expired entry is done, the cache is cleaned of all expired entries. Locking is used for thread safety """ def get(self, key): """Get an object from the cache Arguments: key (str): Cache key Returns: Cached object """ lock.acquire() try: if key not in self: return None current_time = time.time() if self[key].expire > current_time: return self[key].value # expired key, clean out all expired keys deletes = [] for k, val in self.items(): if val.expire <= current_time: deletes.append(k) for k in deletes: del self[k] return None finally: lock.release() def set(self, key, value, duration): """Save an object in the cache Arguments: key (str): Cache key value (object): object to cache duration (int): time in seconds to keep object in cache """ lock.acquire() try: self[key] = CacheObject(value, duration) finally: lock.release()
Northern California is truly a burrito lover’s paradise. After all, we pretty much invented the oversized burrito—also known as the Mission Burrito—back in the 1960s. However, one of the biggest and best burritos isn’t found in San Francisco. It’s in Vacaville. Villa Corona is a popular eatery among locals and they are particularly famous for their adequately named “Super Burrito”. The size of this thing is enough to intimidate even those with the largest appetites. Think you can take it on? Keep scrolling to see this monstrosity for yourself. When it comes to amazing burritos, Northern California certainly isn't lacking. However, one of the tastiest (and biggest) can be found at Villa Corona in Vacaville. Villa Corona is a family owned and operated establishment that originally opened over 40 years ago. A couple, Ismael and Guillermina Villaseñor, left their hometown in Mexico and moved to San Francisco in the 1960s. They later moved to Napa and opened their family business, then known as "Villa Azteca", in 1972. The couple has since retired, but the business is still operated by their children. For decades, Villa Corona has been providing flavorful and mouthwatering Mexican food to the Vacaville area, and they show no signs of letting up anytime soon. Villa Corona specializes in all of the Mexican staples. However, they are most notable for their burritos. They offer a variety of different kinds including Carne Asada, Carnitas, Chile Verde, Pollo Asado, Vegetarian, and many more. Each burrito is stuffed with only the finest ingredients, and a lot of them! To top it all off (literally), they are smothered in red or green sauce and drizzled with sour cream and cheese. Their "Super Burritos" are the largest of them all and the size is bound to blow you away. It takes up the entire plate! These burritos are absolutely massive, so be sure to bring an empty stomach. If you don't think you can stomach an entire burrito, consider one of their other tasty dishes. It really doesn't matter what you get. Each dish is bound to leave you satisfied and counting down the days until you can come back for more. You'll find Villa Corona at 1989 Peabody Rd # 9, Vacaville, CA 95687. These burritos are some of the largest we’ve ever seen! One of those sounds pretty good right now, don’t you think? Northern California is home to some of the greatest Mexican restaurants. Check out this list of 10 of our favorites!