blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 5
283
| content_id
stringlengths 40
40
| detected_licenses
sequencelengths 0
41
| license_type
stringclasses 2
values | repo_name
stringlengths 7
96
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 58
values | visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 12.7k
662M
⌀ | star_events_count
int64 0
35.5k
| fork_events_count
int64 0
20.6k
| gha_license_id
stringclasses 11
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 43
values | src_encoding
stringclasses 9
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
5.88M
| extension
stringclasses 30
values | content
stringlengths 7
5.88M
| authors
sequencelengths 1
1
| author
stringlengths 0
73
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
acd6a22d7bb374d1f9a11fce956a81b0d38fbd56 | 6ab31b5f3a5f26d4d534abc4b197fe469a68e8e5 | /tests/kyu_7_tests/test_money_money_money.py | 81777cba1d4383469487a9c021f4670b80ab8516 | [
"MIT"
] | permissive | mveselov/CodeWars | e4259194bfa018299906f42cd02b8ef4e5ab6caa | 1eafd1247d60955a5dfb63e4882e8ce86019f43a | refs/heads/master | 2021-06-09T04:17:10.053324 | 2017-01-08T06:36:17 | 2017-01-08T06:36:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 423 | py | import unittest
from katas.kyu_7.money_money_money import calculate_years
class CalculateYearsTestCase(unittest.TestCase):
    """Unit tests for katas.kyu_7.money_money_money.calculate_years."""
    def test_equals(self):
        # Expected: 3 years for these principal/rate/tax/target values.
        self.assertEqual(calculate_years(1000, 0.05, 0.18, 1100), 3)
    def test_equals_2(self):
        self.assertEqual(calculate_years(1000, 0.01625, 0.18, 1200), 14)
    def test_equals_3(self):
        # Target equals the starting amount: zero years required.
        self.assertEqual(calculate_years(1000, 0.05, .18, 1000), 0)
| [
"[email protected]"
] | |
e64d478869518eacda665d609f0ea89fcfb6599a | c1666ac45bdb9491f232a6a69a6d9e24c8b33448 | /OpenMatch/utils.py | 69d86250ab01eeb91da58d95a299b987b1f35376 | [
"MIT"
] | permissive | wyfunique/OpenMatch | 7ff94562f61f40e817d98e58fcddbdfa1e63dcf4 | 84b25502bf52c58b9e71bd0754b2fc192d9b448f | refs/heads/master | 2023-08-27T20:12:18.293161 | 2021-11-03T13:40:42 | 2021-11-03T13:40:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,055 | py | import os
import json
from argparse import Action
class DictOrStr(Action):
    """Argparse action that stores either a dict or a plain string.

    A value containing '=' is parsed as comma-separated ``key=value``
    pairs into a dict; any other value is stored verbatim.
    """

    def __call__(self, parser, namespace, values, option_string=None):
        if '=' not in values:
            setattr(namespace, self.dest, values)
            return
        pairs = (item.split("=") for item in values.split(","))
        parsed = {key: val for key, val in pairs}
        setattr(namespace, self.dest, parsed)
def check_dir(path):
    """Ensure *path* exists as a directory and return it.

    Uses ``os.makedirs(..., exist_ok=True)`` so there is no race between
    checking for existence and creating the directory (the previous
    exists()/makedirs() pair could raise if another process created the
    directory in between). Raises FileExistsError if *path* exists but
    is not a directory.
    """
    os.makedirs(path, exist_ok=True)
    return path
def save_trec(rst_file, rst_dict):
    """Write retrieval results to *rst_file* in TREC run format.

    Each output line is ``<query_id> Q0 <doc_id> <rank> <score> openmatch``,
    with a query's documents ordered by descending score. *rst_dict* maps
    query id -> {doc id -> (score, ...)}.
    """
    with open(rst_file, 'w') as out:
        for q_id, scores in rst_dict.items():
            ranked = sorted(scores.items(), key=lambda item: item[1][0], reverse=True)
            for rank, (doc_id, payload) in enumerate(ranked, start=1):
                out.write('{} Q0 {} {} {} openmatch\n'.format(q_id, doc_id, rank, payload[0]))
def save_features(rst_file, features):
    """Write each feature string in *features* to *rst_file*, one per line."""
    with open(rst_file, 'w') as out:
        out.writelines(line + '\n' for line in features)
| [
"[email protected]"
] | |
c09b00550c14472684e0f4a7267a85e7e942a5b8 | e7b4098bc730160d989d5df8332be3941a71dd8c | /main/migrations/0017_auto_20200518_1107.py | 3c286fea9468d641c389cf148333aa1c29845df7 | [] | no_license | AdityaKhandelwal10/TutorialWebsite | a9d4c6a1bd2598a7573ca036df0cc4f21bf7837c | b1686b0ab4ac9ec98c304d39c3c1ec469da34c8b | refs/heads/master | 2022-09-02T09:25:57.368463 | 2020-05-26T05:13:53 | 2020-05-26T05:13:53 | 265,859,122 | 0 | 0 | null | 2020-05-26T05:13:54 | 2020-05-21T13:43:46 | Python | UTF-8 | Python | false | false | 823 | py | # Generated by Django 3.0.6 on 2020-05-18 05:37
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (2020-05-18): rename two TutorialCategory fields and
    refresh the captured default on Tutorial.tutorials_published."""
    dependencies = [
        ('main', '0016_auto_20200517_1738'),
    ]
    operations = [
        migrations.RenameField(
            model_name='tutorialcategory',
            old_name='tutorial_slug',
            new_name='category_slug',
        ),
        migrations.RenameField(
            model_name='tutorialcategory',
            old_name='tutorial_summary',
            new_name='category_summary',
        ),
        migrations.AlterField(
            model_name='tutorial',
            name='tutorials_published',
            # NOTE(review): the default is the datetime captured when the
            # migration was generated (a fixed value, not a callable) --
            # confirm this is intended rather than timezone.now.
            field=models.DateTimeField(default=datetime.datetime(2020, 5, 18, 11, 7, 19, 567545), verbose_name='Date Published'),
        ),
    ]
| [
"[email protected]"
] | |
6ab679b2ea51c8152faf843138780bcdc3f3ef35 | 968b9ab86dc0c72427fa49ff19ce3a3e9764fc23 | /recipe/run_test.py | 8916a5ff447ea9c6823115340d9eaa102fbfb594 | [
"MIT"
] | permissive | csdms-stack/sedflux-subside-csdms-recipe | edca238429a132479e9a55c44e98c24eec7b4648 | d0a9e00062c22dccd957af5361cc137898e3a36f | refs/heads/master | 2021-01-11T01:48:29.650577 | 2017-06-16T21:44:24 | 2017-06-16T21:44:24 | 70,662,958 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 279 | py | #! /usr/bin/env python
import os
# Run the Subside component smoke test inside a scratch directory so any
# files the model writes do not pollute the working tree.
os.mkdir('_testing')
os.chdir('_testing')
# NOTE(review): imported after chdir -- presumably so the component
# resolves its files relative to _testing; confirm.
from pymt.components import Subside as Model
model = Model()
# Print each default input parameter as "name: value units".
for default in model.defaults:
    print('{name}: {val} {units}'.format(
        name=default[0], val=default[1][0], units=default[1][1]))
| [
"[email protected]"
] | |
f6ae901aef883d459b385a64e054b15c0ae317ab | 5d0edf31b17c5375faf6126c1a7be8e79bfe2ab8 | /buildout-cache/eggs/Products.Archetypes-1.9.11-py2.7.egg/Products/Archetypes/AllowedTypesByIface.py | 40164071ac6e9c97b1c79324c415b048ba751b74 | [] | no_license | renansfs/Plone_SP | 27cba32ebd9fc03dae3941ec23cf1bf0a7b6667a | 8a7bdbdb98c3f9fc1073c6061cd2d3a0ec80caf5 | refs/heads/master | 2021-01-15T15:32:43.138965 | 2016-08-24T15:30:19 | 2016-08-24T15:30:19 | 65,313,812 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 5,043 | py | ###############################################################################
#
# Copyright (c) 2002-2005, Benjamin Saller <[email protected]>, and
# the respective authors. All rights reserved.
# For a list of Archetypes contributors see docs/CREDITS.txt.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the author nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
###############################################################################
from Products.CMFCore.utils import getToolByName
from Products.Archetypes.atapi import BaseFolder
from Products.Archetypes.ArchetypeTool import listTypes
class AllowedTypesByIfaceMixin:
    """An approach to restrict allowed content types in a container by
    the interfaces they implement.
    Notice that extending this class means surpassing allowed_content_types,
    filter_content_types etc in the FTI, while we are still concerned about
    security.
    ATBIFolder is an example type that uses AllowedTypesByIfaceMixin:
    >>> self.folder.invokeFactory('ATBIFolder', 'f')
    'f'
    >>> f = self.folder.f
    f has an empty list of allowed_interfaces, so it doesn't allow anything
    right now:
    >>> f.allowedContentTypes()
    []
    invokeFactory will fail:
    >>> try:
    ...     f.invokeFactory('SimpleType', 'st')
    ... except ValueError:
    ...     print 'Right'
    Right
    Now we restrict allowed_interfaces to IBaseFolder:
    >>> from Products.Archetypes.interfaces.base import *
    >>> f.allowed_interfaces = (IBaseFolder,)
    And try to add a SimpleType, which fails again:
    >>> try:
    ...     f.invokeFactory('SimpleType', 'st')
    ... except ValueError:
    ...     print 'Right'
    Right
    SimpleFolder implements IBaseFolder:
    >>> f.invokeFactory('SimpleFolder', 'sf')
    'sf'
    A content object only needs to implement one of allowed_interfaces:
    >>> from zope.interface import Interface
    >>> class SomeInterface(Interface): pass
    >>> f.allowed_interfaces = (IBaseFolder, SomeInterface)
    >>> f.invokeFactory('SimpleFolder', 'sf2')
    'sf2'
    >>> try:
    ...     f.invokeFactory('SimpleType', 'sf')
    ... except ValueError:
    ...     print 'Right'
    Right
    """
    # XXX: This class depends heavily on implementation details in CMF's
    # PortalFolder.
    allowed_interfaces = () # Don't allow anything, subclasses overwrite!

    def allowedContentTypes(self):
        """Redefines CMF PortalFolder's allowedContentTypes."""
        # Only portal types whose classes implement at least one of
        # allowed_interfaces are offered to the user.
        at = getToolByName(self, 'archetype_tool')
        return at.listPortalTypesWithInterfaces(self.allowed_interfaces)

    def invokeFactory(self, type_name, id, RESPONSE=None, *args, **kwargs):
        """Invokes the portal_types tool.
        Overrides PortalFolder.invokeFactory."""
        pt = getToolByName(self, 'portal_types')
        at = getToolByName(self, 'archetype_tool')
        # Find the registered Archetypes type description for type_name.
        fti = None
        for t in listTypes():
            if t['portal_type'] == type_name:
                fti = t
                break
        if fti is None:
            raise ValueError, "Type %r not available." % type_name
        # Reject types implementing none of the allowed interfaces.
        if not at.typeImplementsInterfaces(fti, self.allowed_interfaces):
            raise ValueError, "Type %r does not implement any of %s." % \
                  (type_name, self.allowed_interfaces)
        args = (type_name, self, id, RESPONSE) + args
        new_id = pt.constructContent(*args, **kwargs)
        # constructContent may return a falsy id; fall back to the requested one.
        if not new_id:
            new_id = id
        return new_id

    def _verifyObjectPaste(self, object, validate_src=1):
        """Overrides PortalFolder._verifyObjectPaste."""
        # XXX: What we do here is trick
        # PortalFolder._verifyObjectPaste in its check for
        # allowed content types. We make our typeinfo temporarily
        # unavailable.
        pt = getToolByName(self, 'portal_types')
        tmp_name = '%s_TMP' % self.portal_type
        ti = pt.getTypeInfo(self.portal_type)
        pt.manage_delObjects([self.portal_type])
        try:
            value = BaseFolder._verifyObjectPaste(self, object, validate_src)
        finally:
            # Always restore the type info, even when verification raises.
            pt._setObject(self.portal_type, ti)
        return value
"[email protected]"
] | |
701374cee88c862cf65ddf3b5bd64d648848c295 | c5e2ca3242cf86c4d6d9e5cff65763784aaaa708 | /NNCProject/Lower/motor_noPID/speedsensor/__init__.py | 510a73ab272c027e0ec490ade5193bdf43a6f8cc | [] | no_license | qingshangithub/Smart-car-tracking-with-opencv | bed8a3417102572963dc35bd6bdb80e226a93142 | 9d11d9b3f22acfc0f24002e6b420cbdc5d95f9cf | refs/heads/master | 2021-04-26T22:57:11.912021 | 2018-03-05T10:35:30 | 2018-03-05T10:35:30 | 123,901,874 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,821 | py | #! /usr/bin/python3
# -*- coding: utf-8 -*-
"""
monitor wheel speed via subprocess
"""
import pigpio
from multiprocessing import Process
from multiprocessing.sharedctypes import RawValue
from collections import deque
from time import sleep
# set up
left_sensor_gpio = 27
right_sensor_gpio = 17
cache_width = 3
time_out = 135
time_width_restr = 20000
# shared value
left_speed = RawValue('f', 0)
right_speed = RawValue('f', 0)
# shared lock
#ls_lock = Lock()
#rs_lock = Lock()
def init_monitor(ls, rs):
    """Run forever in a child process, publishing wheel speeds.

    ls/rs are multiprocessing shared floats that receive the rolling-mean
    speed computed from the left/right wheel sensor edge timings.
    """
    gpio = pigpio.pi()
    gpio.set_mode(left_sensor_gpio, pigpio.INPUT)
    gpio.set_mode(right_sensor_gpio, pigpio.INPUT)
    # Timestamps (pigpio microsecond ticks) of the last accepted edge.
    left_edge_time_tick = gpio.get_current_tick()
    right_edge_time_tick = gpio.get_current_tick()
    # Rolling windows of the last `cache_width` instantaneous speeds.
    left_speed_queue = deque(maxlen=cache_width)
    right_speed_queue = deque(maxlen=cache_width)
    def on_left_edge(gpio, level, tick):
        nonlocal ls, left_speed_queue, left_edge_time_tick
        speed = 0.0
        if len(left_speed_queue) >= cache_width:
            left_speed_queue.popleft()
        if level == pigpio.TIMEOUT:
            # Watchdog fired: no edge for `time_out` ms -> treat as stopped.
            left_speed_queue.clear()
            left_speed_queue.append(0.0)
            ls.value = speed
        else:
            span = tick - left_edge_time_tick
            # Debounce: ignore edges closer than time_width_restr microseconds.
            if span <= time_width_restr:
                return
            left_edge_time_tick = tick
            # 0.537 and 20 presumably encode wheel circumference (m) and
            # encoder slots per revolution -- TODO confirm.
            speed = 0.537 /(span * 20 / 1e6)
            left_speed_queue.append(speed)
            #lsl.acquire()
            ls.value = sum(left_speed_queue) / cache_width
            #lsl.release()
    def on_right_edge(gpio, level, tick):
        # Mirror of on_left_edge for the right wheel sensor.
        nonlocal rs, right_speed_queue, right_edge_time_tick
        speed = 0.0
        if len(right_speed_queue) >= cache_width:
            right_speed_queue.popleft()
        if level == pigpio.TIMEOUT:
            right_speed_queue.clear()
            right_speed_queue.append(0.0)
            rs.value = speed
        else:
            span = tick - right_edge_time_tick
            if span <= time_width_restr:
                return
            right_edge_time_tick = tick
            speed = 0.537 /(span * 20 / 1e6)
            right_speed_queue.append(speed)
            #rsl.acquire()
            rs.value = sum(right_speed_queue) / cache_width
            #rsl.release()
    # Register edge callbacks and watchdogs (TIMEOUT events feed the
    # callbacks when the wheel stops producing edges).
    left_callback = gpio.callback(left_sensor_gpio, pigpio.RISING_EDGE, on_left_edge)
    right_callback = gpio.callback(right_sensor_gpio, pigpio.RISING_EDGE, on_right_edge)
    gpio.set_watchdog(left_sensor_gpio, time_out)
    gpio.set_watchdog(right_sensor_gpio, time_out)
    # keep alive
    while True:
        sleep(100)
# NOTE(review): the monitor process is spawned at import time, so merely
# importing this package starts a background process -- confirm intent.
process = Process(target=init_monitor, args=(left_speed, right_speed))
process.start()
if __name__ == '__main__':
    # Simple console readout of the shared speed values.
    while True:
        print('left: %f, right: %f' % (left_speed.value, right_speed.value))
        sleep(0.5)
| [
"[email protected]"
] | |
623ef430fcec7b6661565e4ce037067bd4de392b | 9886ca9f3d81e5315925b01cc381d0ab4a01edae | /testrunner/test_defs/__init__.py | c205dcb8ca618a9e30f47b8433e3761bf98ef399 | [] | no_license | PPCDroid/development | 2f6dc67967597b268eccbc784d7c2ba543da1042 | f0c9a44521fb1844a54d26010064aa135d88b75c | refs/heads/master | 2020-04-06T03:46:18.241214 | 2010-04-14T17:27:40 | 2010-04-14T17:27:40 | 32,207,874 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 24 | py | __all__ = ['test_defs']
| [
"[email protected]"
] | |
f5d58bd54e17ea7fc6bdca478bf07eb4f6e3cb13 | a8cd35b8e523e90552a208eae762b739bad768b9 | /reserved/lotoop3.py | 3b131d8ebbbf37d2be67c3ae97b4567e514f5dd5 | [] | no_license | DmitriChe/Loto_OOP | d59eb27120c22318872ae475a47ab1e2bf0d3bf7 | 7747a1fc5bbfdd9c0e4c42db8c66d9815e095462 | refs/heads/master | 2020-09-17T06:54:12.322845 | 2020-01-08T21:25:15 | 2020-01-08T21:25:15 | 224,025,243 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,533 | py | from random import randint, shuffle, sample
class Bag:
    """The lotto bag: holds the 90 barrels in a random draw order."""

    def __init__(self):
        # A random permutation of the numbers 1..90.
        self.nums = sample(range(1, 91), 90)

    def next(self):
        """Draw the next barrel; raise once the bag is empty."""
        if not self.nums:
            raise Exception('Мешок пуст!')
        return self.nums.pop()

    def stats(self):
        """Report how many barrels remain."""
        print(f'В мешке {len(self.nums)} боченков.')
class Card:
    """A lotto card: 15 numbers spread over a 3x9 grid, 5 per row."""

    def __init__(self, name):
        self.player_name = name
        # Fifteen distinct numbers to place on the card.
        self.card_nums = sample(range(1, 91), 15)
        # Five occupied cells in each row of nine (rows: 0-8, 9-17, 18-26).
        self.place_idx = (sample(range(0, 9), 5)
                          + sample(range(9, 18), 5)
                          + sample(range(18, 27), 5))
        # Cell index -> value; 0 marks an empty cell, None a crossed-out one.
        self.card = dict.fromkeys(range(27), 0)
        for slot, value in zip(self.place_idx, self.card_nums):
            self.card[slot] = value

    def modify(self, num):
        """Cross out *num* on the card (mark its cell with None)."""
        slot = self.place_idx[self.card_nums.index(num)]
        self.card[slot] = None

    def show(self):
        """Print the card: a name header, three rows of nine cells, a footer."""
        print('-' + self.player_name + '-' * (26 - len(self.player_name) - 1))
        rows = []
        for row_start in range(0, 27, 9):
            cells = []
            for idx in range(row_start, row_start + 9):
                value = self.card[idx]
                if value is None:
                    cells.append('--')
                elif value == 0:
                    cells.append('  ')
                else:
                    cells.append(f'{value:2d}')
            rows.append(' '.join(cells))
        print('\n'.join(rows))
        print('-' * 26, end='\n\n')
class Player:
    """Base lotto player: owns a card and tracks the numbers left on it."""

    def __init__(self, name='Computer'):
        self.name = name
        self.card = Card(name)
        # Numbers still uncrossed; the player wins when this empties.
        self.nums = self.card.card_nums.copy()
        self.is_winner = False

    def step(self, num):
        """React to a drawn barrel: cross out *num* if it is on the card."""
        if num not in self.card.card_nums:
            return
        self.card.modify(num)
        self.nums.remove(num)
        self.is_winner = not self.nums

    def stats(self):
        """Report how many numbers remain for this player."""
        print('{}. Осталось {} чисел : {}'.format(self.name, len(self.nums), self.nums))
class Computer(Player):
    """Automatic player.

    Behaviour is identical to Player (whose name already defaults to
    'Computer'); the original duplicated Player's __init__/step/stats
    bodies verbatim, so they are removed here in favour of inheritance.
    """
class User(Player):
    """Human player: every drawn barrel must be confirmed on the console."""

    def __init__(self, name='User'):
        self.name = name
        self.card = Card(name)
        self.nums = self.card.card_nums.copy()
        self.is_winner = False
        # Set when the user answers incorrectly about a drawn number.
        self.is_looser = False
        self.answers = ['y', 'n']

    def step(self, num):
        """Ask whether to cross out *num* and check the answer's correctness."""
        answer = input('Зачеркнуть цифру? (y/n) ')
        while answer not in self.answers:
            answer = input('Не понял вас... Зачеркнуть цифру? (y/n) ')
        on_card = num in self.card.card_nums
        if answer == 'y':
            if on_card:
                self.card.modify(num)
                self.nums.remove(num)
                self.is_winner = not self.nums
            else:
                # Crossed out a number that is not on the card: loss.
                self.is_looser = True
        else:
            # Declined a number that actually is on the card: loss.
            if on_card:
                self.is_looser = True

    def stats(self):
        """Report how many numbers remain for this player."""
        print(f'{self.name}. Осталось {len(self.nums)} чисел : {self.nums}')
class Game:
    """Game controller: wires a human player, a computer player and the bag."""

    def __init__(self):
        self.user = User()
        self.compic = Computer()
        self.bag = Bag()

    def run(self):
        """Main loop: draw barrels until someone wins or the user slips up."""
        self.cards_show()
        while True:
            num = self.bag.next()
            print(f'Из мешка вынут боченок номер {num}!')
            self.step(num)
            if self.user.is_looser:
                print('\nСОЖАЛЕЮ, но ВЫ ПРОИГРАЛИ... Нужно быть внимательнее!')
                # Bug fix: the game previously kept running after the user
                # lost (this break was commented out), contradicting the
                # "you lost" message above.
                break
            self.cards_show()
            self.user.stats()
            self.compic.stats()
            self.bag.stats()
            if self.check_winner():
                break
            print('Игра продолжается...\n')

    def step(self, num):
        """Let both players react to the drawn barrel."""
        self.user.step(num)
        self.compic.step(num)

    def cards_show(self):
        """Print both players' cards."""
        self.user.card.show()
        self.compic.card.show()

    def check_winner(self):
        """Announce the outcome; return True when the game is over."""
        if self.user.is_winner and self.compic.is_winner:
            print('\nНИЧЬЯ!!!')
            return True
        elif self.user.is_winner:
            print('\nВЫ ПОБЕДИТЕЛЬ!')
            return True
        elif self.compic.is_winner:
            print('\nКОМПИК ПОБЕДИТЕЛЬ!')
            return True
        return False
# Entry point. The original module ran the game unconditionally at import
# time and carried ~40 lines of commented-out pre-refactor code; the dead
# code is removed and the run is guarded so importing the module is safe.
if __name__ == '__main__':
    game = Game()
    game.run()
| [
"[email protected]"
] | |
badaab65f38b1b1c297284d46f70fc02f18300be | 447964b1c47e2f7abb14af52c53f28706d220aa6 | /Assignment2/task1/task1_1.py | 87b7e21f541dfbea2a66eee5adb1e1b226d56326 | [] | no_license | psg0796/CS671---Deep-Learning | cb76408accb927f8e607621bd68dc98695f62dd1 | 354322a432afff5795e7fe8279b8949076ef5b41 | refs/heads/master | 2022-02-01T06:54:23.964706 | 2019-05-27T05:54:15 | 2019-05-27T05:54:15 | 176,881,206 | 0 | 2 | null | 2019-05-04T12:41:53 | 2019-03-21T06:12:25 | Python | UTF-8 | Python | false | false | 1,697 | py | import tensorflow as tf
import cv2
import numpy as np
imgDir1 = '../input1/'
imgDir2 = '../input2/'
imgFmt = 'jpg'
# Format of image = angle_length_width_color_variation
angle = 12
length = 2
width = 2
color = 2
variation1 = 600
variation2 = 400
def _load_split(img_dir, variation_count):
    """Load one dataset split from *img_dir*.

    Images are named '<angle>_<length>_<width>_<color>_<variation>.jpg'.
    The class label packs the four attribute indices into one integer:
    angle*8 + length*4 + width*2 + color (96 classes total).

    Returns an (images, labels) pair of lists; each loaded label is
    printed, matching the original script's progress output.
    """
    images = []
    labels = []
    for angle_it in range(angle):
        for length_it in range(length):
            for width_it in range(width):
                for color_it in range(color):
                    label = angle_it * 8 + length_it * 4 + width_it * 2 + color_it
                    for var_it in range(variation_count):
                        img_file = '%d_%d_%d_%d_%d.%s' % (
                            angle_it, length_it, width_it, color_it, var_it, imgFmt)
                        images.append(cv2.imread(img_dir + img_file))
                        labels.append(label)
                        print(label)
    return images, labels


# The original duplicated the 5-deep loop for the train and test splits and
# built an unused `count` list; both are removed by the shared helper.
x_train, y_train = _load_split(imgDir1, variation1)
x_test, y_test = _load_split(imgDir2, variation2)

np.save('x_train', np.asarray(x_train))
np.save('y_train', np.asarray(y_train))
np.save('x_test', np.asarray(x_test))
np.save('y_test', np.asarray(y_test))
| [
"[email protected]"
] | |
7879673ce7e8e19fcf1713410b26515ec93ba623 | 5d5ee0d873ef6e0ca78a0732cf0ae0b206c64973 | /pyzenfolio/__init__.py | 90e2cc90b5232ca661aad036aa2d41ae5051e109 | [
"MIT"
] | permissive | rheemix/pyzenfolio | 1b1920705b65fae17f5b69a2111b266f71f2f48b | ec5283b3f133b84098edf9332ca5997f067fb7e3 | refs/heads/master | 2022-09-16T09:13:48.319993 | 2020-05-26T00:42:22 | 2020-05-26T00:42:22 | 266,878,062 | 0 | 0 | null | 2020-05-25T20:53:18 | 2020-05-25T20:53:17 | null | UTF-8 | Python | false | false | 292 | py | from __future__ import print_function, unicode_literals
__version__ = '0.9.1'
__author__ = 'Miroslav Shubernetskiy'
try:
from .api import PyZenfolio # noqa
from .exceptions import APIError # noqa
from .helpers import search_sets_by_title # noqa
except ImportError:
pass
| [
"[email protected]"
] | |
bce042b9e53934834815a602db282cdf456ecd81 | f80ced6aeb588f2327ed43539cf0c18a3f0048ea | /src/applications/blog/migrations/0002_auto_20201116_2208.py | a9f8e24eaed40144cd11067f363658b2dfaf46ae | [] | no_license | NikitaMyslivets/KinoMonster | 0f7a32da6160f633594705e91eb17d60687b6171 | f3e40b2289f9993b39af324f88c42ba163debb42 | refs/heads/master | 2023-01-23T19:40:28.653236 | 2020-11-21T14:09:34 | 2020-11-21T14:09:34 | 287,334,097 | 0 | 0 | null | 2020-10-21T17:18:17 | 2020-08-13T16:54:06 | HTML | UTF-8 | Python | false | false | 507 | py | # Generated by Django 3.1.3 on 2020-11-16 19:08
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (2020-11-16): set Post's default ordering and make
    Post.title unique."""
    dependencies = [
        ('blog', '0001_initial'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='post',
            options={'ordering': ['-created_at', 'title', 'pk']},
        ),
        migrations.AlterField(
            model_name='post',
            name='title',
            field=models.TextField(unique=True),
        ),
    ]
| [
"[email protected]"
] | |
70eed38f5515a58a9da5c5511ec4eed1e85ef04e | ad32805a821fb06bde87a6d05c3d80ae477dc00b | /dashboard/apps.py | 58d606d5139a372d8afd11ecde1c87514208cb2e | [] | no_license | phrac/maintdx | a89a15e4d92f77b91016737a7b410a7579d07c6c | 583b308568e25da6d99fa6d41150602baa253377 | refs/heads/master | 2021-07-01T05:11:51.994071 | 2021-05-14T13:41:20 | 2021-05-14T13:41:20 | 131,013,331 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 158 | py | from django.apps import AppConfig
class DashboardConfig(AppConfig):
    """Django application configuration for the dashboard app."""
    # Use 64-bit auto-incrementing primary keys by default (Django 3.2+).
    default_auto_field = "django.db.models.BigAutoField"
    name = "maintdx.dashboard"
| [
"[email protected]"
] | |
6ec30b1ffef0a41f41e851ac89f1aecb4dbb58e1 | 305759f40ab397adbbd2548c993f0d07dae2cd2f | /191_superfluid/code/second_sounder.py | 676eac30d0013e56c406f1d4a3cb48534a73b837 | [] | no_license | nwuerfel/phys191 | b4e45e28047341fda2a23f7649bf34e99e64edea | 7d7d574fafb9549d799c1af68a357f7b5f93d775 | refs/heads/master | 2021-01-22T21:16:55.571029 | 2016-11-10T02:51:45 | 2016-11-10T02:51:45 | 68,753,175 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,653 | py | import matplotlib.pyplot as plt
import numpy as np
import os
from analyzer_function import PTconverter
data_Dir = '../data/ss_runs/'
pressures = np.array([12.5, 7.2, 10.8, 17, 21.5, 26, 30, 35])
distance_uncertainty = 0.5
time_uncertainty = 0.001
def second_sound():
vel_Data = np.empty([len(os.listdir(data_Dir)),3])
j = 0;
# use converter when its done
temperatures = PTconverter(pressures)
for file in os.listdir(data_Dir):
if file=='.DS_Store':
continue
print 'Doing the data dance: %s' % file
# noticed different results on eugene's computer
# this was a consequence of his file order being different
# proper check should extract the run from the filename
# then index into the pressure array to get the pressure
# we abuse the name convention
temp_index = int(file[7:-4])-1
print 'Temp index: %d' % temp_index
data = np.genfromtxt(data_Dir + file)
dist = data[:,1]
time = data[:,2]
vel = np.empty(len(dist), dtype=float)
vel_uncertainty = np.empty(len(dist), dtype=float)
for i in range(0,len(dist)):
vel[i]=dist[i]/time[i]
vel_uncertainty[i] = (dist[i]/time[i])*np.sqrt((distance_uncertainty/dist[i])**2 * (time_uncertainty/time[i])**2)
## ??? WHATS A STATISTICS?
mean_uncertainty = np.sqrt(np.sum(np.square(vel_uncertainty)))
total_uncertainty = np.sqrt(np.std(vel)**2 + mean_uncertainty**2)
# janky fucking shit
vel_Data[j,0] = temperatures[temp_index]
vel_Data[j,1] = np.mean(vel)
vel_Data[j,2] = total_uncertainty
j = j + 1
print vel_Data
vel_Data = vel_Data[vel_Data[:,0].argsort()]
print vel_Data
# plot that fucking shit yooooooooo
# try this stupid analytical function:
x = np.linspace(1.6,2.17,1000)
y = 26*np.sqrt((x/2.17)*(1-((x/2.17))**(5.5)))
plt.figure()
ax = plt.subplot()
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
label.set_fontsize(20)
# setup the function we're comparing to
plt.plot(x,y,linewidth=2, c='blue')
# errorbar our fucking data yo
plt.errorbar(vel_Data[:,0],vel_Data[:,1],yerr=vel_Data[:,2],elinewidth = 2,linewidth=2, c ='red',marker='o')
plt.title('Velocity of Second Sound', fontsize=36)
plt.xlabel('Temperature (K)', fontsize=28)
plt.ylabel('Velocity (m/s)', fontsize=28)
plt.tight_layout()
plt.show()
print 'temperatures:'
print vel_Data[:,0]
print 'velocity:'
print vel_Data[:,1]
return vel_Data
| [
"[email protected]"
] | |
db911474e6155992b6a1be0a87aadef13dad3736 | b17afc0f6e73e785b28be882878e3608d1fd28d1 | /UI_Data_from_Terminal_sensor/load_widget_graph.py | 2eb3e9bebc70c46de3705e53913fc46d436d16d3 | [] | no_license | Akshay-cita/UI-for-CO2-and-O2-Monitoring- | e05f979fe6915afcbffe0d4208660f6578375c1f | 502cc9776bac00607cd98261596623b605a5e7d9 | refs/heads/master | 2022-07-12T16:41:28.238379 | 2020-05-09T07:53:55 | 2020-05-09T07:54:22 | 262,518,606 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 752 | py | from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QWidget
from PyQt5.QtWidgets import QStackedWidget
class Ui_MainWindow(QtWidgets.QMainWindow):
    """Main application window hosting the graph widget as its central widget."""
    def __init__(self):
        super().__init__()
        self.Init_UI()
        self.loadWidget()
    def Init_UI(self):
        # Initial window size; showFullScreen() in __main__ overrides it.
        self.resize(787, 539)
    def loadWidget(self): ## to launch home window on startup
        # NOTE(review): imported lazily -- presumably to avoid a circular
        # import with the Graph module; confirm.
        from Graph import Ui_graph
        # NOTE(review): this assignment shadows QMainWindow.centralWidget().
        self.centralWidget = Ui_graph(self)
        self.show()
        self.setCentralWidget(self.centralWidget)
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
app.setStyle("fusion")
ui = Ui_MainWindow()
#ui.setupUi(MainWindow)
ui.showFullScreen()
sys.exit(app.exec_())
| [
"[email protected]"
] | |
409eab4148296c825a1ba657cdd98a0c995a8c40 | 8f15e2170d08e61b4ac70f75ab755967b0009338 | /mason/clients/s3/s3_client.py | efc9899f57547aef1565d4d013e0282d206fbcd7 | [
"Apache-2.0"
] | permissive | malave/mason | eb86d60b96b16b6e49482097474c05c9805b5f24 | bf45672124ef841bc16216c293034f4ccc506621 | refs/heads/master | 2023-06-12T21:59:46.858046 | 2021-06-11T16:07:18 | 2021-06-11T16:07:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,038 | py | from botocore.errorfactory import ClientError
from typing import Optional, List, Union, Tuple
import s3fs
from returns.result import Result, Success, Failure
from s3fs import S3FileSystem
from mason.clients.aws_client import AWSClient
from mason.clients.response import Response
from mason.engines.storage.models.path import Path, construct
from mason.util.exception import message
from mason.util.list import get
class S3Client(AWSClient):
    """Thin wrapper over s3fs for listing, sampling and copying S3 objects."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def client(self) -> S3FileSystem:
        """Build a fresh S3FileSystem from this client's AWS credentials."""
        s3 = s3fs.S3FileSystem(key=self.access_key, secret=self.secret_key, client_kwargs={'region_name': self.aws_region})
        return s3

    def parse_responses(self, s3_response: dict):
        """Extract (error code, HTTP status, error message) from a raw response."""
        error = s3_response.get('Error', {}).get('Code', '')
        status = s3_response.get('ResponseMetadata', {}).get('HTTPStatusCode')
        message = s3_response.get('Error', {}).get('Message')
        return error, status, message

    def parse_table_list_data(self, s3_response: dict):
        """Split a list_objects response into parsed items and common prefixes."""
        single_items = list(map(lambda x: self.parse_item(x), s3_response.get('Contents', [])))
        prefixes = list(map(lambda x: self.parse_prefixes(x), s3_response.get('CommonPrefixes', [])))
        return {
            "items": single_items,
            "prefixes": prefixes
        }

    def parse_prefixes(self, s3_response: dict):
        """Return the 'Prefix' field of a CommonPrefixes entry."""
        return s3_response.get("Prefix")

    def parse_item(self, s3_response: dict):
        """Map one 'Contents' entry to a {name, updated_at, size} dict."""
        table_parsed = {
            "name": s3_response.get("Key"),
            "updated_at": s3_response.get("LastModified"),
            "size": s3_response.get("Size")
        }
        return table_parsed

    def parse_items(self, s3_response: dict):
        """Parse every 'Contents' entry of a list_objects response."""
        return list(map(lambda x: self.parse_item(x), s3_response.get('Contents', [])))

    def list_objects(self, database_name: str, response: Response) -> Tuple[Result[dict, str], Response]:
        """List objects under 'bucket[/prefix]' given as *database_name*.

        Returns Success(raw response) or Failure(message); a missing
        bucket maps to HTTP 404 on the Response.
        """
        try:
            # database_name is 'bucket' or 'bucket/prefix'.
            split = database_name.split("/", 1)
            result = self.client().s3.list_objects(Bucket=split[0], Prefix=(get(split, 1) or '/'), Delimiter='/')
            response.add_response(result)
            return Success(result), response
        except Exception as e:
            if isinstance(e, ClientError):
                result = e.response
                error = result.get("Error", {})
                code = error.get("Code", "")
                if code == "NoSuchBucket":
                    response.set_status(404)
                    return Failure(f"The specified bucket does not exist: {database_name}"), response
            return Failure(message(e)), response

    # NOTE(review): `response: Response = Response()` is a mutable default
    # shared across calls that omit the argument -- confirm this is intended.
    def expand_path(self, path: Path, response: Response = Response(), sample_size: int = 3) -> Tuple[List[Path], Response]:
        """Expand *path* to the keys beneath it, sampling down to sample_size."""
        paths: List[Path] = []
        full_path = path.full_path()
        response.add_info(f"Fetching keys at {full_path}")
        keys = self.client().find(full_path)
        response.add_response({'keys': keys})
        if len(keys) > 0:
            paths = list(map(lambda k: Path(k, "s3"), keys))
            if sample_size:
                import random
                # Fall back to a sample of 3 when sample_size is not an int.
                try:
                    ss = int(sample_size)
                except TypeError:
                    response.add_warning(f"Invalid sample size (int): {sample_size}")
                    ss = 3
                response.add_warning(f"Sampling keys to determine schema. Sample size: {ss}.")
                if ss < len(paths):
                    paths = random.sample(paths, ss)
        return paths, response

    def table_path(self, database_name: str, table_name: str) -> Path:
        """Build the s3 Path for a table inside a database (bucket/prefix)."""
        return construct([database_name, table_name], "s3")

    def save_to(self, inpath: Path, outpath: Path, response: Response):
        """Upload the local file at *inpath* to *outpath*; errors go on response."""
        try:
            self.client().upload(inpath.path_str, outpath.path_str)
        except Exception as e:
            response.add_error(f"Error saving {inpath} to {outpath.path_str}")
            response.add_error(message(e))
        return response
| [
"[email protected]"
] | |
be0a82d0b1ed41ba803a64a433be71c62c647a36 | feb8524d7af1219e6e46027e19ff37dc6f0ef4be | /hashtag.py | d1fdc310ac14be86bc2af09d15679f1d965dc761 | [] | no_license | Kmiet/twint-followers-graph | c3552608e7b879a7ec2eafd7580828ae99e40c97 | 1d858ce55dfa62eecca80d97b82f861deb7a8c3c | refs/heads/master | 2021-04-05T12:32:34.821459 | 2020-06-01T07:48:51 | 2020-06-01T07:48:51 | 248,557,619 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,995 | py | import tweepy
import sys
import jsonpickle
import pandas as pd
import json
# SECURITY: live Twitter API keys and access tokens are hard-coded below.
# They should be revoked and loaded from environment variables instead.
auth = tweepy.OAuthHandler('2l5EsCB5xoR5X1QXM7Vllevu8', 'NuN25ZASb37sDGHhgw8DhHr99hzPjN4IrH25oDAlWKr8uWZCZP')
auth.set_access_token('1233360710794698752-sAAVzLUsFHzMxzOjxFUcDfJkCZTIXM', 'kVTQ7PY2D4XH3UAMIfKTfZssXOh79zNImWmuVHdRc6SST')
# wait_on_rate_limit makes tweepy sleep through Twitter's rate-limit windows.
api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
if not api:
    print("Can't Authenticate")
    sys.exit(-1)
# Default query; overwritten per user inside the scraping loop below.
searchQuery = '@SzumowskiLukasz'
retweet_filter='-filter:retweets'
q=searchQuery+retweet_filter
tweetsPerQry = 200  # page size for user_timeline requests
sinceId = None      # lower tweet-id bound; None = no bound
max_id = -1         # upper tweet-id bound used for backwards pagination
maxTweets = 200     # per-user cap on tweets fetched
tweetCount = 0
# Resume support: usernames are skipped until start_username is seen.  With
# start_username == "" and the flag already True, nothing is skipped here.
start_username = ""
after_start_username = True
COM = 31  # community id whose member usernames are scraped
fName = 'smaller100k_mention_comm_%s.txt' % COM
datContent = [i for i in open("./user_comm/smaller100k_mention_comm.txt").readlines()]
user_names = []
hashtags = dict()  # hashtag text -> occurrence count, accumulated over all users
# Each input line is "<community-id> <json list of usernames>".
for i in datContent:
    _com, _unames = i.split(' ', 1)
    com = int(_com)
    if com == COM:
        unames = json.loads(_unames)
        for u in unames:
            if after_start_username:
                user_names.append(u)
            if u == start_username:
                after_start_username = True
# Standard tweepy timeline pagination: walk each user's timeline backwards by
# repeatedly lowering max_id to just below the oldest tweet seen so far.
for i in user_names:
    print('proceeding:', user_names.index(i),'/',len(user_names))
    searchQuery = '@' + i
    print(searchQuery)
    q=searchQuery+retweet_filter
    q = searchQuery  # NOTE: immediately overwrites the filtered query above
    tweetCount = 0
    max_id = -1
    while tweetCount < maxTweets:
        try:
            if (max_id <= 0):
                # First page for this user.
                if (not sinceId):
                    new_tweets = api.user_timeline(screen_name=searchQuery, count=tweetsPerQry)
                else:
                    new_tweets = api.user_timeline(screen_name=searchQuery, count=tweetsPerQry,
                                                   since_id=sinceId)
            else:
                # Subsequent pages: only tweets strictly older than the last page.
                if (not sinceId):
                    new_tweets = api.user_timeline(screen_name=searchQuery, count=tweetsPerQry,
                                                   max_id=str(max_id - 1))
                else:
                    new_tweets = api.user_timeline(screen_name=searchQuery, count=tweetsPerQry,
                                                   max_id=str(max_id - 1),
                                                   since_id=sinceId)
            if not new_tweets:
                print("No more tweets found")
                break
            # Tally every hashtag found in this page of tweets.
            for tweet in new_tweets:
                htags = tweet._json['entities']['hashtags']
                for _h in htags:
                    h = _h['text']
                    if not hashtags.get(h):
                        hashtags[h] = 0
                    hashtags[h] += 1
            tweetCount += len(new_tweets)
            print(tweetCount)
            max_id = new_tweets[-1].id
        except tweepy.TweepError as e:
            # Just exit if any error
            # print("some error : " + str(e))
            break
# Persist the accumulated hashtag counts for this community.
with open('./hashtags/%s' % fName, 'w+') as f:
    f.write(json.dumps(hashtags))
"[email protected]"
] | |
f5cafa449fbcba08edc856d7cbabbc0f71c9725c | 1852be4726dc1d83780740678819192277159e0f | /LC/357.py | 1ec251b7e29960a9fed81b1639b47c11791f78e0 | [
"MIT"
] | permissive | szhu3210/LeetCode_Solutions | f0a32e30df54b655fdb9c7d48622382f29781409 | 64747eb172c2ecb3c889830246f3282669516e10 | refs/heads/master | 2020-06-30T05:45:40.550146 | 2017-08-11T04:10:25 | 2017-08-11T04:10:25 | 74,389,515 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 338 | py | class Solution(object):
def countNumbersWithUniqueDigits(self, n):
"""
:type n: int
:rtype: int
"""
choices = [9, 9, 8, 7, 6, 5, 4, 3, 2, 1]
ans, product = 1, 1
for i in range(n if n <= 10 else 10):
product *= choices[i]
ans += product
return ans | [
"[email protected]"
] | |
dbcaf98ce19f6c89c54c2f3b9ae3739bbfeb86ee | 6ff250d354ea3183bffdf7976f03e31aabde226c | /tests/test_platform_string.py | 18d0d9860eb3a425da639d91286d77ff1ccaa600 | [
"BSD-3-Clause"
] | permissive | pombredanne/infi.os_info | e94355be1653b1a839ad94a45ae9a0a595c12227 | d996a64a98581396b04117c0a201b69fc0f87260 | refs/heads/develop | 2021-01-17T20:06:24.664054 | 2017-11-16T12:12:53 | 2017-11-16T12:12:53 | 33,823,575 | 0 | 0 | null | 2017-11-16T12:12:54 | 2015-04-12T16:05:58 | Python | UTF-8 | Python | false | false | 27,798 | py | from infi import unittest
from infi.os_info import get_platform_string
test_subjects = [
dict(expected='linux-ubuntu-quantal-x64', system='Linux', architecture=('64bit', 'ELF'), processor='x86_64', release='3.5.0-40-generic', mac_ver=('', ('', '', ''), ''), linux_distribution=('Ubuntu', '12.10', 'quantal')),
dict(expected='linux-ubuntu-quantal-x64', system='Linux', architecture=('64bit', 'ELF'), processor='x86_64', release='3.5.0-40-generic', mac_ver=('', ('', '', ''), ''), linux_distribution=('Ubuntu', '12.10', 'quantal')),
dict(expected='linux-ubuntu-saucy-x64', system='Linux', architecture=('64bit', 'ELF'), processor='x86_64', release='3.11.0-26-generic', mac_ver=('', ('', '', ''), ''), linux_distribution=('Ubuntu', '13.10', 'saucy')),
dict(expected='linux-ubuntu-saucy-x64', system='Linux', architecture=('64bit', 'ELF'), processor='x86_64', release='3.11.0-26-generic', mac_ver=('', ('', '', ''), ''), linux_distribution=('Ubuntu', '13.10', 'saucy')),
dict(expected='linux-centos-6-x64', system='Linux', architecture=('64bit', 'ELF'), processor='x86_64', release='2.6.32-358.el6.x86_64', mac_ver=('', ('', '', ''), ''), linux_distribution=('CentOS', '6.4', 'Final')),
dict(expected='linux-ubuntu-oneiric-x86', system='Linux', architecture=('32bit', 'ELF'), processor='i686', release='3.0.0-32-generic', mac_ver=('', ('', '', ''), ''), linux_distribution=('Ubuntu', '11.10', 'oneiric')),
dict(expected='linux-ubuntu-oneiric-x86', system='Linux', architecture=('32bit', 'ELF'), processor='i686', release='3.0.0-32-generic', mac_ver=('', ('', '', ''), ''), linux_distribution=('Ubuntu', '11.10', 'oneiric')),
dict(expected='linux-ubuntu-oneiric-x86', system='Linux', architecture=('32bit', 'ELF'), processor='i686', release='3.0.0-32-generic', mac_ver=('', ('', '', ''), ''), linux_distribution=('Ubuntu', '11.10', 'oneiric')),
dict(expected='linux-ubuntu-oneiric-x86', system='Linux', architecture=('32bit', 'ELF'), processor='i686', release='3.0.0-32-generic', mac_ver=('', ('', '', ''), ''), linux_distribution=('Ubuntu', '11.10', 'oneiric')),
dict(expected='linux-ubuntu-saucy-x64', system='Linux', architecture=('64bit', 'ELF'), processor='x86_64', release='3.11.0-13-generic', mac_ver=('', ('', '', ''), ''), linux_distribution=('Ubuntu', '13.10', 'saucy')),
dict(expected='linux-ubuntu-saucy-x64', system='Linux', architecture=('64bit', 'ELF'), processor='x86_64', release='3.11.0-13-generic', mac_ver=('', ('', '', ''), ''), linux_distribution=('Ubuntu', '13.10', 'saucy')),
dict(expected='linux-ubuntu-saucy-x64', system='Linux', architecture=('64bit', 'ELF'), processor='x86_64', release='3.11.0-26-generic', mac_ver=('', ('', '', ''), ''), linux_distribution=('Ubuntu', '13.10', 'saucy')),
dict(expected='linux-ubuntu-saucy-x64', system='Linux', architecture=('64bit', 'ELF'), processor='x86_64', release='3.11.0-20-generic', mac_ver=('', ('', '', ''), ''), linux_distribution=('Ubuntu', '13.10', 'saucy')),
dict(expected='linux-ubuntu-oneiric-x86', system='Linux', architecture=('32bit', 'ELF'), processor='i686', release='3.0.0-32-generic', mac_ver=('', ('', '', ''), ''), linux_distribution=('Ubuntu', '11.10', 'oneiric')),
dict(expected='linux-ubuntu-oneiric-x86', system='Linux', architecture=('32bit', 'ELF'), processor='i686', release='3.0.0-32-generic', mac_ver=('', ('', '', ''), ''), linux_distribution=('Ubuntu', '11.10', 'oneiric')),
dict(expected='linux-centos-4-x64', system='Linux', architecture=('64bit', 'ELF'), processor='x86_64', release='2.6.9-89.EL', mac_ver=('', ('', '', ''), ''), linux_distribution=('CentOS', '4.8', 'Final')),
dict(expected='linux-centos-6-x64', system='Linux', architecture=('64bit', 'ELF'), processor='x86_64', release='2.6.32-358.14.1.el6.x86_64', mac_ver=('', ('', '', ''), ''), linux_distribution=('CentOS', '6.4', 'Final')),
dict(expected='linux-centos-6-x64', system='Linux', architecture=('64bit', 'ELF'), processor='x86_64', release='2.6.32-431.17.1.el6.iscsigw.x86_64', mac_ver=('', ('', '', ''), ''), linux_distribution=('CentOS', '6.5', 'Final')),
dict(expected='linux-centos-6-x64', system='Linux', architecture=('64bit', 'ELF'), processor='x86_64', release='2.6.32-358.14.1.el6.x86_64', mac_ver=('', ('', '', ''), ''), linux_distribution=('CentOS', '6.4', 'Final')),
dict(expected='linux-centos-6-x64', system='Linux', architecture=('64bit', 'ELF'), processor='x86_64', release='2.6.32-358.14.1.el6.x86_64', mac_ver=('', ('', '', ''), ''), linux_distribution=('CentOS', '6.4', 'Final')),
dict(expected='linux-centos-6-x64', system='Linux', architecture=('64bit', 'ELF'), processor='x86_64', release='2.6.32-358.el6.x86_64', mac_ver=('', ('', '', ''), ''), linux_distribution=('CentOS', '6.4', 'Final')),
dict(expected='linux-ubuntu-lucid-x86', system='Linux', architecture=('32bit', 'ELF'), processor='', release='2.6.32-54-generic', mac_ver=('', ('', '', ''), ''), linux_distribution=('Ubuntu', '10.04', 'lucid')),
dict(expected='linux-centos-6-x64', system='Linux', architecture=('64bit', 'ELF'), processor='x86_64', release='2.6.32-431.17.1.el6.iscsigw.x86_64', mac_ver=('', ('', '', ''), ''), linux_distribution=('CentOS', '6.5', 'Final')),
dict(expected='linux-redhat-5-x64', system='Linux', architecture=('64bit', 'ELF'), processor='x86_64', release='2.6.32-100.26.2.el5', mac_ver=('', ('', '', ''), ''), linux_distribution=('Red Hat Enterprise Linux Server', '5.6', 'Tikanga')),
dict(expected='linux-redhat-6-x86', system='Linux', architecture=('32bit', 'ELF'), processor='i686', release='2.6.32-100.34.1.el6uek.i686', mac_ver=('', ('', '', ''), ''), linux_distribution=('Red Hat Enterprise Linux Server', '6.1', 'Santiago')),
dict(expected='linux-ubuntu-lucid-x86', system='Linux', architecture=('32bit', 'ELF'), processor='', release='2.6.32-53-generic', mac_ver=('', ('', '', ''), ''), linux_distribution=('Ubuntu', '10.04', 'lucid')),
dict(expected='linux-redhat-7-x64', system='Linux', architecture=('64bit', 'ELF'), processor='x86_64', release='3.10.0-123.el7.x86_64', mac_ver=('', ('', '', ''), ''), linux_distribution=('Red Hat Enterprise Linux Server', '7.0', 'Maipo')),
dict(expected='linux-redhat-7-x64', system='Linux', architecture=('64bit', 'ELF'), processor='x86_64', release='3.10.0-123.el7.x86_64', mac_ver=('', ('', '', ''), ''), linux_distribution=('Red Hat Enterprise Linux Server', '7.0', 'Maipo')),
dict(expected='linux-redhat-7-x64', system='Linux', architecture=('64bit', 'ELF'), processor='x86_64', release='3.10.0-123.el7.x86_64', mac_ver=('', ('', '', ''), ''), linux_distribution=('Red Hat Enterprise Linux Server', '7.0', 'Maipo')),
dict(expected='linux-centos-7-x64', system='Linux', architecture=('64bit', 'ELF'), processor='x86_64', release='3.10.0-123.el7.x86_64', mac_ver=('', ('', '', ''), ''), linux_distribution=('CentOS Linux', '7.0.1406', 'Core')),
dict(expected='linux-centos-7-x64', system='Linux', architecture=('64bit', 'ELF'), processor='x86_64', release='3.10.0-123.el7.x86_64', mac_ver=('', ('', '', ''), ''), linux_distribution=('CentOS Linux', '7.0.1406', 'Core')),
dict(expected='linux-ubuntu-trusty-x64', system='Linux', architecture=('64bit', 'ELF'), processor='x86_64', release='3.13.0-32-generic', mac_ver=('', ('', '', ''), ''), linux_distribution=('Ubuntu', '14.04', 'trusty')),
dict(expected='linux-redhat-7-x64', system='Linux', architecture=('64bit', 'ELF'), processor='x86_64', release='3.10.0-123.6.3.el7.iscsigw.x86_64', mac_ver=('', ('', '', ''), ''), linux_distribution=('Red Hat Enterprise Linux Server', '7.0', 'Maipo')),
dict(expected='linux-redhat-7-x64', system='Linux', architecture=('64bit', 'ELF'), processor='x86_64', release='3.10.0-123.6.3.el7.iscsigw.x86_64', mac_ver=('', ('', '', ''), ''), linux_distribution=('Red Hat Enterprise Linux Server', '7.0', 'Maipo')),
dict(expected='linux-centos-6-x64', system='Linux', architecture=('64bit', 'ELF'), processor='x86_64', release='2.6.32-431.29.2.1.el6.izbox.x86_64', mac_ver=('', ('', '', ''), ''), linux_distribution=('CentOS', '6.5', 'Final')),
dict(expected='linux-centos-6-x64', system='Linux', architecture=('64bit', 'ELF'), processor='x86_64', release='2.6.32-431.29.2.1.el6.izbox.x86_64', mac_ver=('', ('', '', ''), ''), linux_distribution=('CentOS', '6.5', 'Final')),
dict(expected='linux-ubuntu-trusty-x86', system='Linux', architecture=('32bit', 'ELF'), processor='i686', release='3.13.0-35-generic', mac_ver=('', ('', '', ''), ''), linux_distribution=('Ubuntu', '14.04', 'trusty')),
dict(expected='linux-centos-7-x64', system='Linux', architecture=('64bit', 'ELF'), processor='x86_64', release='3.10.0-123.el7.x86_64', mac_ver=('', ('', '', ''), ''), linux_distribution=('CentOS Linux', '7.0.1406', 'Core')),
dict(expected='linux-ubuntu-natty-x86', system='Linux', architecture=('32bit', 'ELF'), processor='i686', release='2.6.38-16-generic', mac_ver=('', ('', '', ''), ''), linux_distribution=('Ubuntu', '11.04', 'natty')),
dict(expected='linux-ubuntu-lucid-x86', system='Linux', architecture=('32bit', 'ELF'), processor='', release='2.6.32-57-generic', mac_ver=('', ('', '', ''), ''), linux_distribution=('Ubuntu', '10.04', 'lucid')),
dict(expected='linux-centos-6-x64', system='Linux', architecture=('64bit', 'ELF'), processor='x86_64', release='2.6.32-431.17.1.el6.iscsigw.x86_64', mac_ver=('', ('', '', ''), ''), linux_distribution=('CentOS', '6.5', 'Final')),
dict(expected='linux-redhat-6-x86', system='Linux', architecture=('32bit', 'ELF'), processor='i686', release='2.6.32-122.el6.i686', mac_ver=('', ('', '', ''), ''), linux_distribution=('Red Hat Enterprise Linux Server', '6.1', 'Santiago')),
dict(expected='linux-ubuntu-lucid-x86', system='Linux', architecture=('32bit', 'ELF'), processor='', release='2.6.32-54-generic', mac_ver=('', ('', '', ''), ''), linux_distribution=('Ubuntu', '10.04', 'lucid')),
dict(expected='linux-redhat-5-x64', system='Linux', architecture=('64bit', 'ELF'), processor='x86_64', release='2.6.18-229.el5', mac_ver=('', ('', '', ''), ''), linux_distribution=('Red Hat Enterprise Linux Server', '5.6', 'Tikanga')),
dict(expected='linux-redhat-6-x86', system='Linux', architecture=('32bit', 'ELF'), processor='i686', release='2.6.32-122.el6.i686', mac_ver=('', ('', '', ''), ''), linux_distribution=('Red Hat Enterprise Linux Server', '6.1', 'Santiago')),
dict(expected='linux-redhat-6-x64', system='Linux', architecture=('64bit', 'ELF'), processor='x86_64', release='2.6.32-122.el6.x86_64', mac_ver=('', ('', '', ''), ''), linux_distribution=('Red Hat Enterprise Linux Server', '6.1', 'Santiago')),
dict(expected='linux-redhat-6-x64', system='Linux', architecture=('64bit', 'ELF'), processor='x86_64', release='2.6.32-122.el6.x86_64', mac_ver=('', ('', '', ''), ''), linux_distribution=('Red Hat Enterprise Linux Server', '6.1', 'Santiago')),
dict(expected='linux-redhat-5-x64', system='Linux', architecture=('64bit', 'ELF'), processor='x86_64', release='2.6.18-229.el5', mac_ver=('', ('', '', ''), ''), linux_distribution=('Red Hat Enterprise Linux Server', '5.6', 'Tikanga')),
dict(expected='linux-redhat-5-x86', system='Linux', architecture=('32bit', 'ELF'), processor='i686', release='2.6.18-229.el5PAE', mac_ver=('', ('', '', ''), ''), linux_distribution=('Red Hat Enterprise Linux Server', '5.6', 'Tikanga')),
dict(expected='linux-ubuntu-oneiric-x86', system='Linux', architecture=('32bit', 'ELF'), processor='i686', release='3.0.0-32-generic', mac_ver=('', ('', '', ''), ''), linux_distribution=('Ubuntu', '11.10', 'oneiric')),
dict(expected='linux-ubuntu-oneiric-x64', system='Linux', architecture=('64bit', 'ELF'), processor='x86_64', release='3.0.0-32-generic', mac_ver=('', ('', '', ''), ''), linux_distribution=('Ubuntu', '11.10', 'oneiric')),
dict(expected='windows-x64', system='Windows', architecture=('64bit', 'WindowsPE'), processor='', release='2012Server', mac_ver=('', ('', '', ''), ''), linux_distribution=('', '', '')),
dict(expected='linux-centos-6-x64', system='Linux', architecture=('64bit', 'ELF'), processor='x86_64', release='2.6.32-358.el6.x86_64', mac_ver=('', ('', '', ''), ''), linux_distribution=('CentOS', '6.4', 'Final')),
dict(expected='linux-ubuntu-precise-x86', system='Linux', architecture=('32bit', 'ELF'), processor='i686', release='3.2.0-56-generic-pae', mac_ver=('', ('', '', ''), ''), linux_distribution=('Ubuntu', '12.04', 'precise')),
dict(expected='linux-redhat-5-x86', system='Linux', architecture=('32bit', 'ELF'), processor='i686', release='2.6.18-229.el5', mac_ver=('', ('', '', ''), ''), linux_distribution=('Red Hat Enterprise Linux Server', '5.6', 'Tikanga')),
dict(expected='linux-ubuntu-precise-x86', system='Linux', architecture=('32bit', 'ELF'), processor='i686', release='3.2.0-57-generic-pae', mac_ver=('', ('', '', ''), ''), linux_distribution=('Ubuntu', '12.04', 'precise')),
dict(expected='linux-ubuntu-precise-x64', system='Linux', architecture=('64bit', 'ELF'), processor='x86_64', release='3.2.0-56-generic', mac_ver=('', ('', '', ''), ''), linux_distribution=('Ubuntu', '12.04', 'precise')),
dict(expected='linux-ubuntu-oneiric-x86', system='Linux', architecture=('32bit', 'ELF'), processor='i686', release='3.0.0-32-generic', mac_ver=('', ('', '', ''), ''), linux_distribution=('Ubuntu', '11.10', 'oneiric')),
dict(expected='linux-ubuntu-natty-x64', system='Linux', architecture=('64bit', 'ELF'), processor='x86_64', release='2.6.38-16-generic', mac_ver=('', ('', '', ''), ''), linux_distribution=('Ubuntu', '11.04', 'natty')),
dict(expected='linux-centos-6-x64', system='Linux', architecture=('64bit', 'ELF'), processor='x86_64', release='2.6.32-358.14.1.el6.x86_64', mac_ver=('', ('', '', ''), ''), linux_distribution=('CentOS', '6.4', 'Final')),
dict(expected='linux-ubuntu-oneiric-x64', system='Linux', architecture=('64bit', 'ELF'), processor='x86_64', release='3.0.0-32-generic', mac_ver=('', ('', '', ''), ''), linux_distribution=('Ubuntu', '11.10', 'oneiric')),
dict(expected='linux-ubuntu-saucy-x64', system='Linux', architecture=('64bit', 'ELF'), processor='x86_64', release='3.11.0-13-generic', mac_ver=('', ('', '', ''), ''), linux_distribution=('Ubuntu', '13.10', 'saucy')),
dict(expected='linux-ubuntu-precise-x64', system='Linux', architecture=('64bit', 'ELF'), processor='x86_64', release='3.2.0-56-generic', mac_ver=('', ('', '', ''), ''), linux_distribution=('Ubuntu', '12.04', 'precise')),
dict(expected='linux-ubuntu-natty-x64', system='Linux', architecture=('64bit', 'ELF'), processor='x86_64', release='2.6.38-16-generic', mac_ver=('', ('', '', ''), ''), linux_distribution=('Ubuntu', '11.04', 'natty')),
dict(expected='linux-ubuntu-precise-x86', system='Linux', architecture=('32bit', 'ELF'), processor='i686', release='3.2.0-57-generic-pae', mac_ver=('', ('', '', ''), ''), linux_distribution=('Ubuntu', '12.04', 'precise')),
dict(expected='linux-ubuntu-trusty-x86', system='Linux', architecture=('32bit', 'ELF'), processor='i686', release='3.13.0-24-generic', mac_ver=('', ('', '', ''), ''), linux_distribution=('Ubuntu', '14.04', 'trusty')),
dict(expected='linux-centos-4-x86', system='Linux', architecture=('32bit', 'ELF'), processor='i686', release='2.6.9-89.EL', mac_ver=('', ('', '', ''), ''), linux_distribution=('CentOS', '4.8', 'Final')),
dict(expected='linux-ubuntu-trusty-x86', system='Linux', architecture=('32bit', 'ELF'), processor='i686', release='3.13.0-35-generic', mac_ver=('', ('', '', ''), ''), linux_distribution=('Ubuntu', '14.04', 'trusty')),
dict(expected='linux-centos-6-x64', system='Linux', architecture=('64bit', 'ELF'), processor='x86_64', release='2.6.32-431.29.2.el6.x86_64', mac_ver=('', ('', '', ''), ''), linux_distribution=('CentOS', '6.5', 'Final')),
dict(expected='linux-ubuntu-saucy-x86', system='Linux', architecture=('32bit', 'ELF'), processor='i686', release='3.11.0-20-generic', mac_ver=('', ('', '', ''), ''), linux_distribution=('Ubuntu', '13.10', 'saucy')),
dict(expected='windows-x86', system='Windows', architecture=('32bit', 'WindowsPE'), processor='', release='2008Server', mac_ver=('', ('', '', ''), ''), linux_distribution=('', '', '')),
dict(expected='windows-x64', system='Windows', architecture=('64bit', 'WindowsPE'), processor='', release='2008Server', mac_ver=('', ('', '', ''), ''), linux_distribution=('', '', '')),
dict(expected='linux-centos-6-x64', system='Linux', architecture=('64bit', 'ELF'), processor='x86_64', release='2.6.32-358.14.1.el6.x86_64', mac_ver=('', ('', '', ''), ''), linux_distribution=('CentOS', '6.4', 'Final')),
dict(expected='linux-redhat-7-x64', system='Linux', architecture=('64bit', 'ELF'), processor='x86_64', release='3.10.0-123.el7.x86_64', mac_ver=('', ('', '', ''), ''), linux_distribution=('Red Hat Enterprise Linux Server', '7.0', 'Maipo')),
dict(expected='linux-centos-6-x64', system='Linux', architecture=('64bit', 'ELF'), processor='x86_64', release='2.6.32-358.14.1.el6.x86_64', mac_ver=('', ('', '', ''), ''), linux_distribution=('CentOS', '6.4', 'Final')),
dict(expected='linux-redhat-6-x64', system='Linux', architecture=('64bit', 'ELF'), processor='x86_64', release='2.6.32-122.el6.x86_64', mac_ver=('', ('', '', ''), ''), linux_distribution=('Red Hat Enterprise Linux Server', '6.1', 'Santiago')),
dict(expected='linux-redhat-5-x64', system='Linux', architecture=('64bit', 'ELF'), processor='x86_64', release='2.6.18-229.el5', mac_ver=('', ('', '', ''), ''), linux_distribution=('Red Hat Enterprise Linux Server', '5.6', 'Tikanga')),
dict(expected='linux-redhat-6-x86', system='Linux', architecture=('32bit', 'ELF'), processor='i686', release='2.6.32-122.el6.i686', mac_ver=('', ('', '', ''), ''), linux_distribution=('Red Hat Enterprise Linux Server', '6.1', 'Santiago')),
dict(expected='windows-x64', system='Windows', architecture=('64bit', 'WindowsPE'), processor='', release='2012Server', mac_ver=('', ('', '', ''), ''), linux_distribution=('', '', '')),
dict(expected='linux-ubuntu-natty-x86', system='Linux', architecture=('32bit', 'ELF'), processor='i686', release='2.6.38-16-generic', mac_ver=('', ('', '', ''), ''), linux_distribution=('Ubuntu', '11.04', 'natty')),
dict(expected='linux-redhat-5-x86', system='Linux', architecture=('32bit', 'ELF'), processor='i686', release='2.6.18-229.el5', mac_ver=('', ('', '', ''), ''), linux_distribution=('Red Hat Enterprise Linux Server', '5.6', 'Tikanga')),
dict(expected='linux-centos-6-x64', system='Linux', architecture=('64bit', 'ELF'), processor='x86_64', release='2.6.32-431.29.2.el6.x86_64', mac_ver=('', ('', '', ''), ''), linux_distribution=('CentOS', '6.5', 'Final')),
dict(expected='linux-centos-6-x64', system='Linux', architecture=('64bit', 'ELF'), processor='x86_64', release='2.6.32-431.29.2.1.el6.izbox.x86_64', mac_ver=('', ('', '', ''), ''), linux_distribution=('CentOS', '6.5', 'Final')),
dict(expected='linux-ubuntu-trusty-x64', system='Linux', architecture=('64bit', 'ELF'), processor='x86_64', release='3.13.0-36-generic', mac_ver=('', ('', '', ''), ''), linux_distribution=('Ubuntu', '14.04', 'trusty')),
dict(expected='linux-ubuntu-trusty-x64', system='Linux', architecture=('64bit', 'ELF'), processor='x86_64', release='3.13.0-36-generic', mac_ver=('', ('', '', ''), ''), linux_distribution=('Ubuntu', '14.04', 'trusty')),
dict(expected='linux-ubuntu-trusty-x64', system='Linux', architecture=('64bit', 'ELF'), processor='x86_64', release='3.13.0-36-generic', mac_ver=('', ('', '', ''), ''), linux_distribution=('Ubuntu', '14.04', 'trusty')),
dict(expected='linux-ubuntu-trusty-x64', system='Linux', architecture=('64bit', 'ELF'), processor='x86_64', release='3.13.0-36-generic', mac_ver=('', ('', '', ''), ''), linux_distribution=('Ubuntu', '14.04', 'trusty')),
dict(expected='linux-centos-5-x64', system='Linux', architecture=('64bit', 'ELF'), processor='x86_64', release='2.6.18-238.el5', mac_ver=('', ('', '', ''), ''), linux_distribution=('CentOS', '5.6', 'Final')),
dict(expected='linux-centos-5-x64', system='Linux', architecture=('64bit', 'ELF'), processor='x86_64', release='2.6.18-238.el5', mac_ver=('', ('', '', ''), ''), linux_distribution=('CentOS', '5.6', 'Final')),
dict(expected='linux-centos-5-x64', system='Linux', architecture=('64bit', 'ELF'), processor='x86_64', release='2.6.18-238.el5', mac_ver=('', ('', '', ''), ''), linux_distribution=('CentOS', '5.6', 'Final')),
dict(expected='linux-centos-6-x64', system='Linux', architecture=('64bit', 'ELF'), processor='x86_64', release='2.6.32-358.el6.x86_64', mac_ver=('', ('', '', ''), ''), linux_distribution=('CentOS', '6.4', 'Final')),
dict(expected='linux-centos-6-x86', system='Linux', architecture=('32bit', 'ELF'), processor='i686', release='2.6.32-71.el6.i686', mac_ver=('', ('', '', ''), ''), linux_distribution=('CentOS Linux', '6.0', 'Final')),
dict(expected='linux-centos-6-x64', system='Linux', architecture=('64bit', 'ELF'), processor='x86_64', release='2.6.32-431.el6.x86_64', mac_ver=('', ('', '', ''), ''), linux_distribution=('CentOS', '6.5', 'Final')),
dict(expected='linux-redhat-5-x86', system='Linux', architecture=('32bit', 'ELF'), processor='i686', release='2.6.18-238.el5PAE', mac_ver=('', ('', '', ''), ''), linux_distribution=('Red Hat Enterprise Linux Server', '5.6', 'Tikanga')),
dict(expected='windows-x64', system='Windows', architecture=('64bit', 'WindowsPE'), processor='', release='2012Server', mac_ver=('', ('', '', ''), ''), linux_distribution=('', '', '')),
dict(expected='windows-x64', system='Windows', architecture=('64bit', 'WindowsPE'), processor='', release='2012Server', mac_ver=('', ('', '', ''), ''), linux_distribution=('', '', '')),
dict(expected='windows-x86', system='Windows', architecture=('32bit', 'WindowsPE'), processor='', release='2008Server', mac_ver=('', ('', '', ''), ''), linux_distribution=('', '', '')),
dict(expected='windows-x64', system='Windows', architecture=('64bit', 'WindowsPE'), processor='', release='2008ServerR2', mac_ver=('', ('', '', ''), ''), linux_distribution=('', '', '')),
dict(expected='windows-x64', system='Windows', architecture=('64bit', 'WindowsPE'), processor='', release='2008ServerR2', mac_ver=('', ('', '', ''), ''), linux_distribution=('', '', '')),
dict(expected='windows-x86', system='Windows', architecture=('32bit', 'WindowsPE'), processor='', release='2008Server', mac_ver=('', ('', '', ''), ''), linux_distribution=('', '', '')),
dict(expected='windows-x64', system='Windows', architecture=('64bit', 'WindowsPE'), processor='', release='2012Server', mac_ver=('', ('', '', ''), ''), linux_distribution=('', '', '')),
dict(expected='windows-x64', system='Windows', architecture=('64bit', 'WindowsPE'), processor='', release='2008Server', mac_ver=('', ('', '', ''), ''), linux_distribution=('', '', '')),
dict(expected='windows-x86', system='Windows', architecture=('32bit', 'WindowsPE'), processor='', release='2008Server', mac_ver=('', ('', '', ''), ''), linux_distribution=('', '', '')),
dict(expected='windows-x64', system='Windows', architecture=('64bit', 'WindowsPE'), processor='', release='2012Server', mac_ver=('', ('', '', ''), ''), linux_distribution=('', '', '')),
dict(expected='windows-x64', system='Windows', architecture=('64bit', 'WindowsPE'), processor='', release='2008Server', mac_ver=('', ('', '', ''), ''), linux_distribution=('', '', '')),
dict(expected='windows-x64', system='Windows', architecture=('64bit', 'WindowsPE'), processor='', release='2008ServerR2', mac_ver=('', ('', '', ''), ''), linux_distribution=('', '', '')),
dict(expected='windows-x64', system='Windows', architecture=('64bit', 'WindowsPE'), processor='', release='2008ServerR2', mac_ver=('', ('', '', ''), ''), linux_distribution=('', '', '')),
dict(expected='windows-x86', system='Windows', architecture=('32bit', 'WindowsPE'), processor='', release='2008Server', mac_ver=('', ('', '', ''), ''), linux_distribution=('', '', '')),
dict(expected='windows-x64', system='Windows', architecture=('64bit', 'WindowsPE'), processor='', release='2008ServerR2', mac_ver=('', ('', '', ''), ''), linux_distribution=('', '', '')),
dict(expected='windows-x86', system='Windows', architecture=('32bit', 'WindowsPE'), processor='', release='2008Server', mac_ver=('', ('', '', ''), ''), linux_distribution=('', '', '')),
dict(expected='windows-x64', system='Windows', architecture=('64bit', 'WindowsPE'), processor='', release='2008ServerR2', mac_ver=('', ('', '', ''), ''), linux_distribution=('', '', '')),
dict(expected='linux-ubuntu-trusty-x64', system='Linux', architecture=('64bit', 'ELF'), processor='x86_64', release='3.13.0-35-generic', mac_ver=('', ('', '', ''), ''), linux_distribution=('Ubuntu', '14.04', 'trusty')),
dict(expected='linux-ubuntu-trusty-x64', system='Linux', architecture=('64bit', 'ELF'), processor='x86_64', release='3.13.0-36-generic', mac_ver=('', ('', '', ''), ''), linux_distribution=('Ubuntu', '14.04', 'trusty')),
dict(expected='windows-x64', system='Windows', architecture=('64bit', 'WindowsPE'), processor='', release='2008Server', mac_ver=('', ('', '', ''), ''), linux_distribution=('', '', '')),
dict(expected='linux-suse-11-x64', system='Linux', architecture=('64bit', 'ELF'), processor='x86_64', release='3.0.76-0.11-default', mac_ver=('', ('', '', ''), ''), linux_distribution=('SUSE Linux Enterprise Server ', '11', 'x86_64')),
dict(expected='linux-suse-11-x86', system='Linux', architecture=('32bit', 'ELF'), processor='i686', release='3.0.76-0.11-pae', mac_ver=('', ('', '', ''), ''), linux_distribution=('SUSE Linux Enterprise Server ', '11', 'i586')),
dict(expected='linux-suse-10-x64', system='Linux', architecture=('64bit', 'ELF'), processor='x86_64', release='2.6.16.60-0.85.1-default', mac_ver=('', ('', '', ''), ''), linux_distribution=('SUSE Linux Enterprise Server ', '10', 'x86_64')),
dict(expected='linux-suse-10-x86', system='Linux', architecture=('32bit', 'ELF'), processor='i686', release='2.6.16.60-0.85.1-bigsmp', mac_ver=('', ('', '', ''), ''), linux_distribution=('SUSE Linux Enterprise Server ', '10', 'i586')),
dict(expected='linux-suse-10-x86', system='Linux', architecture=('32bit', 'ELF'), processor='i686', release='2.6.16.60-0.85.1-bigsmp', mac_ver=('', ('', '', ''), ''), linux_distribution=('SUSE Linux Enterprise Server ', '10', 'i586')),
dict(expected='linux-suse-10-x64', system='Linux', architecture=('64bit', 'ELF'), processor='x86_64', release='2.6.16.60-0.85.1-default', mac_ver=('', ('', '', ''), ''), linux_distribution=('SUSE Linux Enterprise Server ', '10', 'x86_64')),
dict(expected='linux-suse-11-x64', system='Linux', architecture=('64bit', 'ELF'), processor='x86_64', release='3.0.76-0.11-default', mac_ver=('', ('', '', ''), ''), linux_distribution=('SUSE Linux Enterprise Server ', '11', 'x86_64')),
]
class FakePlatformMorule(object):
    """Minimal stand-in for the stdlib ``platform`` module.

    Exposes the callables ``get_platform_string`` consults -- ``system``,
    ``architecture``, ``processor``, ``release``, ``mac_ver`` and
    ``linux_distribution`` -- each returning the canned value supplied at
    construction time.
    """

    def __init__(self, system, architecture, processor, release, mac_ver, linux_distribution):
        canned = dict(system=system, architecture=architecture, processor=processor,
                      release=release, mac_ver=mac_ver,
                      linux_distribution=linux_distribution)
        for attribute, value in canned.items():
            setattr(self, attribute, self._constant(value))

    @staticmethod
    def _constant(value):
        # Bind *value* now so each fake attribute is a zero-argument callable.
        return lambda: value
class PlatformStringTestCase(unittest.TestCase):
    """Check get_platform_string() against every recorded platform sample."""

    @unittest.parameters.iterate('test_subject', test_subjects)
    def test_platform_string(self, test_subject):
        # pop() removes 'expected' so the remaining keys match
        # FakePlatformMorule's constructor signature exactly.
        expected = test_subject.pop('expected')
        # assertEquals is a long-deprecated alias of assertEqual.
        self.assertEqual(expected, get_platform_string(FakePlatformMorule(**test_subject)))
| [
"[email protected]"
] | |
71da03ce7c21791ffeeb73c95666121e3be50f6d | 821a920a8dd7c5857fa436b8fe51d5089e9b85b6 | /python_ds/stack/balancedBracketAlgo.py | 29f9b5f84ee51402c8b4917d5138fd1405065da4 | [] | no_license | mabdullahadeel/python-data-structures | bba7800287a6853a9f2c3011e25a01e92eec0e06 | 4ab1a7e429730b7e9bf2313665e5fe087da94cf7 | refs/heads/master | 2023-08-07T08:54:05.538709 | 2021-09-18T06:36:55 | 2021-09-18T06:36:55 | 365,546,688 | 1 | 1 | null | 2021-09-18T06:36:56 | 2021-05-08T15:20:04 | Python | UTF-8 | Python | false | false | 1,023 | py | from .stack import Stack
"""
The function `are_parenthesis_balanced` is a simple function
that checks if a given string has balanced bracket pair and
returns a boolean respectively
"""
def is_valid_pair(opening, closing):
    """Return True when *closing* is the matching right-hand bracket for *opening*."""
    matches = {"(": ")", "[": "]", "{": "}"}
    return opening in matches and matches[opening] == closing
def are_parenthesis_balanced(paren_str):
    """Return True iff every bracket in *paren_str* is properly matched and nested.

    Any character that is not an opening bracket is treated as a closing one,
    so the input is expected to contain only bracket characters.
    """
    pending = Stack()
    for ch in paren_str:
        if ch in "[{(":
            # Opening bracket: remember it until its partner arrives.
            pending.push(ch)
        else:
            # Closing bracket with nothing open: unbalanced.
            if pending.is_empty():
                return False
            if not is_valid_pair(opening=pending.pop(), closing=ch):
                return False
    # Balanced only if nothing was left open.
    return pending.is_empty()
if __name__ == "__main__":
    pass  # intentionally empty: module is meant to be imported, not run
"[email protected]"
] | |
cbd359e22736a0ac7bbf8539a4ca09b92820d3b5 | e00efe29353159e414fca0e2b0283d10e2f8b69e | /main.py | e9e69a85cf077598d1e784d73367956f4e80ed02 | [] | no_license | nerudxlf/parse_wos | a12299e626923ccc1f6dff3095dc0c7e146c5d4a | c11fd1c8653ef55da834a822c1463c24939b396c | refs/heads/main | 2023-01-28T10:36:46.976830 | 2020-12-07T06:31:46 | 2020-12-07T06:31:46 | 319,221,707 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 417 | py | from IO.out import out_excel
from IO.readTxt import read_txt
from parse.parseList import ParseList
def main():
    """Read WoS ids from id.txt, scrape each profile, and export to Excel.

    Pipeline: read_txt -> ParseList.parse (Chrome webdriver) -> out_excel.
    NOTE(review): the chromedriver path is hard-coded for a Windows layout —
    confirm it matches the local install before running.
    """
    name_txt = "id.txt"
    mode = "r"
    chrome_driver_path = 'C:/programs/chrome/chromedriver'
    arr_id = read_txt(name_txt, mode) # ids read from id.txt (presumably one per line — TODO confirm)
    pars = ParseList(chrome_driver_path) # scraper wrapping a Chrome webdriver
    arr_teacher = pars.parse(arr_id) # fetch info for every id
    out_excel(arr_teacher, arr_id) # write the results to an Excel workbook
| [
"[email protected]"
] | |
00d84e7bd459424a3ff23e6032272fbb566e64a2 | 9557ccb642d4ddb8dd70147d086ccadac23b1e95 | /baidu_gaofen-cup/code/00-data_sampling.py | 6ca911a82bfd614c3de886d484c407a87afd0cef | [] | no_license | gao-ye/Competition | f1bea96f406ae135b2611bf4b2e1c78ab0235c51 | 17320f62679f02eb816a4d916827407a4eb4068c | refs/heads/master | 2020-05-17T23:31:09.868625 | 2019-04-29T09:22:47 | 2019-04-29T09:22:47 | 184,035,198 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,835 | py | #-*- coding:utf-8 -*-
## 从原始样本中进行第一次采样
import pandas as pd
import numpy as np
radius = 15
sample_size = 5
labels_key = {'dadou': '大豆', 'shuidao': '水稻','yumi':'玉米'}
def data_sampling_1(name):
data_sampling = []
rectangle = pd.read_csv(name+'.txt',header=None)
rectangle = rectangle.values
rectangle = rectangle.astype(int)
print(rectangle[:3,:])
cnt = 0
[rows, cols] = rectangle.shape
for i in range(0, rows,2):
if(i<5):
sample_size = 8
else:
sample_size = 30
# print(i)
# cnt = 0
# print (i)
begin = rectangle[i+0, :]
end = rectangle[i+1,:]
for m in range(begin[0], end[0],sample_size):
for n in range(begin[1], end[1],sample_size):
# print("{} {}".format(m,n))
cnt = cnt+1
data_sampling.append(['其他' , m, n])
print("cnt is{}".format( cnt))
data_sampling = np.array(data_sampling)
data_sampling = pd.DataFrame(data_sampling, index=None, columns=None)
data_sampling.to_csv('./data/final-other-data.txt', header=None, index=None)
print("ok")
def data_sampling(name):
data = pd.read_csv(name)
data = data.values
print(data.shape)
print(data[:3])
data = data[:, [2, 5, 6]]
# print(res[:3, :]) # 样例输出
data[:, -1] = [round(-i) for i in data[:, -1]]
data[:, -2] = [round(i) for i in data[:, -2]]
print(data[:3])
print(type(data))
temp_data = pd.DataFrame(data, index=None, columns=None)
temp_data.to_csv('./data/train_sample_3col.txt', header=None, index=None)
[rows,cols] = data.shape
rectangle = []
for i in range(rows):
[n, x, y] = data[i,:]
rectangle.append([n, int(x-radius), int(y-radius)])
rectangle.append([n, int(x+radius), int(y+radius)])
rectangle = np.array(rectangle)
print(rectangle.shape)
[rows, cols] = rectangle.shape
cnt = 0
sampling =[]
for i in range(0, rows,2):
# print (i)
[kind, x1, y1] = rectangle[i+0, :]
[kind, x2, y2] = rectangle[i+1, :]
for m in range(int(x1), int(x2),sample_size):
for n in range(int(y1), int(y2),sample_size):
cnt = cnt+1
sampling.append([kind, m, n])
print("cnt is{}".format( cnt))
sampling = np.array(sampling)
temp_data = pd.DataFrame(sampling, index=None, columns=None)
temp_data.to_csv('./data/sampling-data.txt', header=None, index=None)
if __name__ == '__main__':
ss = 'data/background' #sample_size = 40 other sample_size =50
data_sampling_1(ss) ##从矩形区域采样
# ss = './data/train_sample.txt'
ss = './data/训练样本点.txt'
data_sampling(ss) ##从矩形区域采样
| [
"[email protected]"
] | |
813c19241491716e4df3b0f1a63f6f1be1b59feb | d78bd6b82eea7876ee7c5c38ebd23ef8f942b9da | /Text processing/extract_sentence_srt.py | 82eb730026d41485a4aeeb00b211c9f3e03e9e28 | [] | no_license | mmggbj/Graduation-Design | 098ab6ebacfd045f0b970545e0a2e9a07033a59d | cd4d2d752f047990510cf41c8d2209ccaadadccc | refs/heads/master | 2020-05-27T14:58:25.062854 | 2019-05-18T08:57:03 | 2019-05-18T08:57:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,143 | py | # coding:utf-8
import chardet
import os
import re
cn=r"([\u4e00-\u9fa5]+)"
pattern_cn = re.compile(cn)
jp1=r"([\u3040-\u309F]+)"
pattern_jp1 = re.compile(jp1)
jp2=r"([\u30A0-\u30FF]+)"
pattern_jp2 = re.compile(jp2)
for root, dirs, files in os.walk("./srt"):
file_count = len(files)
if file_count > 0:
for index, file in enumerate(files):
f = open(root + "/" + file, "r")
content = f.read()
f.close()
encoding = chardet.detect(content)["encoding"]
try:
for sentence in content.decode(encoding).split('\n'):
if len(sentence) > 0:
match_cn = pattern_cn.findall(sentence)
match_jp1 = pattern_jp1.findall(sentence)
match_jp2 = pattern_jp2.findall(sentence)
sentence = sentence.strip()
if len(match_cn)>0 and len(match_jp1)==0 and len(match_jp2) == 0 and len(sentence)>1 and len(sentence.split(' ')) < 10:
print (sentence.encode('utf-8'))
except:
continue
| [
"[email protected]"
] | |
2dec658552557ff5591d0acfe41888682e0df317 | 14b0a22e2b7dc8c75ff6baa5994695f23e61f19b | /microsoft malware/Malware_Say_No_To_Overfitting/kaggle_Microsoft_malware_small/daf.py | 3c4df74a598ae0e06408d56e5259d4c1ee07d5b8 | [
"Apache-2.0"
] | permissive | bikash/kaggleCompetition | 8d9fdd59146bdd9132b2a996cb7344338f4e1653 | c168f5a713305f6cf6ef41db60d8b1f4cdceb2b1 | refs/heads/master | 2016-08-11T21:09:00.759071 | 2016-01-11T02:47:30 | 2016-01-11T02:47:30 | 36,067,644 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,404 | py | import heapq
import pickle
import math
from csv import DictReader
import glob
import os
import csv
from datetime import datetime
# generate dfs features and dll call features.
#three different types: memory, constant, register
# memory: dword, word, byte
# constant: arg, var
# register: eax ebx ecx edx esi edi esp ebp ax bx cx dx ah bh ch dh al bl cl dl
def get_pattern(lst):
    """Build an 'opcode_type_type' pattern from a two-element instruction list.

    `lst` is expected to be [opcode, "operandA, operandB"]. Returns None when
    the list is not length 2 or the operand string does not split into exactly
    two ', '-separated operands.
    """
    if len(lst) != 2:
        return None
    opcode, operand_str = lst
    operands = operand_str.split(', ')
    if len(operands) != 2:
        return None
    return '_'.join([opcode, id_pattern(operands[0]), id_pattern(operands[1])])
def id_pattern(s):
    """Classify an assembly operand string.

    Returns one of 'memory', 'register', 'constant', 'number', 'other' via
    substring tests (same markers and precedence as the original heuristic).
    """
    memory_markers = ('dword', 'word', 'byte')
    register_markers = ('ax', 'bx', 'cx', 'dx', 'ah', 'bh', 'ch', 'dh',
                        'al', 'bl', 'cl', 'dl', 'esi', 'edi', 'esp', 'ebp')
    constant_markers = ('arg', 'var')
    # Order matters: e.g. 'dword ptr [eax]' must classify as memory, not register.
    if any(marker in s for marker in memory_markers):
        return 'memory'
    if any(marker in s for marker in register_markers):
        return 'register'
    if any(marker in s for marker in constant_markers):
        return 'constant'
    if is_hex(s):
        return 'number'
    return 'other'
def is_hex(s):
    """Return True if `s` parses as a base-16 integer (e.g. 'ff' or '0x1a')."""
    try:
        int(s, 16)
    except ValueError:
        return False
    return True
# get the 500 4-gram features for specific class
def ngram_features(path, c):
    """Return the 750 4-gram feature names belonging to class `c` (1-based).

    Reads only the header line of the CSV at `path`; column 0 is assumed to be
    an id column, after which each class occupies a consecutive slice of 750
    feature columns.
    NOTE(review): the file is opened in 'rb' but processed with str methods —
    this is Python 2 code; under Python 3 the bytes/str mix raises TypeError.
    """
    with open(path,'rb') as f:
        features = f.readline().replace('"', '').strip().split(',')
    return features[(c-1)*750+1:c*750+1]
# load file names
def load_label(path, label):
    """Return the Id of every row in the labels CSV whose Class equals `label`."""
    return [row['Id'] for row in DictReader(open(path))
            if int(row['Class']) == label]
def daf_single_file(f, feature_set, N = 4):
pattern_dict = dict()
f_lines = list()
with open(f, 'rb') as outfile:
for line in outfile:
if 'text' in line and ',' in line and ';' not in line:
f_lines.append(line.lower())
for line in xrange(len(f_lines)):
y = [i.strip().split()[1:] for i in f_lines[line:line+4]]
g_list = []
for l in y:
g_list += [i for i in l if is_hex(i) and len(i) == 2]
grams_string = [''.join(g_list[i:i+N]) for i in xrange(len(g_list)-N+1)]
if any(grams in feature_set for grams in grams_string):
# start collect the 3-element patterns.
p = [i.strip().split(' ')[1:] for i in f_lines[line:line+4]]
for e in p:
if e and ',' in e[-1]:
tmp_list = [x.strip() for x in e if x != '']
p = get_pattern(tmp_list)
if p and p not in pattern_dict:
pattern_dict[p] = 1
return pattern_dict
def reduce_dict():
dict_all = dict()
for c in range(1,10):
feature_set = ngram_features('train_data_750.csv',c)
f_labels = load_label('trainLabels.csv', c)
for f in f_labels:
f_name = 'train/'+f+'.asm'
daf = daf_single_file(f_name, feature_set)
for feature in daf:
if feature not in dict_all:
dict_all[feature] = [0]*9
dict_all[feature][c-1] +=1
#print "finishing features in class %i"%c
return dict_all
# load data
def num_instances(path, label):
    """Return (p, n): row counts whose Class equals `label` vs. all other rows."""
    classes = [int(row['Class']) for row in DictReader(open(path))]
    p = sum(1 for value in classes if value == label)
    return p, len(classes) - p
def entropy(p, n):
    """Binary entropy (in nats) of a split with `p` positives and `n` negatives.

    Both counts must be strictly positive: math.log(0) raises ValueError.
    """
    total = p + n
    pos = float(p) / total
    neg = float(n) / total
    return -pos * math.log(pos) - neg * math.log(neg)
def info_gain(p0, n0, p1, n1, p, n):
    """Information gain of splitting (p, n) into branches (p0, n0) and (p1, n1).

    Parent entropy minus the size-weighted entropy of each branch.
    """
    total = p + n
    gain = entropy(p, n)
    gain -= float(p0 + n0) / total * entropy(p0, n0)
    gain -= float(p1 + n1) / total * entropy(p1, n1)
    return gain
def Heap_gain(p, n, class_label, dict_all, num_features = 500, gain_minimum_bar = -1000):
    """Select up to `num_features` grams with the highest info gain for one class.

    p, n        -- total positive/negative instance counts for `class_label`
    class_label -- 1-based class index into each per-gram count list
    dict_all    -- gram -> per-class occurrence counts (list of ints)

    Keeps a fixed-size min-heap of (gain, gram) tuples; placeholder entries
    that were never displaced are dropped from the result.
    """
    heap = [(gain_minimum_bar, 'gain_bar')] * num_features
    for gram, count_list in dict_all.items():  # .items() works on both py2 and py3
        p1 = count_list[class_label - 1]
        n1 = sum(count_list[:(class_label - 1)] + count_list[class_label:])
        p0, n0 = p - p1, n - n1
        # Skip grams that leave any split cell empty: entropy(0, x) is undefined.
        if p1 * p0 * n1 * n0 != 0:
            gain = info_gain(p0, n0, p1, n1, p, n)
            # BUG FIX: compare against the *current* heap minimum (heap[0]).
            # The old code reused the element popped by the previous
            # heapreplace as the threshold, a stale value that let weaker
            # grams enter (or kept stronger ones out of) the top-k set.
            if gain > heap[0][0]:
                heapq.heapreplace(heap, (gain, gram))
    return [gram for _, gram in heap if gram != 'gain_bar']
def gen_df(features_all, train = True, verbose = False, N = 4):
yield ['Id'] + features_all # yield header
if train == True:
ds = 'train'
else:
ds = 'test'
directory_names = list(set(glob.glob(os.path.join(ds, "*.asm"))))
for f in directory_names:
f_id = f.split('/')[-1].split('.')[0]
if verbose == True:
print 'doing %s'%f_id
binary_features = list()
tmp_pattern = dict()
f_lines = list()
with open(f, 'rb') as outfile:
for line in outfile:
if 'text' in line and ',' in line and ';' not in line:
f_lines.append(line.lower())
for line in f_lines:
e = line.strip().split(' ')[1:]
if e and ',' in e[-1]:
tmp_list = [x.strip() for x in e if x != '']
p = get_pattern(tmp_list)
if p and p not in tmp_pattern:
tmp_pattern[p] = 1
for fea in features_all:
if fea in tmp_pattern:
binary_features.append(1)
else:
binary_features.append(0)
yield [f_id] + binary_features
if __name__ == '__main__':
start = datetime.now()
dict_all = reduce_dict()
features_all = []
for i in range(1,10):
p, n = num_instances('trainLabels.csv', i)
features_all += Heap_gain(p,n,i,dict_all)
train_data = gen_df(features_all, train = True, verbose = False)
with open('train_daf.csv','wb') as outfile:
wr = csv.writer(outfile, delimiter=',', quoting=csv.QUOTE_ALL)
for row in train_data:
wr.writerow(row)
test_data = gen_df(features_all, train = False,verbose = False)
with open('test_daf.csv','wb') as outfile:
wr = csv.writer(outfile, delimiter=',', quoting=csv.QUOTE_ALL)
for row in test_data:
wr.writerow(row)
print "DONE DAF features!"
#print datetime.now() - start
| [
"[email protected]"
] | |
aa534227e6de535ec3870cee1c57ed6395311d64 | edf89640f9363687f619198621accb9e5afe9185 | /10.debug/02.py | a00185c8ac0497a0cb72079abace3417a4d77b07 | [] | no_license | Little-Captain/automate-the-boring-stuff-with-python | 3e27cfa90ec0cb50b90f16eb5bd7697770e0d2c0 | c23fefadfab2d7cc4f52fc9a15b81b797d903ea1 | refs/heads/master | 2020-08-28T03:12:00.330165 | 2020-02-06T14:08:02 | 2020-02-06T14:08:02 | 217,571,440 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 951 | py | #!/usr/bin/env python
# 如果 Python 遇到错误,它就会生成一些错误信息,称为“反向跟踪”
# 反向跟踪包含了出错消息、导致该错误的代码行号,以及导致该错误的函数调用的序列
# 这个序列称为“调用栈”
# 在从多个位置调用函数的程序中,调用栈就能帮助你确定哪次调用导致了错误
def spam():
bacon()
def bacon():
raise Exception('This is the error message.')
# spam()
# 只要抛出的异常没有被处理,Python 就会显示反向跟踪
# 但你也可以调用 traceback.format_exc(),得到它的字符串形式
# 如果你希望得到异常的反向跟踪的信息,但也希望 except 语句优雅地
# 处理该异常,这个函数就很有用。在调用该函数之前,
# 需要导入 Python 的 traceback 模块
import traceback
try:
spam()
except Exception as err:
print(str(err))
print(traceback.format_exc())
| [
"[email protected]"
] | |
9cffb83a1057a8c83e3492e8c6139db49cc816f7 | 7d40f818623406ef81c92a16c3a6ede76f5f2780 | /gamble_env/bin/fitscheck | 10a76d70cdef3b375e0d0bc5a6a617c407102e37 | [] | no_license | drewmacleod/Bundesliga_Game_Previews_2020 | 9e66d8dc14933f0dde906568ff2a121b088d74be | 556b1dfde8c051412e3a407c8e609994ecffe4d5 | refs/heads/master | 2022-07-06T06:28:18.802765 | 2020-05-18T21:58:39 | 2020-05-18T21:58:39 | 264,766,584 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 291 | #!/Users/drewmacleod/Documents/Gambling/Bundesliga/gamble_env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from astropy.io.fits.scripts.fitscheck import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
20995b38fcafd6b7d9a48b54306a4288ded65f44 | c798b2aef78f945f7602f4a2688a332455a62013 | /django_all/users/migrations/0006_auto_20200823_1833.py | b2ca7f56ad5acdf9080ae33ab6ee46fa56a3e9ec | [] | no_license | Souliphon/Comma-Project | 5c9502fd0e2d149e686252660d34ae06751f97aa | a1e7f613c1b8f5469a8028abcde032d851c3d8cd | refs/heads/master | 2023-01-27T16:24:17.311084 | 2020-11-24T06:12:54 | 2020-11-24T06:12:54 | 313,191,550 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 489 | py | # Generated by Django 3.0.5 on 2020-08-23 11:33
from django.db import migrations
import phone_field.models
class Migration(migrations.Migration):
dependencies = [
('users', '0005_auto_20200703_1153'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='tel',
field=phone_field.models.PhoneField(blank=True, help_text='Contact phone number e.g 2055667788-856', max_length=31, null=True),
),
]
| [
"[email protected]"
] | |
2acc707e0b4f975a61c4d1f3c42e2d2fb6f433e4 | 431931011af6b9f811590cd098925b6fd9f5753e | /sampark/migrations/0001_initial.py | 2592e9620873699f59015855d0e3d897aa247000 | [] | no_license | aastha007/trip-travel | b58b3e2aea90f4bb7d78b33f498d2c6d60a2724d | 39fa67345309c526cb5179ae33175916952e892b | refs/heads/master | 2021-01-02T23:50:47.062112 | 2017-08-06T19:10:15 | 2017-08-06T19:10:15 | 99,507,721 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,209 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-07-18 16:27
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='contus',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=25)),
('email', models.CharField(max_length=50)),
('pno', models.IntegerField()),
('cf', models.TextField()),
],
),
migrations.CreateModel(
name='sugus',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sname', models.CharField(max_length=25)),
('email', models.EmailField(max_length=50)),
('country', models.CharField(max_length=30)),
('place', models.CharField(max_length=25)),
('about', models.TextField()),
],
),
]
| [
"[email protected]"
] | |
408aca0bd3c6d2c89436ff089e3fcce6c9765012 | c02778e40ab4d3e5aa3cea36cc25903d785b745e | /equalloudness.py | a6b8e1a170872eb7c87b53fcdac351e2f4ef66ea | [] | no_license | anoop901/musictext | 53a45c7dd3a06513732edbb78c330ac5c7759a65 | e1331b2a6902294adaeb43a66efaee4414f7ddf3 | refs/heads/master | 2021-01-12T00:53:09.904239 | 2017-01-07T23:51:48 | 2017-01-07T23:51:48 | 78,310,697 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 944 | py | import bisect
x = [
20,
25,
31.5,
40,
50,
63,
80,
100,
125,
160,
200,
250,
315,
400,
500,
630,
800,
1000,
1250,
1600,
2000,
2500,
3150,
4000,
5000,
6300,
8000,
10000,
12500,
16000,
20000
]
y = [
99.85,
93.94,
88.17,
82.63,
77.78,
73.08,
68.48,
64.37,
60.59,
56.7,
53.41,
50.4,
47.58,
44.98,
43.05,
41.34,
40.06,
40.01,
41.82,
42.51,
39.23,
36.51,
35.61,
36.65,
40.01,
45.83,
51.8,
54.28,
51.49,
51.96,
92.77
]
def lookup(freq):
    """Return the equal-loudness table value at `freq` Hz.

    Linearly interpolates the module-level (x, y) table and clamps to the
    first/last table value when `freq` falls outside [x[0], x[-1]].
    """
    # bisect.bisect (bisect_right) gives the insertion point, so when
    # 0 < i < len(x) we have x[i-1] <= freq < x[i].
    i = bisect.bisect(x, freq)
    if i == 0:
        # Below the table range: clamp to the lowest-frequency value.
        return y[0]
    elif i == len(x):
        # At or beyond the last table entry: clamp to the final value.
        return y[i - 1]
    else:
        # Linear interpolation between the two bracketing table points.
        return y[i - 1] + (freq - x[i - 1]) * (y[i] - y[i - 1]) / (x[i] - x[i - 1])
"[email protected]"
] | |
02f0c6d57be372aaec5c79af4bdd4f2379918dc7 | e90bf4b372da78ceec15282d060b48d18ba8d4e9 | /tests/dbus_service_mocks/network_manager.py | 12af2204ac9c11e50ba611f8d0159b8ff7975265 | [
"Apache-2.0"
] | permissive | home-assistant/supervisor | 67f2e1755ff5fbf7cf2084351e1c32c6995274e0 | 4838b280adafed0997f32e021274b531178386cd | refs/heads/main | 2023-08-31T22:51:25.949277 | 2023-08-31T08:01:42 | 2023-08-31T08:01:42 | 84,926,758 | 928 | 477 | Apache-2.0 | 2023-09-14T17:11:27 | 2017-03-14T08:54:15 | Python | UTF-8 | Python | false | false | 10,546 | py | """Mock of Network Manager service."""
from dbus_fast.service import PropertyAccess, dbus_property, signal
from .base import DBusServiceMock, dbus_method
BUS_NAME = "org.freedesktop.NetworkManager"
def setup(object_path: str | None = None) -> DBusServiceMock:
"""Create dbus mock object."""
return NetworkManager()
# pylint: disable=invalid-name
class NetworkManager(DBusServiceMock):
"""Network Manager mock.
gdbus introspect --system --dest org.freedesktop.NetworkManager --object-path /org/freedesktop/NetworkManager
"""
interface = "org.freedesktop.NetworkManager"
object_path = "/org/freedesktop/NetworkManager"
version = "1.22.10"
connectivity = 4
devices = [
"/org/freedesktop/NetworkManager/Devices/1",
"/org/freedesktop/NetworkManager/Devices/3",
]
@dbus_property(access=PropertyAccess.READ)
def Devices(self) -> "ao":
"""Get Devices."""
return self.devices
@dbus_property(access=PropertyAccess.READ)
def AllDevices(self) -> "ao":
"""Get AllDevices."""
return [
"/org/freedesktop/NetworkManager/Devices/1",
"/org/freedesktop/NetworkManager/Devices/2",
"/org/freedesktop/NetworkManager/Devices/3",
]
@dbus_property(access=PropertyAccess.READ)
def Checkpoints(self) -> "ao":
"""Get Checkpoints."""
return []
@dbus_property(access=PropertyAccess.READ)
def NetworkingEnabled(self) -> "b":
"""Get NetworkingEnabled."""
return True
@dbus_property()
def WirelessEnabled(self) -> "b":
"""Get WirelessEnabled."""
return True
@WirelessEnabled.setter
def WirelessEnabled(self, value: "b"):
"""Set WirelessEnabled."""
self.emit_properties_changed({"WirelessEnabled": value})
@dbus_property(access=PropertyAccess.READ)
def WirelessHardwareEnabled(self) -> "b":
"""Get WirelessHardwareEnabled."""
return True
@dbus_property()
def WwanEnabled(self) -> "b":
"""Get WwanEnabled."""
return True
@WwanEnabled.setter
def WwanEnabled(self, value: "b"):
"""Set WwanEnabled."""
self.emit_properties_changed({"WwanEnabled": value})
@dbus_property(access=PropertyAccess.READ)
def WwanHardwareEnabled(self) -> "b":
"""Get WwanHardwareEnabled."""
return True
@dbus_property()
def WimaxEnabled(self) -> "b":
"""Get WimaxEnabled."""
return False
@WimaxEnabled.setter
def WimaxEnabled(self, value: "b"):
"""Set WimaxEnabled."""
self.emit_properties_changed({"WimaxEnabled": value})
@dbus_property(access=PropertyAccess.READ)
def WimaxHardwareEnabled(self) -> "b":
"""Get WimaxHardwareEnabled."""
return False
@dbus_property(access=PropertyAccess.READ)
def ActiveConnections(self) -> "ao":
"""Get ActiveConnections."""
return ["/org/freedesktop/NetworkManager/ActiveConnection/1"]
@dbus_property(access=PropertyAccess.READ)
def PrimaryConnection(self) -> "o":
"""Get PrimaryConnection."""
return "/org/freedesktop/NetworkManager/ActiveConnection/1"
@dbus_property(access=PropertyAccess.READ)
def PrimaryConnectionType(self) -> "s":
"""Get PrimaryConnectionType."""
return "802-3-ethernet"
@dbus_property(access=PropertyAccess.READ)
def Metered(self) -> "u":
"""Get Metered."""
return 4
@dbus_property(access=PropertyAccess.READ)
def ActivatingConnection(self) -> "o":
"""Get ActivatingConnection."""
return "/"
@dbus_property(access=PropertyAccess.READ)
def Startup(self) -> "b":
"""Get Startup."""
return False
@dbus_property(access=PropertyAccess.READ)
def Version(self) -> "s":
"""Get Version."""
return self.version
@dbus_property(access=PropertyAccess.READ)
def Capabilities(self) -> "au":
"""Get Capabilities."""
return [1]
@dbus_property(access=PropertyAccess.READ)
def State(self) -> "u":
"""Get State."""
return 70
@dbus_property(access=PropertyAccess.READ)
def Connectivity(self) -> "u":
"""Get Connectivity."""
return self.connectivity
@dbus_property(access=PropertyAccess.READ)
def ConnectivityCheckAvailable(self) -> "b":
"""Get ConnectivityCheckAvailable."""
return True
@dbus_property()
def ConnectivityCheckEnabled(self) -> "b":
"""Get ConnectivityCheckEnabled."""
return True
@ConnectivityCheckEnabled.setter
def ConnectivityCheckEnabled(self, value: "b"):
"""Set ConnectivityCheckEnabled."""
self.emit_properties_changed({"ConnectivityCheckEnabled": value})
@dbus_property(access=PropertyAccess.READ)
def ConnectivityCheckUri(self) -> "s":
"""Get ConnectivityCheckUri."""
return "http://connectivity-check.ubuntu.com/"
@dbus_property()
def GlobalDnsConfiguration(self) -> "a{sv}":
"""Get GlobalDnsConfiguration."""
return {}
@GlobalDnsConfiguration.setter
def GlobalDnsConfiguration(self, value: "a{sv}"):
"""Set GlobalDnsConfiguration."""
self.emit_properties_changed({"GlobalDnsConfiguration": value})
@signal()
def CheckPermissions(self) -> None:
"""Signal CheckPermissions."""
# These signals all seem redundant. Their respective properties fire PropertiesChanged signals
@signal()
def StateChanged(self) -> "u":
"""Signal StateChanged."""
return 70
@signal()
def DeviceAdded(self) -> "o":
"""Signal DeviceAdded."""
return "/org/freedesktop/NetworkManager/Devices/2"
@signal()
def DeviceRemoved(self) -> "o":
"""Signal DeviceRemoved."""
return "/org/freedesktop/NetworkManager/Devices/2"
@dbus_method()
def Reload(self, flags: "u") -> None:
"""Do Reload method."""
@dbus_method()
def GetDevices(self) -> "ao":
"""Do GetDevices method."""
return self.Devices
@dbus_method()
def GetAllDevices(self) -> "ao":
"""Do GetAllDevices method."""
return self.AllDevices
@dbus_method()
def GetDeviceByIpIface(self, iface: "s") -> "o":
"""Do GetDeviceByIpIface method."""
return "/org/freedesktop/NetworkManager/Devices/1"
@dbus_method()
def ActivateConnection(
self, connection: "o", device: "o", specific_object: "o"
) -> "o":
"""Do ActivateConnection method."""
return "/org/freedesktop/NetworkManager/ActiveConnection/1"
@dbus_method()
def AddAndActivateConnection(
self, connection: "a{sa{sv}}", device: "o", speciic_object: "o"
) -> "oo":
"""Do AddAndActivateConnection method."""
return [
"/org/freedesktop/NetworkManager/Settings/1",
"/org/freedesktop/NetworkManager/ActiveConnection/1",
]
@dbus_method()
def AddAndActivateConnection2(
self,
connection: "a{sa{sv}}",
device: "o",
speciic_object: "o",
options: "a{sv}",
) -> "ooa{sv}":
"""Do AddAndActivateConnection2 method."""
return [
"/org/freedesktop/NetworkManager/Settings/1",
"/org/freedesktop/NetworkManager/ActiveConnection/1",
{},
]
@dbus_method()
def DeactivateConnection(self, active_connection: "o") -> None:
"""Do DeactivateConnection method."""
@dbus_method()
def Sleep(self, sleep: "b") -> None:
"""Do Sleep method."""
@dbus_method()
def Enable(self, enable: "b") -> None:
"""Do Enable method."""
@dbus_method()
def GetPermissions(self) -> "a{ss}":
"""Do GetPermissions method."""
return {
"org.freedesktop.NetworkManager.checkpoint-rollback": "yes",
"org.freedesktop.NetworkManager.enable-disable-connectivity-check": "yes",
"org.freedesktop.NetworkManager.enable-disable-network": "yes",
"org.freedesktop.NetworkManager.enable-disable-statistics": "yes",
"org.freedesktop.NetworkManager.enable-disable-wifi": "yes",
"org.freedesktop.NetworkManager.enable-disable-wimax": "yes",
"org.freedesktop.NetworkManager.enable-disable-wwan": "yes",
"org.freedesktop.NetworkManager.network-control": "yes",
"org.freedesktop.NetworkManager.reload": "yes",
"org.freedesktop.NetworkManager.settings.modify.global-dns": "yes",
"org.freedesktop.NetworkManager.settings.modify.hostname": "yes",
"org.freedesktop.NetworkManager.settings.modify.own": "yes",
"org.freedesktop.NetworkManager.settings.modify.system": "yes",
"org.freedesktop.NetworkManager.sleep-wake": "yes",
"org.freedesktop.NetworkManager.wifi.scan": "yes",
"org.freedesktop.NetworkManager.wifi.share.open": "yes",
"org.freedesktop.NetworkManager.wifi.share.protected": "yes",
}
@dbus_method()
def SetLogging(self, level: "s", domains: "s") -> None:
"""Do SetLogging method."""
@dbus_method()
def GetLogging(self) -> "ss":
"""Do GetLogging method."""
return [
"INFO",
"PLATFORM,RFKILL,ETHER,WIFI,BT,MB,DHCP4,DHCP6,PPP,IP4,IP6,AUTOIP4,DNS,VPN,"
"SHARING,SUPPLICANT,AGENTS,SETTINGS,SUSPEND,CORE,DEVICE,OLPC,INFINIBAND,"
"FIREWALL,ADSL,BOND,VLAN,BRIDGE,TEAM,CONCHECK,DCB,DISPATCH,AUDIT,SYSTEMD,PROXY",
]
@dbus_method()
def CheckConnectivity(self) -> "u":
"""Do CheckConnectivity method."""
return self.Connectivity
@dbus_method()
def state(self) -> "u":
"""Do state method."""
return self.State
@dbus_method()
def CheckpointCreate(self, devices: "ao", rollback_timeout: "u", flags: "u") -> "o":
"""Do CheckpointCreate method."""
return "/org/freedesktop/NetworkManager/Checkpoint/1"
@dbus_method()
def CheckpointDestroy(self, checkpoint: "o") -> None:
"""Do CheckpointDestroy method."""
@dbus_method()
def CheckpointRollback(self, checkpoint: "o") -> "a{su}":
"""Do CheckpointRollback method."""
return {}
@dbus_method()
def CheckpointAdjustRollbackTimeout(
self, checkpoint: "o", add_timeout: "u"
) -> None:
"""Do CheckpointAdjustRollbackTimeout method."""
| [
"[email protected]"
] | |
6e5391fa5e4d463ec02b366e051ea23df32eb190 | 58d0a1606b29d8b86d19acea85f61168a041f61c | /grpc_training/client.py | 558f27552cc1fdd8169a47c29b928ef16e104d70 | [] | no_license | cocodrips/grpc-python-server-training | f4ffa54b60a4efd07f018f8152de0bb7f8fee495 | c327960ee16106667a77a836e898c55b310e8b8d | refs/heads/master | 2020-06-09T20:07:31.969434 | 2019-06-24T13:01:59 | 2019-06-24T13:01:59 | 193,498,135 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 429 | py |
import sys
import pathlib
import grpc
root = pathlib.Path(__name__).parent.resolve()
sys.path.insert(0, str(root / '../proto_python'))
import hello_pb2, hello_pb2_grpc
def run():
    """Connect to the local Greeter service and print one SayHello reply."""
    # Plaintext (non-TLS) channel to the server on gRPC's conventional port.
    channel = grpc.insecure_channel('localhost:50051')
    stub = hello_pb2_grpc.GreeterStub(channel)
    response = stub.SayHello(hello_pb2.HelloRequest(name='nyan'))
    print('message', response.message)
run()
| [
"[email protected]"
] | |
d981df9a2a5ac7ea6f17864ce33c3f13b8a2bd04 | cee1b29bbd1e3e87e4dbe9996621b81c4e063a51 | /rl885/cartpole_random_monitor.py | fbf575b76e552f0ea01d931bd7d2cd704a0cc5c7 | [] | no_license | edvgha/122 | 4ab3a1358775864ada3919bca8adff01be30f086 | f3643d336060b18ec3f5416b7b995cdaba804f37 | refs/heads/master | 2023-02-18T07:29:22.190292 | 2022-09-02T12:49:36 | 2022-09-02T12:49:36 | 85,350,600 | 0 | 0 | null | 2023-02-15T21:28:39 | 2017-03-17T20:07:53 | Jupyter Notebook | UTF-8 | Python | false | false | 520 | py | import gym
if __name__ == "__main__":
env = gym.make("CartPole-v0")
env = gym.wrappers.Monitor(env, "recording")
total_reward = 0.0
total_steps = 0
obs = env.reset()
while True:
action = env.action_space.sample()
obs, reward, done, _ = env.step(action)
total_reward += reward
total_steps += 1
if done:
break
print("Episode done in %d steps, total reward %.2f" % (
total_steps, total_reward))
env.close()
env.env.close() | [
"[email protected]"
] | |
cf34a57a7ae45e4c949990357bb37bc9ade93262 | cee1615db5540a5ae4b9d3c67bf3ef139bddb556 | /Cadastrar.py | 3bcc49a2abf597425630409a0c007228162baf87 | [] | no_license | andreCLima/Python | b9aaa01591bf318f522bb5d8aa760eb7a685e0f2 | 6817152783b7b1847165ea8035b18ecdf9153763 | refs/heads/main | 2023-08-04T03:47:52.381357 | 2021-09-24T01:45:24 | 2021-09-24T01:45:24 | 409,796,425 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,170 | py | from typing import Text
from db.Query import Sqlite_Db
import os, sys
from PyQt5.QtWidgets import *
from templete.cadastrar import Ui_Cadastrar
class Cadastrar(QDialog):
    """Registration dialog: collects a name/function pair and stores it as a
    FUNCIONARIO row in the Empresa.db SQLite database."""
    def __init__(self,*argv,**argvs):
        """Build the generated UI and wire the three buttons to their handlers."""
        super(Cadastrar,self).__init__(*argv,**argvs)
        self.ui = Ui_Cadastrar()
        self.ui.setupUi(self)
        self.ui.butCadastrar.clicked.connect(self.add)
        self.ui.butCancelar.clicked.connect(self.cam)
        self.ui.butLimpar.clicked.connect(self.limpar)
    def add(self):
        """Insert the form's name/function into FUNCIONARIO with default credentials."""
        con = Sqlite_Db("Empresa.db")
        nome = self.ui.edtNome.text()
        funcao = self.ui.edtFuncao.text()
        # New employees get a fixed admin/admin login.
        user = "admin"
        passwd = "admin"
        # SECURITY NOTE(review): the SQL is built with str.format from raw user
        # input, which is vulnerable to SQL injection. The Sqlite_Db wrapper
        # only accepts a single SQL string, so parameterized queries would need
        # an API change — flagging rather than silently rewriting.
        con.sqlQuery1("""
        INSERT INTO FUNCIONARIO(NOME, FUNCAO, USER, PASSWD)
        VALUES('{}','{}','{}','{}')
        """.format(nome,funcao,user,passwd))
        QMessageBox.information(QMessageBox(),"Info","Gravado com sucesso")
    def cam(self):
        """Cancel: clear the form fields and close the dialog."""
        self.limpar()
        self.close()
    def limpar(self):
        """Clear all editable fields of the form."""
        self.ui.edtCodigo.clear()
        self.ui.edtFuncao.clear()
        self.ui.edtNome.clear()
| [
"[email protected]"
] | |
5395641913abc59c680fdc254821262ed1e90d7b | eb207aaff733fbeda6ecc09cd4d9948d57dd7ef5 | /votingmachine.py | db776b992f37b837f794cf17785f26676cb10104 | [] | no_license | shivamtech29/PythonProjects | c3542b2f92cf1e68d234e99ef8dd4206525993ad | fcb0384cd8e36a29eeace3a0579e5f4c0c1c7b75 | refs/heads/main | 2023-03-27T03:39:29.829660 | 2021-04-04T07:45:41 | 2021-04-04T07:45:41 | 354,484,863 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,547 | py | # Voting Machine in less than 60 lines of clean code
import pyautogui
import time
def voteloop(votedct,n):
p=[]
for i in range(n):
p.append(0)
c='y'
while c=='y':
u = 0
for i in votedct:
print("\t\t\t{} : {}".format(u+1, i))
u += 1
x = int(input("\tEnter your vote choice here : "))
p[x-1] = p[x-1]+1
print("\tYour vote has been recorded. Now press y to confirm...")
c = input("\tConfirm ... ")
print("\tThanks for Voting")
time.sleep(2)
pyautogui.hotkey('alt', 'g')
pyautogui.hotkey('alt', 'g')
print("\n\tProcessing the Results...")
time.sleep(2)
pyautogui.hotkey('alt', 'g')
print("\n\tFinally the results are out...")
d = max(p)
for i in range(n):
if d==p[i]:
name=votedct[i]
print("\n" * 2)
print("\tAnd the WINNER of Elections 2020 is {} with votes".format(name,d))
def startvote():
print("\n"*30)
p=int(input("Enter the number of parties to compete : "))
votedct = []
for i in range(p):
k=input("Enter Contestant name")
votedct.append(k)
print("\tInstructions: choose the given options then Press Enter and confirm your result by pressing 'y'\n")
print("\tYOU HAVE {} CHOICES ".format(p))
u=0
for i in votedct:
print("\t\t\t{} : {}".format(u,i))
u+=1
time.sleep(2)
pyautogui.hotkey('alt', 'g')
voteloop(votedct,p)
startvote() | [
"[email protected]"
] | |
baaee0761e01c33173431d87591d882410c8378e | 301cec2edb0a88558687cf45cf7b958fbb07f9ac | /pokemongo_bot/test/resources/plugin_fixture/__init__.py | ca0a3fe9b3f2b5c787dd6108fd2273364807387d | [
"MIT"
] | permissive | Kafkamorph/PokemonGo-Bot | 2994390e49247dca3cc92acf136ede9a49763487 | df21ce08511673c7d60a1e01a68b5822c45de0b8 | refs/heads/master | 2023-05-28T12:39:56.613529 | 2023-05-24T12:07:58 | 2023-05-24T12:07:58 | 64,310,529 | 0 | 1 | MIT | 2023-05-24T12:08:00 | 2016-07-27T13:25:51 | Python | UTF-8 | Python | false | false | 124 | py | from __future__ import absolute_import
from .fake_task import FakeTask
from .unsupported_api_task import UnsupportedApiTask
| [
"[email protected]"
] | |
5bbe57836e926ddd11912eacf04069b1bc1206f7 | 07bd1848e35bbb75ef4d23f1982af618aa176852 | /chap04/list04c02.py | 099962a1b27cdf94ce4630caad00056e551aec35 | [] | no_license | kimurakousuke/MeiKaiPython | c0b56be8fcb79b39b0c8364e71e2da76eab613fe | 674f6001060f56cf55e3d7336e6e4ca5f135beaf | refs/heads/master | 2021-02-22T13:01:53.397290 | 2020-03-07T11:19:10 | 2020-03-07T11:19:10 | 245,377,717 | 1 | 0 | null | 2020-03-06T11:16:16 | 2020-03-06T09:22:53 | Python | UTF-8 | Python | false | false | 138 | py | # 对整数1到12进行循环但跳过8(其一)
for i in range(1, 13):
if i == 8:
continue
print(i, end=' ')
print()
| [
"[email protected]"
] | |
1dd8ce2b851444579a7663f1ab4fabe99749bf80 | 1dd38f94b9734e45d3c0f76dab718cda48c82a1f | /ecomm/products/migrations/0003_auto_20200608_1742.py | 11998b9f90e2274f3b5df3d540de8c5fdd9b4708 | [] | no_license | jgsneves/semana5CodeNation | edb8154410cf733dc70d92957c7d7b63149539fc | 18ee0c23bdfb5c42f8cfe8dde4ee75c3cb26cc5c | refs/heads/master | 2022-12-22T02:26:30.236793 | 2020-06-10T17:10:49 | 2020-06-10T17:10:49 | 271,338,663 | 1 | 1 | null | 2022-12-18T05:44:46 | 2020-06-10T17:12:07 | Python | UTF-8 | Python | false | false | 506 | py | # Generated by Django 2.2.5 on 2020-06-08 20:42
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('products', '0002_auto_20200608_1706'),
]
operations = [
migrations.AlterField(
model_name='product',
name='category',
field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, related_name='products', to='products.Category'),
),
]
| [
"[email protected]"
] | |
569ad9991ca0e6f2bfd00651d5a02128e88d7589 | b75b2a86613536d4917768d3263460f2b9a59d8e | /login.py | adb62fe69087bc85795800e70cfa573f7fe7ec03 | [] | no_license | felipemfp/PyLogins | 2f50d932e63c19ed544eb6a81f7b041accf8cc35 | 9995e2d7bfec64cbe6953b938f51f3109b04667c | refs/heads/master | 2021-04-29T04:07:22.766588 | 2016-12-21T00:36:22 | 2016-12-21T00:36:22 | 78,030,633 | 0 | 0 | null | 2017-01-04T16:03:15 | 2017-01-04T16:03:15 | null | UTF-8 | Python | false | false | 1,529 | py | # coding: utf-8
import time
import base64
import argparse
import getpass
def check(pasw):
    """Prompt for a password confirmation until it matches *pasw*; return it."""
    sen = getpass.getpass("password again: ")
    while pasw != sen:
        print("Passwords do not match, try again")
        sen = getpass.getpass("password: ")
    return pasw
def Hides(login):
    """Append *login* to logins.txt, base64-obfuscated (not encrypted)."""
    arqui = open("logins.txt", "a")
    codi = base64.b64encode(login.encode("utf-8", 'replace'))
    # b64encode returns bytes; str() bakes the "b'...'" repr into the file.
    # show() compensates by stripping the leading "b" when reading back.
    codi = str(codi)
    arqui.write(codi + "\n")
    arqui.close()
    print("Saved successfully")
def mold(usr, pasw):
    # Format the credential pair ("Senha" is Portuguese for password) and
    # persist it through Hides().
    login = ("Email: {e} | Senha: {s}".format(e = usr, s = pasw))
    Hides(login)
def show():
    """Print every stored login after base64-decoding each line of logins.txt."""
    arqui = open("logins.txt", "r")
    log = "logins"
    while log != "":
        log = str(arqui.readline())
        # Drop the leading "b" that Hides() wrote via str(bytes); b64decode
        # silently discards the remaining quote characters (non-alphabet).
        log = log[1:]
        decodi = base64.b64decode(log)
        print(decodi)
    arqui.close()
def args():
    """Parse CLI flags and dispatch: --e yes saves a login, --s yes lists them."""
    # Description string is Portuguese: "Email and password of a registration."
    log = argparse.ArgumentParser(description = 'Email e senha de um cadastro.')
    log.add_argument("--e", action = "store", dest = "opc",
                     required = False,
                     help = "Enter argument and 'yes' to save logins")
    log.add_argument("--s", action = "store", dest = "escolha",
                     required = False,
                     help = "Enter the argument and 'yes' to view logins")
    data = log.parse_args()
    if data.opc == "yes":
        usr = input("Email: ")
        pasw = getpass.getpass("password: ")
        # check() re-prompts until the confirmation matches.
        mold(usr, check(pasw))
    elif data.escolha == "yes":
        show()
def main():
    """Ensure the credential store exists, then dispatch on the CLI flags."""
    try:
        # Probe for the store. The original opened the file and never closed
        # it, leaking the handle; a context manager closes it immediately.
        with open("logins.txt", "r"):
            pass
    except IOError:
        print("File not found\n*Creating file*")
        time.sleep(5)
        # Create an empty store; the context manager closes it right away
        # (the original left this handle open as well).
        with open("logins.txt", "w"):
            pass
    args()
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
f97010e961b46c6afb5e84a3dbbfdbd4ed10a1da | 90ac505fb14e4969cd4e7f164f8969ed2344d3e3 | /Power of IT Job/ncr.py | c6c2f9a7dfb5fcd5d985a0c501cceec520952b19 | [] | no_license | rid47/python_basic_book | 4d08641ed802a80f5b5398c568231b366b1cf5d0 | f4a77577115b126094c9e5aac38a18bb42eeb28f | refs/heads/master | 2022-12-22T12:24:48.094483 | 2022-12-10T12:11:52 | 2022-12-10T12:11:52 | 234,990,760 | 0 | 1 | null | 2022-12-02T03:44:50 | 2020-01-20T00:45:53 | Tcl | UTF-8 | Python | false | false | 316 | py | def factorial(n):
value = 1
for i in range(n, 1, -1):
value *= i
return value
n = int(input("Enter n for finding nCr:"))
r = int(input("Enter r for finding nCr:"))
# nCr is defined for 0 <= r <= n; the original condition (r > 0 and n > r)
# wrongly rejected the valid edge cases r == 0 and r == n (both equal 1).
if 0 <= r <= n:
    # Integer division is exact here: r!(n-r)! always divides n! evenly,
    # so the result prints as an int instead of a float like 10.0.
    result = factorial(n) // (factorial(r) * factorial(n - r))
    print(result)
else:
    print("Invalid input!")
| [
"[email protected]"
] | |
3209aa803bf257677aa8d2769303de3b1ec86c9d | a3c8651bb991d7fd5ae575c8ce3020241fe15a4d | /venv/lib/python3.8/site-packages/numpy/array_api/_set_functions.py | dc6096d3b62945f070d0b0b84cb3288ac452e4b9 | [] | no_license | chunyboy/test | fefff853c244657a5c33fe415d8552fdfcbb9eb7 | 44233736f86910fa934c4fd0f3e261df2f761a2d | refs/heads/master | 2022-12-13T11:33:06.176114 | 2022-11-29T15:32:01 | 2022-11-29T15:32:01 | 78,964,487 | 2 | 1 | null | 2017-01-14T22:34:28 | 2017-01-14T20:21:54 | Python | UTF-8 | Python | false | false | 96 | py | /home/runner/.cache/pip/pool/f3/42/0d/a4b54ffb86ee9c35d4c66c48ddd156fa0baeffbae05a5030b8dcddb191 | [
"[email protected]"
] | |
7b1e55c63e68e9ea484248b9a4295648880b11d4 | a30c74862bda7003d14c978d225ff24996d1aace | /nightsky.py | 79e993c03b9c95e9363425037ea61d03d21cf2a4 | [] | no_license | kirkabbott1/functions | 47e3c1aa2aaf6834aa57925ffc796c180dc19b53 | 0a5a6b8520749f8622962e13f083fc2d4f7c5448 | refs/heads/master | 2021-01-12T17:49:51.737935 | 2016-09-27T23:07:13 | 2016-09-27T23:07:13 | 69,398,702 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,189 | py | from turtle import *
import random
from shapes import star
bgcolor("#040404")
# w_width = window_width()
# w_height = window_height()
#
# turt_x = position()[0]
# turt_y = position()[1]
#
# left(15)
# forward(200)
# print position()[0]
# print position()[1]
# print turt_x
# print turt_y
# left(180)
# forward(400)
# print position()[0]
# print position()[1]
# print turt_x
# print turt_y
# home()
# print position()[0]
# print position()[1]
def nightsky_star(star_size, turn, jump_forward):
    # Draw one grey star, then hop to the next drawing position.
    # NOTE: this is Python 2 code (print statements) driven by the turtle
    # module; `star` comes from the local shapes module.
    star(star_size, True, "#cecece")
    up()
    print 'position-x: ', abs(position()[0])
    print 'position-y: ', abs(position()[1])
    print 'window width: ', window_width()
    print 'window height: ', window_height()
    # If the turtle has wandered past roughly a third of the window extent,
    # recentre it before turning (it then turns a second time below).
    if abs(position()[0]) * 3 > window_width() or abs(position()[1]) * 3 > window_height():
        home()
        left(turn)
    else:
        pass
    left(turn)
    forward(jump_forward)
    down()
# Fastest turtle animation, then scatter 220 randomly sized/placed stars.
speed(0)
for i in range(220):
    # Randomize size, heading change and hop distance for each star.
    star_size = random.randint(5, 41)
    turn = random.randint(10, 141)
    jump_forward = random.randint(200, 401)
    nightsky_star(star_size, turn, jump_forward)
mainloop()
| [
"[email protected]"
] | |
795eaa24315a1afe16c509b6c908ae9bae566db0 | 781e2692049e87a4256320c76e82a19be257a05d | /assignments/python/61a_hw4/code/161.py | 687844acb25cb43634a63af21f9cd2a158323d78 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 601 | py | def num_common_letters(goal_word, guess):
result = []
result_len = 0
goal_word_list = []
guess_word_list = []
goal_word_list += get_list(goal_word)
guess_word_list += get_list(guess)
for i in range(len(goal_word_list)):
for e in range(len(guess_word_list)):
if goal_word_list[i] == guess_word_list[e]:
result += [goal_word_list[i]]
result_len = len(result)
k = 0
while k <= len(result)-1:
if result[k] in result [k+1:] + result[:k]:
result_len -= 1
k += 1
k += 1
return result_len
| [
"[email protected]"
] | |
588d431353eb0d806071d7add3c1e1c820840ad5 | cbd4e397288a20f5a76907a2c1dfb0fe3356852c | /app.py | e07993fe0d7da510d037f8aea5c42d71d486b839 | [] | no_license | reddy100/QuizAPI | 99865f458ae1d5d8166c421fcee073dc39103a56 | 475e56e7344b2d11fd6397de0bedb8866a0421ef | refs/heads/master | 2020-04-02T08:06:50.915764 | 2018-10-23T04:07:13 | 2018-10-23T04:07:13 | 154,229,723 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,796 | py | from flask import Flask, render_template, request, redirect, url_for, flash, make_response, session, jsonify
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager, UserMixin, login_user, login_required, logout_user, current_user
from datetime import datetime, timedelta
from random import shuffle
import uuid
time_per_Session = 1
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////home/abishek/Code/QuizAPI/database.db'
db = SQLAlchemy(app)
app.config['SECRET_KEY'] = '09134832084uriehfdsh!'
app.config['PERMANENT_SESSION_LIFETIME'] = timedelta(minutes=time_per_Session)
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = '/'
original_questions = {
1: ['What is my first name',['abishek', 'reddy' , 'wdaru']],
2: ['What is my doges name',['otto','nitro','pomodoro']],
3: ['What is my favoraite food',['chocolate','yes its chocolate','why are you still asking']]
}
questionIds=list(range(1, len(original_questions)+1))
shuffle(questionIds)
class UserTable(UserMixin, db.Model):
    """One quiz participant and their three recorded answers (one row per run)."""
    id = db.Column(db.Integer, nullable=False, primary_key = True)
    firstname = db.Column(db.String(80))
    lastname = db.Column(db.String(80))
    email = db.Column(db.String(80))
    # Per-run token embedded in the /questions/<instance_id> URL.
    instance_id = db.Column(db.String(80), unique=True)
    # Chosen option index for each of the three quiz questions.
    answer1 = db.Column(db.Integer)
    answer2 = db.Column(db.Integer)
    answer3 = db.Column(db.Integer)
@login_manager.user_loader
def load_user(user_id):
    # Flask-Login callback: map the id stored in the session to a user row.
    return UserTable.query.get(int(user_id))
@app.route('/')
def homePage():
    """Show the login form, or skip straight to the quiz for a live session."""
    if 'loggedIn' in session:
        return redirect(url_for('questions', instance_id=current_user.instance_id))
    return render_template('login.html')
@app.route('/login', methods = ['GET','POST'])
def login():
    """Create a fresh user row for this quiz attempt and start the session.

    Every submission makes a new UserTable row keyed by a random UUID
    instance_id; incomplete forms bounce back to the home page.
    """
    if request.form.get('email') and request.form.get('firstname') and request.form.get('lastname'):
        email = request.form.get('email')
        fName = request.form.get('firstname')
        lName = request.form.get('lastname')
        # Random per-attempt token used in the /questions URL.
        instance_id = str(uuid.uuid4())
        new_user = UserTable(firstname=fName, lastname=lName, email=email, instance_id=instance_id)
        db.session.add(new_user)
        db.session.commit()
        session['loggedIn'] = True
        login_user(new_user)
        return redirect(url_for('questions', instance_id=instance_id))
    else:
        return redirect(url_for('homePage'))
# Serve one quiz question at a time: each answer is stored on the user row
# and session['questionIds'] is consumed like a stack until it is empty.
@app.route('/questions/<instance_id>', methods = ['GET','POST'])
@login_required
def questions(instance_id):
    # check if the submit button has been pressed; persist the chosen answer
    if request.method=='POST' and request.form.get('userChoice'):
        setattr(current_user, 'answer'+str(session['lastQuestionId']), request.form.get('userChoice'))
        db.session.commit()
        if len(session['questionIds'])>0:
            session['questionIds'].pop()
            session['questionNumbers'].pop()
    # First visit: seed the shuffled question stack and start the timer.
    if 'questionIds' not in session:
        session['questionIds'] = questionIds
        session['questionNumbers'] = list(range(len(questionIds),0,-1))
        session.permanent=True
        session['startTime'] = datetime.now()
    localQuestionIds = session.get('questionIds')
    questionNumbers = session.get('questionNumbers')
    if localQuestionIds:
        questionId = localQuestionIds[-1]
        questionNumber = questionNumbers[-1]
        question, options = original_questions.get(questionId)
        session['lastQuestionId']=questionId
        # Remaining time = session lifetime minus time elapsed since start.
        timeElapsed = datetime.now() - session['startTime']
        return render_template('questions.html', n = questionNumber,q = question, o = options, t= timedelta(minutes=time_per_Session) - timeElapsed, i=current_user.instance_id)
    else:
        return redirect(url_for('confirmation'))
@app.route('/confirmation')
def confirmation():
    """End of quiz: return {question: answer} as JSON and close the session."""
    questions = [original_questions[i][0] for i in questionIds]
    # Read the stored answers before logging the user out.
    answers = [getattr(current_user, 'answer'+str(j)) for j in questionIds]
    logout_user()
    session['loggedIn']=False
    return jsonify(dict(zip(questions, answers)))
if __name__ == '__main__':
app.run() | [
"[email protected]"
] | |
ced70fb3eebe1d858dcc0187257f7fdcd74b0ebc | 4e7c30694f7c8efdd3e660203af391186f001bf4 | /backend/bids/backend.py | 04e2a3613b2af8343f044a85904adb0b73207de1 | [] | no_license | nkoutsov/web_bid_system | a71b49ca3b62b5b176669f07e4dc8f74105ba7f1 | 84d34b98c9b4eaf7512e8cdab7273a77c3ccb805 | refs/heads/master | 2023-01-20T13:24:13.039469 | 2020-11-13T14:48:42 | 2020-11-13T14:48:42 | 205,192,624 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 718 | py | from django.contrib.auth.middleware import get_user
from django.utils.functional import SimpleLazyObject
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
class AuthenticationMiddlewareJWT(object):
    """Django middleware that sets request.user from a JWT when one is
    supplied, falling back to the normal session-based user otherwise."""
    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        # Resolve the user before the view runs.
        request.user = self.__class__.get_jwt_user(request)
        return self.get_response(request)

    @staticmethod
    def get_jwt_user(request):
        # Session user first (may be AnonymousUser) ...
        user = get_user(request)
        jwt_authentication = JSONWebTokenAuthentication()
        # ... then override with the JWT identity when a token is present.
        if jwt_authentication.get_jwt_value(request):
            user, jwt = jwt_authentication.authenticate(request)
        return user
"[email protected]"
] | |
0dc60f3620be8bf49ae2076889b0b99b18524475 | 5abca260d5909e8137921d2a4fb1889f844e41e6 | /rppg_extract/POS.py | 8d7eb973cb7044bdf81e9952284991618f3698cc | [] | no_license | MountainLovers/PulseGAN | be886c570cadd13fc48bd0a507668120107121f5 | fcf5ae30d5b622f6df1177f7a908207a2cec2900 | refs/heads/master | 2023-09-03T17:39:56.307195 | 2021-10-18T21:30:43 | 2021-10-18T21:30:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,211 | py | """
@author: Chun Hei Michael Chan
@copyright: Copyright Logitech
@credits: [Chun Hei Michael Chan]
@maintainer: Chun Hei Michael Chan
@email: [email protected]
"""
import numpy as np
def POS(X, fps, windows=1.6):
    """
    desc: recover a 1-d rPPG pulse signal from an RGB trace with the POS
          method (Wang et al., "Algorithmic Principles of Remote PPG", 2017)
    args:
        - X::[array<array<float> > ]
            RGB spatial-averaged trace, shape (3, N)
        - fps::[float]
            sampling rate of the trace (frames per second)
        - windows::[float]
            sliding-window length in seconds (paper default 1.6 s)
    ret:
        - h::[array<float>]
            1d signal of length N
    """
    # Fixed projection matrix from the POS paper.
    P = np.array([[0, 1, -1], [-2, 1, 1]])
    wlen = int(windows * fps)
    # Work in float and never write back into the caller's array.
    X = np.asarray(X, dtype=float)
    N = X.shape[1]
    # Initialize overlap-add accumulator (1)
    h = np.zeros(N)
    for n in range(N):
        # Start index of sliding window (4)
        m = n - wlen + 1
        if m >= 0:
            window = X[:, m:n + 1]
            # Temporal normalization (5). BUGFIX: the original divided the
            # slice *in place*; a NumPy slice is a view, so that silently
            # overwrote X and corrupted every later overlapping window.
            cn = window / np.mean(window, axis=1, keepdims=True)
            # Projection (6)
            s = P @ cn
            # Tuning (7)
            hn = s[0, :] + (np.std(s[0, :]) / np.std(s[1, :])) * s[1, :]
            # Overlap-adding (8)
            h[m:n + 1] += hn - np.mean(hn)
    return h
| [
"[email protected]"
] | |
c60aab701879987a68ccc7b1964df7df0029db9b | 2d61193c3b257f8e80b7beea7ac73b2dc1f06265 | /18_calculate/calculate.py | a288d74345a596e93a1c02ee089ddb0cefc4a883 | [] | no_license | jade0304/python-exercise | 961880ffdb62ca45df706327df9542a9de4ee065 | 99ea8a5274ee804eb3d31f1b64c8b53b70083cd4 | refs/heads/main | 2023-09-04T00:20:06.985412 | 2021-10-16T22:39:36 | 2021-10-16T22:39:36 | 417,964,555 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,258 | py | def calculate(operation, a, b, make_int=False, message='The result is'):
"""Perform operation on a + b, ()possibly truncating) & returning w/msg.
- operation: 'add', 'subtract', 'multiply', or 'divide'
- a and b: values to operate on
- make_int: (optional, defaults to False) if True, truncates to integer
- message: (optional) message to use (if not provided, use 'The result is')
Performs math operation (truncating if make_int), then returns as
"[message] [result]"
>>> calculate('add', 2.5, 4)
'The result is 6.5'
>>> calculate('subtract', 4, 1.5, make_int=True)
'The result is 2'
>>> calculate('multiply', 1.5, 2)
'The result is 3.0'
>>> calculate('divide', 10, 4, message='I got')
'I got 2.5'
If a valid operation isn't provided, return None.
>>> calculate('foo', 2, 3)
"""
if operation == 'add':
result = a + b
elif operation == 'subtract':
result = a - b
elif operation == 'multiply':
result = a * b
elif operation == 'divide':
result = a / b
else:
return
if make_int:
result = int(result)
return f"{message} {result}"
| [
"[email protected]"
] | |
6b4e0a746ba733fc5817c543cecbb4b2ef06b239 | 8a616f01e2aacc5b8ef45cd3c8e427097a28b88b | /setup.py | d4321f495429133425939bd9cc6297c257a6140f | [] | no_license | captain-proton/damndaily | 25142354cc16de6899152a7e576c67ebf3bac56e | 91af8f2a9898f4c45734cdf35d3a4cdb9be126a7 | refs/heads/master | 2021-01-19T17:30:41.432687 | 2017-09-29T12:06:48 | 2017-09-29T12:06:48 | 101,063,919 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 287 | py | from setuptools import setup, find_packages
setup(
name='damndaily',
packages=find_packages(),
version='0.1',
license='MIT',
author_email='[email protected]',
include_package_data=True,
install_requires=[
'Flask',
'Flask-SQLAlchemy'
]
)
| [
"[email protected]"
] | |
3ef89feecbd1aa344041da46ed96e5e78677f545 | aea5b776fa0d9f2bdaef0abe56fd95422581da9e | /methods_and_functions/summer_69.py | 1850f9b3bea496b3b536e5f5f6ac8ddaec3d375c | [] | no_license | Arswarum/From_zero_to_hero_practice | 0e5e678f633c61919bccfddcb2e29ace32d4bdba | 5bde6120556a7d937bc7d7264351ce60392be68e | refs/heads/master | 2021-03-15T10:39:40.242601 | 2020-04-17T16:53:40 | 2020-04-17T16:53:40 | 246,844,340 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 293 | py | def summer_69(arr):
arr2 =[]
button = True
for num in arr:
if num == 6:
button = False
if num == 9:
button = True
continue
if button:
arr2.append(num)
return sum(arr2)
# Quick manual checks; expected output: 9, 9, 14.
print(summer_69([1, 3, 5]))
print(summer_69([4, 5, 6, 7, 8, 9]))
print(summer_69([2, 1, 6, 9, 11]))
| [
"[email protected]"
] | |
952c51b3fb29d9d810836ea9f2b037d3e5148db5 | 162c4186ed26c5f7f5b191610770dca128002da7 | /van/van_speed.py | a8682cee689a0c61127f1a6f138929bf6fd6de08 | [] | no_license | vikramraghav90/Python | 390d0206b80e12cd2a6460cec09228c0e3b4b269 | 5a9910364b5e5f3ad10171cd2c06ab3e9eafc39d | refs/heads/master | 2020-04-29T14:42:23.209178 | 2019-04-24T17:37:26 | 2019-06-12T08:23:43 | 176,204,598 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 824 | py |
"""3. Write a function for checking the speed of drivers. This function should have one parameter: speed.
- If speed is less than 70, it should print “Ok”.
- Otherwise, for every 5km above the speed limit (70), it should give the driver one demerit point and print the total number of demerit points. For example, if the speed is 80, it should print: “Points: 2”.
- If the driver gets more than 12 points, the function should print: “License suspended” """
def speed_of_driver(speed):
    """Return the outcome of a speed check.

    'OK' for speeds up to 70; otherwise the number of demerit points (one
    per full 5 km/h over the limit) up to 12; 'license suspended' beyond 12.
    """
    limit = 70
    if speed <= limit:
        return 'OK'
    points = (speed - limit) // 5
    return points if points <= 12 else 'license suspended'
# Manual checks; expected: OK, 1, 1, 4, license suspended.
print(speed_of_driver(55))
print(speed_of_driver(75))
print(speed_of_driver(78))
print(speed_of_driver(90))
print(speed_of_driver(135))
"[email protected]"
] | |
410f4a5d9936de27397992d155a6d1c653de9cc7 | f5be6107d92bc0fb60b6cc7aeb8e6df8e30ad141 | /api.py | 30fd3daa7958eda3484d94518b5237a1573f621b | [] | no_license | qasim-iqbal/rasa_sample | d99fa18423b62b80f72ac0eeed580a0b0edcee11 | 1ed2c241d9a81f849816a98acc9b79b65a851bce | refs/heads/master | 2022-12-12T12:24:44.135756 | 2020-01-10T18:50:34 | 2020-01-10T18:50:34 | 233,109,070 | 0 | 0 | null | 2022-12-08T03:25:20 | 2020-01-10T18:40:02 | Python | UTF-8 | Python | false | false | 3,102 | py | from flask import Flask
from flask import request, jsonify
from flask_cors import CORS, cross_origin
import io
import json
import pytds
import os
import traceback
from rasa_nlu.model import Metadata, Interpreter
from rasa_core.interpreter import RasaNLUInterpreter
from rasa_core.agent import Agent
from rasa_nlu import config as cnf
import logging
from rasa_core.interpreter import RegexInterpreter
from rasa_core.policies.keras_policy import KerasPolicy
from rasa_core.policies.memoization import MemoizationPolicy
from rasa_core.interpreter import RasaNLUInterpreter
import bot_engine.actions
from db import db_conn
app = Flask(__name__)
@app.route('/')
def home():
    # Liveness check / placeholder root page.
    return "Hello, World"
def getStatefulResponse(intent, stage):
    """Look up the IntentID for *intent* in luIntent.

    Args:
        intent: intent name, matched as a substring of luIntent.Intent.
        stage: conversation stage; currently unused, kept for API compatibility.

    Returns:
        The matching IntentID, or the string "default response" when the
        lookup fails (no row found, DB error, ...).
    """
    try:
        with db_conn() as conn:
            cursor = conn.cursor()
            # Parameterized query: the original interpolated `intent`
            # directly into the SQL string (injection vector) and ran the
            # exact same query twice on two connections; once is enough.
            cursor.execute(
                "Select IntentID From luIntent where Intent like %s",
                ("%" + intent + "%",),
            )
            return cursor.fetchone()[0]
    except Exception:
        # fetchone() is None when nothing matches (-> TypeError); DB errors
        # land here too. Fall back to the sentinel response.
        print("some error showed up")
        return "default response"
def getResponse(intent):
    """Fetch the canned response for *intent* from datResponse.

    Returns the Response column of the first matching row, or an apology
    string when no row matches or the query fails.
    """
    with db_conn() as conn:
        try:
            cursor = conn.cursor()
            # Parameterized LIKE instead of str.format (SQL injection fix);
            # except narrowed from a bare `except:` to Exception.
            cursor.execute(
                "Select Response From datResponse where Intent like %s",
                ("%" + intent + "%",),
            )
            response = cursor.fetchone()[0]
        except Exception:
            response = "sorry, could not find that intent"
        return response
@app.route('/chat', methods=['POST'])
@cross_origin(supports_credentials=True)
def chat():
    """POST /chat: run the Rasa agent and return its reply as JSON.

    NOTE(review): work-in-progress. The user's message is currently ignored
    (the agent is always sent the literal "hi"), `response` is never used,
    and the real request/confidence handling below is commented out.
    """
    if request.method == 'POST':
        response = ""
        # try:
        # interpreter = Interpreter.load('./bot_engine/models/nlu/default/nlu')
        interpreter = RasaNLUInterpreter('./bot_engine/models/nlu/default/nluModel')
        agent = Agent.load('./bot_engine/models/dialogue', interpreter = interpreter)
        resp = agent.handle_message("hi")
        print(resp)
        # print(request.form)
        # user_query = request.form['query']
        # resp = agent.handle_message(user_query)
        # data = interpreter.parse(user_query)
        # print(data)
        # confidence = data['intent']['confidence']
        # if confidence < 0.70:
        # response = "Sorry, I did not get that. Could you repeat that?"
        # current = data['intent']['name']
        # # response = getResponse(current)
        # response = resp
        # data = {
        # "request": current,
        # "response": response
        # }
        # except:
        # traceback.print_exc()
        # current = ""
        # data = "something didn't work at the api end"
        data = {
            "request": "stuf",
            "response": resp
        }
    return jsonify(data)
| [
"[email protected]"
] | |
d2e6451557af7313d6ababe7b811844f82474502 | e73f0bd1e15de5b8cb70f1d603ceedc18c42b39b | /adventOfCode/2022/1/1.py | 8d82261bd28d472069bce9fed233ca726d92e8dd | [] | no_license | thran/the_code | cbfa3b8be86c3b31f76f6fbd1deb2013d3326a4a | ba73317ddc42e10791a829cc6e1a3460cc601c44 | refs/heads/master | 2023-01-05T14:39:16.708461 | 2022-12-25T08:37:39 | 2022-12-25T08:37:39 | 160,978,160 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 599 | py | from core import AdventOfCode
class Level(AdventOfCode):
    """Advent of Code 2022, day 1: calorie counting."""

    part_one_test_solution = 24000
    part_two_test_solution = 45000

    def preprocess_input(self, lines):
        """Group numeric lines into per-elf calorie lists; blank lines
        separate elves."""
        groups = [[]]
        for raw in lines:
            if raw.isnumeric():
                groups[-1].append(int(raw))
            else:
                assert raw == ''
                groups.append([])
        return groups

    def part_one(self, elves) -> int:
        """Calories carried by the best-stocked elf."""
        return max(map(sum, elves))

    def part_two(self, elves) -> int:
        """Combined calories of the top three elves."""
        return sum(sorted(map(sum, elves))[-3:])
Level().run()
| [
"[email protected]"
] | |
b57f0f89e5cfdf456e7d8cb79eb17e618ac426d4 | 068714f7aac1ff2ca55fbf2966ea34c96312e0be | /ep_parser.py | 4cca053e25472be2d8cc0b20d94ff5216c7e7bb6 | [
"MIT"
] | permissive | marios8543/ant1-dl | 1670b04f734419af9c7f1bc3e2a8b1b82d647604 | 7cd67d49ae113e0f45af1941617b7ab00a521724 | refs/heads/master | 2023-03-12T17:45:34.805176 | 2020-07-15T14:50:22 | 2020-07-15T14:50:22 | 279,821,217 | 0 | 0 | MIT | 2021-02-26T02:56:05 | 2020-07-15T09:12:19 | Python | UTF-8 | Python | false | false | 950 | py | from aiohttp import ClientSession
from pyquery import PyQuery
from sys import argv
from downloader import main
from time import sleep
from asyncio import get_event_loop
web = ClientSession()
def ruc(coro):
    # "run until complete": drive the coroutine synchronously on the loop.
    return get_event_loop().run_until_complete(coro)
def id_from_link(link):
    """Return the id embedded in a site-relative link: its third
    '/'-separated segment (e.g. '/watch/12345/some-title' -> '12345')."""
    segments = link.split("/", 3)
    return segments[2]
def get_episodes(aid):
    """Collect every episode id for show *aid* by paging the site's
    "morevideos" endpoint until a page returns no articles."""
    page = 1
    ids = []
    while True:
        res = ruc(web.get("https://www.antenna.gr/templates/data/morevideos", params={"aid":aid, "p":page}))
        if res.status == 200:
            root = PyQuery(ruc(res.text()))
            l= []
            for el in root.find("article"):
                # Each article wraps an <a> whose href carries the episode id.
                el = el.find("a")
                link = el.attrib["href"]
                l.append(id_from_link(link))
            ids.extend(l)
            # An empty page marks the end of the listing.
            if not l:
                break
        # NOTE(review): on any non-200 status this loops forever, just
        # incrementing `page` -- consider breaking or retrying with a limit.
        page+=1
    return ids
if __name__ == '__main__':
    # argv[1] is the show id (aid); download every episode found.
    ids = get_episodes(argv[1])
    for i in ids:
        main(i)
"[email protected]"
] | |
32384698e04525a004d2cf02dd5e27c72e84d027 | a37dde1198281be903ac8abb284b4d675a6a25bc | /graphmodels/__init__.py | 1fbfb5497459d4837e6c832c1ce964b286a1cc7d | [
"MIT"
] | permissive | DLunin/pygraphmodels | 6980d43c56def995f82dfc387c53f1f8cc2c5f1e | a6075355369d03cfa63227a4c07a8c780ac240f0 | refs/heads/master | 2021-01-10T17:57:59.693338 | 2016-02-18T10:15:43 | 2016-02-18T10:15:43 | 51,922,102 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 273 | py | from .output import pretty_draw
from .factor import Factor, TableFactor, IdentityFactor, DirichletTableFactorGen
from .dgm import DGM, ErdosRenyiDGMGen, TreeDGMGen
from .inference import *
from .structure import *
from .misc import constant
from .meta import methoddispatch | [
"[email protected]"
] | |
bd5c967c95a080b66aa5f624bb060ec2a9138cd6 | 5a1718380e0433816c61080849b27baaf30ba264 | /django/wall_app_assignment_2/apps/wall_app/migrations/0001_initial.py | 5f2f269e6b00274e8a227fe50470c74c7e67315a | [] | no_license | Himanshuhub/python_stack | 7be4eb43ba994eea62ed44793c3f483703346d25 | a17e4fde3dfdba3346a0093eeb425bfc0fc28d04 | refs/heads/master | 2020-06-16T14:51:13.476018 | 2017-06-27T01:13:29 | 2017-06-27T01:13:29 | 94,148,752 | 1 | 0 | null | 2017-06-22T03:56:19 | 2017-06-12T23:15:00 | Python | UTF-8 | Python | false | false | 2,509 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-06-20 23:23
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=45)),
('comment', models.TextField(max_length=1000)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='Message',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=45)),
('message', models.TextField(max_length=1000)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=45)),
('last_name', models.CharField(max_length=45)),
('email', models.CharField(max_length=45)),
('password', models.CharField(max_length=100)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
migrations.AddField(
model_name='message',
name='user_id',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='wall_app.User'),
),
migrations.AddField(
model_name='comment',
name='message_id',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='wall_app.Message'),
),
migrations.AddField(
model_name='comment',
name='user_id',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='wall_app.User'),
),
]
| [
"[email protected]"
] | |
668c6b660add018360f7f70a5b87f49656790d92 | 3f5c1ecc7c2f07446d15d1e84ec9482c3d6d6a78 | /11/01_.py | 42574e742c40c3e7e8c77bd83acd6e76204be7b6 | [] | no_license | withzjc/t2 | 990a39b9b413d4ca9ca6a4fe4b86c5cd0819d9d9 | 1501341cf87231bd111fc11fd5381f37e6283847 | refs/heads/master | 2020-04-26T07:41:41.414044 | 2019-03-02T04:28:17 | 2019-03-02T04:28:17 | 173,400,982 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 759 | py | # re模块
# 正则表达式 匹配字符串的
# 1 [] [0-9] \转义字符匹配字符 [a-zA-z0-9]
# .匹配除了换行符以外的任意字符
# \w 匹配数字字母下划线
# \s 匹配任意的空白符
# \d 匹配数字
# \n匹配一个换行符
# \t 匹配一个制表符
# \b 匹配一个单词的结尾
# ^匹配字符串的开始
# $匹配字符串的结尾
# \W匹配非字母或数字或下划线
# \D匹配非数字
# \S匹配非空白符
# a|b匹配字符a或者字符b
# ()匹配括号内的表达式,也表示一个组
# 【】匹配字符组中的字符
# 【^】匹配除了字符组中的字符的所有字符
# *重复零次或者多次 默认往多了匹配,贪婪匹配,没有也能匹配上
# ?重复一次或者零次 | [
"[email protected]"
] | |
469210cb817766e41f5047f7afb112c92eab48fd | 770a086356802127749b77d855f0a921fadd36c3 | /app/users/utilities.py | df1c6eb30696193f2ec8b8e771d1c107621289fd | [] | no_license | samlex20/airtime | 14578e525a2603b5640b97964e9fed011ce0f4af | 7c2a4f585239aae713cc99be7f9b061636705c52 | refs/heads/master | 2023-05-14T19:14:04.542471 | 2019-12-28T15:38:26 | 2019-12-28T15:38:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,210 | py | from app import app, mail
from flask_mail import Message
from flask import render_template
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import Mail
import arrow
def send_reset_email(user, template, **kwargs):
    """Send the password-reset mail to *user*, rendering *template* (both the
    .txt and .html variants) with **kwargs as template context."""
    msg = Message(subject=app.config['RESET_MAIL_SUBJECT'],
                  sender=app.config['MAIL_SENDER'],
                  recipients=[user.email])
    msg.body = render_template(template + '.txt', **kwargs)
    msg.html = render_template(template + '.html', **kwargs)
    mail.send(msg)
def airtime_mail(amount, telephone_number, date, balance):
    """Send an airtime-purchase notification via a SendGrid dynamic template."""
    message = Mail(
        from_email=app.config['MAIL_SENDER'],
        # NOTE(review): config key is spelled 'SENDGRIG_EMAIL' -- confirm it
        # matches the app config (possible typo for SENDGRID_EMAIL).
        to_emails=app.config['SENDGRIG_EMAIL']
    )
    # Values substituted into the SendGrid dynamic template.
    message.dynamic_template_data = {
        'subject': app.config['EMAIL_SUBJECT'],
        'amount': amount,
        'telephone': telephone_number,
        'date': date,
        'balance': balance
    }
    message.template_id = app.config['SENDGRID_TEMPLATE_ID']
    sg = SendGridAPIClient(app.config['SENDGRID_API_KEY'])
    # Response is captured but not inspected; failures surface as exceptions.
    response = sg.send(message)
response = sg.send(message)
def current_date():
    """Return the current Nairobi local time as a 'YYYY-MM-DD HH:mm' string."""
    stamp = arrow.utcnow().to('Africa/Nairobi').format('YYYY-MM-DD HH:mm')
    return str(stamp)
| [
"[email protected]"
] | |
6ac59a0ca11d698c305113e9ccc38594275bffb2 | 1cdb1a2cfe237ad05dc4510ebb3fc08c97518176 | /myblog/blogs/migrations/0003_person.py | edc1517aaefbef795b60f3ce8346714093efdb7c | [] | no_license | breakerthb/PythonWebServer | 86b0453dd3caeaa89e1929eb322c7af3809597d2 | a590391c058855c5aa2e5695f9e3c549ae0dbbc0 | refs/heads/master | 2021-01-19T06:41:28.704936 | 2016-06-30T07:48:37 | 2016-06-30T07:48:37 | 60,346,686 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 597 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blogs', '0002_auto_20160630_0514'),
]
operations = [
migrations.CreateModel(
name='Person',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('first_name', models.CharField(max_length=50)),
('last_name', models.CharField(max_length=50)),
],
),
]
| [
"[email protected]"
] | |
f9e9f4090d22c42a444c5ed068ddd426ff23df77 | 5f5ba9e4a767b610914946e17bf74708dbb1b248 | /myInstagram/account/forms.py | 4e711fe96c8288304cdb2a0c6eaf7e6db202dfdc | [] | no_license | bhrigu123/myInstagram | 61a00f517b4413b48219685b9917f9228852574c | 453a228a9057f7096b785ed0ebfe124fd552c4ff | refs/heads/master | 2021-01-18T07:56:22.233045 | 2015-04-18T14:12:12 | 2015-04-18T14:12:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,148 | py | from django import forms
from .models import *
class UserCreationForm(forms.ModelForm):
    """Sign-up form for MyUser with password + confirmation validation."""
    passwd1 = forms.CharField(label='Password', widget=forms.PasswordInput)
    passwd2 = forms.CharField(label='Confirm Password', widget=forms.PasswordInput, help_text = "Should be same as Password")

    def __init__(self, *args, **kwargs):
        super(UserCreationForm, self).__init__(*args, **kwargs)
        # Hide Django's default username help text and make every field
        # (including the model fields below) mandatory.
        self.fields['username'].help_text = ''
        for field in self.fields:
            self.fields[field].required = True

    class Meta:
        model = MyUser
        fields = ['username', 'email', 'first_name', 'last_name', 'gender', 'dob', 'phone']

    def clean_passwd2(self):
        """Validate that the two password entries match; return the second."""
        passwd1 = self.cleaned_data.get("passwd1")
        passwd2 = self.cleaned_data.get("passwd2")
        if passwd1 and passwd2 and passwd1 != passwd2:
            raise forms.ValidationError("Passwords don't match")
        return passwd2

    def save(self, commit=True):
        """Save the user with the password hashed via set_password()."""
        user = super(UserCreationForm, self).save(commit = False)
        user.set_password(self.cleaned_data["passwd1"])
        if commit:
            user.save()
        return user
| [
"[email protected]"
] | |
e519b5c0c5910674da8e7762b686afe714112af8 | dde02c2a9a1b62048f978fb0c75958db13988a81 | /Connect4Py/Connect4.py | dc343fc9a4337ae107ca7cab5c27622bc3a99d79 | [] | no_license | TaylorHarvin/Connect4_AI | 63b19940588303fa39d67bdf84cdc2dd9413cfed | 374388b03687048117cfc3a8b73fa654aee34b48 | refs/heads/master | 2021-01-23T08:21:21.836654 | 2017-03-29T16:43:27 | 2017-03-29T16:43:53 | 86,505,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,578 | py | '''
Developer: Taylor Harvin
Date: 3/29/2017
Purpose: Utilizes Connect4 classes: BoardNode, BoardTools, SlotState, and GameAI
to play Connect4 with a user.
'''
from Connect4Tools.BoardNode import BoardNode
from Connect4Tools.BoardTools import BoardTools
from Connect4Tools.States.SlotState import SlotState
from Connect4Tools.GameAI import GameBoardAI
node = BoardNode(0,6,7) # Primary game board
bt = BoardTools() # Primary board tools
boardAI = GameBoardAI(5000) # Primary game AI
userMove = 0 # Current move by user
keepPlaying = True # Game iteration flag
status = [] # Game status flag (for who won if any winners)
# Checks the current game status and reports whether play should stop.
def GameOver():
    """Refresh the global `status` from the board and return True when the
    game has ended (a winner OR a tie), False while play should continue."""
    global status
    global keepPlaying
    global node
    global bt
    status = bt.GetWinner(node.boardState)
    if status[0] != SlotState.EMPTY:
        print("GAME OVER -- Winner: ",status[0])
        return True
    elif status[1] == True:
        print("GAME OVER -- Tie")
        # BUGFIX: the original returned False here, so Continue() kept
        # prompting for moves on a full board instead of ending the game.
        return True
    # No winner, no tie: the original fell off the end (implicit None);
    # return False explicitly.
    return False
def Continue():
    """Report whether play should go on; on game over, reset and offer a rematch.

    Side effect: replaces the module-level ``node`` with a fresh board when
    the current game has finished.
    """
    global node
    if not GameOver():
        # Nothing decided yet -- keep playing on the current board.
        return True
    # Show the final board, start a fresh one, then ask about a rematch.
    print(node)
    node = BoardNode(0,6,7)
    answer = input("Play Again? (y/n): ")
    return answer == 'y'
if __name__ == "__main__":
    # Main loop: alternate user moves and AI moves until the game ends and
    # the user declines a rematch (Continue() handles reset/replay).
    while(keepPlaying):
        print(node)
        userMove = input("Enter your move (0-6): ")
        # NOTE(review): `userMove` is passed to Learn() as the raw string but
        # to PlayMove() as an int -- confirm Learn() really expects a string.
        boardAI.Learn(node.boardState,userMove)
        bt.PlayMove(node.boardState,int(userMove),SlotState.USER)
        keepPlaying = Continue()
        if keepPlaying:
            boardAI.PlayAIMove(node)
            keepPlaying = Continue()
| [
"[email protected]"
] | |
e6c8397e01d4b202f7051fca10401bd1198bc5d3 | 84686b23a574201b761a267d18bfd829a2987d29 | /convert.py | f6e067b016669823dbfb030f39dc2f92557c1352 | [] | no_license | trojan321/KantarTask | 4e80d6e37e3bd255c9314fc9c6af6670de70fda6 | 547be164cbf858ba678e0f620104444e1b50b1f4 | refs/heads/main | 2023-04-17T18:47:18.076434 | 2021-04-26T12:15:20 | 2021-04-26T12:15:20 | 361,736,509 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,480 | py | # load pandas
import pandas as pd
import datetime
import argparse
# Default file locations, overridable via the CLI flags below.
INPUT_FILE = 'input-statements.psv'
OUTPUT_FILE = 'output-sessions.psv'


def convert(input_file=INPUT_FILE, output_file=OUTPUT_FILE):
    """Convert a pipe-separated statement log into per-session records.

    Each input row carries a ``HomeNo`` and a ``Starttime`` (YYYYMMDDHHMMSS).
    For every row this derives:

    * ``EndTime``  -- one second before the same home's next statement, or
      23:59:59 of the start day for the home's last statement;
    * ``Duration`` -- seconds from Starttime to that boundary (the final
      second is counted for a home's last row, hence the ``+ 1``).

    The result is written pipe-separated to ``output_file`` without an index.
    """
    df = pd.read_csv(input_file,sep='|')
    df['HomeNo'] = df['HomeNo'].astype(str)
    df = df.sort_values(['HomeNo',"Starttime"])
    df['StarttimeConv'] = pd.to_datetime(df['Starttime'], format='%Y%m%d%H%M%S')
    # Shift by -1 so every row can see the *next* row's home and start time.
    df['HomeNo_s'] = df['HomeNo'].shift(-1)
    df['StarttimeConv_s'] = df['StarttimeConv'].shift(-1)

    def _end_of_day(ts):
        # 23:59:59 on the same calendar day as `ts`.
        return datetime.datetime(ts.year, ts.month, ts.day, 23, 59, 59)

    def end_time_conv(row):
        # A session ends 1s before the same home's next statement; the home's
        # last statement (next row is another home, or NaN at the frame end)
        # runs to the end of its start day.
        if row['HomeNo'] == row['HomeNo_s']:
            return row['StarttimeConv_s'] - datetime.timedelta(seconds=1)
        return _end_of_day(row['StarttimeConv'])

    # Perf fix: each apply() used to run twice with the first result thrown
    # away (df.apply(...) on its own line, then again for the assignment).
    df['EndTimeConv'] = df.apply(end_time_conv, axis=1)
    df['EndTime'] = df.apply(lambda row: row['EndTimeConv'].strftime('%Y%m%d%H%M%S'), axis=1)

    def duration(row):
        if row['HomeNo'] == row['HomeNo_s']:
            return (row['StarttimeConv_s'] - row['StarttimeConv']).total_seconds()
        # +1 so the final second (…23:59:59) counts for the home's last row.
        return (_end_of_day(row['StarttimeConv']) - row['StarttimeConv']).total_seconds() + 1

    df['Duration'] = df.apply(duration, axis=1)
    df = df.astype({'Duration': 'int64'})
    df = df.drop(columns=['StarttimeConv', 'HomeNo_s', 'StarttimeConv_s', 'EndTimeConv'])
    print(df)
    df.to_csv(output_file, sep='|', index=False)
if __name__ == "__main__":
    # Command-line entry point: run the conversion with optional overrides
    # for the input and output file paths.
    parser = argparse.ArgumentParser(description='Get input and output files names')
    parser.add_argument('--input_file', type=str, default=INPUT_FILE,
                        help='an input file path')
    # Typo fix: the help text used to end with a stray ')'.
    parser.add_argument('--output_file', type=str, default=OUTPUT_FILE,
                        help='an output file path')
    args = parser.parse_args()
    convert(str(args.input_file), str(args.output_file))
| [
"[email protected]"
] | |
6bdfa2ab8c3069a37a97d4c6fa6c918cdf77a26f | 892fb6c81b2e0b50f82194e36fefefef440d3dcc | /airflow_dag/create_background_xml.py | 947f16d58819114938d7d41ed00d171dc8694560 | [] | no_license | mickmcd01/flickr_slides | 483f3108d7dfce1043c28fdaf25ad096a750393f | 755451d5e803ac9d10899b0cc2bcabba8dd4cf13 | refs/heads/master | 2022-12-15T23:37:06.934436 | 2020-09-16T23:10:57 | 2020-09-16T23:10:57 | 269,445,811 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,279 | py | import os
import argparse
from shutil import copyfile
# Where GNOME/Shotwell reads the slideshow definition from, and a scratch
# location to build it in before copying it into place.
xml_destination = '/home/mick/.local/share/shotwell/wallpaper/wallpaper.xml'
xml_temp = '/home/mick/wallpaper.xml'
parser = argparse.ArgumentParser()
parser.add_argument("--slides", help="full path to the slides directory")
args = parser.parse_args()
# Collect the .jpg files in the slides directory.
# NOTE(review): os.listdir() order is arbitrary, so the slideshow order is
# non-deterministic; an empty directory crashes with IndexError below.
file_list = []
for pic in os.listdir(args.slides):
    if pic.endswith(".jpg"):
        file_list.append(pic)
# Emit the wallpaper XML: a 60s <static> per image with 2s <transition>s.
with open(xml_temp, 'w') as xml:
    xml.write('<background>\n\t<static>\n\t\t<duration>60.00</duration>\n')
    full_path = os.path.join(args.slides, file_list[0])
    xml.write('\t\t<file>%s</file>\n\t</static>\n' % full_path)
    from_path = full_path
    for idx, entry in enumerate(file_list):
        if idx == 0:
            continue  # first image was already written as the opening <static>
        xml.write('\t<transition>\n\t\t<duration>2.00</duration>\n')
        xml.write('\t\t<from>%s</from>\n' % from_path)
        to_path = os.path.join(args.slides, entry)
        xml.write('\t\t<to>%s</to>\n' % to_path)
        xml.write('\t</transition>\n')
        from_path = to_path
    # NOTE(review): `to_path` is only bound inside the loop, so a directory
    # with exactly one .jpg raises NameError here -- confirm and guard.
    xml.write('\t<static>\n\t\t<duration>60.00</duration>\n')
    xml.write('\t\t<file>%s</file>\n' % to_path)
    xml.write('\t</static>\n')
    xml.write('</background>\n')
copyfile(xml_temp, xml_destination)
| [
"[email protected]"
] | |
08f45135b85d71d3008d48ff979d44ce38600bcc | b6edc802ba0a8384d109a45fac1bf6aeaa7671d8 | /functional_tests/test_layout_and_styling.py | 0026ae846f40a43f5a2a43085d9e1ba554847189 | [] | no_license | bfalase/python-tdd-book | 28b4ab042af4318c0d58f06ada60bdbf9f4f7c03 | 4057515c03c599e3c8f07007384b78aba2aed1c6 | refs/heads/master | 2022-12-24T19:53:14.804365 | 2020-09-29T22:10:37 | 2020-09-29T22:10:37 | 291,580,020 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 978 | py | from selenium.webdriver.common.keys import Keys
from .base import FunctionalTest
class LayoutAndStylingTest(FunctionalTest):
    """Smoke-tests page layout: the to-do input box stays centered."""

    def assert_inputbox_centered(self, inputbox):
        """Assert `inputbox` is horizontally centered in a 1024px-wide window."""
        # Center of the element should sit at x=512 (half of 1024), with a
        # small tolerance for scrollbars/borders.
        self.assertAlmostEqual(
            inputbox.location['x'] + inputbox.size['width'] / 2,
            512,
            delta = 10
        )

    def test_layout_and_styling(self):
        # Edith goes to the home page
        self.browser.get(self.live_server_url)
        self.browser.set_window_size(1024, 768)

        # She notices the input box is nicely centered
        # (duplicated assertAlmostEqual blocks extracted into a helper).
        inputbox = self.get_item_input_box()
        self.assert_inputbox_centered(inputbox)

        # She starts a new list and sees the input is nicely
        # centered there too
        inputbox.send_keys('testing')
        inputbox.send_keys(Keys.ENTER)
        self.wait_for_row_in_list_table('1: testing')
        self.assert_inputbox_centered(self.get_item_input_box())
"[email protected]"
] | |
13c3e14be1f6c838ffcef4205b18810fc53689a7 | 1b871f74f4ded40de7bb70c7a2acd19ef93ba51a | /script/test.py | fdb7ac2a5a1feb086e1507df86dae7be3b097a51 | [] | no_license | 2033329616/caffe_project | e90f9fba5d47d4bc061a59e523e35d0de9837358 | 35c47790c0dcf6a457d259d8e352c0b944b1dbae | refs/heads/master | 2021-09-01T14:31:13.979329 | 2017-12-27T13:19:49 | 2017-12-27T13:19:49 | 112,702,547 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 280 | py |
class Test(object):
def __init__(self):
self.name = 'hahahahha'
name1 = '2233'
_name2 = '666'
__name3 = '777'
def print_name(self):
print(self.name)
a1 = Test()
print(a1.name)
a1.print_name()
# print(a1._Test__name2)
# print(Test._Test__name3)
# print(Test.__dict__) | [
"[email protected]"
] | |
a7a907377db196588f378a6228680aafcdb7fcf7 | 6299ee9b9bb0ba90376005c3f7bc8a71f546b6aa | /routes.py | 20a33501bfd5b7ccb2c7c04e2a7747690f407097 | [] | no_license | jordancohill-1/lab3 | 3441ff78aee34d1e307ba3a5dd56739d10f9e7b4 | 713dbdde95a9bd16ca9a5dd6cebdeb1ff6fb24ad | refs/heads/master | 2023-02-06T19:53:09.201047 | 2019-07-15T19:23:49 | 2019-07-15T19:23:49 | 197,054,838 | 0 | 0 | null | 2023-02-02T06:36:07 | 2019-07-15T18:52:13 | Python | UTF-8 | Python | false | false | 1,349 | py | from flask import Flask, render_template, request, redirect, url_for
from models import db, User
from forms import UsersForm
from flask_heroku import Heroku
# Application setup: Flask app, Heroku integration, SQLAlchemy binding.
app = Flask(__name__)
# Heroku(app): presumably pulls DB config (e.g. DATABASE_URL) from the
# environment -- confirm against flask_heroku docs.
heroku = Heroku(app)
#app = Flask(__name__)
#app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://localhost:5433/usersdb'
db.init_app(app)
# NOTE(review): hard-coded secret key; in production this should come from
# an environment variable, not source control.
app.secret_key = "e14a-key"
@app.route("/")
def index():
return render_template("index.html")
@app.route('/add-user', methods=['GET', 'POST'])
def add_user():
    """Show the new-user form (GET) and create the user on a valid POST."""
    form = UsersForm()
    if request.method == 'GET':
        return render_template('add_user.html', form=form)
    else:
        if form.validate_on_submit():
            first_name = request.form['first_name']
            age = request.form['age']
            new_user = User(first_name=first_name, age=age)
            db.session.add(new_user)
            db.session.commit()
            return redirect(url_for('index'))
        # Bug fix: an invalid POST previously fell through and returned None,
        # which Flask turns into a 500. Re-render the form instead so the
        # validation errors are shown to the user.
        return render_template('add_user.html', form=form)
@app.route('/read')
def show_user():
    """List every stored user."""
    return render_template('read.html', users=User.query.all())
@app.route('/delete-user/<int:user_id>')
def delete_user(user_id):
    """Delete the user with `user_id` (if any) and return to the list."""
    user = db.session.query(User).filter_by(user_id=user_id).first()
    # Robustness fix: .first() returns None for an unknown id, and
    # db.session.delete(None) would raise a 500 instead of redirecting.
    if user is not None:
        db.session.delete(user)
        db.session.commit()
    return redirect(url_for('show_user'))
if __name__ == "__main__":
    # Local development server only; debug=True must not ship to production.
    app.run(debug=True)
| [
"[email protected]"
] | |
12e7e85fa298c4350284f7490d1ef6623f9d1087 | 9e252d6e4a1571eced7a32189eb3b3ccc870cfe3 | /ums2/money/data.py | d2c7d1e88d5de6edf0fb2cfacab0891ed6e3168a | [] | no_license | c0per/OIProblems | 1d5893301dd9f8bbe3e40823ced29520c3477a03 | 2282c51cb301807a314a7d7bc9035a5560834b61 | refs/heads/master | 2023-02-06T07:25:49.714674 | 2020-12-25T13:55:26 | 2020-12-25T13:55:26 | 288,164,601 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 587 | py | import cyaron, os
os.system('g++ -o std -O2 ./std.cpp')
for i in range(10):
io = cyaron.IO(file_prefix = 'money', data_id = i + 1)
if i < 2:
n = cyaron.randint(15, 20)
elif i < 4:
n = cyaron.randint(900, 1000)
else:
n = cyaron.randint(900000, 1000000)
io.input_writeln(n)
tree = cyaron.Graph.tree(n, weight_limit = (1, 1000000000))
for j in range(n):
io.input_write(cyaron.randint(1, 1000000000))
io.input_write('\n')
io.input_writeln(tree)
io.output_gen('./std')
print('data{} generated'.format(i + 1))
| [
"[email protected]"
] | |
c4720ac12db10f8dd80e8443b991684ddcd4f399 | 0b63d8ee4bee3be54be848568350b04f928287b3 | /VisionAPIDemo/Lib/site-packages/google/cloud/vision_v1p3beta1/services/product_search/transports/base.py | 2b219f8b2544aa6dcc48f4d9ae4ab44772b13f53 | [] | no_license | kailinchu/machacks-text | f55a23c1400e5919c4850e5b97a131622b971ea8 | a2c6699753a137c4680cc8b21a7fb88144886dca | refs/heads/main | 2023-02-27T07:47:41.271216 | 2021-02-07T17:33:50 | 2021-02-07T17:33:50 | 336,629,531 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,110 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import typing
import pkg_resources
from google import auth # type: ignore
from google.api_core import exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.auth import credentials # type: ignore
from google.cloud.vision_v1p3beta1.types import product_search_service
from google.longrunning import operations_pb2 as operations # type: ignore
from google.protobuf import empty_pb2 as empty # type: ignore
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-cloud-vision",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class ProductSearchTransport(abc.ABC):
"""Abstract transport class for ProductSearch."""
AUTH_SCOPES = (
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-vision",
)
def __init__(
self,
*,
host: str = "vision.googleapis.com",
credentials: credentials.Credentials = None,
credentials_file: typing.Optional[str] = None,
scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES,
quota_project_id: typing.Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]): The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scope (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
credentials, _ = auth.load_credentials_from_file(
credentials_file, scopes=scopes, quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = auth.default(
scopes=scopes, quota_project_id=quota_project_id
)
# Save the credentials.
self._credentials = credentials
# Lifted into its own function so it can be stubbed out during tests.
self._prep_wrapped_messages(client_info)
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.create_product_set: gapic_v1.method.wrap_method(
self.create_product_set,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(),
),
default_timeout=600.0,
client_info=client_info,
),
self.list_product_sets: gapic_v1.method.wrap_method(
self.list_product_sets,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=600.0,
client_info=client_info,
),
self.get_product_set: gapic_v1.method.wrap_method(
self.get_product_set,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=600.0,
client_info=client_info,
),
self.update_product_set: gapic_v1.method.wrap_method(
self.update_product_set,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(),
),
default_timeout=600.0,
client_info=client_info,
),
self.delete_product_set: gapic_v1.method.wrap_method(
self.delete_product_set,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=600.0,
client_info=client_info,
),
self.create_product: gapic_v1.method.wrap_method(
self.create_product,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(),
),
default_timeout=600.0,
client_info=client_info,
),
self.list_products: gapic_v1.method.wrap_method(
self.list_products,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=600.0,
client_info=client_info,
),
self.get_product: gapic_v1.method.wrap_method(
self.get_product,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=600.0,
client_info=client_info,
),
self.update_product: gapic_v1.method.wrap_method(
self.update_product,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(),
),
default_timeout=600.0,
client_info=client_info,
),
self.delete_product: gapic_v1.method.wrap_method(
self.delete_product,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=600.0,
client_info=client_info,
),
self.create_reference_image: gapic_v1.method.wrap_method(
self.create_reference_image,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(),
),
default_timeout=600.0,
client_info=client_info,
),
self.delete_reference_image: gapic_v1.method.wrap_method(
self.delete_reference_image,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=600.0,
client_info=client_info,
),
self.list_reference_images: gapic_v1.method.wrap_method(
self.list_reference_images,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=600.0,
client_info=client_info,
),
self.get_reference_image: gapic_v1.method.wrap_method(
self.get_reference_image,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=600.0,
client_info=client_info,
),
self.add_product_to_product_set: gapic_v1.method.wrap_method(
self.add_product_to_product_set,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(),
),
default_timeout=600.0,
client_info=client_info,
),
self.remove_product_from_product_set: gapic_v1.method.wrap_method(
self.remove_product_from_product_set,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(),
),
default_timeout=600.0,
client_info=client_info,
),
self.list_products_in_product_set: gapic_v1.method.wrap_method(
self.list_products_in_product_set,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=600.0,
client_info=client_info,
),
self.import_product_sets: gapic_v1.method.wrap_method(
self.import_product_sets,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(),
),
default_timeout=600.0,
client_info=client_info,
),
}
@property
def operations_client(self) -> operations_v1.OperationsClient:
"""Return the client designed to process long-running operations."""
raise NotImplementedError()
@property
def create_product_set(
self,
) -> typing.Callable[
[product_search_service.CreateProductSetRequest],
typing.Union[
product_search_service.ProductSet,
typing.Awaitable[product_search_service.ProductSet],
],
]:
raise NotImplementedError()
@property
def list_product_sets(
self,
) -> typing.Callable[
[product_search_service.ListProductSetsRequest],
typing.Union[
product_search_service.ListProductSetsResponse,
typing.Awaitable[product_search_service.ListProductSetsResponse],
],
]:
raise NotImplementedError()
@property
def get_product_set(
self,
) -> typing.Callable[
[product_search_service.GetProductSetRequest],
typing.Union[
product_search_service.ProductSet,
typing.Awaitable[product_search_service.ProductSet],
],
]:
raise NotImplementedError()
@property
def update_product_set(
self,
) -> typing.Callable[
[product_search_service.UpdateProductSetRequest],
typing.Union[
product_search_service.ProductSet,
typing.Awaitable[product_search_service.ProductSet],
],
]:
raise NotImplementedError()
@property
def delete_product_set(
self,
) -> typing.Callable[
[product_search_service.DeleteProductSetRequest],
typing.Union[empty.Empty, typing.Awaitable[empty.Empty]],
]:
raise NotImplementedError()
@property
def create_product(
self,
) -> typing.Callable[
[product_search_service.CreateProductRequest],
typing.Union[
product_search_service.Product,
typing.Awaitable[product_search_service.Product],
],
]:
raise NotImplementedError()
@property
def list_products(
self,
) -> typing.Callable[
[product_search_service.ListProductsRequest],
typing.Union[
product_search_service.ListProductsResponse,
typing.Awaitable[product_search_service.ListProductsResponse],
],
]:
raise NotImplementedError()
@property
def get_product(
self,
) -> typing.Callable[
[product_search_service.GetProductRequest],
typing.Union[
product_search_service.Product,
typing.Awaitable[product_search_service.Product],
],
]:
raise NotImplementedError()
@property
def update_product(
self,
) -> typing.Callable[
[product_search_service.UpdateProductRequest],
typing.Union[
product_search_service.Product,
typing.Awaitable[product_search_service.Product],
],
]:
raise NotImplementedError()
@property
def delete_product(
self,
) -> typing.Callable[
[product_search_service.DeleteProductRequest],
typing.Union[empty.Empty, typing.Awaitable[empty.Empty]],
]:
raise NotImplementedError()
@property
def create_reference_image(
self,
) -> typing.Callable[
[product_search_service.CreateReferenceImageRequest],
typing.Union[
product_search_service.ReferenceImage,
typing.Awaitable[product_search_service.ReferenceImage],
],
]:
raise NotImplementedError()
@property
def delete_reference_image(
self,
) -> typing.Callable[
[product_search_service.DeleteReferenceImageRequest],
typing.Union[empty.Empty, typing.Awaitable[empty.Empty]],
]:
raise NotImplementedError()
@property
def list_reference_images(
self,
) -> typing.Callable[
[product_search_service.ListReferenceImagesRequest],
typing.Union[
product_search_service.ListReferenceImagesResponse,
typing.Awaitable[product_search_service.ListReferenceImagesResponse],
],
]:
raise NotImplementedError()
@property
def get_reference_image(
self,
) -> typing.Callable[
[product_search_service.GetReferenceImageRequest],
typing.Union[
product_search_service.ReferenceImage,
typing.Awaitable[product_search_service.ReferenceImage],
],
]:
raise NotImplementedError()
@property
def add_product_to_product_set(
self,
) -> typing.Callable[
[product_search_service.AddProductToProductSetRequest],
typing.Union[empty.Empty, typing.Awaitable[empty.Empty]],
]:
raise NotImplementedError()
@property
def remove_product_from_product_set(
self,
) -> typing.Callable[
[product_search_service.RemoveProductFromProductSetRequest],
typing.Union[empty.Empty, typing.Awaitable[empty.Empty]],
]:
raise NotImplementedError()
@property
def list_products_in_product_set(
self,
) -> typing.Callable[
[product_search_service.ListProductsInProductSetRequest],
typing.Union[
product_search_service.ListProductsInProductSetResponse,
typing.Awaitable[product_search_service.ListProductsInProductSetResponse],
],
]:
raise NotImplementedError()
@property
def import_product_sets(
self,
) -> typing.Callable[
[product_search_service.ImportProductSetsRequest],
typing.Union[operations.Operation, typing.Awaitable[operations.Operation]],
]:
raise NotImplementedError()
__all__ = ("ProductSearchTransport",)
| [
"[email protected]"
] | |
222bab0f6336be0cdd7b057c8bc9592e9c85e4f9 | c1505e557c722f3296fa38baab066bcb7b97f43a | /geoids_traveltimepolygons/tethysapp-geoids_traveltimepolygons/tethysapp/geoids_traveltimepolygons/app.py | 35683b2e6d5929139165b40d35d76ed2777893d9 | [] | no_license | Geoids/geoids_traveltimepolygons | 3227d6ba104446acccfb98da565240b478c062ed | dc0c33154182e51c1ff2bcef72abefa637c38424 | refs/heads/master | 2021-04-12T11:52:24.558226 | 2018-04-12T21:08:19 | 2018-04-12T21:08:19 | 126,903,004 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,605 | py | from tethys_sdk.base import TethysAppBase, url_map_maker
class geoids_traveltimepolygons(TethysAppBase):
    """
    Tethys app class for Travel Time Distance Map App.

    Declares app metadata (name, icon, URLs, theme color) and the URL map
    that routes each page to its controller function.
    """
    # App metadata consumed by the Tethys portal.
    name = 'Travel Time Polygons'
    index = 'geoids_traveltimepolygons:home'
    icon = 'geoids_traveltimepolygons/images/car.jpg'
    package = 'geoids_traveltimepolygons'
    root_url = 'geoids_traveltimepolygons'
    color = '#ffa500'
    description = 'This app shows how far vehicles can travel along any route through a road network in a given amount of time.'
    tags = 'Transportation; Travel Times; Road Network;'
    enable_feedback = False
    feedback_emails = []

    def url_maps(self):
        """
        Map each app URL to its controller function.
        """
        UrlMap = url_map_maker(self.root_url)

        # One UrlMap per page: home, interactive map, proposal, and design.
        url_maps = (
            UrlMap(
                name='home',
                url='geoids_traveltimepolygons',
                controller='geoids_traveltimepolygons.controllers.home'
            ),
            UrlMap(
                name='map',
                url='geoids_traveltimepolygons/map',
                controller='geoids_traveltimepolygons.controllers.map'
            ),
            UrlMap(
                name='proposal',
                url='geoids_traveltimepolygons/proposal',
                controller='geoids_traveltimepolygons.controllers.proposal'
            ),
            UrlMap(
                name='design',
                url='geoids_traveltimepolygons/design',
                controller='geoids_traveltimepolygons.controllers.design'
            ),
        )

        return url_maps
| [
"[email protected]"
] | |
e93b29e38017527a9d528055769639b17c587b5b | bdb7e01e4de5103d26b4496d938cbb98d4d3b511 | /tracpro/groups/tests/test_views.py | 2cba2ff45cb72bfe6a982bdb9f5893d285aac22e | [
"BSD-3-Clause"
] | permissive | devartis/tracpro | f780b850126a82ea96801aef25c7ad06a061bc88 | 8253a53761c4b12cde18b0157c4a250b58468ee9 | refs/heads/develop | 2021-01-21T15:49:55.700557 | 2016-08-18T15:57:46 | 2016-08-18T15:57:46 | 64,132,820 | 3 | 0 | null | 2016-07-25T12:33:50 | 2016-07-25T12:33:50 | null | UTF-8 | Python | false | false | 24,042 | py | from __future__ import unicode_literals
import json
from dateutil.relativedelta import relativedelta
from django.core.urlresolvers import reverse
from django.utils import timezone
from tracpro.polls import models as polls
from tracpro.test import factories
from tracpro.test.cases import TracProDataTest, TracProTest
from .. import models
class TestSetRegion(TracProTest):
    """Tests for the view that stores the current region in the session.

    Idiom fix throughout: ``assertFalse(key in session)`` replaced with
    ``assertNotIn(key, session)`` for clearer failure messages.
    """
    url_name = "set-region"

    def setUp(self):
        super(TestSetRegion, self).setUp()
        self.org = factories.Org()
        self.region = factories.Region(org=self.org)
        self.user = factories.User()
        self.user.regions.add(self.region)
        self.login(self.user)

    @property
    def session_key(self):
        return '{org}:region_id'.format(org=self.org.pk)

    def set_region(self, data):
        return self.url_post(self.org.subdomain, reverse(self.url_name), data)

    def test_unauthenticated(self):
        """Unauthenticated users cannot set a region."""
        self.client.logout()
        response = self.set_region({'region': self.region.pk})
        self.assertLoginRedirect(response, self.org.subdomain, reverse(self.url_name))
        self.assertNotIn(self.session_key, self.client.session)

    def test_get(self):
        """Set region view does not allow GET."""
        response = self.url_get(self.org.subdomain, reverse(self.url_name))
        self.assertEqual(response.status_code, 405)
        self.assertNotIn(self.session_key, self.client.session)

    def test_no_region(self):
        """Set region view requires `region` POST parameter."""
        response = self.set_region({})
        self.assertEqual(response.status_code, 400)
        self.assertNotIn(self.session_key, self.client.session)

    def test_all_not_admin(self):
        """Non-admin user cannot set region to "All regions"."""
        response = self.set_region({'region': 'all'})
        self.assertEqual(response.status_code, 400)
        self.assertNotIn(self.session_key, self.client.session)

    def test_all(self):
        """Admin user can set region to "All regions"."""
        self.org.administrators.add(self.user)
        response = self.set_region({'region': 'all'})
        self.assertRedirects(
            response, reverse('home.home'), self.org.subdomain,
            fetch_redirect_response=False)
        self.assertIsNone(self.client.session[self.session_key])

    def test_non_existant(self):
        """Cannot set a non-existant region."""
        response = self.set_region({'region': '1234'})
        self.assertEqual(response.status_code, 400)
        self.assertNotIn(self.session_key, self.client.session)

    def test_not_in_user_regions(self):
        """Cannot set a region the user doesn't have access to."""
        another_region = factories.Region(org=self.org)
        response = self.set_region({'region': another_region.pk})
        self.assertEqual(response.status_code, 400)
        self.assertNotIn(self.session_key, self.client.session)

    def test_set_region(self):
        """Set region_id variable in the session."""
        response = self.set_region({'region': self.region.pk})
        self.assertRedirects(
            response, reverse('home.home'), self.org.subdomain,
            fetch_redirect_response=False)
        self.assertEqual(self.client.session[self.session_key], str(self.region.pk))

    def test_next_invalid(self):
        """Should not redirect to an invalid `next` URL."""
        response = self.set_region({
            'region': self.region.pk,
            'next': 'http://example.com/',
        })
        self.assertRedirects(
            response, reverse('home.home'), self.org.subdomain,
            fetch_redirect_response=False)

    def test_next(self):
        """Should redirect to custom `next` URL."""
        response = self.set_region({
            'region': self.region.pk,
            'next': '/admin/',
        })
        self.assertRedirects(
            response, '/admin/', self.org.subdomain,
            fetch_redirect_response=False)
class TestToggleSubregions(TracProTest):
    """Tests for the view that toggles subregion data in the session.

    Idiom fix throughout: ``assertFalse(key in session)`` replaced with
    ``assertNotIn(key, session)`` for clearer failure messages.
    """
    url_name = "toggle-subregions"
    session_key = "include_subregions"

    def setUp(self):
        super(TestToggleSubregions, self).setUp()
        self.org = factories.Org()
        self.user = factories.User()
        self.login(self.user)

    def toggle_subregions(self, data):
        return self.url_post(self.org.subdomain, reverse(self.url_name), data)

    def test_unauthenticated(self):
        """Unauthenticated users cannot toggle subregion data."""
        self.client.logout()
        response = self.toggle_subregions({'include_subregions': '0'})
        self.assertLoginRedirect(response, self.org.subdomain, reverse(self.url_name))
        self.assertNotIn(self.session_key, self.client.session)

    def test_get(self):
        """Toggle subregion view does not allow GET."""
        response = self.url_get(self.org.subdomain, reverse(self.url_name))
        self.assertEqual(response.status_code, 405)
        self.assertNotIn(self.session_key, self.client.session)

    def test_no_include_subregions(self):
        """Toggle subregion view requires `include_subregions` POST parameter."""
        response = self.toggle_subregions({})
        self.assertEqual(response.status_code, 400)
        self.assertNotIn(self.session_key, self.client.session)

    def test_invalid_value(self):
        """`include_subregions` value must be '0' or '1'."""
        response = self.toggle_subregions({'include_subregions': 'asdf'})
        self.assertEqual(response.status_code, 400)
        self.assertNotIn(self.session_key, self.client.session)

    def test_include_subregions(self):
        """`include_subregions` value of '1' sets parameter to True."""
        response = self.toggle_subregions({'include_subregions': '1'})
        self.assertRedirects(
            response, reverse('home.home'), self.org.subdomain,
            fetch_redirect_response=False)
        self.assertTrue(self.client.session['include_subregions'])

    def test_exclude_subregions(self):
        """`include_subregions` value of '0' sets parameter to False."""
        response = self.toggle_subregions({'include_subregions': '0'})
        self.assertRedirects(
            response, reverse('home.home'), self.org.subdomain,
            fetch_redirect_response=False)
        self.assertFalse(self.client.session['include_subregions'])

    def test_next_invalid(self):
        """Should not redirect to an invalid `next` URL."""
        response = self.toggle_subregions({
            'include_subregions': '0',
            'next': 'http://example.com/',
        })
        self.assertRedirects(
            response, reverse('home.home'), self.org.subdomain,
            fetch_redirect_response=False)

    def test_next(self):
        """Should redirect to custom `next` URL."""
        response = self.toggle_subregions({
            'include_subregions': '0',
            'next': '/admin/',
        })
        self.assertRedirects(
            response, '/admin/', self.org.subdomain,
            fetch_redirect_response=False)
class TestRegionList(TracProDataTest):
    """Tests for the region list view, which is restricted to org admins."""
    url_name = "groups.region_list"

    def test_list_non_admin(self):
        # A non-admin user is redirected to the login page.
        self.login(self.user1)  # not an admin
        url = reverse(self.url_name)
        response = self.url_get('unicef', url)
        self.assertLoginRedirect(response, "unicef", url)

    def test_list_admin(self):
        # An admin sees the full list -- presumably the 3 regions created by
        # the TracProDataTest fixture; confirm in the base test class.
        self.login(self.admin)
        response = self.url_get('unicef', reverse(self.url_name))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.context['object_list']), 3)
class TestRegionMostActive(TracProDataTest):
    """Tests for the JSON endpoint listing the most active regions."""

    url_name = "groups.region_most_active"

    def test_most_active(self):
        now = timezone.now()
        old = now - relativedelta(weeks=5)    # outside the last month
        recent = now - relativedelta(days=5)  # inside the last month
        pollrun = factories.RegionalPollRun(
            poll=self.poll1,
            conducted_on=old,
        )
        # Empty responses never count, even when recent (contact in region #1).
        factories.Response(
            pollrun=pollrun, contact=self.contact1,
            created_on=recent, updated_on=recent,
            status=polls.Response.STATUS_EMPTY)
        # Responses outside the last month never count (region #2).
        factories.Response(
            pollrun=pollrun, contact=self.contact4,
            created_on=old, updated_on=old,
            status=polls.Response.STATUS_PARTIAL)
        # One recent partial response counts for region #2.
        factories.Response(
            pollrun=pollrun, contact=self.contact4,
            created_on=recent, updated_on=recent,
            status=polls.Response.STATUS_PARTIAL)
        # Two recent complete responses count for region #3.
        for _ in range(2):
            factories.Response(
                pollrun=pollrun, contact=self.contact5,
                created_on=recent, updated_on=recent,
                status=polls.Response.STATUS_COMPLETE)

        # Log in as a non-administrator.
        self.login(self.user1)
        response = self.url_get('unicef', reverse(self.url_name))
        results = json.loads(response.content)['results']

        self.assertEqual(len(results), 2)
        self.assertEqual(results[0]['id'], self.region3.pk)
        self.assertEqual(results[0]['name'], self.region3.name)
        self.assertEqual(results[0]['response_count'], 2)
        self.assertEqual(results[1]['id'], self.region2.pk)
        self.assertEqual(results[1]['name'], self.region2.name)
        self.assertEqual(results[1]['response_count'], 1)
class TestRegionUpdateAll(TracProTest):
    """Tests for the bulk endpoint that rewrites every region's parent and
    boundary in one POST. Payload format: {region_id: [parent_id, boundary_id]}.
    """
    url_name = "groups.region_update_all"

    def setUp(self):
        super(TestRegionUpdateAll, self).setUp()
        # Log in as an administrator of a fresh org; the view is admin-only.
        self.user = factories.User()
        self.login(self.user)
        self.org = factories.Org(name="Test", subdomain="test")
        self.org.administrators.add(self.user)

    def assertErrorResponse(self, data, message):
        """Assert that the data causes an error with the given message."""
        response = self.url_post("test", reverse(self.url_name), data=data)
        # The view always answers HTTP 200; success/failure lives in the JSON.
        self.assertEqual(response.status_code, 200)
        content = json.loads(response.content.decode("utf-8"))
        self.assertFalse(content['success'])
        self.assertEqual(content['status'], 400)
        self.assertEqual(content['message'], message)

    def assertSuccessResponse(self, data, expected_structure):
        """Assert that regions are successfully updated."""
        response = self.url_post("test", reverse(self.url_name), data=data)
        self.assertEqual(response.status_code, 200)
        content = json.loads(response.content.decode("utf-8"))
        self.assertTrue(content['success'])
        self.assertEqual(content['status'], 200)
        self.assertEqual(
            content['message'],
            "Test regions have been updated.")
        # Re-read the org's regions and check they match the requested layout.
        new_structure = self.get_structure(models.Region.get_all(self.org))
        self.assertDictEqual(expected_structure, new_structure)

    def get_structure(self, regions):
        """Create a dict to represent current region parents and boundaries."""
        structure = {}
        for region in regions:
            structure[region.pk] = [region.parent_id, region.boundary_id]
        return structure

    def make_regions(self):
        """Create a collection of nested regions."""
        # Uganda -> (Kampala -> Makerere, Entebbe); Kenya -> (Nairobi, Mombasa)
        self.region_uganda = factories.Region(
            org=self.org, name="Uganda", parent=None,
            boundary=factories.Boundary(org=self.org))
        self.region_kampala = factories.Region(
            org=self.org, name="Kampala", parent=self.region_uganda,
            boundary=factories.Boundary(org=self.org))
        self.region_makerere = factories.Region(
            org=self.org, name="Makerere", parent=self.region_kampala,
            boundary=factories.Boundary(org=self.org))
        self.region_entebbe = factories.Region(
            org=self.org, name="Entebbe", parent=self.region_uganda,
            boundary=factories.Boundary(org=self.org))
        self.region_kenya = factories.Region(
            org=self.org, name="Kenya", parent=None,
            boundary=factories.Boundary(org=self.org))
        self.region_nairobi = factories.Region(
            org=self.org, name="Nairobi", parent=self.region_kenya,
            boundary=factories.Boundary(org=self.org))
        self.region_mombasa = factories.Region(
            org=self.org, name="Mombasa", parent=self.region_kenya,
            boundary=factories.Boundary(org=self.org))
        # Edge cases: a region with no boundary, and an inactive region.
        self.region_no_boundary = factories.Region(
            org=self.org, name="No Boundary", parent=None,
            boundary=None)
        self.region_inactive = factories.Region(
            org=self.org, name="Inactive", parent=self.region_nairobi,
            is_active=False)
        return models.Region.get_all(self.org)

    def test_unauthenticated(self):
        """View requires authentication."""
        self.client.logout()
        url = reverse(self.url_name)
        response = self.url_get("test", url)
        self.assertLoginRedirect(response, "test", url)

    def test_no_org(self):
        """View must be used with a specific org."""
        response = self.url_get(None, reverse(self.url_name))
        self.assertRedirects(response, reverse("orgs_ext.org_chooser"))

    def test_no_perms(self):
        """View requires that the user is an org administrator."""
        self.org.administrators.remove(self.user)
        url = reverse(self.url_name)
        response = self.url_get("test", url)
        self.assertLoginRedirect(response, "test", url)

    def test_editor(self):
        """View requires that the user is an org administrator."""
        # An editor role is not sufficient.
        self.org.administrators.remove(self.user)
        self.org.editors.add(self.user)
        url = reverse(self.url_name)
        response = self.url_get("test", url)
        self.assertLoginRedirect(response, "test", url)

    def test_viewer(self):
        """View requires that the user is an org administrator."""
        # A viewer role is not sufficient.
        self.org.administrators.remove(self.user)
        self.org.viewers.add(self.user)
        url = reverse(self.url_name)
        response = self.url_get("test", url)
        self.assertLoginRedirect(response, "test", url)

    def test_get(self):
        """View is post-only."""
        response = self.url_get("test", reverse(self.url_name))
        self.assertEqual(response.status_code, 405)

    def test_post_no_data(self):
        """View requires that data is sent in the `data` parameter."""
        self.assertErrorResponse(
            data={},
            message="No data was provided in the `data` parameter.")

    def test_post_invalid_json_data(self):
        """View requires valid JSON data in the `data` parameter."""
        self.assertErrorResponse(
            data={'data': "invalid"},
            message="Data must be valid JSON.")

    def test_post_wrong_type(self):
        """View requires a JSON-encoded dictionary in the `data` parameter."""
        self.assertErrorResponse(
            data={'data': json.dumps("Wrong type")},
            message="Data must be a dict that maps region id to "
                    "(parent id, boundary id).")

    def test_post_wrong_value_type(self):
        """View requires each dictionary key to be a list with two items."""
        regions = self.make_regions()
        structure = self.get_structure(regions)
        structure[regions.first().pk] = None
        self.assertErrorResponse(
            data={'data': json.dumps(structure)},
            message="All data values must be of the format "
                    "(parent id, boundary id).")

    def test_post_extra_regions(self):
        """Submitted data should provide data for all regions in the org."""
        other_region = factories.Region()  # another org
        regions = self.make_regions()
        structure = self.get_structure(regions)
        structure[other_region.pk] = [None, None]
        self.assertErrorResponse(
            data={'data': json.dumps(structure)},
            message="Data must map region id to parent id for every region "
                    "in this org.")

    def test_post_missing_regions(self):
        """Submitted data should provide data for all regions in the org."""
        regions = self.make_regions()
        structure = self.get_structure(regions)
        structure.pop(regions.first().pk)
        self.assertErrorResponse(
            data={'data': json.dumps(structure)},
            message="Data must map region id to parent id for every region "
                    "in this org.")

    def test_post_inactive_regions(self):
        """Submitted data should not include info about inactive regions."""
        regions = self.make_regions()
        structure = self.get_structure(regions)
        structure[self.region_inactive.pk] = [None, None]
        self.assertErrorResponse(
            data={'data': json.dumps(structure)},
            message="Data must map region id to parent id for every region "
                    "in this org.")

    def test_post_invalid_region(self):
        """Submitted data should not include info about invalid regions."""
        regions = self.make_regions()
        structure = self.get_structure(regions)
        # Both non-numeric and unknown-numeric keys are rejected.
        structure['asdf'] = [None, None]
        structure[12345] = [None, None]
        self.assertErrorResponse(
            data={'data': json.dumps(structure)},
            message="Data must map region id to parent id for every region "
                    "in this org.")

    def test_post_invalid_parent(self):
        """Submitted data should only reference parents within the same org."""
        regions = self.make_regions()
        structure = self.get_structure(regions)
        structure[regions.first().pk] = [12345, None]
        structure[regions.last().pk] = ["asdf", None]
        self.assertErrorResponse(
            data={'data': json.dumps(structure)},
            message="Region parent must be a region from the same org, or "
                    "null.")

    def test_post_inactive_parent(self):
        """Submitted data should not reference inactive parents."""
        regions = self.make_regions()
        structure = self.get_structure(regions)
        structure[regions.first().pk] = [self.region_inactive.pk, None]
        self.assertErrorResponse(
            data={'data': json.dumps(structure)},
            message="Region parent must be a region from the same org, or "
                    "null.")

    def test_post_other_org_parent(self):
        """Submitted data should not reference parents from another org."""
        other_region = factories.Region()  # another org
        regions = self.make_regions()
        structure = self.get_structure(regions)
        structure[regions.first().pk] = [other_region.pk, None]
        self.assertErrorResponse(
            data={'data': json.dumps(structure)},
            message="Region parent must be a region from the same org, or "
                    "null.")

    def test_post_invalid_boundaries(self):
        """Submitted data should not make invalid boundary references."""
        regions = self.make_regions()
        structure = self.get_structure(regions)
        structure[regions.first().pk] = [None, 12345]
        structure[regions.last().pk] = [None, "asdf"]
        self.assertErrorResponse(
            data={'data': json.dumps(structure)},
            message="Region boundary must be a boundary from the same org, "
                    "or null.")

    def test_post_other_org_boundary(self):
        """Submitted data should not reference boundaries from another org."""
        other_boundary = factories.Boundary()  # another org
        regions = self.make_regions()
        structure = self.get_structure(regions)
        structure[regions.first().pk] = [None, other_boundary.pk]
        self.assertErrorResponse(
            data={'data': json.dumps(structure)},
            message="Region boundary must be a boundary from the same org, "
                    "or null.")

    def test_post_same(self):
        """Test when hierarchy and boundaries do not change."""
        regions = self.make_regions()
        structure = self.get_structure(regions)
        data = {'data': json.dumps(structure)}
        self.assertSuccessResponse(data, structure)

    def test_post_change(self):
        """Test hierarchy and boundary changes."""
        regions = self.make_regions()
        structure = self.get_structure(regions)
        # Re-parent Kampala, move Nairobi under Uganda with Uganda's boundary,
        # and make Entebbe a root with Kenya's boundary.
        structure[self.region_kampala.pk] = [self.region_kenya.pk, None]
        structure[self.region_nairobi.pk] = [self.region_uganda.pk,
                                             self.region_uganda.boundary.pk]
        structure[self.region_entebbe.pk] = [None, self.region_kenya.boundary.pk]
        data = {'data': json.dumps(structure)}
        self.assertSuccessResponse(data, structure)
class TestGroupList(TracProDataTest):
    """Tests for the group list view (admin-only)."""

    url_name = "groups.group_list"

    def test_non_admin(self):
        # A non-admin user must be bounced to the login page.
        self.login(self.user1)
        list_url = reverse(self.url_name)
        response = self.url_get('unicef', list_url)
        self.assertLoginRedirect(response, "unicef", list_url)

    def test_admin(self):
        # An administrator sees all three groups.
        self.login(self.admin)
        response = self.url_get('unicef', reverse(self.url_name))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.context['object_list']), 3)
class TestGroupMostActive(TracProDataTest):
    """Tests for the JSON endpoint listing the most active groups."""

    url_name = "groups.group_most_active"

    def test_most_active(self):
        now = timezone.now()
        old = now - relativedelta(weeks=5)    # outside the last month
        recent = now - relativedelta(days=5)  # inside the last month
        pollrun = factories.RegionalPollRun(
            poll=self.poll1,
            conducted_on=old,
        )
        # Empty responses never count, even when recent (contact in group #1).
        factories.Response(
            pollrun=pollrun, contact=self.contact1,
            created_on=recent, updated_on=recent,
            status=polls.Response.STATUS_EMPTY)
        # Responses outside the last month never count (group #2).
        factories.Response(
            pollrun=pollrun, contact=self.contact3,
            created_on=old, updated_on=old,
            status=polls.Response.STATUS_PARTIAL)
        # One recent partial response counts for group #2.
        factories.Response(
            pollrun=pollrun, contact=self.contact3,
            created_on=recent, updated_on=recent,
            status=polls.Response.STATUS_PARTIAL)
        # Two recent complete responses count for group #3.
        for _ in range(2):
            factories.Response(
                pollrun=pollrun, contact=self.contact5,
                created_on=recent, updated_on=recent,
                status=polls.Response.STATUS_COMPLETE)

        # Log in as a non-administrator.
        self.login(self.user1)
        response = self.url_get('unicef', reverse(self.url_name))
        results = json.loads(response.content)['results']

        self.assertEqual(len(results), 2)
        self.assertEqual(results[0]['id'], self.group3.pk)
        self.assertEqual(results[0]['name'], self.group3.name)
        self.assertEqual(results[0]['response_count'], 2)
        self.assertEqual(results[1]['id'], self.group2.pk)
        self.assertEqual(results[1]['name'], self.group2.name)
        self.assertEqual(results[1]['response_count'], 1)
| [
"[email protected]"
] | |
88f17fa4df543a5bc167a16150d3ad71019a317b | 7639e2367fd61c7f92e1583e1775264ac58f8dd5 | /husky_ur3_gripper_moveit_config/scripts/control_Husky_UR3_8.py | 37d12e236cbb61aa2ab13a664841cd16f07d095a | [] | no_license | Shumine/husky_ur3_gripper_simulator | beea4819cdcf4a6a3d8d42b5c91ed104637789a9 | d53f309c1040cc430c161803d3e7c1f6a1e60c58 | refs/heads/master | 2023-01-12T23:18:44.183277 | 2020-11-17T10:45:44 | 2020-11-17T10:45:44 | 287,156,095 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 10,807 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
"""
import sys
import rospy
import copy, math
import threading
import time
import tf
from multiprocessing import Process
from math import pi, radians, degrees, atan2, sqrt
from moveit_commander import MoveGroupCommander, RobotCommander
from moveit_commander import PlanningSceneInterface, roscpp_initialize, roscpp_shutdown
from moveit_commander.conversions import pose_to_list
from geometry_msgs.msg import PoseStamped, Pose, Point, Quaternion, Twist
from moveit_msgs.msg import Grasp, GripperTranslation, PlaceLocation, MoveItErrorCodes, DisplayTrajectory
from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint
from tf.transformations import euler_from_quaternion, quaternion_from_euler
import random
import actionlib
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
from geometry_msgs.msg import PoseWithCovarianceStamped, Quaternion
from tf.transformations import quaternion_from_euler, euler_from_quaternion
from std_msgs.msg import String
from nav_msgs.msg import Odometry
from math import pi
#GROUP_NAME_GRIPPER = "NAME OF GRIPPER"

# Initialise MoveIt commander and the ROS node before anything else runs.
roscpp_initialize(sys.argv)
rospy.init_node('control_Husky_UR3', anonymous=True)
robot = RobotCommander()
scene = PlanningSceneInterface()

## Mobile-base state variables (updated by the odometry callback newOdom).
x = 0.0
y = 0.0
theta = 0.0

## Manipulator setup: the MoveIt group controlling the UR3 arm.
group_name = "ur3_manipulator"
move_group = MoveGroupCommander(group_name)
FIXED_FRAME = 'world'
# Publisher used by RViz to display planned trajectories.
display_trajectory_publisher = rospy.Publisher('/move_group/display_planned_path',
                                               DisplayTrajectory,
                                               queue_size=20)
def newOdom(msg):
    """Odometry callback: cache the base pose in the module globals."""
    global x
    global y
    global theta

    pose = msg.pose.pose
    x = pose.position.x
    y = pose.position.y
    q = pose.orientation
    (roll, pitch, theta) = euler_from_quaternion([q.x, q.y, q.z, q.w])
def jmove_to_pose_goal(pose_goal):
    """Plan to the given end-effector pose and execute, blocking until done."""
    move_group.set_pose_target(pose_goal)
    move_group.go(wait=True)
def jmove_to_joint_goal(joint_goal):
    # Plan to the given joint configuration and execute, blocking until done.
    move_group.go(joint_goal, wait=True)
def move_Joint(q1, q2, q3, q4, q5, q6):
    """Move the six UR3 arm joints to the given angles (radians) and block.

    The current joint values are fetched first so the list has the length and
    ordering expected by the move group; all six entries are then overwritten.
    """
    joint_goal = move_group.get_current_joint_values()

    # Assign the requested configuration directly. (Removed an unused
    # `mobile_joints` local and a redundant intermediate list.)
    joint_goal[0] = q1
    joint_goal[1] = q2
    joint_goal[2] = q3
    joint_goal[3] = q4
    joint_goal[4] = q5
    joint_goal[5] = q6

    # Execute the motion.
    move_group.go(joint_goal, wait=True)
def get_TF(a, b):
    """Block until the transform from frame `a` to frame `b` is available.

    Returns (trans, rot): translation [x, y, z] and quaternion [x, y, z, w].
    """
    listener = tf.TransformListener()
    while True:
        try:
            trans, rot = listener.lookupTransform(a, b, rospy.Time(0))
        except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
            # Transform not ready yet; keep polling.
            continue
        return trans, rot
def move_ee(Px,Py,Pz,Rx,Ry,Rz,Rw):
    # Move the end effector to an odom-frame pose (Px, Py, Pz) with
    # orientation quaternion (Rx, Ry, Rz, Rw), by converting the target into
    # the base_link frame before planning.
    trans,rot = get_TF('/odom','/base_link')
    print('TF from odom to base link :',trans)
    # Translate the target into the base frame by subtracting the base offset.
    x = Px-trans[0]
    y = Py-trans[1]
    z = Pz-trans[2]
    Ox = Rx
    Oy = Ry
    # NOTE(review): subtracting raw quaternion components is not a valid
    # rotation composition; this only approximates small yaw offsets. Verify.
    Oz = Rz-rot[2]
    Ow = Rw
    print 'real_planning_pose',x,y,z,Ox,Oy,Oz,Ow
    print "============ Generating plan 1"
    pose_target = Pose()
    pose_target.position.x = x
    pose_target.position.y = y
    pose_target.position.z = z
    pose_target.orientation.x = Ox
    pose_target.orientation.y = Oy
    pose_target.orientation.z = Oz
    pose_target.orientation.w = Ow
    move_group.set_pose_target(pose_target)
    move_group.go(True)
    print "============ plan 1 complete!"
    # Report where the end effector actually ended up, in the odom frame.
    trans_1,rot_1 = get_TF('odom','/ee_link')
    print "============ ee pose : "
    print move_group.get_current_pose()
    print move_group.get_planning_frame()
    print 'odom_TF',trans_1,rot_1
    print "============"
def move_base(a,b):
    # Drive the Husky base to world point (a, b) with a simple turn-then-drive
    # controller on /cmd_vel, then rotate back to heading 0.
    # Returns the final (x, y) odometry estimate (module globals).
    sub = rospy.Subscriber("/odometry/filtered", Odometry, newOdom)
    pub = rospy.Publisher("/cmd_vel", Twist, queue_size = 1)
    speed = Twist()
    r = rospy.Rate(4)
    goal = Point()
    goal.x = a
    goal.y = b
    arrival_radius = 0.3
    # Loop until the base is within `arrival_radius` of the goal.
    while (goal.x-x)**2 + (goal.y-y)**2 >= arrival_radius**2 :
    #while abs(goal.x-x) >0.1 or abs(goal.y-y) >0.1 or abs(angle_to_goal-theta) >0.1 : # break once within 0.3 of the goal
        inc_x = goal.x -x
        inc_y = goal.y -y
        angle_to_goal = atan2(inc_y,inc_x)
        # Heading error above 5 degrees: rotate in place.
        if abs(angle_to_goal - theta) > 5*pi/180:
            speed.linear.x = 0.0
            speed.angular.z = 0.3
        if abs(angle_to_goal - theta) < 5*pi/180: # once nearly aligned, rotate very slowly so we do not overshoot the goal heading
            speed.angular.z = 0.03
            speed.linear.x = 0.0
        else:
            # NOTE(review): this `else` pairs with the second `if`, so it
            # fires exactly when the heading error is large and overwrites the
            # turn command with a forward drive. Verify the intended nesting
            # (indentation was lost in this copy of the file).
            speed.linear.x = 0.2
            speed.angular.z = 0.0
        if abs(goal.x-x) <0.3 and abs(goal.y-y)<0.3: # slow way down once within 0.3 m on both axes
            # NOTE(review): `angular.x` has no effect on a differential base;
            # this looks like a typo for `linear.x`. Confirm.
            speed.angular.x = 0.05
            speed.angular.z = 0.0
        #print goal.x-x, goal.y-y, angle_to_goal-theta
        pub.publish(speed)
        #r.sleep()
    # Final stage: rotate in place until the heading is (close to) zero.
    final_angle_to_goal = 0
    while abs(final_angle_to_goal - theta) > 0.02:
        if abs(final_angle_to_goal - theta) > 0.3:
            speed.linear.x = 0
            speed.angular.z = 0.3
        else:
            speed.linear.x = 0
            speed.angular.z = 0.1
        pub.publish(speed)
        r.sleep()
    print 'mobile robot movement complete!'
    return x,y
def cartesian_path_planner(a, b, c):
    """Plan (but do not execute) a Cartesian path moving the end effector by
    `a` in z, then `b` in x, then `c` in y, via three waypoints.

    Returns:
        (plan, fraction): the planned trajectory and the fraction [0, 1] of
        the requested path that could actually be planned.
    """
    waypoints = []

    wpose = move_group.get_current_pose().pose
    wpose.position.z += a  # First move up (z)
    waypoints.append(copy.deepcopy(wpose))

    wpose = move_group.get_current_pose().pose
    wpose.position.x += b  # Then along x
    waypoints.append(copy.deepcopy(wpose))

    wpose = move_group.get_current_pose().pose
    wpose.position.y += c  # Then along y
    waypoints.append(copy.deepcopy(wpose))

    # We want the Cartesian path to be interpolated at a resolution of 10 cm
    # (eef_step); a jump_threshold of 0.0 disables the jump check.
    (plan, fraction) = move_group.compute_cartesian_path(
        waypoints,  # waypoints to follow
        0.1,        # eef_step
        0.0)        # jump_threshold

    # Bug fix: the computed plan was previously discarded. Return it so the
    # caller can execute it, matching the sibling cartesian_path() helper.
    return plan, fraction
def x_path_planner(a):
    """Translate the end effector by `a` metres along +x and execute."""
    target = move_group.get_current_pose().pose
    rospy.sleep(1)
    target.position.x += a
    move_group.set_pose_target(target)
    move_group.go(True)
def y_path_planner(c):
    """Translate the end effector by `c` metres along +y and execute."""
    target = move_group.get_current_pose().pose
    rospy.sleep(1)
    target.position.y += c
    move_group.set_pose_target(target)
    move_group.go(True)
def z_path_planner(b):
    """Translate the end effector by `b` metres along +z and execute."""
    target = move_group.get_current_pose().pose
    rospy.sleep(1)
    target.position.z += b
    move_group.set_pose_target(target)
    move_group.go(True)
def down_demo():
    # Scripted demo: move to the "down" configuration, wait for the user to
    # press enter, then step the end effector through a small motion sequence.
    move_Joint(1.57,-2.27,1.93,-1.19,-1.57,0) #down pose
    print "Down demo is ready to start!, press enter..!"
    raw_input()
    z_path_planner(0.1)
    y_path_planner(-0.112)
    x_path_planner(0.1)
    z_path_planner(-0.2)
    rospy.sleep(3)
    print "Down demo complete!, Go to home pose..!"
def cartesian_path(x,y,z):
    # Plan (without executing) a Cartesian path: first lift 0.1 m in z while
    # shifting `y` sideways, then move `x` forward/backward, then move `z`.
    # NOTE(review): parameters x, y, z shadow the module globals of the same
    # name used by move_base()/newOdom(); confirm this is intentional.
    scale = 1
    waypoints = []

    wpose = move_group.get_current_pose().pose
    wpose.position.z += scale * 0.1  # First move up (z)
    wpose.position.y += scale * y  # and sideways (y)
    waypoints.append(copy.deepcopy(wpose))

    wpose.position.x += scale * x  # Second move forward/backwards in (x)
    waypoints.append(copy.deepcopy(wpose))

    wpose.position.z += scale * z  # Third move sideways (y)
    waypoints.append(copy.deepcopy(wpose))

    # We want the Cartesian path to be interpolated at a resolution of 1 cm
    # which is why we will specify 0.01 as the eef_step in Cartesian
    # translation. We will disable the jump threshold by setting it to 0.0,
    # ignoring the check for infeasible jumps in joint space, which is sufficient
    # for this tutorial.
    (plan, fraction) = move_group.compute_cartesian_path(
        waypoints,  # waypoints to follow
        0.01,  # eef_step
        0.0)  # jump_threshold

    # Note: We are just planning, not asking move_group to actually move the robot yet:
    return plan, fraction
def Grasp_object(x_dir,y_dir,z_dir):
    # Drive the base to 0.5 m short of the target x, then move the arm's end
    # effector to the world-frame grasp point (x_dir, y_dir, z_dir).
    current_mobile_x,current_mobile_y = move_base(x_dir-0.5,y_dir)
    #z_path_planner(0.1)
    print "Grasping is ready to start!, press enter..!"
    raw_input()
    # Distance from the current end-effector position (base odometry plus
    # arm pose) to the requested grasp point, per axis.
    curr_pose = move_group.get_current_pose().pose
    x_distance = current_mobile_x+curr_pose.position.x - x_dir
    y_distance = current_mobile_y+curr_pose.position.y - y_dir
    z_distance = curr_pose.position.z - z_dir
    print curr_pose.position.x
    print 'x_dir =',x_dir,'y_dir=',y_dir,'z_dir=',z_dir
    print 'x =',x_distance,'y=',y_distance,'z=',z_distance
    #y_path_planner(-y_distance)
    #x_path_planner(-x_distance)
    #z_path_planner(-z_distance)
    # Plan a Cartesian move that closes the gap, then execute it.
    plan,fraction = cartesian_path(-x_distance, -y_distance, -z_distance)
    move_group.execute(plan, wait=True)
    rospy.sleep(3)
    # Report the achieved end-effector pose in the odom frame.
    (result_xyz,result_rot) = get_TF('/odom','ee_link')
    print 'xyz_result=',result_xyz[0],result_xyz[1],result_xyz[2]
    print "Grasping complete!, Go to home pose..!"
if __name__=='__main__':
    # Demo: go home, nudge the arm target, drive the base 8 m forward, then
    # restore the arm pose.
    move_Joint(1.57,-2.27,1.93,-1.19,-1.57,0) #home pose
    rospy.sleep(1)
    curr_pose = move_group.get_current_pose().pose
    curr_pose.position.x +=0.05
    move_base(8,0)
    jmove_to_pose_goal(curr_pose)

    # Earlier experiment: run base and arm motion in parallel processes.
    #p1 = Process(target = move_base, args = (a,b,))
    #p2 = Process(target = jmove_to_pose_goal, args = (curr_pose,))
    #
    #p1.start()
    #p2.start()
    #move_Joint(1.57,-2.27,1.93,-1.19,-1.57,0) #home pose
| [
"[email protected]"
] | |
88f3be0529393582e1ac0d38749d1a06c93f9796 | 5985a4feeea08c35a96442c3691bdcea767f235a | /django_svelte/lib/python3.9/site-packages/xlsxwriter/worksheet.py | 176c33899123faa131ab47834a1b02949fc76a9d | [
"BSD-3-Clause"
] | permissive | Mujirin/django_svelte | 52789f495901a350b1c396c4b8fc40d46a15e8e1 | 1c7c5d13b11b43147ef28e5bb78cb8629190949f | refs/heads/main | 2023-04-21T08:40:17.909238 | 2021-04-24T09:38:42 | 2021-04-24T09:38:42 | 361,102,673 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 259,088 | py | ###############################################################################
#
# Worksheet - A class for writing the Excel XLSX Worksheet file.
#
# Copyright 2013-2021, John McNamara, [email protected]
#
# Standard packages.
import codecs
import datetime
import os
import re
import sys
import tempfile
from collections import defaultdict
from collections import namedtuple
from math import isnan
from math import isinf
from warnings import warn
# Standard packages in Python 2/3 compatibility mode.
from .compatibility import StringIO
from .compatibility import force_unicode
from .compatibility import num_types, str_types
# Package imports.
from . import xmlwriter
from .format import Format
from .drawing import Drawing
from .shape import Shape
from .xmlwriter import XMLwriter
from .utility import xl_rowcol_to_cell
from .utility import xl_rowcol_to_cell_fast
from .utility import xl_cell_to_rowcol
from .utility import xl_col_to_name
from .utility import xl_range
from .utility import xl_color
from .utility import get_sparkline_style
from .utility import supported_datetime
from .utility import datetime_to_excel_datetime
from .utility import quote_sheetname
from .exceptions import DuplicateTableName
###############################################################################
#
# Decorator functions.
#
###############################################################################
def convert_cell_args(method):
    """
    Decorator function to convert A1 notation in cell method calls
    to the default row/col notation.

    """
    from functools import wraps

    # Preserve the wrapped method's name/docstring for introspection and
    # help(); the original wrapper clobbered them.
    @wraps(method)
    def cell_wrapper(self, *args, **kwargs):
        try:
            # First arg is an int, default to row/col notation.
            if len(args):
                first_arg = args[0]
                int(first_arg)
        except ValueError:
            # First arg isn't an int, convert to A1 notation.
            new_args = xl_cell_to_rowcol(first_arg)
            args = new_args + args[1:]

        return method(self, *args, **kwargs)

    return cell_wrapper
def convert_range_args(method):
    """
    Decorator function to convert A1 notation in range method calls
    to the default row/col notation.

    """
    from functools import wraps

    # Preserve the wrapped method's name/docstring for introspection and
    # help(); the original wrapper clobbered them.
    @wraps(method)
    def cell_wrapper(self, *args, **kwargs):
        try:
            # First arg is an int, default to row/col notation.
            if len(args):
                int(args[0])
        except ValueError:
            # First arg isn't an int, convert to A1 notation.
            if ':' in args[0]:
                cell_1, cell_2 = args[0].split(':')
                row_1, col_1 = xl_cell_to_rowcol(cell_1)
                row_2, col_2 = xl_cell_to_rowcol(cell_2)
            else:
                row_1, col_1 = xl_cell_to_rowcol(args[0])
                row_2, col_2 = row_1, col_1

            new_args = [row_1, col_1, row_2, col_2]
            new_args.extend(args[1:])
            args = new_args

        return method(self, *args, **kwargs)

    return cell_wrapper
def convert_column_args(method):
    """
    Decorator function to convert A1 notation in columns method calls
    to the default row/col notation.

    """
    from functools import wraps

    # Preserve the wrapped method's name/docstring for introspection and
    # help(); the original wrapper clobbered them.
    @wraps(method)
    def column_wrapper(self, *args, **kwargs):
        try:
            # First arg is an int, default to row/col notation.
            if len(args):
                int(args[0])
        except ValueError:
            # First arg isn't an int, convert to A1 notation.
            cell_1, cell_2 = [col + '1' for col in args[0].split(':')]
            _, col_1 = xl_cell_to_rowcol(cell_1)
            _, col_2 = xl_cell_to_rowcol(cell_2)
            new_args = [col_1, col_2]
            new_args.extend(args[1:])
            args = new_args

        return method(self, *args, **kwargs)

    return column_wrapper
###############################################################################
#
# Named tuples used for cell types.
#
###############################################################################
# Lightweight records for each cell type stored in the worksheet data table.
# Each carries the cell payload plus its (possibly None) Format object.
cell_string_tuple = namedtuple('String', 'string, format')
cell_number_tuple = namedtuple('Number', 'number, format')
cell_blank_tuple = namedtuple('Blank', 'format')
cell_boolean_tuple = namedtuple('Boolean', 'boolean, format')
cell_formula_tuple = namedtuple('Formula', 'formula, format, value')
cell_arformula_tuple = namedtuple('ArrayFormula',
                                  'formula, format, value, range, atype')
###############################################################################
#
# Worksheet Class definition.
#
###############################################################################
class Worksheet(xmlwriter.XMLwriter):
"""
A class for writing the Excel XLSX Worksheet file.
"""
###########################################################################
#
# Public API.
#
###########################################################################
    def __init__(self):
        """
        Constructor.

        Initializes all per-worksheet state to its defaults. Most attributes
        are populated later by the public set_*/write_* methods and consumed
        when the XML is assembled.
        """
        super(Worksheet, self).__init__()

        # Identity and workbook-level wiring (set by the Workbook).
        self.name = None
        self.index = None
        self.str_table = None
        self.palette = None
        self.constant_memory = 0
        self.tmpdir = None
        self.is_chartsheet = False

        self.ext_sheets = []
        self.fileclosed = 0
        self.excel_version = 2007
        self.excel2003_style = False

        # Excel 2007+ sheet limits.
        self.xls_rowmax = 1048576
        self.xls_colmax = 16384
        self.xls_strmax = 32767
        # Used dimension range of the sheet (min/max row and column written).
        self.dim_rowmin = None
        self.dim_rowmax = None
        self.dim_colmin = None
        self.dim_colmax = None

        self.colinfo = {}
        self.selections = []
        self.hidden = 0
        self.active = 0
        self.tab_color = 0

        self.panes = []
        self.active_pane = 3
        self.selected = 0

        # Page setup / print state.
        self.page_setup_changed = False
        self.paper_size = 0
        self.orientation = 1

        self.print_options_changed = False
        self.hcenter = False
        self.vcenter = False
        self.print_gridlines = False
        self.screen_gridlines = True
        self.print_headers = False
        self.row_col_headers = False

        # Header/footer strings and images.
        self.header_footer_changed = False
        self.header = ''
        self.footer = ''
        self.header_footer_aligns = True
        self.header_footer_scales = True
        self.header_images = []
        self.footer_images = []
        self.header_images_list = []

        # Page margins, in inches (Excel defaults).
        self.margin_left = 0.7
        self.margin_right = 0.7
        self.margin_top = 0.75
        self.margin_bottom = 0.75
        self.margin_header = 0.3
        self.margin_footer = 0.3

        self.repeat_row_range = ''
        self.repeat_col_range = ''
        self.print_area_range = ''

        self.page_order = 0
        self.black_white = 0
        self.draft_quality = 0
        self.print_comments = 0
        self.page_start = 0

        self.fit_page = 0
        self.fit_width = 0
        self.fit_height = 0

        # Manual page breaks.
        self.hbreaks = []
        self.vbreaks = []

        # Sheet protection.
        self.protect_options = {}
        self.protected_ranges = []
        self.num_protected_ranges = 0
        self.set_cols = {}
        self.set_rows = defaultdict(dict)

        # View options.
        self.zoom = 100
        self.zoom_scale_normal = 1
        self.print_scale = 100
        self.is_right_to_left = 0
        self.show_zeros = 1
        self.leading_zeros = 0

        # Outline/grouping state.
        self.outline_row_level = 0
        self.outline_col_level = 0
        self.outline_style = 0
        self.outline_below = 1
        self.outline_right = 1
        self.outline_on = 1
        self.outline_changed = False

        # Default row/column geometry.
        self.original_row_height = 15
        self.default_row_height = 15
        self.default_row_pixels = 20
        self.default_col_width = 8.43
        self.default_col_pixels = 64
        self.default_row_zeroed = 0

        self.names = {}
        self.write_match = []
        # Main cell data store: table[row][col] -> cell named tuple.
        self.table = defaultdict(dict)
        self.merge = []
        self.row_spans = {}

        # Comments and VML (legacy drawing) state.
        self.has_vml = False
        self.has_header_vml = False
        self.has_comments = False
        self.comments = defaultdict(dict)
        self.comments_list = []
        self.comments_author = ''
        self.comments_visible = 0
        self.vml_shape_id = 1024
        self.buttons_list = []
        self.vml_header_id = 0

        # Autofilter state.
        self.autofilter_area = ''
        self.autofilter_ref = None
        self.filter_range = []
        self.filter_on = 0
        self.filter_cols = {}
        self.filter_type = {}

        self.col_sizes = {}
        self.row_sizes = {}
        self.col_formats = {}
        self.col_size_changed = False
        self.row_size_changed = False

        # Relationship/link bookkeeping for the package parts.
        self.last_shape_id = 1
        self.rel_count = 0
        self.hlink_count = 0
        self.hlink_refs = []
        self.external_hyper_links = []
        self.external_drawing_links = []
        self.external_comment_links = []
        self.external_vml_links = []
        self.external_table_links = []
        self.drawing_links = []
        self.vml_drawing_links = []
        self.charts = []
        self.images = []
        self.tables = []
        self.sparklines = []
        self.shapes = []
        self.shape_hash = {}
        self.drawing = 0
        self.drawing_rels = {}
        self.drawing_rels_id = 0
        self.vml_drawing_rels = {}
        self.vml_drawing_rels_id = 0

        self.rstring = ''
        self.previous_row = 0

        # Data validation and conditional formatting.
        self.validations = []
        self.cond_formats = {}
        self.data_bars_2010 = []
        self.use_data_bars_2010 = False
        self.dxf_priority = 1
        self.page_view = 0

        self.vba_codename = None

        self.date_1904 = False
        self.hyperlinks = defaultdict(dict)

        # write() type-conversion options (set from Workbook constructor args).
        self.strings_to_numbers = False
        self.strings_to_urls = True
        self.nan_inf_to_errors = False
        self.strings_to_formulas = True

        self.default_date_format = None
        self.default_url_format = None
        self.remove_timezone = False
        self.max_url_length = 2079

        self.row_data_filename = None
        self.row_data_fh = None
        self.worksheet_meta = None
        self.vml_data_id = None
        # NOTE: the assignments below intentionally override the earlier
        # vml_shape_id/row_data_filename/row_data_fh defaults (last one wins).
        self.vml_shape_id = None
        self.row_data_filename = None
        self.row_data_fh = None
        self.row_data_fh_closed = False

        self.vertical_dpi = 0
        self.horizontal_dpi = 0

        self.write_handlers = {}

        self.ignored_errors = None

        self.has_dynamic_arrays = False
# Utility function for writing different types of strings.
    def _write_token_as_string(self, token, row, col, *args):
        """Dispatch a string token to the appropriate write_*() method.

        The checks are order-sensitive: blank, formula, URL schemes,
        optional numeric conversion, and finally plain string.  ``token``
        duplicates ``args[0]``; remaining args (typically the format) are
        forwarded unchanged.
        """
        # Map the data to the appropriate write_*() method.
        if token == '':
            return self._write_blank(row, col, *args)
        if self.strings_to_formulas and token.startswith('='):
            return self._write_formula(row, col, *args)
        # "{=...}" array formulas are handed off even when plain formula
        # conversion is disabled.
        if token.startswith('{=') and token.endswith('}'):
            return self._write_formula(row, col, *args)
        if ':' in token:
            # Cheap ':' pre-filter before running the URL regexes.
            if self.strings_to_urls and re.match('(ftp|http)s?://', token):
                return self._write_url(row, col, *args)
            elif self.strings_to_urls and re.match('mailto:', token):
                return self._write_url(row, col, *args)
            elif self.strings_to_urls and re.match('(in|ex)ternal:', token):
                return self._write_url(row, col, *args)
        if self.strings_to_numbers:
            try:
                f = float(token)
                # NaN/Inf only convert to numbers when the workbook maps
                # them to Excel error values.
                if (self.nan_inf_to_errors or
                        (not isnan(f) and not isinf(f))):
                    # The string token (args[0]) is replaced by the float.
                    return self._write_number(row, col, f, *args[1:])
            except ValueError:
                # Not a number, write as a string.
                pass
            return self._write_string(row, col, *args)
        else:
            # We have a plain string.
            return self._write_string(row, col, *args)
@convert_cell_args
def write(self, row, col, *args):
"""
Write data to a worksheet cell by calling the appropriate write_*()
method based on the type of data being passed.
Args:
row: The cell row (zero indexed).
col: The cell column (zero indexed).
*args: Args to pass to sub functions.
Returns:
0: Success.
-1: Row or column is out of worksheet bounds.
other: Return value of called method.
"""
return self._write(row, col, *args)
# Undecorated version of write().
    def _write(self, row, col, *args):
        """Type-dispatching core behind write().

        User-registered handlers are consulted first, then cheap exact
        ``type() is`` checks, then isinstance() fallbacks for subclassed
        primitives, and finally float()/str() coercion attempts.
        """
        # Check the number of args passed.
        if not len(args):
            raise TypeError("write() takes at least 4 arguments (3 given)")
        # The first arg should be the token for all write calls.
        token = args[0]
        # Avoid isinstance() for better performance.
        token_type = type(token)
        # Check for any user defined type handlers with callback functions.
        if token_type in self.write_handlers:
            write_handler = self.write_handlers[token_type]
            function_return = write_handler(self, row, col, *args)
            # If the return value is None then the callback has returned
            # control to this function and we should continue as
            # normal. Otherwise we return the value to the caller and exit.
            if function_return is None:
                pass
            else:
                return function_return
        # Write None as a blank cell.
        if token is None:
            return self._write_blank(row, col, *args)
        # Check for standard Python types.
        # bool is checked before num_types since bool subclasses int.
        if token_type is bool:
            return self._write_boolean(row, col, *args)
        if token_type in num_types:
            return self._write_number(row, col, *args)
        if token_type is str:
            return self._write_token_as_string(token, row, col, *args)
        if token_type in (datetime.datetime,
                          datetime.date,
                          datetime.time,
                          datetime.timedelta):
            return self._write_datetime(row, col, *args)
        # Python 2 only: 'unicode' is undefined on Python 3, but the
        # branch is guarded by the version check (and NameError caught).
        if sys.version_info < (3, 0, 0):
            if token_type is unicode:
                try:
                    return self._write_token_as_string(str(token),
                                                       row, col, *args)
                except (UnicodeEncodeError, NameError):
                    pass
        # Resort to isinstance() for subclassed primitives.
        # Write number types.
        if isinstance(token, num_types):
            return self._write_number(row, col, *args)
        # Write string types.
        if isinstance(token, str_types):
            return self._write_token_as_string(token, row, col, *args)
        # Write boolean types.
        if isinstance(token, bool):
            return self._write_boolean(row, col, *args)
        # Write datetime objects.
        if supported_datetime(token):
            return self._write_datetime(row, col, *args)
        # We haven't matched a supported type. Try float.
        try:
            f = float(token)
            return self._write_number(row, col, f, *args[1:])
        except ValueError:
            pass
        except TypeError:
            raise TypeError("Unsupported type %s in write()" % type(token))
        # Finally try string.
        try:
            str(token)
            return self._write_string(row, col, *args)
        except ValueError:
            raise TypeError("Unsupported type %s in write()" % type(token))
@convert_cell_args
def write_string(self, row, col, string, cell_format=None):
"""
Write a string to a worksheet cell.
Args:
row: The cell row (zero indexed).
col: The cell column (zero indexed).
string: Cell data. Str.
format: An optional cell Format object.
Returns:
0: Success.
-1: Row or column is out of worksheet bounds.
-2: String truncated to 32k characters.
"""
return self._write_string(row, col, string, cell_format)
# Undecorated version of write_string().
def _write_string(self, row, col, string, cell_format=None):
str_error = 0
# Check that row and col are valid and store max and min values.
if self._check_dimensions(row, col):
return -1
# Check that the string is < 32767 chars.
if len(string) > self.xls_strmax:
string = string[:self.xls_strmax]
str_error = -2
# Write a shared string or an in-line string in constant_memory mode.
if not self.constant_memory:
string_index = self.str_table._get_shared_string_index(string)
else:
string_index = string
# Write previous row if in in-line string constant_memory mode.
if self.constant_memory and row > self.previous_row:
self._write_single_row(row)
# Store the cell data in the worksheet data table.
self.table[row][col] = cell_string_tuple(string_index, cell_format)
return str_error
@convert_cell_args
def write_number(self, row, col, number, cell_format=None):
"""
Write a number to a worksheet cell.
Args:
row: The cell row (zero indexed).
col: The cell column (zero indexed).
number: Cell data. Int or float.
cell_format: An optional cell Format object.
Returns:
0: Success.
-1: Row or column is out of worksheet bounds.
"""
return self._write_number(row, col, number, cell_format)
# Undecorated version of write_number().
def _write_number(self, row, col, number, cell_format=None):
if isnan(number) or isinf(number):
if self.nan_inf_to_errors:
if isnan(number):
return self._write_formula(row, col, '#NUM!', cell_format,
'#NUM!')
elif isinf(number):
return self._write_formula(row, col, '1/0', cell_format,
'#DIV/0!')
else:
raise TypeError(
"NAN/INF not supported in write_number() "
"without 'nan_inf_to_errors' Workbook() option")
# Check that row and col are valid and store max and min values.
if self._check_dimensions(row, col):
return -1
# Write previous row if in in-line string constant_memory mode.
if self.constant_memory and row > self.previous_row:
self._write_single_row(row)
# Store the cell data in the worksheet data table.
self.table[row][col] = cell_number_tuple(number, cell_format)
return 0
@convert_cell_args
def write_blank(self, row, col, blank, cell_format=None):
"""
Write a blank cell with formatting to a worksheet cell. The blank
token is ignored and the format only is written to the cell.
Args:
row: The cell row (zero indexed).
col: The cell column (zero indexed).
blank: Any value. It is ignored.
cell_format: An optional cell Format object.
Returns:
0: Success.
-1: Row or column is out of worksheet bounds.
"""
return self._write_blank(row, col, blank, cell_format)
# Undecorated version of write_blank().
def _write_blank(self, row, col, blank, cell_format=None):
# Don't write a blank cell unless it has a format.
if cell_format is None:
return 0
# Check that row and col are valid and store max and min values.
if self._check_dimensions(row, col):
return -1
# Write previous row if in in-line string constant_memory mode.
if self.constant_memory and row > self.previous_row:
self._write_single_row(row)
# Store the cell data in the worksheet data table.
self.table[row][col] = cell_blank_tuple(cell_format)
return 0
@convert_cell_args
def write_formula(self, row, col, formula, cell_format=None, value=0):
"""
Write a formula to a worksheet cell.
Args:
row: The cell row (zero indexed).
col: The cell column (zero indexed).
formula: Cell formula.
cell_format: An optional cell Format object.
value: An optional value for the formula. Default is 0.
Returns:
0: Success.
-1: Row or column is out of worksheet bounds.
"""
# Check that row and col are valid and store max and min values.
return self._write_formula(row, col, formula, cell_format, value)
# Undecorated version of write_formula().
def _write_formula(self, row, col, formula, cell_format=None, value=0):
if self._check_dimensions(row, col):
return -1
# Hand off array formulas.
if formula.startswith('{') and formula.endswith('}'):
return self._write_array_formula(row, col, row, col, formula,
cell_format, value)
# Remove the formula '=' sign if it exists.
if formula.startswith('='):
formula = formula.lstrip('=')
# Write previous row if in in-line string constant_memory mode.
if self.constant_memory and row > self.previous_row:
self._write_single_row(row)
# Store the cell data in the worksheet data table.
self.table[row][col] = cell_formula_tuple(formula, cell_format, value)
return 0
@convert_range_args
def write_array_formula(self, first_row, first_col, last_row, last_col,
formula, cell_format=None, value=0):
"""
Write a formula to a worksheet cell/range.
Args:
first_row: The first row of the cell range. (zero indexed).
first_col: The first column of the cell range.
last_row: The last row of the cell range. (zero indexed).
last_col: The last column of the cell range.
formula: Cell formula.
cell_format: An optional cell Format object.
value: An optional value for the formula. Default is 0.
Returns:
0: Success.
-1: Row or column is out of worksheet bounds.
"""
return self._write_array_formula(first_row, first_col, last_row,
last_col, formula, cell_format,
value, 'static')
@convert_range_args
def write_dynamic_array_formula(self, first_row, first_col,
last_row, last_col,
formula, cell_format=None, value=0):
"""
Write a dynamic formula to a worksheet cell/range.
Args:
first_row: The first row of the cell range. (zero indexed).
first_col: The first column of the cell range.
last_row: The last row of the cell range. (zero indexed).
last_col: The last column of the cell range.
formula: Cell formula.
cell_format: An optional cell Format object.
value: An optional value for the formula. Default is 0.
Returns:
0: Success.
-1: Row or column is out of worksheet bounds.
"""
error = self._write_array_formula(first_row, first_col, last_row,
last_col, formula, cell_format,
value, 'dynamic')
if error == 0:
self.has_dynamic_arrays = True
return error
# Undecorated version of write_array_formula() and
# write_dynamic_array_formula().
    def _write_array_formula(self, first_row, first_col, last_row, last_col,
                             formula, cell_format=None, value=0,
                             atype='static'):
        """Undecorated worker for the array formula writers.

        Stores the formula (with braces and leading '=' stripped) in the
        top-left cell of the range and, in normal mode, pads the rest of
        the range with formatted zeroes.  ``atype`` is 'static' or
        'dynamic'.
        """
        # Swap last row/col with first row/col as necessary.
        if first_row > last_row:
            first_row, last_row = last_row, first_row
        if first_col > last_col:
            first_col, last_col = last_col, first_col
        # Check that row and col are valid and store max and min values.
        if self._check_dimensions(first_row, first_col):
            return -1
        if self._check_dimensions(last_row, last_col):
            return -1
        # Define the array range in A1 notation, e.g. "A1" or "A1:B2".
        if first_row == last_row and first_col == last_col:
            cell_range = xl_rowcol_to_cell(first_row, first_col)
        else:
            cell_range = (xl_rowcol_to_cell(first_row, first_col) + ':'
                          + xl_rowcol_to_cell(last_row, last_col))
        # Remove array formula braces and the leading =.
        if formula[0] == '{':
            formula = formula[1:]
        if formula[0] == '=':
            formula = formula[1:]
        if formula[-1] == '}':
            formula = formula[:-1]
        # Write previous row if in in-line string constant_memory mode.
        if self.constant_memory and first_row > self.previous_row:
            self._write_single_row(first_row)
        # Store the cell data in the worksheet data table.
        self.table[first_row][first_col] = cell_arformula_tuple(formula,
                                                                cell_format,
                                                                value,
                                                                cell_range,
                                                                atype)
        # Pad out the rest of the area with formatted zeroes (skipped in
        # constant_memory mode where earlier rows may already be flushed).
        if not self.constant_memory:
            for row in range(first_row, last_row + 1):
                for col in range(first_col, last_col + 1):
                    if row != first_row or col != first_col:
                        self._write_number(row, col, 0, cell_format)
        return 0
@convert_cell_args
def write_datetime(self, row, col, date, cell_format=None):
"""
Write a date or time to a worksheet cell.
Args:
row: The cell row (zero indexed).
col: The cell column (zero indexed).
date: Date and/or time as a datetime object.
cell_format: A cell Format object.
Returns:
0: Success.
-1: Row or column is out of worksheet bounds.
"""
return self._write_datetime(row, col, date, cell_format)
# Undecorated version of write_datetime().
def _write_datetime(self, row, col, date, cell_format=None):
# Check that row and col are valid and store max and min values.
if self._check_dimensions(row, col):
return -1
# Write previous row if in in-line string constant_memory mode.
if self.constant_memory and row > self.previous_row:
self._write_single_row(row)
# Convert datetime to an Excel date.
number = self._convert_date_time(date)
# Add the default date format.
if cell_format is None:
cell_format = self.default_date_format
# Store the cell data in the worksheet data table.
self.table[row][col] = cell_number_tuple(number, cell_format)
return 0
@convert_cell_args
def write_boolean(self, row, col, boolean, cell_format=None):
"""
Write a boolean value to a worksheet cell.
Args:
row: The cell row (zero indexed).
col: The cell column (zero indexed).
boolean: Cell data. bool type.
cell_format: An optional cell Format object.
Returns:
0: Success.
-1: Row or column is out of worksheet bounds.
"""
return self._write_boolean(row, col, boolean, cell_format)
# Undecorated version of write_boolean().
def _write_boolean(self, row, col, boolean, cell_format=None):
# Check that row and col are valid and store max and min values.
if self._check_dimensions(row, col):
return -1
# Write previous row if in in-line string constant_memory mode.
if self.constant_memory and row > self.previous_row:
self._write_single_row(row)
if boolean:
value = 1
else:
value = 0
# Store the cell data in the worksheet data table.
self.table[row][col] = cell_boolean_tuple(value, cell_format)
return 0
# Write a hyperlink. This is comprised of two elements: the displayed
# string and the non-displayed link. The displayed string is the same as
# the link unless an alternative string is specified. The display string
# is written using the write_string() method. Therefore the max characters
# string limit applies.
#
# The hyperlink can be to a http, ftp, mail, internal sheet, or external
# directory urls.
@convert_cell_args
def write_url(self, row, col, url, cell_format=None,
string=None, tip=None):
"""
Write a hyperlink to a worksheet cell.
Args:
row: The cell row (zero indexed).
col: The cell column (zero indexed).
url: Hyperlink url.
format: An optional cell Format object.
string: An optional display string for the hyperlink.
tip: An optional tooltip.
Returns:
0: Success.
-1: Row or column is out of worksheet bounds.
-2: String longer than 32767 characters.
-3: URL longer than Excel limit of 255 characters.
-4: Exceeds Excel limit of 65,530 urls per worksheet.
"""
return self._write_url(row, col, url, cell_format, string, tip)
# Undecorated version of write_url().
    def _write_url(self, row, col, url, cell_format=None,
                   string=None, tip=None):
        """Undecorated worker for write_url().

        Normalizes 'internal:', 'external:' and 'mailto:' URI schemes,
        enforces Excel's string/URL/per-worksheet limits, writes the
        display string as the cell value and records the hyperlink data
        for later XML output.
        """
        # Check that row and col are valid and store max and min values
        if self._check_dimensions(row, col):
            return -1
        # Set the displayed string to the URL unless defined by the user.
        if string is None:
            string = url
        # Default to external link type such as 'http://' or 'external:'.
        link_type = 1
        # Remove the URI scheme from internal links.
        if url.startswith('internal:'):
            url = url.replace('internal:', '')
            string = string.replace('internal:', '')
            link_type = 2
        # Remove the URI scheme from external links and change the directory
        # separator from Unix to Dos.
        external = False
        if url.startswith('external:'):
            url = url.replace('external:', '')
            url = url.replace('/', '\\')
            string = string.replace('external:', '')
            string = string.replace('/', '\\')
            external = True
        # Strip the mailto header.
        string = string.replace('mailto:', '')
        # Check that the string is < 32767 chars
        # NOTE(review): str_error is never set to a non-zero value below,
        # so truncation results from _write_string() are not propagated.
        str_error = 0
        if len(string) > self.xls_strmax:
            warn("Ignoring URL since it exceeds Excel's string limit of "
                 "32767 characters")
            return -2
        # Copy string for use in hyperlink elements.
        url_str = string
        # External links to URLs and to other Excel workbooks have slightly
        # different characteristics that we have to account for.
        if link_type == 1:
            # Split url into the link and optional anchor/location.
            if '#' in url:
                url, url_str = url.split('#', 1)
            else:
                url_str = None
            url = self._escape_url(url)
            if url_str is not None and not external:
                url_str = self._escape_url(url_str)
            # Add the file:/// URI to the url for Windows style "C:/" link and
            # Network shares.
            if re.match(r'\w:', url) or re.match(r'\\', url):
                url = 'file:///' + url
            # Convert a .\dir\file.xlsx link to dir\file.xlsx.
            url = re.sub(r'^\.\\', '', url)
        # Excel limits the escaped URL and location/anchor to 255 characters.
        tmp_url_str = url_str or ''
        max_url = self.max_url_length
        if len(url) > max_url or len(tmp_url_str) > max_url:
            warn("Ignoring URL '%s' with link or location/anchor > %d "
                 "characters since it exceeds Excel's limit for URLS" %
                 (force_unicode(url), max_url))
            return -3
        # Check the limit of URLS per worksheet.
        self.hlink_count += 1
        if self.hlink_count > 65530:
            warn("Ignoring URL '%s' since it exceeds Excel's limit of "
                 "65,530 URLS per worksheet." % force_unicode(url))
            return -4
        # Write previous row if in in-line string constant_memory mode.
        if self.constant_memory and row > self.previous_row:
            self._write_single_row(row)
        # Add the default URL format.
        if cell_format is None:
            cell_format = self.default_url_format
        # Write the hyperlink string.
        self._write_string(row, col, string, cell_format)
        # Store the hyperlink data in a separate structure.
        self.hyperlinks[row][col] = {
            'link_type': link_type,
            'url': url,
            'str': url_str,
            'tip': tip}
        return str_error
@convert_cell_args
def write_rich_string(self, row, col, *args):
"""
Write a "rich" string with multiple formats to a worksheet cell.
Args:
row: The cell row (zero indexed).
col: The cell column (zero indexed).
string_parts: String and format pairs.
cell_format: Optional Format object.
Returns:
0: Success.
-1: Row or column is out of worksheet bounds.
-2: String truncated to 32k characters.
-3: 2 consecutive formats used.
-4: Empty string used.
-5: Insufficient parameters.
"""
return self._write_rich_string(row, col, *args)
# Undecorated version of write_rich_string().
    def _write_rich_string(self, row, col, *args):
        """Undecorated worker for write_rich_string().

        Builds the rich-string XML from alternating Format/string
        fragments using a temporary in-memory XMLwriter, then stores the
        resulting markup like an ordinary string cell.
        """
        tokens = list(args)
        cell_format = None
        str_length = 0
        string_index = 0
        # Check that row and col are valid and store max and min values
        if self._check_dimensions(row, col):
            return -1
        # If the last arg is a format we use it as the cell format.
        if isinstance(tokens[-1], Format):
            cell_format = tokens.pop()
        # Create a temp XMLWriter object and use it to write the rich string
        # XML to a string.
        fh = StringIO()
        self.rstring = XMLwriter()
        self.rstring._set_filehandle(fh)
        # Create a temp format with the default font for unformatted fragments.
        default = Format()
        # Convert list of format, string tokens to pairs of (format, string)
        # except for the first string fragment which doesn't require a default
        # formatting run. Use the default for strings without a leading format.
        fragments = []
        previous = 'format'
        pos = 0
        if len(tokens) <= 2:
            warn("You must specify more than 2 format/fragments for rich "
                 "strings. Ignoring input in write_rich_string().")
            return -5
        for token in tokens:
            if not isinstance(token, Format):
                # Token is a string.
                if previous != 'format':
                    # If previous token wasn't a format add one before string.
                    fragments.append(default)
                    fragments.append(token)
                else:
                    # If previous token was a format just add the string.
                    fragments.append(token)
                if token == '':
                    warn("Excel doesn't allow empty strings in rich strings. "
                         "Ignoring input in write_rich_string().")
                    return -4
                # Keep track of actual string str_length.
                str_length += len(token)
                previous = 'string'
            else:
                # Can't allow 2 formats in a row.
                if previous == 'format' and pos > 0:
                    warn("Excel doesn't allow 2 consecutive formats in rich "
                         "strings. Ignoring input in write_rich_string().")
                    return -3
                # Token is a format object. Add it to the fragment list.
                fragments.append(token)
                previous = 'format'
            pos += 1
        # If the first token is a string start the <r> element.
        if not isinstance(fragments[0], Format):
            self.rstring._xml_start_tag('r')
        # Write the XML elements for the $format $string fragments.
        for token in fragments:
            if isinstance(token, Format):
                # Write the font run.
                self.rstring._xml_start_tag('r')
                self._write_font(token)
            else:
                # Write the string fragment part, with whitespace handling.
                attributes = []
                # Leading/trailing whitespace must be preserved explicitly
                # or XML processing would strip it.
                if re.search(r'^\s', token) or re.search(r'\s$', token):
                    attributes.append(('xml:space', 'preserve'))
                self.rstring._xml_data_element('t', token, attributes)
                self.rstring._xml_end_tag('r')
        # Read the in-memory string.
        string = self.rstring.fh.getvalue()
        # Check that the string is < 32767 chars.
        if str_length > self.xls_strmax:
            warn("String length must be less than or equal to Excel's limit "
                 "of 32,767 characters in write_rich_string().")
            return -2
        # Write a shared string or an in-line string in constant_memory mode.
        if not self.constant_memory:
            string_index = self.str_table._get_shared_string_index(string)
        else:
            string_index = string
        # Write previous row if in in-line string constant_memory mode.
        if self.constant_memory and row > self.previous_row:
            self._write_single_row(row)
        # Store the cell data in the worksheet data table.
        self.table[row][col] = cell_string_tuple(string_index, cell_format)
        return 0
def add_write_handler(self, user_type, user_function):
"""
Add a callback function to the write() method to handle user defined
types.
Args:
user_type: The user type() to match on.
user_function: The user defined function to write the type data.
Returns:
Nothing.
"""
self.write_handlers[user_type] = user_function
@convert_cell_args
def write_row(self, row, col, data, cell_format=None):
"""
Write a row of data starting from (row, col).
Args:
row: The cell row (zero indexed).
col: The cell column (zero indexed).
data: A list of tokens to be written with write().
format: An optional cell Format object.
Returns:
0: Success.
other: Return value of write() method.
"""
for token in data:
error = self._write(row, col, token, cell_format)
if error:
return error
col += 1
return 0
@convert_cell_args
def write_column(self, row, col, data, cell_format=None):
"""
Write a column of data starting from (row, col).
Args:
row: The cell row (zero indexed).
col: The cell column (zero indexed).
data: A list of tokens to be written with write().
format: An optional cell Format object.
Returns:
0: Success.
other: Return value of write() method.
"""
for token in data:
error = self._write(row, col, token, cell_format)
if error:
return error
row += 1
return 0
@convert_cell_args
def insert_image(self, row, col, filename, options=None):
"""
Insert an image with its top-left corner in a worksheet cell.
Args:
row: The cell row (zero indexed).
col: The cell column (zero indexed).
filename: Path and filename for image in PNG, JPG or BMP format.
options: Position, scale, url and data stream of the image.
Returns:
0: Success.
-1: Row or column is out of worksheet bounds.
"""
# Check insert (row, col) without storing.
if self._check_dimensions(row, col, True, True):
warn('Cannot insert image at (%d, %d).' % (row, col))
return -1
if options is None:
options = {}
x_offset = options.get('x_offset', 0)
y_offset = options.get('y_offset', 0)
x_scale = options.get('x_scale', 1)
y_scale = options.get('y_scale', 1)
url = options.get('url', None)
tip = options.get('tip', None)
anchor = options.get('object_position', 2)
image_data = options.get('image_data', None)
description = options.get('description', None)
decorative = options.get('decorative', False)
# For backward compatibility with older parameter name.
anchor = options.get('positioning', anchor)
if not image_data and not os.path.exists(filename):
warn("Image file '%s' not found." % force_unicode(filename))
return -1
self.images.append([row, col, filename, x_offset, y_offset,
x_scale, y_scale, url, tip, anchor, image_data,
description, decorative])
@convert_cell_args
def insert_textbox(self, row, col, text, options=None):
"""
Insert an textbox with its top-left corner in a worksheet cell.
Args:
row: The cell row (zero indexed).
col: The cell column (zero indexed).
text: The text for the textbox.
options: Textbox options.
Returns:
0: Success.
-1: Row or column is out of worksheet bounds.
"""
# Check insert (row, col) without storing.
if self._check_dimensions(row, col, True, True):
warn('Cannot insert textbox at (%d, %d).' % (row, col))
return -1
if text is None:
text = ''
if options is None:
options = {}
x_offset = options.get('x_offset', 0)
y_offset = options.get('y_offset', 0)
x_scale = options.get('x_scale', 1)
y_scale = options.get('y_scale', 1)
anchor = options.get('object_position', 1)
self.shapes.append([row, col, x_offset, y_offset,
x_scale, y_scale, text, anchor, options])
@convert_cell_args
def insert_chart(self, row, col, chart, options=None):
"""
Insert an chart with its top-left corner in a worksheet cell.
Args:
row: The cell row (zero indexed).
col: The cell column (zero indexed).
chart: Chart object.
options: Position and scale of the chart.
Returns:
0: Success.
-1: Row or column is out of worksheet bounds.
"""
# Check insert (row, col) without storing.
if self._check_dimensions(row, col, True, True):
warn('Cannot insert chart at (%d, %d).' % (row, col))
return -1
if options is None:
options = {}
# Ensure a chart isn't inserted more than once.
if (chart.already_inserted or chart.combined
and chart.combined.already_inserted):
warn('Chart cannot be inserted in a worksheet more than once.')
return
else:
chart.already_inserted = True
if chart.combined:
chart.combined.already_inserted = True
x_offset = options.get('x_offset', 0)
y_offset = options.get('y_offset', 0)
x_scale = options.get('x_scale', 1)
y_scale = options.get('y_scale', 1)
anchor = options.get('object_position', 1)
# Allow Chart to override the scale and offset.
if chart.x_scale != 1:
x_scale = chart.x_scale
if chart.y_scale != 1:
y_scale = chart.y_scale
if chart.x_offset:
x_offset = chart.x_offset
if chart.y_offset:
y_offset = chart.y_offset
self.charts.append([row, col, chart,
x_offset, y_offset,
x_scale, y_scale,
anchor])
@convert_cell_args
def write_comment(self, row, col, comment, options=None):
"""
Write a comment to a worksheet cell.
Args:
row: The cell row (zero indexed).
col: The cell column (zero indexed).
comment: Cell comment. Str.
options: Comment formatting options.
Returns:
0: Success.
-1: Row or column is out of worksheet bounds.
-2: String longer than 32k characters.
"""
if options is None:
options = {}
# Check that row and col are valid and store max and min values
if self._check_dimensions(row, col):
return -1
# Check that the comment string is < 32767 chars.
if len(comment) > self.xls_strmax:
return -2
self.has_vml = 1
self.has_comments = 1
# Store the options of the cell comment, to process on file close.
self.comments[row][col] = [row, col, comment, options]
def show_comments(self):
"""
Make any comments in the worksheet visible.
Args:
None.
Returns:
Nothing.
"""
self.comments_visible = 1
def set_comments_author(self, author):
"""
Set the default author of the cell comments.
Args:
author: Comment author name. String.
Returns:
Nothing.
"""
self.comments_author = author
def get_name(self):
"""
Retrieve the worksheet name.
Args:
None.
Returns:
Nothing.
"""
# There is no set_name() method. Name must be set in add_worksheet().
return self.name
def activate(self):
"""
Set this worksheet as the active worksheet, i.e. the worksheet that is
displayed when the workbook is opened. Also set it as selected.
Note: An active worksheet cannot be hidden.
Args:
None.
Returns:
Nothing.
"""
self.hidden = 0
self.selected = 1
self.worksheet_meta.activesheet = self.index
def select(self):
"""
Set current worksheet as a selected worksheet, i.e. the worksheet
has its tab highlighted.
Note: A selected worksheet cannot be hidden.
Args:
None.
Returns:
Nothing.
"""
self.selected = 1
self.hidden = 0
def hide(self):
"""
Hide the current worksheet.
Args:
None.
Returns:
Nothing.
"""
self.hidden = 1
# A hidden worksheet shouldn't be active or selected.
self.selected = 0
# TODO. Should add a check to see if the sheet is the global
# activesheet or firstsheet and reset them.
def set_first_sheet(self):
"""
Set current worksheet as the first visible sheet. This is necessary
when there are a large number of worksheets and the activated
worksheet is not visible on the screen.
Note: A selected worksheet cannot be hidden.
Args:
None.
Returns:
Nothing.
"""
self.hidden = 0 # Active worksheet can't be hidden.
self.worksheet_meta.firstsheet = self.index
    @convert_column_args
    def set_column(self, first_col, last_col, width=None, cell_format=None,
                   options=None):
        """
        Set the width, and other properties of a single column or a
        range of columns.

        Args:
            first_col: First column (zero-indexed).
            last_col: Last column (zero-indexed). Can be same as first_col.
            width: Column width. (optional).
            cell_format: Column cell_format. (optional).
            options: Dict of options such as hidden and level.

        Returns:
            0: Success.
            -1: Column number is out of worksheet bounds.
        """
        if options is None:
            options = {}
        # Ensure 2nd col is larger than first.
        if first_col > last_col:
            (first_col, last_col) = (last_col, first_col)
        # Don't modify the row dimensions when checking the columns.
        ignore_row = True
        # Set optional column values.
        hidden = options.get('hidden', False)
        collapsed = options.get('collapsed', False)
        level = options.get('level', 0)
        # Store the column dimension only in some conditions: when a
        # format is applied, or a width is set on a hidden column.
        # NOTE(review): presumably this avoids extending the used range
        # for width-only changes - confirm against _check_dimensions().
        if cell_format or (width and hidden):
            ignore_col = False
        else:
            ignore_col = True
        # Check that each column is valid and store the max and min values.
        if self._check_dimensions(0, last_col, ignore_row, ignore_col):
            return -1
        if self._check_dimensions(0, first_col, ignore_row, ignore_col):
            return -1
        # Set the limits for the outline levels (0 <= x <= 7).
        if level < 0:
            level = 0
        if level > 7:
            level = 7
        if level > self.outline_col_level:
            self.outline_col_level = level
        # Store the column data. Padded for sorting.
        self.colinfo["%05d" % first_col] = [first_col, last_col, width,
                                            cell_format, hidden, level,
                                            collapsed]
        # Store the column change to allow optimizations.
        self.col_size_changed = True
        if width is None:
            width = self.default_col_width
        # Store the col sizes for use when calculating image vertices taking
        # hidden columns into account. Also store the column formats.
        for col in range(first_col, last_col + 1):
            self.col_sizes[col] = [width, hidden]
            if cell_format:
                self.col_formats[col] = cell_format
        return 0
@convert_column_args
def set_column_pixels(self, first_col, last_col, width=None,
cell_format=None, options=None):
"""
Set the width, and other properties of a single column or a
range of columns, where column width is in pixels.
Args:
first_col: First column (zero-indexed).
last_col: Last column (zero-indexed). Can be same as first_col.
width: Column width in pixels. (optional).
cell_format: Column cell_format. (optional).
options: Dict of options such as hidden and level.
Returns:
0: Success.
-1: Column number is out of worksheet bounds.
"""
if width is not None:
width = self._pixels_to_width(width)
return self.set_column(first_col, last_col, width,
cell_format, options)
def set_row(self, row, height=None, cell_format=None, options=None):
"""
Set the width, and other properties of a row.
Args:
row: Row number (zero-indexed).
height: Row height. (optional).
cell_format: Row cell_format. (optional).
options: Dict of options such as hidden, level and collapsed.
Returns:
0: Success.
-1: Row number is out of worksheet bounds.
"""
if options is None:
options = {}
# Use minimum col in _check_dimensions().
if self.dim_colmin is not None:
min_col = self.dim_colmin
else:
min_col = 0
# Check that row is valid.
if self._check_dimensions(row, min_col):
return -1
if height is None:
height = self.default_row_height
# Set optional row values.
hidden = options.get('hidden', False)
collapsed = options.get('collapsed', False)
level = options.get('level', 0)
# If the height is 0 the row is hidden and the height is the default.
if height == 0:
hidden = 1
height = self.default_row_height
# Set the limits for the outline levels (0 <= x <= 7).
if level < 0:
level = 0
if level > 7:
level = 7
if level > self.outline_row_level:
self.outline_row_level = level
# Store the row properties.
self.set_rows[row] = [height, cell_format, hidden, level, collapsed]
# Store the row change to allow optimizations.
self.row_size_changed = True
# Store the row sizes for use when calculating image vertices.
self.row_sizes[row] = [height, hidden]
def set_row_pixels(self, row, height=None, cell_format=None, options=None):
"""
Set the width (in pixels), and other properties of a row.
Args:
row: Row number (zero-indexed).
height: Row height in pixels. (optional).
cell_format: Row cell_format. (optional).
options: Dict of options such as hidden, level and collapsed.
Returns:
0: Success.
-1: Row number is out of worksheet bounds.
"""
if height is not None:
height = self._pixels_to_height(height)
return self.set_row(row, height, cell_format, options)
def set_default_row(self, height=None, hide_unused_rows=False):
"""
Set the default row properties.
Args:
height: Default height. Optional, defaults to 15.
hide_unused_rows: Hide unused rows. Optional, defaults to False.
Returns:
Nothing.
"""
if height is None:
height = self.default_row_height
if height != self.original_row_height:
# Store the row change to allow optimizations.
self.row_size_changed = True
self.default_row_height = height
if hide_unused_rows:
self.default_row_zeroed = 1
@convert_range_args
def merge_range(self, first_row, first_col, last_row, last_col,
data, cell_format=None):
"""
Merge a range of cells.
Args:
first_row: The first row of the cell range. (zero indexed).
first_col: The first column of the cell range.
last_row: The last row of the cell range. (zero indexed).
last_col: The last column of the cell range.
data: Cell data.
cell_format: Cell Format object.
Returns:
0: Success.
-1: Row or column is out of worksheet bounds.
other: Return value of write().
"""
# Merge a range of cells. The first cell should contain the data and
# the others should be blank. All cells should have the same format.
# Excel doesn't allow a single cell to be merged
if first_row == last_row and first_col == last_col:
warn("Can't merge single cell")
return
# Swap last row/col with first row/col as necessary
if first_row > last_row:
(first_row, last_row) = (last_row, first_row)
if first_col > last_col:
(first_col, last_col) = (last_col, first_col)
# Check that row and col are valid and store max and min values.
if self._check_dimensions(first_row, first_col):
return -1
if self._check_dimensions(last_row, last_col):
return -1
# Store the merge range.
self.merge.append([first_row, first_col, last_row, last_col])
# Write the first cell
self._write(first_row, first_col, data, cell_format)
# Pad out the rest of the area with formatted blank cells.
for row in range(first_row, last_row + 1):
for col in range(first_col, last_col + 1):
if row == first_row and col == first_col:
continue
self._write_blank(row, col, '', cell_format)
@convert_range_args
def autofilter(self, first_row, first_col, last_row, last_col):
"""
Set the autofilter area in the worksheet.
Args:
first_row: The first row of the cell range. (zero indexed).
first_col: The first column of the cell range.
last_row: The last row of the cell range. (zero indexed).
last_col: The last column of the cell range.
Returns:
Nothing.
"""
# Reverse max and min values if necessary.
if last_row < first_row:
(first_row, last_row) = (last_row, first_row)
if last_col < first_col:
(first_col, last_col) = (last_col, first_col)
# Build up the print area range "Sheet1!$A$1:$C$13".
area = self._convert_name_area(first_row, first_col,
last_row, last_col)
ref = xl_range(first_row, first_col, last_row, last_col)
self.autofilter_area = area
self.autofilter_ref = ref
self.filter_range = [first_col, last_col]
def filter_column(self, col, criteria):
"""
Set the column filter criteria.
Args:
col: Filter column (zero-indexed).
criteria: Filter criteria.
Returns:
Nothing.
"""
if not self.autofilter_area:
warn("Must call autofilter() before filter_column()")
return
# Check for a column reference in A1 notation and substitute.
try:
int(col)
except ValueError:
# Convert col ref to a cell ref and then to a col number.
col_letter = col
(_, col) = xl_cell_to_rowcol(col + '1')
if col >= self.xls_colmax:
warn("Invalid column '%s'" % col_letter)
return
(col_first, col_last) = self.filter_range
# Reject column if it is outside filter range.
if col < col_first or col > col_last:
warn("Column '%d' outside autofilter() column range (%d, %d)"
% (col, col_first, col_last))
return
tokens = self._extract_filter_tokens(criteria)
if not (len(tokens) == 3 or len(tokens) == 7):
warn("Incorrect number of tokens in criteria '%s'" % criteria)
tokens = self._parse_filter_expression(criteria, tokens)
# Excel handles single or double custom filters as default filters.
# We need to check for them and handle them accordingly.
if len(tokens) == 2 and tokens[0] == 2:
# Single equality.
self.filter_column_list(col, [tokens[1]])
elif (len(tokens) == 5 and tokens[0] == 2 and tokens[2] == 1
and tokens[3] == 2):
# Double equality with "or" operator.
self.filter_column_list(col, [tokens[1], tokens[4]])
else:
# Non default custom filter.
self.filter_cols[col] = tokens
self.filter_type[col] = 0
self.filter_on = 1
def filter_column_list(self, col, filters):
"""
Set the column filter criteria in Excel 2007 list style.
Args:
col: Filter column (zero-indexed).
filters: List of filter criteria to match.
Returns:
Nothing.
"""
if not self.autofilter_area:
warn("Must call autofilter() before filter_column()")
return
# Check for a column reference in A1 notation and substitute.
try:
int(col)
except ValueError:
# Convert col ref to a cell ref and then to a col number.
col_letter = col
(_, col) = xl_cell_to_rowcol(col + '1')
if col >= self.xls_colmax:
warn("Invalid column '%s'" % col_letter)
return
(col_first, col_last) = self.filter_range
# Reject column if it is outside filter range.
if col < col_first or col > col_last:
warn("Column '%d' outside autofilter() column range "
"(%d,%d)" % (col, col_first, col_last))
return
self.filter_cols[col] = filters
self.filter_type[col] = 1
self.filter_on = 1
    @convert_range_args
    def data_validation(self, first_row, first_col, last_row, last_col,
                        options=None):
        """
        Add a data validation to a worksheet.

        Args:
            first_row:   The first row of the cell range. (zero indexed).
            first_col:   The first column of the cell range.
            last_row:    The last row of the cell range. (zero indexed).
            last_col:    The last column of the cell range.
            options:     Data validation options.

        Returns:
            0:  Success.
            -1: Row or column is out of worksheet bounds.
            -2: Incorrect parameter or option.
        """
        # Check that row and col are valid without storing the values.
        if self._check_dimensions(first_row, first_col, True, True):
            return -1
        if self._check_dimensions(last_row, last_col, True, True):
            return -1

        if options is None:
            options = {}
        else:
            # Copy the user defined options so they aren't modified.
            options = options.copy()

        # Valid input parameters.
        valid_parameters = {
            'validate': True,
            'criteria': True,
            'value': True,
            'source': True,
            'minimum': True,
            'maximum': True,
            'ignore_blank': True,
            'dropdown': True,
            'show_input': True,
            'input_title': True,
            'input_message': True,
            'show_error': True,
            'error_title': True,
            'error_message': True,
            'error_type': True,
            'other_cells': True,
        }

        # Check for valid input parameters.
        for param_key in options.keys():
            if param_key not in valid_parameters:
                warn("Unknown parameter '%s' in data_validation()" % param_key)
                return -2

        # Map alternative parameter names 'source' or 'minimum' to 'value'.
        if 'source' in options:
            options['value'] = options['source']
        if 'minimum' in options:
            options['value'] = options['minimum']

        # 'validate' is a required parameter.
        if 'validate' not in options:
            warn("Parameter 'validate' is required in data_validation()")
            return -2

        # List of valid validation types. Maps the user-facing names to the
        # tokens used in the worksheet XML.
        valid_types = {
            'any': 'none',
            'any value': 'none',
            'whole number': 'whole',
            'whole': 'whole',
            'integer': 'whole',
            'decimal': 'decimal',
            'list': 'list',
            'date': 'date',
            'time': 'time',
            'text length': 'textLength',
            'length': 'textLength',
            'custom': 'custom',
        }

        # Check for valid validation types.
        if not options['validate'] in valid_types:
            warn("Unknown validation type '%s' for parameter "
                 "'validate' in data_validation()" % options['validate'])
            return -2
        else:
            options['validate'] = valid_types[options['validate']]

        # No action is required for validation type 'any' if there are no
        # input messages to display.
        if (options['validate'] == 'none'
                and options.get('input_title') is None
                and options.get('input_message') is None):
            return -2

        # The any, list and custom validations don't have a criteria so we use
        # a default of 'between'.
        if (options['validate'] == 'none'
                or options['validate'] == 'list'
                or options['validate'] == 'custom'):
            options['criteria'] = 'between'
            options['maximum'] = None

        # 'criteria' is a required parameter.
        if 'criteria' not in options:
            warn("Parameter 'criteria' is required in data_validation()")
            return -2

        # Valid criteria types. Maps user-facing names and operator aliases
        # to the tokens used in the worksheet XML.
        criteria_types = {
            'between': 'between',
            'not between': 'notBetween',
            'equal to': 'equal',
            '=': 'equal',
            '==': 'equal',
            'not equal to': 'notEqual',
            '!=': 'notEqual',
            '<>': 'notEqual',
            'greater than': 'greaterThan',
            '>': 'greaterThan',
            'less than': 'lessThan',
            '<': 'lessThan',
            'greater than or equal to': 'greaterThanOrEqual',
            '>=': 'greaterThanOrEqual',
            'less than or equal to': 'lessThanOrEqual',
            '<=': 'lessThanOrEqual',
        }

        # Check for valid criteria types.
        if not options['criteria'] in criteria_types:
            warn("Unknown criteria type '%s' for parameter "
                 "'criteria' in data_validation()" % options['criteria'])
            return -2
        else:
            options['criteria'] = criteria_types[options['criteria']]

        # 'Between' and 'Not between' criteria require 2 values.
        if (options['criteria'] == 'between' or
                options['criteria'] == 'notBetween'):
            if 'maximum' not in options:
                warn("Parameter 'maximum' is required in data_validation() "
                     "when using 'between' or 'not between' criteria")
                return -2
        else:
            options['maximum'] = None

        # Valid error dialog types.
        error_types = {
            'stop': 0,
            'warning': 1,
            'information': 2,
        }

        # Check for valid error dialog types.
        if 'error_type' not in options:
            options['error_type'] = 0
        elif not options['error_type'] in error_types:
            warn("Unknown criteria type '%s' for parameter 'error_type' "
                 "in data_validation()" % options['error_type'])
            return -2
        else:
            options['error_type'] = error_types[options['error_type']]

        # Convert date/times value if required.
        # NOTE(review): this reads options['value'] directly, so a date/time
        # validation with no 'value'/'source'/'minimum' option raises KeyError
        # instead of warning and returning -2 -- confirm whether intended.
        if options['validate'] == 'date' or options['validate'] == 'time':

            if options['value']:
                if supported_datetime(options['value']):
                    date_time = self._convert_date_time(options['value'])
                    # Format date number to the same precision as Excel.
                    options['value'] = "%.16g" % date_time

            if options['maximum']:
                if supported_datetime(options['maximum']):
                    date_time = self._convert_date_time(options['maximum'])
                    options['maximum'] = "%.16g" % date_time

        # Check that the input title doesn't exceed the maximum length.
        if options.get('input_title') and len(options['input_title']) > 32:
            warn("Length of input title '%s' exceeds Excel's limit of 32"
                 % force_unicode(options['input_title']))
            return -2

        # Check that the error title doesn't exceed the maximum length.
        if options.get('error_title') and len(options['error_title']) > 32:
            warn("Length of error title '%s' exceeds Excel's limit of 32"
                 % force_unicode(options['error_title']))
            return -2

        # Check that the input message doesn't exceed the maximum length.
        if (options.get('input_message')
                and len(options['input_message']) > 255):
            warn("Length of input message '%s' exceeds Excel's limit of 255"
                 % force_unicode(options['input_message']))
            return -2

        # Check that the error message doesn't exceed the maximum length.
        if (options.get('error_message')
                and len(options['error_message']) > 255):
            warn("Length of error message '%s' exceeds Excel's limit of 255"
                 % force_unicode(options['error_message']))
            return -2

        # Check that the input list doesn't exceed the maximum length.
        if options['validate'] == 'list' and type(options['value']) is list:
            formula = self._csv_join(*options['value'])
            if len(formula) > 255:
                warn("Length of list items '%s' exceeds Excel's limit of "
                     "255, use a formula range instead"
                     % force_unicode(formula))
                return -2

        # Set some defaults if they haven't been defined by the user.
        if 'ignore_blank' not in options:
            options['ignore_blank'] = 1
        if 'dropdown' not in options:
            options['dropdown'] = 1
        if 'show_input' not in options:
            options['show_input'] = 1
        if 'show_error' not in options:
            options['show_error'] = 1

        # These are the cells to which the validation is applied.
        options['cells'] = [[first_row, first_col, last_row, last_col]]

        # A (for now) undocumented parameter to pass additional cell ranges.
        if 'other_cells' in options:
            options['cells'].extend(options['other_cells'])

        # Store the validation information until we close the worksheet.
        self.validations.append(options)
@convert_range_args
def conditional_format(self, first_row, first_col, last_row, last_col,
options=None):
"""
Add a conditional format to a worksheet.
Args:
first_row: The first row of the cell range. (zero indexed).
first_col: The first column of the cell range.
last_row: The last row of the cell range. (zero indexed).
last_col: The last column of the cell range.
options: Conditional format options.
Returns:
0: Success.
-1: Row or column is out of worksheet bounds.
-2: Incorrect parameter or option.
"""
# Check that row and col are valid without storing the values.
if self._check_dimensions(first_row, first_col, True, True):
return -1
if self._check_dimensions(last_row, last_col, True, True):
return -1
if options is None:
options = {}
else:
# Copy the user defined options so they aren't modified.
options = options.copy()
# Valid input parameters.
valid_parameter = {
'type': True,
'format': True,
'criteria': True,
'value': True,
'minimum': True,
'maximum': True,
'stop_if_true': True,
'min_type': True,
'mid_type': True,
'max_type': True,
'min_value': True,
'mid_value': True,
'max_value': True,
'min_color': True,
'mid_color': True,
'max_color': True,
'min_length': True,
'max_length': True,
'multi_range': True,
'bar_color': True,
'bar_negative_color': True,
'bar_negative_color_same': True,
'bar_solid': True,
'bar_border_color': True,
'bar_negative_border_color': True,
'bar_negative_border_color_same': True,
'bar_no_border': True,
'bar_direction': True,
'bar_axis_position': True,
'bar_axis_color': True,
'bar_only': True,
'data_bar_2010': True,
'icon_style': True,
'reverse_icons': True,
'icons_only': True,
'icons': True}
# Check for valid input parameters.
for param_key in options.keys():
if param_key not in valid_parameter:
warn("Unknown parameter '%s' in conditional_format()" %
param_key)
return -2
# 'type' is a required parameter.
if 'type' not in options:
warn("Parameter 'type' is required in conditional_format()")
return -2
# Valid types.
valid_type = {
'cell': 'cellIs',
'date': 'date',
'time': 'time',
'average': 'aboveAverage',
'duplicate': 'duplicateValues',
'unique': 'uniqueValues',
'top': 'top10',
'bottom': 'top10',
'text': 'text',
'time_period': 'timePeriod',
'blanks': 'containsBlanks',
'no_blanks': 'notContainsBlanks',
'errors': 'containsErrors',
'no_errors': 'notContainsErrors',
'2_color_scale': '2_color_scale',
'3_color_scale': '3_color_scale',
'data_bar': 'dataBar',
'formula': 'expression',
'icon_set': 'iconSet'}
# Check for valid types.
if options['type'] not in valid_type:
warn("Unknown value '%s' for parameter 'type' "
"in conditional_format()" % options['type'])
return -2
else:
if options['type'] == 'bottom':
options['direction'] = 'bottom'
options['type'] = valid_type[options['type']]
# Valid criteria types.
criteria_type = {
'between': 'between',
'not between': 'notBetween',
'equal to': 'equal',
'=': 'equal',
'==': 'equal',
'not equal to': 'notEqual',
'!=': 'notEqual',
'<>': 'notEqual',
'greater than': 'greaterThan',
'>': 'greaterThan',
'less than': 'lessThan',
'<': 'lessThan',
'greater than or equal to': 'greaterThanOrEqual',
'>=': 'greaterThanOrEqual',
'less than or equal to': 'lessThanOrEqual',
'<=': 'lessThanOrEqual',
'containing': 'containsText',
'not containing': 'notContains',
'begins with': 'beginsWith',
'ends with': 'endsWith',
'yesterday': 'yesterday',
'today': 'today',
'last 7 days': 'last7Days',
'last week': 'lastWeek',
'this week': 'thisWeek',
'next week': 'nextWeek',
'last month': 'lastMonth',
'this month': 'thisMonth',
'next month': 'nextMonth',
# For legacy, but incorrect, support.
'continue week': 'nextWeek',
'continue month': 'nextMonth'}
# Check for valid criteria types.
if 'criteria' in options and options['criteria'] in criteria_type:
options['criteria'] = criteria_type[options['criteria']]
# Convert date/times value if required.
if options['type'] == 'date' or options['type'] == 'time':
options['type'] = 'cellIs'
if 'value' in options:
if not supported_datetime(options['value']):
warn("Conditional format 'value' must be a "
"datetime object.")
return -2
else:
date_time = self._convert_date_time(options['value'])
# Format date number to the same precision as Excel.
options['value'] = "%.16g" % date_time
if 'minimum' in options:
if not supported_datetime(options['minimum']):
warn("Conditional format 'minimum' must be a "
"datetime object.")
return -2
else:
date_time = self._convert_date_time(options['minimum'])
options['minimum'] = "%.16g" % date_time
if 'maximum' in options:
if not supported_datetime(options['maximum']):
warn("Conditional format 'maximum' must be a "
"datetime object.")
return -2
else:
date_time = self._convert_date_time(options['maximum'])
options['maximum'] = "%.16g" % date_time
# Valid icon styles.
valid_icons = {
"3_arrows": "3Arrows", # 1
"3_flags": "3Flags", # 2
"3_traffic_lights_rimmed": "3TrafficLights2", # 3
"3_symbols_circled": "3Symbols", # 4
"4_arrows": "4Arrows", # 5
"4_red_to_black": "4RedToBlack", # 6
"4_traffic_lights": "4TrafficLights", # 7
"5_arrows_gray": "5ArrowsGray", # 8
"5_quarters": "5Quarters", # 9
"3_arrows_gray": "3ArrowsGray", # 10
"3_traffic_lights": "3TrafficLights", # 11
"3_signs": "3Signs", # 12
"3_symbols": "3Symbols2", # 13
"4_arrows_gray": "4ArrowsGray", # 14
"4_ratings": "4Rating", # 15
"5_arrows": "5Arrows", # 16
"5_ratings": "5Rating"} # 17
# Set the icon set properties.
if options['type'] == 'iconSet':
# An icon_set must have an icon style.
if not options.get('icon_style'):
warn("The 'icon_style' parameter must be specified when "
"'type' == 'icon_set' in conditional_format()")
return -3
# Check for valid icon styles.
if options['icon_style'] not in valid_icons:
warn("Unknown icon_style '%s' in conditional_format()" %
options['icon_style'])
return -2
else:
options['icon_style'] = valid_icons[options['icon_style']]
# Set the number of icons for the icon style.
options['total_icons'] = 3
if options['icon_style'].startswith('4'):
options['total_icons'] = 4
elif options['icon_style'].startswith('5'):
options['total_icons'] = 5
options['icons'] = self._set_icon_props(options.get('total_icons'),
options.get('icons'))
# Swap last row/col for first row/col as necessary
if first_row > last_row:
first_row, last_row = last_row, first_row
if first_col > last_col:
first_col, last_col = last_col, first_col
# Set the formatting range.
cell_range = xl_range(first_row, first_col, last_row, last_col)
start_cell = xl_rowcol_to_cell(first_row, first_col)
# Override with user defined multiple range if provided.
if 'multi_range' in options:
cell_range = options['multi_range']
cell_range = cell_range.replace('$', '')
# Get the dxf format index.
if 'format' in options and options['format']:
options['format'] = options['format']._get_dxf_index()
# Set the priority based on the order of adding.
options['priority'] = self.dxf_priority
self.dxf_priority += 1
# Check for 2010 style data_bar parameters.
if (self.use_data_bars_2010 or
options.get('data_bar_2010') or
options.get('bar_solid') or
options.get('bar_border_color') or
options.get('bar_negative_color') or
options.get('bar_negative_color_same') or
options.get('bar_negative_border_color') or
options.get('bar_negative_border_color_same') or
options.get('bar_no_border') or
options.get('bar_axis_position') or
options.get('bar_axis_color') or
options.get('bar_direction')):
options['is_data_bar_2010'] = True
# Special handling of text criteria.
if options['type'] == 'text':
if options['criteria'] == 'containsText':
options['type'] = 'containsText'
options['formula'] = ('NOT(ISERROR(SEARCH("%s",%s)))'
% (options['value'], start_cell))
elif options['criteria'] == 'notContains':
options['type'] = 'notContainsText'
options['formula'] = ('ISERROR(SEARCH("%s",%s))'
% (options['value'], start_cell))
elif options['criteria'] == 'beginsWith':
options['type'] = 'beginsWith'
options['formula'] = ('LEFT(%s,%d)="%s"'
% (start_cell,
len(options['value']),
options['value']))
elif options['criteria'] == 'endsWith':
options['type'] = 'endsWith'
options['formula'] = ('RIGHT(%s,%d)="%s"'
% (start_cell,
len(options['value']),
options['value']))
else:
warn("Invalid text criteria '%s' "
"in conditional_format()" % options['criteria'])
# Special handling of time time_period criteria.
if options['type'] == 'timePeriod':
if options['criteria'] == 'yesterday':
options['formula'] = 'FLOOR(%s,1)=TODAY()-1' % start_cell
elif options['criteria'] == 'today':
options['formula'] = 'FLOOR(%s,1)=TODAY()' % start_cell
elif options['criteria'] == 'tomorrow':
options['formula'] = 'FLOOR(%s,1)=TODAY()+1' % start_cell
elif options['criteria'] == 'last7Days':
options['formula'] = \
('AND(TODAY()-FLOOR(%s,1)<=6,FLOOR(%s,1)<=TODAY())' %
(start_cell, start_cell))
elif options['criteria'] == 'lastWeek':
options['formula'] = \
('AND(TODAY()-ROUNDDOWN(%s,0)>=(WEEKDAY(TODAY())),'
'TODAY()-ROUNDDOWN(%s,0)<(WEEKDAY(TODAY())+7))' %
(start_cell, start_cell))
elif options['criteria'] == 'thisWeek':
options['formula'] = \
('AND(TODAY()-ROUNDDOWN(%s,0)<=WEEKDAY(TODAY())-1,'
'ROUNDDOWN(%s,0)-TODAY()<=7-WEEKDAY(TODAY()))' %
(start_cell, start_cell))
elif options['criteria'] == 'nextWeek':
options['formula'] = \
('AND(ROUNDDOWN(%s,0)-TODAY()>(7-WEEKDAY(TODAY())),'
'ROUNDDOWN(%s,0)-TODAY()<(15-WEEKDAY(TODAY())))' %
(start_cell, start_cell))
elif options['criteria'] == 'lastMonth':
options['formula'] = \
('AND(MONTH(%s)=MONTH(TODAY())-1,OR(YEAR(%s)=YEAR('
'TODAY()),AND(MONTH(%s)=1,YEAR(A1)=YEAR(TODAY())-1)))' %
(start_cell, start_cell, start_cell))
elif options['criteria'] == 'thisMonth':
options['formula'] = \
('AND(MONTH(%s)=MONTH(TODAY()),YEAR(%s)=YEAR(TODAY()))' %
(start_cell, start_cell))
elif options['criteria'] == 'nextMonth':
options['formula'] = \
('AND(MONTH(%s)=MONTH(TODAY())+1,OR(YEAR(%s)=YEAR('
'TODAY()),AND(MONTH(%s)=12,YEAR(%s)=YEAR(TODAY())+1)))' %
(start_cell, start_cell, start_cell, start_cell))
else:
warn("Invalid time_period criteria '%s' "
"in conditional_format()" % options['criteria'])
# Special handling of blanks/error types.
if options['type'] == 'containsBlanks':
options['formula'] = 'LEN(TRIM(%s))=0' % start_cell
if options['type'] == 'notContainsBlanks':
options['formula'] = 'LEN(TRIM(%s))>0' % start_cell
if options['type'] == 'containsErrors':
options['formula'] = 'ISERROR(%s)' % start_cell
if options['type'] == 'notContainsErrors':
options['formula'] = 'NOT(ISERROR(%s))' % start_cell
# Special handling for 2 color scale.
if options['type'] == '2_color_scale':
options['type'] = 'colorScale'
# Color scales don't use any additional formatting.
options['format'] = None
# Turn off 3 color parameters.
options['mid_type'] = None
options['mid_color'] = None
options.setdefault('min_type', 'min')
options.setdefault('max_type', 'max')
options.setdefault('min_value', 0)
options.setdefault('max_value', 0)
options.setdefault('min_color', '#FF7128')
options.setdefault('max_color', '#FFEF9C')
options['min_color'] = xl_color(options['min_color'])
options['max_color'] = xl_color(options['max_color'])
# Special handling for 3 color scale.
if options['type'] == '3_color_scale':
options['type'] = 'colorScale'
# Color scales don't use any additional formatting.
options['format'] = None
options.setdefault('min_type', 'min')
options.setdefault('mid_type', 'percentile')
options.setdefault('max_type', 'max')
options.setdefault('min_value', 0)
options.setdefault('max_value', 0)
options.setdefault('min_color', '#F8696B')
options.setdefault('mid_color', '#FFEB84')
options.setdefault('max_color', '#63BE7B')
options['min_color'] = xl_color(options['min_color'])
options['mid_color'] = xl_color(options['mid_color'])
options['max_color'] = xl_color(options['max_color'])
# Set a default mid value.
if 'mid_value' not in options:
options['mid_value'] = 50
# Special handling for data bar.
if options['type'] == 'dataBar':
# Color scales don't use any additional formatting.
options['format'] = None
if not options.get('min_type'):
options['min_type'] = 'min'
options['x14_min_type'] = 'autoMin'
else:
options['x14_min_type'] = options['min_type']
if not options.get('max_type'):
options['max_type'] = 'max'
options['x14_max_type'] = 'autoMax'
else:
options['x14_max_type'] = options['max_type']
options.setdefault('min_value', 0)
options.setdefault('max_value', 0)
options.setdefault('bar_color', '#638EC6')
options.setdefault('bar_border_color', options['bar_color'])
options.setdefault('bar_only', False)
options.setdefault('bar_no_border', False)
options.setdefault('bar_solid', False)
options.setdefault('bar_direction', '')
options.setdefault('bar_negative_color', '#FF0000')
options.setdefault('bar_negative_border_color', '#FF0000')
options.setdefault('bar_negative_color_same', False)
options.setdefault('bar_negative_border_color_same', False)
options.setdefault('bar_axis_position', '')
options.setdefault('bar_axis_color', '#000000')
options['bar_color'] = xl_color(options['bar_color'])
options['bar_border_color'] = xl_color(options['bar_border_color'])
options['bar_axis_color'] = xl_color(options['bar_axis_color'])
options['bar_negative_color'] = \
xl_color(options['bar_negative_color'])
options['bar_negative_border_color'] = \
xl_color(options['bar_negative_border_color'])
# Adjust for 2010 style data_bar parameters.
if options.get('is_data_bar_2010'):
self.excel_version = 2010
if options['min_type'] == 'min' and options['min_value'] == 0:
options['min_value'] = None
if options['max_type'] == 'max' and options['max_value'] == 0:
options['max_value'] = None
options['range'] = cell_range
# Strip the leading = from formulas.
try:
options['min_value'] = options['min_value'].lstrip('=')
except (KeyError, AttributeError):
pass
try:
options['mid_value'] = options['mid_value'].lstrip('=')
except (KeyError, AttributeError):
pass
try:
options['max_value'] = options['max_value'].lstrip('=')
except (KeyError, AttributeError):
pass
# Store the conditional format until we close the worksheet.
if cell_range in self.cond_formats:
self.cond_formats[cell_range].append(options)
else:
self.cond_formats[cell_range] = [options]
@convert_range_args
def add_table(self, first_row, first_col, last_row, last_col,
options=None):
"""
Add an Excel table to a worksheet.
Args:
first_row: The first row of the cell range. (zero indexed).
first_col: The first column of the cell range.
last_row: The last row of the cell range. (zero indexed).
last_col: The last column of the cell range.
options: Table format options. (Optional)
Returns:
0: Success.
-1: Not supported in constant_memory mode.
-2: Row or column is out of worksheet bounds.
-3: Incorrect parameter or option.
"""
table = {}
col_formats = {}
if options is None:
options = {}
else:
# Copy the user defined options so they aren't modified.
options = options.copy()
if self.constant_memory:
warn("add_table() isn't supported in 'constant_memory' mode")
return -1
# Check that row and col are valid without storing the values.
if self._check_dimensions(first_row, first_col, True, True):
return -2
if self._check_dimensions(last_row, last_col, True, True):
return -2
# Swap last row/col for first row/col as necessary.
if first_row > last_row:
(first_row, last_row) = (last_row, first_row)
if first_col > last_col:
(first_col, last_col) = (last_col, first_col)
# Valid input parameters.
valid_parameter = {
'autofilter': True,
'banded_columns': True,
'banded_rows': True,
'columns': True,
'data': True,
'first_column': True,
'header_row': True,
'last_column': True,
'name': True,
'style': True,
'total_row': True,
}
# Check for valid input parameters.
for param_key in options.keys():
if param_key not in valid_parameter:
warn("Unknown parameter '%s' in add_table()" % param_key)
return -3
# Turn on Excel's defaults.
options['banded_rows'] = options.get('banded_rows', True)
options['header_row'] = options.get('header_row', True)
options['autofilter'] = options.get('autofilter', True)
# Check that there are enough rows.
num_rows = last_row - first_row
if options['header_row']:
num_rows -= 1
if num_rows < 0:
warn("Must have at least one data row in in add_table()")
return -3
# Set the table options.
table['show_first_col'] = options.get('first_column', False)
table['show_last_col'] = options.get('last_column', False)
table['show_row_stripes'] = options.get('banded_rows', False)
table['show_col_stripes'] = options.get('banded_columns', False)
table['header_row_count'] = options.get('header_row', 0)
table['totals_row_shown'] = options.get('total_row', False)
# Set the table name.
if 'name' in options:
name = options['name']
table['name'] = name
if ' ' in name:
warn("Name '%s' in add_table() cannot contain spaces"
% force_unicode(name))
return -3
# Warn if the name contains invalid chars as defined by Excel.
if (not re.match(r'^[\w\\][\w\\.]*$', name, re.UNICODE)
or re.match(r'^\d', name)):
warn("Invalid Excel characters in add_table(): '%s'"
% force_unicode(name))
return -1
# Warn if the name looks like a cell name.
if re.match(r'^[a-zA-Z][a-zA-Z]?[a-dA-D]?[0-9]+$', name):
warn("Name looks like a cell name in add_table(): '%s'"
% force_unicode(name))
return -1
# Warn if the name looks like a R1C1 cell reference.
if (re.match(r'^[rcRC]$', name)
or re.match(r'^[rcRC]\d+[rcRC]\d+$', name)):
warn("Invalid name '%s' like a RC cell ref in add_table()"
% force_unicode(name))
return -1
# Set the table style.
if 'style' in options:
table['style'] = options['style']
if table['style'] is None:
table['style'] = ''
# Remove whitespace from style name.
table['style'] = table['style'].replace(' ', '')
else:
table['style'] = "TableStyleMedium9"
# Set the data range rows (without the header and footer).
first_data_row = first_row
last_data_row = last_row
if options.get('header_row'):
first_data_row += 1
if options.get('total_row'):
last_data_row -= 1
# Set the table and autofilter ranges.
table['range'] = xl_range(first_row, first_col,
last_row, last_col)
table['a_range'] = xl_range(first_row, first_col,
last_data_row, last_col)
# If the header row if off the default is to turn autofilter off.
if not options['header_row']:
options['autofilter'] = 0
# Set the autofilter range.
if options['autofilter']:
table['autofilter'] = table['a_range']
# Add the table columns.
col_id = 1
table['columns'] = []
seen_names = {}
for col_num in range(first_col, last_col + 1):
# Set up the default column data.
col_data = {
'id': col_id,
'name': 'Column' + str(col_id),
'total_string': '',
'total_function': '',
'total_value': 0,
'formula': '',
'format': None,
'name_format': None,
}
# Overwrite the defaults with any user defined values.
if 'columns' in options:
# Check if there are user defined values for this column.
if col_id <= len(options['columns']):
user_data = options['columns'][col_id - 1]
else:
user_data = None
if user_data:
# Get the column format.
xformat = user_data.get('format', None)
# Map user defined values to internal values.
if user_data.get('header'):
col_data['name'] = user_data['header']
# Excel requires unique case insensitive header names.
header_name = col_data['name']
name = header_name.lower()
if name in seen_names:
warn("Duplicate header name in add_table(): '%s'"
% force_unicode(name))
return -1
else:
seen_names[name] = True
col_data['name_format'] = user_data.get('header_format')
# Handle the column formula.
if 'formula' in user_data and user_data['formula']:
formula = user_data['formula']
# Remove the formula '=' sign if it exists.
if formula.startswith('='):
formula = formula.lstrip('=')
# Covert Excel 2010 "@" ref to 2007 "#This Row".
formula = formula.replace('@', '[#This Row],')
col_data['formula'] = formula
for row in range(first_data_row, last_data_row + 1):
self._write_formula(row, col_num, formula, xformat)
# Handle the function for the total row.
if user_data.get('total_function'):
function = user_data['total_function']
# Massage the function name.
function = function.lower()
function = function.replace('_', '')
function = function.replace(' ', '')
if function == 'countnums':
function = 'countNums'
if function == 'stddev':
function = 'stdDev'
col_data['total_function'] = function
formula = \
self._table_function_to_formula(function,
col_data['name'])
value = user_data.get('total_value', 0)
self._write_formula(last_row, col_num, formula,
xformat, value)
elif user_data.get('total_string'):
# Total label only (not a function).
total_string = user_data['total_string']
col_data['total_string'] = total_string
self._write_string(last_row, col_num, total_string,
user_data.get('format'))
# Get the dxf format index.
if xformat is not None:
col_data['format'] = xformat._get_dxf_index()
# Store the column format for writing the cell data.
# It doesn't matter if it is undefined.
col_formats[col_id - 1] = xformat
# Store the column data.
table['columns'].append(col_data)
# Write the column headers to the worksheet.
if options['header_row']:
self._write_string(first_row, col_num, col_data['name'],
col_data['name_format'])
col_id += 1
# Write the cell data if supplied.
if 'data' in options:
data = options['data']
i = 0 # For indexing the row data.
for row in range(first_data_row, last_data_row + 1):
j = 0 # For indexing the col data.
for col in range(first_col, last_col + 1):
if i < len(data) and j < len(data[i]):
token = data[i][j]
if j in col_formats:
self._write(row, col, token, col_formats[j])
else:
self._write(row, col, token, None)
j += 1
i += 1
# Store the table data.
self.tables.append(table)
return table
@convert_cell_args
def add_sparkline(self, row, col, options=None):
"""
Add sparklines to the worksheet.
Args:
row: The cell row (zero indexed).
col: The cell column (zero indexed).
options: Sparkline formatting options.
Returns:
0: Success.
-1: Row or column is out of worksheet bounds.
-2: Incorrect parameter or option.
"""
# Check that row and col are valid without storing the values.
if self._check_dimensions(row, col, True, True):
return -1
sparkline = {'locations': [xl_rowcol_to_cell(row, col)]}
if options is None:
options = {}
# Valid input parameters.
valid_parameters = {
'location': True,
'range': True,
'type': True,
'high_point': True,
'low_point': True,
'negative_points': True,
'first_point': True,
'last_point': True,
'markers': True,
'style': True,
'series_color': True,
'negative_color': True,
'markers_color': True,
'first_color': True,
'last_color': True,
'high_color': True,
'low_color': True,
'max': True,
'min': True,
'axis': True,
'reverse': True,
'empty_cells': True,
'show_hidden': True,
'plot_hidden': True,
'date_axis': True,
'weight': True,
}
# Check for valid input parameters.
for param_key in options.keys():
if param_key not in valid_parameters:
warn("Unknown parameter '%s' in add_sparkline()" % param_key)
return -1
# 'range' is a required parameter.
if 'range' not in options:
warn("Parameter 'range' is required in add_sparkline()")
return -2
# Handle the sparkline type.
spark_type = options.get('type', 'line')
if spark_type not in ('line', 'column', 'win_loss'):
warn("Parameter 'type' must be 'line', 'column' "
"or 'win_loss' in add_sparkline()")
return -2
if spark_type == 'win_loss':
spark_type = 'stacked'
sparkline['type'] = spark_type
# We handle single location/range values or list of values.
if 'location' in options:
if type(options['location']) is list:
sparkline['locations'] = options['location']
else:
sparkline['locations'] = [options['location']]
if type(options['range']) is list:
sparkline['ranges'] = options['range']
else:
sparkline['ranges'] = [options['range']]
range_count = len(sparkline['ranges'])
location_count = len(sparkline['locations'])
# The ranges and locations must match.
if range_count != location_count:
warn("Must have the same number of location and range "
"parameters in add_sparkline()")
return -2
# Store the count.
sparkline['count'] = len(sparkline['locations'])
# Get the worksheet name for the range conversion below.
sheetname = quote_sheetname(self.name)
# Cleanup the input ranges.
new_ranges = []
for spark_range in sparkline['ranges']:
# Remove the absolute reference $ symbols.
spark_range = spark_range.replace('$', '')
# Remove the = from formula.
spark_range = spark_range.lstrip('=')
# Convert a simple range into a full Sheet1!A1:D1 range.
if '!' not in spark_range:
spark_range = sheetname + "!" + spark_range
new_ranges.append(spark_range)
sparkline['ranges'] = new_ranges
# Cleanup the input locations.
new_locations = []
for location in sparkline['locations']:
location = location.replace('$', '')
new_locations.append(location)
sparkline['locations'] = new_locations
# Map options.
sparkline['high'] = options.get('high_point')
sparkline['low'] = options.get('low_point')
sparkline['negative'] = options.get('negative_points')
sparkline['first'] = options.get('first_point')
sparkline['last'] = options.get('last_point')
sparkline['markers'] = options.get('markers')
sparkline['min'] = options.get('min')
sparkline['max'] = options.get('max')
sparkline['axis'] = options.get('axis')
sparkline['reverse'] = options.get('reverse')
sparkline['hidden'] = options.get('show_hidden')
sparkline['weight'] = options.get('weight')
# Map empty cells options.
empty = options.get('empty_cells', '')
if empty == 'zero':
sparkline['empty'] = 0
elif empty == 'connect':
sparkline['empty'] = 'span'
else:
sparkline['empty'] = 'gap'
# Map the date axis range.
date_range = options.get('date_axis')
if date_range and '!' not in date_range:
date_range = sheetname + "!" + date_range
sparkline['date_axis'] = date_range
# Set the sparkline styles.
style_id = options.get('style', 0)
style = get_sparkline_style(style_id)
sparkline['series_color'] = style['series']
sparkline['negative_color'] = style['negative']
sparkline['markers_color'] = style['markers']
sparkline['first_color'] = style['first']
sparkline['last_color'] = style['last']
sparkline['high_color'] = style['high']
sparkline['low_color'] = style['low']
# Override the style colors with user defined colors.
self._set_spark_color(sparkline, options, 'series_color')
self._set_spark_color(sparkline, options, 'negative_color')
self._set_spark_color(sparkline, options, 'markers_color')
self._set_spark_color(sparkline, options, 'first_color')
self._set_spark_color(sparkline, options, 'last_color')
self._set_spark_color(sparkline, options, 'high_color')
self._set_spark_color(sparkline, options, 'low_color')
self.sparklines.append(sparkline)
@convert_range_args
def set_selection(self, first_row, first_col, last_row, last_col):
"""
Set the selected cell or cells in a worksheet
Args:
first_row: The first row of the cell range. (zero indexed).
first_col: The first column of the cell range.
last_row: The last row of the cell range. (zero indexed).
last_col: The last column of the cell range.
Returns:
0: Nothing.
"""
pane = None
# Range selection. Do this before swapping max/min to allow the
# selection direction to be reversed.
active_cell = xl_rowcol_to_cell(first_row, first_col)
# Swap last row/col for first row/col if necessary
if first_row > last_row:
(first_row, last_row) = (last_row, first_row)
if first_col > last_col:
(first_col, last_col) = (last_col, first_col)
sqref = xl_range(first_row, first_col, last_row, last_col)
# Selection isn't set for cell A1.
if sqref == 'A1':
return
self.selections = [[pane, active_cell, sqref]]
def outline_settings(self, visible=1, symbols_below=1, symbols_right=1,
auto_style=0):
"""
Control outline settings.
Args:
visible: Outlines are visible. Optional, defaults to True.
symbols_below: Show row outline symbols below the outline bar.
Optional, defaults to True.
symbols_right: Show column outline symbols to the right of the
outline bar. Optional, defaults to True.
auto_style: Use Automatic style. Optional, defaults to False.
Returns:
0: Nothing.
"""
self.outline_on = visible
self.outline_below = symbols_below
self.outline_right = symbols_right
self.outline_style = auto_style
self.outline_changed = True
@convert_cell_args
def freeze_panes(self, row, col, top_row=None, left_col=None, pane_type=0):
"""
Create worksheet panes and mark them as frozen.
Args:
row: The cell row (zero indexed).
col: The cell column (zero indexed).
top_row: Topmost visible row in scrolling region of pane.
left_col: Leftmost visible row in scrolling region of pane.
Returns:
0: Nothing.
"""
if top_row is None:
top_row = row
if left_col is None:
left_col = col
self.panes = [row, col, top_row, left_col, pane_type]
@convert_cell_args
def split_panes(self, x, y, top_row=None, left_col=None):
"""
Create worksheet panes and mark them as split.
Args:
x: The position for the vertical split.
y: The position for the horizontal split.
top_row: Topmost visible row in scrolling region of pane.
left_col: Leftmost visible row in scrolling region of pane.
Returns:
0: Nothing.
"""
# Same as freeze panes with a different pane type.
self.freeze_panes(x, y, top_row, left_col, 2)
def set_zoom(self, zoom=100):
"""
Set the worksheet zoom factor.
Args:
zoom: Scale factor: 10 <= zoom <= 400.
Returns:
Nothing.
"""
# Ensure the zoom scale is in Excel's range.
if zoom < 10 or zoom > 400:
warn("Zoom factor %d outside range: 10 <= zoom <= 400" % zoom)
zoom = 100
self.zoom = int(zoom)
def right_to_left(self):
"""
Display the worksheet right to left for some versions of Excel.
Args:
None.
Returns:
Nothing.
"""
self.is_right_to_left = 1
def hide_zero(self):
"""
Hide zero values in worksheet cells.
Args:
None.
Returns:
Nothing.
"""
self.show_zeros = 0
def set_tab_color(self, color):
"""
Set the color of the worksheet tab.
Args:
color: A #RGB color index.
Returns:
Nothing.
"""
self.tab_color = xl_color(color)
def protect(self, password='', options=None):
"""
Set the password and protection options of the worksheet.
Args:
password: An optional password string.
options: A dictionary of worksheet objects to protect.
Returns:
Nothing.
"""
if password != '':
password = self._encode_password(password)
if not options:
options = {}
# Default values for objects that can be protected.
defaults = {
'sheet': True,
'content': False,
'objects': False,
'scenarios': False,
'format_cells': False,
'format_columns': False,
'format_rows': False,
'insert_columns': False,
'insert_rows': False,
'insert_hyperlinks': False,
'delete_columns': False,
'delete_rows': False,
'select_locked_cells': True,
'sort': False,
'autofilter': False,
'pivot_tables': False,
'select_unlocked_cells': True}
# Overwrite the defaults with user specified values.
for key in (options.keys()):
if key in defaults:
defaults[key] = options[key]
else:
warn("Unknown protection object: '%s'" % key)
# Set the password after the user defined values.
defaults['password'] = password
self.protect_options = defaults
def unprotect_range(self, cell_range, range_name=None, password=None):
"""
Unprotect ranges within a protected worksheet.
Args:
cell_range: The cell or cell range to unprotect.
range_name: An optional name for the range.
password: An optional password string. (undocumented)
Returns:
Nothing.
"""
if cell_range is None:
warn('Cell range must be specified in unprotect_range()')
return -1
# Sanitize the cell range.
cell_range = cell_range.lstrip('=')
cell_range = cell_range.replace('$', '')
self.num_protected_ranges += 1
if range_name is None:
range_name = 'Range' + str(self.num_protected_ranges)
if password:
password = self._encode_password(password)
self.protected_ranges.append((cell_range, range_name, password))
@convert_cell_args
def insert_button(self, row, col, options=None):
"""
Insert a button form object into the worksheet.
Args:
row: The cell row (zero indexed).
col: The cell column (zero indexed).
options: Button formatting options.
Returns:
0: Success.
-1: Row or column is out of worksheet bounds.
"""
# Check insert (row, col) without storing.
if self._check_dimensions(row, col, True, True):
warn('Cannot insert button at (%d, %d).' % (row, col))
return -1
if options is None:
options = {}
button = self._button_params(row, col, options)
self.buttons_list.append(button)
self.has_vml = 1
###########################################################################
#
# Public API. Page Setup methods.
#
###########################################################################
def set_landscape(self):
"""
Set the page orientation as landscape.
Args:
None.
Returns:
Nothing.
"""
self.orientation = 0
self.page_setup_changed = True
def set_portrait(self):
"""
Set the page orientation as portrait.
Args:
None.
Returns:
Nothing.
"""
self.orientation = 1
self.page_setup_changed = True
def set_page_view(self):
"""
Set the page view mode.
Args:
None.
Returns:
Nothing.
"""
self.page_view = 1
def set_paper(self, paper_size):
"""
Set the paper type. US Letter = 1, A4 = 9.
Args:
paper_size: Paper index.
Returns:
Nothing.
"""
if paper_size:
self.paper_size = paper_size
self.page_setup_changed = True
def center_horizontally(self):
"""
Center the page horizontally.
Args:
None.
Returns:
Nothing.
"""
self.print_options_changed = True
self.hcenter = 1
def center_vertically(self):
"""
Center the page vertically.
Args:
None.
Returns:
Nothing.
"""
self.print_options_changed = True
self.vcenter = 1
def set_margins(self, left=0.7, right=0.7, top=0.75, bottom=0.75):
"""
Set all the page margins in inches.
Args:
left: Left margin.
right: Right margin.
top: Top margin.
bottom: Bottom margin.
Returns:
Nothing.
"""
self.margin_left = left
self.margin_right = right
self.margin_top = top
self.margin_bottom = bottom
    def set_header(self, header='', options=None, margin=None):
        """
        Set the page header caption and optional margin.
        Args:
            header:  Header string.
            margin:  Header margin. Deprecated, use the options dict instead.
            options: Header options, mainly for images.
        Returns:
            Nothing.
        """
        # Keep the original string for any warning message below.
        header_orig = header
        # '&[Picture]' is the user-facing alias for Excel's internal '&G'
        # image placeholder token.
        header = header.replace('&[Picture]', '&G')
        if len(header) > 255:
            warn("Header string cannot be longer than Excel's "
                 "limit of 255 characters")
            return
        if options is not None:
            # For backward compatibility allow options to be the margin.
            if not isinstance(options, dict):
                options = {'margin': options}
        else:
            options = {}
        # Copy the user defined options so they aren't modified.
        options = options.copy()
        # For backward compatibility.
        if margin is not None:
            options['margin'] = margin
        # Reset the list in case the function is called more than once.
        self.header_images = []
        # Each entry is [filename, image_data, position] where the position
        # code is L/C/R (left/center/right) + H (header).
        if options.get('image_left'):
            self.header_images.append([options.get('image_left'),
                                       options.get('image_data_left'),
                                       'LH'])
        if options.get('image_center'):
            self.header_images.append([options.get('image_center'),
                                       options.get('image_data_center'),
                                       'CH'])
        if options.get('image_right'):
            self.header_images.append([options.get('image_right'),
                                       options.get('image_data_right'),
                                       'RH'])
        # Every '&G' placeholder in the string needs a matching image.
        placeholder_count = header.count('&G')
        image_count = len(self.header_images)
        if placeholder_count != image_count:
            warn("Number of header images (%s) doesn't match placeholder "
                 "count (%s) in string: %s"
                 % (image_count, placeholder_count, header_orig))
            self.header_images = []
            return
        if 'align_with_margins' in options:
            self.header_footer_aligns = options['align_with_margins']
        if 'scale_with_doc' in options:
            self.header_footer_scales = options['scale_with_doc']
        self.header = header
        self.margin_header = options.get('margin', 0.3)
        self.header_footer_changed = True
        # Header images are rendered via the legacy VML drawing layer.
        if image_count:
            self.has_header_vml = True
    def set_footer(self, footer='', options=None, margin=None):
        """
        Set the page footer caption and optional margin.
        Args:
            footer:  Footer string.
            margin:  Footer margin. Deprecated, use the options dict instead.
            options: Footer options, mainly for images.
        Returns:
            Nothing.
        """
        # Keep the original string for any warning message below.
        footer_orig = footer
        # '&[Picture]' is the user-facing alias for Excel's internal '&G'
        # image placeholder token.
        footer = footer.replace('&[Picture]', '&G')
        if len(footer) > 255:
            warn("Footer string cannot be longer than Excel's "
                 "limit of 255 characters")
            return
        if options is not None:
            # For backward compatibility allow options to be the margin.
            if not isinstance(options, dict):
                options = {'margin': options}
        else:
            options = {}
        # Copy the user defined options so they aren't modified.
        options = options.copy()
        # For backward compatibility.
        if margin is not None:
            options['margin'] = margin
        # Reset the list in case the function is called more than once.
        self.footer_images = []
        # Each entry is [filename, image_data, position] where the position
        # code is L/C/R (left/center/right) + F (footer).
        if options.get('image_left'):
            self.footer_images.append([options.get('image_left'),
                                       options.get('image_data_left'),
                                       'LF'])
        if options.get('image_center'):
            self.footer_images.append([options.get('image_center'),
                                       options.get('image_data_center'),
                                       'CF'])
        if options.get('image_right'):
            self.footer_images.append([options.get('image_right'),
                                       options.get('image_data_right'),
                                       'RF'])
        # Every '&G' placeholder in the string needs a matching image.
        placeholder_count = footer.count('&G')
        image_count = len(self.footer_images)
        if placeholder_count != image_count:
            warn("Number of footer images (%s) doesn't match placeholder "
                 "count (%s) in string: %s"
                 % (image_count, placeholder_count, footer_orig))
            self.footer_images = []
            return
        if 'align_with_margins' in options:
            self.header_footer_aligns = options['align_with_margins']
        if 'scale_with_doc' in options:
            self.header_footer_scales = options['scale_with_doc']
        self.footer = footer
        self.margin_footer = options.get('margin', 0.3)
        self.header_footer_changed = True
        # Footer images are rendered via the legacy VML drawing layer.
        if image_count:
            self.has_header_vml = True
def repeat_rows(self, first_row, last_row=None):
"""
Set the rows to repeat at the top of each printed page.
Args:
first_row: Start row for range.
last_row: End row for range.
Returns:
Nothing.
"""
if last_row is None:
last_row = first_row
# Convert rows to 1 based.
first_row += 1
last_row += 1
# Create the row range area like: $1:$2.
area = '$%d:$%d' % (first_row, last_row)
# Build up the print titles area "Sheet1!$1:$2"
sheetname = quote_sheetname(self.name)
self.repeat_row_range = sheetname + '!' + area
@convert_column_args
def repeat_columns(self, first_col, last_col=None):
"""
Set the columns to repeat at the left hand side of each printed page.
Args:
first_col: Start column for range.
last_col: End column for range.
Returns:
Nothing.
"""
if last_col is None:
last_col = first_col
# Convert to A notation.
first_col = xl_col_to_name(first_col, 1)
last_col = xl_col_to_name(last_col, 1)
# Create a column range like $C:$D.
area = first_col + ':' + last_col
# Build up the print area range "=Sheet2!$C:$D"
sheetname = quote_sheetname(self.name)
self.repeat_col_range = sheetname + "!" + area
def hide_gridlines(self, option=1):
"""
Set the option to hide gridlines on the screen and the printed page.
Args:
option: 0 : Don't hide gridlines
1 : Hide printed gridlines only
2 : Hide screen and printed gridlines
Returns:
Nothing.
"""
if option == 0:
self.print_gridlines = 1
self.screen_gridlines = 1
self.print_options_changed = True
elif option == 1:
self.print_gridlines = 0
self.screen_gridlines = 1
else:
self.print_gridlines = 0
self.screen_gridlines = 0
def print_row_col_headers(self):
"""
Set the option to print the row and column headers on the printed page.
Args:
None.
Returns:
Nothing.
"""
self.print_headers = True
self.print_options_changed = True
def hide_row_col_headers(self):
"""
Set the option to hide the row and column headers on the worksheet.
Args:
None.
Returns:
Nothing.
"""
self.row_col_headers = True
@convert_range_args
def print_area(self, first_row, first_col, last_row, last_col):
"""
Set the print area in the current worksheet.
Args:
first_row: The first row of the cell range. (zero indexed).
first_col: The first column of the cell range.
last_row: The last row of the cell range. (zero indexed).
last_col: The last column of the cell range.
Returns:
0: Success.
-1: Row or column is out of worksheet bounds.
"""
# Set the print area in the current worksheet.
# Ignore max print area since it is the same as no area for Excel.
if (first_row == 0 and first_col == 0
and last_row == self.xls_rowmax - 1
and last_col == self.xls_colmax - 1):
return
# Build up the print area range "Sheet1!$A$1:$C$13".
area = self._convert_name_area(first_row, first_col,
last_row, last_col)
self.print_area_range = area
def print_across(self):
"""
Set the order in which pages are printed.
Args:
None.
Returns:
Nothing.
"""
self.page_order = 1
self.page_setup_changed = True
def fit_to_pages(self, width, height):
"""
Fit the printed area to a specific number of pages both vertically and
horizontally.
Args:
width: Number of pages horizontally.
height: Number of pages vertically.
Returns:
Nothing.
"""
self.fit_page = 1
self.fit_width = width
self.fit_height = height
self.page_setup_changed = True
def set_start_page(self, start_page):
"""
Set the start page number when printing.
Args:
start_page: Start page number.
Returns:
Nothing.
"""
self.page_start = start_page
def set_print_scale(self, scale):
"""
Set the scale factor for the printed page.
Args:
scale: Print scale. 10 <= scale <= 400.
Returns:
Nothing.
"""
# Confine the scale to Excel's range.
if scale < 10 or scale > 400:
warn("Print scale '%d' outside range: 10 <= scale <= 400" % scale)
return
# Turn off "fit to page" option when print scale is on.
self.fit_page = 0
self.print_scale = int(scale)
self.page_setup_changed = True
def set_h_pagebreaks(self, breaks):
"""
Set the horizontal page breaks on a worksheet.
Args:
breaks: List of rows where the page breaks should be added.
Returns:
Nothing.
"""
self.hbreaks = breaks
def set_v_pagebreaks(self, breaks):
"""
Set the horizontal page breaks on a worksheet.
Args:
breaks: List of columns where the page breaks should be added.
Returns:
Nothing.
"""
self.vbreaks = breaks
def set_vba_name(self, name=None):
"""
Set the VBA name for the worksheet. By default this is the
same as the sheet name: i.e., Sheet1 etc.
Args:
name: The VBA name for the worksheet.
Returns:
Nothing.
"""
if name is not None:
self.vba_codename = name
else:
self.vba_codename = 'Sheet' + str(self.index + 1)
def ignore_errors(self, options=None):
"""
Ignore various Excel errors/warnings in a worksheet for user defined
ranges.
Args:
options: A dict of ignore errors keys with cell range values.
Returns:
0: Success.
-1: Incorrect parameter or option.
"""
if options is None:
return -1
else:
# Copy the user defined options so they aren't modified.
options = options.copy()
# Valid input parameters.
valid_parameters = {
'number_stored_as_text': True,
'eval_error': True,
'formula_differs': True,
'formula_range': True,
'formula_unlocked': True,
'empty_cell_reference': True,
'list_data_validation': True,
'calculated_column': True,
'two_digit_text_year': True,
}
# Check for valid input parameters.
for param_key in options.keys():
if param_key not in valid_parameters:
warn("Unknown parameter '%s' in ignore_errors()" % param_key)
return -1
self.ignored_errors = options
return 0
###########################################################################
#
# Private API.
#
###########################################################################
    def _initialize(self, init_data):
        # Copy the workbook-level configuration passed in via the
        # `init_data` dict when the worksheet is created.
        self.name = init_data['name']
        self.index = init_data['index']
        self.str_table = init_data['str_table']
        self.worksheet_meta = init_data['worksheet_meta']
        self.constant_memory = init_data['constant_memory']
        self.tmpdir = init_data['tmpdir']
        self.date_1904 = init_data['date_1904']
        self.strings_to_numbers = init_data['strings_to_numbers']
        self.strings_to_formulas = init_data['strings_to_formulas']
        self.strings_to_urls = init_data['strings_to_urls']
        self.nan_inf_to_errors = init_data['nan_inf_to_errors']
        self.default_date_format = init_data['default_date_format']
        self.default_url_format = init_data['default_url_format']
        self.excel2003_style = init_data['excel2003_style']
        self.remove_timezone = init_data['remove_timezone']
        self.max_url_length = init_data['max_url_length']
        # Excel 2003 compatibility mode uses the older default row height,
        # page margins and header/footer alignment.
        if self.excel2003_style:
            self.original_row_height = 12.75
            self.default_row_height = 12.75
            self.default_row_pixels = 17
            self.margin_left = 0.75
            self.margin_right = 0.75
            self.margin_top = 1
            self.margin_bottom = 1
            self.margin_header = 0.5
            self.margin_footer = 0.5
            self.header_footer_aligns = False
        # Open a temp filehandle to store row data in constant_memory mode.
        if self.constant_memory:
            # This is sub-optimal but we need to create a temp file
            # with utf8 encoding in Python < 3.
            (fd, filename) = tempfile.mkstemp(dir=self.tmpdir)
            os.close(fd)
            self.row_data_filename = filename
            self.row_data_fh = codecs.open(filename, 'w+', 'utf-8')
            # Set as the worksheet filehandle until the file is assembled.
            self.fh = self.row_data_fh
    def _assemble_xml_file(self):
        # Assemble and write the XML file.
        # NOTE(review): the element write order below appears to follow the
        # sequence required by the worksheet part of the XLSX file format —
        # confirm against the spec before reordering any of these calls.
        # Write the XML declaration.
        self._xml_declaration()
        # Write the root worksheet element.
        self._write_worksheet()
        # Write the worksheet properties.
        self._write_sheet_pr()
        # Write the worksheet dimensions.
        self._write_dimension()
        # Write the sheet view properties.
        self._write_sheet_views()
        # Write the sheet format properties.
        self._write_sheet_format_pr()
        # Write the sheet column info.
        self._write_cols()
        # Write the worksheet data such as rows columns and cells.
        if not self.constant_memory:
            self._write_sheet_data()
        else:
            self._write_optimized_sheet_data()
        # Write the sheetProtection element.
        self._write_sheet_protection()
        # Write the protectedRanges element.
        self._write_protected_ranges()
        # Write the phoneticPr element.
        if self.excel2003_style:
            self._write_phonetic_pr()
        # Write the autoFilter element.
        self._write_auto_filter()
        # Write the mergeCells element.
        self._write_merge_cells()
        # Write the conditional formats.
        self._write_conditional_formats()
        # Write the dataValidations element.
        self._write_data_validations()
        # Write the hyperlink element.
        self._write_hyperlinks()
        # Write the printOptions element.
        self._write_print_options()
        # Write the worksheet page_margins.
        self._write_page_margins()
        # Write the worksheet page setup.
        self._write_page_setup()
        # Write the headerFooter element.
        self._write_header_footer()
        # Write the rowBreaks element.
        self._write_row_breaks()
        # Write the colBreaks element.
        self._write_col_breaks()
        # Write the ignoredErrors element.
        self._write_ignored_errors()
        # Write the drawing element.
        self._write_drawings()
        # Write the legacyDrawing element.
        self._write_legacy_drawing()
        # Write the legacyDrawingHF element.
        self._write_legacy_drawing_hf()
        # Write the tableParts element.
        self._write_table_parts()
        # Write the extLst elements.
        self._write_ext_list()
        # Close the worksheet tag.
        self._xml_end_tag('worksheet')
        # Close the file.
        self._xml_close()
def _check_dimensions(self, row, col, ignore_row=False, ignore_col=False):
# Check that row and col are valid and store the max and min
# values for use in other methods/elements. The ignore_row /
# ignore_col flags is used to indicate that we wish to perform
# the dimension check without storing the value. The ignore
# flags are use by set_row() and data_validate.
# Check that the row/col are within the worksheet bounds.
if row < 0 or col < 0:
return -1
if row >= self.xls_rowmax or col >= self.xls_colmax:
return -1
# In constant_memory mode we don't change dimensions for rows
# that are already written.
if not ignore_row and not ignore_col and self.constant_memory:
if row < self.previous_row:
return -2
if not ignore_row:
if self.dim_rowmin is None or row < self.dim_rowmin:
self.dim_rowmin = row
if self.dim_rowmax is None or row > self.dim_rowmax:
self.dim_rowmax = row
if not ignore_col:
if self.dim_colmin is None or col < self.dim_colmin:
self.dim_colmin = col
if self.dim_colmax is None or col > self.dim_colmax:
self.dim_colmax = col
return 0
    def _convert_date_time(self, dt_obj):
        # Convert a datetime object to an Excel serial date and time.
        # Delegates to the shared utility, honoring the workbook's 1904
        # epoch setting and the remove_timezone option.
        return datetime_to_excel_datetime(dt_obj,
                                          self.date_1904,
                                          self.remove_timezone)
def _convert_name_area(self, row_num_1, col_num_1, row_num_2, col_num_2):
# Convert zero indexed rows and columns to the format required by
# worksheet named ranges, eg, "Sheet1!$A$1:$C$13".
range1 = ''
range2 = ''
area = ''
row_col_only = 0
# Convert to A1 notation.
col_char_1 = xl_col_to_name(col_num_1, 1)
col_char_2 = xl_col_to_name(col_num_2, 1)
row_char_1 = '$' + str(row_num_1 + 1)
row_char_2 = '$' + str(row_num_2 + 1)
# We need to handle special cases that refer to rows or columns only.
if row_num_1 == 0 and row_num_2 == self.xls_rowmax - 1:
range1 = col_char_1
range2 = col_char_2
row_col_only = 1
elif col_num_1 == 0 and col_num_2 == self.xls_colmax - 1:
range1 = row_char_1
range2 = row_char_2
row_col_only = 1
else:
range1 = col_char_1 + row_char_1
range2 = col_char_2 + row_char_2
# A repeated range is only written once (if it isn't a special case).
if range1 == range2 and not row_col_only:
area = range1
else:
area = range1 + ':' + range2
# Build up the print area range "Sheet1!$A$1:$C$13".
sheetname = quote_sheetname(self.name)
area = sheetname + "!" + area
return area
def _sort_pagebreaks(self, breaks):
# This is an internal method used to filter elements of a list of
# pagebreaks used in the _store_hbreak() and _store_vbreak() methods.
# It:
# 1. Removes duplicate entries from the list.
# 2. Sorts the list.
# 3. Removes 0 from the list if present.
if not breaks:
return
breaks_set = set(breaks)
if 0 in breaks_set:
breaks_set.remove(0)
breaks_list = list(breaks_set)
breaks_list.sort()
# The Excel 2007 specification says that the maximum number of page
# breaks is 1026. However, in practice it is actually 1023.
max_num_breaks = 1023
if len(breaks_list) > max_num_breaks:
breaks_list = breaks_list[:max_num_breaks]
return breaks_list
def _extract_filter_tokens(self, expression):
# Extract the tokens from the filter expression. The tokens are mainly
# non-whitespace groups. The only tricky part is to extract string
# tokens that contain whitespace and/or quoted double quotes (Excel's
# escaped quotes).
#
# Examples: 'x < 2000'
# 'x > 2000 and x < 5000'
# 'x = "foo"'
# 'x = "foo bar"'
# 'x = "foo "" bar"'
#
if not expression:
return []
token_re = re.compile(r'"(?:[^"]|"")*"|\S+')
tokens = token_re.findall(expression)
new_tokens = []
# Remove single leading and trailing quotes and un-escape other quotes.
for token in tokens:
if token.startswith('"'):
token = token[1:]
if token.endswith('"'):
token = token[:-1]
token = token.replace('""', '"')
new_tokens.append(token)
return new_tokens
def _parse_filter_expression(self, expression, tokens):
# Converts the tokens of a possibly conditional expression into 1 or 2
# sub expressions for further parsing.
#
# Examples:
# ('x', '==', 2000) -> exp1
# ('x', '>', 2000, 'and', 'x', '<', 5000) -> exp1 and exp2
if len(tokens) == 7:
# The number of tokens will be either 3 (for 1 expression)
# or 7 (for 2 expressions).
conditional = tokens[3]
if re.match('(and|&&)', conditional):
conditional = 0
elif re.match(r'(or|\|\|)', conditional):
conditional = 1
else:
warn("Token '%s' is not a valid conditional "
"in filter expression '%s'" % (conditional, expression))
expression_1 = self._parse_filter_tokens(expression, tokens[0:3])
expression_2 = self._parse_filter_tokens(expression, tokens[4:7])
return expression_1 + [conditional] + expression_2
else:
return self._parse_filter_tokens(expression, tokens)
def _parse_filter_tokens(self, expression, tokens):
# Parse the 3 tokens of a filter expression and return the operator
# and token. The use of numbers instead of operators is a legacy of
# Spreadsheet::WriteExcel.
operators = {
'==': 2,
'=': 2,
'=~': 2,
'eq': 2,
'!=': 5,
'!~': 5,
'ne': 5,
'<>': 5,
'<': 1,
'<=': 3,
'>': 4,
'>=': 6,
}
operator = operators.get(tokens[1], None)
token = tokens[2]
# Special handling of "Top" filter expressions.
if re.match('top|bottom', tokens[0].lower()):
value = int(tokens[1])
if value < 1 or value > 500:
warn("The value '%d' in expression '%s' "
"must be in the range 1 to 500" % (value, expression))
token = token.lower()
if token != 'items' and token != '%':
warn("The type '%s' in expression '%s' "
"must be either 'items' or '%'" % (token, expression))
if tokens[0].lower() == 'top':
operator = 30
else:
operator = 32
if tokens[2] == '%':
operator += 1
token = str(value)
if not operator and tokens[0]:
warn("Token '%s' is not a valid operator "
"in filter expression '%s'" % (token[0], expression))
# Special handling for Blanks/NonBlanks.
if re.match('blanks|nonblanks', token.lower()):
# Only allow Equals or NotEqual in this context.
if operator != 2 and operator != 5:
warn("The operator '%s' in expression '%s' "
"is not valid in relation to Blanks/NonBlanks'"
% (tokens[1], expression))
token = token.lower()
# The operator should always be 2 (=) to flag a "simple" equality
# in the binary record. Therefore we convert <> to =.
if token == 'blanks':
if operator == 5:
token = ' '
else:
if operator == 5:
operator = 2
token = 'blanks'
else:
operator = 5
token = ' '
# if the string token contains an Excel match character then change the
# operator type to indicate a non "simple" equality.
if operator == 2 and re.search('[*?]', token):
operator = 22
return [operator, token]
def _encode_password(self, plaintext):
# Encode the worksheet protection "password" as a simple hash.
# Based on the algorithm by Daniel Rentz of OpenOffice.
i = 0
count = len(plaintext)
digits = []
for char in plaintext:
i += 1
char = ord(char) << i
low_15 = char & 0x7fff
high_15 = char & 0x7fff << 15
high_15 >>= 15
char = low_15 | high_15
digits.append(char)
password_hash = 0x0000
for digit in digits:
password_hash ^= digit
password_hash ^= count
password_hash ^= 0xCE4B
return "%X" % password_hash
    def _prepare_image(self, index, image_id, drawing_id, width, height,
                       name, image_type, x_dpi, y_dpi, digest):
        # Set up images/drawings.
        #
        # Converts the stored user options for image `index` into a drawing
        # object on the worksheet's Drawing, registers the image and any
        # hyperlink relationships (deduplicated via self.drawing_rels), and
        # records the external links needed for the .rels files.
        # `digest` identifies the image content so duplicates share one rel.
        drawing_type = 2
        (row, col, _, x_offset, y_offset,
         x_scale, y_scale, url, tip, anchor, _,
         description, decorative) = self.images[index]
        # Apply the user scaling factors.
        width *= x_scale
        height *= y_scale
        # Scale by non 96dpi resolutions.
        width *= 96.0 / x_dpi
        height *= 96.0 / y_dpi
        dimensions = self._position_object_emus(col, row, x_offset, y_offset,
                                                width, height, anchor)
        # Convert from pixels to emus.
        width = int(0.5 + (width * 9525))
        height = int(0.5 + (height * 9525))
        # Create a Drawing obj to use with worksheet unless one already exists.
        if not self.drawing:
            drawing = Drawing()
            drawing.embedded = 1
            self.drawing = drawing
            self.external_drawing_links.append(['/drawing',
                                                '../drawings/drawing'
                                                + str(drawing_id)
                                                + '.xml', None])
        else:
            drawing = self.drawing
        drawing_object = drawing._add_drawing_object()
        drawing_object['type'] = drawing_type
        drawing_object['dimensions'] = dimensions
        drawing_object['width'] = width
        drawing_object['height'] = height
        drawing_object['description'] = name
        drawing_object['shape'] = None
        drawing_object['anchor'] = anchor
        drawing_object['rel_index'] = 0
        drawing_object['url_rel_index'] = 0
        drawing_object['tip'] = tip
        drawing_object['decorative'] = decorative
        # A user supplied description overrides the filename-derived one.
        if description is not None:
            drawing_object['description'] = description
        if url:
            target = None
            rel_type = '/hyperlink'
            target_mode = 'External'
            # Normalize the supported URL forms: web, mailto, external file
            # paths, and internal sheet references.
            if re.match('(ftp|http)s?://', url):
                target = self._escape_url(url)
            if re.match('^mailto:', url):
                target = self._escape_url(url)
            if re.match('external:', url):
                target = url.replace('external:', '')
                target = self._escape_url(target)
                # Additional escape not required in worksheet hyperlinks.
                target = target.replace('#', '%23')
                if re.match(r'\w:', target) or re.match(r'\\', target):
                    target = 'file:///' + target
                else:
                    target = re.sub(r'\\', '/', target)
            if re.match('internal:', url):
                target = url.replace('internal:', '#')
                target_mode = None
            if target is not None:
                if len(target) > self.max_url_length:
                    # Over-long URLs are dropped with a warning, not an error.
                    warn("Ignoring URL '%s' with link and/or anchor > %d "
                         "characters since it exceeds Excel's limit for URLS" %
                         (force_unicode(url), self.max_url_length))
                else:
                    if not self.drawing_rels.get(url):
                        self.drawing_links.append([rel_type, target,
                                                   target_mode])
                    drawing_object['url_rel_index'] = \
                        self._get_drawing_rel_index(url)
        # Register the image itself, shared between identical images.
        if not self.drawing_rels.get(digest):
            self.drawing_links.append(['/image',
                                       '../media/image'
                                       + str(image_id) + '.'
                                       + image_type])
        drawing_object['rel_index'] = self._get_drawing_rel_index(digest)
    def _prepare_shape(self, index, drawing_id):
        # Set up shapes/drawings.
        #
        # Converts the stored user options for textbox shape `index` into a
        # drawing object (creating the worksheet Drawing on first use) and
        # registers any hyperlink relationship for the shape.
        drawing_type = 3
        (row, col, x_offset, y_offset,
         x_scale, y_scale, text, anchor, options) = self.shapes[index]
        # Default textbox size is 3 columns wide by 6 rows high.
        width = options.get('width', self.default_col_pixels * 3)
        height = options.get('height', self.default_row_pixels * 6)
        width *= x_scale
        height *= y_scale
        dimensions = self._position_object_emus(col, row, x_offset, y_offset,
                                                width, height, anchor)
        # Convert from pixels to emus.
        width = int(0.5 + (width * 9525))
        height = int(0.5 + (height * 9525))
        # Create a Drawing obj to use with worksheet unless one already exists.
        if not self.drawing:
            drawing = Drawing()
            drawing.embedded = 1
            self.drawing = drawing
            self.external_drawing_links.append(['/drawing',
                                                '../drawings/drawing'
                                                + str(drawing_id)
                                                + '.xml', None])
        else:
            drawing = self.drawing
        shape = Shape('rect', 'TextBox', options)
        shape.text = text
        drawing_object = drawing._add_drawing_object()
        drawing_object['type'] = drawing_type
        drawing_object['dimensions'] = dimensions
        drawing_object['width'] = width
        drawing_object['height'] = height
        drawing_object['description'] = None
        drawing_object['shape'] = shape
        drawing_object['anchor'] = anchor
        drawing_object['rel_index'] = 0
        drawing_object['url_rel_index'] = 0
        drawing_object['tip'] = options.get('tip')
        drawing_object['decorative'] = 0
        url = options.get('url', None)
        if url:
            target = None
            rel_type = '/hyperlink'
            target_mode = 'External'
            if re.match('(ftp|http)s?://', url):
                target = self._escape_url(url)
            if re.match('^mailto:', url):
                target = self._escape_url(url)
            if re.match('external:', url):
                target = url.replace('external:', 'file:///')
                target = self._escape_url(target)
                # Additional escape not required in worksheet hyperlinks.
                target = target.replace('#', '%23')
            if re.match('internal:', url):
                target = url.replace('internal:', '#')
                target_mode = None
            if target is not None:
                if len(target) > self.max_url_length:
                    # Over-long URLs are dropped with a warning, not an error.
                    warn("Ignoring URL '%s' with link and/or anchor > %d "
                         "characters since it exceeds Excel's limit for URLS" %
                         (force_unicode(url), self.max_url_length))
                else:
                    if not self.drawing_rels.get(url):
                        self.drawing_links.append([rel_type, target,
                                                   target_mode])
                    drawing_object['url_rel_index'] = \
                        self._get_drawing_rel_index(url)
    def _prepare_header_image(self, image_id, width, height, name, image_type,
                              position, x_dpi, y_dpi, digest):
        # Set up an image without a drawing object for header/footer images.
        # Registers the vmlDrawing relationship (deduplicated by the image
        # content digest) and queues the metadata used to write the VML file.
        # Strip the extension from the filename.
        name = re.sub(r'\..*$', '', name)
        if not self.vml_drawing_rels.get(digest):
            self.vml_drawing_links.append(['/image',
                                           '../media/image'
                                           + str(image_id) + '.'
                                           + image_type])
        # Duplicate images share the same relationship id.
        ref_id = self._get_vml_drawing_rel_index(digest)
        self.header_images_list.append([width, height, name, position,
                                        x_dpi, y_dpi, ref_id])
    def _prepare_chart(self, index, chart_id, drawing_id):
        # Set up chart/drawings.
        #
        # Converts the stored user options for chart `index` into a drawing
        # object (creating the worksheet Drawing on first use) and records
        # the external chart link for the .rels file.
        drawing_type = 1
        (row, col, chart, x_offset, y_offset, x_scale, y_scale, anchor) = \
            self.charts[index]
        chart.id = chart_id - 1
        # Use user specified dimensions, if any.
        width = int(0.5 + (chart.width * x_scale))
        height = int(0.5 + (chart.height * y_scale))
        dimensions = self._position_object_emus(col, row, x_offset, y_offset,
                                                width, height, anchor)
        # Set the chart name for the embedded object if it has been specified.
        name = chart.chart_name
        # Create a Drawing obj to use with worksheet unless one already exists.
        if not self.drawing:
            drawing = Drawing()
            drawing.embedded = 1
            self.drawing = drawing
            self.external_drawing_links.append(['/drawing',
                                                '../drawings/drawing'
                                                + str(drawing_id)
                                                + '.xml'])
        else:
            drawing = self.drawing
        drawing_object = drawing._add_drawing_object()
        drawing_object['type'] = drawing_type
        drawing_object['dimensions'] = dimensions
        drawing_object['width'] = width
        drawing_object['height'] = height
        drawing_object['description'] = name
        drawing_object['shape'] = None
        drawing_object['anchor'] = anchor
        drawing_object['rel_index'] = self._get_drawing_rel_index()
        drawing_object['url_rel_index'] = 0
        drawing_object['tip'] = None
        drawing_object['decorative'] = 0
        self.drawing_links.append(['/chart',
                                   '../charts/chart'
                                   + str(chart_id)
                                   + '.xml'])
def _position_object_emus(self, col_start, row_start, x1, y1,
width, height, anchor):
# Calculate the vertices that define the position of a graphical
# object within the worksheet in EMUs.
#
# The vertices are expressed as English Metric Units (EMUs). There are
# 12,700 EMUs per point. Therefore, 12,700 * 3 /4 = 9,525 EMUs per
# pixel
(col_start, row_start, x1, y1,
col_end, row_end, x2, y2, x_abs, y_abs) = \
self._position_object_pixels(col_start, row_start, x1, y1,
width, height, anchor)
# Convert the pixel values to EMUs. See above.
x1 = int(0.5 + 9525 * x1)
y1 = int(0.5 + 9525 * y1)
x2 = int(0.5 + 9525 * x2)
y2 = int(0.5 + 9525 * y2)
x_abs = int(0.5 + 9525 * x_abs)
y_abs = int(0.5 + 9525 * y_abs)
return (col_start, row_start, x1, y1, col_end, row_end, x2, y2,
x_abs, y_abs)
# Calculate the vertices that define the position of a graphical object
# within the worksheet in pixels.
#
# +------------+------------+
# | A | B |
# +-----+------------+------------+
# | |(x1,y1) | |
# | 1 |(A1)._______|______ |
# | | | | |
# | | | | |
# +-----+----| OBJECT |-----+
# | | | | |
# | 2 | |______________. |
# | | | (B2)|
# | | | (x2,y2)|
# +---- +------------+------------+
#
# Example of an object that covers some of the area from cell A1 to B2.
#
# Based on the width and height of the object we need to calculate 8 vars:
#
# col_start, row_start, col_end, row_end, x1, y1, x2, y2.
#
# We also calculate the absolute x and y position of the top left vertex of
# the object. This is required for images.
#
# The width and height of the cells that the object occupies can be
# variable and have to be taken into account.
#
# The values of col_start and row_start are passed in from the calling
# function. The values of col_end and row_end are calculated by
# subtracting the width and height of the object from the width and
# height of the underlying cells.
#
    def _position_object_pixels(self, col_start, row_start, x1, y1,
                                width, height, anchor):
        # Calculate the pixel vertices of a graphical object (see the large
        # comment block above for the geometry). Returns a list:
        # [col_start, row_start, x1, y1, col_end, row_end, x2, y2,
        #  x_abs, y_abs].
        #
        # col_start # Col containing upper left corner of object.
        # x1 # Distance to left side of object.
        #
        # row_start # Row containing top left corner of object.
        # y1 # Distance to top of object.
        #
        # col_end # Col containing lower right corner of object.
        # x2 # Distance to right side of object.
        #
        # row_end # Row containing bottom right corner of object.
        # y2 # Distance to bottom of object.
        #
        # width # Width of object frame.
        # height # Height of object frame.
        #
        # x_abs # Absolute distance to left side of object.
        # y_abs # Absolute distance to top side of object.
        x_abs = 0
        y_abs = 0
        # Adjust start column for negative offsets.
        while x1 < 0 and col_start > 0:
            x1 += self._size_col(col_start - 1)
            col_start -= 1
        # Adjust start row for negative offsets.
        while y1 < 0 and row_start > 0:
            y1 += self._size_row(row_start - 1)
            row_start -= 1
        # Ensure that the image isn't shifted off the page at top left.
        if x1 < 0:
            x1 = 0
        if y1 < 0:
            y1 = 0
        # Calculate the absolute x offset of the top-left vertex.
        if self.col_size_changed:
            for col_id in range(col_start):
                x_abs += self._size_col(col_id)
        else:
            # Optimization for when the column widths haven't changed.
            x_abs += self.default_col_pixels * col_start
        x_abs += x1
        # Calculate the absolute y offset of the top-left vertex.
        if self.row_size_changed:
            for row_id in range(row_start):
                y_abs += self._size_row(row_id)
        else:
            # Optimization for when the row heights haven't changed.
            y_abs += self.default_row_pixels * row_start
        y_abs += y1
        # Adjust start column for offsets that are greater than the col width.
        while x1 >= self._size_col(col_start, anchor):
            x1 -= self._size_col(col_start)
            col_start += 1
        # Adjust start row for offsets that are greater than the row height.
        while y1 >= self._size_row(row_start, anchor):
            y1 -= self._size_row(row_start)
            row_start += 1
        # Initialize end cell to the same as the start cell.
        col_end = col_start
        row_end = row_start
        # Don't offset the image in the cell if the row/col is hidden.
        if self._size_col(col_start, anchor) > 0:
            width = width + x1
        if self._size_row(row_start, anchor) > 0:
            height = height + y1
        # Subtract the underlying cell widths to find end cell of the object.
        while width >= self._size_col(col_end, anchor):
            width -= self._size_col(col_end, anchor)
            col_end += 1
        # Subtract the underlying cell heights to find end cell of the object.
        while height >= self._size_row(row_end, anchor):
            height -= self._size_row(row_end, anchor)
            row_end += 1
        # The end vertices are whatever is left from the width and height.
        x2 = width
        y2 = height
        return ([col_start, row_start, x1, y1, col_end, row_end, x2, y2,
                 x_abs, y_abs])
def _size_col(self, col, anchor=0):
# Convert the width of a cell from character units to pixels. Excel
# rounds the column width to the nearest pixel. If the width hasn't
# been set by the user we use the default value. A hidden column is
# treated as having a width of zero unless it has the special
# "object_position" of 4 (size with cells).
max_digit_width = 7 # For Calabri 11.
padding = 5
pixels = 0
# Look up the cell value to see if it has been changed.
if col in self.col_sizes:
width = self.col_sizes[col][0]
hidden = self.col_sizes[col][1]
# Convert to pixels.
if hidden and anchor != 4:
pixels = 0
elif width < 1:
pixels = int(width * (max_digit_width + padding) + 0.5)
else:
pixels = int(width * max_digit_width + 0.5) + padding
else:
pixels = self.default_col_pixels
return pixels
def _size_row(self, row, anchor=0):
# Convert the height of a cell from character units to pixels. If the
# height hasn't been set by the user we use the default value. A
# hidden row is treated as having a height of zero unless it has the
# special "object_position" of 4 (size with cells).
pixels = 0
# Look up the cell value to see if it has been changed
if row in self.row_sizes:
height = self.row_sizes[row][0]
hidden = self.row_sizes[row][1]
if hidden and anchor != 4:
pixels = 0
else:
pixels = int(4.0 / 3.0 * height)
else:
pixels = int(4.0 / 3.0 * self.default_row_height)
return pixels
def _pixels_to_width(self, pixels):
# Convert the width of a cell from pixels to character units.
max_digit_width = 7.0 # For Calabri 11.
padding = 5.0
if pixels <= 12:
width = pixels / (max_digit_width + padding)
else:
width = (pixels - padding) / max_digit_width
return width
def _pixels_to_height(self, pixels):
# Convert the height of a cell from pixels to character units.
return 0.75 * pixels
    def _comment_params(self, row, col, string, options):
        # This method handles the additional optional parameters to
        # write_comment() as well as calculating the comment object
        # position and vertices.
        #
        # Returns a list:
        # [row, col, string, author, visible, color,
        #  font_name, font_size, font_family, vertices]
        # where vertices is the pixel-position list from
        # _position_object_pixels() with the width and height appended.
        default_width = 128
        default_height = 74
        anchor = 0
        params = {
            'author': None,
            'color': '#ffffe1',
            'start_cell': None,
            'start_col': None,
            'start_row': None,
            'visible': None,
            'width': default_width,
            'height': default_height,
            'x_offset': None,
            'x_scale': 1,
            'y_offset': None,
            'y_scale': 1,
            'font_name': 'Tahoma',
            'font_size': 8,
            'font_family': 2,
        }
        # Overwrite the defaults with any user supplied values. Incorrect or
        # misspelled parameters are silently ignored.
        for key in options.keys():
            params[key] = options[key]
        # Ensure that a width and height have been set.
        if not params['width']:
            params['width'] = default_width
        if not params['height']:
            params['height'] = default_height
        # Set the comment background color.
        params['color'] = xl_color(params['color']).lower()
        # Convert from Excel XML style color to XML html style color.
        params['color'] = params['color'].replace('ff', '#', 1)
        # Convert a cell reference to a row and column.
        if params['start_cell'] is not None:
            (start_row, start_col) = xl_cell_to_rowcol(params['start_cell'])
            params['start_row'] = start_row
            params['start_col'] = start_col
        # Set the default start cell and offsets for the comment. These are
        # generally fixed in relation to the parent cell. However there are
        # some edge cases for cells at the, er, edges.
        row_max = self.xls_rowmax
        col_max = self.xls_colmax
        # The comment box normally starts one row above and one column to
        # the right of the parent cell; the last few rows/columns of the
        # sheet pull the box back on-sheet instead.
        if params['start_row'] is None:
            if row == 0:
                params['start_row'] = 0
            elif row == row_max - 3:
                params['start_row'] = row_max - 7
            elif row == row_max - 2:
                params['start_row'] = row_max - 6
            elif row == row_max - 1:
                params['start_row'] = row_max - 5
            else:
                params['start_row'] = row - 1
        if params['y_offset'] is None:
            if row == 0:
                params['y_offset'] = 2
            elif row == row_max - 3:
                params['y_offset'] = 16
            elif row == row_max - 2:
                params['y_offset'] = 16
            elif row == row_max - 1:
                params['y_offset'] = 14
            else:
                params['y_offset'] = 10
        if params['start_col'] is None:
            if col == col_max - 3:
                params['start_col'] = col_max - 6
            elif col == col_max - 2:
                params['start_col'] = col_max - 5
            elif col == col_max - 1:
                params['start_col'] = col_max - 4
            else:
                params['start_col'] = col + 1
        if params['x_offset'] is None:
            if col == col_max - 3:
                params['x_offset'] = 49
            elif col == col_max - 2:
                params['x_offset'] = 49
            elif col == col_max - 1:
                params['x_offset'] = 49
            else:
                params['x_offset'] = 15
        # Scale the size of the comment box if required.
        if params['x_scale']:
            params['width'] = params['width'] * params['x_scale']
        if params['y_scale']:
            params['height'] = params['height'] * params['y_scale']
        # Round the dimensions to the nearest pixel.
        params['width'] = int(0.5 + params['width'])
        params['height'] = int(0.5 + params['height'])
        # Calculate the positions of the comment object.
        vertices = self._position_object_pixels(
            params['start_col'], params['start_row'], params['x_offset'],
            params['y_offset'], params['width'], params['height'], anchor)
        # Add the width and height for VML.
        vertices.append(params['width'])
        vertices.append(params['height'])
        return ([row, col, string, params['author'],
                 params['visible'], params['color'],
                 params['font_name'], params['font_size'],
                 params['font_family']] + [vertices])
    def _button_params(self, row, col, options):
        # This method handles the parameters passed to insert_button() as well
        # as calculating the comment object position and vertices.
        #
        # Returns a button dict with 'row', 'col', 'font' (caption),
        # 'macro' and 'vertices' keys. The button number (used for the
        # default caption and macro name) is derived from the count of
        # buttons already added to this worksheet.
        default_height = self.default_row_pixels
        default_width = self.default_col_pixels
        anchor = 0
        button_number = 1 + len(self.buttons_list)
        button = {'row': row, 'col': col, 'font': {}}
        params = {}
        # Overwrite the defaults with any user supplied values. Incorrect or
        # misspelled parameters are silently ignored.
        for key in options.keys():
            params[key] = options[key]
        # Set the button caption.
        caption = params.get('caption')
        # Set a default caption if none was specified by user.
        if caption is None:
            caption = 'Button %d' % button_number
        button['font']['caption'] = caption
        # Set the macro name.
        if params.get('macro'):
            button['macro'] = '[0]!' + params['macro']
        else:
            button['macro'] = '[0]!Button%d_Click' % button_number
        # Ensure that a width and height have been set.
        params['width'] = params.get('width', default_width)
        params['height'] = params.get('height', default_height)
        # Set the x/y offsets.
        params['x_offset'] = params.get('x_offset', 0)
        params['y_offset'] = params.get('y_offset', 0)
        # Scale the size of the button if required.
        params['width'] = params['width'] * params.get('x_scale', 1)
        params['height'] = params['height'] * params.get('y_scale', 1)
        # Round the dimensions to the nearest pixel.
        params['width'] = int(0.5 + params['width'])
        params['height'] = int(0.5 + params['height'])
        params['start_row'] = row
        params['start_col'] = col
        # Calculate the positions of the button object.
        vertices = self._position_object_pixels(
            params['start_col'], params['start_row'], params['x_offset'],
            params['y_offset'], params['width'], params['height'], anchor)
        # Add the width and height for VML.
        vertices.append(params['width'])
        vertices.append(params['height'])
        button['vertices'] = vertices
        return button
    def _prepare_vml_objects(self, vml_data_id, vml_shape_id, vml_drawing_id,
                             comment_id):
        # Prepare the VML objects (comments and buttons) for the worksheet:
        # expand the stored per-cell comment options into full parameter
        # lists, apply sheet-level visibility/author defaults, and set up
        # the external vmlDrawing/comments links. Returns the comment count.
        comments = []
        # Sort the comments into row/column order for easier comparison
        # testing and set the external links for comments and buttons.
        row_nums = sorted(self.comments.keys())
        for row in row_nums:
            col_nums = sorted(self.comments[row].keys())
            for col in col_nums:
                user_options = self.comments[row][col]
                params = self._comment_params(*user_options)
                self.comments[row][col] = params
                # Set comment visibility if required and not user defined.
                # Index 4 is the 'visible' field of the params list.
                if self.comments_visible:
                    if self.comments[row][col][4] is None:
                        self.comments[row][col][4] = 1
                # Set comment author if not already user defined.
                # Index 3 is the 'author' field of the params list.
                if self.comments[row][col][3] is None:
                    self.comments[row][col][3] = self.comments_author
                comments.append(self.comments[row][col])
        self.external_vml_links.append(['/vmlDrawing',
                                        '../drawings/vmlDrawing'
                                        + str(vml_drawing_id)
                                        + '.vml'])
        if self.has_comments:
            self.comments_list = comments
            self.external_comment_links.append(['/comments',
                                                '../comments'
                                                + str(comment_id)
                                                + '.xml'])
        count = len(comments)
        start_data_id = vml_data_id
        # The VML o:idmap data id contains a comma separated range when there
        # is more than one 1024 block of comments, like this: data="1,2".
        for i in range(int(count / 1024)):
            vml_data_id = '%s,%d' % (vml_data_id, start_data_id + i + 1)
        self.vml_data_id = vml_data_id
        self.vml_shape_id = vml_shape_id
        return count
def _prepare_header_vml_objects(self, vml_header_id, vml_drawing_id):
# Set up external linkage for VML header/footer images.
self.vml_header_id = vml_header_id
self.external_vml_links.append(['/vmlDrawing',
'../drawings/vmlDrawing'
+ str(vml_drawing_id) + '.vml'])
def _prepare_tables(self, table_id, seen):
# Set the table ids for the worksheet tables.
for table in self.tables:
table['id'] = table_id
if table.get('name') is None:
# Set a default name.
table['name'] = 'Table' + str(table_id)
# Check for duplicate table names.
name = table['name'].lower()
if name in seen:
raise DuplicateTableName(
"Duplicate name '%s' used in worksheet.add_table()." %
table['name'])
else:
seen[name] = True
# Store the link used for the rels file.
self.external_table_links.append(['/table',
'../tables/table'
+ str(table_id)
+ '.xml'])
table_id += 1
def _table_function_to_formula(self, function, col_name):
# Convert a table total function to a worksheet formula.
formula = ''
# Escape special characters, as required by Excel.
col_name = re.sub(r"'", "''", col_name)
col_name = re.sub(r"#", "'#", col_name)
col_name = re.sub(r"]", "']", col_name)
col_name = re.sub(r"\[", "'[", col_name)
subtotals = {
'average': 101,
'countNums': 102,
'count': 103,
'max': 104,
'min': 105,
'stdDev': 107,
'sum': 109,
'var': 110,
}
if function in subtotals:
func_num = subtotals[function]
formula = "SUBTOTAL(%s,[%s])" % (func_num, col_name)
else:
warn("Unsupported function '%s' in add_table()" % function)
return formula
def _set_spark_color(self, sparkline, options, user_color):
# Set the sparkline color.
if user_color not in options:
return
sparkline[user_color] = {'rgb': xl_color(options[user_color])}
    def _get_range_data(self, row_start, col_start, row_end, col_end):
        # Returns a range of data from the worksheet _table to be used in
        # chart cached data. Strings are returned as SST ids and decoded
        # in the workbook. Return None for data that doesn't exist since
        # Excel can chart series with data missing.
        #
        # Not available in constant_memory mode, where the table rows have
        # already been flushed to disk.
        if self.constant_memory:
            return ()
        data = []
        # Iterate through the table data.
        for row_num in range(row_start, row_end + 1):
            # Store None if row doesn't exist.
            if row_num not in self.table:
                data.append(None)
                continue
            for col_num in range(col_start, col_end + 1):
                if col_num in self.table[row_num]:
                    cell = self.table[row_num][col_num]
                    # Dispatch on the namedtuple-like cell class name.
                    type_cell_name = type(cell).__name__
                    if type_cell_name == 'Number':
                        # Return a number with Excel's precision.
                        data.append("%.16g" % cell.number)
                    elif type_cell_name == 'String':
                        # Return a string from it's shared string index.
                        index = cell.string
                        string = self.str_table._get_shared_string(index)
                        data.append(string)
                    elif (type_cell_name == 'Formula'
                            or type_cell_name == 'ArrayFormula'):
                        # Return the formula value.
                        value = cell.value
                        if value is None:
                            value = 0
                        data.append(value)
                    elif type_cell_name == 'Blank':
                        # Return a empty cell.
                        data.append('')
                else:
                    # Store None if column doesn't exist.
                    data.append(None)
        return data
def _csv_join(self, *items):
# Create a csv string for use with data validation formulas and lists.
# Convert non string types to string.
items = [str(item) if not isinstance(item, str_types) else item
for item in items]
return ','.join(items)
def _escape_url(self, url):
# Don't escape URL if it looks already escaped.
if re.search('%[0-9a-fA-F]{2}', url):
return url
# Can't use url.quote() here because it doesn't match Excel.
url = url.replace('%', '%25')
url = url.replace('"', '%22')
url = url.replace(' ', '%20')
url = url.replace('<', '%3c')
url = url.replace('>', '%3e')
url = url.replace('[', '%5b')
url = url.replace(']', '%5d')
url = url.replace('^', '%5e')
url = url.replace('`', '%60')
url = url.replace('{', '%7b')
url = url.replace('}', '%7d')
return url
def _get_drawing_rel_index(self, target=None):
# Get the index used to address a drawing rel link.
if target is None:
self.drawing_rels_id += 1
return self.drawing_rels_id
elif self.drawing_rels.get(target):
return self.drawing_rels[target]
else:
self.drawing_rels_id += 1
self.drawing_rels[target] = self.drawing_rels_id
return self.drawing_rels_id
def _get_vml_drawing_rel_index(self, target=None):
# Get the index used to address a vml drawing rel link.
if self.vml_drawing_rels.get(target):
return self.vml_drawing_rels[target]
else:
self.vml_drawing_rels_id += 1
self.vml_drawing_rels[target] = self.vml_drawing_rels_id
return self.vml_drawing_rels_id
###########################################################################
#
# The following font methods are, more or less, duplicated from the
# Styles class. Not the cleanest version of reuse but works for now.
#
###########################################################################
    def _write_font(self, xf_format):
        # Write the <font> element (an rPr run for a rich string), using
        # the properties of the supplied Format object. Duplicated, more or
        # less, from the Styles class — see the banner comment above.
        xml_writer = self.rstring
        xml_writer._xml_start_tag('rPr')
        # Handle the main font properties.
        if xf_format.bold:
            xml_writer._xml_empty_tag('b')
        if xf_format.italic:
            xml_writer._xml_empty_tag('i')
        if xf_format.font_strikeout:
            xml_writer._xml_empty_tag('strike')
        if xf_format.font_outline:
            xml_writer._xml_empty_tag('outline')
        if xf_format.font_shadow:
            xml_writer._xml_empty_tag('shadow')
        # Handle the underline variants.
        if xf_format.underline:
            self._write_underline(xf_format.underline)
        # Handle super/subscript.
        if xf_format.font_script == 1:
            self._write_vert_align('superscript')
        if xf_format.font_script == 2:
            self._write_vert_align('subscript')
        # Write the font size
        xml_writer._xml_empty_tag('sz', [('val', xf_format.font_size)])
        # Handle colors.
        if xf_format.theme == -1:
            # Ignore for excel2003_style.
            pass
        elif xf_format.theme:
            # NOTE(review): theme/indexed colors call _write_color while the
            # rgb/default branches call _write_rstring_color — confirm the
            # asymmetry is intentional before changing it.
            self._write_color('theme', xf_format.theme)
        elif xf_format.color_indexed:
            self._write_color('indexed', xf_format.color_indexed)
        elif xf_format.font_color:
            color = self._get_palette_color(xf_format.font_color)
            self._write_rstring_color('rgb', color)
        else:
            self._write_rstring_color('theme', 1)
        # Write some other font properties related to font families.
        xml_writer._xml_empty_tag('rFont', [('val', xf_format.font_name)])
        xml_writer._xml_empty_tag('family', [('val', xf_format.font_family)])
        if xf_format.font_name == 'Calibri' and not xf_format.hyperlink:
            xml_writer._xml_empty_tag('scheme',
                                      [('val', xf_format.font_scheme)])
        xml_writer._xml_end_tag('rPr')
def _write_underline(self, underline):
# Write the underline font element.
attributes = []
# Handle the underline variants.
if underline == 2:
attributes = [('val', 'double')]
elif underline == 33:
attributes = [('val', 'singleAccounting')]
elif underline == 34:
attributes = [('val', 'doubleAccounting')]
self.rstring._xml_empty_tag('u', attributes)
def _write_vert_align(self, val):
# Write the <vertAlign> font sub-element.
attributes = [('val', val)]
self.rstring._xml_empty_tag('vertAlign', attributes)
def _write_rstring_color(self, name, value):
# Write the <color> element.
attributes = [(name, value)]
self.rstring._xml_empty_tag('color', attributes)
def _get_palette_color(self, color):
# Convert the RGB color.
if color[0] == '#':
color = color[1:]
return "FF" + color.upper()
def _opt_close(self):
# Close the row data filehandle in constant_memory mode.
if not self.row_data_fh_closed:
self.row_data_fh.close()
self.row_data_fh_closed = True
def _opt_reopen(self):
# Reopen the row data filehandle in constant_memory mode.
if self.row_data_fh_closed:
filename = self.row_data_filename
self.row_data_fh = codecs.open(filename, 'a+', 'utf-8')
self.row_data_fh_closed = False
self.fh = self.row_data_fh
def _set_icon_props(self, total_icons, user_props=None):
# Set the sub-properties for icons.
props = []
# Set the defaults.
for _ in range(total_icons):
props.append({'criteria': False,
'value': 0,
'type': 'percent'})
# Set the default icon values based on the number of icons.
if total_icons == 3:
props[0]['value'] = 67
props[1]['value'] = 33
if total_icons == 4:
props[0]['value'] = 75
props[1]['value'] = 50
props[2]['value'] = 25
if total_icons == 5:
props[0]['value'] = 80
props[1]['value'] = 60
props[2]['value'] = 40
props[3]['value'] = 20
# Overwrite default properties with user defined properties.
if user_props:
# Ensure we don't set user properties for lowest icon.
max_data = len(user_props)
if max_data >= total_icons:
max_data = total_icons - 1
for i in range(max_data):
# Set the user defined 'value' property.
if user_props[i].get('value') is not None:
props[i]['value'] = user_props[i]['value']
# Remove the formula '=' sign if it exists.
tmp = props[i]['value']
if isinstance(tmp, str_types) and tmp.startswith('='):
props[i]['value'] = tmp.lstrip('=')
# Set the user defined 'type' property.
if user_props[i].get('type'):
valid_types = ('percent',
'percentile',
'number',
'formula')
if user_props[i]['type'] not in valid_types:
warn("Unknown icon property type '%s' for sub-"
"property 'type' in conditional_format()" %
user_props[i]['type'])
else:
props[i]['type'] = user_props[i]['type']
if props[i]['type'] == 'number':
props[i]['type'] = 'num'
# Set the user defined 'criteria' property.
criteria = user_props[i].get('criteria')
if criteria and criteria == '>':
props[i]['criteria'] = True
return props
###########################################################################
#
# XML methods.
#
###########################################################################
def _write_worksheet(self):
# Write the <worksheet> element. This is the root element.
schema = 'http://schemas.openxmlformats.org/'
xmlns = schema + 'spreadsheetml/2006/main'
xmlns_r = schema + 'officeDocument/2006/relationships'
xmlns_mc = schema + 'markup-compatibility/2006'
ms_schema = 'http://schemas.microsoft.com/'
xmlns_x14ac = ms_schema + 'office/spreadsheetml/2009/9/ac'
attributes = [
('xmlns', xmlns),
('xmlns:r', xmlns_r)]
# Add some extra attributes for Excel 2010. Mainly for sparklines.
if self.excel_version == 2010:
attributes.append(('xmlns:mc', xmlns_mc))
attributes.append(('xmlns:x14ac', xmlns_x14ac))
attributes.append(('mc:Ignorable', 'x14ac'))
self._xml_start_tag('worksheet', attributes)
def _write_dimension(self):
# Write the <dimension> element. This specifies the range of
# cells in the worksheet. As a special case, empty
# spreadsheets use 'A1' as a range.
if self.dim_rowmin is None and self.dim_colmin is None:
# If the min dimensions are not defined then no dimensions
# have been set and we use the default 'A1'.
ref = 'A1'
elif self.dim_rowmin is None and self.dim_colmin is not None:
# If the row dimensions aren't set but the column
# dimensions are set then they have been changed via
# set_column().
if self.dim_colmin == self.dim_colmax:
# The dimensions are a single cell and not a range.
ref = xl_rowcol_to_cell(0, self.dim_colmin)
else:
# The dimensions are a cell range.
cell_1 = xl_rowcol_to_cell(0, self.dim_colmin)
cell_2 = xl_rowcol_to_cell(0, self.dim_colmax)
ref = cell_1 + ':' + cell_2
elif (self.dim_rowmin == self.dim_rowmax and
self.dim_colmin == self.dim_colmax):
# The dimensions are a single cell and not a range.
ref = xl_rowcol_to_cell(self.dim_rowmin, self.dim_colmin)
else:
# The dimensions are a cell range.
cell_1 = xl_rowcol_to_cell(self.dim_rowmin, self.dim_colmin)
cell_2 = xl_rowcol_to_cell(self.dim_rowmax, self.dim_colmax)
ref = cell_1 + ':' + cell_2
self._xml_empty_tag('dimension', [('ref', ref)])
def _write_sheet_views(self):
# Write the <sheetViews> element.
self._xml_start_tag('sheetViews')
# Write the sheetView element.
self._write_sheet_view()
self._xml_end_tag('sheetViews')
    def _write_sheet_view(self):
        # Write the <sheetView> element with the view-state options.
        # The append order below is deliberate: it matches the attribute
        # order that Excel itself emits.
        attributes = []
        # Hide screen gridlines if required.
        if not self.screen_gridlines:
            attributes.append(('showGridLines', 0))
        # Hide screen row/column headers.
        if self.row_col_headers:
            attributes.append(('showRowColHeaders', 0))
        # Hide zeroes in cells.
        if not self.show_zeros:
            attributes.append(('showZeros', 0))
        # Display worksheet right to left for Hebrew, Arabic and others.
        if self.is_right_to_left:
            attributes.append(('rightToLeft', 1))
        # Show that the sheet tab is selected.
        if self.selected:
            attributes.append(('tabSelected', 1))
        # Turn outlines off. Also required in the outlinePr element.
        if not self.outline_on:
            attributes.append(("showOutlineSymbols", 0))
        # Set the page view/layout mode if required.
        if self.page_view:
            attributes.append(('view', 'pageLayout'))
        # Set the zoom level, but only outside of page layout view.
        if self.zoom != 100:
            if not self.page_view:
                attributes.append(('zoomScale', self.zoom))
            if self.zoom_scale_normal:
                attributes.append(('zoomScaleNormal', self.zoom))
        attributes.append(('workbookViewId', 0))
        # Frozen/split panes and selections need child elements, so a
        # container tag is written in that case instead of an empty tag.
        if self.panes or len(self.selections):
            self._xml_start_tag('sheetView', attributes)
            self._write_panes()
            self._write_selections()
            self._xml_end_tag('sheetView')
        else:
            self._xml_empty_tag('sheetView', attributes)
def _write_sheet_format_pr(self):
# Write the <sheetFormatPr> element.
default_row_height = self.default_row_height
row_level = self.outline_row_level
col_level = self.outline_col_level
attributes = [('defaultRowHeight', default_row_height)]
if self.default_row_height != self.original_row_height:
attributes.append(('customHeight', 1))
if self.default_row_zeroed:
attributes.append(('zeroHeight', 1))
if row_level:
attributes.append(('outlineLevelRow', row_level))
if col_level:
attributes.append(('outlineLevelCol', col_level))
if self.excel_version == 2010:
attributes.append(('x14ac:dyDescent', '0.25'))
self._xml_empty_tag('sheetFormatPr', attributes)
def _write_cols(self):
# Write the <cols> element and <col> sub elements.
# Exit unless some column have been formatted.
if not self.colinfo:
return
self._xml_start_tag('cols')
for col in sorted(self.colinfo.keys()):
self._write_col_info(self.colinfo[col])
self._xml_end_tag('cols')
    def _write_col_info(self, col_info):
        # Write a single <col> element from the stored column properties.
        # col_info is the 7-tuple stored by set_column():
        # (first col, last col, width, Format, hidden, outline level,
        #  collapsed).
        (col_min, col_max, width, cell_format,
         hidden, level, collapsed) = col_info
        custom_width = 1
        xf_index = 0
        # Get the cell_format index.
        if cell_format:
            xf_index = cell_format._get_xf_index()
        # Set the Excel default column width.
        if width is None:
            if not hidden:
                # 8.43 is Excel's default column width.
                width = 8.43
                custom_width = 0
            else:
                # Hidden columns are written with zero width.
                width = 0
        elif width == 8.43:
            # Width is defined but same as default.
            custom_width = 0
        # Convert column width from user (character) units to the stored
        # 1/256ths-of-a-character-width units.
        if width > 0:
            # Pixel metrics for the default Calibri 11 font.
            max_digit_width = 7
            padding = 5
            if width < 1:
                width = int((int(width * (max_digit_width + padding) + 0.5))
                            / float(max_digit_width) * 256.0) / 256.0
            else:
                width = int((int(width * max_digit_width + 0.5) + padding)
                            / float(max_digit_width) * 256.0) / 256.0
        attributes = [
            ('min', col_min + 1),
            ('max', col_max + 1),
            ('width', "%.16g" % width)]
        if xf_index:
            attributes.append(('style', xf_index))
        if hidden:
            attributes.append(('hidden', '1'))
        if custom_width:
            attributes.append(('customWidth', '1'))
        if level:
            attributes.append(('outlineLevel', level))
        if collapsed:
            attributes.append(('collapsed', '1'))
        self._xml_empty_tag('col', attributes)
def _write_sheet_data(self):
# Write the <sheetData> element.
if self.dim_rowmin is None:
# If the dimensions aren't defined there is no data to write.
self._xml_empty_tag('sheetData')
else:
self._xml_start_tag('sheetData')
self._write_rows()
self._xml_end_tag('sheetData')
def _write_optimized_sheet_data(self):
# Write the <sheetData> element when constant_memory is on. In this
# case we read the data stored in the temp file and rewrite it to the
# XML sheet file.
if self.dim_rowmin is None:
# If the dimensions aren't defined then there is no data to write.
self._xml_empty_tag('sheetData')
else:
self._xml_start_tag('sheetData')
# Rewind the filehandle that was used for temp row data.
buff_size = 65536
self.row_data_fh.seek(0)
data = self.row_data_fh.read(buff_size)
while data:
self.fh.write(data)
data = self.row_data_fh.read(buff_size)
self.row_data_fh.close()
os.unlink(self.row_data_filename)
self._xml_end_tag('sheetData')
def _write_page_margins(self):
# Write the <pageMargins> element.
attributes = [
('left', self.margin_left),
('right', self.margin_right),
('top', self.margin_top),
('bottom', self.margin_bottom),
('header', self.margin_header),
('footer', self.margin_footer)]
self._xml_empty_tag('pageMargins', attributes)
    def _write_page_setup(self):
        # Write the <pageSetup> element. The attribute append order below
        # matches the order Excel emits.
        #
        # The following is an example taken from Excel.
        #
        # <pageSetup
        #     paperSize="9"
        #     scale="110"
        #     fitToWidth="2"
        #     fitToHeight="2"
        #     pageOrder="overThenDown"
        #     orientation="portrait"
        #     blackAndWhite="1"
        #     draft="1"
        #     horizontalDpi="200"
        #     verticalDpi="200"
        #     r:id="rId1"
        # />
        #
        attributes = []
        # Skip this element if no page setup has changed.
        if not self.page_setup_changed:
            return
        # Set paper size.
        if self.paper_size:
            attributes.append(('paperSize', self.paper_size))
        # Set the print_scale.
        if self.print_scale != 100:
            attributes.append(('scale', self.print_scale))
        # Set the "Fit to page" properties.
        if self.fit_page and self.fit_width != 1:
            attributes.append(('fitToWidth', self.fit_width))
        if self.fit_page and self.fit_height != 1:
            attributes.append(('fitToHeight', self.fit_height))
        # Set the page print direction.
        if self.page_order:
            attributes.append(('pageOrder', "overThenDown"))
        # Set start page number for printing (only when greater than 1).
        if self.page_start > 1:
            attributes.append(('firstPageNumber', self.page_start))
        # Set page orientation.
        if self.orientation:
            attributes.append(('orientation', 'portrait'))
        else:
            attributes.append(('orientation', 'landscape'))
        # Flag that the firstPageNumber value should be used.
        if self.page_start != 0:
            attributes.append(('useFirstPageNumber', '1'))
        # Set the DPI. Mainly only for testing. Note that chartsheets
        # emit the two DPI attributes in the opposite order.
        if self.is_chartsheet:
            if self.horizontal_dpi:
                attributes.append(('horizontalDpi', self.horizontal_dpi))
            if self.vertical_dpi:
                attributes.append(('verticalDpi', self.vertical_dpi))
        else:
            if self.vertical_dpi:
                attributes.append(('verticalDpi', self.vertical_dpi))
            if self.horizontal_dpi:
                attributes.append(('horizontalDpi', self.horizontal_dpi))
        self._xml_empty_tag('pageSetup', attributes)
def _write_print_options(self):
# Write the <printOptions> element.
attributes = []
if not self.print_options_changed:
return
# Set horizontal centering.
if self.hcenter:
attributes.append(('horizontalCentered', 1))
# Set vertical centering.
if self.vcenter:
attributes.append(('verticalCentered', 1))
# Enable row and column headers.
if self.print_headers:
attributes.append(('headings', 1))
# Set printed gridlines.
if self.print_gridlines:
attributes.append(('gridLines', 1))
self._xml_empty_tag('printOptions', attributes)
def _write_header_footer(self):
# Write the <headerFooter> element.
attributes = []
if not self.header_footer_scales:
attributes.append(('scaleWithDoc', 0))
if not self.header_footer_aligns:
attributes.append(('alignWithMargins', 0))
if self.header_footer_changed:
self._xml_start_tag('headerFooter', attributes)
if self.header:
self._write_odd_header()
if self.footer:
self._write_odd_footer()
self._xml_end_tag('headerFooter')
elif self.excel2003_style:
self._xml_empty_tag('headerFooter', attributes)
def _write_odd_header(self):
# Write the <headerFooter> element.
self._xml_data_element('oddHeader', self.header)
def _write_odd_footer(self):
# Write the <headerFooter> element.
self._xml_data_element('oddFooter', self.footer)
    def _write_rows(self):
        # Write out the worksheet data as a series of <row> elements with
        # their <c> cell children. Rows with no formatting, data or
        # comments are omitted entirely.
        self._calculate_spans()
        for row_num in range(self.dim_rowmin, self.dim_rowmax + 1):
            if (row_num in self.set_rows or row_num in self.comments
                    or self.table[row_num]):
                # Only process rows with formatting, cell data and/or comments.
                # Spans are precomputed per block of 16 rows.
                span_index = int(row_num / 16)
                if span_index in self.row_spans:
                    span = self.row_spans[span_index]
                else:
                    span = None
                if self.table[row_num]:
                    # Write the cells if the row contains data.
                    if row_num not in self.set_rows:
                        self._write_row(row_num, span)
                    else:
                        self._write_row(row_num, span, self.set_rows[row_num])
                    for col_num in range(self.dim_colmin, self.dim_colmax + 1):
                        if col_num in self.table[row_num]:
                            col_ref = self.table[row_num][col_num]
                            self._write_cell(row_num, col_num, col_ref)
                    # _write_row() opened the <row> tag; close it here.
                    self._xml_end_tag('row')
                elif row_num in self.comments:
                    # Row with comments in cells.
                    # NOTE(review): a row that has comments but no set_row()
                    # entry would raise KeyError on self.set_rows[row_num];
                    # presumably the comment path guarantees an entry —
                    # confirm against the comment-insertion code.
                    self._write_empty_row(row_num, span,
                                          self.set_rows[row_num])
                else:
                    # Blank row with attributes only (must be in set_rows
                    # given the enclosing condition).
                    self._write_empty_row(row_num, span,
                                          self.set_rows[row_num])
    def _write_single_row(self, current_row_num=0):
        # Write out the worksheet data as a single row with cells.
        # This method is used when constant_memory is on. A single
        # row is written and the data table is reset. That way only
        # one row of data is kept in memory at any one time. We don't
        # write span data in the optimized case since it is optional.
        # Set the new previous row as the current row.
        row_num = self.previous_row
        self.previous_row = current_row_num
        if (row_num in self.set_rows or row_num in self.comments
                or self.table[row_num]):
            # Only process rows with formatting, cell data and/or comments.
            # No span data in optimized mode.
            span = None
            if self.table[row_num]:
                # Write the cells if the row contains data.
                if row_num not in self.set_rows:
                    self._write_row(row_num, span)
                else:
                    self._write_row(row_num, span, self.set_rows[row_num])
                for col_num in range(self.dim_colmin, self.dim_colmax + 1):
                    if col_num in self.table[row_num]:
                        col_ref = self.table[row_num][col_num]
                        self._write_cell(row_num, col_num, col_ref)
                # _write_row() opened the <row> tag; close it here.
                self._xml_end_tag('row')
            else:
                # Row attributes or comments only.
                # NOTE(review): as in _write_rows(), a comments-only row
                # without a set_rows entry would raise KeyError here —
                # confirm the comment path populates set_rows.
                self._write_empty_row(row_num, span, self.set_rows[row_num])
        # Reset table so only one row is held in memory at a time.
        self.table.clear()
    def _calculate_spans(self):
        # Calculate the "spans" attribute of the <row> tag. This is an
        # XLSX optimization and isn't strictly required. However, it
        # makes comparing files easier. The span is the same for each
        # block of 16 rows: it covers the min/max columns used by any
        # cell data or comment in that block, as a 1-based "min:max"
        # string keyed by block index in self.row_spans.
        spans = {}
        span_min = None
        span_max = None
        for row_num in range(self.dim_rowmin, self.dim_rowmax + 1):
            if row_num in self.table:
                # Calculate spans for cell data.
                for col_num in range(self.dim_colmin, self.dim_colmax + 1):
                    if col_num in self.table[row_num]:
                        if span_min is None:
                            span_min = col_num
                            span_max = col_num
                        else:
                            if col_num < span_min:
                                span_min = col_num
                            if col_num > span_max:
                                span_max = col_num
            if row_num in self.comments:
                # Calculate spans for comments (widening the same range).
                for col_num in range(self.dim_colmin, self.dim_colmax + 1):
                    if (row_num in self.comments
                            and col_num in self.comments[row_num]):
                        if span_min is None:
                            span_min = col_num
                            span_max = col_num
                        else:
                            if col_num < span_min:
                                span_min = col_num
                            if col_num > span_max:
                                span_max = col_num
            # Close off the span at the end of each 16-row block, or at
            # the last used row, and reset for the next block.
            if ((row_num + 1) % 16 == 0) or row_num == self.dim_rowmax:
                span_index = int(row_num / 16)
                if span_min is not None:
                    # Convert to 1-based column numbers for the XML.
                    span_min += 1
                    span_max += 1
                    spans[span_index] = "%s:%s" % (span_min, span_max)
                    span_min = None
        self.row_spans = spans
    def _write_row(self, row, spans, properties=None, empty_row=False):
        # Write the <row> element. When empty_row is False the tag is left
        # open so the caller can write the cell children and close it.
        # properties is the (height, Format, hidden, level, collapsed)
        # tuple stored by set_row().
        xf_index = 0
        if properties:
            height, cell_format, hidden, level, collapsed = properties
        else:
            height, cell_format, hidden, level, collapsed = None, None, 0, 0, 0
        if height is None:
            height = self.default_row_height
        attributes = [('r', row + 1)]
        # Get the cell_format index.
        if cell_format:
            xf_index = cell_format._get_xf_index()
        # Add row attributes where applicable. The append order matches
        # Excel's attribute order.
        if spans:
            attributes.append(('spans', spans))
        if xf_index:
            attributes.append(('s', xf_index))
        if cell_format:
            attributes.append(('customFormat', 1))
        if height != self.original_row_height:
            attributes.append(('ht', "%g" % height))
        if hidden:
            attributes.append(('hidden', 1))
        if height != self.original_row_height:
            attributes.append(('customHeight', 1))
        if level:
            attributes.append(('outlineLevel', level))
        if collapsed:
            attributes.append(('collapsed', 1))
        if self.excel_version == 2010:
            attributes.append(('x14ac:dyDescent', '0.25'))
        if empty_row:
            self._xml_empty_tag_unencoded('row', attributes)
        else:
            self._xml_start_tag_unencoded('row', attributes)
def _write_empty_row(self, row, spans, properties=None):
# Write and empty <row> element.
self._write_row(row, spans, properties, empty_row=True)
    def _write_cell(self, row, col, cell):
        # Write the <c> cell element for one of the namedtuple cell types
        # (Number, String, Formula, ArrayFormula, Blank, Boolean).
        # Note. This is the innermost loop so efficiency is important.
        cell_range = xl_rowcol_to_cell_fast(row, col)
        attributes = [('r', cell_range)]
        if cell.format:
            # Add the cell format index.
            xf_index = cell.format._get_xf_index()
            attributes.append(('s', xf_index))
        elif row in self.set_rows and self.set_rows[row][1]:
            # Add the row format.
            row_xf = self.set_rows[row][1]
            attributes.append(('s', row_xf._get_xf_index()))
        elif col in self.col_formats:
            # Add the column format.
            col_xf = self.col_formats[col]
            attributes.append(('s', col_xf._get_xf_index()))
        type_cell_name = type(cell).__name__
        # Write the various cell types.
        if type_cell_name == 'Number':
            # Write a number.
            self._xml_number_element(cell.number, attributes)
        elif type_cell_name == 'String':
            # Write a string.
            string = cell.string
            if not self.constant_memory:
                # Write a shared string.
                self._xml_string_element(string, attributes)
            else:
                # Write an optimized in-line string.
                # Escape control characters. See SharedString.pm for details.
                string = re.sub('(_x[0-9a-fA-F]{4}_)', r'_x005F\1', string)
                string = re.sub(r'([\x00-\x08\x0B-\x1F])',
                                lambda match: "_x%04X_" %
                                ord(match.group(1)), string)
                # Escape non characters (U+FFFE/U+FFFF). Python 2 needs
                # unichr() to build the non-BMP-safe literals.
                if sys.version_info[0] == 2:
                    non_char1 = unichr(0xFFFE)
                    non_char2 = unichr(0xFFFF)
                else:
                    non_char1 = "\uFFFE"
                    non_char2 = "\uFFFF"
                string = re.sub(non_char1, '_xFFFE_', string)
                string = re.sub(non_char2, '_xFFFF_', string)
                # Write any rich strings without further tags.
                if re.search('^<r>', string) and re.search('</r>$', string):
                    self._xml_rich_inline_string(string, attributes)
                else:
                    # Add attribute to preserve leading or trailing whitespace.
                    preserve = 0
                    if re.search(r'^\s', string) or re.search(r'\s$', string):
                        preserve = 1
                    self._xml_inline_string(string, preserve, attributes)
        elif type_cell_name == 'Formula':
            # Write a formula. First check the formula value type.
            value = cell.value
            if type(cell.value) == bool:
                # Boolean result: stored as 1/0 with type 'b'.
                attributes.append(('t', 'b'))
                if cell.value:
                    value = 1
                else:
                    value = 0
            elif isinstance(cell.value, str_types):
                error_codes = ('#DIV/0!', '#N/A', '#NAME?', '#NULL!',
                               '#NUM!', '#REF!', '#VALUE!')
                if cell.value == '':
                    # Allow blank to force recalc in some third party apps.
                    pass
                elif cell.value in error_codes:
                    attributes.append(('t', 'e'))
                else:
                    attributes.append(('t', 'str'))
            self._xml_formula_element(cell.formula, value, attributes)
        elif type_cell_name == 'ArrayFormula':
            # Write a array formula.
            if cell.atype == 'dynamic':
                attributes.append(('cm', 1))
            # First check if the formula value is a string.
            try:
                float(cell.value)
            except ValueError:
                attributes.append(('t', 'str'))
            # Write an array formula.
            self._xml_start_tag('c', attributes)
            self._write_cell_array_formula(cell.formula, cell.range)
            self._write_cell_value(cell.value)
            self._xml_end_tag('c')
        elif type_cell_name == 'Blank':
            # Write a empty cell.
            self._xml_empty_tag('c', attributes)
        elif type_cell_name == 'Boolean':
            # Write a boolean cell.
            attributes.append(('t', 'b'))
            self._xml_start_tag('c', attributes)
            self._write_cell_value(cell.boolean)
            self._xml_end_tag('c')
def _write_cell_value(self, value):
# Write the cell value <v> element.
if value is None:
value = ''
self._xml_data_element('v', value)
def _write_cell_array_formula(self, formula, cell_range):
# Write the cell array formula <f> element.
attributes = [
('t', 'array'),
('ref', cell_range)
]
self._xml_data_element('f', formula, attributes)
def _write_sheet_pr(self):
# Write the <sheetPr> element for Sheet level properties.
attributes = []
if (not self.fit_page
and not self.filter_on
and not self.tab_color
and not self.outline_changed
and not self.vba_codename):
return
if self.vba_codename:
attributes.append(('codeName', self.vba_codename))
if self.filter_on:
attributes.append(('filterMode', 1))
if (self.fit_page
or self.tab_color
or self.outline_changed):
self._xml_start_tag('sheetPr', attributes)
self._write_tab_color()
self._write_outline_pr()
self._write_page_set_up_pr()
self._xml_end_tag('sheetPr')
else:
self._xml_empty_tag('sheetPr', attributes)
def _write_page_set_up_pr(self):
# Write the <pageSetUpPr> element.
if not self.fit_page:
return
attributes = [('fitToPage', 1)]
self._xml_empty_tag('pageSetUpPr', attributes)
def _write_tab_color(self):
# Write the <tabColor> element.
color = self.tab_color
if not color:
return
attributes = [('rgb', color)]
self._xml_empty_tag('tabColor', attributes)
def _write_outline_pr(self):
# Write the <outlinePr> element.
attributes = []
if not self.outline_changed:
return
if self.outline_style:
attributes.append(("applyStyles", 1))
if not self.outline_below:
attributes.append(("summaryBelow", 0))
if not self.outline_right:
attributes.append(("summaryRight", 0))
if not self.outline_on:
attributes.append(("showOutlineSymbols", 0))
self._xml_empty_tag('outlinePr', attributes)
def _write_row_breaks(self):
# Write the <rowBreaks> element.
page_breaks = self._sort_pagebreaks(self.hbreaks)
if not page_breaks:
return
count = len(page_breaks)
attributes = [
('count', count),
('manualBreakCount', count),
]
self._xml_start_tag('rowBreaks', attributes)
for row_num in page_breaks:
self._write_brk(row_num, 16383)
self._xml_end_tag('rowBreaks')
def _write_col_breaks(self):
# Write the <colBreaks> element.
page_breaks = self._sort_pagebreaks(self.vbreaks)
if not page_breaks:
return
count = len(page_breaks)
attributes = [
('count', count),
('manualBreakCount', count),
]
self._xml_start_tag('colBreaks', attributes)
for col_num in page_breaks:
self._write_brk(col_num, 1048575)
self._xml_end_tag('colBreaks')
def _write_brk(self, brk_id, brk_max):
# Write the <brk> element.
attributes = [
('id', brk_id),
('max', brk_max),
('man', 1)]
self._xml_empty_tag('brk', attributes)
def _write_merge_cells(self):
# Write the <mergeCells> element.
merged_cells = self.merge
count = len(merged_cells)
if not count:
return
attributes = [('count', count)]
self._xml_start_tag('mergeCells', attributes)
for merged_range in merged_cells:
# Write the mergeCell element.
self._write_merge_cell(merged_range)
self._xml_end_tag('mergeCells')
def _write_merge_cell(self, merged_range):
# Write the <mergeCell> element.
(row_min, col_min, row_max, col_max) = merged_range
# Convert the merge dimensions to a cell range.
cell_1 = xl_rowcol_to_cell(row_min, col_min)
cell_2 = xl_rowcol_to_cell(row_max, col_max)
ref = cell_1 + ':' + cell_2
attributes = [('ref', ref)]
self._xml_empty_tag('mergeCell', attributes)
    def _write_hyperlinks(self):
        # Process any stored hyperlinks in row/col order and write the
        # <hyperlinks> element. The attributes are different for internal
        # and external links: external links refer to their URL through a
        # relationship id, internal links store the location inline.
        hlink_refs = []
        display = None
        # Sort the hyperlinks into row order.
        row_nums = sorted(self.hyperlinks.keys())
        # Exit if there are no hyperlinks to process.
        if not row_nums:
            return
        # Iterate over the rows.
        for row_num in row_nums:
            # Sort the hyperlinks into column order.
            col_nums = sorted(self.hyperlinks[row_num].keys())
            # Iterate over the columns.
            for col_num in col_nums:
                # Get the link data for this cell.
                link = self.hyperlinks[row_num][col_num]
                link_type = link['link_type']
                # If the cell isn't a string then we have to add the url as
                # the string to display.
                # NOTE(review): `display` is never reset inside the loop, so
                # a value set for one cell appears to carry over to later
                # external links whose cells are strings — confirm whether
                # this is intended.
                if (self.table
                        and self.table[row_num]
                        and self.table[row_num][col_num]):
                    cell = self.table[row_num][col_num]
                    if type(cell).__name__ != 'String':
                        display = link['url']
                if link_type == 1:
                    # External link with rel file relationship.
                    self.rel_count += 1
                    hlink_refs.append([link_type,
                                       row_num,
                                       col_num,
                                       self.rel_count,
                                       link['str'],
                                       display,
                                       link['tip']])
                    # Links for use by the packager.
                    self.external_hyper_links.append(['/hyperlink',
                                                      link['url'], 'External'])
                else:
                    # Internal link with rel file relationship.
                    hlink_refs.append([link_type,
                                       row_num,
                                       col_num,
                                       link['url'],
                                       link['str'],
                                       link['tip']])
        # Write the hyperlink elements.
        self._xml_start_tag('hyperlinks')
        for args in hlink_refs:
            link_type = args.pop(0)
            if link_type == 1:
                self._write_hyperlink_external(*args)
            elif link_type == 2:
                self._write_hyperlink_internal(*args)
        self._xml_end_tag('hyperlinks')
def _write_hyperlink_external(self, row, col, id_num, location=None,
display=None, tooltip=None):
# Write the <hyperlink> element for external links.
ref = xl_rowcol_to_cell(row, col)
r_id = 'rId' + str(id_num)
attributes = [
('ref', ref),
('r:id', r_id)]
if location is not None:
attributes.append(('location', location))
if display is not None:
attributes.append(('display', display))
if tooltip is not None:
attributes.append(('tooltip', tooltip))
self._xml_empty_tag('hyperlink', attributes)
def _write_hyperlink_internal(self, row, col, location=None, display=None,
tooltip=None):
# Write the <hyperlink> element for internal links.
ref = xl_rowcol_to_cell(row, col)
attributes = [
('ref', ref),
('location', location)]
if tooltip is not None:
attributes.append(('tooltip', tooltip))
attributes.append(('display', display))
self._xml_empty_tag('hyperlink', attributes)
def _write_auto_filter(self):
# Write the <autoFilter> element.
if not self.autofilter_ref:
return
attributes = [('ref', self.autofilter_ref)]
if self.filter_on:
# Autofilter defined active filters.
self._xml_start_tag('autoFilter', attributes)
self._write_autofilters()
self._xml_end_tag('autoFilter')
else:
# Autofilter defined without active filters.
self._xml_empty_tag('autoFilter', attributes)
def _write_autofilters(self):
# Function to iterate through the columns that form part of an
# autofilter range and write the appropriate filters.
(col1, col2) = self.filter_range
for col in range(col1, col2 + 1):
# Skip if column doesn't have an active filter.
if col not in self.filter_cols:
continue
# Retrieve the filter tokens and write the autofilter records.
tokens = self.filter_cols[col]
filter_type = self.filter_type[col]
# Filters are relative to first column in the autofilter.
self._write_filter_column(col - col1, filter_type, tokens)
def _write_filter_column(self, col_id, filter_type, filters):
# Write the <filterColumn> element.
attributes = [('colId', col_id)]
self._xml_start_tag('filterColumn', attributes)
if filter_type == 1:
# Type == 1 is the new XLSX style filter.
self._write_filters(filters)
else:
# Type == 0 is the classic "custom" filter.
self._write_custom_filters(filters)
self._xml_end_tag('filterColumn')
def _write_filters(self, filters):
# Write the <filters> element.
non_blanks = [filter for filter in filters
if str(filter).lower() != 'blanks']
attributes = []
if len(filters) != len(non_blanks):
attributes = [('blank', 1)]
if len(filters) == 1 and len(non_blanks) == 0:
# Special case for blank cells only.
self._xml_empty_tag('filters', attributes)
else:
# General case.
self._xml_start_tag('filters', attributes)
for autofilter in sorted(non_blanks):
self._write_filter(autofilter)
self._xml_end_tag('filters')
def _write_filter(self, val):
# Write the <filter> element.
attributes = [('val', val)]
self._xml_empty_tag('filter', attributes)
def _write_custom_filters(self, tokens):
# Write the <customFilters> element.
if len(tokens) == 2:
# One filter expression only.
self._xml_start_tag('customFilters')
self._write_custom_filter(*tokens)
self._xml_end_tag('customFilters')
else:
# Two filter expressions.
attributes = []
# Check if the "join" operand is "and" or "or".
if tokens[2] == 0:
attributes = [('and', 1)]
else:
attributes = [('and', 0)]
# Write the two custom filters.
self._xml_start_tag('customFilters', attributes)
self._write_custom_filter(tokens[0], tokens[1])
self._write_custom_filter(tokens[3], tokens[4])
self._xml_end_tag('customFilters')
def _write_custom_filter(self, operator, val):
# Write the <customFilter> element.
attributes = []
operators = {
1: 'lessThan',
2: 'equal',
3: 'lessThanOrEqual',
4: 'greaterThan',
5: 'notEqual',
6: 'greaterThanOrEqual',
22: 'equal',
}
# Convert the operator from a number to a descriptive string.
if operators[operator] is not None:
operator = operators[operator]
else:
warn("Unknown operator = %s" % operator)
# The 'equal' operator is the default attribute and isn't stored.
if not operator == 'equal':
attributes.append(('operator', operator))
attributes.append(('val', val))
self._xml_empty_tag('customFilter', attributes)
    def _write_sheet_protection(self):
        # Write the <sheetProtection> element from the options stored by
        # protect(). Note the inverted logic: for most options Excel
        # stores the attribute (with value 0) when the action is ALLOWED,
        # and omits it when it is blocked, while 'objects'/'scenarios'/
        # 'select_*' use the opposite sense. The append order matches
        # Excel's attribute order.
        attributes = []
        if not self.protect_options:
            return
        options = self.protect_options
        if options['password']:
            attributes.append(('password', options['password']))
        if options['sheet']:
            attributes.append(('sheet', 1))
        if options['content']:
            attributes.append(('content', 1))
        if not options['objects']:
            attributes.append(('objects', 1))
        if not options['scenarios']:
            attributes.append(('scenarios', 1))
        if options['format_cells']:
            attributes.append(('formatCells', 0))
        if options['format_columns']:
            attributes.append(('formatColumns', 0))
        if options['format_rows']:
            attributes.append(('formatRows', 0))
        if options['insert_columns']:
            attributes.append(('insertColumns', 0))
        if options['insert_rows']:
            attributes.append(('insertRows', 0))
        if options['insert_hyperlinks']:
            attributes.append(('insertHyperlinks', 0))
        if options['delete_columns']:
            attributes.append(('deleteColumns', 0))
        if options['delete_rows']:
            attributes.append(('deleteRows', 0))
        if not options['select_locked_cells']:
            attributes.append(('selectLockedCells', 1))
        if options['sort']:
            attributes.append(('sort', 0))
        if options['autofilter']:
            attributes.append(('autoFilter', 0))
        if options['pivot_tables']:
            attributes.append(('pivotTables', 0))
        if not options['select_unlocked_cells']:
            attributes.append(('selectUnlockedCells', 1))
        self._xml_empty_tag('sheetProtection', attributes)
def _write_protected_ranges(self):
# Write the <protectedRanges> element.
if self.num_protected_ranges == 0:
return
self._xml_start_tag('protectedRanges')
for (cell_range, range_name, password) in self.protected_ranges:
self._write_protected_range(cell_range, range_name, password)
self._xml_end_tag('protectedRanges')
def _write_protected_range(self, cell_range, range_name, password):
# Write the <protectedRange> element.
attributes = []
if password:
attributes.append(('password', password))
attributes.append(('sqref', cell_range))
attributes.append(('name', range_name))
self._xml_empty_tag('protectedRange', attributes)
def _write_drawings(self):
# Write the <drawing> elements.
if not self.drawing:
return
self.rel_count += 1
self._write_drawing(self.rel_count)
def _write_drawing(self, drawing_id):
# Write the <drawing> element.
r_id = 'rId' + str(drawing_id)
attributes = [('r:id', r_id)]
self._xml_empty_tag('drawing', attributes)
def _write_legacy_drawing(self):
# Write the <legacyDrawing> element.
if not self.has_vml:
return
# Increment the relationship id for any drawings or comments.
self.rel_count += 1
r_id = 'rId' + str(self.rel_count)
attributes = [('r:id', r_id)]
self._xml_empty_tag('legacyDrawing', attributes)
def _write_legacy_drawing_hf(self):
# Write the <legacyDrawingHF> element.
if not self.has_header_vml:
return
# Increment the relationship id for any drawings or comments.
self.rel_count += 1
r_id = 'rId' + str(self.rel_count)
attributes = [('r:id', r_id)]
self._xml_empty_tag('legacyDrawingHF', attributes)
def _write_data_validations(self):
# Write the <dataValidations> element.
validations = self.validations
count = len(validations)
if not count:
return
attributes = [('count', count)]
self._xml_start_tag('dataValidations', attributes)
for validation in validations:
# Write the dataValidation element.
self._write_data_validation(validation)
self._xml_end_tag('dataValidations')
    def _write_data_validation(self, options):
        # Write a single <dataValidation> element from the options dict
        # stored by data_validation(). The attribute append order matches
        # Excel's output order.
        sqref = ''
        attributes = []
        # Set the cell range(s) for the data validation.
        for cells in options['cells']:
            # Add a space between multiple cell ranges.
            if sqref != '':
                sqref += ' '
            (row_first, col_first, row_last, col_last) = cells
            # Swap last row/col for first row/col as necessary
            if row_first > row_last:
                (row_first, row_last) = (row_last, row_first)
            if col_first > col_last:
                (col_first, col_last) = (col_last, col_first)
            sqref += xl_range(row_first, col_first, row_last, col_last)
        # 'none' validations carry no type attribute.
        if options['validate'] != 'none':
            attributes.append(('type', options['validate']))
        # 'between' is the default operator and isn't stored.
        if options['criteria'] != 'between':
            attributes.append(('operator', options['criteria']))
        if 'error_type' in options:
            if options['error_type'] == 1:
                attributes.append(('errorStyle', 'warning'))
            if options['error_type'] == 2:
                attributes.append(('errorStyle', 'information'))
        if options['ignore_blank']:
            attributes.append(('allowBlank', 1))
        if not options['dropdown']:
            attributes.append(('showDropDown', 1))
        if options['show_input']:
            attributes.append(('showInputMessage', 1))
        if options['show_error']:
            attributes.append(('showErrorMessage', 1))
        if 'error_title' in options:
            attributes.append(('errorTitle', options['error_title']))
        if 'error_message' in options:
            attributes.append(('error', options['error_message']))
        if 'input_title' in options:
            attributes.append(('promptTitle', options['input_title']))
        if 'input_message' in options:
            attributes.append(('prompt', options['input_message']))
        attributes.append(('sqref', sqref))
        if options['validate'] == 'none':
            # No criteria: the element has no formula children.
            self._xml_empty_tag('dataValidation', attributes)
        else:
            self._xml_start_tag('dataValidation', attributes)
            # Write the formula1 element.
            self._write_formula_1(options['value'])
            # Write the formula2 element.
            if options['maximum'] is not None:
                self._write_formula_2(options['maximum'])
            self._xml_end_tag('dataValidation')
def _write_formula_1(self, formula):
# Write the <formula1> element.
if type(formula) is list:
formula = self._csv_join(*formula)
formula = '"%s"' % formula
else:
# Check if the formula is a number.
try:
float(formula)
except ValueError:
# Not a number. Remove the formula '=' sign if it exists.
if formula.startswith('='):
formula = formula.lstrip('=')
self._xml_data_element('formula1', formula)
def _write_formula_2(self, formula):
# Write the <formula2> element.
# Check if the formula is a number.
try:
float(formula)
except ValueError:
# Not a number. Remove the formula '=' sign if it exists.
if formula.startswith('='):
formula = formula.lstrip('=')
self._xml_data_element('formula2', formula)
def _write_conditional_formats(self):
# Write the Worksheet conditional formats.
ranges = sorted(self.cond_formats.keys())
if not ranges:
return
for cond_range in ranges:
self._write_conditional_formatting(cond_range,
self.cond_formats[cond_range])
def _write_conditional_formatting(self, cond_range, params):
# Write the <conditionalFormatting> element.
attributes = [('sqref', cond_range)]
self._xml_start_tag('conditionalFormatting', attributes)
for param in params:
# Write the cfRule element.
self._write_cf_rule(param)
self._xml_end_tag('conditionalFormatting')
    def _write_cf_rule(self, params):
        # Write the <cfRule> element for a single conditional format rule.
        # The sub-elements and attributes written depend on params['type'],
        # which dispatches to one of the rule categories below.
        attributes = [('type', params['type'])]

        # dxfId links the rule to a differential format record, if any.
        if 'format' in params and params['format'] is not None:
            attributes.append(('dxfId', params['format']))

        attributes.append(('priority', params['priority']))

        if params.get('stop_if_true'):
            attributes.append(('stopIfTrue', 1))

        if params['type'] == 'cellIs':
            attributes.append(('operator', params['criteria']))

            self._xml_start_tag('cfRule', attributes)

            # Range criteria ('between' style) carry two formulas,
            # otherwise a single value formula is written.
            if 'minimum' in params and 'maximum' in params:
                self._write_formula_element(params['minimum'])
                self._write_formula_element(params['maximum'])
            else:
                self._write_formula_element(params['value'])

            self._xml_end_tag('cfRule')
        elif params['type'] == 'aboveAverage':
            # The criteria string encodes the above/below/equal variants
            # and the optional standard-deviation count.
            if re.search('below', params['criteria']):
                attributes.append(('aboveAverage', 0))

            if re.search('equal', params['criteria']):
                attributes.append(('equalAverage', 1))

            if re.search('[123] std dev', params['criteria']):
                match = re.search('([123]) std dev', params['criteria'])
                attributes.append(('stdDev', match.group(1)))

            self._xml_empty_tag('cfRule', attributes)
        elif params['type'] == 'top10':
            if 'criteria' in params and params['criteria'] == '%':
                attributes.append(('percent', 1))

            if 'direction' in params:
                attributes.append(('bottom', 1))

            # Rank defaults to 10 when no (truthy) value is given.
            rank = params['value'] or 10
            attributes.append(('rank', rank))

            self._xml_empty_tag('cfRule', attributes)
        elif params['type'] == 'duplicateValues':
            self._xml_empty_tag('cfRule', attributes)
        elif params['type'] == 'uniqueValues':
            self._xml_empty_tag('cfRule', attributes)
        elif (params['type'] == 'containsText'
              or params['type'] == 'notContainsText'
              or params['type'] == 'beginsWith'
              or params['type'] == 'endsWith'):
            # Text rules carry the operator, the text and a formula.
            attributes.append(('operator', params['criteria']))
            attributes.append(('text', params['value']))
            self._xml_start_tag('cfRule', attributes)
            self._write_formula_element(params['formula'])
            self._xml_end_tag('cfRule')
        elif params['type'] == 'timePeriod':
            attributes.append(('timePeriod', params['criteria']))
            self._xml_start_tag('cfRule', attributes)
            self._write_formula_element(params['formula'])
            self._xml_end_tag('cfRule')
        elif (params['type'] == 'containsBlanks'
              or params['type'] == 'notContainsBlanks'
              or params['type'] == 'containsErrors'
              or params['type'] == 'notContainsErrors'):
            # Blank/error rules only need the pre-built formula.
            self._xml_start_tag('cfRule', attributes)
            self._write_formula_element(params['formula'])
            self._xml_end_tag('cfRule')
        elif params['type'] == 'colorScale':
            self._xml_start_tag('cfRule', attributes)
            self._write_color_scale(params)
            self._xml_end_tag('cfRule')
        elif params['type'] == 'dataBar':
            self._xml_start_tag('cfRule', attributes)
            self._write_data_bar(params)

            # Excel 2010 data bars additionally write an extLst block.
            if params.get('is_data_bar_2010'):
                self._write_data_bar_ext(params)

            self._xml_end_tag('cfRule')
        elif params['type'] == 'expression':
            self._xml_start_tag('cfRule', attributes)
            self._write_formula_element(params['criteria'])
            self._xml_end_tag('cfRule')
        elif params['type'] == 'iconSet':
            self._xml_start_tag('cfRule', attributes)
            self._write_icon_set(params)
            self._xml_end_tag('cfRule')
def _write_formula_element(self, formula):
# Write the <formula> element.
# Check if the formula is a number.
try:
float(formula)
except ValueError:
# Not a number. Remove the formula '=' sign if it exists.
if formula.startswith('='):
formula = formula.lstrip('=')
self._xml_data_element('formula', formula)
def _write_color_scale(self, param):
# Write the <colorScale> element.
self._xml_start_tag('colorScale')
self._write_cfvo(param['min_type'], param['min_value'])
if param['mid_type'] is not None:
self._write_cfvo(param['mid_type'], param['mid_value'])
self._write_cfvo(param['max_type'], param['max_value'])
self._write_color('rgb', param['min_color'])
if param['mid_color'] is not None:
self._write_color('rgb', param['mid_color'])
self._write_color('rgb', param['max_color'])
self._xml_end_tag('colorScale')
def _write_data_bar(self, param):
# Write the <dataBar> element.
attributes = []
# Min and max bar lengths in in the spec but not supported directly by
# Excel.
if param.get('min_length'):
attributes.append(('minLength', param['min_length']))
if param.get('max_length'):
attributes.append(('maxLength', param['max_length']))
if param.get('bar_only'):
attributes.append(('showValue', 0))
self._xml_start_tag('dataBar', attributes)
self._write_cfvo(param['min_type'], param['min_value'])
self._write_cfvo(param['max_type'], param['max_value'])
self._write_color('rgb', param['bar_color'])
self._xml_end_tag('dataBar')
def _write_data_bar_ext(self, param):
# Write the <extLst> dataBar extension element.
# Create a pseudo GUID for each unique Excel 2010 data bar.
worksheet_count = self.index + 1
data_bar_count = len(self.data_bars_2010) + 1
guid = "{DA7ABA51-AAAA-BBBB-%04X-%012X}" % (worksheet_count,
data_bar_count)
# Store the 2010 data bar parameters to write the extLst elements.
param['guid'] = guid
self.data_bars_2010.append(param)
self._xml_start_tag('extLst')
self._write_ext('{B025F937-C7B1-47D3-B67F-A62EFF666E3E}')
self._xml_data_element('x14:id', guid)
self._xml_end_tag('ext')
self._xml_end_tag('extLst')
def _write_icon_set(self, param):
# Write the <iconSet> element.
attributes = []
# Don't set attribute for default style.
if param['icon_style'] != '3TrafficLights':
attributes = [('iconSet', param['icon_style'])]
if param.get('icons_only'):
attributes.append(('showValue', 0))
if param.get('reverse_icons'):
attributes.append(('reverse', 1))
self._xml_start_tag('iconSet', attributes)
# Write the properties for different icon styles.
for icon in reversed(param['icons']):
self._write_cfvo(
icon['type'],
icon['value'],
icon['criteria'])
self._xml_end_tag('iconSet')
def _write_cfvo(self, cf_type, val, criteria=None):
# Write the <cfvo> element.
attributes = [('type', cf_type)]
if val is not None:
attributes.append(('val', val))
if criteria:
attributes.append(('gte', 0))
self._xml_empty_tag('cfvo', attributes)
def _write_color(self, name, value):
# Write the <color> element.
attributes = [(name, value)]
self._xml_empty_tag('color', attributes)
def _write_selections(self):
# Write the <selection> elements.
for selection in self.selections:
self._write_selection(*selection)
def _write_selection(self, pane, active_cell, sqref):
# Write the <selection> element.
attributes = []
if pane:
attributes.append(('pane', pane))
if active_cell:
attributes.append(('activeCell', active_cell))
if sqref:
attributes.append(('sqref', sqref))
self._xml_empty_tag('selection', attributes)
def _write_panes(self):
# Write the frozen or split <pane> elements.
panes = self.panes
if not len(panes):
return
if panes[4] == 2:
self._write_split_panes(*panes)
else:
self._write_freeze_panes(*panes)
    def _write_freeze_panes(self, row, col, top_row, left_col, pane_type):
        # Write the <pane> element for freeze panes.
        #
        # row/col give the split position, top_row/left_col the top-left
        # visible cell, and pane_type selects frozen (0), frozenSplit (1)
        # or split (other) state.
        attributes = []

        y_split = row
        x_split = col
        top_left_cell = xl_rowcol_to_cell(top_row, left_col)
        active_pane = ''
        state = ''
        active_cell = ''
        sqref = ''

        # Move user cell selection to the panes: the first stored
        # selection is consumed and self.selections is rebuilt below.
        if self.selections:
            (_, active_cell, sqref) = self.selections[0]
            self.selections = []

        # Set the active pane and re-create the selection entries for
        # each pane region.
        if row and col:
            active_pane = 'bottomRight'

            row_cell = xl_rowcol_to_cell(row, 0)
            col_cell = xl_rowcol_to_cell(0, col)

            self.selections.append(['topRight', col_cell, col_cell])
            self.selections.append(['bottomLeft', row_cell, row_cell])
            self.selections.append(['bottomRight', active_cell, sqref])
        elif col:
            active_pane = 'topRight'
            self.selections.append(['topRight', active_cell, sqref])
        else:
            active_pane = 'bottomLeft'
            self.selections.append(['bottomLeft', active_cell, sqref])

        # Set the pane type/state attribute.
        if pane_type == 0:
            state = 'frozen'
        elif pane_type == 1:
            state = 'frozenSplit'
        else:
            state = 'split'

        if x_split:
            attributes.append(('xSplit', x_split))

        if y_split:
            attributes.append(('ySplit', y_split))

        attributes.append(('topLeftCell', top_left_cell))
        attributes.append(('activePane', active_pane))
        attributes.append(('state', state))

        self._xml_empty_tag('pane', attributes)
    def _write_split_panes(self, row, col, top_row, left_col, pane_type):
        # Write the <pane> element for split panes.
        #
        # Split positions are written in 1/20 twip units rather than
        # row/column indexes, hence the conversions below.
        attributes = []
        has_selection = 0
        active_pane = ''
        active_cell = ''
        sqref = ''

        y_split = row
        x_split = col

        # Move user cell selection to the panes: the first stored
        # selection is consumed and self.selections is rebuilt below.
        if self.selections:
            (_, active_cell, sqref) = self.selections[0]
            self.selections = []
            has_selection = 1

        # Convert the row and col to 1/20 twip units with padding.
        if y_split:
            y_split = int(20 * y_split + 300)

        if x_split:
            x_split = self._calculate_x_split_width(x_split)

        # For non-explicit topLeft definitions, estimate the cell offset based
        # on the pixels dimensions. This is only a workaround and doesn't take
        # adjusted cell dimensions into account.
        if top_row == row and left_col == col:
            top_row = int(0.5 + (y_split - 300) / 20 / 15)
            left_col = int(0.5 + (x_split - 390) / 20 / 3 * 4 / 64)

        top_left_cell = xl_rowcol_to_cell(top_row, left_col)

        # If there is no selection set the active cell to the top left cell.
        if not has_selection:
            active_cell = top_left_cell
            sqref = top_left_cell

        # Set the Cell selections for each pane region.
        if row and col:
            active_pane = 'bottomRight'

            row_cell = xl_rowcol_to_cell(top_row, 0)
            col_cell = xl_rowcol_to_cell(0, left_col)

            self.selections.append(['topRight', col_cell, col_cell])
            self.selections.append(['bottomLeft', row_cell, row_cell])
            self.selections.append(['bottomRight', active_cell, sqref])
        elif col:
            active_pane = 'topRight'
            self.selections.append(['topRight', active_cell, sqref])
        else:
            active_pane = 'bottomLeft'
            self.selections.append(['bottomLeft', active_cell, sqref])

        # Format splits to the same precision as Excel.
        if x_split:
            attributes.append(('xSplit', "%.16g" % x_split))

        if y_split:
            attributes.append(('ySplit', "%.16g" % y_split))

        attributes.append(('topLeftCell', top_left_cell))

        # The activePane attribute is only written when there was a
        # pre-existing user selection.
        if has_selection:
            attributes.append(('activePane', active_pane))

        self._xml_empty_tag('pane', attributes)
def _calculate_x_split_width(self, width):
# Convert column width from user units to pane split width.
max_digit_width = 7 # For Calabri 11.
padding = 5
# Convert to pixels.
if width < 1:
pixels = int(width * (max_digit_width + padding) + 0.5)
else:
pixels = int(width * max_digit_width + 0.5) + padding
# Convert to points.
points = pixels * 3 / 4
# Convert to twips (twentieths of a point).
twips = points * 20
# Add offset/padding.
width = twips + 390
return width
def _write_table_parts(self):
# Write the <tableParts> element.
tables = self.tables
count = len(tables)
# Return if worksheet doesn't contain any tables.
if not count:
return
attributes = [('count', count,)]
self._xml_start_tag('tableParts', attributes)
for _ in tables:
# Write the tablePart element.
self.rel_count += 1
self._write_table_part(self.rel_count)
self._xml_end_tag('tableParts')
def _write_table_part(self, r_id):
# Write the <tablePart> element.
r_id = 'rId' + str(r_id)
attributes = [('r:id', r_id,)]
self._xml_empty_tag('tablePart', attributes)
def _write_ext_list(self):
# Write the <extLst> element for data bars and sparklines.
has_data_bars = len(self.data_bars_2010)
has_sparklines = len(self.sparklines)
if not has_data_bars and not has_sparklines:
return
# Write the extLst element.
self._xml_start_tag('extLst')
if has_data_bars:
self._write_ext_list_data_bars()
if has_sparklines:
self._write_ext_list_sparklines()
self._xml_end_tag('extLst')
def _write_ext_list_data_bars(self):
# Write the Excel 2010 data_bar subelements.
self._write_ext('{78C0D931-6437-407d-A8EE-F0AAD7539E65}')
self._xml_start_tag('x14:conditionalFormattings')
# Write the Excel 2010 conditional formatting data bar elements.
for data_bar in self.data_bars_2010:
# Write the x14:conditionalFormatting element.
self._write_conditional_formatting_2010(data_bar)
self._xml_end_tag('x14:conditionalFormattings')
self._xml_end_tag('ext')
    def _write_conditional_formatting_2010(self, data_bar):
        # Write the <x14:conditionalFormatting> element for a single
        # Excel 2010 data bar. The nesting/ordering of the sub-elements
        # below matches the x14 extension schema.
        xmlns_xm = 'http://schemas.microsoft.com/office/excel/2006/main'
        attributes = [('xmlns:xm', xmlns_xm)]

        self._xml_start_tag('x14:conditionalFormatting', attributes)

        # Write the x14:cfRule element (left open; closed below).
        self._write_x14_cf_rule(data_bar)

        # Write the x14:dataBar element (left open; closed below).
        self._write_x14_data_bar(data_bar)

        # Write the x14 max and min data bar value objects.
        self._write_x14_cfvo(data_bar['x14_min_type'], data_bar['min_value'])
        self._write_x14_cfvo(data_bar['x14_max_type'], data_bar['max_value'])

        if not data_bar['bar_no_border']:
            # Write the x14:borderColor element.
            self._write_x14_border_color(data_bar['bar_border_color'])

        # Write the x14:negativeFillColor element.
        if not data_bar['bar_negative_color_same']:
            self._write_x14_negative_fill_color(
                data_bar['bar_negative_color'])

        # Write the x14:negativeBorderColor element.
        if (not data_bar['bar_no_border'] and
                not data_bar['bar_negative_border_color_same']):
            self._write_x14_negative_border_color(
                data_bar['bar_negative_border_color'])

        # Write the x14:axisColor element, unless the axis is disabled.
        if data_bar['bar_axis_position'] != 'none':
            self._write_x14_axis_color(data_bar['bar_axis_color'])

        # Close the elements opened by the helpers above.
        self._xml_end_tag('x14:dataBar')
        self._xml_end_tag('x14:cfRule')

        # Write the xm:sqref element with the data bar's cell range.
        self._xml_data_element('xm:sqref', data_bar['range'])

        self._xml_end_tag('x14:conditionalFormatting')
def _write_x14_cf_rule(self, data_bar):
# Write the <x14:cfRule> element.
rule_type = 'dataBar'
guid = data_bar['guid']
attributes = [('type', rule_type), ('id', guid)]
self._xml_start_tag('x14:cfRule', attributes)
def _write_x14_data_bar(self, data_bar):
# Write the <x14:dataBar> element.
min_length = 0
max_length = 100
attributes = [
('minLength', min_length),
('maxLength', max_length),
]
if not data_bar['bar_no_border']:
attributes.append(('border', 1))
if data_bar['bar_solid']:
attributes.append(('gradient', 0))
if data_bar['bar_direction'] == 'left':
attributes.append(('direction', 'leftToRight'))
if data_bar['bar_direction'] == 'right':
attributes.append(('direction', 'rightToLeft'))
if data_bar['bar_negative_color_same']:
attributes.append(('negativeBarColorSameAsPositive', 1))
if (not data_bar['bar_no_border'] and
not data_bar['bar_negative_border_color_same']):
attributes.append(('negativeBarBorderColorSameAsPositive', 0))
if data_bar['bar_axis_position'] == 'middle':
attributes.append(('axisPosition', 'middle'))
if data_bar['bar_axis_position'] == 'none':
attributes.append(('axisPosition', 'none'))
self._xml_start_tag('x14:dataBar', attributes)
def _write_x14_cfvo(self, rule_type, value):
# Write the <x14:cfvo> element.
attributes = [('type', rule_type)]
if rule_type in ('min', 'max', 'autoMin', 'autoMax'):
self._xml_empty_tag('x14:cfvo', attributes)
else:
self._xml_start_tag('x14:cfvo', attributes)
self._xml_data_element('xm:f', value)
self._xml_end_tag('x14:cfvo')
def _write_x14_border_color(self, rgb):
# Write the <x14:borderColor> element.
attributes = [('rgb', rgb)]
self._xml_empty_tag('x14:borderColor', attributes)
def _write_x14_negative_fill_color(self, rgb):
# Write the <x14:negativeFillColor> element.
attributes = [('rgb', rgb)]
self._xml_empty_tag('x14:negativeFillColor', attributes)
def _write_x14_negative_border_color(self, rgb):
# Write the <x14:negativeBorderColor> element.
attributes = [('rgb', rgb)]
self._xml_empty_tag('x14:negativeBorderColor', attributes)
def _write_x14_axis_color(self, rgb):
# Write the <x14:axisColor> element.
attributes = [('rgb', rgb)]
self._xml_empty_tag('x14:axisColor', attributes)
def _write_ext_list_sparklines(self):
# Write the sparkline extension sub-elements.
self._write_ext('{05C60535-1F16-4fd2-B633-F4F36F0B64E0}')
# Write the x14:sparklineGroups element.
self._write_sparkline_groups()
# Write the sparkline elements.
for sparkline in reversed(self.sparklines):
# Write the x14:sparklineGroup element.
self._write_sparkline_group(sparkline)
# Write the x14:colorSeries element.
self._write_color_series(sparkline['series_color'])
# Write the x14:colorNegative element.
self._write_color_negative(sparkline['negative_color'])
# Write the x14:colorAxis element.
self._write_color_axis()
# Write the x14:colorMarkers element.
self._write_color_markers(sparkline['markers_color'])
# Write the x14:colorFirst element.
self._write_color_first(sparkline['first_color'])
# Write the x14:colorLast element.
self._write_color_last(sparkline['last_color'])
# Write the x14:colorHigh element.
self._write_color_high(sparkline['high_color'])
# Write the x14:colorLow element.
self._write_color_low(sparkline['low_color'])
if sparkline['date_axis']:
self._xml_data_element('xm:f', sparkline['date_axis'])
self._write_sparklines(sparkline)
self._xml_end_tag('x14:sparklineGroup')
self._xml_end_tag('x14:sparklineGroups')
self._xml_end_tag('ext')
def _write_sparklines(self, sparkline):
# Write the <x14:sparklines> element and <x14:sparkline> sub-elements.
# Write the sparkline elements.
self._xml_start_tag('x14:sparklines')
for i in range(sparkline['count']):
spark_range = sparkline['ranges'][i]
location = sparkline['locations'][i]
self._xml_start_tag('x14:sparkline')
self._xml_data_element('xm:f', spark_range)
self._xml_data_element('xm:sqref', location)
self._xml_end_tag('x14:sparkline')
self._xml_end_tag('x14:sparklines')
def _write_ext(self, uri):
# Write the <ext> element.
schema = 'http://schemas.microsoft.com/office/'
xmlns_x14 = schema + 'spreadsheetml/2009/9/main'
attributes = [
('xmlns:x14', xmlns_x14),
('uri', uri),
]
self._xml_start_tag('ext', attributes)
def _write_sparkline_groups(self):
# Write the <x14:sparklineGroups> element.
xmlns_xm = 'http://schemas.microsoft.com/office/excel/2006/main'
attributes = [('xmlns:xm', xmlns_xm)]
self._xml_start_tag('x14:sparklineGroups', attributes)
    def _write_sparkline_group(self, options):
        # Write the opening <x14:sparklineGroup> element. The element is
        # closed by the caller. Note: this method also mutates *options*
        # by storing 'cust_max'/'cust_min' for the axis-type attributes.
        #
        # Example for order.
        #
        # <x14:sparklineGroup
        #     manualMax="0"
        #     manualMin="0"
        #     lineWeight="2.25"
        #     type="column"
        #     dateAxis="1"
        #     displayEmptyCellsAs="span"
        #     markers="1"
        #     high="1"
        #     low="1"
        #     first="1"
        #     last="1"
        #     negative="1"
        #     displayXAxis="1"
        #     displayHidden="1"
        #     minAxisType="custom"
        #     maxAxisType="custom"
        #     rightToLeft="1">
        #
        empty = options.get('empty')
        attributes = []

        # 'group' axis scaling has no manual value; any other value is
        # written as manualMax/manualMin with a 'custom' axis type.
        if options.get('max') is not None:
            if options['max'] == 'group':
                options['cust_max'] = 'group'
            else:
                attributes.append(('manualMax', options['max']))
                options['cust_max'] = 'custom'

        if options.get('min') is not None:
            if options['min'] == 'group':
                options['cust_min'] = 'group'
            else:
                attributes.append(('manualMin', options['min']))
                options['cust_min'] = 'custom'

        # Ignore the default type attribute (line).
        if options['type'] != 'line':
            attributes.append(('type', options['type']))

        if options.get('weight'):
            attributes.append(('lineWeight', options['weight']))

        if options.get('date_axis'):
            attributes.append(('dateAxis', 1))

        if empty:
            attributes.append(('displayEmptyCellsAs', empty))

        # Marker/point highlighting flags.
        if options.get('markers'):
            attributes.append(('markers', 1))

        if options.get('high'):
            attributes.append(('high', 1))

        if options.get('low'):
            attributes.append(('low', 1))

        if options.get('first'):
            attributes.append(('first', 1))

        if options.get('last'):
            attributes.append(('last', 1))

        if options.get('negative'):
            attributes.append(('negative', 1))

        if options.get('axis'):
            attributes.append(('displayXAxis', 1))

        if options.get('hidden'):
            attributes.append(('displayHidden', 1))

        # Axis types set above from the min/max options.
        if options.get('cust_min'):
            attributes.append(('minAxisType', options['cust_min']))

        if options.get('cust_max'):
            attributes.append(('maxAxisType', options['cust_max']))

        if options.get('reverse'):
            attributes.append(('rightToLeft', 1))

        self._xml_start_tag('x14:sparklineGroup', attributes)
def _write_spark_color(self, element, color):
# Helper function for the sparkline color functions below.
attributes = []
if color.get('rgb'):
attributes.append(('rgb', color['rgb']))
if color.get('theme'):
attributes.append(('theme', color['theme']))
if color.get('tint'):
attributes.append(('tint', color['tint']))
self._xml_empty_tag(element, attributes)
    def _write_color_series(self, color):
        # Write the <x14:colorSeries> element. Delegates to
        # _write_spark_color for the attribute handling.
        self._write_spark_color('x14:colorSeries', color)

    def _write_color_negative(self, color):
        # Write the <x14:colorNegative> element.
        self._write_spark_color('x14:colorNegative', color)

    def _write_color_axis(self):
        # Write the <x14:colorAxis> element. The axis color is fixed
        # to black (FF000000).
        self._write_spark_color('x14:colorAxis', {'rgb': 'FF000000'})

    def _write_color_markers(self, color):
        # Write the <x14:colorMarkers> element.
        self._write_spark_color('x14:colorMarkers', color)

    def _write_color_first(self, color):
        # Write the <x14:colorFirst> element.
        self._write_spark_color('x14:colorFirst', color)

    def _write_color_last(self, color):
        # Write the <x14:colorLast> element.
        self._write_spark_color('x14:colorLast', color)

    def _write_color_high(self, color):
        # Write the <x14:colorHigh> element.
        self._write_spark_color('x14:colorHigh', color)

    def _write_color_low(self, color):
        # Write the <x14:colorLow> element.
        self._write_spark_color('x14:colorLow', color)
def _write_phonetic_pr(self):
# Write the <phoneticPr> element.
attributes = [
('fontId', '0'),
('type', 'noConversion'),
]
self._xml_empty_tag('phoneticPr', attributes)
def _write_ignored_errors(self):
# Write the <ignoredErrors> element.
if not self.ignored_errors:
return
self._xml_start_tag('ignoredErrors')
if self.ignored_errors.get('number_stored_as_text'):
range = self.ignored_errors['number_stored_as_text']
self._write_ignored_error('numberStoredAsText', range)
if self.ignored_errors.get('eval_error'):
range = self.ignored_errors['eval_error']
self._write_ignored_error('evalError', range)
if self.ignored_errors.get('formula_differs'):
range = self.ignored_errors['formula_differs']
self._write_ignored_error('formula', range)
if self.ignored_errors.get('formula_range'):
range = self.ignored_errors['formula_range']
self._write_ignored_error('formulaRange', range)
if self.ignored_errors.get('formula_unlocked'):
range = self.ignored_errors['formula_unlocked']
self._write_ignored_error('unlockedFormula', range)
if self.ignored_errors.get('empty_cell_reference'):
range = self.ignored_errors['empty_cell_reference']
self._write_ignored_error('emptyCellReference', range)
if self.ignored_errors.get('list_data_validation'):
range = self.ignored_errors['list_data_validation']
self._write_ignored_error('listDataValidation', range)
if self.ignored_errors.get('calculated_column'):
range = self.ignored_errors['calculated_column']
self._write_ignored_error('calculatedColumn', range)
if self.ignored_errors.get('two_digit_text_year'):
range = self.ignored_errors['two_digit_text_year']
self._write_ignored_error('twoDigitTextYear', range)
self._xml_end_tag('ignoredErrors')
def _write_ignored_error(self, type, range):
# Write the <ignoredError> element.
attributes = [
('sqref', range),
(type, 1),
]
self._xml_empty_tag('ignoredError', attributes)
| [
"[email protected]"
] |
Subsets and Splits