code
stringlengths 2
1.05M
| repo_name
stringlengths 5
104
| path
stringlengths 4
251
| language
stringclasses 1
value | license
stringclasses 15
values | size
int32 2
1.05M
|
---|---|---|---|---|---|
# !/usr/bin/python
# Written against python 2.7
# Matasano Problem 15
#
# Created by 404 on 15/11/26.
#
# Copyright (c) 2015 404. All rights reserved.
def pkcs7pad_remove(message):
    """Strip PKCS#7 padding from *message* and return the unpadded payload.

    Raises ValueError when the padding is malformed (Matasano challenge 15
    explicitly requires rejecting bad padding), instead of relying on
    `assert`, which is stripped when Python runs with -O.

    :param message: padded plaintext string
    :return: message without its padding bytes
    :raises ValueError: empty input, pad length out of range, or a padding
        byte that does not match the pad length
    """
    if not message:
        raise ValueError('cannot strip padding from an empty message')
    last = message[-1]
    pad_len = ord(last)
    # A valid pad length is between 1 and the full message length.
    if pad_len < 1 or pad_len > len(message):
        raise ValueError('bad PKCS#7 padding length: %d' % pad_len)
    length_message = len(message) - pad_len
    # range() instead of py2-only xrange(); every pad byte must equal `last`.
    for i in range(length_message, len(message)):
        if message[i] != last:
            raise ValueError('bad PKCS#7 padding byte at index %d' % i)
    return message[:length_message]
# Demo for Matasano problem 15: strips the four 0x04 padding bytes.
# NOTE: `print` statement syntax — this file targets Python 2.7 only.
message = 'ICE ICE BABY\x04\x04\x04\x04'
print pkcs7pad_remove(message)
| Han0nly/crypto | solutions/15.py | Python | mit | 445 |
from django.shortcuts import render
from django.http import HttpResponse
import requests
def main(request):
    """Landing page: render the static welcome template."""
    return render(request, 'welcome.html')
def search(request):
    """Search page: render the static search template."""
    return render(request, 'search.html')
def location(request):
    """Location page: render the static location template."""
    return render(request, 'location.html')
| findapad/find_a_pad | find_a_pad_app/views.py | Python | mit | 291 |
#!/usr/bin/env python
#
# redirect data from a TCP/IP connection to a serial port and vice versa
# using RFC 2217
#
# (C) 2009-2015 Chris Liechti <[email protected]>
#
# SPDX-License-Identifier: BSD-3-Clause
import logging
import socket
import sys
import time
import threading
import serial
import serial.rfc2217
class Redirector(object):
    """Bridge one TCP client to one serial port using RFC 2217.

    Three flows run concurrently once shortcircuit() is called:
      - reader thread:   serial -> socket, with RFC 2217/Telnet escaping
      - poll thread:     modem status lines -> client notifications
      - calling thread:  socket -> serial, via writer()
    """

    def __init__(self, serial_instance, socket, debug=False):
        """
        @param serial_instance: an open serial.Serial instance
        @param socket: the connected client socket (this parameter shadows
            the socket module inside this method)
        @param debug: attach a logger to the RFC 2217 port manager when True
        """
        self.serial = serial_instance
        self.socket = socket
        # Serializes socket writes: both the reader thread and the RFC 2217
        # port manager (which calls our write()) send data to the client.
        self._write_lock = threading.Lock()
        self.rfc2217 = serial.rfc2217.PortManager(
            self.serial,
            self,
            logger=logging.getLogger('rfc2217.server') if debug else None
        )
        self.log = logging.getLogger('redirector')

    def statusline_poller(self):
        # Polls modem status lines once per second so changes can be reported
        # to the RFC 2217 client. Relies on self.alive, which is created in
        # shortcircuit() — that method must run before this thread starts.
        self.log.debug('status line poll thread started')
        while self.alive:
            time.sleep(1)
            self.rfc2217.check_modem_lines()
        self.log.debug('status line poll thread terminated')

    def shortcircuit(self):
        """connect the serial port to the TCP port by copying everything
        from one side to the other"""
        self.alive = True
        self.thread_read = threading.Thread(target=self.reader)
        self.thread_read.daemon = True
        self.thread_read.name = 'serial->socket'
        self.thread_read.start()
        self.thread_poll = threading.Thread(target=self.statusline_poller)
        self.thread_poll.daemon = True
        self.thread_poll.name = 'status line poll'
        self.thread_poll.start()
        # writer() runs in the calling thread and blocks until disconnect.
        self.writer()

    def reader(self):
        """loop forever and copy serial->socket"""
        self.log.debug('reader thread started')
        while self.alive:
            try:
                data = self.serial.read()  #self.serial.inWaiting or 1)
                if data:
                    # escape outgoing data when needed (Telnet IAC (0xff) character)
                    self.write(serial.to_bytes(self.rfc2217.escape(data)))
            except socket.error as msg:
                self.log.error('%s' % (msg,))
                # probably got disconnected
                break
        self.alive = False
        self.log.debug('reader thread terminated')

    def write(self, data):
        """thread safe socket write with no data escaping. used to send telnet stuff"""
        with self._write_lock:
            self.socket.sendall(data)

    def writer(self):
        """loop forever and copy socket->serial"""
        while self.alive:
            try:
                data = self.socket.recv(1024)
                if not data:
                    # empty recv() means the client closed the connection
                    break
                # filter() strips RFC 2217 / Telnet negotiation sequences
                self.serial.write(serial.to_bytes(self.rfc2217.filter(data)))
            except socket.error as msg:
                self.log.error('%s' % (msg,))
                # probably got disconnected
                break
        self.stop()

    def stop(self):
        """Stop copying"""
        self.log.debug('stopping')
        if self.alive:
            self.alive = False
            # reader thread exits on the next serial read timeout; the poll
            # thread exits on its next one-second tick
            self.thread_read.join()
            self.thread_poll.join()
if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser(
        description="RFC 2217 Serial to Network (TCP/IP) redirector.",
        epilog="""\
NOTE: no security measures are implemented. Anyone can remotely connect
to this service over the network.
Only one connection at once is supported. When the connection is terminated
it waits for the next connect.
""")

    parser.add_argument('SERIALPORT')

    parser.add_argument(
        '-p', '--localport',
        type=int,
        help='local TCP port, default: %(default)s',
        metavar='TCPPORT',
        default=2217
    )

    parser.add_argument(
        '-v', '--verbose',
        dest='verbosity',
        action='count',
        help='print more diagnostic messages (option can be given multiple times)',
        default=0
    )

    args = parser.parse_args()

    # Clamp verbosity so it can safely index the 4-entry level tuple below.
    if args.verbosity > 3:
        args.verbosity = 3
    level = (
        logging.WARNING,
        logging.INFO,
        logging.DEBUG,
        logging.NOTSET,
    )[args.verbosity]
    logging.basicConfig(level=logging.INFO)
    #~ logging.getLogger('root').setLevel(logging.INFO)
    logging.getLogger('rfc2217').setLevel(level)

    # connect to serial port
    ser = serial.serial_for_url(args.SERIALPORT, do_not_open=True)
    ser.timeout = 3     # required so that the reader thread can exit
    # reset control line as no _remote_ "terminal" has been connected yet
    ser.dtr = False
    ser.rts = False

    logging.info("RFC 2217 TCP/IP to Serial redirector - type Ctrl-C / BREAK to quit")

    try:
        ser.open()
    except serial.SerialException as e:
        logging.error("Could not open serial port {}: {}".format(ser.name, e))
        sys.exit(1)

    logging.info("Serving serial port: {}".format(ser.name))
    #settings = ser.get_settings()

    srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    srv.bind(('', args.localport))
    srv.listen(1)
    logging.info("TCP/IP port: {}".format(args.localport))
    # Accept loop: one client at a time; each connection runs a Redirector
    # until disconnect, then we wait for the next client.
    while True:
        try:
            client_socket, addr = srv.accept()
            logging.info('Connected by {}:{}'.format(addr[0], addr[1]))
            client_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
            ser.rts = True
            ser.dtr = True
            # enter network <-> serial loop
            r = Redirector(
                ser,
                client_socket,
                args.verbosity > 0
            )
            try:
                r.shortcircuit()
            finally:
                logging.info('Disconnected')
                r.stop()
                client_socket.close()
                ser.dtr = False
                ser.rts = False
                # Restore port settings (may have been changed by RFC 2217
                # capable client)
                #ser.applySettings(settings)
        except KeyboardInterrupt:
            sys.stdout.write('\n')
            break
        except socket.error as msg:
            logging.error(str(msg))

    logging.info('--- exit ---')
| raphaelbs/HomeR | NodeJS/node_modules/modbus-serial/examples/telnet.py | Python | mit | 6,313 |
from .types import Type, BlockType, AbstractWidget
from .connection import Connection
from .filedialog import FileDialog
from .notification import Notification
from .play_button import PlayButton
| AlvarBer/Persimmon | persimmon/view/util/__init__.py | Python | mit | 197 |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2009-2010 CloudSizzle Team
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import re
def remove_extra_whitespace(text):
    """Collapse every run of whitespace in *text* into a single space.

    :param text: the input string
    :return: *text* with tabs/newlines/multiple spaces reduced to one space
    """
    # Raw string: '\s' in a plain literal is an invalid escape sequence
    # (DeprecationWarning, and a SyntaxWarning on modern Pythons).
    return re.sub(r'\s+', ' ', text)
| jpvanhal/cloudsizzle | cloudsizzle/scrapers/utils.py | Python | mit | 1,215 |
# TODO still necessary?
# from bootstrap import admin_actions_registry
from django.db import models
from django.utils.translation import ugettext as _, ugettext_lazy
# TODO still necessary?
# # admin action to be displayed in action row
# from django.core.urlresolvers import reverse
# admin_actions_registry['ip_assembler'] = lambda: \
# '<a href="%s" class="button">IP Batch Processing</a>' % reverse('admin:batch_process_ips_view')
class LocationLocal(models.Model):
    """
    Location of a local .htaccess file.
    """
    # Absolute or relative filesystem path to the .htaccess file.
    path = models.CharField(max_length=1000)

    def __str__(self):
        """
        Returns the path of this location.
        :return: the path
        :rtype: str
        """
        return self.path

    class Meta:
        app_label = 'ip_assembler'
        verbose_name = ugettext_lazy('Local location')
        verbose_name_plural = ugettext_lazy('Local locations')
# TODO django_fields does not work with Python 3
# class LocationFTP(models.Model):
# """
# Location of an external, via FTP reachable, .htaccess file.
# """
# host = models.CharField(max_length=255, verbose_name=_('Host'))
# username = models.CharField(max_length=255, verbose_name=_('Username'))
# password = fields.EncryptedCharField(cipher='AES', block_type='MODE_CBC', verbose_name=_('Password'))
# path = models.CharField(max_length=1000)
#
# def __unicode__(self):
# """
# Returns the name of the IP.
# :return: the name
# :rtype: unicode
# """
# return u'%(host)s:%(path)s' % {'host': self.host, 'path': self.path}
#
# class Meta:
# app_label = 'ip_assembler'
# verbose_name = ugettext_lazy('FTP location')
# verbose_name_plural = ugettext_lazy('FTP locations')
class IP(models.Model):
    """
    An IPv4 address stored as four separate segments.

    Segments are CharFields rather than integers because unify_ips() replaces
    the last segment of merged addresses with the wildcard character '*'.
    """
    seg_0 = models.CharField(max_length=3, verbose_name=_('Segment 1'))
    seg_1 = models.CharField(max_length=3, verbose_name=_('Segment 2'))
    seg_2 = models.CharField(max_length=3, verbose_name=_('Segment 3'))
    seg_3 = models.CharField(max_length=3, verbose_name=_('Segment 4'))

    @staticmethod
    def batch_add_ips(ips):
        """
        Adds the given list of IPs to the database if the IP is not already there.
        :param ips: list of IPs as dotted-quad strings (e.g. '1.2.3.4')
        :return: number of created IPs
        :type ips: list
        :rtype: int
        """
        ips_created = 0
        if len(ips) > 0:
            # for each ip, check if already existent, if not add
            for ip in ips:
                (s0, s1, s2, s3) = ip.split('.')
                # get_or_create makes the insert idempotent per segment tuple
                (ip_db, is_ip_created) = IP.objects.get_or_create(seg_0=s0, seg_1=s1, seg_2=s2, seg_3=s3, )
                if is_ip_created:
                    ips_created += 1
        return ips_created

    @staticmethod
    def unify_ips():
        """
        Unifies the currently saved IPs.
        Unification is based on last IP segment.
        So if there are is e.g. 192.168.128.121 and 192.168.128.122 they will be merged to 192.168.128.*.
        This is a little aggressive but the spammers are aggressive, too.
        :return: number of merged ips
        :rtype: int
        """
        processed_ips = 0
        ips = {}
        # query for the IPs, also includes the starred IPs
        # (self-join: rows sharing the first three segments but differing
        # in the last one)
        for ip in IP.objects.raw(
                'select distinct a.id, a.seg_0, a.seg_1, a.seg_2 '
                'from ip_assembler_ip a, ip_assembler_ip b '
                'where a.seg_0 = b.seg_0 and a.seg_1 = b.seg_1 and a.seg_2 = b.seg_2 and a.seg_3 != b.seg_3 '
                'order by a.seg_0, a.seg_1, a.seg_2',
        ):
            # group candidate rows by their /24 prefix
            key = '%d.%d.%d' % (int(ip.seg_0), int(ip.seg_1), int(ip.seg_2))
            if not key in ips:
                ips[key] = []
            ips[key].append(ip)

        for key, ip_list in ips.items():
            # check if a starred ip is in list
            starred_ip = None
            for ip in ip_list:
                if ip.seg_3 == '*':
                    starred_ip = ip
            if starred_ip is None:
                # no wildcard entry yet: create one for this prefix
                IP.objects.create(seg_0=ip_list[0].seg_0, seg_1=ip_list[0].seg_1, seg_2=ip_list[0].seg_2, seg_3='*', )

            # delete the other ips
            for ip in ip_list:
                if ip != starred_ip:
                    processed_ips += 1
                    ip.delete()

        return processed_ips

    def __str__(self):
        """
        Returns the dotted representation of the IP.
        :return: the name
        :rtype: str
        """
        return u'%s.%s.%s.%s' % (self.seg_0, self.seg_1, self.seg_2, self.seg_3)

    class Meta:
        app_label = 'ip_assembler'
        verbose_name = ugettext_lazy('IP')
        verbose_name_plural = ugettext_lazy('IPs')
verbose_name_plural = ugettext_lazy('IPs') | dArignac/ip_assembler | ip_assembler/models.py | Python | mit | 4,699 |
from unittest import TestCase
import roxentools
import tempfile
import os
class TestInterfaceCall(TestCase):
    """Tests for roxentools.interface_call() configuration handling."""

    def setUp(self):
        # Create a temporary directory holding the config fixtures.
        self.test_dir = tempfile.mkdtemp()
        self.conf_empty = os.path.join(self.test_dir, "empty.conf")
        with open(self.conf_empty, 'w') as conf:
            conf.write("")
        self.conf_valid = os.path.join(self.test_dir, "valid.conf")
        with open(self.conf_valid, 'w') as conf:
            conf.write('{"username":"user","password":"pass"}')

    def tearDown(self):
        # The original leaked the temp directory on every run.
        import shutil
        shutil.rmtree(self.test_dir, ignore_errors=True)

    def test_config_nonexistant(self):
        # A missing config file must raise IOError; assertRaises replaces
        # the manual passed-flag bookkeeping of the original.
        with self.assertRaises(IOError):
            roxentools.interface_call(conf_file='iwontexist.conf')

    def test_config_empty(self):
        # An empty config file is not valid JSON -> ValueError.
        with self.assertRaises(ValueError):
            roxentools.interface_call(conf_file=self.conf_empty)

    def test_config_invalid(self):
        # Credentials point at localhost, so any connection error will do.
        with self.assertRaises(
                Exception,
                msg="Should have failed to connect to localhost with user:pass"):
            roxentools.interface_call(conf_file=self.conf_valid)
| whojarr/roxentools | tests/interface/test_interface_call.py | Python | mit | 1,289 |
def do_monkeypatch():
    """Monkey-patch flask_admin.menu.MenuView.get_url.

    Replaces the stock implementation with one that builds the URL from the
    view's endpoint plus its `_default_view` attribute.
    """
    def get_url(self):
        # NOTE(review): relies on the private `_default_view` attribute of
        # flask-admin views — confirm against the flask-admin version in use.
        return self._view.get_url('%s.%s' % (self._view.endpoint, self._view._default_view))
    # Import deferred so the patch is only loaded when explicitly applied.
    import flask_admin.menu
    flask_admin.menu.MenuView.get_url = get_url
"""
flyingsphinx
~~~~~~~~~~~~
:copyright: (c) 2012 by Pat Allan
:license: MIT, see LICENCE for more details.
"""
# Package metadata.
__title__ = 'flyingsphinx'
__version__ = '0.1.0'
__author__ = 'Pat Allan'
__license__ = 'MIT'
__copyright__ = 'Copyright 2012 Pat Allan'
from .api import API
from .cli import CLI
from .configuration import Configuration
from .index import Index
from .sphinx import Sphinx
def cli():
    """Console entry point: argv[1] is the command, the rest its arguments."""
    import sys
    CLI(sys.argv[1], sys.argv[2:])
def configuration():
    """Return a Configuration bound to a fresh API client."""
    return Configuration(API())
def index():
    """Return an Index bound to a fresh API client."""
    return Index(API())
def info():
    """Fetch account/service info from the API root endpoint."""
    return API().get('/')
def sphinx():
    """Return a Sphinx controller bound to a fresh API client."""
    return Sphinx(API())
| flying-sphinx/flying-sphinx-py | flyingsphinx/__init__.py | Python | mit | 655 |
import os
import sys
import tempfile
import unittest
import mock
import numpy
import chainer
from chainer.backends import cuda
from chainer import link
from chainer import links
from chainer import optimizers
from chainer.serializers import hdf5
from chainer import testing
from chainer.testing import attr
# h5py is an optional dependency; every test class below is skipped when
# it is not installed.
if hdf5._available:
    import h5py
@unittest.skipUnless(hdf5._available, 'h5py is not available')
class TestHDF5Serializer(unittest.TestCase):
    """Tests writing arrays, scalars and None through hdf5.HDF5Serializer."""

    def setUp(self):
        # Serialize into a fresh temporary HDF5 file, compression level 3.
        fd, path = tempfile.mkstemp()
        os.close(fd)
        self.temp_file_path = path
        self.hdf5file = h5py.File(path, 'w')
        self.serializer = hdf5.HDF5Serializer(self.hdf5file, compression=3)
        self.data = numpy.random.uniform(-1, 1, (2, 3)).astype(numpy.float32)

    def tearDown(self):
        # hasattr guards tolerate a setUp that failed part-way through.
        if hasattr(self, 'hdf5file'):
            self.hdf5file.close()
        if hasattr(self, 'temp_file_path'):
            os.remove(self.temp_file_path)

    def test_get_item(self):
        # Indexing yields a child serializer scoped to the subgroup, with
        # the compression setting inherited.
        child = self.serializer['x']
        self.assertIsInstance(child, hdf5.HDF5Serializer)
        self.assertEqual(child.group.name, '/x')
        self.assertEqual(child.compression, 3)

    def check_serialize(self, data):
        # Serializing must create a dataset matching shape/size/dtype,
        # apply the compression options, and return the input unchanged.
        ret = self.serializer('w', data)
        dset = self.hdf5file['w']

        self.assertIsInstance(dset, h5py.Dataset)
        self.assertEqual(dset.shape, data.shape)
        self.assertEqual(dset.size, data.size)
        self.assertEqual(dset.dtype, data.dtype)

        read = numpy.empty((2, 3), dtype=numpy.float32)
        dset.read_direct(read)
        numpy.testing.assert_array_equal(read, cuda.to_cpu(data))

        self.assertEqual(dset.compression_opts, 3)

        self.assertIs(ret, data)

    def test_serialize_cpu(self):
        self.check_serialize(self.data)

    @attr.gpu
    def test_serialize_gpu(self):
        self.check_serialize(cuda.to_gpu(self.data))

    def test_serialize_scalar(self):
        # Scalars are stored as 0-d datasets without compression.
        ret = self.serializer('x', 10)
        dset = self.hdf5file['x']

        self.assertIsInstance(dset, h5py.Dataset)
        self.assertEqual(dset.shape, ())
        self.assertEqual(dset.size, 1)
        self.assertEqual(dset.dtype, int)

        read = numpy.empty((), dtype=numpy.int32)
        dset.read_direct(read)
        self.assertEqual(read, 10)

        self.assertEqual(dset.compression_opts, None)

        self.assertIs(ret, 10)

    @unittest.skipUnless(hdf5._available and
                         h5py.version.version_tuple >= (2, 7, 0),
                         'h5py>=2.7.0 is not available')
    def test_serialize_none(self):
        # None maps to an h5py.Empty dataset (shape is None).
        ret = self.serializer('x', None)
        self.assertIs(ret, None)
        dset = self.hdf5file['x']
        self.assertIsInstance(dset, h5py.Dataset)
        self.assertIs(dset.shape, None)
@unittest.skipUnless(hdf5._available, 'h5py is not available')
class TestHDF5Deserializer(unittest.TestCase):
    """Tests reading arrays, scalars, strings and None back via
    hdf5.HDF5Deserializer."""

    def setUp(self):
        # Write a known fixture file: a group 'x', an array 'y', a scalar
        # 'z' and (on h5py>=2.7) an empty dataset 'w'; then reopen read-only.
        self.data = numpy.random.uniform(-1, 1, (2, 3)).astype(numpy.float32)
        fd, path = tempfile.mkstemp()
        os.close(fd)
        self.temp_file_path = path
        with h5py.File(path, 'w') as f:
            f.require_group('x')
            f.create_dataset('y', data=self.data)
            f.create_dataset('z', data=numpy.asarray(10))
            # h5py.Empty is introduced from 2.7.0
            if h5py.version.version_tuple >= (2, 7, 0):
                f.create_dataset('w', data=h5py.Empty('f'))

        self.hdf5file = h5py.File(path, 'r')
        self.deserializer = hdf5.HDF5Deserializer(self.hdf5file)

    def tearDown(self):
        if hasattr(self, 'hdf5file'):
            self.hdf5file.close()
        if hasattr(self, 'temp_file_path'):
            os.remove(self.temp_file_path)

    def test_get_item(self):
        # Indexing yields a child deserializer scoped to the subgroup.
        child = self.deserializer['x']
        self.assertIsInstance(child, hdf5.HDF5Deserializer)
        self.assertEqual(child.group.name, '/x')

    def check_deserialize(self, y):
        # Deserializing into a provided buffer fills it and returns it.
        ret = self.deserializer('y', y)
        numpy.testing.assert_array_equal(cuda.to_cpu(y), self.data)
        self.assertIs(ret, y)

    def check_deserialize_none_value(self, y):
        # Passing None as the value makes the deserializer allocate/return
        # the stored data itself.
        ret = self.deserializer('y', None)
        numpy.testing.assert_array_equal(cuda.to_cpu(ret), self.data)

    def test_deserialize_cpu(self):
        y = numpy.empty((2, 3), dtype=numpy.float32)
        self.check_deserialize(y)

    def test_deserialize_none_value_cpu(self):
        y = numpy.empty((2, 3), dtype=numpy.float32)
        self.check_deserialize_none_value(y)

    @attr.gpu
    def test_deserialize_gpu(self):
        y = numpy.empty((2, 3), dtype=numpy.float32)
        self.check_deserialize(cuda.to_gpu(y))

    @attr.gpu
    def test_deserialize_none_value_gpu(self):
        y = numpy.empty((2, 3), dtype=numpy.float32)
        self.check_deserialize_none_value(cuda.to_gpu(y))

    def test_deserialize_scalar(self):
        # Stored scalar (10) wins over the passed-in value (5).
        z = 5
        ret = self.deserializer('z', z)
        self.assertEqual(ret, 10)

    def test_string(self):
        # Round-trip a plain string through a separate file.
        fd, path = tempfile.mkstemp()
        os.close(fd)
        try:
            data = 'abc'
            with h5py.File(path, 'w') as f:
                f.create_dataset('str', data=data)
            with h5py.File(path, 'r') as f:
                deserializer = hdf5.HDF5Deserializer(f)
                ret = deserializer('str', '')
                self.assertEqual(ret, data)
        finally:
            os.remove(path)

    @unittest.skipUnless(hdf5._available and
                         h5py.version.version_tuple >= (2, 7, 0),
                         'h5py>=2.7.0 is not available')
    def test_deserialize_none(self):
        # An empty dataset deserializes to None.
        ret = self.deserializer('w', None)
        self.assertIs(ret, None)

    @unittest.skipUnless(hdf5._available and
                         h5py.version.version_tuple >= (2, 7, 0),
                         'h5py>=2.7.0 is not available')
    def test_deserialize_none_by_passing_array(self):
        # Even with a destination array supplied, an empty dataset yields None.
        y = numpy.empty((1,))
        ret = self.deserializer('w', y)
        self.assertIs(ret, None)
@unittest.skipUnless(hdf5._available, 'h5py is not available')
class TestHDF5DeserializerNonStrict(unittest.TestCase):
    """In non-strict mode, missing keys are skipped instead of raising."""

    def setUp(self):
        # Fixture file contains only a group 'x' — no dataset 'y'.
        fd, path = tempfile.mkstemp()
        os.close(fd)
        self.temp_file_path = path
        with h5py.File(path, 'w') as f:
            f.require_group('x')

        self.hdf5file = h5py.File(path, 'r')
        self.deserializer = hdf5.HDF5Deserializer(self.hdf5file, strict=False)

    def tearDown(self):
        if hasattr(self, 'hdf5file'):
            self.hdf5file.close()
        if hasattr(self, 'temp_file_path'):
            os.remove(self.temp_file_path)

    def test_deserialize_partial(self):
        # 'y' is absent from the file; non-strict mode returns the passed
        # buffer untouched rather than raising.
        y = numpy.empty((2, 3), dtype=numpy.float32)
        ret = self.deserializer('y', y)
        self.assertIs(ret, y)
@unittest.skipUnless(hdf5._available, 'h5py is not available')
class TestHDF5DeserializerNonStrictGroupHierachy(unittest.TestCase):
    """Non-strict loading of a nested chain where child parameter names
    differ from the saved ones: matching params load, the rest are kept."""

    def setUp(self):
        fd, path = tempfile.mkstemp()
        os.close(fd)
        self.temp_file_path = path

        # Save a parent chain with child.linear and parent.linear.
        child = link.Chain()
        with child.init_scope():
            child.linear = links.Linear(2, 3)
        parent = link.Chain()
        with parent.init_scope():
            parent.linear = links.Linear(3, 2)
            parent.child = child
        hdf5.save_hdf5(self.temp_file_path, parent)
        self.source = parent

        self.hdf5file = h5py.File(path, 'r')
        self.deserializer = hdf5.HDF5Deserializer(self.hdf5file, strict=False)

    def tearDown(self):
        if hasattr(self, 'hdf5file'):
            self.hdf5file.close()
        if hasattr(self, 'temp_file_path'):
            os.remove(self.temp_file_path)

    def test_deserialize_hierarchy(self):
        # Target's child uses 'linear2' (absent from the file); its params
        # must remain untouched while 'linear' is loaded from the file.
        child = link.Chain()
        with child.init_scope():
            child.linear2 = links.Linear(2, 3)
        target = link.Chain()
        with target.init_scope():
            target.linear = links.Linear(3, 2)
            target.child = child
        target_child_W = numpy.copy(child.linear2.W.data)
        target_child_b = numpy.copy(child.linear2.b.data)

        self.deserializer.load(target)

        numpy.testing.assert_array_equal(
            self.source.linear.W.data, target.linear.W.data)
        numpy.testing.assert_array_equal(
            self.source.linear.b.data, target.linear.b.data)
        numpy.testing.assert_array_equal(
            target.child.linear2.W.data, target_child_W)
        numpy.testing.assert_array_equal(
            target.child.linear2.b.data, target_child_b)
@unittest.skipUnless(hdf5._available, 'h5py is not available')
class TestSaveHDF5(unittest.TestCase):
    """save_hdf5() must call obj.serialize once with an HDF5Serializer
    carrying the requested compression level."""

    def setUp(self):
        fd, path = tempfile.mkstemp()
        os.close(fd)
        self.temp_file_path = path

    def tearDown(self):
        if hasattr(self, 'temp_file_path'):
            os.remove(self.temp_file_path)

    def test_save(self):
        obj = mock.MagicMock()
        hdf5.save_hdf5(self.temp_file_path, obj, compression=3)

        self.assertEqual(obj.serialize.call_count, 1)
        (serializer,), _ = obj.serialize.call_args
        self.assertIsInstance(serializer, hdf5.HDF5Serializer)
        self.assertEqual(serializer.compression, 3)
@unittest.skipUnless(hdf5._available, 'h5py is not available')
class TestLoadHDF5(unittest.TestCase):
    """load_hdf5() must call obj.serialize once with an HDF5Deserializer."""

    def setUp(self):
        fd, path = tempfile.mkstemp()
        os.close(fd)
        self.temp_file_path = path
        # Make a hdf5 file with empty data
        h5py.File(path, 'w')

    def tearDown(self):
        if hasattr(self, 'temp_file_path'):
            os.remove(self.temp_file_path)

    def test_load(self):
        obj = mock.MagicMock()
        hdf5.load_hdf5(self.temp_file_path, obj)

        self.assertEqual(obj.serialize.call_count, 1)
        (serializer,), _ = obj.serialize.call_args
        self.assertIsInstance(serializer, hdf5.HDF5Deserializer)
@unittest.skipUnless(hdf5._available, 'h5py is not available')
class TestGroupHierachy(unittest.TestCase):
    """Checks the HDF5 group layout produced for nested chains/optimizers.

    Note: the h5py.File() calls below pass an explicit mode. The original
    relied on the implicit default, which was 'a' in h5py < 3 but changed
    to 'r' in h5py 3.0 — leaving it implicit breaks every write here.
    """

    def setUp(self):
        fd, path = tempfile.mkstemp()
        os.close(fd)
        self.temp_file_path = path

        # parent chain { child chain { linear, Wc }, Wp }
        child = link.Chain()
        with child.init_scope():
            child.linear = links.Linear(2, 3)
            child.Wc = chainer.Parameter(shape=(2, 3))

        self.parent = link.Chain()
        with self.parent.init_scope():
            self.parent.child = child
            self.parent.Wp = chainer.Parameter(shape=(2, 3))

        self.optimizer = optimizers.AdaDelta()
        self.optimizer.setup(self.parent)

        self.parent.cleargrads()
        self.optimizer.update()  # init states

    def _save(self, h5, obj, name):
        # Serialize `obj` into a new subgroup `name` of `h5`.
        group = h5.create_group(name)
        serializer = hdf5.HDF5Serializer(group)
        serializer.save(obj)

    def _load(self, h5, obj, name):
        # Deserialize subgroup `name` of `h5` into `obj`.
        group = h5[name]
        serializer = hdf5.HDF5Deserializer(group)
        serializer.load(obj)

    def tearDown(self):
        if hasattr(self, 'temp_file_path'):
            os.remove(self.temp_file_path)

    def _check_group(self, h5, state):
        # The group must contain 'child' plus the extra top-level keys in
        # `state`, with the nested child/linear layout below it.
        self.assertSetEqual(set(h5.keys()),
                            set(('child',) + state))
        self.assertSetEqual(set(h5['child'].keys()),
                            {'linear', 'Wc'})
        self.assertSetEqual(set(h5['child']['linear'].keys()),
                            {'W', 'b'})

    def test_save_chain(self):
        with h5py.File(self.temp_file_path, 'a') as h5:
            self._save(h5, self.parent, 'test')
            self.assertSetEqual(set(h5.keys()), {'test'})
            self._check_group(h5['test'], ('Wp',))

    def test_save_optimizer(self):
        with h5py.File(self.temp_file_path, 'a') as h5:
            self._save(h5, self.optimizer, 'test')
            self.assertSetEqual(set(h5.keys()), {'test'})
            self._check_group(h5['test'], ('Wp', 'epoch', 't'))

    def test_save_chain2(self):
        hdf5.save_hdf5(self.temp_file_path, self.parent)
        with h5py.File(self.temp_file_path, 'r') as h5:
            self._check_group(h5, ('Wp',))

    def test_save_optimizer2(self):
        hdf5.save_hdf5(self.temp_file_path, self.optimizer)
        with h5py.File(self.temp_file_path, 'r') as h5:
            self._check_group(h5, ('Wp', 'epoch', 't'))

    def test_load_chain(self):
        with h5py.File(self.temp_file_path, 'a') as h5:
            self._save(h5, self.parent, 'test')
        with h5py.File(self.temp_file_path, 'r') as h5:
            self._load(h5, self.parent, 'test')

    def test_load_optimizer(self):
        with h5py.File(self.temp_file_path, 'a') as h5:
            self._save(h5, self.optimizer, 'test')
        with h5py.File(self.temp_file_path, 'r') as h5:
            self._load(h5, self.optimizer, 'test')
@unittest.skipUnless(hdf5._available, 'h5py is not available')
class TestNoH5py(unittest.TestCase):
    """When h5py cannot be imported, the hdf5 serializer entry points must
    raise RuntimeError instead of failing obscurely."""

    def setUp(self):
        # Remove h5py from sys.modules to emulate situation that h5py is not
        # installed.
        sys.modules['h5py'] = None

    def tearDown(self):
        # Restore the real module so the rest of the suite is unaffected.
        sys.modules['h5py'] = h5py

    def test_raise(self):
        # Force a re-import of chainer.serializers so it observes the
        # "missing" h5py and flags itself unavailable.
        del sys.modules['chainer.serializers.hdf5']
        del sys.modules['chainer.serializers.npz']
        del sys.modules['chainer.serializers']
        import chainer.serializers
        self.assertFalse(chainer.serializers.hdf5._available)
        with self.assertRaises(RuntimeError):
            chainer.serializers.save_hdf5(None, None, None)
        with self.assertRaises(RuntimeError):
            chainer.serializers.load_hdf5(None, None)
        with self.assertRaises(RuntimeError):
            chainer.serializers.HDF5Serializer(None)
        with self.assertRaises(RuntimeError):
            chainer.serializers.HDF5Deserializer(None)
@unittest.skipUnless(hdf5._available, 'h5py is not available')
class Test5pyEmptyNotAvailable(unittest.TestCase):
    """Serializing None must raise RuntimeError when h5py is too old to
    provide h5py.Empty (< 2.7.0)."""

    def setUp(self):
        # Set h5py.version.version_tuple to emulate situation that h5py is
        # so old that it doesn't have h5py.Empty.
        self.original_version_tuple = h5py.version.version_tuple
        h5py.version.version_tuple = (2, 6, 0)

        # Prepare serializer
        fd, path = tempfile.mkstemp()
        os.close(fd)
        self.temp_file_path = path
        self.hdf5file = h5py.File(path, 'w')
        self.serializer = hdf5.HDF5Serializer(self.hdf5file, compression=3)

    def tearDown(self):
        h5py.version.version_tuple = self.original_version_tuple
        # The original leaked the open HDF5 handle and the temp file; clean
        # them up the same way the sibling test cases do.
        if hasattr(self, 'hdf5file'):
            self.hdf5file.close()
        if hasattr(self, 'temp_file_path'):
            os.remove(self.temp_file_path)

    def test_raise1(self):
        with self.assertRaises(RuntimeError):
            self.serializer('x', None)
testing.run_module(__name__, __file__)
| aonotas/chainer | tests/chainer_tests/serializers_tests/test_hdf5.py | Python | mit | 14,628 |
import logging
import sys
import datetime
import os
import errno
import copy
from pythonjsonlogger import jsonlogger
from cloghandler import ConcurrentRotatingFileHandler
class LogFactory(object):
    '''
    Goal is to manage Simple LogObject instances
    Like a Singleton
    '''
    # Shared singleton instance; lazily created on first access.
    _instance = None

    @classmethod
    def get_instance(cls, **kwargs):
        '''
        Returns the shared LogObject, creating it on first use.

        @param kwargs: forwarded to LogObject() on first instantiation only
        @return: the singleton LogObject
        '''
        # Conventional name for the implicit class argument of a
        # classmethod is `cls`, not `self`.
        if cls._instance is None:
            cls._instance = LogObject(**kwargs)
        return cls._instance
class LogObject(object):
    '''
    Easy wrapper for writing json logs to a rotating file log
    '''
    # Numeric ranking of level names; a message is emitted when its level's
    # rank is >= the configured level's rank.
    level_dict = {
        "DEBUG": 0,
        "INFO": 1,
        "WARN": 2,
        "WARNING": 2,
        "ERROR": 3,
        "CRITICAL": 4,
    }

    def __init__(self, json=False, stdout=True, name='scrapy-cluster',
                 dir='logs', file='main.log', bytes=25000000, backups=5,
                 level='INFO',
                 format='%(asctime)s [%(name)s] %(levelname)s: %(message)s',
                 propagate=False):
        '''
        @param stdout: Flag to write logs to stdout or file
        @param json: Flag to write json logs with objects or just the messages
        @param name: The logger name
        @param dir: The directory to write logs into
        @param file: The file name
        @param bytes: The max file size in bytes
        @param backups: The number of backups to keep of the file
        @param level: The logging level string
        @param format: The log format
        @param propagate: Allow the log to propagate to other ancestor loggers

        NOTE: several parameter names (json, dir, file, bytes, format) shadow
        builtins; they are kept for backwards compatibility with existing
        keyword callers.
        '''
        # set up logger
        self.logger = logging.getLogger(name)
        self.logger.setLevel(logging.DEBUG)
        self.logger.propagate = propagate
        self.json = json
        self.log_level = level
        self.format_string = format

        if stdout:
            # set up to std out
            stream_handler = logging.StreamHandler(sys.stdout)
            stream_handler.setLevel(logging.DEBUG)
            formatter = self._get_formatter(json)
            stream_handler.setFormatter(formatter)
            self.logger.addHandler(stream_handler)
            self._check_log_level(level)
            self.debug("Logging to stdout")
        else:
            # set up to file
            try:
                # try to make dir
                os.makedirs(dir)
            except OSError as exception:
                # EEXIST is fine (dir already present); anything else is real
                if exception.errno != errno.EEXIST:
                    raise

            file_handler = ConcurrentRotatingFileHandler(dir + '/' + file,
                                                         maxBytes=bytes,
                                                         backupCount=backups)
            file_handler.setLevel(logging.DEBUG)
            formatter = self._get_formatter(json)
            file_handler.setFormatter(formatter)
            self.logger.addHandler(file_handler)
            self._check_log_level(level)
            self.debug("Logging to file: {file}".format(
                file=dir + '/' + file))

    def _check_log_level(self, level):
        '''
        Ensures a valid log level

        @param level: the asked for level
        '''
        if level not in self.level_dict:
            self.log_level = 'DEBUG'
            # Logger.warn() is deprecated; warning() is the supported name.
            self.logger.warning("Unknown log level '{lev}', defaulting to DEBUG"
                                .format(lev=level))

    def _get_formatter(self, json):
        '''
        Return the proper log formatter

        @param json: Boolean value
        '''
        if json:
            return jsonlogger.JsonFormatter()
        else:
            return logging.Formatter(self.format_string)

    def debug(self, message, extra=None):
        '''
        Writes a debug message to the log

        @param message: The message to write
        @param extra: The extras object to pass in
        '''
        # None default instead of a mutable {} shared across calls.
        extra = {} if extra is None else extra
        if self.level_dict['DEBUG'] >= self.level_dict[self.log_level]:
            extras = self.add_extras(extra, "DEBUG")
            self._write_message(message, extras)

    def info(self, message, extra=None):
        '''
        Writes an info message to the log

        @param message: The message to write
        @param extra: The extras object to pass in
        '''
        extra = {} if extra is None else extra
        if self.level_dict['INFO'] >= self.level_dict[self.log_level]:
            extras = self.add_extras(extra, "INFO")
            self._write_message(message, extras)

    def warn(self, message, extra=None):
        '''
        Writes a warning message to the log

        @param message: The message to write
        @param extra: The extras object to pass in
        '''
        # Alias kept for backwards compatibility; delegates to warning().
        self.warning(message, extra)

    def warning(self, message, extra=None):
        '''
        Writes a warning message to the log

        @param message: The message to write
        @param extra: The extras object to pass in
        '''
        extra = {} if extra is None else extra
        if self.level_dict['WARNING'] >= self.level_dict[self.log_level]:
            extras = self.add_extras(extra, "WARNING")
            self._write_message(message, extras)

    def error(self, message, extra=None):
        '''
        Writes an error message to the log

        @param message: The message to write
        @param extra: The extras object to pass in
        '''
        extra = {} if extra is None else extra
        if self.level_dict['ERROR'] >= self.level_dict[self.log_level]:
            extras = self.add_extras(extra, "ERROR")
            self._write_message(message, extras)

    def critical(self, message, extra=None):
        '''
        Writes a critical message to the log

        @param message: The message to write
        @param extra: The extras object to pass in
        '''
        extra = {} if extra is None else extra
        if self.level_dict['CRITICAL'] >= self.level_dict[self.log_level]:
            extras = self.add_extras(extra, "CRITICAL")
            self._write_message(message, extras)

    def _write_message(self, message, extra):
        '''
        Writes the log output

        @param message: The message to write
        @param extra: The potential object to write
        '''
        if not self.json:
            self._write_standard(message, extra)
        else:
            self._write_json(message, extra)

    def _write_standard(self, message, extra):
        '''
        Writes a standard log statement

        @param message: The message to write
        @param extra: The object to pull defaults from
        '''
        if extra['level'] == 'INFO':
            self.logger.info(message)
        elif extra['level'] == 'DEBUG':
            self.logger.debug(message)
        elif extra['level'] == 'WARNING':
            self.logger.warning(message)
        elif extra['level'] == 'ERROR':
            self.logger.error(message)
        elif extra['level'] == 'CRITICAL':
            self.logger.critical(message)
        else:
            self.logger.debug(message)

    def _write_json(self, message, extra):
        '''
        The JSON logger doesn't obey log levels

        @param message: The message to write
        @param extra: The object to write
        '''
        self.logger.info(message, extra=extra)

    def name(self):
        '''
        Returns the logger name
        '''
        return self.logger.name

    def add_extras(self, dict, level):
        '''
        Adds the log level, timestamp, and logger name to the extras dict

        @param dict: the extras dictionary (name kept for API compatibility
            even though it shadows the builtin)
        @param level: the log level string
        @return: a deep copy of `dict` with missing defaults filled in
        '''
        my_copy = copy.deepcopy(dict)
        if 'level' not in my_copy:
            my_copy['level'] = level
        if 'timestamp' not in my_copy:
            my_copy['timestamp'] = self._get_time()
        if 'logger' not in my_copy:
            my_copy['logger'] = self.name()
        return my_copy

    def _get_time(self):
        '''
        Returns the system time in UTC, ISO-8601 formatted with a Z suffix
        '''
        return datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%fZ')
| quixey/scrapy-cluster | utils/scutils/log_factory.py | Python | mit | 7,753 |
# -*- coding: utf-8 -*-
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import hashlib
from typing import List, Tuple
from .util import bfh, bh2u, BitcoinException, print_error, assert_bytes, to_bytes, inv_dict
from . import version
from . import segwit_addr
from . import constants
from . import ecc
from .crypto import Hash, sha256, hash_160, hmac_oneshot
################################## transactions

# number of confirmations before a coinbase output becomes spendable
COINBASE_MATURITY = 100
# satoshis per BTC
COIN = 100000000
TOTAL_COIN_SUPPLY_LIMIT_IN_BTC = 21000000

# supported types of transaction outputs
TYPE_ADDRESS = 0
TYPE_PUBKEY = 1
TYPE_SCRIPT = 2
def rev_hex(s):
    """Reverse the byte order of a hex string (endianness swap)."""
    # stdlib bytes.fromhex/.hex() instead of the bfh/bh2u wrappers:
    # self-contained and identical in behaviour (lowercase hex out).
    return bytes.fromhex(s)[::-1].hex()
def int_to_hex(i: int, length: int=1) -> str:
    """Converts int to little-endian hex string.
    `length` is the number of bytes available.

    Negative values are encoded in two's complement.
    Raises TypeError for non-ints and OverflowError if the value
    does not fit in `length` bytes.
    """
    if not isinstance(i, int):
        raise TypeError('{} instead of int'.format(i))
    range_size = pow(256, length)
    if i < -range_size/2 or i >= range_size:
        raise OverflowError('cannot convert int {} to hex ({} bytes)'.format(i, length))
    if i < 0:
        # two's complement
        i = range_size + i
    # int.to_bytes replaces the old hex()/rstrip('L')/manual-padding dance
    # (the 'L' strip was a Python 2 leftover; hex() never emits it on py3)
    return i.to_bytes(length, 'little').hex()
def script_num_to_hex(i: int) -> str:
    """See CScriptNum in Bitcoin Core.
    Encodes an integer as hex, to be used in script (little-endian,
    sign-and-magnitude with the sign carried in the top bit).
    ported from https://github.com/bitcoin/bitcoin/blob/8cbc5c4be4be22aca228074f087a374a7ec38be8/src/script/script.h#L326
    """
    if i == 0:
        return ''
    result = bytearray()
    neg = i < 0
    absvalue = abs(i)
    while absvalue > 0:
        result.append(absvalue & 0xff)
        absvalue >>= 8
    if result[-1] & 0x80:
        # top bit already used by the magnitude: add a sign byte
        result.append(0x80 if neg else 0x00)
    elif neg:
        # fold the sign into the most significant byte
        result[-1] |= 0x80
    # stdlib bytearray.hex() instead of the bh2u wrapper
    return result.hex()
def var_int(i: int) -> str:
    """Serialize an int as a Bitcoin variable-length integer (hex).
    https://en.bitcoin.it/wiki/Protocol_specification#Variable_length_integer
    """
    # int.to_bytes(..., 'little') is exactly what int_to_hex produced here;
    # inlining it makes this function self-contained.
    if i < 0xfd:
        return i.to_bytes(1, 'little').hex()
    elif i <= 0xffff:
        return 'fd' + i.to_bytes(2, 'little').hex()
    elif i <= 0xffffffff:
        return 'fe' + i.to_bytes(4, 'little').hex()
    else:
        return 'ff' + i.to_bytes(8, 'little').hex()
def witness_push(item: str) -> str:
    """Returns data in the form it should be present in the witness.
    hex -> hex

    Witness stack items are prefixed with their byte length as a
    var_int (not with script push opcodes).
    """
    return var_int(len(item) // 2) + item
def op_push(i: int) -> str:
    """Return the hex opcode(s) that push `i` bytes onto the script stack.

    Byte counts below 0x4c are encoded as the single-byte opcode itself;
    larger counts use OP_PUSHDATA1 (0x4c), OP_PUSHDATA2 (0x4d) or
    OP_PUSHDATA4 (0x4e).  (The original comment tagged the wrong branch.)
    """
    if i < 0x4c:
        return i.to_bytes(1, 'little').hex()
    elif i <= 0xff:
        return '4c' + i.to_bytes(1, 'little').hex()   # OP_PUSHDATA1
    elif i <= 0xffff:
        return '4d' + i.to_bytes(2, 'little').hex()   # OP_PUSHDATA2
    else:
        return '4e' + i.to_bytes(4, 'little').hex()   # OP_PUSHDATA4
def push_script(data: str) -> str:
    """Returns pushed data to the script, automatically
    choosing canonical opcodes depending on the length of the data.
    hex -> hex
    ported from https://github.com/btcsuite/btcd/blob/fdc2bc867bda6b351191b5872d2da8270df00d13/txscript/scriptbuilder.go#L128
    """
    data = bfh(data)
    # imported here (not at module level) -- presumably to avoid a circular
    # import with .transaction; TODO confirm
    from .transaction import opcodes
    data_len = len(data)
    # "small integer" opcodes
    if data_len == 0 or data_len == 1 and data[0] == 0:
        return bh2u(bytes([opcodes.OP_0]))
    elif data_len == 1 and data[0] <= 16:
        return bh2u(bytes([opcodes.OP_1 - 1 + data[0]]))
    elif data_len == 1 and data[0] == 0x81:
        # 0x81 is -1 in CScriptNum encoding
        return bh2u(bytes([opcodes.OP_1NEGATE]))
    return op_push(data_len) + bh2u(data)
def add_number_to_script(i: int) -> bytes:
    """Return script bytes that canonically push the integer `i` (CScriptNum)."""
    return bfh(push_script(script_num_to_hex(i)))
# byte-reversed hex encode/decode, as used for txids / block hashes
hash_encode = lambda x: bh2u(x[::-1])
hash_decode = lambda x: bfh(x)[::-1]
# HMAC-SHA512 keyed with x over message y
hmac_sha_512 = lambda x, y: hmac_oneshot(x, y, hashlib.sha512)
def is_new_seed(x, prefix=version.SEED_PREFIX):
    """Return True iff `x` is an Electrum seed: the hex of
    HMAC-SHA512(b"Seed version", normalized seed) must start with `prefix`."""
    from . import mnemonic
    x = mnemonic.normalize_text(x)
    s = bh2u(hmac_sha_512(b"Seed version", x.encode('utf8')))
    return s.startswith(prefix)
def is_old_seed(seed):
    """Return True iff `seed` looks like a pre-2.0 Electrum seed:
    either 16/32 bytes of hex, or 12/24 words decodable by the old wordlist."""
    from . import old_mnemonic, mnemonic
    seed = mnemonic.normalize_text(seed)
    words = seed.split()
    try:
        # checks here are deliberately left weak for legacy reasons, see #3149
        old_mnemonic.mn_decode(words)
        uses_electrum_words = True
    except Exception:
        uses_electrum_words = False
    try:
        seed = bfh(seed)
        is_hex = (len(seed) == 16 or len(seed) == 32)
    except Exception:
        is_hex = False
    return is_hex or (uses_electrum_words and (len(words) == 12 or len(words) == 24))
def seed_type(x):
    """Classify a seed phrase: 'old', 'standard', 'segwit', '2fa',
    or '' when it is not a recognised seed."""
    if is_old_seed(x):
        return 'old'
    # check new-style seed prefixes in the same order as before
    for prefix, kind in ((version.SEED_PREFIX, 'standard'),
                         (version.SEED_PREFIX_SW, 'segwit'),
                         (version.SEED_PREFIX_2FA, '2fa')):
        if is_new_seed(x, prefix):
            return kind
    return ''
is_seed = lambda x: bool(seed_type(x))
############ functions from pywallet #####################
def hash160_to_b58_address(h160: bytes, addrtype):
    """Base58Check-encode the 20-byte hash `h160` with version byte `addrtype`."""
    s = bytes([addrtype])
    s += h160
    # 4-byte double-SHA256 checksum appended before base58 encoding
    return base_encode(s+Hash(s)[0:4], base=58)
def b58_address_to_hash160(addr):
    """Decode a base58 address into (version byte, 20-byte hash160).
    NOTE: the checksum is not verified here; see is_b58_address for that."""
    addr = to_bytes(addr, 'ascii')
    _bytes = base_decode(addr, 25, base=58)
    return _bytes[0], _bytes[1:21]
def hash160_to_p2pkh(h160, *, net=None):
    """Encode a pubkey hash as a base58 P2PKH address for `net`."""
    if net is None:
        net = constants.net
    return hash160_to_b58_address(h160, net.ADDRTYPE_P2PKH)
def hash160_to_p2sh(h160, *, net=None):
    """Encode a script hash as a base58 P2SH address for `net`."""
    if net is None:
        net = constants.net
    return hash160_to_b58_address(h160, net.ADDRTYPE_P2SH)
def public_key_to_p2pkh(public_key: bytes) -> str:
    """P2PKH address for a serialized public key."""
    return hash160_to_p2pkh(hash_160(public_key))
def hash_to_segwit_addr(h, witver, *, net=None):
    """Bech32-encode witness program `h` with witness version `witver`."""
    if net is None:
        net = constants.net
    return segwit_addr.encode(net.SEGWIT_HRP, witver, h)
def public_key_to_p2wpkh(public_key):
    """Native segwit (v0) P2WPKH address for a serialized public key."""
    return hash_to_segwit_addr(hash_160(public_key), witver=0)
def script_to_p2wsh(script):
    """Native segwit (v0) P2WSH address for a witness script (hex)."""
    return hash_to_segwit_addr(sha256(bfh(script)), witver=0)
def p2wpkh_nested_script(pubkey):
    """Redeem script (hex) for P2WPKH nested in P2SH: OP_0 <20-byte pkh>."""
    pkh = bh2u(hash_160(bfh(pubkey)))
    return '00' + push_script(pkh)
def p2wsh_nested_script(witness_script):
    """Redeem script (hex) for P2WSH nested in P2SH: OP_0 <32-byte sha256>."""
    wsh = bh2u(sha256(bfh(witness_script)))
    return '00' + push_script(wsh)
def pubkey_to_address(txin_type, pubkey):
    """Address of the given script type for a hex-encoded public key.
    Raises NotImplementedError for unsupported types."""
    if txin_type == 'p2pkh':
        return public_key_to_p2pkh(bfh(pubkey))
    if txin_type == 'p2wpkh':
        return public_key_to_p2wpkh(bfh(pubkey))
    if txin_type == 'p2wpkh-p2sh':
        scriptSig = p2wpkh_nested_script(pubkey)
        return hash160_to_p2sh(hash_160(bfh(scriptSig)))
    raise NotImplementedError(txin_type)
def redeem_script_to_address(txin_type, redeem_script):
    """Address of the given script type for a hex redeem/witness script.
    Raises NotImplementedError for unsupported types."""
    if txin_type == 'p2sh':
        return hash160_to_p2sh(hash_160(bfh(redeem_script)))
    if txin_type == 'p2wsh':
        return script_to_p2wsh(redeem_script)
    if txin_type == 'p2wsh-p2sh':
        scriptSig = p2wsh_nested_script(redeem_script)
        return hash160_to_p2sh(hash_160(bfh(scriptSig)))
    raise NotImplementedError(txin_type)
def script_to_address(script, *, net=None):
    """Convert an output script (hex) to the address it pays to.

    Raises BitcoinException if the script is not of an address type.
    """
    from .transaction import get_address_from_output_script
    t, addr = get_address_from_output_script(bfh(script), net=net)
    # an `assert` would be silently stripped under `python -O`; fail loudly
    if t != TYPE_ADDRESS:
        raise BitcoinException('script is not of address type: {}'.format(script))
    return addr
def address_to_script(addr, *, net=None):
    """Return the scriptPubKey (hex) paying to address `addr`.

    Handles native segwit (bech32) addresses and base58 P2PKH/P2SH.
    Raises BitcoinException for unknown witness versions / address types.
    """
    if net is None:
        net = constants.net
    witver, witprog = segwit_addr.decode(net.SEGWIT_HRP, addr)
    if witprog is not None:
        if not (0 <= witver <= 16):
            raise BitcoinException('impossible witness version: {}'.format(witver))
        OP_n = witver + 0x50 if witver > 0 else 0
        script = bh2u(bytes([OP_n]))
        script += push_script(bh2u(bytes(witprog)))
        return script
    # renamed from `hash_160`, which shadowed the module-level hash_160()
    addrtype, h160 = b58_address_to_hash160(addr)
    if addrtype == net.ADDRTYPE_P2PKH:
        script = '76a9'                                       # op_dup, op_hash_160
        script += push_script(bh2u(h160))
        script += '88ac'                                      # op_equalverify, op_checksig
    elif addrtype == net.ADDRTYPE_P2SH:
        script = 'a9'                                         # op_hash_160
        script += push_script(bh2u(h160))
        script += '87'                                        # op_equal
    else:
        raise BitcoinException('unknown address type: {}'.format(addrtype))
    return script
def address_to_scripthash(addr):
    """Electrum-protocol scripthash (hex) for an address."""
    script = address_to_script(addr)
    return script_to_scripthash(script)
def script_to_scripthash(script):
    """SHA256 of the script, byte-reversed and hex-encoded (electrum protocol)."""
    h = sha256(bytes.fromhex(script))[0:32]
    return bh2u(bytes(reversed(h)))
def public_key_to_p2pk_script(pubkey):
    """Pay-to-pubkey scriptPubKey (hex): <pubkey> OP_CHECKSIG."""
    script = push_script(pubkey)
    script += 'ac'                                           # op_checksig
    return script
# base58 alphabet (no 0, O, I, l, which are visually ambiguous)
__b58chars = b'123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
assert len(__b58chars) == 58
# 43-character alphabet for base43 encoding
__b43chars = b'0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ$*+-./:'
assert len(__b43chars) == 43
def base_encode(v: bytes, base: int) -> str:
    """Encode the byte string `v` in the given base (58 or 43)."""
    assert_bytes(v)
    if base not in (58, 43):
        raise ValueError('not supported base: {}'.format(base))
    alphabet = __b43chars if base == 43 else __b58chars
    # interpret the bytes as one big-endian integer
    num = int.from_bytes(v, 'big')
    encoded = bytearray()
    while num >= base:
        num, digit = divmod(num, base)
        encoded.append(alphabet[digit])
    encoded.append(alphabet[num])
    # Bitcoin does a little leading-zero-compression:
    # leading 0-bytes in the input become leading-1s
    pad = len(v) - len(v.lstrip(b'\x00'))
    encoded.extend(alphabet[0:1] * pad)
    encoded.reverse()
    return encoded.decode('ascii')
def base_decode(v, length, base):
    """ decode v into a string of len bytes.

    Raises ValueError for characters outside the alphabet or an
    unsupported base. Returns None (rather than raising) when `length`
    is given and the decoded payload has a different length.
    """
    # assert_bytes(v)
    v = to_bytes(v, 'ascii')
    if base not in (58, 43):
        raise ValueError('not supported base: {}'.format(base))
    chars = __b58chars
    if base == 43:
        chars = __b43chars
    long_value = 0
    for (i, c) in enumerate(v[::-1]):
        digit = chars.find(bytes([c]))
        if digit == -1:
            raise ValueError('Forbidden character {} for base {}'.format(c, base))
        long_value += digit * (base**i)
    result = bytearray()
    while long_value >= 256:
        div, mod = divmod(long_value, 256)
        result.append(mod)
        long_value = div
    result.append(long_value)
    # leading first-alphabet-chars encode leading zero bytes
    nPad = 0
    for c in v:
        if c == chars[0]:
            nPad += 1
        else:
            break
    result.extend(b'\x00' * nPad)
    if length is not None and len(result) != length:
        return None
    result.reverse()
    return bytes(result)
class InvalidChecksum(Exception):
    """Raised when a Base58Check payload fails its 4-byte checksum."""
    pass
def EncodeBase58Check(vchIn):
    """Base58Check: append 4-byte double-SHA256 checksum, then base58-encode."""
    hash = Hash(vchIn)
    return base_encode(vchIn + hash[0:4], base=58)
def DecodeBase58Check(psz):
    """Decode a Base58Check string, verifying and stripping the checksum.
    Raises InvalidChecksum on mismatch."""
    vchRet = base_decode(psz, None, base=58)
    key = vchRet[0:-4]
    csum = vchRet[-4:]
    hash = Hash(key)
    cs32 = hash[0:4]
    if cs32 != csum:
        raise InvalidChecksum('expected {}, actual {}'.format(bh2u(cs32), bh2u(csum)))
    else:
        return key
# backwards compat
# extended WIF for segwit (used in 3.0.x; but still used internally)
# the keys in this dict should be a superset of what Imported Wallets can import
# maps script type name -> offset added to the network WIF prefix byte
WIF_SCRIPT_TYPES = {
    'p2pkh':0,
    'p2wpkh':1,
    'p2wpkh-p2sh':2,
    'p2sh':5,
    'p2wsh':6,
    'p2wsh-p2sh':7
}
WIF_SCRIPT_TYPES_INV = inv_dict(WIF_SCRIPT_TYPES)

# script types usable with BIP-43 purpose 48' derivations
PURPOSE48_SCRIPT_TYPES = {
    'p2wsh-p2sh': 1,  # specifically multisig
    'p2wsh': 2,       # specifically multisig
}
PURPOSE48_SCRIPT_TYPES_INV = inv_dict(PURPOSE48_SCRIPT_TYPES)
def serialize_privkey(secret: bytes, compressed: bool, txin_type: str,
                      internal_use: bool=False) -> str:
    """Serialize a 32-byte secret as WIF.

    With internal_use=True the script type is encoded into the prefix
    byte (3.0.x-style extended WIF); otherwise the plain network prefix
    is used and the script type is prepended as "txin_type:WIF".
    """
    # we only export secrets inside curve range
    secret = ecc.ECPrivkey.normalize_secret_bytes(secret)
    if internal_use:
        prefix = bytes([(WIF_SCRIPT_TYPES[txin_type] + constants.net.WIF_PREFIX) & 255])
    else:
        prefix = bytes([constants.net.WIF_PREFIX])
    # trailing 0x01 marks a compressed public key
    suffix = b'\01' if compressed else b''
    vchIn = prefix + secret + suffix
    base58_wif = EncodeBase58Check(vchIn)
    if internal_use:
        return base58_wif
    else:
        return '{}:{}'.format(txin_type, base58_wif)
def deserialize_privkey(key: str) -> Tuple[str, bytes, bool]:
    """Parse a private key (minikey, WIF, or "txin_type:WIF") into
    (txin_type, 32-byte secret, compressed). Raises BitcoinException
    on any malformed input."""
    if is_minikey(key):
        return 'p2pkh', minikey_to_private_key(key), False
    txin_type = None
    if ':' in key:
        txin_type, key = key.split(sep=':', maxsplit=1)
        if txin_type not in WIF_SCRIPT_TYPES:
            raise BitcoinException('unknown script type: {}'.format(txin_type))
    try:
        vch = DecodeBase58Check(key)
    except BaseException:
        # don't leak the full key into logs/exceptions
        neutered_privkey = str(key)[:3] + '..' + str(key)[-2:]
        raise BitcoinException("cannot deserialize privkey {}"
                               .format(neutered_privkey))
    if txin_type is None:
        # keys exported in version 3.0.x encoded script type in first byte
        prefix_value = vch[0] - constants.net.WIF_PREFIX
        try:
            txin_type = WIF_SCRIPT_TYPES_INV[prefix_value]
        except KeyError:
            raise BitcoinException('invalid prefix ({}) for WIF key (1)'.format(vch[0]))
    else:
        # all other keys must have a fixed first byte
        if vch[0] != constants.net.WIF_PREFIX:
            raise BitcoinException('invalid prefix ({}) for WIF key (2)'.format(vch[0]))
    if len(vch) not in [33, 34]:
        raise BitcoinException('invalid vch len for WIF key: {}'.format(len(vch)))
    # 34 bytes means the trailing 0x01 "compressed" marker is present
    compressed = len(vch) == 34
    secret_bytes = vch[1:33]
    # we accept secrets outside curve range; cast into range here:
    secret_bytes = ecc.ECPrivkey.normalize_secret_bytes(secret_bytes)
    return txin_type, secret_bytes, compressed
def is_compressed(sec):
    """True iff the WIF key `sec` encodes a compressed public key."""
    return deserialize_privkey(sec)[2]
def address_from_private_key(sec):
    """Derive the address corresponding to the WIF private key `sec`."""
    txin_type, privkey, compressed = deserialize_privkey(sec)
    public_key = ecc.ECPrivkey(privkey).get_public_key_hex(compressed=compressed)
    return pubkey_to_address(txin_type, public_key)
def is_segwit_address(addr):
    """Return True iff `addr` is a valid native segwit (bech32) address."""
    try:
        witver, witprog = segwit_addr.decode(constants.net.SEGWIT_HRP, addr)
    except Exception:
        # invalid input of any kind is simply "not a segwit address"
        return False
    return witprog is not None
def is_b58_address(addr):
    """Return True iff `addr` is a valid base58 P2PKH/P2SH address.
    Validity is checked by round-trip re-encoding, which verifies the checksum."""
    try:
        addrtype, h = b58_address_to_hash160(addr)
    except Exception:
        return False
    if addrtype not in [constants.net.ADDRTYPE_P2PKH, constants.net.ADDRTYPE_P2SH]:
        return False
    return addr == hash160_to_b58_address(h, addrtype)
def is_address(addr):
    """True iff `addr` is a valid address of any supported kind."""
    return is_segwit_address(addr) or is_b58_address(addr)
def is_private_key(key):
    """Return True iff `key` parses as a private key (minikey or WIF)."""
    try:
        # deserialize_privkey raises on invalid input; it never returns False,
        # so the old `k is not False` check was always True on success
        deserialize_privkey(key)
        return True
    except Exception:
        # narrowed from a bare `except:` (which also swallowed SystemExit etc.)
        return False
########### end pywallet functions #######################
def is_minikey(text):
    """Return True iff `text` is a valid Casascius-style minikey."""
    # Minikeys are typically 22 or 30 characters, but this routine
    # permits any length of 20 or more provided the minikey is valid.
    # A valid minikey must begin with an 'S', be in base58, and when
    # suffixed with '?' have its SHA256 hash begin with a zero byte.
    # They are widely used in Casascius physical bitcoins.
    return (len(text) >= 20 and text[0] == 'S'
            and all(ord(c) in __b58chars for c in text)
            and sha256(text + '?')[0] == 0x00)
def minikey_to_private_key(text):
    """The private key of a minikey is simply SHA256 of its text."""
    return sha256(text)
###################################### BIP32 ##############################

# top bit set marks a hardened ("prime") derivation index
BIP32_PRIME = 0x80000000
def protect_against_invalid_ecpoint(func):
    """Decorator for CKD_priv/CKD_pub: if derivation at index n yields an
    invalid EC point, retry with n+1 (skipping the index), and raise
    OverflowError if the retry would cross the hardened/non-hardened
    boundary."""
    def func_wrapper(*args):
        # the derivation index is always the last positional argument
        n = args[-1]
        while True:
            is_prime = n & BIP32_PRIME
            try:
                return func(*args[:-1], n=n)
            except ecc.InvalidECPointException:
                print_error('bip32 protect_against_invalid_ecpoint: skipping index')
                n += 1
                is_prime2 = n & BIP32_PRIME
                if is_prime != is_prime2: raise OverflowError()
    return func_wrapper
# Child private key derivation function (from master private key)
# k = master private key (32 bytes)
# c = master chain code (extra entropy for key derivation) (32 bytes)
# n = the index of the key we want to derive. (only 32 bits will be used)
# If n is hardened (i.e. the 32nd bit is set), the resulting private key's
# corresponding public key can NOT be determined without the master private key.
# However, if n is not hardened, the resulting private key's corresponding
# public key can be determined without the master private key.
@protect_against_invalid_ecpoint
def CKD_priv(k, c, n):
    """Derive the child private key/chain code at index n from parent (k, c)."""
    if n < 0: raise ValueError('the bip32 index needs to be non-negative')
    is_prime = n & BIP32_PRIME
    # the index is serialized as 4 big-endian bytes
    return _CKD_priv(k, c, bfh(rev_hex(int_to_hex(n,4))), is_prime)
def _CKD_priv(k, c, s, is_prime):
    """BIP32 private child derivation: parent key `k`, chain code `c`,
    serialized index `s`, `is_prime` for hardened derivation.
    Returns (child key, child chain code)."""
    try:
        keypair = ecc.ECPrivkey(k)
    except ecc.InvalidECPointException as e:
        raise BitcoinException('Impossible xprv (not within curve order)') from e
    cK = keypair.get_public_key_bytes(compressed=True)
    # hardened: HMAC over 0x00||k||s; normal: over the compressed pubkey||s
    data = bytes([0]) + k + s if is_prime else cK + s
    I = hmac_oneshot(c, data, hashlib.sha512)
    I_left = ecc.string_to_number(I[0:32])
    k_n = (I_left + ecc.string_to_number(k)) % ecc.CURVE_ORDER
    if I_left >= ecc.CURVE_ORDER or k_n == 0:
        # invalid result per BIP32; caller (decorator) skips to next index
        raise ecc.InvalidECPointException()
    k_n = ecc.number_to_string(k_n, ecc.CURVE_ORDER)
    c_n = I[32:]
    return k_n, c_n
# Child public key derivation function (from public key only)
# K = master public key
# c = master chain code
# n = index of key we want to derive
# This function allows us to find the nth public key, as long as n is
# not hardened. If n is hardened, we need the master private key to find it.
@protect_against_invalid_ecpoint
def CKD_pub(cK, c, n):
    """Derive the child public key/chain code at (non-hardened) index n."""
    if n < 0: raise ValueError('the bip32 index needs to be non-negative')
    # public derivation is impossible for hardened indices
    if n & BIP32_PRIME: raise Exception()
    return _CKD_pub(cK, c, bfh(rev_hex(int_to_hex(n,4))))
# helper function, callable with arbitrary string.
# note: 's' does not need to fit into 32 bits here! (c.f. trustedcoin billing)
def _CKD_pub(cK, c, s):
    """BIP32 public child derivation from compressed pubkey `cK`, chain
    code `c` and serialized index `s`. Returns (child pubkey, chain code)."""
    I = hmac_oneshot(c, cK + s, hashlib.sha512)
    # child point = point(I_left) + parent point
    pubkey = ecc.ECPrivkey(I[0:32]) + ecc.ECPubkey(cK)
    if pubkey.is_at_infinity():
        raise ecc.InvalidECPointException()
    cK_n = pubkey.get_public_key_bytes(compressed=True)
    c_n = I[32:]
    return cK_n, c_n
def xprv_header(xtype, *, net=None):
    """4 version bytes for an xprv of script type `xtype` on `net`."""
    if net is None:
        net = constants.net
    return bfh("%08x" % net.XPRV_HEADERS[xtype])
def xpub_header(xtype, *, net=None):
    """4 version bytes for an xpub of script type `xtype` on `net`."""
    if net is None:
        net = constants.net
    return bfh("%08x" % net.XPUB_HEADERS[xtype])
def serialize_xprv(xtype, c, k, depth=0, fingerprint=b'\x00'*4,
                   child_number=b'\x00'*4, *, net=None):
    """Serialize BIP32 fields into a Base58Check xprv string.
    Raises BitcoinException if `k` is not a valid secret."""
    if not ecc.is_secret_within_curve_range(k):
        raise BitcoinException('Impossible xprv (not within curve order)')
    # private keys are padded with a leading 0x00 byte in the serialization
    xprv = xprv_header(xtype, net=net) \
           + bytes([depth]) + fingerprint + child_number + c + bytes([0]) + k
    return EncodeBase58Check(xprv)
def serialize_xpub(xtype, c, cK, depth=0, fingerprint=b'\x00'*4,
                   child_number=b'\x00'*4, *, net=None):
    """Serialize BIP32 fields into a Base58Check xpub string."""
    xpub = xpub_header(xtype, net=net) \
           + bytes([depth]) + fingerprint + child_number + c + cK
    return EncodeBase58Check(xpub)
class InvalidMasterKeyVersionBytes(BitcoinException): pass
def deserialize_xkey(xkey, prv, *, net=None):
    """Decode a Base58Check-encoded xprv (prv=True) or xpub into its fields.

    Returns (xtype, depth, fingerprint, child_number, chain_code, K_or_k).
    Raises BitcoinException on bad length or impossible secret, and
    InvalidMasterKeyVersionBytes on unknown version bytes.
    """
    if net is None:
        net = constants.net
    xkey = DecodeBase58Check(xkey)
    if len(xkey) != 78:
        raise BitcoinException('Invalid length for extended key: {}'
                               .format(len(xkey)))
    depth = xkey[4]
    fingerprint = xkey[5:9]
    child_number = xkey[9:13]
    c = xkey[13:13+32]
    header = int.from_bytes(xkey[0:4], 'big')
    headers = net.XPRV_HEADERS if prv else net.XPUB_HEADERS
    if header not in headers.values():
        raise InvalidMasterKeyVersionBytes('Invalid extended key format: {}'
                                           .format(hex(header)))
    # reverse lookup: headers maps xtype -> version bytes
    xtype = next(xt for xt, h in headers.items() if h == header)
    n = 33 if prv else 32
    K_or_k = xkey[13+n:]
    if prv and not ecc.is_secret_within_curve_range(K_or_k):
        raise BitcoinException('Impossible xprv (not within curve order)')
    return xtype, depth, fingerprint, child_number, c, K_or_k
def deserialize_xpub(xkey, *, net=None):
    """Decode an xpub; see deserialize_xkey for the returned fields."""
    return deserialize_xkey(xkey, False, net=net)
def deserialize_xprv(xkey, *, net=None):
    """Decode an xprv; see deserialize_xkey for the returned fields."""
    return deserialize_xkey(xkey, True, net=net)
def xpub_type(x):
    """Script type ('standard', 'p2wpkh', ...) encoded in the xpub header."""
    return deserialize_xpub(x)[0]
def is_xpub(text):
    """Return True iff `text` is a valid extended public key."""
    try:
        deserialize_xpub(text)
        return True
    except Exception:
        # narrowed from a bare `except:` (which also swallowed SystemExit etc.)
        return False
def is_xprv(text):
    """Return True iff `text` is a valid extended private key."""
    try:
        deserialize_xprv(text)
        return True
    except Exception:
        # narrowed from a bare `except:` (which also swallowed SystemExit etc.)
        return False
def xpub_from_xprv(xprv):
    """Return the xpub corresponding to `xprv` (same depth/fingerprint/child)."""
    xtype, depth, fingerprint, child_number, c, k = deserialize_xprv(xprv)
    cK = ecc.ECPrivkey(k).get_public_key_bytes(compressed=True)
    return serialize_xpub(xtype, c, cK, depth, fingerprint, child_number)
def bip32_root(seed, xtype):
    """Derive the BIP32 master (xprv, xpub) pair from `seed` bytes."""
    I = hmac_oneshot(b"Bitcoin seed", seed, hashlib.sha512)
    master_k = I[0:32]
    master_c = I[32:]
    # create xprv first, as that will check if master_k is within curve order
    xprv = serialize_xprv(xtype, master_c, master_k)
    cK = ecc.ECPrivkey(master_k).get_public_key_bytes(compressed=True)
    xpub = serialize_xpub(xtype, master_c, cK)
    return xprv, xpub
def xpub_from_pubkey(xtype, cK):
    """Wrap a compressed pubkey in an xpub with a zero chain code."""
    # compressed pubkeys start with 0x02 or 0x03
    if cK[0] not in (0x02, 0x03):
        raise ValueError('Unexpected first byte: {}'.format(cK[0]))
    return serialize_xpub(xtype, b'\x00'*32, cK)
def bip32_derivation(s):
    """Yield the integer child indices of a path like "m/0/1'/2".
    NOTE: this is a generator, so the ValueError for a path not starting
    with "m/" is only raised once iteration begins."""
    if not s.startswith('m/'):
        raise ValueError('invalid bip32 derivation path: {}'.format(s))
    s = s[2:]
    for n in s.split('/'):
        if n == '': continue
        # trailing apostrophe marks a hardened index
        i = int(n[:-1]) + BIP32_PRIME if n[-1] == "'" else int(n)
        yield i
def convert_bip32_path_to_list_of_uint32(n: str) -> List[int]:
    """Convert bip32 path to list of uint32 integers with prime flags
    m/0/-1/1' -> [0, 0x80000001, 0x80000001]
    based on code in trezorlib
    """
    path = []
    for component in n.split('/')[1:]:
        if not component:
            continue
        hardened = 0
        if component.endswith("'"):
            component = component.replace('\'', '')
            hardened = BIP32_PRIME
        # a negative index is an alternative way of writing a hardened one
        if component.startswith('-'):
            hardened = BIP32_PRIME
        path.append(abs(int(component)) | hardened)
    return path
def is_bip32_derivation(x):
    """Return True iff `x` is a well-formed bip32 path like "m/0/1'"."""
    try:
        # fully consume the generator so malformed components are detected
        list(bip32_derivation(x))
        return True
    except Exception:
        # narrowed from a bare `except:` (which also swallowed SystemExit etc.)
        return False
def bip32_private_derivation(xprv, branch, sequence):
    """Derive (xprv, xpub) at `sequence` (e.g. "m/0'/1") below `branch`
    (e.g. "m"); `sequence` must extend `branch`."""
    if not sequence.startswith(branch):
        raise ValueError('incompatible branch ({}) and sequence ({})'
                         .format(branch, sequence))
    if branch == sequence:
        return xprv, xpub_from_xprv(xprv)
    xtype, depth, fingerprint, child_number, c, k = deserialize_xprv(xprv)
    sequence = sequence[len(branch):]
    for n in sequence.split('/'):
        if n == '': continue
        # trailing apostrophe marks a hardened index
        i = int(n[:-1]) + BIP32_PRIME if n[-1] == "'" else int(n)
        parent_k = k
        k, c = CKD_priv(k, c, i)
        depth += 1
    # fingerprint/child_number refer to the last derivation step
    parent_cK = ecc.ECPrivkey(parent_k).get_public_key_bytes(compressed=True)
    fingerprint = hash_160(parent_cK)[0:4]
    child_number = bfh("%08X"%i)
    cK = ecc.ECPrivkey(k).get_public_key_bytes(compressed=True)
    xpub = serialize_xpub(xtype, c, cK, depth, fingerprint, child_number)
    xprv = serialize_xprv(xtype, c, k, depth, fingerprint, child_number)
    return xprv, xpub
def bip32_public_derivation(xpub, branch, sequence):
    """Derive the xpub at `sequence` below `branch` (non-hardened only)."""
    xtype, depth, fingerprint, child_number, c, cK = deserialize_xpub(xpub)
    if not sequence.startswith(branch):
        raise ValueError('incompatible branch ({}) and sequence ({})'
                         .format(branch, sequence))
    sequence = sequence[len(branch):]
    for n in sequence.split('/'):
        if n == '': continue
        i = int(n)
        parent_cK = cK
        cK, c = CKD_pub(cK, c, i)
        depth += 1
    # fingerprint/child_number refer to the last derivation step
    fingerprint = hash_160(parent_cK)[0:4]
    child_number = bfh("%08X"%i)
    return serialize_xpub(xtype, c, cK, depth, fingerprint, child_number)
def bip32_private_key(sequence, k, chain):
    """Derive the private key at the list of indices `sequence` from (k, chain)."""
    for i in sequence:
        k, chain = CKD_priv(k, chain, i)
    return k
| cryptapus/electrum | electrum/bitcoin.py | Python | mit | 25,370 |
"""
We train a simple Q-Learning algorithm for fraud detection.
"""
import state_space
import action_space
import numpy as np
class BanditAgent:
    """Per-state UCB1 multi-armed bandit for the fraud-detection actions.

    Keeps, for every (state, action) pair, a play count and an
    incrementally-updated average reward, and selects actions with the
    UCB1 rule.
    """

    def __init__(self, do_reward_shaping=False):
        # frequency[s, a]: number of times action a was taken in state s
        self.frequency = np.zeros((state_space.SIZE, action_space.SIZE))
        # avg_reward[s, a]: running mean reward observed for (s, a)
        self.avg_reward = np.zeros((state_space.SIZE, action_space.SIZE))
        self.do_reward_shaping = do_reward_shaping

    def take_action(self, state, customer=None):
        """Pick an action for `state` via UCB1 (`customer` is unused here)."""
        rew = self.avg_reward[state]
        freq = self.frequency[state]
        # UCB1 is undefined for unplayed arms: log(0) and division by zero
        # produced nan/inf (with numpy warnings). Play each untried arm
        # first -- which is also what argmax-over-inf effectively did.
        untried = np.flatnonzero(freq == 0)
        if untried.size:
            return untried[0]
        action = np.argmax(rew + np.sqrt(2 * np.log(np.sum(freq)) / freq))
        return action

    def update(self, state, action, reward, next_state):
        """Record `reward` for (state, action); `next_state` is unused."""
        if self.do_reward_shaping:
            if reward > 0:  # transaction was successful
                if action == 0:
                    reward += 0.1
        self.frequency[state, action] += 1
        # incremental mean: m_k = m_{k-1} + (x_k - m_{k-1}) / k
        self.avg_reward[state, action] = self.avg_reward[state, action] + (reward - self.avg_reward[state, action]) / self.frequency[state, action]
| lmzintgraf/MultiMAuS | learning/agent_bandit.py | Python | mit | 1,093 |
from threading import local
__author__ = 'jhg | https://djangosnippets.org/snippets/3041/'
class SiteID(object):
    """
    Thread-safe holder for the current SITE_ID.

    The previous implementation assigned SITE_ID on a ``threading.local``
    from ``__init__``, so only the thread that created the instance could
    read it back; every other thread got an AttributeError. Declaring the
    default on a ``local`` subclass gives each thread its own SITE_ID,
    initialised to 1.
    """

    class _ThreadInfo(local):
        # a class attribute on a `local` subclass acts as the per-thread default
        SITE_ID = 1

    def __init__(self):
        self._site_thread_info = self._ThreadInfo()

    def __int__(self):
        return self._site_thread_info.SITE_ID

    def __hash__(self):
        return self._site_thread_info.SITE_ID

    def __str__(self):
        return str(self._site_thread_info.SITE_ID)

    def _set(self, new_id):
        # only affects the calling thread's view of SITE_ID
        self._site_thread_info.SITE_ID = new_id
| jisson/django-simple-domain | django_simple_domain/site_id_local.py | Python | mit | 652 |
import argparse
from datetime import datetime, timedelta
import logging
import os
import re
import stat
import sys
import pyexiv2
import photo_rename
from photo_rename import Filemap, FilemapList, FileMetadata, Harvester
from photo_rename.utils import CustomArgumentParser
logger = logging.getLogger(__name__)
def process_all_files(workdir, initial_dt, interval, simon_sez=None):
    """
    Manage the entire process of gathering data and renaming files.

    Sets EXIF/XMP datetimes on the files in `workdir`, starting at
    `initial_dt` ("YYYY-mm-DD HH:MM:SS") and advancing `interval` seconds
    per file. Unless `simon_sez` is truthy this is a dry run that only
    logs what would be done. Exits the process if `workdir` is missing
    or not writable.
    """
    error = False
    if not os.path.exists(workdir):
        logger.error(
            "Directory {0} does not exist. Exiting.".format(workdir))
        error = True
    if not os.access(workdir, os.W_OK):
        logger.error(
            "Destination directory {0} is not writable. Exiting.".format(
                workdir))
        error = True
    if error:
        # logger.warn() is deprecated; use logger.warning()
        logger.warning("Exiting due to errors.")
        sys.exit(1)

    start_datetime = datetime.strptime(initial_dt, '%Y-%m-%d %H:%M:%S')
    harvester = Harvester(workdir)
    files = harvester["files"]
    fmds = []
    # enumerate replaces the manual `counter` bookkeeping
    for counter, fn in enumerate(files):
        # each successive file is offset by `interval` seconds
        this_dt = start_datetime + timedelta(0, counter * interval)
        fmd = FileMetadata(os.path.join(workdir, fn))
        fmds.append(fmd)
        msg = "Set datetime: {} : {}".format(
            fn, this_dt.strftime('%Y:%m:%d %H:%M:%S'))
        if simon_sez:
            fmd.set_datetime(this_dt)
        else:
            msg = "DRY RUN: {}".format(msg)
        logger.info(msg)
def main():
    """
    Parse command-line arguments and initiate file processing.
    """
    parser = CustomArgumentParser()
    parser.add_argument("-s", "--simon-sez",
            help="Really, Simon sez copy the data!", action="store_true")
    parser.add_argument("-d", "--directory",
            help="Set EXIF/XMP DateTime on files in this directory.")
    parser.add_argument("-t", "--datetime",
            help="Initial datetime YYYY-mm-DD HH:MM:SS.")
    parser.add_argument("-i", "--interval",
            help="Interval in seconds to use for successive files.")
    parser.add_argument("-v", "--verbose", help="Log level to DEBUG.",
            action="store_true")
    args = parser.parse_args()
    error = False

    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)

    # Use current directory if --directory not specified.
    workdir = args.directory
    if workdir is None:
        workdir = os.getcwd()
        logger.info(
            "--directory not given. Using workdir={}".format(workdir))

    if not args.datetime:
        logger.error("Initial datetime not set.")
        error = True
    if args.datetime and not re.match(
            r'\d{4}-\d\d-\d\d \d\d:\d\d:\d\d', args.datetime):
        logger.error("Invalid datetime. Use YYYY-mm-DD HH:MM:SS.")
        error = True

    if not args.interval:
        # Default to 60 seconds.
        interval = 60
        # logger.warn() is deprecated; use logger.warning()
        logger.warning(
            "--interval not specified. Using {} second interval".format(
                interval))
    else:
        # previously an unguarded int() here crashed with a traceback
        # on non-numeric input
        try:
            interval = int(args.interval)
        except ValueError:
            logger.error(
                "Invalid interval: {}. Must be an integer.".format(
                    args.interval))
            error = True

    if error:
        logger.error("Exiting due to errors.")
        parser.usage_message()
        sys.exit(1)
    process_all_files(
        workdir, args.datetime, interval, simon_sez=args.simon_sez)
if __name__ == '__main__': # pragma: no cover
main()
| eigenholser/jpeg_rename | photo_rename/set_datetime.py | Python | mit | 3,637 |
from test_helper import TestHelper
from app import app, db
from app.models.plant_size import PlantSize
class TestPlantSize(TestHelper):
    """Unit tests for the PlantSize model."""

    def test_create_plant_size(self):
        """A plant size can be created and persisted."""
        ps1 = PlantSize(label='1 Gal')
        # `is not None` instead of `!= None` (identity, not equality)
        assert ps1 is not None
        assert PlantSize.save_to_db(ps1) != False

    def test_validation(self):
        """Saving a duplicate label is rejected."""
        ps1 = PlantSize(label='1 Gal')
        PlantSize.save_to_db(ps1)
        ps2 = PlantSize(label='1 Gal')
        assert PlantSize.save_to_db(ps2) == False

    def test_tohash(self):
        """to_hash() returns the dict representation."""
        ps1 = PlantSize(label='1 Gal')
        assert ps1.to_hash() == {'label': '1 Gal'}

    def test_tojson(self):
        """to_json() returns the JSON representation."""
        ps1 = PlantSize(label='1 Gal')
        assert ps1.to_json() == '{"label": "1 Gal"}'
"""
CraftyPluginOrganizer v0.8.0
Python 3
Requires modules from pip:
cfscrape
BeautifulSoup4
Linux only, `curl` and `nodejs` packages required.
(add more dependencies from github page for cfscrape)
Download from sites:
SpigotMC.org
GitHub
BukkitDev
Jenkins CI
EngineHub
TODO:
Add support for uncompressing .zip files.
Look at page if plugin has changed since last download.
Add checks to make sure the file exists and is not an html page.
Clean up most of the output from the script to be more readable.
Add clean up for older organized plugin downloads
"""
import subprocess
import os, shutil
import cfscrape
from bs4 import BeautifulSoup
import requests
import time
import json
from urllib.request import urlopen
import configparser
# NOTE: the `global datetime` / `global disableSSL` statements that used to
# be here had no effect -- `global` is a no-op at module scope.
# Config
# session timestamp used to name this run's Organized/<datetime> folder;
# NB: shadows the common `datetime` module name
datetime = time.strftime("%m-%d-%Y--%I:%M%p")
disableSSL = True
configDir = "config"
# End Config
# Title
print("CraftyPluginOrganizer v0.8.0\ncolebob9\n")

# Delete Download directory from any previous run so stale files don't linger
if os.path.exists("Download"):
    print("Deleting Download Directory...")
    shutil.rmtree("Download")

# Make directories
if not os.path.exists("Download"):
    os.mkdir("Download")
    print("Made Download directory. All downloaded plugins will be stored here.")
if not os.path.exists("Organized"):
    os.mkdir("Organized")
    print("Made Organized directory. All plugins will be sorted here.")
if not os.path.exists("Organized/" + datetime):
    # was `"Organized/" + "/" + datetime`: the doubled slash happened to work
    # on POSIX but was inconsistent with the existence check above
    os.mkdir("Organized/" + datetime)
    print("Made Organized/" + datetime + " directory. All plugins will be sorted here.")
# Create config
if not os.path.exists(configDir):
print("Did not find config directory. Creating...")
os.mkdir(configDir)
config = configparser.RawConfigParser()
config.optionxform = str
if not os.path.isfile(configDir + "/" + "plugins.cfg"):
print("Did not find plugins.cfg. Creating example...")
# SpigotMCLatestDownload
config.add_section('PerWorldInventory')
config.set('PerWorldInventory', 'site', "SpigotMCLatest")
config.set('PerWorldInventory', 'url', "https://www.spigotmc.org/resources/per-world-inventory.4482/")
config.set('PerWorldInventory', 'filetype', ".jar")
config.set('PerWorldInventory', 'servers', "Creative,Survival")
#SpigotMCDownload
config.add_section('PerWorldInventory2')
config.set('PerWorldInventory2', 'site', "SpigotMCDownload")
config.set('PerWorldInventory2', 'url', "https://www.spigotmc.org/resources/per-world-inventory.4482/download?version=151285")
config.set('PerWorldInventory2', 'filetype', ".jar")
config.set('PerWorldInventory2', 'servers', "Creative,Survival")
# GitHubLatestRelease
config.add_section('ProtocolLib')
config.set('ProtocolLib', 'site', "GitHubRelease")
config.set('ProtocolLib', 'url', "https://github.com/dmulloy2/ProtocolLib/releases")
config.set('ProtocolLib', 'filetype', ".jar")
config.set('ProtocolLib', 'servers', "Hub,Creative,Survival")
# BukkitDev / GeneralCurl
config.add_section('WorldEdit')
config.set('WorldEdit', 'site', "Curl")
config.set('WorldEdit', 'url', "https://dev.bukkit.org/projects/worldedit/files/latest")
config.set('WorldEdit', 'filetype', ".jar")
config.set('WorldEdit', 'servers', "Hub,Creative,Survival")
# Jenkins CI
config.add_section('Multiverse-Core')
config.set('Multiverse-Core', 'site', "Jenkins")
config.set('Multiverse-Core', 'url', "https://ci.onarandombox.com/job/Multiverse-Core/lastSuccessfulBuild/")
config.set('Multiverse-Core', 'filetype', ".jar")
config.set('Multiverse-Core', 'servers', "Hub,Creative,Survival")
config.set('Multiverse-Core', 'searchfor', "Multiverse-Core")
config.set('Multiverse-Core', 'searchforend', "SNAPSHOT")
# EngineHub
config.add_section('WorldEdit-Dev')
config.set('WorldEdit-Dev', 'site', "EngineHub")
config.set('WorldEdit-Dev', 'url', "http://builds.enginehub.org/job/worldedit/last-successful?branch=master")
config.set('WorldEdit-Dev', 'filetype', ".jar")
config.set('WorldEdit-Dev', 'servers', "Hub,Creative")
with open(configDir + "/" + 'plugins.cfg', 'w') as configfile:
config.write(configfile)
print("An example plugins.cfg has been created.")
print("Please configure and launch again.")
def organize(pluginName, fileFormat, servers):
    """Copy the downloaded plugin into each server's dated Organized folder.

    NOTE(review): assumes the caller has already chdir'd into Download/;
    the chdir("..") below returns to the project root. This cwd dance is
    fragile -- absolute paths would be safer.
    """
    os.chdir("..")
    # To check plugin was downloaded correctly
    # To sort plugins
    for s in servers:
        if not os.path.exists("Organized/" + datetime + "/" + s):
            os.mkdir("Organized/" + datetime + "/" + s)
            print("Made " + s + " server directory.")
        fromDownloadJar = "Download/" + pluginName + fileFormat
        toOrganizedJar = "Organized/" + datetime + "/" + s + "/" + pluginName + fileFormat
        shutil.copy(fromDownloadJar, toOrganizedJar)
        print("Copied: " + fromDownloadJar + " to " + toOrganizedJar)
    print("")
# To find the latest download link from SpigotMC website, then download latest plugin with found link.
# Make sure this is used with a resource that has the download through SpigotMC, not a redirect to another website.
def spigotmcLatestDownload(pluginName, url, fileFormat, servers):
    """Scrape a SpigotMC resource page for its download link, fetch the
    file with curl (passing Cloudflare cookies from cfscrape), then sort it.

    Only for resources hosted on SpigotMC itself, not external redirects.
    """
    os.chdir("Download")
    print("[DOWNLOAD] Downloading latest version of " + pluginName + " from SpigotMC.org.\n")
    pluginNameHtml = pluginName + ".html"
    spigotMCAddress = "https://www.spigotmc.org/"
    scraper = cfscrape.create_scraper()
    # To find link in web page.
    #r = requests.get(url)
    r = scraper.get(url)
    encoding = r.encoding if 'charset' in r.headers.get('content-type', '').lower() else None
    soup = BeautifulSoup(r.content, "html5lib", from_encoding=encoding)
    # NOTE(review): if several 'a.inner' links exist, only the last one
    # found is kept
    for link in soup.find_all('a', {'class': "inner"}):
        latestDownload = link.get('href')
    # resources/protocollib.1997/download?version=131115
    # https://www.spigotmc.org/resources/protocollib.1997/download?version=131115
    print("Found link: " + latestDownload)
    fullLatestDownload = spigotMCAddress + latestDownload
    print("Full link: " + fullLatestDownload)
    # Download latest plugin.
    cookie_arg, user_agent = cfscrape.get_cookie_string(fullLatestDownload)
    print("Downloading jar file: " + pluginName + fileFormat)
    subprocess.call(["curl", "-o", pluginName + fileFormat, "--cookie", cookie_arg, "-A", user_agent, fullLatestDownload])
    # Sometimes fails with ProtocolLib, used too much?
    organize(pluginName, fileFormat, servers)
def spigotmcPluginDownload(pluginName, url, fileFormat, servers):
    """Download one specific plugin version from SpigotMC.

    Requires a direct, version-specific download link for the resource.
    """
    os.chdir("Download")
    print("[DOWNLOAD] Downloading " + pluginName + " from SpigotMC.\n")
    # Obtain the Cloudflare clearance cookie/user agent so curl is not blocked.
    cookie_arg, user_agent = cfscrape.get_cookie_string(url)
    curl_command = ["curl", "-o", pluginName + fileFormat,
                    "--cookie", cookie_arg, "-A", user_agent, url]
    subprocess.call(curl_command)
    organize(pluginName, fileFormat, servers)
def githubLatestRelease(pluginName, url, fileFormat, servers):
    """Download the newest release asset of a plugin hosted on GitHub.

    Accepts either a normal https://github.com/<owner>/<repo>/releases link
    (converted to the API form) or an api.github.com .../releases link.
    """
    os.chdir("Download")
    print("Link: " + url)
    if url.startswith('https://github.com'):
        print("URL is a normal release link. Converting to an API link...")
        # len("https://github.com") == 18; keep the /owner/repo/... suffix.
        url = "https://api.github.com/repos" + url[18:]
        print("API link: " + url)
    elif url.startswith("https://api.github.com") and url.endswith("releases"):
        print("URL is an API link. Proceeding...")
    else:
        print("GitHub link may be invalid, proceeding as if it is an API link.")
    print("[DOWNLOAD] Downloading latest release of " + pluginName + " from GitHub")
    # The releases API returns a JSON array ordered newest-first; grab the
    # first asset of the first (latest) release.
    releases = json.loads(urlopen(url).read().decode('utf8'))
    latestDownloadLink = releases[0]['assets'][0]['browser_download_url']
    print("Found latest release download: " + latestDownloadLink)
    curl_command = ["curl", "-o", pluginName + fileFormat, "-L", latestDownloadLink]
    print("Using curl command:")
    print(curl_command)
    subprocess.call(curl_command)
    organize(pluginName, fileFormat, servers)
# For any site that uses a permalink to download a specific or latest version
# (BukkitDev, a developer's own website, etc.).
def generalCurl(pluginName, url, fileFormat, servers):
    """Fetch a plugin from an arbitrary permalink with curl (follows redirects)."""
    os.chdir("Download")
    print("[DOWNLOAD] Downloading " + pluginName + " from URL: " + url)
    destination = pluginName + fileFormat
    subprocess.call(["curl", "-o", destination, "-L", url])
    organize(pluginName, fileFormat, servers)
def jenkinsLatestDownload(pluginName, url, fileFormat, searchFor, searchForEnd, servers):
    """Scrape a Jenkins CI artifact listing and download the newest build.

    A link is considered a match when it contains `searchFor` and ends with
    `searchForEnd + fileFormat`; if several match, the last one wins.
    """
    os.chdir("Download")
    # Whether to skip TLS certificate verification for hosts with untrusted
    # certs. Bug fix: this name was previously undefined (the input() that set
    # it was commented out), raising NameError both on the SSLError path and
    # at the curl step below.
    disableSSL = False
    try:
        r = requests.get(url)
    except requests.exceptions.SSLError:
        answer = input("The script has detected that this website's SSL certificates are causing problems. (Most likely an untrusted SSL cert.) \nWould you like to disable SSL to continue? (ONLY DISABLE IF YOU TRUST THE SITE) y/n: ").lower()
        disableSSL = answer in ("y", "yes")
        if disableSSL:
            r = requests.get(url, verify=False)
        else:
            print("skipping...")
            return
    encoding = r.encoding if 'charset' in r.headers.get('content-type', '').lower() else None
    soup = BeautifulSoup(r.content, "html5lib", from_encoding=encoding)
    latestDownload = None
    for link in soup.find_all('a'):
        hrefLink = str(link.get('href'))
        if hrefLink.count(searchFor) and hrefLink.endswith(searchForEnd + fileFormat):
            latestDownload = hrefLink
            print("File found: " + latestDownload)
    if latestDownload is None:
        # Previously an unbound-name crash; now a clean skip.
        print("No matching file found on " + url + "; skipping " + pluginName + ".")
        return
    latestDownloadLink = url + latestDownload
    print("Full link: " + latestDownloadLink)
    print("[DOWNLOAD] Downloading " + pluginName + " from Jenkins CI.")
    curl_command = ["curl", "-o", pluginName + fileFormat, "-L", latestDownloadLink]
    if disableSSL:
        # -k tells curl to also ignore certificate problems.
        curl_command.insert(1, "-k")
    subprocess.call(curl_command)
    organize(pluginName, fileFormat, servers)
def engineHubLatestDownload(pluginName, url, fileFormat, servers):
    """Scrape an EngineHub build page and download the newest artifact.

    Modified code from obzidi4n's plugins.py script.
    """
    # Fetch the build page and pull the first artifact link out of the
    # main column.
    r = requests.get(url)
    soup = BeautifulSoup(r.text, 'html.parser')
    column = soup.find(class_="col-md-8")
    target = column.find('a')['href']
    fileName = pluginName + fileFormat
    # Report what we are about to fetch.
    print('Plugin:', pluginName)
    print('Target:', target)
    print('File: ', fileName, '\n')
    # Bug fix: the original also did requests.get(target, stream=True,
    # verify=False) here, downloading the artifact a second time into memory,
    # discarding it, and disabling certificate verification. curl below is
    # the only download needed.
    os.chdir("Download")
    print("[DOWNLOAD] Downloading " + pluginName + " from EngineHub.")
    subprocess.call(["curl", "-o", fileName, "-L", target])
    organize(pluginName, fileFormat, servers)
# Read the plugin definitions and dispatch every section to the downloader
# implementation named by its "site" key.
config.read(configDir + "/" + "plugins.cfg")
for section in config.sections():
    site = config.get(section, 'site')
    url = config.get(section, 'url')
    filetype = config.get(section, 'filetype')
    servers = config.get(section, 'servers').split(',')
    if site == "SpigotMCLatest":
        spigotmcLatestDownload(section, url, filetype, servers)
    elif site == "SpigotMCDownload":
        spigotmcPluginDownload(section, url, filetype, servers)
    elif site == "GitHubRelease":
        githubLatestRelease(section, url, filetype, servers)
    elif site == "Curl":
        generalCurl(section, url, filetype, servers)
    elif site == "Jenkins":
        # Jenkins scraping needs two extra config keys to identify the jar.
        searchFor = config.get(section, 'searchfor')
        searchForEnd = config.get(section, 'searchforend')
        jenkinsLatestDownload(section, url, filetype, searchFor, searchForEnd, servers)
    elif site == "EngineHub":
        engineHubLatestDownload(section, url, filetype, servers)
    else:
        print(section + " has an invalid site: " + site)
        print("Please fix in your plugins.cfg config")
| colebob9/CraftyPluginOrganizer | CPO-Organize.py | Python | mit | 12,328 |
def Endscript(text, *keys):
    """Encrypt *text* with a repeating-key Caesar shift.

    Each character is shifted by the next key in *keys* (cycled), modulo 256.
    """
    shifted = []
    for position, character in enumerate(text):
        key = keys[position % len(keys)]
        shifted.append(chr((ord(character) + key) % 256))
    return "".join(shifted)
# --- Command-line driver: encrypt a plain-text file with Endscript ---
NamePlainText = input("Please enter the file name of the plain text: ")
Key = int(input("Please enter encryption key: "))
NameCipherText = input("Please enter the file name of the cipher text: ")
# Context managers guarantee the files are closed even if Endscript or the
# write raises; the original leaked both handles on any error.
with open(NamePlainText, "r") as plain_file:
    plain_text = plain_file.read()
with open(NameCipherText, "w") as cipher_file:
    cipher_file.write(Endscript(plain_text, Key))
| Kasemsanm/Python | Security/Encryption/Encryption.py | Python | mit | 488 |
# -*- coding: utf-8 -*-
"""
aerende.configuration
------------------
This module contains the configuration logic for aerende. This includes the
logic for loading a config file, writing a config file none is present, and
the loading of default configuration options.
There are 3 seperate configuration categories, at the moment. They are:
palette :: defines the colour palettes used various elements.
data_options :: defines settings for aerende's data
key_bindings :: defaults the key bindings for aerende
"""
from os import path, makedirs
import yaml
class Configuration(object):
    """Aerende configuration: loads user settings from a YAML file, writing a
    default configuration file first if none exists.
    """

    # Default Configuration Options

    # Default options for the palettes
    # For more information on valid palette settings, see:
    # http://urwid.org/manual/displayattributes.html#standard-foreground-colors
    # status_bar :: the palette for the status bar at the bottom
    # edit_bar :: the palette for the note editing bar
    # highlight_note :: the palette for the currently focused note
    # high_priority :: the palette for any high priority (>= 10) note
    DEFAULT_PALETTE = {
        'status_bar': ['black', 'white'],
        'edit_bar': ['black', 'light red'],
        'highlight_note': ['light blue', 'default'],
        'high_priority': ['light red', 'default']
    }

    # Default options for the data options.
    # data_path :: path to the aerende YAML data file
    DEFAULT_DATA_OPTIONS = {
        'data_path': '~/.andgeloman/aerende/data.yml'
    }

    # Default key bindings
    # new_note :: key to create a new note
    # delete_note :: key to delete the focused note
    # edit_note :: key to edit the focused note
    # increment_note_priority :: key to increment the focused note's priority
    # super_increment_note_priorty :: as above, but by +10
    # decrement_note_priority :: key to decrement the focused note's priority
    # super_decrement_note_priority :: as above, but by -10
    # quit :: key to exit aerende
    # next_note :: focus the next note
    # previous_note :: focus the previous note
    DEFAULT_KEY_BINDINGS = {
        'new_note': 'n',
        'delete_note': 'd',
        'edit_note': 'e',
        'increment_note_priority': '+',
        'super_increment_note_priority': 'meta +',
        'decrement_note_priority': '-',
        'super_decrement_note_priority': 'meta -',
        'quit': 'q',
        'next_note': ['j', 'down'],
        'previous_note': ['k', 'up']
    }

    DEFAULT_CONFIG = {
        'palette': DEFAULT_PALETTE,
        'data_options': DEFAULT_DATA_OPTIONS,
        'key_bindings': DEFAULT_KEY_BINDINGS
    }

    # Banner to prepend to the default configuration if it does not exist.
    CONFIG_BANNER = """# Aerende :: Configuration File
#
# Please see
# https://aerende.readthedocs.io/en/latest/usage/configuration.html for a
# complete reference of configuration options, as well as their effects.
"""

    def __init__(self, configuration_path):
        """Preload the configuration from the defaults, then overlay the
        user's configuration file (creating it from the defaults if missing).
        """
        # Bug fix: copy the class-level defaults instead of aliasing them, so
        # the .update() calls in __load_configuration_values can never mutate
        # the shared DEFAULT_* dictionaries.
        self.palette = dict(self.DEFAULT_PALETTE)
        self.data_path = dict(self.DEFAULT_DATA_OPTIONS)
        self.key_bindings = dict(self.DEFAULT_KEY_BINDINGS)
        self.__load_configuration(configuration_path)

    def __load_configuration(self, configuration_path):
        """Load the configuration from the supplied path. If the file does
        not exist at this path, create it from the default config settings.
        """
        expanded_path = path.expanduser(configuration_path)
        if not path.exists(path.dirname(expanded_path)):
            makedirs(path.dirname(expanded_path))
        if not path.exists(expanded_path):
            with open(expanded_path, 'w') as config_file:
                config_file.write(self.CONFIG_BANNER)
                yaml.dump(self.DEFAULT_CONFIG, config_file,
                          default_flow_style=False)
            # The defaults were already loaded in __init__, so there is
            # nothing further to read.
        else:
            self.__load_configuration_values(expanded_path)

    def __load_configuration_values(self, config_path):
        """Load the configuration file and update the config dictionaries
        from its contents.
        """
        with open(config_path, 'r') as config_file:
            # safe_load refuses arbitrary YAML tags in this user-editable
            # file (plain yaml.load is unsafe and deprecated without a
            # Loader). An empty file yields None; treat it as no overrides.
            config_dict = yaml.safe_load(config_file) or {}
            config_variables = {
                'palette': self.palette,
                'data_options': self.data_path,
                'key_bindings': self.key_bindings
            }
            for key, value in config_variables.items():
                self.__update_configuration(key, config_dict, value)

    def __update_configuration(self, config_key, config_dict, var):
        """Update one config dictionary from its category key, if present."""
        if config_key in config_dict:
            var.update(config_dict[config_key])

    def get_palette(self):
        """Return the palette as a list of urwid palette entries."""
        return [[k] + v for k, v in self.palette.items()]

    def get_data_path(self):
        """Return the path to the aerende data file."""
        return self.data_path['data_path']

    def get_key_bindings(self):
        """Return the key-binding map."""
        return self.key_bindings
| Autophagy/aerende | aerende/configuration.py | Python | mit | 5,140 |
import unittest
import datetime
from loggerglue.util.parse_timestamp import parse_timestamp
class TestParseTimestamp(unittest.TestCase):
    """Exercise parse_timestamp against RFC 3339 timestamps covering
    fractional seconds, UTC, and positive/negative offsets.
    """

    # Mapping of input timestamp -> expected (UTC-normalized) datetime.
    pairs = {
        '2003-10-11T12:14:15.003000Z': datetime.datetime(2003, 10, 11, 12, 14, 15, 3000),
        '2003-10-11T12:14:15.003Z': datetime.datetime(2003, 10, 11, 12, 14, 15, 3000),
        '2003-10-11T12:14:15Z': datetime.datetime(2003, 10, 11, 12, 14, 15, 0),
        '2003-10-11T12:14:15.003000+04:00': datetime.datetime(2003, 10, 11, 8, 14, 15, 3000),
        '2003-10-11T12:14:15.003+04:00': datetime.datetime(2003, 10, 11, 8, 14, 15, 3000),
        '2003-10-11T12:14:15+04:00': datetime.datetime(2003, 10, 11, 8, 14, 15, 0),
        # Pacific/Kiritimati
        '2003-10-11T12:14:15.003000+14:00': datetime.datetime(2003, 10, 10, 22, 14, 15, 3000),
        '2003-10-11T12:14:15.003+14:00': datetime.datetime(2003, 10, 10, 22, 14, 15, 3000),
        '2003-10-11T12:14:15+14:00': datetime.datetime(2003, 10, 10, 22, 14, 15, 0),
        '2003-10-11T12:14:15.003000-04:00': datetime.datetime(2003, 10, 11, 16, 14, 15, 3000),
        '2003-10-11T12:14:15.003-04:00': datetime.datetime(2003, 10, 11, 16, 14, 15, 3000),
        '2003-10-11T12:14:15-04:00': datetime.datetime(2003, 10, 11, 16, 14, 15, 0),
        '2003-10-11T12:14:15.003000-12:00': datetime.datetime(2003, 10, 12, 0, 14, 15, 3000),
        '2003-10-11T12:14:15.003-12:00': datetime.datetime(2003, 10, 12, 0, 14, 15, 3000),
        '2003-10-11T12:14:15-12:00': datetime.datetime(2003, 10, 12, 0, 14, 15, 0),
    }

    # Show both the standard failure message and the custom one (the input).
    longMessage = True

    def test_parse(self):
        for timestamp, expected in self.pairs.items():
            self.assertEqual(expected, parse_timestamp(timestamp), timestamp)
# Allow running this module directly as a test script.
if __name__ == '__main__':
    unittest.main()
| AlekSi/loggerglue | loggerglue/tests/test_parse_timestamp.py | Python | mit | 1,797 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.utils.importlib import import_module
# Default settings
# Built-in defaults for every supported BOOTSTRAP3 setting. The *_renderers
# entries are dotted paths resolved lazily by get_renderer().
BOOTSTRAP3_DEFAULTS = {
    'jquery_url': '//code.jquery.com/jquery.min.js',
    'base_url': '//netdna.bootstrapcdn.com/bootstrap/3.2.0/',
    'css_url': None,
    'theme_url': None,
    'javascript_url': None,
    'javascript_in_head': False,
    'include_jquery': False,
    'horizontal_label_class': 'col-md-4',
    'horizontal_field_class': 'col-md-8',
    'set_required': True,
    'set_placeholder': True,
    'required_css_class': '',
    'error_css_class': 'has-error',
    'success_css_class': 'has-success',
    'formset_renderers': {
        'default': 'bootstrap3.renderers.FormsetRenderer',
    },
    'form_renderers': {
        'default': 'bootstrap3.renderers.FormRenderer',
    },
    'field_renderers': {
        'default': 'bootstrap3.renderers.FieldRenderer',
        'inline': 'bootstrap3.renderers.InlineFieldRenderer',
    },
}

# Start with a copy of default settings
BOOTSTRAP3 = BOOTSTRAP3_DEFAULTS.copy()

# Override with user settings from settings.py
BOOTSTRAP3.update(getattr(settings, 'BOOTSTRAP3', {}))
def get_bootstrap_setting(setting, default=None):
    """Look up a single BOOTSTRAP3 setting, falling back to *default*."""
    value = BOOTSTRAP3.get(setting, default)
    return value
def bootstrap_url(postfix):
    """Build a full URL by appending *postfix* to the configured base URL."""
    base = get_bootstrap_setting('base_url')
    return base + postfix
def jquery_url():
    """Return the configured URL of the jQuery script."""
    url = get_bootstrap_setting('jquery_url')
    return url
def javascript_url():
    """Return the Bootstrap JavaScript URL, defaulting to the CDN copy."""
    configured = get_bootstrap_setting('javascript_url')
    return configured if configured else bootstrap_url('js/bootstrap.min.js')
def css_url():
    """Return the Bootstrap CSS URL, defaulting to the CDN copy."""
    configured = get_bootstrap_setting('css_url')
    return configured if configured else bootstrap_url('css/bootstrap.min.css')
def theme_url():
    """Return the configured theme CSS URL (None when no theme is set)."""
    url = get_bootstrap_setting('theme_url')
    return url
def get_renderer(renderers, **kwargs):
    """Resolve the renderer class for the requested layout.

    Falls back to the 'default' entry when the layout has no dedicated
    renderer; the entry is a dotted "module.Class" path.
    """
    layout = kwargs.get('layout', '')
    dotted_path = renderers.get(layout, renderers['default'])
    module_path, class_name = dotted_path.rsplit(".", 1)
    return getattr(import_module(module_path), class_name)
def get_formset_renderer(**kwargs):
    """Return the formset renderer class for the requested layout."""
    configured = get_bootstrap_setting('formset_renderers')
    return get_renderer(configured, **kwargs)
def get_form_renderer(**kwargs):
    """Return the form renderer class for the requested layout."""
    configured = get_bootstrap_setting('form_renderers')
    return get_renderer(configured, **kwargs)
def get_field_renderer(**kwargs):
    """Return the field renderer class for the requested layout."""
    configured = get_bootstrap_setting('field_renderers')
    return get_renderer(configured, **kwargs)
| Titulacion-Sistemas/PythonTitulacion-EV | Lib/site-packages/bootstrap3/bootstrap.py | Python | mit | 2,732 |
"""This file includes a collection of utility functions that are useful for
implementing DQN."""
import gym
import tensorflow as tf
import numpy as np
import random
def huber_loss(x, delta=1.0):
    """Huber loss: quadratic for |x| < delta, linear beyond it.

    See https://en.wikipedia.org/wiki/Huber_loss
    """
    quadratic = tf.square(x) * 0.5
    linear = delta * (tf.abs(x) - 0.5 * delta)
    return tf.where(tf.abs(x) < delta, quadratic, linear)
def sample_n_unique(sampling_f, n):
    """Call `sampling_f` repeatedly until `n` distinct values are collected.

    Values must be comparable with ==; first-seen order is preserved.
    """
    collected = []
    while len(collected) < n:
        value = sampling_f()
        if value not in collected:
            collected.append(value)
    return collected
class Schedule(object):
    """Abstract interface for a time-dependent scalar schedule."""

    def value(self, t):
        """Return the schedule's value at time ``t``. Subclasses must override."""
        raise NotImplementedError()
class ConstantSchedule(object):
    """A schedule that ignores time and always returns one fixed value."""

    def __init__(self, value):
        """Value remains constant over time.

        Parameters
        ----------
        value: float
            Constant value of the schedule
        """
        self._constant = value

    def value(self, t):
        """See Schedule.value"""
        return self._constant
def linear_interpolation(l, r, alpha):
    """Linearly blend from ``l`` (alpha=0) to ``r`` (alpha=1)."""
    delta = r - l
    return l + alpha * delta


class PiecewiseSchedule(object):
    """A schedule defined by straight-line segments between endpoints."""

    def __init__(self, endpoints, interpolation=linear_interpolation, outside_value=None):
        """Piecewise schedule.

        endpoints: [(int, int)]
            list of pairs `(time, value)` meaning that the schedule outputs
            `value` when `t == time`. Times must be sorted in increasing
            order. When t falls between two endpoints `(time_a, value_a)` and
            `(time_b, value_b)` with `time_a <= t < time_b`, the output is
            `interpolation(value_a, value_b, alpha)` where alpha is the
            fraction of the interval covered by t.
        interpolation: lambda float, float, float: float
            function of the left value, right value, and covered fraction.
            See linear_interpolation for an example.
        outside_value: float
            returned when t lies outside every interval; if None, an
            AssertionError is raised instead.
        """
        times = [point[0] for point in endpoints]
        assert times == sorted(times)
        self._interpolation = interpolation
        self._outside_value = outside_value
        self._endpoints = endpoints

    def value(self, t):
        """See Schedule.value"""
        for (left_t, left_v), (right_t, right_v) in zip(self._endpoints, self._endpoints[1:]):
            if left_t <= t < right_t:
                fraction = float(t - left_t) / (right_t - left_t)
                return self._interpolation(left_v, right_v, fraction)
        # t is outside every segment; fall back to the configured constant.
        assert self._outside_value is not None
        return self._outside_value
class LinearSchedule(object):
    """Anneals linearly from ``initial_p`` to ``final_p``, then stays flat."""

    def __init__(self, schedule_timesteps, final_p, initial_p=1.0):
        """Linear interpolation between initial_p and final_p over
        schedule_timesteps. After that many timesteps final_p is returned.

        Parameters
        ----------
        schedule_timesteps: int
            Number of timesteps over which to anneal initial_p to final_p
        initial_p: float
            initial output value
        final_p: float
            final output value
        """
        self.initial_p = initial_p
        self.final_p = final_p
        self.schedule_timesteps = schedule_timesteps

    def value(self, t):
        """See Schedule.value"""
        progress = min(float(t) / self.schedule_timesteps, 1.0)
        return self.initial_p + progress * (self.final_p - self.initial_p)
def compute_exponential_averages(variables, decay):
    """Create ops tracking exponential moving averages of scalar variables.

    Parameters
    ----------
    variables: [tf.Tensor]
        List of scalar tensors.

    Returns
    -------
    averages: [tf.Tensor]
        Scalar tensors holding the running averages, in the same order as
        `variables`.
    apply_op: tf.runnable
        Op that folds the variables' current values into the averages.
    """
    averager = tf.train.ExponentialMovingAverage(decay=decay)
    update_op = averager.apply(variables)
    averages = [averager.average(var) for var in variables]
    return averages, update_op
def minimize_and_clip(optimizer, objective, var_list, clip_val=10):
    """Minimize `objective` with `optimizer` over `var_list`, clipping each
    variable's gradient norm to `clip_val` before applying it.
    """
    grads_and_vars = optimizer.compute_gradients(objective, var_list=var_list)
    clipped = []
    for grad, var in grads_and_vars:
        if grad is not None:
            grad = tf.clip_by_norm(grad, clip_val)
        clipped.append((grad, var))
    return optimizer.apply_gradients(clipped)
def initialize_interdependent_variables(session, vars_list, feed_dict):
    """Initialize a list of variables one at a time, which is useful if
    initialization of some variables depends on initialization of the others.

    Repeatedly sweeps over the uninitialized variables, keeping the ones whose
    initializer still fails with FailedPreconditionError for the next pass.
    Raises Exception when a full pass makes no progress.
    """
    vars_left = vars_list
    while len(vars_left) > 0:
        new_vars_left = []
        for v in vars_left:
            try:
                # On TensorFlow older than the variables_initializer API, use
                # tf.initialize_variables([v]) here instead.
                session.run(tf.variables_initializer([v]), feed_dict)
            except tf.errors.FailedPreconditionError:
                # Depends on a variable that is not initialized yet; retry later.
                new_vars_left.append(v)
        if len(new_vars_left) >= len(vars_left):
            # No progress: either the variables depend on each other cyclically,
            # or (more likely) some variable outside the list still needs to be
            # initialized first.
            raise Exception("Cycle in variable dependencies, or external precondition unsatisfied.")
        else:
            vars_left = new_vars_left
def get_wrapper_by_name(env, classname):
    """Walk the chain of gym wrappers around `env` and return the first one
    whose class name contains `classname`.

    Raises ValueError when no wrapper in the chain matches.
    """
    currentenv = env
    while True:
        if classname in currentenv.__class__.__name__:
            return currentenv
        elif isinstance(currentenv, gym.Wrapper):
            # Bug fix: the original tested `env` (the outermost object) here,
            # so on reaching a non-wrapper base env it still tried to access
            # `.env` and crashed with AttributeError instead of raising the
            # intended ValueError.
            currentenv = currentenv.env
        else:
            raise ValueError("Couldn't find wrapper named %s"%classname)
class ReplayBuffer(object):
    def __init__(self, size, frame_history_len, lander=False):
        """This is a memory efficient implementation of the replay buffer.

        The specific memory optimizations used here are:
            - only store each frame once rather than k times
              even if every observation normally consists of k last frames
            - store frames as np.uint8 (actually it is most time-performant
              to cast them back to float32 on GPU to minimize memory transfer
              time)
            - store frame_t and frame_(t+1) in the same buffer.

        For the typical use case in Atari Deep RL, a buffer with 1M frames
        has a total memory footprint of 10^6 * 84 * 84 bytes ~= 7 gigabytes

        Warning! Assumes that returning a frame of zeros at the beginning
        of the episode, when there are fewer frames than `frame_history_len`,
        is acceptable.

        Parameters
        ----------
        size: int
            Max number of transitions to store in the buffer. When the buffer
            overflows the old memories are dropped.
        frame_history_len: int
            Number of memories to be retrieved for each observation.
        """
        self.lander = lander

        self.size = size
        self.frame_history_len = frame_history_len

        self.next_idx      = 0
        self.num_in_buffer = 0

        # Storage arrays are allocated lazily on the first store_frame call,
        # once the frame shape is known.
        self.obs    = None
        self.action = None
        self.reward = None
        self.done   = None

    def can_sample(self, batch_size):
        """Returns true if `batch_size` different transitions can be sampled from the buffer."""
        return batch_size + 1 <= self.num_in_buffer

    def _encode_sample(self, idxes):
        obs_batch      = np.concatenate([self._encode_observation(idx)[None] for idx in idxes], 0)
        act_batch      = self.action[idxes]
        rew_batch      = self.reward[idxes]
        next_obs_batch = np.concatenate([self._encode_observation(idx + 1)[None] for idx in idxes], 0)
        done_mask      = np.array([1.0 if self.done[idx] else 0.0 for idx in idxes], dtype=np.float32)

        return obs_batch, act_batch, rew_batch, next_obs_batch, done_mask

    def sample(self, batch_size):
        """Sample `batch_size` different transitions.

        The i-th sample transition is the following:
        when observing `obs_batch[i]`, action `act_batch[i]` was taken,
        after which reward `rew_batch[i]` was received and subsequent
        observation next_obs_batch[i] was observed, unless the episode
        was done, which is represented by `done_mask[i]` which is equal
        to 1 if the episode has ended as a result of that action.

        Parameters
        ----------
        batch_size: int
            How many transitions to sample.

        Returns
        -------
        obs_batch: np.array
            Array of shape
            (batch_size, img_h, img_w, img_c * frame_history_len)
            and dtype np.uint8
        act_batch: np.array
            Array of shape (batch_size,) and dtype np.int32
        rew_batch: np.array
            Array of shape (batch_size,) and dtype np.float32
        next_obs_batch: np.array
            Array of shape
            (batch_size, img_h, img_w, img_c * frame_history_len)
            and dtype np.uint8
        done_mask: np.array
            Array of shape (batch_size,) and dtype np.float32
        """
        assert self.can_sample(batch_size)
        # The last stored frame has no successor yet, hence the -2 bound.
        idxes = sample_n_unique(lambda: random.randint(0, self.num_in_buffer - 2), batch_size)
        return self._encode_sample(idxes)

    def encode_recent_observation(self):
        """Return the most recent `frame_history_len` frames.

        Returns
        -------
        observation: np.array
            Array of shape (img_h, img_w, img_c * frame_history_len)
            and dtype np.uint8, where observation[:, :, i*img_c:(i+1)*img_c]
            encodes the frame at time `t - frame_history_len + i`
        """
        assert self.num_in_buffer > 0
        return self._encode_observation((self.next_idx - 1) % self.size)

    def _encode_observation(self, idx):
        end_idx   = idx + 1 # make noninclusive
        start_idx = end_idx - self.frame_history_len
        # this checks if we are using low-dimensional observations, such as RAM
        # state, in which case we just directly return the latest RAM.
        if len(self.obs.shape) == 2:
            return self.obs[end_idx-1]
        # if there weren't enough frames ever in the buffer for context
        if start_idx < 0 and self.num_in_buffer != self.size:
            start_idx = 0
        # never cross an episode boundary: restart the window after any done
        for idx in range(start_idx, end_idx - 1):
            if self.done[idx % self.size]:
                start_idx = idx + 1
        missing_context = self.frame_history_len - (end_idx - start_idx)
        # if zero padding is needed for missing context
        # or we are on the boundary of the buffer
        if start_idx < 0 or missing_context > 0:
            frames = [np.zeros_like(self.obs[0]) for _ in range(missing_context)]
            for idx in range(start_idx, end_idx):
                frames.append(self.obs[idx % self.size])
            return np.concatenate(frames, 2)
        else:
            # this optimization has potential to save about 30% compute time \o/
            img_h, img_w = self.obs.shape[1], self.obs.shape[2]
            return self.obs[start_idx:end_idx].transpose(1, 2, 0, 3).reshape(img_h, img_w, -1)

    def store_frame(self, frame):
        """Store a single frame in the buffer at the next available index, overwriting
        old frames if necessary.

        Parameters
        ----------
        frame: np.array
            Array of shape (img_h, img_w, img_c) and dtype np.uint8
            the frame to be stored

        Returns
        -------
        idx: int
            Index at which the frame is stored. To be used for `store_effect` later.
        """
        if self.obs is None:
            self.obs    = np.empty([self.size] + list(frame.shape), dtype=np.float32 if self.lander else np.uint8)
            self.action = np.empty([self.size],                     dtype=np.int32)
            self.reward = np.empty([self.size],                     dtype=np.float32)
            # Bug fix: np.bool is a deprecated alias removed in NumPy 1.24;
            # np.bool_ is the actual scalar type and works on all versions.
            self.done   = np.empty([self.size],                     dtype=np.bool_)
        self.obs[self.next_idx] = frame

        ret = self.next_idx
        self.next_idx = (self.next_idx + 1) % self.size
        self.num_in_buffer = min(self.size, self.num_in_buffer + 1)

        return ret

    def store_effect(self, idx, action, reward, done):
        """Store effects of action taken after observing frame stored
        at index idx. The reason `store_frame` and `store_effect` are broken
        up into two functions is so that one can call `encode_recent_observation`
        in between.

        Parameters
        ---------
        idx: int
            Index in buffer of recently observed frame (returned by `store_frame`).
        action: int
            Action that was performed upon observing this frame.
        reward: float
            Reward that was received when the action was performed.
        done: bool
            True if episode was finished after performing that action.
        """
        self.action[idx] = action
        self.reward[idx] = reward
        self.done[idx]   = done
| berkeleydeeprlcourse/homework | hw3/dqn_utils.py | Python | mit | 14,060 |
from django.urls import path
from . import views
# Routes for the embedded Shopify app's OAuth flow:
# login -> authenticate -> (Shopify redirects back) -> finalize -> logout.
urlpatterns = [
    path('login/', views.login, name='shopify_app_login'),
    path('authenticate/', views.authenticate, name='shopify_app_authenticate'),
    path('finalize/', views.finalize, name='shopify_app_login_finalize'),
    path('logout/', views.logout, name='shopify_app_logout'),
]
| Shopify/shopify_django_app | shopify_app/urls.py | Python | mit | 344 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-05-31 13:21
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Renames AssetPlace.used to AssetPlace.use_count; data is preserved by
    # RenameField, only the column name changes.

    dependencies = [
        ('primus', '0036_assetplace'),
    ]

    operations = [
        migrations.RenameField(
            model_name='assetplace',
            old_name='used',
            new_name='use_count',
        ),
    ]
| sighill/shade_app | primus/migrations/0037_auto_20160531_1521.py | Python | mit | 423 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: create the social_instagramaccount table."""
        # Adding model 'InstagramAccount'
        db.create_table('social_instagramaccount', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('instagram_id', self.gf('django.db.models.fields.BigIntegerField')()),
            ('username', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('profile_picture', self.gf('django.db.models.fields.URLField')(max_length=200)),
            ('access_token', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('scrap_profile', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ))
        db.send_create_signal('social', ['InstagramAccount'])
def backwards(self, orm):
# Deleting model 'InstagramAccount'
db.delete_table('social_instagramaccount')
models = {
'social.facebookaccount': {
'Meta': {'object_name': 'FacebookAccount'},
'fb_id': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_poll_time': ('django.db.models.fields.IntegerField', [], {'default': '1375826801'})
},
'social.facebookmessage': {
'Meta': {'object_name': 'FacebookMessage', '_ormbases': ['social.Message']},
'facebook_account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['social.FacebookAccount']", 'null': 'True', 'blank': 'True'}),
'message_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['social.Message']", 'unique': 'True', 'primary_key': 'True'})
},
'social.facebooksetting': {
'Meta': {'object_name': 'FacebookSetting'},
'app_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'app_secret': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'auto_approve': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'interval': ('django.db.models.fields.IntegerField', [], {'default': '15'})
},
'social.instagramaccount': {
'Meta': {'object_name': 'InstagramAccount'},
'access_token': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instagram_id': ('django.db.models.fields.BigIntegerField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'profile_picture': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'scrap_profile': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'social.instagrammessage': {
'Meta': {'object_name': 'InstagramMessage', '_ormbases': ['social.Message']},
'comments': ('django.db.models.fields.TextField', [], {'max_length': '10000'}),
'images': ('django.db.models.fields.TextField', [], {'max_length': '10000'}),
'instagram_search': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['social.InstagramSearch']", 'null': 'True', 'blank': 'True'}),
'message_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['social.Message']", 'unique': 'True', 'primary_key': 'True'})
},
'social.instagramsearch': {
'Meta': {'object_name': 'InstagramSearch'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'search_term': ('django.db.models.fields.CharField', [], {'max_length': '160', 'blank': 'True'})
},
'social.instagramsetting': {
'Meta': {'object_name': 'InstagramSetting'},
'auto_approve': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'client_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'client_secret': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'interval': ('django.db.models.fields.IntegerField', [], {'default': '15'}),
'redirect_uri': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'social.message': {
'Meta': {'object_name': 'Message'},
'avatar': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
'blob': ('django.db.models.fields.TextField', [], {'max_length': '10000'}),
'date': ('django.db.models.fields.DateTimeField', [], {}),
'deeplink': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {'max_length': '1000'}),
'message_id': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'message_type': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'network': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'reply_id': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
'reply_to': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'reply'", 'null': 'True', 'to': "orm['social.Message']"}),
'status': ('django.db.models.fields.IntegerField', [], {}),
'user_id': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
'user_name': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'})
},
'social.rssaccount': {
'Meta': {'object_name': 'RSSAccount'},
'feed_name': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'feed_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'social.rssmessage': {
'Meta': {'object_name': 'RSSMessage', '_ormbases': ['social.Message']},
'_images': ('django.db.models.fields.TextField', [], {'max_length': '1000', 'blank': 'True'}),
'_links': ('django.db.models.fields.TextField', [], {'max_length': '1000', 'blank': 'True'}),
'message_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['social.Message']", 'unique': 'True', 'primary_key': 'True'}),
'rss_account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['social.RSSAccount']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
'social.rsssetting': {
'Meta': {'object_name': 'RSSSetting'},
'auto_approve': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'interval': ('django.db.models.fields.IntegerField', [], {'default': '15'})
},
'social.twitteraccount': {
'Meta': {'object_name': 'TwitterAccount'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '160', 'blank': 'True'}),
'entities': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'followers_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'oauth_secret': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'oauth_token': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'parse_timeline_tweets': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'poll_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'profile_background_image_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'profile_background_image_url_https': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'profile_image_url_https': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'protected': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'screen_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'twitter_id': ('django.db.models.fields.BigIntegerField', [], {}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'social.twittermessage': {
'Meta': {'object_name': 'TwitterMessage', '_ormbases': ['social.Message']},
'_entities': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'favorited': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'in_reply_to_screen_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'in_reply_to_status_id': ('django.db.models.fields.BigIntegerField', [], {'null': 'True'}),
'in_reply_to_user_id': ('django.db.models.fields.BigIntegerField', [], {'null': 'True'}),
'message_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['social.Message']", 'unique': 'True', 'primary_key': 'True'}),
'retweet_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'retweeted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'twitter_account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['social.TwitterAccount']", 'null': 'True', 'blank': 'True'}),
'twitter_search': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['social.TwitterSearch']", 'null': 'True', 'blank': 'True'})
},
'social.twittersearch': {
'Meta': {'object_name': 'TwitterSearch'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'search_term': ('django.db.models.fields.CharField', [], {'max_length': '160', 'blank': 'True'}),
'search_until': ('django.db.models.fields.IntegerField', [], {'default': '1375826801'})
},
'social.twittersetting': {
'Meta': {'object_name': 'TwitterSetting'},
'auto_approve': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'consumer_key': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'consumer_secret': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'interval': ('django.db.models.fields.IntegerField', [], {'default': '15'})
}
}
complete_apps = ['social'] | MadeInHaus/django-social | social/south_migrations/0006_auto__add_instagramaccount.py | Python | mit | 12,343 |
"""Calculate Lumi Show quiz scores."""
from operator import itemgetter
# Define correct answers.
correct = [3, 2, 1, 4, 1, 2, 4, 3, 4, 2, 1, 3]
# Read table details.
active_sheet('discussions')
tables = []
# Set data start row.
row = 5
# Loop through rows until empty row is found.
data_present = True
while data_present:
# Append nested list to 'tables' list containing delegate ID and table.
tables.append([Cell(row, 'E').value, Cell(row, 'F').value])
row += 1
# Check next row for data.
data_present = Cell(row, 'A').value
# Read delegate details.
active_sheet('questions')
delegates = []
# Set data start row.
row = 4
# Determine first quiz response column.
header = Cell(3, 'A').horizontal
for i in range(0, len(header)):
if header[i][-12:] == ': Question 1':
quiz_col = i + 1
break
# Loop through rows until empty row is found.
data_present = True
while data_present:
# Read delegate ID.
delegate_id = Cell(row, 'C').value
# If the delegate has entered a table, record their ID, name, answers,
# initialised score and table.
for table_id, table in tables:
if table_id == delegate_id:
delegates.append([delegate_id, Cell(row, 'B').value,
Cell(row, quiz_col).horizontal, 0, table])
break
row += 1
# Check next row for data.
data_present = Cell(row, 'A').value
# Determine the number of questions answered based on header row.
answered = len(Cell(3, quiz_col).horizontal)
# Calculate delegate scores. Add 5 to score for a correct answer, otherwise
# subtract 5.
for delegate in delegates:
for i in range(0, len(correct[:answered])):
if delegate[2][i] == correct[i]:
delegate[3] += 5
else:
delegate[3] -= 3
# Sort 'delegates' by ascending table then descending score.
delegates.sort(key=itemgetter(4))
delegates.sort(key=itemgetter(3), reverse=True)
# Write delegate scores.
active_sheet('scores')
Cell(1, 'A').value = 'Name'
Cell(1, 'B').value = 'Table'
Cell(1, 'C').value = 'Score'
row = 2
for delegate_id, name, answers, score, table in delegates:
Cell(row, 'A').value = name
Cell(row, 'B').value = table
Cell(row, 'C').value = score
row += 1
| justsostephen/datanitro-scripts | show-scores/scores.py | Python | mit | 2,311 |
# -*- coding: utf-8 -*-
#
# Sphinx markup documentation build configuration file, created by
# sphinx-quickstart on Tue Aug 18 22:54:33 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))

# -- General configuration -----------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# 'rst2pdf.pdfbuilder' registers the 'pdf' builder exercised by this test config.
extensions = ['rst2pdf.pdfbuilder']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'sphinx'
copyright = u'2009, RA'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'test'
# The full version, including alpha/beta/rc tags.
release = 'test'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of documents that shouldn't be included in the build.
#unused_docs = []

# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages.  Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_use_modindex = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''

# Output file base name for HTML help builder.
htmlhelp_basename = 'sphinx'

# -- Options for LaTeX output --------------------------------------------------

# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'

# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'sphinx.tex', u'sphinx Documentation', u'RA', 'manual'),
  ('index2', 'sphinx2.tex', u'sphinx2 Documentation', u'RA', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# Additional stuff for the LaTeX preamble.
#latex_preamble = ''

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_use_modindex = True

# -- Options for PDF output --------------------------------------------------

# Grouping the document tree into PDF files. List of tuples
# (source start file, target name, title, author).
# Two entries here deliberately exercise the multi-document PDF build.
pdf_documents = [
    ('index', u'MyProject1', u'My Project', u'Author Name'),
    ('index2', u'MyProject2', u'My Project', u'Author Name'),
]

# A comma-separated list of custom stylesheets. Example:
pdf_stylesheets = ['sphinx','kerning']

# Create a compressed PDF
# Use True/False or 1/0
# Example: compressed=True
#pdf_compressed=False

# A colon-separated list of folders to search for fonts. Example:
# pdf_font_path=['/usr/share/fonts', '/usr/share/texmf-dist/fonts/']

# Language to be used for hyphenation support
pdf_language="en_US"

# If false, no index is generated.
pdf_use_index = True

# If false, no modindex is generated.
pdf_use_modindex = True

# If false, no coverpage is generated.
pdf_use_coverpage = False

pdf_break_level = 1

pdf_verbosity=2
| thomaspurchas/rst2pdf | rst2pdf/tests/input/sphinx-multidoc/conf.py | Python | mit | 7,320 |
import unittest
from pgallery.forms import PhotoForm
class PhotoFormTestCase(unittest.TestCase):
    """Unit tests for PhotoForm validation."""

    def test_form_invalid(self):
        # An empty payload must fail validation (required fields missing).
        self.assertFalse(PhotoForm({}).is_valid())
import inspect
from django.core.exceptions import ValidationError
from rest_framework import serializers
from django.core.validators import validate_slug
class SerializerRelatedField(serializers.SlugRelatedField):
    """
    Related field for Django REST Framework that accepts a slug when
    writing (``from_native``) and renders the full serializer output when
    reading (``to_native``).

    Example:
        SerializerRelatedField(serializer_field=TagsSerializer,
                               slug_field='slug')
    """

    def __init__(self, *args, **kwargs):
        serializer_cls = kwargs.pop('serializer_field', None)
        self.serializer_field = serializer_cls
        assert serializer_cls, 'serializer_field is required'
        assert inspect.isclass(serializer_cls)
        super(SerializerRelatedField, self).__init__(*args, **kwargs)

    def to_native(self, obj):
        # List/detail representation: delegate to the wrapped serializer.
        serialized = self.serializer_field(instance=obj)
        return serialized.data
class GetOrCreateField(SerializerRelatedField):
    """
    Get Or Create Field

    Looks up the related object by ``slug_field`` and transparently creates
    it when it does not exist yet.

    Example:
        GetOrCreateField(serializer_field=TagsSerializer, slug_field='slug')
    """

    def from_native(self, data):
        """Resolve *data* (a slug string) to a model instance, creating it
        on first use. Raises ValidationError for non-slug input."""
        if self.queryset is None:
            raise Exception('Writable related fields must include a '
                            '`queryset` argument')
        try:
            # Normalize the incoming slug before validating/looking it up.
            data = data.strip().lower()
            validate_slug(data)
            obj, _ = self.queryset.get_or_create(**{self.slug_field: data})
            return obj
        except (TypeError, ValueError, AttributeError):
            # AttributeError covers non-string input (e.g. int or None),
            # whose .strip() call would otherwise escape as an unhandled
            # exception instead of a clean validation error.
            msg = self.error_messages['invalid']
            raise ValidationError(msg)
| pombredanne/snippit | snippit/apps/snippet/fields.py | Python | mit | 1,589 |
from conans import ConanFile, CMake
import urllib3
import tarfile
class FltkConan(ConanFile):
    """Conan package recipe for the FLTK C++ widget toolkit."""
    name = "FLTK"
    version = "1.3.4"
    license = "GNU LGPL with exceptions (http://www.fltk.org/COPYING.php)"
    url = "https://github.com/trigger-happy/conan-packages"
    description = "FLTK widget library for C++"
    settings = "os", "compiler", "build_type", "arch"
    options = {"shared": [True, False]}
    default_options = "shared=False"
    generators = "cmake"

    def _src_dir(self):
        # Name of the directory the upstream tarball unpacks into.
        return 'fltk-{0}-1'.format(self.version)

    def source(self):
        """Download the FLTK source tarball and unpack it in place.

        Raises an Exception when the download does not return HTTP 200.
        """
        fileName = 'fltk-{0}-1-source.tar.gz'.format(self.version)
        # Derive the whole URL from self.version instead of hard-coding 1.3.4.
        sourceUrl = 'http://fltk.org/pub/fltk/{0}/fltk-{0}-1-source.tar.gz'.format(self.version)
        pool = urllib3.PoolManager()
        request = pool.request('GET', sourceUrl)
        if request.status != 200:
            raise Exception('Could not download source file')
        # urllib3's HTTPResponse.data is bytes: the archive must be written in
        # binary mode ('w' fails on Python 3 and can corrupt it on Windows).
        with open(fileName, 'wb') as f:
            f.write(request.data)
        tf = tarfile.open(fileName)
        try:
            tf.extractall('.')
        finally:
            tf.close()

    def configure(self):
        """Declare X11/pcre/graphite requirements and propagate `shared`."""
        self.requires("libxdmcp/1.1.2@trigger-happy/stable")
        self.requires("libxau/1.0.8@trigger-happy/stable")
        self.requires("libxcb/1.12@trigger-happy/stable")
        self.requires("pcre/8.40.0@kmaragon/stable")
        self.requires("graphite/1.3.10@trigger-happy/stable")
        self.requires("libx11/1.6.5@trigger-happy/stable")
        # Keep shared/static consistent across the whole dependency graph.
        self.options["libxdmcp/1.1.2"].shared = self.options.shared
        self.options["libxau/1.0.8"].shared = self.options.shared
        self.options["libxcb/1.12"].shared = self.options.shared
        self.options["pcre/8.40.0"].shared = self.options.shared
        self.options["graphite/1.3.10"].shared = self.options.shared
        self.options["libx11/1.6.5"].shared = self.options.shared

    def build(self):
        """Configure and build FLTK with CMake."""
        cmake = CMake(self.settings)
        shared = "-DBUILD_SHARED_LIBS=ON" if self.options.shared else ""
        self.run('cmake %s %s %s' % (self._src_dir(), cmake.command_line, shared))
        self.run("cmake --build . %s" % cmake.build_config)

    def package(self):
        """Copy headers, libraries and helper binaries into the package."""
        self.copy("FL", dst="include", src=self._src_dir())
        self.copy("lib/*", dst="lib", keep_path=False)
        self.copy("bin/fluid", dst="bin", keep_path=False)
        self.copy("bin/fltk-config", dst="bin", keep_path=False)

    def package_info(self):
        """Expose the FLTK link libraries to consumers."""
        self.cpp_info.libs = ["fltk", "fltk_images", "fltk_gl", "fltk_forms"]
| trigger-happy/conan-packages | fltk/conanfile.py | Python | mit | 2,459 |
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from collections import namedtuple
import functools
import os
import uuid
from devtools_testutils import AzureMgmtPreparer
from azure_devtools.scenario_tests.exceptions import AzureTestError
from devtools_testutils import ResourceGroupPreparer
# Lightweight stand-in for a real Azure resource, used in playback mode or
# when a pre-provisioned instance is supplied via environment variables.
FakeResource = namedtuple('FakeResource', 'name id host_name')
class DigitalTwinsRGPreparer(ResourceGroupPreparer):
    """Resource-group preparer that becomes a no-op when tests target a
    pre-provisioned Digital Twins instance (AZURE_DIGITAL_TWINS_HOSTNAME set).
    """

    def create_resource(self, name, **kwargs):
        # Live run against an existing instance: hand back a fake resource
        # group instead of creating a real one in Azure.
        if self.is_live and 'AZURE_DIGITAL_TWINS_HOSTNAME' in os.environ:
            self.resource = self.resource or FakeResource(
                name=name,
                id="/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/"+name,
                host_name=None
            )
            return {
                self.parameter_name: self.resource,
                self.parameter_name_for_location: self.location,
            }
        return super(DigitalTwinsRGPreparer, self).create_resource(name, **kwargs)

    def remove_resource(self, name, **kwargs):
        # Only delete the resource group when we actually created one above.
        if 'AZURE_DIGITAL_TWINS_HOSTNAME' not in os.environ:
            return super(DigitalTwinsRGPreparer, self).remove_resource(name, **kwargs)
class DigitalTwinsPreparer(AzureMgmtPreparer):
    """Provision (or fake) an Azure Digital Twins instance for tests.

    Live runs either reuse the instance named by AZURE_DIGITAL_TWINS_HOSTNAME
    or create a new one via the management SDK and grant the test service
    principal the data-plane role; playback runs return a FakeResource with a
    deterministic host name.
    """

    def __init__(self, name_prefix='',
                 use_cache=False,
                 random_name_length=50,
                 location='westcentralus',
                 parameter_name='digitaltwin',
                 role_assignment_name='Azure Digital Twins Data Owner',
                 resource_group_parameter_name='resource_group',
                 disable_recording=True,
                 playback_fake_resource=None,
                 client_kwargs=None,
                 random_name_enabled=True):
        super(DigitalTwinsPreparer, self).__init__(
            name_prefix,
            random_name_length,
            playback_fake_resource=playback_fake_resource,
            disable_recording=disable_recording,
            client_kwargs=client_kwargs,
            random_name_enabled=random_name_enabled
        )
        self.location = location
        self.resource_group_parameter_name = resource_group_parameter_name
        self.parameter_name = parameter_name
        self.resource_moniker = self.name_prefix
        self.use_cache = use_cache
        self.role_name = role_assignment_name
        if random_name_enabled:
            self.resource_moniker += "digitaltwinsname"
        self.set_cache(use_cache, None, location)

    def create_resource(self, name, **kwargs):
        if self.is_live:
            if os.environ.get('AZURE_DIGITAL_TWINS_HOSTNAME'):
                # Reuse a pre-provisioned instance; scrub its real name from
                # the recordings so playback stays deterministic.
                host_name = os.environ['AZURE_DIGITAL_TWINS_HOSTNAME']
                name = host_name.split('.')[0]
                self.resource = FakeResource(name=name, id=name, host_name=host_name)
                self.test_class_instance.scrubber.register_name_pair(
                    name,
                    self.resource_moniker
                )
            else:
                # We have to import here due to a bug in the mgmt SDK
                from azure.mgmt.digitaltwins import AzureDigitalTwinsManagementClient
                self.client = self.create_mgmt_client(AzureDigitalTwinsManagementClient)
                group = self._get_resource_group(**kwargs)
                result = self.client.digital_twins.create_or_update(group.name, name, self.location)
                self.resource = result.result()
                self.id = self.resource.id
                self._add_role_assignment(group)
                self.test_class_instance.scrubber.register_name_pair(
                    name,
                    self.resource_moniker
                )
        else:
            # Playback: synthesize a resource with a deterministic host name.
            self.resource = FakeResource(
                name=name,
                id=name,
                host_name=self.resource_moniker + ".api.wcus.digitaltwins.azure.net")
        return {self.parameter_name: self.resource}

    def remove_resource(self, name, **kwargs):
        # Only tear down instances this preparer created itself.
        if self.is_live and 'AZURE_DIGITAL_TWINS_HOSTNAME' not in os.environ:
            group = self._get_resource_group(**kwargs)
            self.client.digital_twins.delete(group.name, name, polling=False)

    def _get_resource_group(self, **kwargs):
        """Return the resource group injected by DigitalTwinsRGPreparer."""
        try:
            # NOTE: must use item access -- kwargs.get() returns None instead
            # of raising KeyError, which made the error below unreachable and
            # let a missing resource group surface later as an obscure
            # AttributeError on None.
            return kwargs[self.resource_group_parameter_name]
        except KeyError:
            template = 'To create a Digital Twin, a resource group is required. Please add ' \
                       'decorator @{} in front of this preparer.'
            raise AzureTestError(template.format(ResourceGroupPreparer.__name__))

    def _add_role_assignment(self, resource_group):
        """Grant the test service principal the data-plane role on the
        freshly created instance."""
        from azure.mgmt.authorization import AuthorizationManagementClient
        role_client = self.create_mgmt_client(AuthorizationManagementClient)
        sp_id = os.environ.get('AZURE_CLIENT_ID')
        if not sp_id:
            raise ValueError("Cannot assign role to DigitalTwins without AZURE_CLIENT_ID.")

        roles = list(role_client.role_definitions.list(
            resource_group.id,
            filter="roleName eq '{}'".format(self.role_name)
        ))
        assert len(roles) == 1
        dt_role = roles[0]

        role_client.role_assignments.create(
            self.id,
            uuid.uuid4(),  # Role assignment random name
            {
                'role_definition_id': dt_role.id,
                'principal_id': sp_id
            }
        )
# Cached variants share one provisioned resource across all tests in a run.
CachedDigitalTwinsRGPreparer = functools.partial(DigitalTwinsRGPreparer, use_cache=True)
CachedDigitalTwinsPreparer = functools.partial(DigitalTwinsPreparer, use_cache=True)
| Azure/azure-sdk-for-python | sdk/digitaltwins/azure-digitaltwins-core/tests/_preparer.py | Python | mit | 5,928 |
#!/usr/bin/python3
# scrape HLTV results for CS:GO Matches
from requests import get
from bs4 import BeautifulSoup
import re
from csv import DictWriter
from time import gmtime, strftime
def formatMatch(hltvMatch):
    '''
    extract match information and format to list

    hltvMatch is a BeautifulSoup element for one 'div.result-con' row of the
    HLTV results page; returns a dict with team names/ids, map, event,
    match/event ids and scores (winner first).
    NOTE(review): all selectors below depend on HLTV's markup at scrape time
    and will break silently if the site layout changes.
    '''
    # team names
    hltvMatchNames = [name.get_text() for name in hltvMatch.select('div.team')]
    # match id: the href looks like '/matches/<id>/<slug>'; drop the prefix
    # then keep everything up to the next '/'.
    hltvMatchLink = hltvMatch.select('a.a-reset')[0]['href'][9:] # removes the prefix '/matches/'
    hltvMatchId = hltvMatchLink[:hltvMatchLink.index('/')]
    # team ids: mask every '/' except the last one with '_' so index('/')
    # finds the final path separator, then take the trailing path segment.
    hltvMatchTeamLogoSources = [id_text['src'] for id_text in hltvMatch.select('img.team-logo')]
    hltvMatchTeamIds = [src[src.replace('/', '_', src.count('/')-1).index('/')+1:] for src in hltvMatchTeamLogoSources]
    # event id: collapse all but the last '/' out of the logo URL, then take
    # the text between the remaining '/' and the file extension dot.
    hltvMatchEventLink = hltvMatch.select('td.event img')[0]['src']
    hltvMatchEventLink = hltvMatchEventLink.replace('/', '', hltvMatchEventLink.count('/')-1)
    hltvMatchEventId = hltvMatchEventLink[hltvMatchEventLink.index('/')+1:hltvMatchEventLink.index('.')]
    # score(s)
    try:
        # when there was a tie, retrieve the shared score.
        # This will raise an Exception if there was no tie, handled afterwards
        hltvMatchScoreTie = hltvMatch.select('span.score-tie')[0].get_text()
        score1 = hltvMatchScoreTie
        score2 = hltvMatchScoreTie
        hltvMatchTeamWon = 0
    except IndexError:
        # when there wasn't a tie, retrieve the winning team and the different scores
        hltvMatchTeamWon = hltvMatchNames.index(hltvMatch.select('div.team-won')[0].get_text())
        score1 = hltvMatch.select('span.score-won')[0].get_text()
        score2 = hltvMatch.select('span.score-lost')[0].get_text()
    # Winner-first ordering: index 1-hltvMatchTeamWon flips to the loser.
    return {
        "team1": hltvMatchNames[hltvMatchTeamWon],
        "team2": hltvMatchNames[1-hltvMatchTeamWon],
        "map": hltvMatch.select('div.map-text')[0].get_text(),
        "event": hltvMatch.select('span.event-name')[0].get_text(),
        "matchid": hltvMatchId,
        "teamid1": hltvMatchTeamIds[hltvMatchTeamWon],
        "teamid2": hltvMatchTeamIds[1-hltvMatchTeamWon],
        "eventid": hltvMatchEventId,
        "score1": score1,
        "score2": score2
    }
def getMatchesOfPage(hltvUrl):
    '''
    gets all matches from one page

    Fetches the HLTV results page at hltvUrl and returns a list of match
    dicts, one per 'div.result-con' row, parsed by formatMatch().
    '''
    # Fetch the page; a User-Agent header is sent with every request.
    response = get(hltvUrl, headers={'User-Agent' : "github users please insert something meaningful here"})
    # Parse the raw HTML and convert each result row into a dict.
    soup = BeautifulSoup(response.text, 'html.parser')
    return [formatMatch(matchSoup) for matchSoup in soup.select('div.result-con')]
def writeMatchesToFile(matchesOfPage, iteration):
    '''
    Append the formatted matches to the CSV output file.

    matchesOfPage: list of dicts (as produced by formatMatch); the first
    dict's keys define the CSV columns.
    iteration: page offset; the header row is written only on the first
    call (iteration == 0) so the appended file stays a single valid CSV.
    '''
    if not matchesOfPage:
        # An empty page would otherwise raise IndexError on [0] below.
        return
    # newline='' is required by the csv module to avoid doubled line
    # endings on Windows.
    with open('hltv_org_matches_2018.csv', 'a+', newline='') as csvfile:
        hltvWriter = DictWriter(csvfile, matchesOfPage[0].keys())
        if iteration == 0:
            hltvWriter.writeheader()
        hltvWriter.writerows(matchesOfPage)
# Page through HLTV's results listing via the offset query parameter,
# writing each page to the CSV and logging progress with a timestamp.
for offset in range(0, 9000, 100):
    hltvUrlbase = 'http://www.hltv.org/results?offset='
    hltvUrl = hltvUrlbase + str(offset)
    matchesOfPage = getMatchesOfPage(hltvUrl)
    writeMatchesToFile(matchesOfPage, offset)
    # NOTE(review): the log adds 50 although the loop steps by 100 -- the
    # reported completed-match count looks stale; confirm the page size.
    print(strftime("%Y-%m-%d %H:%M:%S: ", gmtime()) + str(offset + 50) + " HLTV CS:GO matches completed.")
| Schw4rz/csgo_glicko2 | scraper.py | Python | mit | 3,548 |
import base64
import hashlib
import warnings
from gevent.pywsgi import WSGIHandler
from .websocket import WebSocket, Stream
from .logging import create_logger
class Client(object):
    """Pairs a connected peer's address with its WebSocket instance."""

    def __init__(self, address, ws):
        self.address, self.ws = address, ws
class WebSocketHandler(WSGIHandler):
    """
    Automatically upgrades the connection to a websocket.
    To prevent the WebSocketHandler to call the underlying WSGI application,
    but only setup the WebSocket negotiations, do:
    mywebsockethandler.prevent_wsgi_call = True
    before calling run_application(). This is useful if you want to do more
    things before calling the app, and want to off-load the WebSocket
    negotiations to this library. Socket.IO needs this for example, to send
    the 'ack' before yielding the control to your WSGI app.
    """
    SUPPORTED_VERSIONS = ('13', '8', '7')
    # Fixed handshake GUID from RFC 6455 section 1.3.
    GUID = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"

    def run_websocket(self):
        """
        Called when a websocket has been created successfully.
        """
        if getattr(self, 'prevent_wsgi_call', False):
            return

        # In case WebSocketServer is not used
        if not hasattr(self.server, 'clients'):
            self.server.clients = {}

        # Since we're now a websocket connection, we don't care what the
        # application actually responds with for the http response
        try:
            self.server.clients[self.client_address] = Client(
                self.client_address, self.websocket)
            # The start_response callable is a no-op: the 101 response has
            # already been sent during the upgrade.
            self.application(self.environ, lambda s, h, e=None: [])
        finally:
            # Always unregister the client and tear down socket state, even
            # if the application raised.
            del self.server.clients[self.client_address]
            if not self.websocket.closed:
                self.websocket.close()
            self.environ.update({
                'wsgi.websocket': None
            })
            self.websocket = None

    def run_application(self):
        """
        Per-request entry point: attempt the WebSocket upgrade, otherwise
        defer to the normal WSGI handling of the parent class.
        """
        if (hasattr(self.server, 'pre_start_hook')
                and self.server.pre_start_hook):
            self.logger.debug("Calling pre-start hook")
            if self.server.pre_start_hook(self):
                return super(WebSocketHandler, self).run_application()

        self.logger.debug("Initializing WebSocket")
        self.result = self.upgrade_websocket()

        if hasattr(self, 'websocket'):
            # Upgrade succeeded; flush the pending 101 headers, then hand
            # over to the websocket loop.
            if self.status and not self.headers_sent:
                self.write('')
            self.run_websocket()
        else:
            if self.status:
                # A status was set, likely an error so just send the response
                if not self.result:
                    self.result = []
                self.process_result()
                return

            # This handler did not handle the request, so defer it to the
            # underlying application object
            return super(WebSocketHandler, self).run_application()

    def upgrade_websocket(self):
        """
        Attempt to upgrade the current environ into a websocket enabled
        connection. If successful, the environ dict with be updated with two
        new entries, `wsgi.websocket` and `wsgi.websocket_version`.
        :returns: Whether the upgrade was successful.
        """
        # Some basic sanity checks first
        self.logger.debug("Validating WebSocket request")

        if self.environ.get('REQUEST_METHOD', '') != 'GET':
            # This is not a websocket request, so we must not handle it
            self.logger.debug('Can only upgrade connection if using GET method.')
            return

        upgrade = self.environ.get('HTTP_UPGRADE', '').lower()

        if upgrade == 'websocket':
            connection = self.environ.get('HTTP_CONNECTION', '').lower()

            if 'upgrade' not in connection:
                # This is not a websocket request, so we must not handle it
                self.logger.warning("Client didn't ask for a connection "
                                    "upgrade")
                return
        else:
            # This is not a websocket request, so we must not handle it
            return

        if self.request_version != 'HTTP/1.1':
            # Fix: the reason phrase "Bad Request" belongs to status code 400;
            # the previous code sent the mismatched "402 Bad Request"
            # (402 is "Payment Required").
            self.start_response('400 Bad Request', [])
            self.logger.warning("Bad server protocol in headers")
            return ['Bad protocol version']

        if self.environ.get('HTTP_SEC_WEBSOCKET_VERSION'):
            return self.upgrade_connection()
        else:
            self.logger.warning("No protocol defined")
            self.start_response('426 Upgrade Required', [
                ('Sec-WebSocket-Version', ', '.join(self.SUPPORTED_VERSIONS))])
            return ['No Websocket protocol version defined']

    def upgrade_connection(self):
        """
        Validate and 'upgrade' the HTTP request to a WebSocket request.
        If an upgrade succeeded then then handler will have `start_response`
        with a status of `101`, the environ will also be updated with
        `wsgi.websocket` and `wsgi.websocket_version` keys.
        :return: The WSGI response iterator if something went awry.
        """
        self.logger.debug("Attempting to upgrade connection")

        version = self.environ.get("HTTP_SEC_WEBSOCKET_VERSION")

        if version not in self.SUPPORTED_VERSIONS:
            msg = "Unsupported WebSocket Version: {0}".format(version)

            self.logger.warning(msg)
            self.start_response('400 Bad Request', [
                ('Sec-WebSocket-Version', ', '.join(self.SUPPORTED_VERSIONS))
            ])

            return [msg]

        key = self.environ.get("HTTP_SEC_WEBSOCKET_KEY", '').strip()

        if not key:
            # 5.2.1 (3)
            msg = "Sec-WebSocket-Key header is missing/empty"

            self.logger.warning(msg)
            self.start_response('400 Bad Request', [])

            return [msg]

        try:
            key_len = len(base64.b64decode(key))
        except TypeError:
            msg = "Invalid key: {0}".format(key)

            self.logger.warning(msg)
            self.start_response('400 Bad Request', [])

            return [msg]

        if key_len != 16:
            # 5.2.1 (3)
            msg = "Invalid key: {0}".format(key)

            self.logger.warning(msg)
            self.start_response('400 Bad Request', [])

            return [msg]

        # Check for WebSocket Protocols
        requested_protocols = self.environ.get(
            'HTTP_SEC_WEBSOCKET_PROTOCOL', '')
        protocol = None

        if hasattr(self.application, 'app_protocol'):
            allowed_protocol = self.application.app_protocol(
                self.environ['PATH_INFO'])

            if allowed_protocol and allowed_protocol in requested_protocols:
                protocol = allowed_protocol
                self.logger.debug("Protocol allowed: {0}".format(protocol))

        self.websocket = WebSocket(self.environ, Stream(self), self)
        self.environ.update({
            'wsgi.websocket_version': version,
            'wsgi.websocket': self.websocket
        })

        # NOTE(review): the accept-hash below assumes Python 2 `str`
        # semantics; on Python 3, `key + self.GUID` would need encoding
        # to bytes before hashing -- confirm target interpreter.
        headers = [
            ("Upgrade", "websocket"),
            ("Connection", "Upgrade"),
            ("Sec-WebSocket-Accept", base64.b64encode(
                hashlib.sha1(key + self.GUID).digest())),
        ]

        if protocol:
            headers.append(("Sec-WebSocket-Protocol", protocol))

        self.logger.debug("WebSocket request accepted, switching protocols")
        self.start_response("101 Switching Protocols", headers)

    @property
    def logger(self):
        # Lazily attach a shared logger to the server object.
        if not hasattr(self.server, 'logger'):
            self.server.logger = create_logger(__name__)
        return self.server.logger

    def log_request(self):
        # Suppress access logging for successful (101) websocket upgrades.
        if '101' not in self.status:
            self.logger.info(self.format_request())

    @property
    def active_client(self):
        # The Client record registered for this connection in run_websocket().
        return self.server.clients[self.client_address]

    def start_response(self, status, headers, exc_info=None):
        """
        Called when the handler is ready to send a response back to the remote
        endpoint. A websocket connection may have not been created.
        """
        writer = super(WebSocketHandler, self).start_response(
            status, headers, exc_info=exc_info)

        self._prepare_response()

        return writer

    def _prepare_response(self):
        """
        Sets up the ``pywsgi.Handler`` to work with a websocket response.
        This is used by other projects that need to support WebSocket
        connections as part of a larger effort.
        """
        assert not self.headers_sent

        if not self.environ.get('wsgi.websocket'):
            # a WebSocket connection is not established, do nothing
            return

        # So that `finalize_headers` doesn't write a Content-Length header
        self.provided_content_length = False

        # The websocket is now controlling the response
        self.response_use_chunked = False

        # Once the request is over, the connection must be closed
        self.close_connection = True

        # Prevents the Date header from being written
        self.provided_date = True
| mcking49/apache-flask | Python/Lib/site-packages/geventwebsocket/handler.py | Python | mit | 9,388 |
import numpy as np
def sixteen2eight(I: np.ndarray, Clim: tuple) -> np.ndarray:
    """
    Rescale grayscale image data to 8-bit, stretching [Clim[0], Clim[1]]
    onto [0, 255]. (scipy.misc.bytescale had bugs.)

    inputs:
    ------
    I: 2-D Numpy array of grayscale image data
    Clim: length 2 of tuple or numpy 1-D array specifying lowest and highest expected values in grayscale image

    Michael Hirsch, Ph.D.
    """
    lo = Clim[0]
    hi = Clim[1]
    # Clip to the expected range, normalize to [0, 1] in float32,
    # then stretch to [0, 255] and quantize.
    unit = (I.astype(np.float32).clip(lo, hi) - lo) / (hi - lo)
    unit *= 255
    return unit.round().astype(np.uint8)
def normframe(I: np.ndarray, Clim: tuple) -> np.ndarray:
    """
    Clip grayscale image data to the expected value range and stretch it
    onto [0, 1] as float32.

    inputs:
    -------
    I: 2-D Numpy array of grayscale image data
    Clim: length 2 of tuple or numpy 1-D array specifying lowest and highest expected values in grayscale image

    Michael Hirsch, Ph.D.
    """
    lo = Clim[0]
    hi = Clim[1]
    clipped = I.astype(np.float32).clip(lo, hi)
    return (clipped - lo) / (hi - lo)
| scivision/raspberrypi_raw_camera | pibayer/utils.py | Python | mit | 889 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.core.exceptions import HttpResponseError
import msrest.serialization
class Attributes(msrest.serialization.Model):
    """The object attributes managed by the KeyVault service.
    Variables are only populated by the server, and will be ignored when sending a request.
    :param enabled: Determines whether the object is enabled.
    :type enabled: bool
    :param not_before: Not before date in UTC.
    :type not_before: ~datetime.datetime
    :param expires: Expiry date in UTC.
    :type expires: ~datetime.datetime
    :ivar created: Creation time in UTC.
    :vartype created: ~datetime.datetime
    :ivar updated: Last updated time in UTC.
    :vartype updated: ~datetime.datetime
    """
    # Read-only fields are populated by the service and never sent by the client.
    _validation = {
        'created': {'readonly': True},
        'updated': {'readonly': True},
    }
    # msrest (de)serialization map: model attribute -> JSON wire key and type.
    _attribute_map = {
        'enabled': {'key': 'enabled', 'type': 'bool'},
        'not_before': {'key': 'nbf', 'type': 'unix-time'},
        'expires': {'key': 'exp', 'type': 'unix-time'},
        'created': {'key': 'created', 'type': 'unix-time'},
        'updated': {'key': 'updated', 'type': 'unix-time'},
    }
    def __init__(
        self,
        **kwargs
    ):
        super(Attributes, self).__init__(**kwargs)
        self.enabled = kwargs.get('enabled', None)
        self.not_before = kwargs.get('not_before', None)
        self.expires = kwargs.get('expires', None)
        self.created = None
        self.updated = None
class BackupSecretResult(msrest.serialization.Model):
    """The backup secret result, containing the backup blob.
    Variables are only populated by the server, and will be ignored when sending a request.
    :ivar value: The backup blob containing the backed up secret.
    :vartype value: bytes
    """
    # Read-only fields are populated by the service and never sent by the client.
    _validation = {
        'value': {'readonly': True},
    }
    # msrest (de)serialization map: model attribute -> JSON wire key and type.
    _attribute_map = {
        'value': {'key': 'value', 'type': 'base64'},
    }
    def __init__(
        self,
        **kwargs
    ):
        super(BackupSecretResult, self).__init__(**kwargs)
        self.value = None
class SecretBundle(msrest.serialization.Model):
    """A secret consisting of a value, id and its attributes.
    Variables are only populated by the server, and will be ignored when sending a request.
    :param value: The secret value.
    :type value: str
    :param id: The secret id.
    :type id: str
    :param content_type: The content type of the secret.
    :type content_type: str
    :param attributes: The secret management attributes.
    :type attributes: ~azure.keyvault.v7_3_preview.models.SecretAttributes
    :param tags: A set of tags. Application specific metadata in the form of key-value pairs.
    :type tags: dict[str, str]
    :ivar kid: If this is a secret backing a KV certificate, then this field specifies the
     corresponding key backing the KV certificate.
    :vartype kid: str
    :ivar managed: True if the secret's lifetime is managed by key vault. If this is a secret
     backing a certificate, then managed will be true.
    :vartype managed: bool
    """
    # Read-only fields are populated by the service and never sent by the client.
    _validation = {
        'kid': {'readonly': True},
        'managed': {'readonly': True},
    }
    # msrest (de)serialization map: model attribute -> JSON wire key and type.
    _attribute_map = {
        'value': {'key': 'value', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
        'content_type': {'key': 'contentType', 'type': 'str'},
        'attributes': {'key': 'attributes', 'type': 'SecretAttributes'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'kid': {'key': 'kid', 'type': 'str'},
        'managed': {'key': 'managed', 'type': 'bool'},
    }
    def __init__(
        self,
        **kwargs
    ):
        super(SecretBundle, self).__init__(**kwargs)
        self.value = kwargs.get('value', None)
        self.id = kwargs.get('id', None)
        self.content_type = kwargs.get('content_type', None)
        self.attributes = kwargs.get('attributes', None)
        self.tags = kwargs.get('tags', None)
        self.kid = None
        self.managed = None
class DeletedSecretBundle(SecretBundle):
    """A Deleted Secret consisting of its previous id, attributes and its tags, as well as information on when it will be purged.
    Variables are only populated by the server, and will be ignored when sending a request.
    :param value: The secret value.
    :type value: str
    :param id: The secret id.
    :type id: str
    :param content_type: The content type of the secret.
    :type content_type: str
    :param attributes: The secret management attributes.
    :type attributes: ~azure.keyvault.v7_3_preview.models.SecretAttributes
    :param tags: A set of tags. Application specific metadata in the form of key-value pairs.
    :type tags: dict[str, str]
    :ivar kid: If this is a secret backing a KV certificate, then this field specifies the
     corresponding key backing the KV certificate.
    :vartype kid: str
    :ivar managed: True if the secret's lifetime is managed by key vault. If this is a secret
     backing a certificate, then managed will be true.
    :vartype managed: bool
    :param recovery_id: The url of the recovery object, used to identify and recover the deleted
     secret.
    :type recovery_id: str
    :ivar scheduled_purge_date: The time when the secret is scheduled to be purged, in UTC.
    :vartype scheduled_purge_date: ~datetime.datetime
    :ivar deleted_date: The time when the secret was deleted, in UTC.
    :vartype deleted_date: ~datetime.datetime
    """
    # Read-only fields are populated by the service and never sent by the client.
    _validation = {
        'kid': {'readonly': True},
        'managed': {'readonly': True},
        'scheduled_purge_date': {'readonly': True},
        'deleted_date': {'readonly': True},
    }
    # msrest (de)serialization map: model attribute -> JSON wire key and type.
    _attribute_map = {
        'value': {'key': 'value', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
        'content_type': {'key': 'contentType', 'type': 'str'},
        'attributes': {'key': 'attributes', 'type': 'SecretAttributes'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'kid': {'key': 'kid', 'type': 'str'},
        'managed': {'key': 'managed', 'type': 'bool'},
        'recovery_id': {'key': 'recoveryId', 'type': 'str'},
        'scheduled_purge_date': {'key': 'scheduledPurgeDate', 'type': 'unix-time'},
        'deleted_date': {'key': 'deletedDate', 'type': 'unix-time'},
    }
    def __init__(
        self,
        **kwargs
    ):
        super(DeletedSecretBundle, self).__init__(**kwargs)
        self.recovery_id = kwargs.get('recovery_id', None)
        self.scheduled_purge_date = None
        self.deleted_date = None
class SecretItem(msrest.serialization.Model):
    """The secret item containing secret metadata.
    Variables are only populated by the server, and will be ignored when sending a request.
    :param id: Secret identifier.
    :type id: str
    :param attributes: The secret management attributes.
    :type attributes: ~azure.keyvault.v7_3_preview.models.SecretAttributes
    :param tags: A set of tags. Application specific metadata in the form of key-value pairs.
    :type tags: dict[str, str]
    :param content_type: Type of the secret value such as a password.
    :type content_type: str
    :ivar managed: True if the secret's lifetime is managed by key vault. If this is a key backing
     a certificate, then managed will be true.
    :vartype managed: bool
    """
    # Read-only fields are populated by the service and never sent by the client.
    _validation = {
        'managed': {'readonly': True},
    }
    # msrest (de)serialization map: model attribute -> JSON wire key and type.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'attributes': {'key': 'attributes', 'type': 'SecretAttributes'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'content_type': {'key': 'contentType', 'type': 'str'},
        'managed': {'key': 'managed', 'type': 'bool'},
    }
    def __init__(
        self,
        **kwargs
    ):
        super(SecretItem, self).__init__(**kwargs)
        self.id = kwargs.get('id', None)
        self.attributes = kwargs.get('attributes', None)
        self.tags = kwargs.get('tags', None)
        self.content_type = kwargs.get('content_type', None)
        self.managed = None
class DeletedSecretItem(SecretItem):
    """The deleted secret item containing metadata about the deleted secret.
    Variables are only populated by the server, and will be ignored when sending a request.
    :param id: Secret identifier.
    :type id: str
    :param attributes: The secret management attributes.
    :type attributes: ~azure.keyvault.v7_3_preview.models.SecretAttributes
    :param tags: A set of tags. Application specific metadata in the form of key-value pairs.
    :type tags: dict[str, str]
    :param content_type: Type of the secret value such as a password.
    :type content_type: str
    :ivar managed: True if the secret's lifetime is managed by key vault. If this is a key backing
     a certificate, then managed will be true.
    :vartype managed: bool
    :param recovery_id: The url of the recovery object, used to identify and recover the deleted
     secret.
    :type recovery_id: str
    :ivar scheduled_purge_date: The time when the secret is scheduled to be purged, in UTC.
    :vartype scheduled_purge_date: ~datetime.datetime
    :ivar deleted_date: The time when the secret was deleted, in UTC.
    :vartype deleted_date: ~datetime.datetime
    """
    # Read-only fields are populated by the service and never sent by the client.
    _validation = {
        'managed': {'readonly': True},
        'scheduled_purge_date': {'readonly': True},
        'deleted_date': {'readonly': True},
    }
    # msrest (de)serialization map: model attribute -> JSON wire key and type.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'attributes': {'key': 'attributes', 'type': 'SecretAttributes'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'content_type': {'key': 'contentType', 'type': 'str'},
        'managed': {'key': 'managed', 'type': 'bool'},
        'recovery_id': {'key': 'recoveryId', 'type': 'str'},
        'scheduled_purge_date': {'key': 'scheduledPurgeDate', 'type': 'unix-time'},
        'deleted_date': {'key': 'deletedDate', 'type': 'unix-time'},
    }
    def __init__(
        self,
        **kwargs
    ):
        super(DeletedSecretItem, self).__init__(**kwargs)
        self.recovery_id = kwargs.get('recovery_id', None)
        self.scheduled_purge_date = None
        self.deleted_date = None
class DeletedSecretListResult(msrest.serialization.Model):
    """The deleted secret list result.
    Variables are only populated by the server, and will be ignored when sending a request.
    :ivar value: A response message containing a list of the deleted secrets in the vault along
     with a link to the next page of deleted secrets.
    :vartype value: list[~azure.keyvault.v7_3_preview.models.DeletedSecretItem]
    :ivar next_link: The URL to get the next set of deleted secrets.
    :vartype next_link: str
    """
    # Read-only fields are populated by the service and never sent by the client.
    _validation = {
        'value': {'readonly': True},
        'next_link': {'readonly': True},
    }
    # msrest (de)serialization map: model attribute -> JSON wire key and type.
    _attribute_map = {
        'value': {'key': 'value', 'type': '[DeletedSecretItem]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }
    def __init__(
        self,
        **kwargs
    ):
        super(DeletedSecretListResult, self).__init__(**kwargs)
        self.value = None
        self.next_link = None
class Error(msrest.serialization.Model):
    """The key vault server error.
    Variables are only populated by the server, and will be ignored when sending a request.
    :ivar code: The error code.
    :vartype code: str
    :ivar message: The error message.
    :vartype message: str
    :ivar inner_error: The key vault server error.
    :vartype inner_error: ~azure.keyvault.v7_3_preview.models.Error
    """
    # All fields are server-populated; nothing is sent by the client.
    _validation = {
        'code': {'readonly': True},
        'message': {'readonly': True},
        'inner_error': {'readonly': True},
    }
    # msrest (de)serialization map; note inner_error nests recursively.
    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
        'inner_error': {'key': 'innererror', 'type': 'Error'},
    }
    def __init__(
        self,
        **kwargs
    ):
        super(Error, self).__init__(**kwargs)
        self.code = None
        self.message = None
        self.inner_error = None
class KeyVaultError(msrest.serialization.Model):
    """The key vault error exception.
    Variables are only populated by the server, and will be ignored when sending a request.
    :ivar error: The key vault server error.
    :vartype error: ~azure.keyvault.v7_3_preview.models.Error
    """
    # All fields are server-populated; nothing is sent by the client.
    _validation = {
        'error': {'readonly': True},
    }
    # msrest (de)serialization map: model attribute -> JSON wire key and type.
    _attribute_map = {
        'error': {'key': 'error', 'type': 'Error'},
    }
    def __init__(
        self,
        **kwargs
    ):
        super(KeyVaultError, self).__init__(**kwargs)
        self.error = None
class SecretAttributes(Attributes):
    """The secret management attributes.
    Variables are only populated by the server, and will be ignored when sending a request.
    :param enabled: Determines whether the object is enabled.
    :type enabled: bool
    :param not_before: Not before date in UTC.
    :type not_before: ~datetime.datetime
    :param expires: Expiry date in UTC.
    :type expires: ~datetime.datetime
    :ivar created: Creation time in UTC.
    :vartype created: ~datetime.datetime
    :ivar updated: Last updated time in UTC.
    :vartype updated: ~datetime.datetime
    :ivar recoverable_days: softDelete data retention days. Value should be >=7 and <=90 when
     softDelete enabled, otherwise 0.
    :vartype recoverable_days: int
    :ivar recovery_level: Reflects the deletion recovery level currently in effect for secrets in
     the current vault. If it contains 'Purgeable', the secret can be permanently deleted by a
     privileged user; otherwise, only the system can purge the secret, at the end of the retention
     interval. Possible values include: "Purgeable", "Recoverable+Purgeable", "Recoverable",
     "Recoverable+ProtectedSubscription", "CustomizedRecoverable+Purgeable",
     "CustomizedRecoverable", "CustomizedRecoverable+ProtectedSubscription".
    :vartype recovery_level: str or ~azure.keyvault.v7_3_preview.models.DeletionRecoveryLevel
    """
    # Read-only fields are populated by the service and never sent by the client.
    _validation = {
        'created': {'readonly': True},
        'updated': {'readonly': True},
        'recoverable_days': {'readonly': True},
        'recovery_level': {'readonly': True},
    }
    # msrest (de)serialization map: model attribute -> JSON wire key and type.
    _attribute_map = {
        'enabled': {'key': 'enabled', 'type': 'bool'},
        'not_before': {'key': 'nbf', 'type': 'unix-time'},
        'expires': {'key': 'exp', 'type': 'unix-time'},
        'created': {'key': 'created', 'type': 'unix-time'},
        'updated': {'key': 'updated', 'type': 'unix-time'},
        'recoverable_days': {'key': 'recoverableDays', 'type': 'int'},
        'recovery_level': {'key': 'recoveryLevel', 'type': 'str'},
    }
    def __init__(
        self,
        **kwargs
    ):
        super(SecretAttributes, self).__init__(**kwargs)
        self.recoverable_days = None
        self.recovery_level = None
class SecretListResult(msrest.serialization.Model):
    """The secret list result.
    Variables are only populated by the server, and will be ignored when sending a request.
    :ivar value: A response message containing a list of secrets in the key vault along with a link
     to the next page of secrets.
    :vartype value: list[~azure.keyvault.v7_3_preview.models.SecretItem]
    :ivar next_link: The URL to get the next set of secrets.
    :vartype next_link: str
    """
    # Read-only fields are populated by the service and never sent by the client.
    _validation = {
        'value': {'readonly': True},
        'next_link': {'readonly': True},
    }
    # msrest (de)serialization map: model attribute -> JSON wire key and type.
    _attribute_map = {
        'value': {'key': 'value', 'type': '[SecretItem]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }
    def __init__(
        self,
        **kwargs
    ):
        super(SecretListResult, self).__init__(**kwargs)
        self.value = None
        self.next_link = None
class SecretProperties(msrest.serialization.Model):
    """Properties of the key backing a certificate.
    :param content_type: The media type (MIME type).
    :type content_type: str
    """
    # msrest (de)serialization map: model attribute -> JSON wire key and type.
    _attribute_map = {
        'content_type': {'key': 'contentType', 'type': 'str'},
    }
    def __init__(
        self,
        **kwargs
    ):
        super(SecretProperties, self).__init__(**kwargs)
        self.content_type = kwargs.get('content_type', None)
class SecretRestoreParameters(msrest.serialization.Model):
    """The secret restore parameters.
    All required parameters must be populated in order to send to Azure.
    :param secret_bundle_backup: Required. The backup blob associated with a secret bundle.
    :type secret_bundle_backup: bytes
    """
    # Required fields must be supplied by the caller before sending.
    _validation = {
        'secret_bundle_backup': {'required': True},
    }
    # msrest (de)serialization map: model attribute -> JSON wire key and type.
    _attribute_map = {
        'secret_bundle_backup': {'key': 'value', 'type': 'base64'},
    }
    def __init__(
        self,
        **kwargs
    ):
        super(SecretRestoreParameters, self).__init__(**kwargs)
        # Required: indexing (rather than .get) raises KeyError if omitted.
        self.secret_bundle_backup = kwargs['secret_bundle_backup']
class SecretSetParameters(msrest.serialization.Model):
    """The secret set parameters.
    All required parameters must be populated in order to send to Azure.
    :param value: Required. The value of the secret.
    :type value: str
    :param tags: A set of tags. Application specific metadata in the form of key-value pairs.
    :type tags: dict[str, str]
    :param content_type: Type of the secret value such as a password.
    :type content_type: str
    :param secret_attributes: The secret management attributes.
    :type secret_attributes: ~azure.keyvault.v7_3_preview.models.SecretAttributes
    """
    # Required fields must be supplied by the caller before sending.
    _validation = {
        'value': {'required': True},
    }
    # msrest (de)serialization map: model attribute -> JSON wire key and type.
    _attribute_map = {
        'value': {'key': 'value', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'content_type': {'key': 'contentType', 'type': 'str'},
        'secret_attributes': {'key': 'attributes', 'type': 'SecretAttributes'},
    }
    def __init__(
        self,
        **kwargs
    ):
        super(SecretSetParameters, self).__init__(**kwargs)
        # Required: indexing (rather than .get) raises KeyError if omitted.
        self.value = kwargs['value']
        self.tags = kwargs.get('tags', None)
        self.content_type = kwargs.get('content_type', None)
        self.secret_attributes = kwargs.get('secret_attributes', None)
class SecretUpdateParameters(msrest.serialization.Model):
    """The secret update parameters.
    :param content_type: Type of the secret value such as a password.
    :type content_type: str
    :param secret_attributes: The secret management attributes.
    :type secret_attributes: ~azure.keyvault.v7_3_preview.models.SecretAttributes
    :param tags: A set of tags. Application specific metadata in the form of key-value pairs.
    :type tags: dict[str, str]
    """
    # msrest (de)serialization map: model attribute -> JSON wire key and type.
    _attribute_map = {
        'content_type': {'key': 'contentType', 'type': 'str'},
        'secret_attributes': {'key': 'attributes', 'type': 'SecretAttributes'},
        'tags': {'key': 'tags', 'type': '{str}'},
    }
    def __init__(
        self,
        **kwargs
    ):
        super(SecretUpdateParameters, self).__init__(**kwargs)
        self.content_type = kwargs.get('content_type', None)
        self.secret_attributes = kwargs.get('secret_attributes', None)
        self.tags = kwargs.get('tags', None)
| Azure/azure-sdk-for-python | sdk/keyvault/azure-keyvault-secrets/azure/keyvault/secrets/_generated/v7_3_preview/models/_models.py | Python | mit | 19,685 |
from abc import ABCMeta, abstractmethod
from complexism.mcore import ModelAtom
from complexism.element import AbsTicker, Event
__author__ = 'TimeWz667'
__all__ = ['AbsActor', 'PassiveActor', 'ActiveActor']
class AbsActor(ModelAtom, metaclass=ABCMeta):
    """Base class for actors: model atoms attached to a sub-model."""
    def __init__(self, name, pars=None):
        ModelAtom.__init__(self, name, pars=pars)
    @abstractmethod
    def register(self, sub_model, ti):
        """Attach this actor to *sub_model* at time *ti* (subclass hook)."""
        pass
    def fill(self, obs: dict, model, ti):
        """Optionally write observations into *obs*; no-op by default."""
        pass
    @staticmethod
    @abstractmethod
    def decorate(name, model, **kwargs):
        """Construct/bind an actor named *name* onto *model* (subclass hook)."""
        pass
    @abstractmethod
    def match(self, be_src, ags_src, ags_new, ti):
        """Align this actor with a source behaviour/agents pair at time *ti*;
        exact semantics are defined by concrete subclasses."""
        pass
class ActiveActor(AbsActor, metaclass=ABCMeta):
    """An actor that schedules its own events from an internal clock."""
    def __init__(self, name, clock: AbsTicker, pars=None):
        AbsActor.__init__(self, name, pars=pars)
        self.Clock = clock
    def find_next(self):
        # The next event fires at the clock's next tick time.
        return self.compose_event(self.Clock.Next)
    def execute_event(self):
        """
        Do not use
        """
        pass
    def operate(self, model):
        # Consume the pending event: advance the clock to the event's time,
        # perform the scheduled action, then clear the cached event.
        event = self.Next
        time = event.Time
        self.Clock.update(time)
        self.do_action(model, event.Todo, time)
        self.drop_next()
    def initialise(self, ti, *args, **kwargs):
        self.Clock.initialise(ti)
    def reset(self, ti, *args, **kwargs):
        # Resetting re-initialises the clock to time ti.
        self.Clock.initialise(ti)
    @abstractmethod
    def compose_event(self, ti):
        """
        Compose the next event
        :param ti: current time
        :return:
        :rtype:
        """
        pass
    @abstractmethod
    def do_action(self, model, td, ti):
        """
        Let an event occur
        :param model: source model
        :param td: action to be done
        :param ti: time
        :type: double
        :return:
        """
        pass
class PassiveActor(AbsActor, metaclass=ABCMeta):
    """An actor that never schedules events of its own."""
    def __init__(self, name, pars=None):
        AbsActor.__init__(self, name, pars=pars)
    @property
    def Next(self):
        # A passive actor never has a pending event of its own.
        return Event.NullEvent
    @property
    def TTE(self):
        # Time-to-event is infinite: no self-scheduled event ever fires.
        return float('inf')
    def drop_next(self):
        return
    def find_next(self):
        pass
    def execute_event(self):
        pass
| TimeWz667/Kamanian | complexism/multimodel/actor.py | Python | mit | 2,187 |
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Frappe Technologies and Contributors
# See license.txt
import frappe
from frappe.integrations.doctype.social_login_key.social_login_key import BaseUrlNotSetError
import unittest
class TestSocialLoginKey(unittest.TestCase):
    def test_adding_frappe_social_login_provider(self):
        """Inserting a Frappe provider key without a Base URL must raise
        BaseUrlNotSetError."""
        provider_name = "Frappe"
        social_login_key = make_social_login_key(
            social_login_provider=provider_name
        )
        # Load the built-in provider defaults onto the (unsaved) document.
        social_login_key.get_social_login_provider(provider_name, initialize=True)
        self.assertRaises(BaseUrlNotSetError, social_login_key.insert)
def make_social_login_key(**kwargs):
    """Build (without saving) a Social Login Key document from keyword fields.

    A default ``provider_name`` of "Test OAuth2 Provider" is supplied when
    the caller does not provide one.
    """
    kwargs["doctype"] = "Social Login Key"
    # setdefault replaces the non-idiomatic `if not "x" in kwargs` check.
    kwargs.setdefault("provider_name", "Test OAuth2 Provider")
    doc = frappe.get_doc(kwargs)
    return doc
def create_or_update_social_login_key():
    # used in other tests (connected app, oauth20)
    # Fetch the existing "frappe" Social Login Key if present, otherwise
    # create a new one, then (re)initialise it from the built-in Frappe
    # provider defaults and persist it.
    try:
        social_login_key = frappe.get_doc("Social Login Key", "frappe")
    except frappe.DoesNotExistError:
        social_login_key = frappe.new_doc("Social Login Key")
    social_login_key.get_social_login_provider("Frappe", initialize=True)
    social_login_key.base_url = frappe.utils.get_url()
    social_login_key.enable_social_login = 0  # kept disabled for tests
    social_login_key.save()
    frappe.db.commit()  # commit so other test transactions can see it
    return social_login_key
| mhbu50/frappe | frappe/integrations/doctype/social_login_key/test_social_login_key.py | Python | mit | 1,273 |
"""
Say you have an array for which the ith element is the price of a given stock on day i.
If you were only permitted to complete at most one transaction (ie, buy one and sell one share of the stock), design an algorithm to find the maximum profit.
"""
class Solution:
    # @param prices, a list of integer
    # @return an integer
    def maxProfit(self, prices):
        """Best profit from one buy followed by one sell; 0 if none."""
        best = 0
        lowest = float('inf')
        for price in prices:
            if price < lowest:
                lowest = price          # cheapest buy seen so far
            elif price - lowest > best:
                best = price - lowest   # better sell found
        return best
"""Represent a descriptor of a resource."""
from __future__ import absolute_import
from inspect import isclass
import six
from future.builtins import object
from rotest.management.models import ResourceData
from rotest.management.common.errors import ResourceBuildError
from rotest.management.common.utils import (TYPE_NAME,
PROPERTIES,
extract_type,
extract_type_path)
class ResourceDescriptor(object):
    """Holds the data for a resource request."""
    def __init__(self, resource_type, **properties):
        """Initialize the required parameters of resource request.
        Args:
            resource_type (type): resource type.
            properties (kwargs): properties of the resource.
        """
        self.type = resource_type
        self.properties = properties
    def __repr__(self):
        """Returns the descriptor's repr string."""
        type_name = self.type.__name__
        keywords = ', '.join(['%s=%r' % (key, val)
                              for key, val in six.iteritems(self.properties)])
        return "%s(%s)" % (type_name, keywords)
    def build_resource(self):
        """Build a resource.
        Returns:
            rotest.common.models.base_resource.BaseResource. a resource.
        Raises:
            ResourceBuildError: Failed to build the resource with given params.
        """
        try:
            return self.type(**self.properties)
        except TypeError as ex:
            # Fix: the implicitly-concatenated literals previously produced
            # the garbled message "Original errorwas:" (missing space).
            raise ResourceBuildError('Failed to build resource. Original '
                                     'error was: "%s"' % ex)
    def encode(self):
        """Build a dictionary that represent the ResourceDescriptor.
        Returns:
            dict. the corresponding dictionary.
        """
        if isclass(self.type) and issubclass(self.type, ResourceData):
            name = extract_type_path(self.type)
        else:
            name = extract_type_path(self.type.DATA_CLASS)
        return {TYPE_NAME: name, PROPERTIES: self.properties}
    @staticmethod
    def decode(descriptor):
        """Build a ResourceDescriptor from the given dictionary.
        Args:
            descriptor (dict): a dictionary that represent a descriptor.
                For instance: {'type': 'my_res', 'properties': {'key1': 1}}.
        Returns:
            ResourceDescriptor. the corresponding ResourceDescriptor.
        Raises:
            ValueError: given dictionary missing a relevant key.
        """
        for key in (TYPE_NAME, PROPERTIES):
            if key not in descriptor:
                raise ValueError("'descriptor' %r missing key %r" %
                                 (descriptor, key))
        type_name = descriptor[TYPE_NAME]
        resource_type = extract_type(type_name)
        properties = descriptor[PROPERTIES]
        return ResourceDescriptor(resource_type, **properties)
| gregoil/rotest | src/rotest/management/common/resource_descriptor.py | Python | mit | 2,988 |
# Kata link: https://www.codewars.com/kata/57fb142297e0860073000064
def product(s):
    """Return the number of '!' characters multiplied by the number of '?'."""
    bangs = sum(1 for ch in s if ch == '!')
    questions = sum(1 for ch in s if ch == '?')
    return bangs * questions
| chyumin/Codewars | Python/7 kyu/Count the number of exclamation and question mark, return product.py | Python | mit | 122 |
#!C:\Users\saulo\Downloads\sauloal-cufflinksviewer-9364be0\sauloal-cufflinksviewer-9364be0\venvwin\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==0.6c11','console_scripts','easy_install'
__requires__ = 'setuptools==0.6c11'
import sys
from pkg_resources import load_entry_point

# Generated entry-point shim: resolve the 'easy_install' console script from
# the pinned setuptools distribution and exit with its return code.
sys.exit(
    load_entry_point('setuptools==0.6c11', 'console_scripts', 'easy_install')()
)
| sauloal/cufflinksviewer | venvwin/Scripts/easy_install-script.py | Python | mit | 392 |
from header_filter.matchers import Header
def test_and_matcher_supports_bitwise_not(rf):
    """~(A & B) must not match a request that carries both headers."""
    headers = {'HTTP_X_A': 'val_x', 'HTTP_X_B': 'val_y'}
    negated = ~(
        Header('HTTP_X_A', headers['HTTP_X_A'])
        & Header('HTTP_X_B', headers['HTTP_X_B'])
    )
    request = rf.get('/', **headers)
    assert negated.match(request) is False
def test_and_matcher_supports_bitwise_and(rf):
    """(A & B) & C matches a request carrying all three headers."""
    headers = {'HTTP_X_A': 'val_x', 'HTTP_X_B': 'val_y', 'HTTP_X_C': 'val_z'}
    left_pair = Header('HTTP_X_A', 'val_x') & Header('HTTP_X_B', 'val_y')
    matcher = left_pair & Header('HTTP_X_C', 'val_z')
    request = rf.get('/', **headers)
    assert matcher.match(request) is True
def test_and_matcher_supports_bitwise_or(rf):
    """(A & B) | C matches when all three headers are present."""
    headers = {'HTTP_X_A': 'val_x', 'HTTP_X_B': 'val_y', 'HTTP_X_C': 'val_z'}
    left_pair = Header('HTTP_X_A', 'val_x') & Header('HTTP_X_B', 'val_y')
    matcher = left_pair | Header('HTTP_X_C', 'val_z')
    request = rf.get('/', **headers)
    assert matcher.match(request) is True
def test_and_matcher_supports_bitwise_xor(rf):
    """(A & B) ^ C is False when both sides match (exclusive-or)."""
    headers = {'HTTP_X_A': 'val_x', 'HTTP_X_B': 'val_y', 'HTTP_X_C': 'val_z'}
    left_pair = Header('HTTP_X_A', 'val_x') & Header('HTTP_X_B', 'val_y')
    matcher = left_pair ^ Header('HTTP_X_C', 'val_z')
    request = rf.get('/', **headers)
    assert matcher.match(request) is False
def test_repr():
    """repr of an And matcher wraps both operand reprs in parentheses."""
    combined = Header('HTTP_X_A', 'val_x') & Header('HTTP_X_B', 'val_y')
    expected = "(Header('HTTP_X_A', 'val_x') & Header('HTTP_X_B', 'val_y'))"
    assert repr(combined) == expected
| sanjioh/django-header-filter | tests/test_matcher_and.py | Python | mit | 1,834 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from .. import models
class InheritanceOperations(object):
    """InheritanceOperations operations.
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    # NOTE(review): AutoRest-generated client code; regeneration overwrites
    # manual edits, so only comments/docstrings are touched here.
    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self.config = config
    def get_valid(
            self, custom_headers=None, raw=False, **operation_config):
        """Get complex types that extend others.
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`Siamese
         <Fixtures.AcceptanceTestsBodyComplex.models.Siamese>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises:
         :class:`ErrorException<Fixtures.AcceptanceTestsBodyComplex.models.ErrorException>`
        """
        # Construct URL
        url = '/complex/inheritance/valid'
        # Construct parameters
        query_parameters = {}
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)
        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)
        # Anything other than 200 is surfaced as the service's typed error.
        if response.status_code not in [200]:
            raise models.ErrorException(self._deserialize, response)
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('Siamese', response)
        if raw:
            # Caller asked for the transport response alongside the model.
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    def put_valid(
            self, complex_body, custom_headers=None, raw=False, **operation_config):
        """Put complex types that extend others.
        :param complex_body: Please put a siamese with id=2, name="Siameee",
         color=green, breed=persion, which hates 2 dogs, the 1st one named
         "Potato" with id=1 and food="tomato", and the 2nd one named "Tomato"
         with id=-1 and food="french fries".
        :type complex_body: :class:`Siamese
         <Fixtures.AcceptanceTestsBodyComplex.models.Siamese>`
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: None
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises:
         :class:`ErrorException<Fixtures.AcceptanceTestsBodyComplex.models.ErrorException>`
        """
        # Construct URL
        url = '/complex/inheritance/valid'
        # Construct parameters
        query_parameters = {}
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)
        # Construct body
        body_content = self._serialize.body(complex_body, 'Siamese')
        # Construct and send request
        request = self._client.put(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, **operation_config)
        if response.status_code not in [200]:
            raise models.ErrorException(self._deserialize, response)
        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
| fhoring/autorest | src/generator/AutoRest.Python.Tests/Expected/AcceptanceTests/BodyComplex/autorestcomplextestservice/operations/inheritance_operations.py | Python | mit | 4,687 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._api_management_client import ApiManagementClient
from ._version import VERSION
# Re-export the package version resolved at build time.
__version__ = VERSION
# Public API of the package: only the top-level client.
__all__ = ['ApiManagementClient']
# `._patch.py` is used for handwritten extensions to the generated code
# Example: https://github.com/Azure/azure-sdk-for-python/blob/main/doc/dev/customize_code/how-to-patch-sdk-code.md
from ._patch import patch_sdk
patch_sdk()
| Azure/azure-sdk-for-python | sdk/apimanagement/azure-mgmt-apimanagement/azure/mgmt/apimanagement/__init__.py | Python | mit | 841 |
#!/usr/bin/python
"""
Simple point imagery request for Earth Engine
"""
## MIT License
##
## Copyright (c) 2017, krishna bhogaonker
## Permission is hereby granted, free of charge, to any person obtaining a ## copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
## The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__author__ = 'krishna bhogaonker'
__copyright__ = 'copyright 2017'
__credits__ = ['krishna bhogaonker']
__license__ = "MIT"
__version__ = '0.1.0'
__maintainer__ = 'krishna bhogaonker'
__email__ = '[email protected]'
__status__ = 'pre-alpha'
from aenum import Enum, extend_enum
import ee
from .AdapterSpecifyImageryCollection import AdapterSpecifyImageryCollection
from .AdapterDateFilter import AdapterDateFilter
from .AdapterPointBoundingBox import AdapterPointBoundingBox
from .abcEarthEngineProcessor import abcEarthEngineProcessor
# Authenticate and start the Earth Engine session at import time.
# NOTE(review): this is a module-level side effect; importing this module
# without configured EE credentials will fail here.
ee.Initialize()
class EarthEngineSimplePointImageryProcessor(abcEarthEngineProcessor):
    """Builds a download URL of clipped imagery for every point in a request."""

    def process(self):
        """Iterate the request's rows and prepare a per-point download URL.

        For each data row: configure the imagery collection, apply date and
        boundary filters, clip every image to the square bounds of the
        buffered point, flatten the collection to one image, and ask Earth
        Engine for a download URL of that image.
        """
        self.add_metadata_columns_to_request_data()

        for index, row in self.get_request().get_data_iterator():
            self.set_imageryCollection()
            self.set_dateFilterToRequestDates()
            # TODO check the order of y and x for proper filter.
            coords = [row.Geometry.y, row.Geometry.x]
            self.set_boundaryFilter(coords)
            point = ee.Geometry.Point(coords).buffer(self.request.get_radius())
            bounds = point.bounds()

            def clipper(image):
                return image.clip(bounds)

            # getInfo() returns a plain GeoJSON dict, so the outer ring must
            # be fetched with item access, not attribute access.
            boundary = ee.Geometry.Polygon(
                bounds.getInfo()['coordinates'][0]).toGeoJSONString()

            # map() returns a NEW collection; the original code discarded it.
            clipped = self.get_imageryCollection().map(clipper)
            # Flatten the clipped collection into a single downloadable image.
            # NOTE(review): mosaic() assumed as the flattening step - confirm.
            image = clipped.mosaic()
            filename = 'point_{0}'.format(index)

            url = ee.data.makeDownloadUrl(
                ee.data.getDownloadId({
                    'image': image.serialize(),
                    'scale': '%d' % self.get_request().get_resolution(),
                    'crs': 'EPSG:4326',
                    'filePerBand': 'false',
                    'name': filename,
                    'region': boundary,
                }))
            # TODO(review): `url` is computed but never stored or fetched;
            # persist it (e.g. onto the request data) or download it here.

    def add_metadata_columns_to_request_data(self):
        """Add the bookkeeping columns that processing is expected to fill."""
        self.get_request().add_column_to_data('source_id')
        self.get_request().add_column_to_data('bands')
        self.get_request().add_column_to_data('imagery_startdate')
        self.get_request().add_column_to_data('imagery_enddate')
        self.get_request().add_column_to_data('download_date')
| krishnab-datakind/mining-data-acquisition | data_gather/EarthEngineSimplePointImageryProcessor.py | Python | mit | 3,293 |
from typing import Dict
from urllib.parse import (
urlencode,
urlunsplit,
)
class ISignEnvironment:
    """A named isign API endpoint: scheme plus network location."""

    def __init__(self, name: str, netloc: str, scheme: str = "https") -> None:
        self.name = name
        self.netloc = netloc
        self.scheme = scheme

    def __repr__(self) -> str:
        return (
            "ISignEnvironment("
            f"name={self.name!r}, netloc={self.netloc!r}, scheme={self.scheme!r})"
        )

    def __str__(self) -> str:
        return f"< isign env {self.name!r} {self.netloc} >"

    def construct_url(self, access_token: str, path: str) -> str:
        """Build the full URL for *path*, carrying the token as a query arg."""
        if not access_token:
            raise ValueError("access_token must be provided")
        token_query = urlencode({"access_token": access_token})
        return urlunsplit((self.scheme, self.netloc, path, token_query, ""))
# Predefined endpoints: "sandbox" for integration testing against the
# developer host, "live" for production traffic.
SANDBOX = ISignEnvironment("sandbox", "developers.isign.io")
LIVE = ISignEnvironment("live", "api2.isign.io")
# Registry used by get_default_environment() to resolve environments by name.
ENV_MAP: Dict[str, ISignEnvironment] = {
    SANDBOX.name: SANDBOX,
    LIVE.name: LIVE,
}
def get_default_environment(name: str) -> ISignEnvironment:
    """Look up one of the predefined environments by its name.

    Raises ValueError for a name that is not registered in ENV_MAP.
    """
    environment = ENV_MAP.get(name)
    if environment is None:
        raise ValueError(f"unknown ISignEnvironment {name!r}")
    return environment
| Paulius-Maruska/python-isign | src/isign/environment.py | Python | mit | 1,196 |
from rest_framework import serializers
from models import Resource
from votes.models import Vote
from comments.serializers import CommentSerializer
class ResourceSerializer(serializers.ModelSerializer):
    """Resource Serializer.

    Serializes a Resource together with its nested comments; the author is
    rendered as the owning user's username rather than the raw FK id.
    """
    # Comments are embedded read-only; they are managed via their own API.
    comments = CommentSerializer(many=True, read_only=True)
    author = serializers.ReadOnlyField(source='author.username')
    class Meta:
        model = Resource
        fields = ('id', 'author', 'text', 'language_tags', 'resource_file',
                  'resource_file_name', 'resource_file_size', 'snippet_text',
                  'date_added', 'date_modified', 'comments')
        read_only_fields = ('date_modified', 'date_added', 'comments')
class ResourceVoteSerializer(serializers.ModelSerializer):
    """Resource Votes Serializer.

    ``create`` implements toggle semantics: a first vote is stored, voting
    the opposite way flips the stored vote, and repeating the same vote
    removes it.
    """
    user = serializers.ReadOnlyField(source='user.username')
    resource = serializers.ReadOnlyField(source='resource.text')

    class Meta:
        model = Vote
        fields = ('id', 'user', 'vote', 'resource', 'time_stamp')
        # BUG FIX: ('time_stamp') is just a string; a one-element tuple
        # needs the trailing comma, otherwise DRF iterates it per character.
        read_only_fields = ('time_stamp',)

    def create(self, validated_data):
        """Create, flip, or remove the user's vote on a resource.

        Returns the Vote instance that was created, updated, or deleted.
        """
        vote_action = validated_data.get('vote')
        resource = validated_data.get('resource')
        user = validated_data.get('user')
        existing_vote = Vote.objects.filter(
            resource=resource,
            user=user
        ).first()

        if existing_vote is None:
            vote = Vote(user=user, resource=resource, vote=vote_action)
            vote.save()
            return vote
        elif existing_vote.vote != vote_action:
            # BUG FIX: compare by value, not identity -- ``is not`` on vote
            # values (small ints/strings) relies on interning and is unreliable.
            existing_vote.vote = vote_action
            existing_vote.save()
        else:
            # Same vote repeated: treat as an "undo".
            existing_vote.delete()
        return existing_vote
| andela/codango | codango/resources/serializers.py | Python | mit | 1,724 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class LocalNetworkGatewaysOperations:
"""LocalNetworkGatewaysOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_10_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        """Store the shared pipeline client, configuration and (de)serializers."""
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    async def _create_or_update_initial(
        self,
        resource_group_name: str,
        local_network_gateway_name: str,
        parameters: "_models.LocalNetworkGateway",
        **kwargs: Any
    ) -> "_models.LocalNetworkGateway":
        # Initial PUT of the long-running create/update; polled by
        # begin_create_or_update. AutoRest-generated - only comments added.
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.LocalNetworkGateway"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-10-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._create_or_update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'LocalNetworkGateway')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # 200 = updated existing gateway, 201 = created new one.
        if response.status_code == 200:
            deserialized = self._deserialize('LocalNetworkGateway', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('LocalNetworkGateway', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'}  # type: ignore
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        local_network_gateway_name: str,
        parameters: "_models.LocalNetworkGateway",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.LocalNetworkGateway"]:
        """Creates or updates a local network gateway in the specified resource group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param local_network_gateway_name: The name of the local network gateway.
        :type local_network_gateway_name: str
        :param parameters: Parameters supplied to the create or update local network gateway operation.
        :type parameters: ~azure.mgmt.network.v2018_10_01.models.LocalNetworkGateway
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either LocalNetworkGateway or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_10_01.models.LocalNetworkGateway]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.LocalNetworkGateway"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial PUT when not resuming from a saved token.
        if cont_token is None:
            raw_result = await self._create_or_update_initial(
                resource_group_name=resource_group_name,
                local_network_gateway_name=local_network_gateway_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            deserialized = self._deserialize('LocalNetworkGateway', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # Final state of this LRO is reported via the Azure-AsyncOperation header.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'}  # type: ignore
    async def get(
        self,
        resource_group_name: str,
        local_network_gateway_name: str,
        **kwargs: Any
    ) -> "_models.LocalNetworkGateway":
        """Gets the specified local network gateway in a resource group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param local_network_gateway_name: The name of the local network gateway.
        :type local_network_gateway_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: LocalNetworkGateway, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2018_10_01.models.LocalNetworkGateway
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.LocalNetworkGateway"]
        # Map auth/not-found/conflict responses to typed exceptions.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-10-01"
        accept = "application/json"
        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('LocalNetworkGateway', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'}  # type: ignore
    async def _delete_initial(
        self,
        resource_group_name: str,
        local_network_gateway_name: str,
        **kwargs: Any
    ) -> None:
        # Initial DELETE of the long-running delete; polled by begin_delete.
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-10-01"
        # Construct URL
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 200/202 = accepted for async deletion, 204 = already gone.
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'}  # type: ignore
    async def begin_delete(
        self,
        resource_group_name: str,
        local_network_gateway_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Deletes the specified local network gateway.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param local_network_gateway_name: The name of the local network gateway.
        :type local_network_gateway_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial DELETE when not resuming from a saved token.
        if cont_token is None:
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                local_network_gateway_name=local_network_gateway_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Delete has no body to deserialize; only honor a custom cls.
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # Final state of this LRO is reported via the Location header.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'}  # type: ignore
    async def _update_tags_initial(
        self,
        resource_group_name: str,
        local_network_gateway_name: str,
        parameters: "_models.TagsObject",
        **kwargs: Any
    ) -> "_models.LocalNetworkGateway":
        # Initial PATCH of the long-running tags update; polled by
        # begin_update_tags.
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.LocalNetworkGateway"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-10-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._update_tags_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'TagsObject')
        body_content_kwargs['content'] = body_content
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('LocalNetworkGateway', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'}  # type: ignore
    async def begin_update_tags(
        self,
        resource_group_name: str,
        local_network_gateway_name: str,
        parameters: "_models.TagsObject",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.LocalNetworkGateway"]:
        """Updates a local network gateway tags.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param local_network_gateway_name: The name of the local network gateway.
        :type local_network_gateway_name: str
        :param parameters: Parameters supplied to update local network gateway tags.
        :type parameters: ~azure.mgmt.network.v2018_10_01.models.TagsObject
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either LocalNetworkGateway or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_10_01.models.LocalNetworkGateway]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.LocalNetworkGateway"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial PATCH when not resuming from a saved token.
        if cont_token is None:
            raw_result = await self._update_tags_initial(
                resource_group_name=resource_group_name,
                local_network_gateway_name=local_network_gateway_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            deserialized = self._deserialize('LocalNetworkGateway', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # No lro_options here: this LRO uses the default final-state resolution.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'}  # type: ignore
    def list(
        self,
        resource_group_name: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.LocalNetworkGatewayListResult"]:
        """Gets all the local network gateways in a resource group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either LocalNetworkGatewayListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_10_01.models.LocalNetworkGatewayListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # NOTE(review): auto-generated Azure SDK paging wrapper; logic left
        # byte-identical, comments only.
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.LocalNetworkGatewayListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-10-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Build the HTTP request for the first page (templated URL) or a
            # continuation page (next_link is already a complete URL).
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            # Turn one raw page response into (next_link, items).
            deserialized = self._deserialize('LocalNetworkGatewayListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            # Fetch a single page; any non-200 status is raised as an error.
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways'}  # type: ignore
| Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_10_01/aio/operations/_local_network_gateways_operations.py | Python | mit | 27,485 |
"""
Functions:
convert_gene_ids
"""
# _convert_gene_ids_biomart
# _convert_gene_ids_local
# _convert_entrez_symbol_to_entrez # uses genefinder
# _find_entrez_gene
#
# _clean_genes_for_biomart
# _remove_dups
# _start_R
# Cached R session shared by every biomaRt query in this module.
GLOBAL_R = None
def _start_R():
    """Return a module-wide R session with the biomaRt library loaded.

    The session is created on first use and cached in GLOBAL_R so repeated
    conversions reuse one R process.
    """
    global GLOBAL_R
    from genomicode import jmath
    if GLOBAL_R is not None:
        return GLOBAL_R
    session = jmath.start_R()
    session('library(biomaRt)')
    GLOBAL_R = session
    return GLOBAL_R
def convert_gene_ids(
    gene_ids, in_platform, out_platform, in_delim=" /// ", out_delim=" /// ",
    keep_dups=False, keep_emptys=False, no_na=True):
    """Convert gene IDs from one platform to another.

    Returns a list parallel to gene_ids.  An ID that maps to multiple
    output IDs gets them joined with out_delim; an ID with no mapping
    becomes the empty string.  Conversion is tried in order: genefinder
    (Entrez symbol->Entrez), local lookup tables, then BioMart.
    """
    # Return a list of the output IDs, parallel to gene_ids.  If a
    # gene ID to multiple output IDs, the output IDs will be separated
    # by out_delim.  If it is missing, then the output will be an
    # empty string.
    # in_platform and out_platform are the names of the platforms.
    from genomicode import arrayplatformlib as apl
    # Make a cleaned up version of the gene_ids to convert.
    # RefSeq/Ensembl IDs can carry ".N" version suffixes that would
    # defeat the lookup, so strip them for those platforms.
    remove_version = False
    if in_platform.lower().find("refseq") >= 0 or \
           in_platform.lower().find("ensembl") >= 0:
        remove_version = True
    x = apl.normalize_ids(
        gene_ids, delimiter=in_delim, remove_version_number=remove_version)
    # No duplicates.  (Python 2 idiom: dict.fromkeys().keys() returns a
    # list; order of the deduplicated IDs is not preserved.)
    x = {}.fromkeys(x).keys()
    gene_ids_c = x
    in2out = None
    if in_platform == "Entrez_Symbol_human" and \
       out_platform in ("Entrez_Symbol_human", "Entrez_ID_human"):
        in2out = _convert_entrez_symbol_to_entrez(
            gene_ids_c, out_platform, "9606")
    if in2out is None:
        in2out = _convert_gene_ids_local(in_platform, out_platform)
    if in2out is None:
        in2out = _convert_gene_ids_biomart(
            gene_ids_c, in_platform, out_platform, no_na)
    assert not gene_ids_c or in2out, \
        "I could not convert %s to %s" % (in_platform, out_platform)
    # Make a parallel list of the output IDs.
    output_ids = []
    for gene_id in gene_ids:
        in_ids = apl.normalize_id(
            gene_id, delimiter=in_delim, remove_version_number=remove_version)
        out_ids = []
        for x in in_ids:
            # Missing IDs map to [""] so empty slots survive unless
            # keep_emptys is False.
            y = in2out.get(x, [""])
            out_ids.extend(y)
        if not keep_emptys:
            out_ids = [x for x in out_ids if x]
        if not keep_dups:
            out_ids = _remove_dups(out_ids)
        x = out_delim.join(out_ids)
        output_ids.append(x)
    return output_ids
def _clean_genes_for_biomart(gene_ids):
gene_ids = [i.replace("'",'') for i in gene_ids] #modify "2'-PDE"
return gene_ids
def _convert_gene_ids_biomart(gene_ids, in_platform, out_platform, no_na):
    """Convert gene IDs via BioMart, batching large requests.

    Returns a dict of in_id -> list of out_ids.
    """
    # Maximum number of genes to request in a single BioMart query.
    CHUNK = 25000
    mapping = {}
    remaining = list(gene_ids)
    while remaining:
        batch, remaining = remaining[:CHUNK], remaining[CHUNK:]
        converted = _convert_gene_ids_biomart_h(
            batch, in_platform, out_platform, no_na)
        assert converted is not None, "Cannot convert: %s -> %s" % (
            in_platform, out_platform)
        mapping.update(converted)
    return mapping
def _convert_gene_ids_biomart_h(gene_ids, in_platform, out_platform, no_na):
    """Do one BioMart getLDS query mapping gene_ids between platforms.

    Returns a dict of gene_id -> sorted list of converted IDs, or None
    if either platform has no known BioMart attribute.
    """
    # Return a dictionary of gene_id -> list of converted_ids, or None
    # if these platforms cannot be converted.
    from genomicode import jmath
    from genomicode import arrayplatformlib
    from genomicode import timer
    if not gene_ids:
        return {}
    R_fn, R_var = jmath.R_fn, jmath.R_var
    # An attribute is the biomart name for the platform.
    in_attribute = arrayplatformlib.get_bm_attribute(in_platform)
    out_attribute = arrayplatformlib.get_bm_attribute(out_platform)
    #assert in_attribute, "Bad platform: %s" % in_platform
    #assert out_attribute, "Bad platform: %s" % out_platform
    if not in_attribute or not out_attribute:
        return None
    in_mart = arrayplatformlib.get_bm_organism(in_platform)
    out_mart = arrayplatformlib.get_bm_organism(out_platform)
    assert in_mart, "No bm organism for platform: %s" % in_platform
    gene_ids = _clean_genes_for_biomart(gene_ids)
    R = _start_R()
    # Select the BioMart dataset to use.
    #mart = "ensembl"
    mart = "ENSEMBL_MART_ENSEMBL"   # Changed 151120.
    host = "www.ensembl.org"
    R_fn("useMart", mart, in_mart, host=host, RETVAL="in.dataset")
    R_fn("useMart", mart, out_mart, host=host, RETVAL="out.dataset")
    # Link two data sets and retrieve information from the linked datasets.
    jmath.R_equals_vector(gene_ids, 'gene.ids')
    # ERROR:
    # Error in getLDS(attributes = "ensembl_gene_id", filters =
    #   "ensembl_gene_id",  : The query to the BioMart webservice
    #   returned an invalid result: the number of columns in the result
    #   table does not equal the number of attributes in the
    #   query. Please report this to the mailing list.
    # Can mean that the gene IDs are bad.  E.g. version numbers still
    # on entrez IDs.
    # Throttle requests so we do not hammer the BioMart server.
    timer.wait(3)
    R_fn(
        "getLDS", attributes=in_attribute, filters=in_attribute,
        values=R_var("gene.ids"), mart=R_var("in.dataset"),
        attributesL=out_attribute, martL=R_var("out.dataset"),
        RETVAL="homolog")
    homolog = R["homolog"]
    # homolog is DataFrame with two parallel rows:
    # <in_ids>
    # <out_ids>
    assert len(homolog) == 2, \
        "BioMart returned no results mapping from %s:%s to %s:%s." % (
            in_mart, in_attribute, out_mart, out_attribute)
    in_ids = [str(x) for x in homolog[0]]
    out_ids = [str(x) for x in homolog[1]]
    # Sometimes BioMart will generate "NA" if something is missing.
    if no_na:
        for i in range(len(out_ids)):
            if out_ids[i].upper() == "NA":
                out_ids[i] = ""
    # Collect the (possibly many) output IDs per input ID, dropping blanks.
    in2out = {}
    for x, y in zip(in_ids, out_ids):
        if not y.strip():
            continue
        val = in2out.get(x, [])
        val.append(y)
        in2out[x] = sorted(val)
    return in2out
def _convert_gene_ids_local(in_platform, out_platform):
    """Look up a pre-built conversion table shipped with genomicode.

    Returns a dict of in_id -> list of out_ids, or None when no table
    file exists for this platform pair.
    """
    import os
    from genomicode import config
    from genomicode import filelib
    filelib.assert_exists_nz(config.convert_platform)
    table_name = "%s___%s.txt" % (in_platform, out_platform)
    filename = os.path.join(config.convert_platform, table_name)
    if not os.path.exists(filename):
        return None
    mapping = {}
    for cols in filelib.read_cols(filename):
        # Each row: <in_id> <out_id1> ... <out_idn>
        assert len(cols) >= 2
        mapping[cols[0]] = cols[1:]
    return mapping
# Cache of symbol -> (gene_id, gene_symbol) lookups.
# NOTE(review): this cache is read below but never written to anywhere in
# this module, so it only has an effect if another module populates it.
FOUND_ID2ENTREZ = {}
def _convert_entrez_symbol_to_entrez(gene_ids, out_platform, db_tax_id):
    """Map gene symbols to Entrez IDs or canonical symbols via genefinder.

    Returns a dict of in_id -> [converted_id]; symbols that cannot be
    resolved are simply omitted.
    """
    # Return a dictionary of gene_id -> list of converted_ids.
    # Need db_tax_id to limit search space.
    global FOUND_ID2ENTREZ
    FOUND = FOUND_ID2ENTREZ
    # Not sure if other organisms are implemented.
    assert db_tax_id in ["9606", "10090"]
    #db_tax_id = "9606"
    #db_tax_id = "10090"
    in2out = {}
    for in_id in gene_ids:
        gene_id = gene_symbol = None
        # Try to find the gene ID from the symbol.
        # First, look to see if it's already been found.
        if in_id in FOUND:
            gene_id, gene_symbol = FOUND[in_id]
        if not gene_id:
            x = _find_entrez_gene(in_id, db_tax_id)
            if x:
                gene_id, gene_symbol = x
        if out_platform == "Entrez_ID_human" and gene_id:
            in2out[in_id] = [str(gene_id)]
        elif out_platform == "Entrez_Symbol_human" and gene_symbol:
            in2out[in_id] = [gene_symbol]
    return in2out
# Holds the text of the last lookup failure (for callers to inspect);
# reset at the start of every _find_entrez_gene call.
FIND_GENE_ERROR = None
def _find_entrez_gene(gene_symbol, tax_id):
    """Resolve a gene symbol (including discontinued ones) via genefinder.

    Returns a tuple of (gene_id, gene_symbol) or None.  On failure the
    error text is stored in the module-level FIND_GENE_ERROR.
    NOTE: uses Python 2 ``except E, x`` syntax; this module is Python 2 only.
    """
    # Return tuple of (gene_id, gene_symbol) or None.
    global FIND_GENE_ERROR
    import MySQLdb
    from genomicode import genefinder
    FIND_GENE_ERROR = None
    # Try to find the gene.
    try:
        x = genefinder.find_gene(gene_symbol, tax_id=tax_id)
    except AssertionError, x:
        FIND_GENE_ERROR = str(x)
        return None
    except MySQLdb.OperationalError, x:
        # Only swallow "server unreachable" errors; re-raise the rest.
        if str(x).find("Can't connect to MySQL server") < 0:
            raise
        FIND_GENE_ERROR = str(x)
        return None
    if x:
        gene_id = x[0]
        gene_symbol = x[1]
        return gene_id, gene_symbol
    # Could not find gene.  Look for a discontinued gene.
    try:
        x = genefinder.find_discontinued(gene_symbol, tax_id=tax_id)
        if x:
            x = genefinder.find_gene(x, tax_id=tax_id)
    except AssertionError, x:
        FIND_GENE_ERROR = str(x)
        return None
    if x:
        gene_id = x[0]
        gene_symbol = x[1]
        return gene_id, gene_symbol
    return None
def _remove_dups(ids):
# Preserve order.
ids_c = []
for x in ids:
if x not in ids_c:
ids_c.append(x)
return ids_c
| jefftc/changlab | genomicode/arrayannot.py | Python | mit | 8,944 |
#!/usr/bin/env python3
# Count occurrences of each input string, then answer each query.
# Counter makes every query O(1); the original list + .count() approach
# rescanned all N strings for every one of the Q queries.
from collections import Counter

N = int(input())
counts = Counter(input() for _ in range(N))

Q = int(input())
for _ in range(Q):
    # A Counter returns 0 for unseen keys, matching the original
    # "print 0 when not found" branch.
    print(counts[input()])
| onyb/cpp | HackerRank/Domains/Data_Structures/Arrays/sparse-arrays.py | Python | mit | 275 |
# Generated by Django 3.2.6 on 2021-08-14 22:58
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the custom User model.

    The field set mirrors django.contrib.auth's AbstractUser.  Do not
    hand-edit generated field definitions; create a new migration instead.
    """
    initial = True
    dependencies = [
        ('auth', '0012_alter_user_first_name_max_length'),
    ]
    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
                ('first_name', models.CharField(blank=True, max_length=150, verbose_name='first name')),
                ('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
                ('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'verbose_name': 'user',
                'verbose_name_plural': 'users',
                'abstract': False,
            },
            managers=[
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
    ]
| pizzapanther/Neutron-Sync | nsync_server/account/migrations/0001_initial.py | Python | mit | 2,873 |
# The MIT License (MIT)
#
# Copyright (c) 2016 Fabian Wenzelmann <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from django.db import models
import django.utils
import uuid
from .settings import *
from .foodle_polls_exceptions import IDCreationException
from django.utils.translation import ugettext_lazy as _, ungettext_lazy
# Create your models here.
class Poll(models.Model):
    """A scheduling poll (date/time options or free-text options).

    ``poll_id`` is the public handle for participants; ``admin_id`` is
    the secret handle for the poll's administrator.
    """
    pub_date = models.DateTimeField(
        _('Date published'),
        default=django.utils.timezone.now)
    name = models.CharField(
        _('Name'),
        help_text=_('Name of the poll'),
        max_length=200)
    poll_id = models.CharField(
        _('ID'),
        help_text=_('ID of the poll'),
        max_length=100,
        unique=True)
    admin_id = models.CharField(
        _('Admin ID'),
        help_text=_('Admin ID of the poll'),
        max_length=100,
        unique=True)
    initiator = models.CharField(
        _('Initiator'),
        help_text=_('The person who initiated the poll'),
        max_length=20,
        blank=True)
    location = models.CharField(
        _('Location'),
        help_text=_('The location where the scheduled event takes place'),
        max_length=50,
        blank=True)
    description = models.CharField(
        _('Description'),
        help_text=_('A description what this poll is about'),
        max_length=100,
        blank=True)
    ifneedbe = models.BooleanField(
        _('If-Need-Be'),
        help_text=_('If enabled provides a third option "If-Need-Be"'),
        default=False)
    hidden = models.BooleanField(
        _('Hidden'),
        help_text=_(
            'If enabled only the admin can see how the participants voted'),
        default=False)
    freetext = models.BooleanField(
        _('Free text poll'),
        help_text=_(
            'If true free texts can be entered, otherwise dates and times can be selected'),
        default=False)
    @staticmethod
    def gen_id(field):
        """Generate a random hex ID that is unique for the given field.

        :param field: model field name to check uniqueness against
            ('poll_id' or 'admin_id').
        :raises IDCreationException: after ``id_tries`` consecutive
            collisions.
        """
        id_length = get_foodle_setting('id_length')
        id_tries = get_foodle_setting('id_tries')
        _id = uuid.uuid4().hex[:id_length]
        tries = 0
        while Poll.objects.filter(**{field: _id}):
            # BUG FIX: 'tries' was never incremented, so the retry limit
            # could never trigger and repeated collisions looped forever.
            tries += 1
            if tries >= id_tries:
                raise IDCreationException(
                    'Stoppend creating %s after %d tries' %
                    (field, tries))
            _id = uuid.uuid4().hex[:id_length]
        return _id
    @staticmethod
    def gen_poll_id():
        """Generate a unique public poll ID."""
        return Poll.gen_id('poll_id')
    @staticmethod
    def gen_admin_id():
        """Generate a unique admin ID."""
        return Poll.gen_id('admin_id')
class PollOption(models.Model):
    """Base class for poll options; concrete subclasses add the payload
    (free text, a datetime, or a whole day)."""
    poll = models.ForeignKey(Poll, on_delete=models.CASCADE)
    class Meta:
        # Keep options in creation order.
        ordering = ['id']
class FreeTextOption(PollOption):
    """Option of a free-text poll (Poll.freetext is True)."""
    choice_text = models.CharField(
        _('Choice Text'),
        help_text=_('Text of the option'),
        max_length=50)
class DateTimeOption(PollOption):
    """Option representing a specific date and time."""
    choice_time = models.DateTimeField()
class WholeDayOption(PollOption):
    """Option representing an entire day (no time component)."""
    choice_day = models.DateField()
class Answer(models.Model):
    """One participant's vote on one option.

    A participant is identified by the (user, user_id) pair since user
    names are not unique across polls.
    """
    option = models.ForeignKey(PollOption, on_delete=models.CASCADE)
    time_posted = models.DateTimeField(
        _('Time posted'),
        default=django.utils.timezone.now)
    user = models.CharField(
        _('User'),
        help_text=_('User that gave the answer'),
        max_length=50)
    user_id = models.PositiveIntegerField(
        _('User ID'),
        help_text=_('Together with user name defines user'))
    answer = models.SmallIntegerField(
        _('Answer'),
        help_text=_('The selected value, either -1, 0 or 1'))
    class Meta:
        # One vote per participant per option.
        unique_together = ('option', 'user', 'user_id')
class Comment(models.Model):
    """A free-text comment posted by a user.

    NOTE(review): this model has no foreign key to Poll, so comments are
    not associated with a specific poll -- confirm whether that is intended.
    """
    time_posted = models.DateTimeField(
        _('Time posted'),
        default=django.utils.timezone.now)
    user = models.CharField(
        _('User'),
        help_text=_('User that posted the comment'),
        max_length=50)
    description = models.CharField(
        _('Description'),
        help_text=_('A description what this poll is about'),
        max_length=250)
class FirstAnswered(models.Model):
    """Records the first time a (user, user_id) pair answered a poll.

    Also serves as the allocator of per-name user_id values via
    get_next_user_id().
    """
    poll = models.ForeignKey(Poll, on_delete=models.CASCADE)
    user = models.CharField(
        _('User'),
        help_text=_('User that answered'),
        max_length=50)
    user_id = models.PositiveIntegerField(
        _('User ID'),
        help_text=_('Together with user name defines user'))
    time = models.DateTimeField(
        _('Time first answered'),
        default=django.utils.timezone.now)
    @staticmethod
    def get_next_user_id(user_name):
        """Return the next free user_id for user_name (0 if unseen)."""
        max_id = FirstAnswered.objects.filter(user=user_name).aggregate(
            models.Max('user_id'))['user_id__max']
        if max_id is None:
            return 0
        else:
            return max_id + 1
    class Meta:
        unique_together = ('poll', 'user', 'user_id')
| FabianWe/foodle | foodle/foodle_polls/models.py | Python | mit | 5,923 |
import datetime
from HistoryData import HistoryData
from Button import Button
from ButtonGenerator import ButtonGenerator
from typing import Tuple
class History:
    """Rolling record of detected hand states, used to debounce finger
    counts and on-screen button presses over a ~2 second window.

    Entries are stored newest-first: index 0 is the most recent sample.
    """
    def __init__(self):
        # List of HistoryData, most recent first (see add_information).
        self.hand_list = []
    def add_information(self, center_of_hand, number_of_fingers: int) -> None:
        """Prepend the current hand state, timestamped with now()."""
        time = datetime.datetime.now()
        history_data = HistoryData(center_of_hand, time, number_of_fingers)
        self.hand_list.insert(0, history_data)
    def get_center_of_hand(self, index) -> Tuple[int, int]:
        """Hand center recorded at history position *index* (0 = newest)."""
        return self.hand_list[index].center_of_hand
    def get_time(self, index) -> datetime:
        """Timestamp recorded at history position *index*."""
        return self.hand_list[index].time
    def get_number_of_fingers(self, index) -> int:
        """Finger count recorded at history position *index*."""
        return self.hand_list[index].number_of_fingers
    def get_operator(self, index) -> str:
        # Presumably an operator stored on HistoryData -- it is never set in
        # this class; confirm against HistoryData.  TODO(review)
        return self.hand_list[index].operator
    def confirmed_finger_number(self):
        """Return a finger count held steady for >= 2 seconds, else None.

        Note: the scan does not break at the first confirmed point, so a
        match at a larger index (further in the past) overrides earlier
        ones -- presumably intentional; confirm.
        """
        number = None
        for point_in_history in range(len(self.hand_list)):
            if self.confirm_fingers_from_point_in_past(point_in_history):
                number = self.get_number_of_fingers(point_in_history)
        return number
    def confirm_fingers_from_point_in_past(self, x) -> bool:
        """True when the finger count at index x stayed unchanged for at
        least 2 seconds going further back in history."""
        time_now = self.get_time(x)
        time_old = time_now
        i = x + 1
        # Walk backwards in time while the finger count matches index x.
        while i < len(self.hand_list) and self.get_number_of_fingers(i) == self.get_number_of_fingers(x):
            time_old = self.get_time(i)
            i += 1
        time_difference = time_now - time_old
        # timedelta(days=0, seconds=2) -> the 2-second confirmation window.
        return time_difference >= datetime.timedelta(0, 2, 0, 0, 0, 0, 0)
    def confirmed_delete(self, button_generator: ButtonGenerator) -> bool:
        """True when the delete button (index 3) was dwelled on."""
        delete = False
        if self.confirmed_buttons(button_generator.generate_all_buttons()[3]):
            delete = True
        return delete
    def confirmed_operator(self, button_generator: ButtonGenerator):
        """Return the operator whose button was dwelled on, else None.

        Buttons are tested in fixed order with no short-circuit, so if
        several buttons confirm, the last one listed wins.
        """
        operator = None
        if self.confirmed_buttons(button_generator.generate_all_buttons()[0]):
            operator = "+"
        if self.confirmed_buttons(button_generator.generate_all_buttons()[1]):
            operator = "-"
        if self.confirmed_buttons(button_generator.generate_all_buttons()[2]):
            operator = "*"
        if self.confirmed_buttons(button_generator.generate_all_buttons()[4]):
            operator = "/"
        if self.confirmed_buttons(button_generator.generate_all_buttons()[6]):
            operator = "**"
        if self.confirmed_buttons(button_generator.generate_all_buttons()[7]):
            operator = "v"
        if self.confirmed_buttons(button_generator.generate_all_buttons()[8]):
            operator = "//"
        if self.confirmed_buttons(button_generator.generate_all_buttons()[9]):
            operator = "%"
        return operator
    def confirmed_buttons(self, button: Button):
        """True when any history point confirms a dwell on *button*."""
        for point_in_history in range(len(self.hand_list)):
            if self.confirm_button_from_point_in_past(button, point_in_history):
                return True
        return False
    def confirm_button_from_point_in_past(self, button: Button, x) -> bool:
        """True when, over the 2 seconds before index x, the hand center was
        inside *button* clearly more often than outside (2*in > out)."""
        time_now = self.get_time(x)
        time_old = time_now - datetime.timedelta(0, 2, 0, 0, 0, 0, 0)
        i = x + 1
        in_button_count = 0
        out_of_button_count = 0
        while i < len(self.hand_list) and time_old <= time_now:
            time_now = self.get_time(i)
            if button.contains_point(self.get_center_of_hand(i)):
                in_button_count += 1
            else:
                out_of_button_count += 1
            i += 1
        return (2 * in_button_count) > out_of_button_count
    def reset(self):
        """Forget all recorded hand states."""
        self.hand_list = []
| BogyMitutoyoCTL/CalculatorCV | History.py | Python | mit | 3,677 |
# DO NOT EDIT THIS FILE!
#
# Python module CosNotifyChannelAdmin__POA generated by omniidl
import omniORB
# Register this generated package with omniORB's module bookkeeping before
# the IDL stub files populate it.
omniORB.updateModule("CosNotifyChannelAdmin__POA")
# ** 1. Stub files contributing to this module
import CosNotifyChannelAdmin_idl
# ** 2. Sub-modules
# ** 3. End
| amonmoce/corba_examples | omniORBpy-4.2.1/build/python/COS/CosNotifyChannelAdmin__POA/__init__.py | Python | mit | 273 |
## Code to combine the output files per disease from individual differential expression analysis
## Written by Sumaiya Nazeen, [email protected]
## Python version 2.7.6
## Platform: 3.13.0-35-gneric #62-Ubuntu (64-bit)
## The output file should preferrably have a .tab extension, so that it can be easily opened with MS Excel for further calculations
## The input file list should contain all the output files under a certain disease specified by the user.
## Example command to generate the combined results file:
##### $python combineOutputOfDiffExp.py combo_asthma.tab diff_a1.tab diff_a2.tab diff_a3.tab diff_a4.tab diff_a5.tab
import sys
import numpy as np
def getGenes(inFile):
    """Return the gene names (2nd tab-separated column) from inFile.

    The first line is treated as a header and skipped.  The file is
    streamed and closed properly (the previous version leaked the file
    handle and read the whole file into memory).
    """
    genes = []
    with open(inFile) as f:
        next(f, None)  # skip the header row
        for line in f:
            genes.append(line.strip().split('\t')[1])
    return genes
def getPs(inFile):
    """Return {gene: p-value} from inFile (header row skipped).

    Columns are tab-separated; column 2 is the gene, column 4 the
    p-value.  A p-value of "NA" is stored as -1.0.  The file is streamed
    and closed properly (the previous version leaked the handle).
    """
    mF = {}
    with open(inFile) as f:
        next(f, None)  # skip the header row
        for line in f:
            x = line.strip().split('\t')
            if x[3] == "NA":
                x[3] = -1
            mF[x[1]] = float(x[3])
    return mF
def makeMap(fileList, ofile):
    """Combine per-file p-values into one gene-by-file table in ofile.

    Each output row is: <gene>\t<p_file1>\t...\t<p_fileN>, with -1 for
    (gene, file) pairs where the gene was absent.

    Fixes vs. the previous version: `xrange` replaced by `enumerate`
    (works on Python 2 and 3), the O(n) `g not in m.keys()` membership
    test replaced by a dict lookup, and the output file is managed with
    a context manager.
    """
    m = {}
    for i, f in enumerate(fileList):
        gF = getGenes(f)
        mF = getPs(f)
        for g in gF:
            if g not in m:
                m[g] = [-1] * len(fileList)
            m[g][i] = mF[g]
    with open(ofile, 'w') as of:
        for g in m:
            of.write(g + ''.join('\t' + str(v) for v in m[g]) + '\n')
def main():
    """CLI entry point: combineOutputOfDiffExp.py <outFile> <inFile list>."""
    if len(sys.argv) < 3:
        # Parenthesized single-argument print works on Python 2 and 3.
        print("Usage: ./combineOutputOfDiffExp.py <outFile> <inFile list>")
        sys.exit(1)
    ofile = sys.argv[1]
    inFiles = sys.argv[2:]
    makeMap(inFiles, ofile)
if __name__ == "__main__":
    main()
| snz20/3TierMA | combineOutputOfDiffExp.py | Python | mit | 1,613 |
import os
import os.path
import getpass
import json
import time
# Sentinel meaning "use the default path" for load()/save() arguments.
default = object()
# Version stamp written into saved files; bump when the on-disk format changes.
format_version = 1
def get_default_registry_path():
    """Return the user-wide registry file path (~/.checklists)."""
    home = os.path.expanduser('~')
    return os.path.join(home, '.checklists')
def get_default_project_path():
    """Return the project file path in the current working directory."""
    cwd = os.getcwd()
    return os.path.join(cwd, '.checklist')
def json_dump_pretty(data):
    """Serialize *data* as stable, human-readable JSON.

    Keys are sorted and output is 4-space indented so saved files diff
    cleanly between runs.
    """
    options = {'sort_keys': True, 'indent': 4, 'separators': (', ', ': ')}
    return json.dumps(data, **options)
class Category(object):
    """A named group of checklist items."""

    def __init__(self, name, items=None):
        self.name = name
        self.items = items if items else []

    def serialize(self):
        """Return a JSON-ready dict for this category."""
        return {'name': self.name, 'items': self.items}

    @classmethod
    def deserialize(cls, data):
        """Rebuild a Category from a dict produced by serialize()."""
        return cls(name=data['name'], items=data['items'])
class Checklist(object):
    """A named checklist: a mapping of category name -> Category."""

    def __init__(self, name, categories=None):
        self.name = name
        self.categories = {}
        for category in (categories or []):
            self.categories[category.name] = category

    def serialize(self):
        """Return a JSON-ready dict for this checklist."""
        serialized_cats = [cat.serialize()
                           for cat in self.categories.values()]
        return {'name': self.name, 'categories': serialized_cats}

    @classmethod
    def deserialize(cls, data):
        """Rebuild a Checklist from a dict produced by serialize()."""
        cats = [Category.deserialize(cat_data)
                for cat_data in data['categories']]
        return cls(name=data['name'], categories=cats)
class FormatError(Exception):
    """Raised when a saved file was written by a newer format version."""
    pass
class Registry(dict):
    """All known checklists, keyed by name; persisted as one JSON file."""

    @classmethod
    def load(cls, registry_path=default):
        """Read the registry file; an absent file yields an empty registry.

        :raises FormatError: when the file was written by a newer format.
        """
        if registry_path == default:
            registry_path = get_default_registry_path()
        if not os.path.exists(registry_path):
            return cls()
        with open(registry_path) as f:
            data = json.load(f)
        version = data.pop('version')
        if version != 1:
            raise FormatError(
                "The version of checklist used to save this registry "
                "is newer than your current version: please upgrade "
                "with 'pip install checklist'.")
        registry = cls()
        for name, checklist_data in data.items():
            registry[name] = Checklist.deserialize(checklist_data)
        return registry

    def save(self, registry_path=default):
        """Write the registry as pretty JSON.

        Serialization happens fully before the file is opened, so an
        error cannot leave a half-written registry behind.
        """
        if registry_path == default:
            registry_path = get_default_registry_path()
        data = {name: checklist.serialize()
                for name, checklist in self.items()}
        data['version'] = format_version
        serialized = json_dump_pretty(data)
        with open(registry_path, 'w') as f:
            f.write(serialized)
class Check(object):
    """One recorded confirmation that a checklist item was verified."""

    def __init__(self, description, category, username=None, timestamp=None):
        self.description = description
        self.category = category
        self.username = username or getpass.getuser()
        self.timestamp = timestamp or time.time()

    def serialize(self):
        """Return a JSON-ready dict for this check."""
        return {
            'description': self.description,
            'category': self.category,
            'username': self.username,
            'timestamp': self.timestamp,
        }

    @classmethod
    def deserialize(cls, data):
        """Rebuild a Check from a dict produced by serialize()."""
        fields = ('description', 'category', 'username', 'timestamp')
        return cls(**{name: data[name] for name in fields})
class Project(object):
    """Per-directory state: which checklist applies and the check history."""

    def __init__(self, registry, checklist_name, history=None):
        self.checklist_name = checklist_name
        self.checklist = registry[self.checklist_name]
        self.history = history or []
        self.last_checks = {}
        # Index the most recent Check per (description, category) pair.
        # XXX this is gonna be a slow approach for a large project: ideally,
        # traverse from the most recent and then stop when all current items
        # have been checked.
        for check in self.history:
            self.last_checks[(check.description, check.category)] = check

    def mark_checked(self, item, category):
        """Record that *item* in *category* was just verified."""
        self.history.append(Check(
            description=item,
            category=category.name,
        ))

    @classmethod
    def load(cls, registry, path=default):
        """Load project state from *path*; None when no project file exists.

        :raises FormatError: when the file was written by a newer format.
        """
        if path == default:
            path = get_default_project_path()
        if not os.path.exists(path):
            return
        with open(path) as f:
            data = json.load(f)
        version = data.pop('version')
        if version == 1:
            return cls(registry,
                       checklist_name=data['checklist_name'],
                       history=[Check.deserialize(check_data) for
                                check_data in data['history']])
        else:
            raise FormatError(
                "The version of checklist used to save this registry "
                "is newer than your current version: please upgrade "
                "with 'pip install checklist'.")

    def save(self, path=default):
        """Write project state as pretty JSON."""
        if path == default:
            path = get_default_project_path()
        data = {
            'version': format_version,
            'checklist_name': self.checklist_name,
            'history': [check.serialize() for check in self.history],
        }
        s = json_dump_pretty(data)
        with open(path, 'w') as f:
            f.write(s)

    def check_tree_time(self):
        """Return the newest mtime of any file under the tree (0 if none),
        excluding the project file itself.

        BUG FIX: the previous version iterated the *directory* paths from
        os.walk (misleadingly named `filepaths`), so edits to files that did
        not change a directory mtime were missed and the project-file
        exclusion could never match.
        """
        # XXX Get this from the Project instead?
        path = os.getcwd()
        project_file_path = os.path.join(path, '.checklist')
        times = [
            os.path.getmtime(os.path.join(dirpath, name))
            for dirpath, dirnames, filenames in os.walk(path)
            for name in filenames
            if os.path.join(dirpath, name) != project_file_path
        ]
        if not times:
            return 0
        return max(times)

    def verify(self):
        """True when every item of every category was checked after the
        most recent edit to the tree."""
        modtime = self.check_tree_time()
        for category in self.checklist.categories.values():
            for item in category.items:
                check = self.last_checks.get((item, category.name))
                if (not check) or (check.timestamp < modtime):
                    return False
        return True

    def items_to_check(self, category=None):
        """Yield (item, last_check, category) for items needing a re-check.

        *category* may be a category name to restrict the scan.
        BUG FIX: the previous version iterated self.checklist.categories (a
        dict, which yields *names*, not Category objects) to rebuild a
        lookup and then called .values() on a list, so passing a category
        name always raised.
        """
        if category:
            categories = {category: self.checklist.categories[category]}
        else:
            categories = self.checklist.categories
        modtime = self.check_tree_time()
        for category in categories.values():
            for item in category.items:
                last_check = self.last_checks.get((item, category.name))
                if (not last_check) or (last_check.timestamp < modtime):
                    yield item, last_check, category
| storborg/checklist | checklist/model.py | Python | mit | 6,789 |
from django.utils import timezone
from waldur_core.structure import models as structure_models
from . import models
def update_daily_quotas(sender, instance, created=False, **kwargs):
    """Mirror a project's or customer's quota usage into today's
    DailyQuotaHistory row.

    Quotas with other scopes are ignored; for existing quotas nothing is
    written unless the usage value actually changed.
    """
    scope = instance.scope
    tracked_scopes = (structure_models.Project, structure_models.Customer)
    if not isinstance(scope, tracked_scopes):
        return
    if not (created or instance.tracker.has_changed('usage')):
        return
    models.DailyQuotaHistory.objects.update_or_create_quota(
        scope=scope,
        name=instance.name,
        date=timezone.now().date(),
        usage=instance.usage,
    )
| opennode/waldur-mastermind | src/waldur_mastermind/analytics/handlers.py | Python | mit | 585 |
import logging
from django.contrib.auth.signals import user_login_failed
from django.dispatch import receiver
logger = logging.getLogger(__name__)
@receiver(user_login_failed)
def log_failed_login(sender, **kwargs):
    """Log every failed authentication attempt with the username used.

    Uses lazy %-style logging args so the message is only formatted when
    the record is actually emitted.
    """
    logger.info("login attempt for username '%s' failed",
                kwargs['credentials']['username'])
| zackmdavis/Finetooth | core/signals.py | Python | mit | 333 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-11 08:09
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: make Ticket.private_comment optional (blank/null)."""
    dependencies = [
        ('tickets', '0016_auto_20161231_0937'),
    ]
    operations = [
        migrations.AlterField(
            model_name='ticket',
            name='private_comment',
            field=models.TextField(blank=True, null=True),
        ),
    ]
| prontotools/zendesk-tickets-machine | zendesk_tickets_machine/tickets/migrations/0017_auto_20170111_0809.py | Python | mit | 468 |
# Extract and reorder selected columns from AllUnique.csv into ExtractB.csv.
FileIn = 'RegDati/Out/AllUnique.csv'
FileOut = FileIn.replace("AllUnique.csv", "ExtractB.csv")


def field(parts, i):
    """Column *i* of the row with its trailing comma re-attached.

    Reproduces the original per-column slicing: '' + ',' when the row is
    shorter than i+1 columns.
    """
    return ''.join(parts[i:i + 1]) + ","


cont = 0
# Context managers guarantee both files are closed even on error;
# the original also split every line ten times -- now it splits once.
with open(FileIn, 'r') as fIn, open(FileOut, 'w') as fOut:
    fOut.write('"Prov","Comu","Dist","CodScuola","Descrizione","ScAssiciataA","Indirizzo","ScPrincipaleCon"' + "\n")
    for line in fIn:
        cont += 1
        if cont > 1:
            parts = line.split(",")
            a06 = field(parts, 5)
            a09 = field(parts, 8)
            # a06 or a09 carries the code of the main school ("ScPrincipale");
            # a09 wins unless it is the empty quoted field.
            scPrin = a09 if a09 != '"",' else a06
            # Column 10 flags a main school; it keeps the newline and no comma.
            a10 = ''.join(parts[9:10])
            fOut.write(field(parts, 0) + field(parts, 1) + field(parts, 2)
                       + field(parts, 4) + field(parts, 3)
                       + scPrin + field(parts, 6) + a10)
print('Linee lette = ' + str(cont))
| scarimp/BB_UU_PYPA | ExtractFromUniqueB.py | Python | mit | 1,058 |
#!/usr/bin/env python
"""
Copyright (c) 2014-2022 Maltrail developers (https://github.com/stamparm/maltrail/)
See the file 'LICENSE' for copying permission
"""
import re
from core.common import retrieve_content
__url__ = "https://www.maxmind.com/en/high-risk-ip-sample-list"
__check__ = "Sample List of Higher Risk IP Addresses"
__info__ = "bad reputation (suspicious)"
__reference__ = "maxmind.com"
def fetch():
    """Collect higher-risk IP addresses from the MaxMind sample page.

    Returns a dict mapping each IP to the (info, reference) tuple used by
    the trails framework; empty when the page sanity-check string is absent.
    """
    page = retrieve_content(__url__)
    if __check__ not in page:
        return {}
    ip_pattern = re.compile(r"high-risk-ip-sample/([\d.]+)")
    return {hit.group(1): (__info__, __reference__) for hit in ip_pattern.finditer(page)}
| stamparm/maltrail | trails/feeds/maxmind.py | Python | mit | 662 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <[email protected]>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
"""
import re
import cgi
__all__ = ['highlight']
class Highlighter(object):
    """
    Tokenizer-driven syntax highlighter emitting HTML <span>/<a> markup.
    Supported modes: PYTHON/WEB2PY, C/CPP, HTML/HTML_PLAIN.
    """
    def __init__(
        self,
        mode,
        link=None,
        styles=None,
    ):
        """
        Initialise highlighter:
        mode = language (PYTHON, WEB2PY,C, CPP, HTML, HTML_PLAIN)
        """
        styles = styles or {}
        mode = mode.upper()
        if link and link[-1] != '/':
            link = link + '/'
        self.link = link
        self.styles = styles
        self.output = []
        # span_style tracks the inline style of the currently open <span>,
        # so change_style can close/open tags lazily.
        self.span_style = None
        # Each dialect maps onto a base token table plus suppressed tokens.
        if mode == 'WEB2PY':
            (mode, self.suppress_tokens) = ('PYTHON', [])
        elif mode == 'PYTHON':
            self.suppress_tokens = ['GOTOHTML']
        elif mode == 'CPP':
            (mode, self.suppress_tokens) = ('C', [])
        elif mode == 'C':
            self.suppress_tokens = ['CPPKEYWORD']
        elif mode == 'HTML_PLAIN':
            (mode, self.suppress_tokens) = ('HTML', ['GOTOPYTHON'])
        elif mode == 'HTML':
            self.suppress_tokens = []
        else:
            raise SyntaxError, 'Unknown mode: %s' % mode
        self.mode = mode
    def c_tokenizer(
        self,
        token,
        match,
        style,
    ):
        """
        Callback for C specific highlighting.
        """
        value = cgi.escape(match.group())
        self.change_style(token, style)
        self.output.append(value)
    def python_tokenizer(
        self,
        token,
        match,
        style,
    ):
        """
        Callback for python specific highlighting.
        """
        value = cgi.escape(match.group())
        # Triple-quoted strings switch the tokenizer into a dedicated mode
        # until the matching closing quotes are seen.
        if token == 'MULTILINESTRING':
            self.change_style(token, style)
            self.output.append(value)
            self.strMultilineString = match.group(1)
            return 'PYTHONMultilineString'
        elif token == 'ENDMULTILINESTRING':
            if match.group(1) == self.strMultilineString:
                self.output.append(value)
                self.strMultilineString = ''
                return 'PYTHON'
        # 'link:URL;CSS' styles render web2py keywords as doc hyperlinks.
        if style and style[:5] == 'link:':
            self.change_style(None, None)
            (url, style) = style[5:].split(';', 1)
            if url == 'None' or url == '':
                self.output.append('<span style="%s">%s</span>'
                                   % (style, value))
            else:
                self.output.append('<a href="%s%s" style="%s">%s</a>'
                                   % (url, value, style, value))
        else:
            self.change_style(token, style)
            self.output.append(value)
        if token == 'GOTOHTML':
            return 'HTML'
        return None
    def html_tokenizer(
        self,
        token,
        match,
        style,
    ):
        """
        Callback for HTML specific highlighting.
        """
        value = cgi.escape(match.group())
        self.change_style(token, style)
        self.output.append(value)
        if token == 'GOTOPYTHON':
            return 'PYTHON'
        return None
    # Token tables per mode: ordered (TOKEN, compiled regex, style) triples.
    # style None = unstyled, 'Keep' = keep the current span open,
    # 'link:...' = hyperlink (handled by python_tokenizer above).
    all_styles = {
        'C': (c_tokenizer, (
            ('COMMENT', re.compile(r'//.*\r?\n'),
             'color: green; font-style: italic'),
            ('MULTILINECOMMENT', re.compile(r'/\*.*?\*/', re.DOTALL),
             'color: green; font-style: italic'),
            ('PREPROCESSOR', re.compile(r'\s*#.*?[^\\]\s*\n',
             re.DOTALL), 'color: magenta; font-style: italic'),
            ('PUNC', re.compile(r'[-+*!&|^~/%\=<>\[\]{}(),.:]'),
             'font-weight: bold'),
            ('NUMBER',
             re.compile(r'0x[0-9a-fA-F]+|[+-]?\d+(\.\d+)?([eE][+-]\d+)?|\d+'),
             'color: red'),
            ('KEYWORD', re.compile(r'(sizeof|int|long|short|char|void|'
             + r'signed|unsigned|float|double|'
             + r'goto|break|return|continue|asm|'
             + r'case|default|if|else|switch|while|for|do|'
             + r'struct|union|enum|typedef|'
             + r'static|register|auto|volatile|extern|const)(?![a-zA-Z0-9_])'),
             'color:#185369; font-weight: bold'),
            ('CPPKEYWORD',
             re.compile(r'(class|private|protected|public|template|new|delete|'
             + r'this|friend|using|inline|export|bool|throw|try|catch|'
             + r'operator|typeid|virtual)(?![a-zA-Z0-9_])'),
             'color: blue; font-weight: bold'),
            ('STRING', re.compile(r'r?u?\'(.*?)(?<!\\)\'|"(.*?)(?<!\\)"'),
             'color: #FF9966'),
            ('IDENTIFIER', re.compile(r'[a-zA-Z_][a-zA-Z0-9_]*'),
             None),
            ('WHITESPACE', re.compile(r'[ \r\n]+'), 'Keep'),
        )),
        'PYTHON': (python_tokenizer, (
            ('GOTOHTML', re.compile(r'\}\}'), 'color: red'),
            ('PUNC', re.compile(r'[-+*!|&^~/%\=<>\[\]{}(),.:]'),
             'font-weight: bold'),
            ('NUMBER',
             re.compile(r'0x[0-9a-fA-F]+|[+-]?\d+(\.\d+)?([eE][+-]\d+)?|\d+'
             ), 'color: red'),
            ('KEYWORD',
             re.compile(r'(def|class|break|continue|del|exec|finally|pass|'
             + r'print|raise|return|try|except|global|assert|lambda|'
             + r'yield|for|while|if|elif|else|and|in|is|not|or|import|'
             + r'from|True|False)(?![a-zA-Z0-9_])'),
             'color:#185369; font-weight: bold'),
            ('WEB2PY',
             re.compile(r'(request|response|session|cache|redirect|local_import|HTTP|TR|XML|URL|BEAUTIFY|A|BODY|BR|B|CAT|CENTER|CODE|COL|COLGROUP|DIV|EM|EMBED|FIELDSET|LEGEND|FORM|H1|H2|H3|H4|H5|H6|IFRAME|HEAD|HR|HTML|I|IMG|INPUT|LABEL|LI|LINK|MARKMIN|MENU|META|OBJECT|OL|ON|OPTION|P|PRE|SCRIPT|SELECT|SPAN|STYLE|TABLE|THEAD|TBODY|TFOOT|TAG|TD|TEXTAREA|TH|TITLE|TT|T|UL|XHTML|IS_SLUG|IS_STRONG|IS_LOWER|IS_UPPER|IS_ALPHANUMERIC|IS_DATETIME|IS_DATETIME_IN_RANGE|IS_DATE|IS_DATE_IN_RANGE|IS_DECIMAL_IN_RANGE|IS_EMAIL|IS_EXPR|IS_FLOAT_IN_RANGE|IS_IMAGE|IS_INT_IN_RANGE|IS_IN_SET|IS_IPV4|IS_LIST_OF|IS_LENGTH|IS_MATCH|IS_EQUAL_TO|IS_EMPTY_OR|IS_NULL_OR|IS_NOT_EMPTY|IS_TIME|IS_UPLOAD_FILENAME|IS_URL|CLEANUP|CRYPT|IS_IN_DB|IS_NOT_IN_DB|DAL|Field|SQLFORM|SQLTABLE|xmlescape|embed64)(?![a-zA-Z0-9_])'
             ), 'link:%(link)s;text-decoration:None;color:#FF5C1F;'),
            ('MAGIC', re.compile(r'self|None'),
             'color:#185369; font-weight: bold'),
            ('MULTILINESTRING', re.compile(r'r?u?(\'\'\'|""")'),
             'color: #FF9966'),
            ('STRING', re.compile(r'r?u?\'(.*?)(?<!\\)\'|"(.*?)(?<!\\)"'
             ), 'color: #FF9966'),
            ('IDENTIFIER', re.compile(r'[a-zA-Z_][a-zA-Z0-9_]*'),
             None),
            ('COMMENT', re.compile(r'\#.*\r?\n'),
             'color: green; font-style: italic'),
            ('WHITESPACE', re.compile(r'[ \r\n]+'), 'Keep'),
        )),
        'PYTHONMultilineString': (python_tokenizer,
                                  (('ENDMULTILINESTRING',
                                    re.compile(r'.*?("""|\'\'\')',
                                    re.DOTALL), 'color: darkred'), )),
        'HTML': (html_tokenizer, (
            ('GOTOPYTHON', re.compile(r'\{\{'), 'color: red'),
            ('COMMENT', re.compile(r'<!--[^>]*-->|<!>'),
             'color: green; font-style: italic'),
            ('XMLCRAP', re.compile(r'<![^>]*>'),
             'color: blue; font-style: italic'),
            ('SCRIPT', re.compile(r'<script .*?</script>', re.IGNORECASE
             + re.DOTALL), 'color: black'),
            ('TAG', re.compile(r'</?\s*[a-zA-Z0-9]+'),
             'color: darkred; font-weight: bold'),
            ('ENDTAG', re.compile(r'/?>'),
             'color: darkred; font-weight: bold'),
        )),
    }
    def highlight(self, data):
        """
        Syntax highlight some python code.
        Returns html version of code.
        """
        i = 0
        mode = self.mode
        # Longest-prefix scan: try each token regex of the current mode at
        # position i; a tokenizer callback may switch the mode.
        while i < len(data):
            for (token, o_re, style) in Highlighter.all_styles[mode][1]:
                if not token in self.suppress_tokens:
                    match = o_re.match(data, i)
                    if match:
                        if style:
                            new_mode = \
                                Highlighter.all_styles[mode][0](self,
                                    token, match, style
                                    % dict(link=self.link))
                        else:
                            new_mode = \
                                Highlighter.all_styles[mode][0](self,
                                    token, match, style)
                        if not new_mode is None:
                            mode = new_mode
                        i += max(1, len(match.group()))
                        break
            else:
                # for-else: no token matched, emit the character verbatim.
                self.change_style(None, None)
                self.output.append(data[i])
                i += 1
        self.change_style(None, None)
        return ''.join(self.output).expandtabs(4)
    def change_style(self, token, style):
        """
        Generate output to change from existing style to another style only.
        """
        # Caller-supplied styles override the built-in table; 'Keep' leaves
        # the currently open <span> untouched.
        if token in self.styles:
            style = self.styles[token]
        if self.span_style != style:
            if style != 'Keep':
                if not self.span_style is None:
                    self.output.append('</span>')
                if not style is None:
                    self.output.append('<span style="%s">' % style)
                self.span_style = style
def highlight(
    code,
    language,
    link='/examples/globals/vars/',
    counter=1,
    styles=None,
    highlight_line=None,
    context_lines=None,
    attributes=None,
):
    # Render `code` as a two-column HTML table (line numbers | highlighted
    # source).  `counter` is an int (first line number), a string (same
    # label on every line) or None (no numbers).  `highlight_line` marks one
    # line; `context_lines` trims output to that many lines around it.
    # `attributes` entries whose keys start with '_' become attributes of
    # the outer <table> tag.
    styles = styles or {}
    attributes = attributes or {}
    if not 'CODE' in styles:
        code_style = """
        font-size: 11px;
        font-family: Bitstream Vera Sans Mono,monospace;
        background-color: transparent;
        margin: 0;
        padding: 5px;
        border: none;
        overflow: auto;
        white-space: pre !important;\n"""
    else:
        code_style = styles['CODE']
    if not 'LINENUMBERS' in styles:
        linenumbers_style = """
        font-size: 11px;
        font-family: Bitstream Vera Sans Mono,monospace;
        background-color: transparent;
        margin: 0;
        padding: 5px;
        border: none;
        color: #A0A0A0;\n"""
    else:
        linenumbers_style = styles['LINENUMBERS']
    if not 'LINEHIGHLIGHT' in styles:
        linehighlight_style = "background-color: #EBDDE2;"
    else:
        linehighlight_style = styles['LINEHIGHLIGHT']
    # Unknown languages fall back to plain HTML-escaped text.
    if language and language.upper() in ['PYTHON', 'C', 'CPP', 'HTML',
                                         'WEB2PY']:
        code = Highlighter(language, link, styles).highlight(code)
    else:
        code = cgi.escape(code)
    lines = code.split('\n')
    if counter is None:
        linenumbers = [''] * len(lines)
    elif isinstance(counter, str):
        linenumbers = [cgi.escape(counter)] * len(lines)
    else:
        linenumbers = [str(i + counter) + '.' for i in
                       xrange(len(lines))]
    if highlight_line:
        # Translate the absolute line number into an index into `lines`.
        if counter and not isinstance(counter, str):
            lineno = highlight_line - counter
        else:
            lineno = highlight_line
        if lineno<len(lines):
            lines[lineno] = '<div style="%s">%s</div>' % (linehighlight_style, lines[lineno])
            linenumbers[lineno] = '<div style="%s">%s</div>' % (linehighlight_style, linenumbers[lineno])
        if context_lines:
            if lineno + context_lines < len(lines):
                del lines[lineno + context_lines:]
                del linenumbers[lineno + context_lines:]
            if lineno -context_lines > 0:
                del lines[0:lineno - context_lines]
                del linenumbers[0:lineno - context_lines]
    code = '<br/>'.join(lines)
    numbers = '<br/>'.join(linenumbers)
    # '_key': None  -> bare attribute;  '_key': value -> key="value".
    items = attributes.items()
    fa = ' '.join([key[1:].lower() for (key, value) in items if key[:1]
                   == '_' and value is None] + ['%s="%s"'
                  % (key[1:].lower(), str(value).replace('"', "'"))
                  for (key, value) in attributes.items() if key[:1]
                  == '_' and value])
    if fa:
        fa = ' ' + fa
    return '<table%s><tr valign="top"><td style="width:40px; text-align: right;"><pre style="%s">%s</pre></td><td><pre style="%s">%s</pre></td></tr></table>'\
        % (fa, linenumbers_style, numbers, code_style, code)
if __name__ == '__main__':
import sys
argfp = open(sys.argv[1])
data = argfp.read()
argfp.close()
print '<html><body>' + highlight(data, sys.argv[2])\
+ '</body></html>'
| SEA000/uw-empathica | empathica/gluon/highlight.py | Python | mit | 12,945 |
"""Engine for writing data to a JSON file"""
import os
import json
from retriever.lib.models import Engine, no_cleanup
from retriever import DATA_DIR
class DummyConnection:
    # No-op stand-in exposing the DB-API connection surface (cursor/commit/
    # rollback/close); the JSON engine writes straight to a file, so every
    # method does nothing.
    def cursor(self):
        pass
    def commit(self):
        pass
    def rollback(self):
        pass
    def close(self):
        pass
class DummyCursor(DummyConnection):
    # Cursor stand-in; inherits DummyConnection's no-op methods unchanged.
    pass
class engine(Engine):
    """Engine instance for writing data to a JSON file.

    Produces one JSON array per table; each insert appends one object.
    """
    name = "JSON"
    abbreviation = "json"
    # Mapping from retriever datatypes to their notional SQL equivalents.
    datatypes = {
        "auto": "INTEGER",
        "int": "INTEGER",
        "bigint": "INTEGER",
        "double": "REAL",
        "decimal": "REAL",
        "char": "TEXT",
        "bool": "INTEGER",
    }
    required_opts = [
        ("table_name",
         "Format of table name",
         os.path.join(DATA_DIR, "{db}_{table}.json")),
    ]
    def create_db(self):
        """Override create_db since there is no database, just a JSON file."""
        return None
    def create_table(self):
        """Create the table by opening the output file and writing the
        opening bracket of the JSON array."""
        self.output_file = open(self.table_name(), "w")
        self.output_file.write('[')
    def disconnect(self):
        """Close out the JSON with a ] and close the file.

        Each insert leaves a trailing comma, so the file is closed, read
        back, stripped of that comma, terminated with ']' and re-written.
        This is inefficient for large files and may deserve replacement.
        """
        try:
            self.output_file.close()
            current_output_file = open(self.table_name(), "r")
            file_contents = current_output_file.readlines()
            current_output_file.close()
            file_contents[-1] = file_contents[-1].strip(',')
            file_contents.append('\n]\n')
            self.output_file = open(self.table_name(), "w")
            self.output_file.writelines(file_contents)
            self.output_file.close()
        except Exception:
            # When disconnect is called by app.connect_wizard.ConfirmPage to
            # confirm the connection, output_file doesn't exist yet; that is
            # fine, so swallow the error.  (Narrowed from a bare `except:`
            # so KeyboardInterrupt/SystemExit still propagate.)
            pass
    def execute(self, statement, commit=True):
        """Write one serialized row (plus trailing comma) to the output file."""
        self.output_file.write('\n' + statement + ',')
    def format_insert_value(self, value, datatype):
        """Formats a value for an insert statement

        Overrides default behavior by:
        1. Storing decimal numbers as floats rather than strings
        2. Not escaping quotes (handled by the json module)
        3. Replacing "null" with None which will convert to the 'null'
           keyword in json
        """
        # TODO: much of this duplicates engine.format_insert_value;
        # refactoring to share that code would be preferable.
        datatype = datatype.split('-')[-1]
        strvalue = str(value).strip()
        # Remove any quotes already surrounding the string
        quotes = ["'", '"']
        if len(strvalue) > 1 and strvalue[0] == strvalue[-1] and strvalue[0] in quotes:
            strvalue = strvalue[1:-1]
        nulls = ("null", "none")
        if strvalue.lower() in nulls:
            return None
        elif datatype in ("int", "bigint", "bool"):
            if strvalue:
                # Truncate a decimal representation to its integer part.
                intvalue = strvalue.split('.')[0]
                if intvalue:
                    return int(intvalue)
                else:
                    return None
            else:
                return None
        elif datatype in ("double", "decimal"):
            if strvalue:
                return float(strvalue)
            else:
                return None
        elif datatype == "char":
            # Nulls were already mapped to None above, so whatever remains
            # is a genuine string.  (Removed a dead re-check of `nulls`.)
            return strvalue
        else:
            return None
    def insert_statement(self, values):
        """Return one JSON object (as a string) for a row of values,
        injecting auto-increment column values where the schema needs them."""
        if not hasattr(self, 'auto_column_number'):
            self.auto_column_number = 1
        offset = 0
        for i in range(len(self.table.columns)):
            column = self.table.columns[i]
            if 'auto' in column[1][0]:
                values = values[:i+offset] + [self.auto_column_number] + values[i+offset:]
                self.auto_column_number += 1
                offset += 1
        # FIXME: Should nulls be inserted here? I'm guessing they should be
        # skipped. Find out.
        datadict = {column[0]: value for column, value in zip(self.table.columns, values)}
        return json.dumps(datadict)
    def table_exists(self, dbname, tablename):
        """Check to see if the data file currently exists"""
        tablename = self.table_name(name=tablename, dbname=dbname)
        return os.path.exists(tablename)
    def get_connection(self):
        """Gets the db connection (a no-op dummy; there is no database)."""
        self.get_input()
        return DummyConnection()
| bendmorris/retriever | engines/jsonengine.py | Python | mit | 5,130 |
# Download the Python helper library from twilio.com/docs/python/install
import os
from twilio.rest import Client
# Your Account Sid and Auth Token from twilio.com/console
# To set up environmental variables, see http://twil.io/secure
account_sid = os.environ['TWILIO_ACCOUNT_SID']
auth_token = os.environ['TWILIO_AUTH_TOKEN']
client = Client(account_sid, auth_token)
# Create a new Messaging Service and print its generated SID.
service = client.messaging \
                .services \
                .create(friendly_name="My First Service")
print(service.sid)
| TwilioDevEd/api-snippets | messaging/services/service-create/service-create.7.x.py | Python | mit | 505 |
import os
from distutils.core import setup
def read(fname):
    """Return the contents of *fname*, resolved relative to this file's
    directory.  Used to feed README.md into the long_description.

    The handle is closed deterministically via ``with`` (the original
    leaked it until garbage collection).
    """
    path = os.path.join(os.path.dirname(__file__), fname)
    with open(path) as fh:
        return fh.read()
def get_version():
    """Extract the package version from the first line of the CHANGES file.

    The first line is expected to look like ``<tag> <version> ...``; the
    second whitespace-separated token is returned.  ``with`` guarantees the
    file is closed even if the read fails.
    """
    with open("CHANGES", "r") as version_fh:
        first_line = version_fh.readline().strip()
    return first_line.split()[1]
# Package metadata; the version comes from CHANGES and the long description
# from README.md, both read at build time.
setup(
    name='cutlass',
    description='An iHMP domain specific API using osdf-python',
    long_description=read('README.md'),
    version=get_version(),
    author='Victor F',
    author_email='[email protected]',
    url='https://hmpdacc.org',
    license='MIT',
    packages=['cutlass', 'cutlass.aspera'],
    requires=['osdf'],
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "License :: OSI Approved :: MIT License",
        "Intended Audience :: Science/Research",
        "Natural Language :: English",
        "Operating System :: POSIX",
        "Programming Language :: Python :: 2.7",
        "Topic :: Utilities",
        "Topic :: Scientific/Engineering",
        "Topic :: Scientific/Engineering :: Bio-Informatics"
    ]
)
| carze/cutlass | setup.py | Python | mit | 1,303 |
from django import forms
class StripeTokenForm(forms.Form):
    # Holds a single opaque token string — presumably the card token produced
    # client-side by Stripe.js; confirm against the checkout template.
    stripe_token = forms.CharField()
| PDFGridder/PDFGridder | billing/forms.py | Python | mit | 99 |
import _plotly_utils.basevalidators
class FamilyValidator(_plotly_utils.basevalidators.StringValidator):
    # Auto-generated validator for `volume.colorbar.title.font.family`:
    # a strict, non-blank string; edits trigger a 'calc' recomputation.
    def __init__(
        self, plotly_name="family", parent_name="volume.colorbar.title.font", **kwargs
    ):
        super(FamilyValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "calc"),
            no_blank=kwargs.pop("no_blank", True),
            strict=kwargs.pop("strict", True),
            **kwargs
        )
| plotly/plotly.py | packages/python/plotly/plotly/validators/volume/colorbar/title/font/_family.py | Python | mit | 524 |
class _fileobject(object):
    """Faux file object attached to a socket object.

    Buffered file-like adapter over a socket's recv/sendall (Python 2
    string semantics: buffers are str, not bytes).
    """
    default_bufsize = 8192
    name = "<socket>"
    __slots__ = ["mode", "bufsize", "softspace",
                 # "closed" is a property, see below
                 "_sock", "_rbufsize", "_wbufsize", "_rbuf", "_wbuf"]
    def __init__(self, sock, mode='rb', bufsize=-1):
        self._sock = sock
        self.mode = mode # Not actually used in this version
        if bufsize < 0:
            bufsize = self.default_bufsize
        self.bufsize = bufsize
        self.softspace = False
        # bufsize 0 = unbuffered reads, 1 = line-buffered writes with
        # default-sized reads, otherwise symmetric buffering.
        if bufsize == 0:
            self._rbufsize = 1
        elif bufsize == 1:
            self._rbufsize = self.default_bufsize
        else:
            self._rbufsize = bufsize
        self._wbufsize = bufsize
        self._rbuf = "" # A string
        self._wbuf = [] # A list of strings
    def _getclosed(self):
        return self._sock is None
    closed = property(_getclosed, doc="True if the file is closed")
    def close(self):
        # Flush pending writes, then drop the socket reference; closing the
        # underlying socket is the owner's responsibility.
        try:
            if self._sock:
                self.flush()
        finally:
            self._sock = None
    def __del__(self):
        try:
            self.close()
        except:
            # close() may fail if __init__ didn't complete
            pass
    def flush(self):
        if self._wbuf:
            buffer = "".join(self._wbuf)
            self._wbuf = []
            self._sock.sendall(buffer)
    def fileno(self):
        return self._sock.fileno()
    def write(self, data):
        data = str(data) # XXX Should really reject non-string non-buffers
        if not data:
            return
        self._wbuf.append(data)
        # Flush on: unbuffered mode, a newline in line-buffered mode, or a
        # full buffer.
        if (self._wbufsize == 0 or
            self._wbufsize == 1 and '\n' in data or
            self._get_wbuf_len() >= self._wbufsize):
            self.flush()
    def writelines(self, list):
        # XXX We could do better here for very long lists
        # XXX Should really reject non-string non-buffers
        self._wbuf.extend(filter(None, map(str, list)))
        if (self._wbufsize <= 1 or
            self._get_wbuf_len() >= self._wbufsize):
            self.flush()
    def _get_wbuf_len(self):
        buf_len = 0
        for x in self._wbuf:
            buf_len += len(x)
        return buf_len
    def read(self, size=-1):
        # Serve from the internal buffer first; recv the remainder.
        data = self._rbuf
        if size < 0:
            # Read until EOF
            buffers = []
            if data:
                buffers.append(data)
            self._rbuf = ""
            if self._rbufsize <= 1:
                recv_size = self.default_bufsize
            else:
                recv_size = self._rbufsize
            while True:
                data = self._sock.recv(recv_size)
                if not data:
                    break
                buffers.append(data)
            return "".join(buffers)
        else:
            # Read until size bytes or EOF seen, whichever comes first
            buf_len = len(data)
            if buf_len >= size:
                # Entire request satisfied from the buffer.
                self._rbuf = data[size:]
                return data[:size]
            buffers = []
            if data:
                buffers.append(data)
            self._rbuf = ""
            while True:
                left = size - buf_len
                recv_size = max(self._rbufsize, left)
                data = self._sock.recv(recv_size)
                if not data:
                    break
                buffers.append(data)
                n = len(data)
                if n >= left:
                    # Keep any overshoot for the next read.
                    self._rbuf = data[left:]
                    buffers[-1] = data[:left]
                    break
                buf_len += n
            return "".join(buffers)
    def readline(self, size=-1):
        data = self._rbuf
        if size < 0:
            # Read until \n or EOF, whichever comes first
            if self._rbufsize <= 1:
                # Speed up unbuffered case
                assert data == ""
                buffers = []
                recv = self._sock.recv
                while data != "\n":
                    data = recv(1)
                    if not data:
                        break
                    buffers.append(data)
                return "".join(buffers)
            nl = data.find('\n')
            if nl >= 0:
                nl += 1
                self._rbuf = data[nl:]
                return data[:nl]
            buffers = []
            if data:
                buffers.append(data)
            self._rbuf = ""
            while True:
                data = self._sock.recv(self._rbufsize)
                if not data:
                    break
                buffers.append(data)
                nl = data.find('\n')
                if nl >= 0:
                    nl += 1
                    self._rbuf = data[nl:]
                    buffers[-1] = data[:nl]
                    break
            return "".join(buffers)
        else:
            # Read until size bytes or \n or EOF seen, whichever comes first
            nl = data.find('\n', 0, size)
            if nl >= 0:
                nl += 1
                self._rbuf = data[nl:]
                return data[:nl]
            buf_len = len(data)
            if buf_len >= size:
                self._rbuf = data[size:]
                return data[:size]
            buffers = []
            if data:
                buffers.append(data)
            self._rbuf = ""
            while True:
                data = self._sock.recv(self._rbufsize)
                if not data:
                    break
                buffers.append(data)
                left = size - buf_len
                nl = data.find('\n', 0, left)
                if nl >= 0:
                    nl += 1
                    self._rbuf = data[nl:]
                    buffers[-1] = data[:nl]
                    break
                n = len(data)
                if n >= left:
                    self._rbuf = data[left:]
                    buffers[-1] = data[:left]
                    break
                buf_len += n
            return "".join(buffers)
    def readlines(self, sizehint=0):
        total = 0
        list = []
        while True:
            line = self.readline()
            if not line:
                break
            list.append(line)
            total += len(line)
            if sizehint and total >= sizehint:
                break
        return list
    # Iterator protocols
    def __iter__(self):
        return self
    def next(self):
        line = self.readline()
        if not line:
            raise StopIteration
        return line
| longde123/MultiversePlatform | lib/IPCE/Lib/_fileobject.py | Python | mit | 6,599 |
from odict import *
| jmchilton/galaxy-central | modules/cookbook/__init__.py | Python | mit | 20 |
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def HostVirtualSwitchBeaconConfig(vim, *args, **kwargs):
    '''This data object type describes the configuration of the beacon to probe
    connectivity of physical network adapters. A beacon is sent out of one network
    adapter and should arrive on another network adapter in the team. The
    successful roundtrip indicates that the network adapters are working.Define
    this data object to enable beacon probing as a method to validate the link
    status of a physical network adapter. Beacon probing must be configured in
    order to use the beacon status as a criteria to determine if a physical network
    adapter failed.See checkBeacon'''

    obj = vim.client.factory.create('ns0:HostVirtualSwitchBeaconConfig')

    # do some validation checking...
    # Fixed: the message used to claim "at least 2 arguments" and reported
    # only len(args), while the check requires >= 1 counting kwargs too.
    if (len(args) + len(kwargs)) < 1:
        raise IndexError('Expected at least 1 argument got: %d' % (len(args) + len(kwargs)))

    required = [ 'interval' ]
    optional = [ 'dynamicProperty', 'dynamicType' ]

    # Positional args fill required then optional attributes, in order.
    for name, arg in zip(required+optional, args):
        setattr(obj, name, arg)

    # Keyword args must name a known attribute.
    for name, value in kwargs.items():
        if name in required + optional:
            setattr(obj, name, value)
        else:
            raise InvalidArgumentError("Invalid argument: %s.  Expected one of %s" % (name, ", ".join(required + optional)))

    return obj
| xuru/pyvisdk | pyvisdk/do/host_virtual_switch_beacon_config.py | Python | mit | 1,557 |
from __future__ import print_function
#
# Made by duponc_j, modified by sourio_b while supervised by sbriss_a in case he fucked things up (and he did)
# Slightly modified by Arignir
# Version: 1.4.1
#
'''
An Epitech norme checker
Usage: python norme.py <dir to scan> [-nocheat] [-verbose] [-score] [-libc]
-verbose: affiche les messages impossible d\'ouvrir
-nocheat: desactive la detection de la triche
-score: affiche le nombre de faute de norme
-libc: active la verification des fonctions de la libc
-malloc: desactive le controle du malloc
-printline: affiche la ligne provoquant une erreur
-return: active verifier le retour des fonctions (return ;)
-comment: ne pas verifier les commentaire
Non geree:
- Indentation
- +<escape>
- verification de la presence de gl_
Bug:
Il est arrivee que le checker ne trouve aucune faute alors qu\'il en existe, si
ce bug vous arrive maillez moi.
'''
import sys,re,os,pwd
# Logins exempted from the header/author checks (template placeholder logins).
blind_logins = [ "{EPITECH.}" ]
def static_var(varname, value):
    """Decorator that pins an attribute named *varname*, initialised to
    *value*, onto the decorated function — a poor man's C static variable."""
    def attach(target):
        setattr(target, varname, value)
        return target
    return attach
class norme:
    def __init__(self):
        # Default configuration; presumably toggled by the CLI flags
        # described in the module docstring (confirm against main()).
        self.user = []
        self.verbose = 0
        self.cheat = 1
        self.comment = 1
        self.score = 0
        self.note = 0
        self.libc = 1
        # self.malloc = 1
        self.malloc = 0
        self.printline = 0
        self.creturn = 1
        self.the_dir = ""
    def new_file(self):
        # Reset all per-file counters and comment/function state before
        # scanning the next file.
        self.nb_line = 1
        self.nb_return = 0
        self.nb_funcline = 0
        self.nb_func = 0
        self.sys_include = 0
        self.double_inclusion = 0
        self.is_func = 0
        self.in_comment = 0
        self.out_comment = 0
        self.typedef = 0
        if self.verbose == 1:
            print("Scan: %s" % self.file)
    @static_var("passed", 0)
    def check_header(self):
        # Validates the 9-line Epitech header: '/*' on line 1, '*/' on
        # line 9, '**' elsewhere, and (unless cheating checks are off or a
        # blind-listed login is in self.user) that embedded logins match.
        # `norme` is the penalty: -1 for the first hit, 0 on repeats.
        norme = -1
        if self.check_header.passed:
            norme = 0
        for blind in blind_logins:
            if (blind in self.user):
                return
        if (self.nb_line == 1):
            if (self.line[:2] != '/*'):
                self.print_error('Header incorrect', norme)
                self.check_header.__func__.passed = 1
        elif (self.nb_line == 9):
            if (self.line[:2] != '*/'):
                self.print_error('Header incorrect', norme)
                self.check_header.__func__.passed = 1
        elif self.nb_line == 4 or self.nb_line == 7 or self.nb_line == 8:
            if self.cheat:
                # Trailing "first last" author name on these lines.
                p = re.compile('([\w-]* [\w-]*)$')
                test = re.search(p, self.line)
                if test:
                    if not test.group(1) in self.user:
                        self.print_error('Login '+ test.group(1) +' incorrect', norme)
                        self.check_header.__func__.passed = 1
        elif (self.nb_line == 5):
            if self.cheat:
                # Login taken from the "<login@" part of the mail address.
                p = re.compile('<(.*)@')
                test = re.search(p, self.line)
                if test:
                    if not test.group(1) in self.user:
                        self.print_error('Login '+ test.group(1) +' incorrect', norme)
                        self.check_header.__func__.passed = 1
        else:
            if (self.line[:2] != '**'):
                self.print_error('Header incorrect', norme)
                self.check_header.__func__.passed = 1
    @static_var("passed", 0)
    def check_virgule(self):
        # Flags a comma not followed by a space/tab/newline, skipping
        # quoted strings.
        norme = -1
        if self.check_virgule.passed:
            norme = 0
        n = 0
        quote = 0
        while n < len(self.line) and self.line[n] != '\n' :
            if self.line[n] == '\'' or self.line[n] == '"':
                if quote:
                    quote = 0
                else:
                    quote = 1
            if (self.line[n] == ',') and quote == 0:
                if self.line[n + 1] != ' ' and self.line[n + 1] != '\n' and self.line[n + 1] != '\t':
                    # NOTE(review): this 'pass' looks like an unfinished
                    # exemption for backslash line continuations — confirm.
                    if self.line[-2:-1] == '\\':
                        pass
                    self.print_error('Point-virgule ou virgule mal place', norme)
                    self.check_virgule.__func__.passed = 1
                    break
            n = n + 1
    @static_var("passed", 0)
    def check_space_par(self):
        # Flags a space just inside parentheses — "( x" or "x )" — while
        # skipping quoted strings.
        norme = -1
        if self.check_space_par.passed:
            norme = 0
        n = 0
        quote = 0
        while n < len(self.line) and self.line[n] != '\n' :
            if self.line[n] == '\'' or self.line[n] == '"':
                if quote:
                    quote = 0
                else:
                    quote = 1
            if (self.line[n] == '(') and quote == 0:
                if self.line[n + 1] == ' ':
                    self.print_error('Espace après une parenthese ouvrante', norme)
                    self.check_space_par.__func__.passed = 1
                    break
            if (self.line[n] == ')') and quote == 0:
                if self.line[n - 1] == ' ':
                    self.print_error('Espace avant une parenthese fermante', norme)
                    self.check_space_par.__func__.passed = 1
                    break
            n = n + 1
    @static_var("passed", 0)
    def check_nbchar(self):
        # Over-long lines; tabs are replaced by a single space first.
        # NOTE(review): the threshold is 84 while the message says 80 —
        # presumably slack for the newline plus margin; confirm.
        norme = -5
        if self.check_nbchar.passed:
            norme = 0
        line = self.line.replace('\t', " ")
        if len(line) > 84:
            # `note` is computed but never used.
            note = len(line) - 80
            self.print_error('Chaine de plus de 80 caracteres', norme)
            self.check_nbchar.__func__.passed = 1
    @static_var("passed", 0)
    def check_return(self):
        # Flags two consecutive empty lines; nb_return remembers whether
        # the previous line was empty.
        norme = -1
        if self.check_return.passed:
            norme = 0
        if (self.line[:1] == '\n'):
            if (self.nb_return == 1):
                self.print_error('Double retour a la ligne', norme)
                self.check_return.__func__.passed = 1
            else:
                self.nb_return = 1
        else:
            self.nb_return = 0
    @static_var("passed", 0)
    def check_nbline(self):
        # For .c files, tracks '{'/'}' at column 0 to count functions
        # (max 5 per file) and body length (max 25 lines).  '};' closes a
        # struct/array initializer, not a function, hence nb_func - 1.
        norme = -5
        if self.check_nbline.passed:
            norme = 0
        if self.file[-2:] == ".c":
            if self.line[:1] == '}':
                self.is_func = 0
                self.nb_funcline = 0
            if self.line[:2] == '};':
                self.is_func = 0
                self.nb_funcline = 0
                self.nb_func = self.nb_func -1
            # A '{' after a typedef does not start a function (see
            # check_typedef, which must run last in check_line).
            if self.line[:1] == '{' and self.typedef == 0:
                self.is_func = 1
                self.nb_funcline = 0
                self.nb_func = self.nb_func + 1
                if self.nb_func == 6:
                    self.print_error('Plus de 5 fonctions dans le même fichier', norme)
                    self.check_nbline.__func__.passed = 1
            else:
                if self.nb_func >= 1 and self.is_func:
                    self.nb_funcline = self.nb_funcline + 1
                    if self.nb_funcline >= 26:
                        self.print_error('Fonction de plus de 25 lignes', norme)
                        self.check_nbline.__func__.passed = 1
    @static_var("passed", 0)
    def check_cfunc(self):
        # C keyword immediately followed by '(' — the norm requires a space.
        norme = -1
        if self.check_cfunc.passed:
            norme = 0
        p = re.compile('[ \t](if|else|return|while|for|break)(\()')
        test = re.search(p, self.line)
        if test:
            self.print_error('Pas d\'espace apres mot clef', norme)
            self.check_cfunc.__func__.passed = 1
    @static_var("passed", 0)
    def check_arg(self):
        # Flags a top-level prototype ending in ')' that contains more than
        # 4 comma-separated parameters (only single-')' lines are checked).
        norme = -5
        if self.check_arg.passed:
            norme = 0
        if self.line[-2:] == ")\n" and self.line[:1] != '\t' and self.line[:1] != ' ':
            p = re.compile('(.*),(.*),(.*),(.*),(.*)\)$')
            test = re.search(p, self.line)
            if test and self.line.find(')') == self.line.rfind(')'):
                # `note` is computed but never used.
                note = 1
                if len(test.groups()) > 0:
                    note = len(test.groups()) - 4
                self.print_error('Plus de 4 arguments passes en parametre', norme)
                self.check_arg.__func__.passed = 1
    @static_var("passed", 0)
    def check_sys_include(self):
        # Disabled check (unconditional return below): would flag a system
        # include <...> appearing after a local include "...".
        return
        if self.check_sys_include.passed:
            return
        if self.line[:1] == "#" and self.line[-2:] == "\"\n":
            self.sys_include = 1
        else:
            if self.line[:1] == "#" and self.line[-2:] == ">\n" and self.sys_include == 1:
                self.print_error('Header systeme mal placee')
                self.check_sys_include.__func__.passed = 1
    @static_var("passed", 0)
    def check_comment(self):
        # Comments ('//' or '/*') inside a function body are forbidden,
        # unless comment-checking was disabled via self.comment.
        norme = -5
        if self.check_comment.passed:
            norme = 0
        if self.is_func and self.comment:
            p = re.compile('(//|/\*)')
            test = re.search(p, self.line)
            if test:
                # `note` is computed but never used.
                note = 1
                if len(test.groups()) > 0:
                    note = len(test.groups())
                self.print_error('Commentaires dans le code', norme)
                self.check_comment.__func__.passed = 1
    @static_var("passed", 0)
    def check_malloc(self):
        # Raw malloc() calls outside xmalloc.c should go through a checked
        # wrapper; [^x] excludes calls to xmalloc itself.
        if self.check_malloc.passed:
            return
        p = re.compile('[^x](malloc)(\()')
        test = re.search(p, self.line)
        if test and (self.file != "xmalloc.c"):
            self.print_error('Malloc non verifiee')
            self.check_malloc.__func__.passed = 1
    @static_var("passed", 0)
    def check_double(self):
        # Disabled check (unconditional return below): would verify that
        # .h files are protected against double inclusion by a *_H_ guard.
        return
        norme = -1
        if self.check_double.passed:
            norme = 0
        if self.file[-2:] == ".h":
            if self.line[:1] != '\n':
                if self.double_inclusion != 1:
                    self.double_inclusion = 1
                    if self.line[-4:] != "_H_\n":
                        self.print_error('Header non protegee', norme)
                        self.check_double.__func__.passed = 1
            else:
                self.double_inclusion = 1
    @static_var("passed", 0)
    def check_operateur(self, op):
        # Binary operator `op` must be surrounded by spaces; quoted strings
        # are skipped and doubled operators (e.g. '++', '||') as well as
        # lines where `op` is the first non-blank char are tolerated.
        norme = -1
        if self.check_operateur.passed:
            norme = 0
        n = 0
        quote = 0
        while n < len(self.line) and self.line[n] != '\n' :
            if self.line[n] == '\'' or self.line[n] == '"':
                if quote:
                    quote = 0
                else:
                    quote = 1
            if (self.line[n] == op) and quote == 0:
                if (self.line[n + 1] != ' ' or self.line[n - 1] != ' ') and self.line[n + 1] != ';' and self.line[n + 1] != '=' and self.line[n + 1] != '\n':
                    if self.line[n - 1] != op and self.line[n + 1] != op and not self.line[:n].isspace():
                        msg = 'Operateur %c mal placé' % op
                        self.print_error(msg, norme)
                        self.check_operateur.__func__.passed = 1
            n = n + 1
    def check_typedef(self):
        # Remembers whether this line opens a typedef so check_nbline does
        # not count the following '{' as a new function.
        if self.line[:7] == "typedef":
            self.typedef = 1
        else:
            self.typedef = 0
    @static_var("passed", 0)
    def check_regex(self, regex, msg):
        # Generic helper: report `msg` whenever `regex` matches the line
        # (used e.g. for the libc-function blacklist in check_line).
        norme = -42
        if self.check_regex.passed:
            norme = 0
        p = re.compile(regex)
        test = re.search(p, self.line)
        if test:
            # `note` is computed but never used.
            note = 1
            if len(test.groups()) > 0:
                note = len(test.groups())
            self.print_error(msg, norme)
            self.check_regex.__func__.passed = 1
    @static_var("passed", 0)
    def check_returns(self):
        # On a parenthesis-free return/break statement, flags whitespace
        # between the expression and the terminating ';'.
        norme = -1
        if self.check_returns.__func__.passed:
            norme = 0
        dot = self.line.rfind(';')
        if dot == -1:
            return
        # Walk back over spaces/tabs preceding the ';'.
        pos = dot - 1
        left = self.line[:dot]
        while pos >= 0 and (left[pos] == ' ' or left[pos] == '\t'):
            pos = pos - 1
        pos = pos + 1
        ret = self.line.find("return")
        if ret == -1:
            ret = self.line.find("break")
        par = self.line.find("(")
        if (ret != -1 and par == -1):
            # One space between keyword and ';' is allowed ("return ;").
            pos = pos + 1
            if pos < dot:
                self.print_error("Espaces avant le ';'", norme)
                self.check_returns.__func__.passed = 1
    @static_var("passed", 0)
    def check_endlinespaces(self):
        # Trailing spaces/tabs before the newline.
        norme = -1
        if self.check_endlinespaces.passed:
            norme = 0
        dot = self.line.find('\n')
        if dot == -1:
            return
        pos = dot - 1
        left = self.line[:dot]
        while pos >= 0 and (left[pos] == ' ' or left[pos] == '\t'):
            pos = pos - 1
        pos = pos + 1
        if pos < dot:
            self.print_error("Espace(s) en fin de ligne", norme)
            self.check_endlinespaces.__func__.passed = 1
    @static_var("passed", 0)
    def check_line(self):
        # Per-line dispatcher: first resolves comment state (outside or
        # inside a function), then runs every enabled check.  Ordering
        # matters: check_nbline must run first and check_typedef last.
        norme = -5
        if self.is_func != 1 and self.line.find("/*") != -1:
            self.out_comment = 1
        if self.out_comment:
            if self.is_func != 1 and self.line.find("*/") != -1:
                self.nb_return = 0
                self.out_comment = 0
            return
        if self.is_func == 1 and self.comment and self.line.find("/*") != -1 and self.line.find('\"') == -1:
            self.in_comment = 1
            if self.check_line.passed:
                norme = 0
            self.print_error("Commentaires dans le code", norme)
            self.check_line.__func__.passed = 1
            if self.line.find("*/") != -1:
                self.nb_return = 0
                self.in_comment = 0
            return
        self.check_nbline() # MUST ALWAYS RUN FIRST
        # self.check_sys_include()
        self.check_virgule()
        self.check_space_par()
        self.check_endlinespaces()
        self.check_returns()
        if self.libc == 0:
            self.check_regex('[^_](printf|atof|atoi|atol|strcmp|strlen|strcat|strncat|strncmp|strcpy|strncpy|fprintf|strstr|strtoc|sprintf|asprintf|perror|strtod|strtol|strtoul)(\()', \
                             'Fonction de la lib C')
        self.check_nbchar()
        self.check_cfunc()
        self.check_arg()
        self.check_return()
        self.check_double()
        self.check_operateur('+')
        self.check_operateur('|')
        self.check_typedef() # MUST ALWAYS RUN LAST
        if self.malloc:
            self.check_malloc()
def print_error(self, msg, val = -1):
        """Charge `val` penalty points and print the error for the current line.

        When `printline` is set, the offending source line is echoed too.
        """
        self.note += val
        location = self.the_dir + self.file
        print("Erreur dans %s a la ligne %s:%s => %s"% (location, self.nb_line, msg, val))
        if self.printline:
            print(self.line)
def get_score(self):
        """Map the accumulated penalty (`self.note`) to the final grade.

        Below -10 the grade clamps to 1, between -10 and -6 to -10;
        otherwise the raw note is returned unchanged.
        """
        total = self.note
        if total < -10:
            return 1
        if total < -5:
            return -10
        return total
def cant_open(self, file):
        """Print an "unable to open" message for `file`.

        Stays quiet in non-verbose mode unless the file is the one named on
        the command line (sys.argv[1]).
        """
        must_report = self.verbose or file == sys.argv[1]
        if must_report:
            print("Impossible d'ouvrir", file)
def scanfile(self, file):
        """Check a single .c/.h file, line by line.

        The first 9 lines are validated as the standard header; every later
        line goes through check_line().  Unopenable files are reported via
        cant_open() instead of raising.
        """
        if file[-2:] == '.c' or file[-2:] == '.h':
            self.file = file
            self.new_file()
        try:
            fd = open(file, 'r')
        except IOError:
            self.cant_open(file)
        else:
            # Binding the loop variable to `self.line` lets the check_*
            # methods read the current line without it being passed around.
            for self.line in fd.readlines():
                if self.nb_line <= 9:
                    self.check_header()
                else:
                    self.check_line()
                self.nb_line = self.nb_line + 1
            fd.close()
def scandir(self, thedir):
        """Recursively check every .c/.h file below `thedir`.

        Symlinks are skipped.  Any per-entry failure is caught broadly and
        reported so one bad file cannot abort the whole scan.
        """
        try:
            dir = os.listdir(thedir)
        except:
            self.cant_open(thedir)
        else:
            # check_makefile(thedir)
            for file in dir:
                try:
                    if os.path.islink(thedir + file):
                        continue
                    if (os.path.isdir(thedir + file)):
                        # NOTE(review): recursing with thedir + "/" + file + "/"
                        # can produce doubled slashes when thedir already ends
                        # with '/' -- harmless on POSIX but confirm.
                        self.scandir(thedir + "/" + file + "/")
                    self.the_dir = thedir
                    if file[-2:] == '.c' or file[-2:] == '.h':
                        self.file = file
                        self.new_file()
                        file = thedir + file
                        try:
                            fd = open(file, 'r')
                        except IOError:
                            self.cant_open(file)
                        else:
                            # Same header/body split as scanfile().
                            for self.line in fd.readlines():
                                if self.nb_line <= 9:
                                    self.check_header()
                                else:
                                    self.check_line()
                                self.nb_line = self.nb_line + 1
                            fd.close()
                except:
                    print("Issue on %s: Please do the norm manually"% file)
def get_user(self):
        """Populate self.user with author identities for cheat detection.

        Names are read from the `auteur` file in the scanned directory when
        it exists; otherwise the $USER environment variable is used.  Each
        login is followed by its full name from the password database.
        """
        try:
            fd = open(sys.argv[1] + 'auteur')
        except IOError:
            user = os.getenv('USER')
            self.user.append(user)
            try:
                self.user.append(pwd.getpwnam(user)[4]) # fetch the user's full name (GECOS field)
            except:
                pass
        else:
            buffer = fd.read()
            fd.close()
            p = re.compile('([\w]*)')
            test = re.findall(p, buffer)
            for user in test:
                if user:
                    self.user.append(user)
                    # NOTE(review): unlike the fallback branch, a missing
                    # passwd entry here raises KeyError uncaught -- confirm.
                    self.user.append(pwd.getpwnam(user)[4])
def check_makefile(thedir):
    """Check `thedir`/Makefile for debug flags, -Wall, and the '##' header.

    Silently does nothing when no Makefile exists; findings are printed
    rather than scored.
    """
    file = thedir + "Makefile"
    if os.path.isfile(file):
        try:
            fd = open(file, 'r')
        except IOError:
            print("Impossible d'ouvrir le Makefile")
        else:
            buffer = fd.read()
            # Debug/profiling flags must not ship in the Makefile.
            p = re.compile('(-g|-pg|-lefence)')
            test = re.search(p, buffer)
            if test:
                print("Options de debug dans le Makefile")
            p = re.compile('(-Wall)')
            test = re.search(p, buffer)
            if not test:
                print("-Wall n'est pas dans le Makefile")
            # Standard Makefile header starts with '##'.
            if buffer[:2] != "##":
                print("Header du Makefile invalide")
            fd.close()
def help():
    """Print the command-line usage/options summary and exit the process."""
    usage_lines = (
        "Aide",
        "Usage: norme.py <dir_to_scan>",
        "-verbose: affiche les messages impossible d'ouvrir",
        "-nocheat: desactive la detection de la triche",
        "-score: affiche le nombre de faute de norme",
        "-libc: active la verification des fonctions de la libc",
        "-malloc: desactive le controle du malloc",
        "-printline: affiche la ligne provoquant une erreur",
        "-return: active verifier le retour des fonctions (return ;)",
        "-comment: ne pas verifier les commentaire",
    )
    for text in usage_lines:
        print(text)
    sys.exit()
def main():
    """Parse command-line flags, configure a `norme` checker, and run it.

    Flags toggle attributes on the checker instance; note that several
    flags are inverted (e.g. -libc sets libc = 0, which *enables* the
    libc-function check in check_line()).
    """
    if len(sys.argv) == 1:
        print("Usage: norme.py <dir_to_scan>")
        sys.exit()
    moulin = norme()
    if '-verbose' in sys.argv[1:]:
        moulin.verbose = 1
    if '-comment' in sys.argv[1:]:
        moulin.comment = 0
    if '-nocheat' in sys.argv[1:]:
        moulin.cheat = 0
    if '-score' in sys.argv[1:]:
        moulin.score = 1
    if '-libc' in sys.argv[1:]:
        moulin.libc = 0
    if '-malloc' in sys.argv[1:]:
        moulin.malloc = 0
    if '-printline' in sys.argv[1:]:
        moulin.printline = 1
    if '-return' in sys.argv[1:]:
        moulin.creturn = 0
    if '--user' in sys.argv[1:]:
        # Best effort: ignore a trailing --user with no value.
        try:
            moulin.user.append(sys.argv[sys.argv.index("--user") + 1])
        except:
            pass
    if '-help' in sys.argv[1:]:
        help()
#    if sys.argv[1][-1:] != '/':
#        sys.argv[1] = sys.argv[1] + '/'
    if moulin.cheat == 1:
        moulin.get_user()
    try:
        moulin.scanfile(sys.argv[1])
    except NameError:
        print("Usage: norme.py <dir_to_scan>")
    if moulin.score:
        # The numeric score goes to stderr so stdout keeps only the report.
        print(moulin.get_score(), file=sys.stderr)
# Script entry point.
if __name__ == "__main__":
    main()
| Arignir/Epitech-norm-linter | norminette/norm.py | Python | mit | 19,880 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# django-asana documentation build configuration file, created by
# sphinx-quickstart on Thu Jun 21 11:12:30 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import django
# Make the docs dir and the project root importable so autodoc finds djasana.
sys.path.insert(0, os.path.abspath("."))
sys.path.insert(0, os.path.abspath(".."))
# Django settings must be configured before django.setup() and before
# importing any app code (djasana below).
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
django.setup()
import djasana # Also sets up default app config
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["sphinx.ext.autodoc", "sphinx.ext.coverage"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "django-asana"
copyright = "2018, Steve Bywater"
author = djasana.__author__
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = djasana.__version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "django-asanadoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, "django-asana.tex", "django-asana Documentation", author, "manual"),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "django-asana", "django-asana Documentation", [author], 1)]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (
        master_doc,
        "django-asana",
        "django-asana Documentation",
        author,
        "django-asana",
        "One line description of project.",
        "Miscellaneous",
    ),
]
| sbywater/django-asana | docs/conf.py | Python | mit | 5,037 |
"""
picture.py
Author: Daniel Wilson
Credit: none
Assignment:
Use the ggame library to "paint" a graphical picture of something (e.g. a house, a face or landscape).
Use at least:
1. Three different Color objects.
2. Ten different Sprite objects.
3. One (or more) RectangleAsset objects.
4. One (or more) CircleAsset objects.
5. One (or more) EllipseAsset objects.
6. One (or more) PolygonAsset objects.
See:
https://github.com/HHS-IntroProgramming/Standards-and-Syllabus/wiki/Displaying-Graphics
for general information on how to use ggame.
See:
http://brythonserver.github.io/ggame/
for detailed information on ggame.
"""
from ggame import App, Color, LineStyle, Sprite, RectangleAsset, CircleAsset, EllipseAsset, PolygonAsset
# add your code here \/ \/ \/
# Palette.
# NOTE(review): "green" holds 0x000000 (black, same as `black`) and "blue"
# holds 0x40FFFF (cyan-ish) -- the names do not match the values; confirm.
red = Color(0xFF4040, 1.0)
blue = Color(0x40FFFF, 1.0)
green = Color(0x000000, 1.0)
purple = Color(0xA040FF, 1.0)
dblue = Color(0x0000ff, 1.0)
black = Color(0x000000, 1.0)
# Line styles used by the assets below.
thickline = LineStyle(5, purple)
thinline = LineStyle(2.5, black)
noline = LineStyle(1, dblue)
redline = LineStyle(2, red)
pline = LineStyle(2, purple)
# Face parts: head, nose, eyes, grin, smile, eyebrows, pupils.
ellipse1 = EllipseAsset(250, 300, thickline, dblue)
ellipse2 = EllipseAsset(50, 70, thinline, green)
circle1 = CircleAsset(50, thinline, red)
circle2 = CircleAsset(50, thinline, purple)
grin1 = CircleAsset (30, thinline, green)
grin2 = CircleAsset (30, noline, dblue)
smile = PolygonAsset ([(0, 75), (125, 30), (0, 50), (-125, 30)], thickline, purple)
eb = RectangleAsset(150, 30, thickline, red)
eb2 = RectangleAsset(140, 30, thickline, green)
prunelle = CircleAsset(25, pline, black)
pupil = CircleAsset(26, redline, black)
# Place each asset on screen (coordinates are screen pixels).
Sprite(ellipse1, (800, 400))
Sprite(ellipse2, (800, 400))
Sprite(circle1, (700, 350))
Sprite(circle2, (900, 350))
Sprite(grin1, (950, 530))
Sprite(grin2, (940, 530))
Sprite(smile, (805, 500))
Sprite(eb, (620, 250))
Sprite(eb2, (820, 250))
Sprite(prunelle, (700, 350))
Sprite(pupil, (900, 350))
# add your code here /\ /\ /\
#http://www.colorpicker.com
myapp = App()
myapp.run()
| danielwilson2017/Picture | picture.py | Python | mit | 2,001 |
import os
# Base URL of the RASP soaring-forecast tiles for Germany.
RASP_URL = "http://rasp.linta.de/GERMANY/"
# File-name suffix of the forecast images on the server.
suffix = "lst.d2.png"
# URL path components selecting the forecast day.
today = "curr"
tomorrow = "curr+1"
# Human-readable parameter description -> RASP short code.
parameters = {
    "Cu Cloudbase where Cu Potential>0": "zsfclclmask",
    "Thermal Updraft Velocity and B/S ratio": "wstar_bsratio",
}
# Absolute directory of this module.
here = os.path.abspath(os.path.dirname(__file__))
# Forecast hours "07".."19", zero-padded to two digits (idiomatic rewrite of
# the old literal-list + map(str, range(10, 20)) concatenation).
hours = ["%02d" % h for h in range(7, 20)]
| tomislater/raspgif | raspgif/constants.py | Python | mit | 358 |
import random
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
import os
from direct.showbase import AppRunnerGlobal
from direct.directnotify import DirectNotifyGlobal
from pandac.PandaModules import *
class PetNameGenerator:
    """Loads the localized pet-name master list and hands out names.

    Master-file lines have the form ``<id>*<category>*<name>`` where the
    category indexes [boy, girl, neutral] (see `masterList` below); lines
    starting with '#' are comments.
    """
    notify = DirectNotifyGlobal.directNotify.newCategory('PetNameGenerator')
    boyFirsts = []
    girlFirsts = []
    neutralFirsts = []

    def __init__(self):
        self.generateLists()

    def generateLists(self):
        """Parse the master file into nameDictionary and the per-gender lists."""
        self.boyFirsts = []
        self.girlFirsts = []
        self.neutralFirsts = []
        self.nameDictionary = {}
        # NOTE(review): this search path is built but never used to locate
        # the file opened below -- confirm whether it is still needed.
        searchPath = DSearchPath()
        if AppRunnerGlobal.appRunner:
            searchPath.appendDirectory(Filename.expandFrom('$TT_3_ROOT/phase_3/etc'))
        else:
            searchPath.appendDirectory(Filename('/phase_3/etc'))
        if os.path.expandvars('$TOONTOWN') != '':
            searchPath.appendDirectory(Filename.fromOsSpecific(os.path.expandvars('$TOONTOWN/src/configfiles')))
        else:
            searchPath.appendDirectory(Filename.fromOsSpecific(os.path.expandvars('toontown/src/configfiles')))
        searchPath.appendDirectory(Filename('.'))
        if __debug__:
            filename = '../RetroResources/phase_3/etc/'+TTLocalizer.PetNameMaster
        else:
            filename = '/phase_3/etc/'+TTLocalizer.PetNameMaster
        input = open(filename, 'r')
        if not input:
            # NOTE(review): unreachable -- open() raises IOError rather than
            # returning a falsy value.
            self.notify.error('PetNameGenerator: Error opening name list text file.')
        currentLine = input.readline()
        while currentLine:
            if currentLine.lstrip()[0:1] != '#':
                # Split "<id>*<category>*<name>\n" on the two '*' markers.
                a1 = currentLine.find('*')
                a2 = currentLine.find('*', a1 + 1)
                self.nameDictionary[int(currentLine[0:a1])] = (int(currentLine[a1 + 1:a2]), currentLine[a2 + 1:len(currentLine) - 1].strip())
            currentLine = input.readline()
        masterList = [self.boyFirsts, self.girlFirsts, self.neutralFirsts]
        for tu in self.nameDictionary.values():
            masterList[tu[0]].append(tu[1])
        return 1

    def getName(self, uniqueID):
        """Return the name registered for uniqueID, falling back to entry 0."""
        try:
            return self.nameDictionary[uniqueID][1]
        except:
            return self.nameDictionary[0][1]

    def returnUniqueID(self, name):
        """Return the ID whose (category, name) pair matches `name`, else -1."""
        newtu = [(), (), ()]
        newtu[0] = (0, name)
        newtu[1] = (1, name)
        newtu[2] = (2, name)
        for tu in self.nameDictionary.items():
            for g in newtu:
                if tu[1] == g:
                    return tu[0]
        return -1

    def randomName(self, gender = None, seed = None):
        """Return a random name for `gender` (0 = boy, 1 = girl).

        Neutral names are always eligible.  The global random state is saved
        and restored so a caller-supplied seed does not perturb other users
        of the `random` module.
        """
        S = random.getstate()
        if seed is not None:
            random.seed(seed)
        if gender is None:
            gender = random.choice([0, 1])
        retString = ''
        firstList = self.neutralFirsts[:]
        if gender == 0:
            firstList += self.boyFirsts
        elif gender == 1:
            firstList += self.girlFirsts
        else:
            # Bug fix: the class has no `self.error`; report through the
            # class notifier as done in generateLists().
            self.notify.error('Must be boy or girl.')
        retString += random.choice(firstList)
        random.setstate(S)
        return retString
| ToonTownInfiniteRepo/ToontownInfinite | toontown/pets/PetNameGenerator.py | Python | mit | 3,155 |
# Demo script: Python iterators built with map/filter vs. loops and
# comprehensions (console text is in French; left as-is).
# NOTE(review): `functools` and `operator` are imported but never used below.
# NOTE(review): despite their names, the "powers_of_2" variables hold
# squares (x**2), not powers of two.
import itertools
import functools
import operator
def squared(x): return x**2
print("Itérateurs créés à partir d'une map")
iterator_of_powers_of_2_for_first_numbers = map(squared, range(1, 10))
print("La fonction map retourne un objet iterator :",
      iterator_of_powers_of_2_for_first_numbers)
print("C'est bien un objet iterator avec les méthodes __iter__ et __next__")
print("Il possède la fonction __iter__ ('__iter__' in dir(iterator_of_powers_of_2_for_first_numbers)) :",
      '__iter__' in dir(iterator_of_powers_of_2_for_first_numbers))
print("Il possède la fonction __next__ ('__next__' in dir(iterator_of_powers_of_2_for_first_numbers)) :",
      '__next__' in dir(iterator_of_powers_of_2_for_first_numbers))
print("Premier élément :", next(iterator_of_powers_of_2_for_first_numbers))
print("Elément suivant :", next(iterator_of_powers_of_2_for_first_numbers))
print("La suite des éléments : ", end='')
print(*iterator_of_powers_of_2_for_first_numbers)
print("Somme des carrés des 9 premiers entiers strictement positifs :",
      sum(map(squared, range(1, 10))))
print("Maximum des carrés des 9 premiers entiers strictement positifs :",
      max(map(squared, range(1, 10))))
print("Minimum des carrés des 9 premiers entiers strictement positifs :",
      min(map(squared, range(1, 10))))
print('\n##############\n')
print("L'équivalent avec une boucle for : ", end='')
powers_of_2 = []
for number in range(1, 10):
    powers_of_2.append(squared(number))
print(powers_of_2)
print('\n##############\n')
print('Liste avec map')
iterator_of_powers_of_2_for_first_numbers = map(squared, range(1, 10))
print("Liste des carrés des 9 premiers entiers strictement positifs :",
      list(iterator_of_powers_of_2_for_first_numbers))
print('\n##############\n')
print("Ensemble avec map")
iterator_of_powers_of_2 = map(squared, range(-9, 10))
print("Ensemble des carrés des entiers entre -9 et 9 :",
      set(iterator_of_powers_of_2))
print('\n##############\n')
def squared_pair(x): return (x, x**2)
print("Dictionnaire avec map")
iterator_powers_of_2_for_first_numbers = map(squared_pair, range(1, 10))
print("Tableau associatif des carrés des 9 premiers entiers strictement positifs :",
      dict(iterator_powers_of_2_for_first_numbers))
print('\n##############\n')
print("Compréhension équivalente à l'utilisation de map")
generator_of_powers_of_2_for_first_numbers = (x**2 for x in range(1, 10))
print(generator_of_powers_of_2_for_first_numbers)
print(next(generator_of_powers_of_2_for_first_numbers))
print(next(generator_of_powers_of_2_for_first_numbers))
print(*generator_of_powers_of_2_for_first_numbers)
print('\n##############\n')
print("Itérateurs infinis à partir d'une map")
neverending_iterator_of_powers_of_2 = map(squared, itertools.count(1))
print("Premier élément :", next(neverending_iterator_of_powers_of_2))
print("Elément suivant :", next(neverending_iterator_of_powers_of_2))
print("La suite des éléments pour les entiers inférieurs à 1000 : ", end='')
print(*itertools.takewhile(lambda x: x < 1000,
                           neverending_iterator_of_powers_of_2))
print("Elément suivant :", next(neverending_iterator_of_powers_of_2))
print('\n##############\n')
print('Map et filter travaillant de concert')
first_event_numbers = filter(lambda x: x % 2 == 0, range(1, 10))
print("Un objet _filter_, first_event_numbers :", first_event_numbers)
list_powers_of_2_for_first_event_numbers = map(squared, first_event_numbers)
print("Les premiers carrés :", list(list_powers_of_2_for_first_event_numbers))
print('\n##############\n')
| TGITS/programming-workouts | python/misc/learning_python/map_examples.py | Python | mit | 3,614 |
# output-json
from typing import Optional
from pydantic import BaseModel, Field
from pydantic.fields import ModelField
class RestrictedAlphabetStr(str):
    """A ``str`` subtype validated against an ``alphabet`` declared per field.

    The allowed alphabet is read from the extra keyword supplied via
    ``Field(alphabet=...)``; validation rejects any character outside it,
    and the generated JSON schema advertises example values built from it.
    """
    @classmethod
    def __get_validators__(cls):
        # Pydantic v1 hook: yield the validator callables for this type.
        yield cls.validate
    @classmethod
    def validate(cls, value, field: ModelField):
        # Field(alphabet=...) lands in field_info.extra, not a standard slot.
        alphabet = field.field_info.extra['alphabet']
        if any(c not in alphabet for c in value):
            raise ValueError(f'{value!r} is not restricted to {alphabet!r}')
        return cls(value)
    @classmethod
    def __modify_schema__(cls, field_schema, field: Optional[ModelField]):
        # Advertise sample values (each allowed letter tripled) in the schema.
        if field:
            alphabet = field.field_info.extra['alphabet']
            field_schema['examples'] = [c * 3 for c in alphabet]
class MyModel(BaseModel):
    # `alphabet` is not a standard Field argument; it is stored in
    # field_info.extra and consumed by the custom str type's validator.
    value: RestrictedAlphabetStr = Field(alphabet='ABC')
# Demonstrate the customized JSON schema (includes the "examples" entries).
print(MyModel.schema_json(indent=2))
| samuelcolvin/pydantic | docs/examples/schema_with_field.py | Python | mit | 865 |
# Specify version in Semantic style (with PEP 440 pre-release specification)
# e.g. "2.0.0a1" is SemVer 2.0.0-alpha.1 expressed in PEP 440 form.
__version__ = "2.0.0a1"
| radiocosmology/alpenhorn | alpenhorn/__init__.py | Python | mit | 101 |
import random
from flask import Flask
from restea import errors
from restea.resource import Resource
from restea.adapters.flaskwrap import FlaskResourceWrapper
from restea import fields
app = Flask(__name__)
# Dummy data for the Resource
# 19 fake site records with ids 1..19; `anoher_field_out_of_scope` is
# deliberately outside the declared FieldSet so it must not be serialized.
# NOTE(review): the key "anoher_field_out_of_scope" is misspelled, but it is
# runtime data referenced only here -- renaming would be cosmetic.
sites = [
    {
        'id': i,
        'name': 'my__site_{}'.format(i),
        'title': 'my site #{}'.format(i),
        'rating': random.randint(1, 5),
        'domain': 'www.my_domain_for_site_{}.com'.format(i),
        'anoher_field_out_of_scope': 'this one shouldn\'t be seen'
    } for i in range(1, 20)
]
def add_dummy_data(func):
    """Decorator for resource handlers: add a marker entry to dict responses.

    Non-dict results (e.g. the list returned by ``list``) pass through
    unchanged.  ``functools.wraps`` preserves the wrapped handler's name and
    docstring (the original wrapper discarded them).
    """
    from functools import wraps

    @wraps(func)
    def wrapper(self, *args, **kwargs):
        res = func(self, *args, **kwargs)
        if isinstance(res, dict):
            res['dummy_key'] = 'dummy value'
        return res
    return wrapper
class SiteResource(Resource):
    """REST resource exposing the in-memory ``sites`` list."""
    decorators = [add_dummy_data]
    # Declares which payload/response fields are recognized and validated.
    fields = fields.FieldSet(
        id=fields.Integer(required=True, range=(1, 100)),
        name=fields.String(max_length=50, required=True),
        title=fields.String(max_length=150),
        created_at=fields.DateTime(null=True),
    )
    def list(self):
        # GET collection: return every site record.
        return sites
    def show(self, iden):
        # GET single item, with a permissive CORS header.
        self.set_header('Access-Control-Allow-Origin', '*')
        # NOTE(review): `iden` indexes the list by position, but sites[0]
        # has id 1 -- id and index are off by one; confirm intended.
        try:
            return sites[int(iden)]
        except IndexError:
            raise errors.NotFoundError('Site doesn\'t exist', code=10)
    def edit(self, iden):
        # Echo the validated payload back to the caller.
        return self.payload
# Register the resource's URL routes on the Flask app under /v1/sites.
with app.app_context():
    FlaskResourceWrapper(SiteResource).get_routes('/v1/sites')
if __name__ == '__main__':
    app.debug = True
    app.run()
| bodbdigr/restea | example.py | Python | mit | 1,577 |
import falcon
from io import StringIO
from sqlalchemy import func, distinct, desc, text
from sqlalchemy.exc import SQLAlchemyError
from db import session
import model
import util
class UserExport(object):
    """Falcon resource exporting all participants of a year as CSV (org-only)."""
    def _stringify_users(self, users, sum_points):
        """
        Helper that renders the ranked users as CSV rows.
        Returns the string to be written to the output file.
        """
        res = ""
        order = 0
        last_points = -1
        for i in range(0, len(users)):
            user, profile, points, tasks_cnt, cheat = users[i]
            # Standard competition ranking: equal scores share a rank.
            if points != last_points:
                order = i + 1
                last_points = points
            # Columns must match `table_header` in on_get(); 'A'/'N' encode
            # yes/no for "successful solver" (>= 60 % and no cheating) and
            # for the cheat flag itself.
            res += \
                str(order) + ";" +\
                user.last_name + ";" +\
                user.first_name + ";" +\
                str(points) + ";" +\
                ('A' if points >= 0.6 * sum_points and not cheat else 'N') + ";" +\
                ('A' if cheat else 'N') + ";" +\
                user.email + ";" +\
                user.sex + ";" +\
                profile.addr_street + ";" +\
                profile.addr_city + ";" +\
                profile.addr_zip + ";" +\
                profile.addr_country + ";" +\
                profile.school_name + ";" +\
                profile.school_street + ";" +\
                profile.school_city + ";" +\
                profile.school_zip + ";" +\
                profile.school_country + ";" +\
                str(profile.school_finish) + ";" +\
                profile.tshirt_size + '\n'
        return res
    def on_get(self, req, resp):
        """ Return a CSV of all participants of the selected year. """
        try:
            user = req.context['user']
            year_obj = req.context['year_obj']
            # Only logged-in organizers may export personal data.
            if (not user.is_logged_in()) or (not user.is_org()):
                resp.status = falcon.HTTP_400
                return
            inMemoryOutputFile = StringIO()
            # A lot of magic happens here in order to save SQL queries.
            # We try to minimise the number of queries that must be issued
            # per user
            # and instead run one BIG query.
            # Per-user, per-module score (only modules with evaluation_public)
            per_user = session.query(
                model.Evaluation.user.label('user'),
                func.max(model.Evaluation.points).label('points'),
                func.max(model.Evaluation.cheat).label('cheat'),
            ).\
                join(model.Module,
                     model.Evaluation.module == model.Module.id).\
                join(model.Task, model.Task.id == model.Module.task).\
                filter(model.Task.evaluation_public).\
                join(model.Wave, model.Wave.id == model.Task.wave).\
                filter(model.Wave.year == req.context['year']).\
                group_by(model.Evaluation.user, model.Evaluation.module).\
                subquery()
            # Number of submitted tasks (also counts non-evaluation_public
            # modules, e.g. auto-graded modules worth 0 points).
            tasks_per_user = session.query(
                model.Evaluation.user.label('user'),
                func.count(distinct(model.Task.id)).label('tasks_cnt')
            ).\
                join(model.Module,
                     model.Evaluation.module == model.Module.id).\
                join(model.Task, model.Task.id == model.Module.task).\
                join(model.Wave, model.Wave.id == model.Task.wave).\
                filter(model.Wave.year == req.context['year']).\
                group_by(model.Evaluation.user).subquery()
            # Fetch all users; those with evaluations get their point total
            # and submitted-task count attached.
            # Yields tuples: (model.User, model.Profile, total_score,
            # tasks_cnt, cheat)
            users = session.query(
                model.User,
                model.Profile,
                func.sum(per_user.c.points).label("total_score"),
                tasks_per_user.c.tasks_cnt.label('tasks_cnt'),
                func.max(per_user.c.cheat).label('cheat'),
            ).\
                join(per_user, model.User.id == per_user.c.user).\
                join(tasks_per_user, model.User.id == tasks_per_user.c.user).\
                join(model.Profile, model.User.id == model.Profile.user_id).\
                filter(model.User.role == 'participant').\
                filter(text("tasks_cnt"), text("tasks_cnt > 0")).\
                group_by(model.User).\
                order_by(desc("total_score"),
                         model.User.last_name, model.User.first_name)
            # Split participants into high-schoolers and the rest by their
            # school graduation year.
            year_end = util.year.year_end(year_obj)
            users_hs = users.filter(model.Profile.school_finish >= year_end).\
                all()
            users_other = users.filter(model.Profile.school_finish <
                                       year_end).\
                all()
            sum_points = util.task.sum_points(
                req.context['year'],
                bonus=False) + year_obj.point_pad
            sum_points_bonus = util.task.sum_points(
                req.context['year'],
                bonus=True) + year_obj.point_pad
            # CSV column names (Czech; user-facing, kept verbatim).
            table_header = \
                "Pořadí;" +\
                "Příjmení;" +\
                "Jméno;" +\
                "Body;" +\
                "Úspěšný řešitel;" +\
                "Podvod;" +\
                "E-mail;" +\
                "Pohlaví;" +\
                "Ulice;" +\
                "Město;" +\
                "PSČ;" +\
                "Země;" +\
                "Škola;" +\
                "Adresa školy;" +\
                "Město školy;" +\
                "PSČ školy;" +\
                "Země školy;" +\
                "Rok maturity;" +\
                "Velikost trička\n"
            inMemoryOutputFile.write(
                "Celkem bodů: " + str(sum_points) +
                ", včetně bonusových úloh: " + str(sum_points_bonus) +
                ", bodová vycpávka: " + str(year_obj.point_pad) + '\n'
            )
            # High-school participants
            inMemoryOutputFile.write("Středoškoláci\n")
            inMemoryOutputFile.write(table_header)
            inMemoryOutputFile.write(self._stringify_users(users_hs,
                                                           sum_points))
            # Remaining (non-high-school) participants
            inMemoryOutputFile.write("\nOstatní\n")
            inMemoryOutputFile.write(table_header)
            inMemoryOutputFile.write(self._stringify_users(users_other,
                                                           sum_points))
            resp.set_header(
                'Content-Disposition',
                ('inline; filename="resitele_' + str(req.context['year']) +
                 '.csv"')
            )
            resp.content_type = "text/csv"
            resp.body = inMemoryOutputFile.getvalue()
            # NOTE(review): resp.stream_len is deprecated in modern Falcon
            # (use content_length) -- confirm the pinned Falcon version.
            resp.stream_len = len(resp.body)
            inMemoryOutputFile.close()
        except SQLAlchemyError:
            session.rollback()
            raise
        finally:
            session.close()
| fi-ksi/web-backend | endpoint/admin/userExport.py | Python | mit | 7,209 |
import requests
import gw2api
import gw2api.v2
from auth_test import AuthenticatedTestBase
class TestGuildAuthenticated(AuthenticatedTestBase):
    """Integration tests for the authenticated guild API endpoints.

    setUpClass probes the account's guilds for one the API key can read
    member data from; every test is skipped when none is found.
    """
    guild_id = None
    @classmethod
    def setUpClass(cls):
        super(TestGuildAuthenticated, cls).setUpClass()
        if not cls.api_key:
            return
        gw2api.v2.account.set_token(cls.api_key)
        gw2api.v2.guild.set_token(cls.api_key)
        # Try to find a guild that we can access with the API key we have.
        account = gw2api.v2.account.get()
        for guild_id in account["guilds"]:
            try:
                gw2api.v2.guild.get_members(guild_id)
                cls.guild_id = guild_id
                break
            except requests.RequestException:
                pass
    def setUp(self):
        super(TestGuildAuthenticated, self).setUp()
        if not self.guild_id:
            self.skipTest("No usable guild found")
    def test_guild_ranks(self):
        ranks = gw2api.v2.guild.get_ranks(self.guild_id)
        self.assertIsInstance(ranks, list)
        # Every guild has a "Leader" rank by default.
        rank_names = [rank["id"] for rank in ranks]
        self.assertIn("Leader", rank_names)
    def test_guild_members(self):
        members = gw2api.v2.guild.get_members(self.guild_id)
        self.assertIsInstance(members, list)
    def test_guild_treasury(self):
        treasury = gw2api.v2.guild.get_treasury(self.guild_id)
        self.assertIsInstance(treasury, list)
    def test_guild_storage(self):
        storage = gw2api.v2.guild.get_storage(self.guild_id)
        self.assertIsInstance(storage, list)
    def test_guild_upgrades(self):
        upgrades = gw2api.v2.guild.get_upgrades(self.guild_id)
        self.assertIsInstance(upgrades, list)
        for upgrade_id in upgrades:
            self.assertIsInstance(upgrade_id, int)
    def test_guild_stash(self):
        stash = gw2api.v2.guild.get_stash(self.guild_id)
        self.assertIsInstance(stash, list)
        # Each stash tab carries its upgrade, size, coins and inventory.
        for first_tab in stash:
            self.assertIsInstance(first_tab, dict)
            self.assertIn("upgrade_id", first_tab)
            self.assertIn("size", first_tab)
            self.assertIn("coins", first_tab)
            self.assertIn("inventory", first_tab)
    def test_guild_log(self):
        log = gw2api.v2.guild.get_log(self.guild_id)
        self.assertIsInstance(log, list)
        for entry in log[:5]:
            self.assertIsInstance(entry, dict)
            self.assertIn("id", entry)
            self.assertIn("type", entry)
    def test_guild_log_since(self):
        log = gw2api.v2.guild.get_log(self.guild_id)
        self.assertIsInstance(log, list)
        # Asking for entries newer than log[n] must return exactly n entries.
        n = min(5, len(log) - 1)
        since_entry = log[n]
        log = gw2api.v2.guild.get_log(self.guild_id, since=since_entry["id"])
        self.assertIsInstance(log, list)
        self.assertEqual(len(log), n)
    def test_guild_teams(self):
        teams = gw2api.v2.guild.get_teams(self.guild_id)
        self.assertIsInstance(teams, list)
        for entry in teams[:5]:
            self.assertIsInstance(entry, dict)
            self.assertIn("id", entry)
            self.assertIn("members", entry)
            self.assertIn("name", entry)
| hackedd/gw2api | test/guild_test.py | Python | mit | 3,196 |
# coding: utf8
{
' (leave empty to detach account)': ' (leave empty to detach account)',
' Module is the main communications hub of the Sahana system. It is used to send alerts and/or messages using SMS & Email to various groups and individuals before, during and after a disaster.': ' Module is the main communications hub of the Sahana system. It is used to send alerts and/or messages using SMS & Email to various groups and individuals before, during and after a disaster.',
' by ': ' by ',
' is envisioned to be composed of several sub-modules that work together to provide complex functionality for the management of relief and project items by an organization. This includes an intake system, a warehouse management system, commodity tracking, supply chain management, fleet management, procurement, financial tracking and other asset and resource management capabilities.': ' is envisioned to be composed of several sub-modules that work together to provide complex functionality for the management of relief and project items by an organization. This includes an intake system, a warehouse management system, commodity tracking, supply chain management, fleet management, procurement, financial tracking and other asset and resource management capabilities.',
' on ': ' on ',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN',
'# of Houses Damaged': '# of Houses Damaged',
'# of Houses Destroyed': 'Số căn nhà bị phá hủy',
'# of International Staff': '# of International Staff',
'# of National Staff': '# of National Staff',
'# of People Affected': 'Số người bị ảnh hưởng',
'# of People Deceased': '# of People Deceased',
'# of People Injured': '# of People Injured',
'# of Vehicles': '# of Vehicles',
'%Y-%m-%d': '%Y-%m-%d',
'%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S',
'%s rows deleted': '%s rows deleted',
'%s rows updated': '%s rows updated',
'(Constraints Only)': '(Constraints Only)',
') & then click on the map below to adjust the Lat/Lon fields:': ') & then click on the map below to adjust the Lat/Lon fields:',
'* Required Fields': '* Required Fields',
'0-15 minutes': '0-15 minutes',
'1 Assessment': '1 Assessment',
'1 location, shorter time, can contain multiple Tasks': '1 location, shorter time, can contain multiple Tasks',
'1-3 days': '1-3 days',
'1. Fill the necessary fields in BLOCK letters.': '1. Fill the necessary fields in BLOCK letters.',
'15-30 minutes': '15-30 phút',
'2 different options are provided here currently:': '2 different options are provided here currently:',
'2. Always use one box per letter and leave one box space to seperate words.': '2. Always use one box per letter and leave one box space to seperate words.',
'2x4 Car': '2x4 Car',
'30-60 minutes': '30-60 minutes',
'4-7 days': '4-7 days',
'4x4 Car': '4x4 Car',
'8-14 days': '8-14 ngày',
'A Reference Document such as a file, URL or contact person to verify this data. You can type the 1st few characters of the document name to link to an existing document.': 'tài liệu tham khảo như file, URL hay thông tin liên hệ đế xác nhận dữ liệu này.Bạn có thể gõ một vài ký tự đầu của tên tài liệu để kết nối tới tài liệu có sẵn',
'A Warehouse is a physical place to store items.': 'A Warehouse is a physical place to store items.',
'A Warehouse/Site is a physical location with an address and GIS data where Items are Stored. It can be a Building, a particular area in a city or anything similar.': 'A Warehouse/Site is a physical location with an address and GIS data where Items are Stored. It can be a Building, a particular area in a city or anything similar.',
'A brief description of the group (optional)': 'A brief description of the group (optional)',
'A file downloaded from a GPS containing a series of geographic points in XML format.': 'A file downloaded from a GPS containing a series of geographic points in XML format.',
'A file in GPX format taken from a GPS whose timestamps can be correlated with the timestamps on the photos to locate them on the map.': 'A file in GPX format taken from a GPS whose timestamps can be correlated with the timestamps on the photos to locate them on the map.',
'A library of digital resources, such as photos, documents and reports': 'A library of digital resources, such as photos, documents and reports',
'A place within a Site like a Shelf, room, bin number etc.': 'Một nơi trên site như số ngăn ,số phòng,số thùng v.v',
'A snapshot of the bin or additional documents that contain supplementary information about it can be uploaded here.': 'A snapshot of the bin or additional documents that contain supplementary information about it can be uploaded here.',
'A snapshot of the location or additional documents that contain supplementary information about the Site Location can be uploaded here.': 'A snapshot of the location or additional documents that contain supplementary information about the Site Location can be uploaded here.',
'A snapshot of the location or additional documents that contain supplementary information about the Site can be uploaded here.': 'Upload ảnh chụp vị trí hoặc tài liệu bổ sung chứa thông tin bổ sung về trang web tại đây',
'A survey series with id %s does not exist. Please go back and create one.': 'A survey series with id %s does not exist. Please go back and create one.',
'ABOUT': 'ABOUT',
'ABOUT THIS MODULE': 'Giới thiệu Module này',
'ACCESS DATA': 'ACCESS DATA',
'ANY': 'ANY',
'API is documented here': 'API is documented here',
'Ability to Fill Out Surveys': 'Ability to Fill Out Surveys',
'Ability to customize the list of details tracked at a Shelter': 'Ability to customize the list of details tracked at a Shelter',
'Ability to customize the list of human resource tracked at a Shelter': 'Khả năng tùy chỉnh danh sách nguồn nhân lực theo dõi tại nơi cư trú',
'Ability to customize the list of important facilities needed at a Shelter': 'Khả năng tùy chỉnh danh sách các điều kiện quan trọng cần thiết tại một cơ sở cư trú',
'Ability to track partial fulfillment of the request': 'Ability to track partial fulfillment of the request',
'Ability to view Results of Completed and/or partially filled out Surveys': 'Ability to view Results of Completed and/or partially filled out Surveys',
'About': 'About',
'About Sahana': 'About Sahana',
'About Sahana Eden': 'About Sahana Eden',
'About this module': 'About this module',
'Access denied': 'Access denied',
'Accessibility of Affected Location': 'Accessibility of Affected Location',
'Account registered, however registration is still pending approval - please wait until confirmation received.': 'Account registered, however registration is still pending approval - please wait until confirmation received.',
'Acronym': 'Acronym',
"Acronym of the organization's name, eg. IFRC.": "Acronym of the organization's name, eg. IFRC.",
'Actionable': 'Actionable',
'Actionable by all targeted recipients': 'Actionable by all targeted recipients',
'Actionable only by designated exercise participants; exercise identifier SHOULD appear in <note>': 'Actionable only by designated exercise participants; exercise identifier SHOULD appear in <note>',
'Actioned?': 'Actioned?',
'Active Problems': 'Active Problems',
'Activities': 'Activities',
'Activities matching Assessments:': 'Activities matching Assessments:',
'Activities of boys 13-17yrs before disaster': 'Activities of boys 13-17yrs before disaster',
'Activities of boys 13-17yrs now': 'Activities of boys 13-17yrs now',
'Activities of boys <12yrs before disaster': 'Activities of boys <12yrs before disaster',
'Activities of boys <12yrs now': 'Activities of boys <12yrs now',
'Activities of girls 13-17yrs before disaster': 'Activities of girls 13-17yrs before disaster',
'Activities of girls 13-17yrs now': 'Activities of girls 13-17yrs now',
'Activities of girls <12yrs before disaster': 'Activities of girls <12yrs before disaster',
'Activities of girls <12yrs now': 'Activities of girls <12yrs now',
'Activities:': 'Activities:',
'Activity': 'Activity',
'Activity Added': 'Activity Added',
'Activity Deleted': 'Activity Deleted',
'Activity Details': 'Chi tiết Hoạt động',
'Activity Report': 'Activity Report',
'Activity Reports': 'Activity Reports',
'Activity Type': 'Activity Type',
'Activity Updated': 'Activity Updated',
'Add': 'Thêm',
'Add Activity': 'Thêm hoạt động',
'Add Activity Report': 'Add Activity Report',
'Add Activity Type': 'Thêm loại hoạt động',
'Add Address': 'Add Address',
'Add Aid Request': 'Thêm yêu cầu cứu trợ',
'Add Assessment': 'Add Assessment',
'Add Assessment Summary': 'Add Assessment Summary',
'Add Baseline': 'Add Baseline',
'Add Baseline Type': 'Add Baseline Type',
'Add Bed Type': 'Add Bed Type',
'Add Bin Type': 'Add Bin Type',
'Add Bins': 'Thêm Bin',
'Add Budget': 'Add Budget',
'Add Bundle': 'Add Bundle',
'Add Catalog': 'Thêm Catalog',
'Add Catalog Item': 'Add Catalog Item',
'Add Catalog.': 'Add Catalog.',
'Add Category': 'Thêm nhóm',
'Add Category<>Sub-Category<>Catalog Relation': 'Add Category<>Sub-Category<>Catalog Relation',
'Add Cholera Treatment Capability Information': 'Add Cholera Treatment Capability Information',
'Add Cluster': 'Thêm cụm',
'Add Cluster Subsector': 'Add Cluster Subsector',
'Add Config': 'Add Config',
'Add Contact': 'Thêm thông tin liên lạc',
'Add Contact Information': 'Thêm thông tin liên lạc',
'Add Disaster Victims': 'Add Disaster Victims',
'Add Distribution': 'Add Distribution',
'Add Distribution.': 'Add Distribution.',
'Add Donor': 'Thêm tên người quyên góp vào danh sách',
'Add Feature Class': 'Add Feature Class',
'Add Feature Layer': 'Thêm lớp đặc tính',
'Add Flood Report': 'Add Flood Report',
'Add Group': 'Thêm nhóm',
'Add Group Member': 'Add Group Member',
'Add Hospital': 'Thêm Bệnh viện',
'Add Identification Report': 'Add Identification Report',
'Add Identity': 'Thêm thông tin định danh',
'Add Image': 'Thêm ảnh',
'Add Impact': 'Add Impact',
'Add Impact Type': 'Add Impact Type',
'Add Incident': 'Add Incident',
'Add Incident Report': 'Thêm Báo cáo sự việc',
'Add Inventory Item': 'Add Inventory Item',
'Add Inventory Store': 'Add Inventory Store',
'Add Item': 'Add Item',
'Add Item (s)': 'Add Item (s)',
'Add Item Catalog': 'Add Item Catalog',
'Add Item Catalog ': 'Add Item Catalog ',
'Add Item Catalog Category ': 'Thêm tiêu chí cho catalog hàng hóa',
'Add Item Category': 'Add Item Category',
'Add Item Packet': 'Add Item Packet',
'Add Item Sub-Category': 'Add Item Sub-Category',
'Add Key': 'Add Key',
'Add Kit': 'Thêm Kit',
'Add Layer': 'Thêm lớp',
'Add Location': 'Add Location',
'Add Locations': 'Thêm địa điểm mới',
'Add Log Entry': 'Add Log Entry',
'Add Member': 'Thêm thành viên',
'Add Membership': 'Add Membership',
'Add Message': 'Thêm Tin nhắn',
'Add Need': 'Add Need',
'Add Need Type': 'Add Need Type',
'Add New': 'Add New',
'Add New Activity': 'Add New Activity',
'Add New Address': 'Thêm Địa chỉ mới',
'Add New Aid Request': 'Thêm yêu cầu cứu trợ mới',
'Add New Assessment': 'Add New Assessment',
'Add New Assessment Summary': 'Add New Assessment Summary',
'Add New Baseline': 'Add New Baseline',
'Add New Baseline Type': 'Add New Baseline Type',
'Add New Bin': 'Add New Bin',
'Add New Bin Type': 'Add New Bin Type',
'Add New Budget': 'Add New Budget',
'Add New Bundle': 'Add New Bundle',
'Add New Catalog Item': 'Add New Catalog Item',
'Add New Cluster': 'Thêm cụm mới',
'Add New Cluster Subsector': 'Add New Cluster Subsector',
'Add New Config': 'Thêm cấu hình mới',
'Add New Contact': 'Thêm đầu mối liên lạc mới',
'Add New Distribution': 'Add New Distribution',
'Add New Distribution Item': 'Add New Distribution Item',
'Add New Document': 'Thêm Tài liệu mới',
'Add New Donor': 'Thêm Người quyên góp',
'Add New Entry': 'Add New Entry',
'Add New Feature Class': 'Add New Feature Class',
'Add New Feature Layer': 'Add New Feature Layer',
'Add New Flood Report': 'Thêm báo cáo lũ lụt mới',
'Add New Group': 'Thêm nhóm mới',
'Add New Hospital': 'Thêm Bệnh viện mới',
'Add New Identity': 'Thêm thông tin nhận dạng mới',
'Add New Image': 'Thêm ảnh mới',
'Add New Impact': 'Add New Impact',
'Add New Impact Type': 'Add New Impact Type',
'Add New Incident': 'Thêm sự kiện',
'Add New Incident Report': 'Add New Incident Report',
'Add New Inventory Item': 'Add New Inventory Item',
'Add New Inventory Store': 'Add New Inventory Store',
'Add New Item': 'Add New Item',
'Add New Item Catalog': 'Add New Item Catalog',
'Add New Item Catalog Category': 'Add New Item Catalog Category',
'Add New Item Category': 'Add New Item Category',
'Add New Item Packet': 'Add New Item Packet',
'Add New Item Sub-Category': 'Add New Item Sub-Category',
'Add New Item to Kit': 'Add New Item to Kit',
'Add New Key': 'Thêm Key mới ',
'Add New Kit': 'Thêm Kit mới',
'Add New Layer': 'Add New Layer',
'Add New Location': 'Add New Location',
'Add New Log Entry': 'Add New Log Entry',
'Add New Marker': 'Add New Marker',
'Add New Member': 'Thêm thành viên mới',
'Add New Membership': 'Add New Membership',
'Add New Metadata': 'Add New Metadata',
'Add New Need': 'Add New Need',
'Add New Need Type': 'Add New Need Type',
'Add New Office': 'Thêm văn phòng mới',
'Add New Organization': 'Thêm một tô chức mới',
'Add New Photo': 'Add New Photo',
'Add New Position': 'Add New Position',
'Add New Problem': 'Thêm vấn đề mới',
'Add New Project': 'Add New Project',
'Add New Projection': 'Add New Projection',
'Add New Rapid Assessment': 'Add New Rapid Assessment',
'Add New Received Item': 'Add New Received Item',
'Add New Record': 'Add New Record',
'Add New Report': 'Thêm báo cáo mới',
'Add New Request': 'Thêm yêu cầu mới',
'Add New Request Item': 'Thêm yêu cầu hàng hóa mới',
'Add New Resource': 'Thêm nguồn lực mới',
'Add New Response': 'Thêm phản hồi mới',
'Add New River': 'Add New River',
'Add New Role': 'Thêm vai trò mới',
'Add New Role to User': 'Gán vai trò mới cho người dùng',
'Add New Sector': 'Add New Sector',
'Add New Sent Item': 'Add New Sent Item',
'Add New Setting': 'Add New Setting',
'Add New Shelter': 'Thêm Nơi cư trú mới',
'Add New Shelter Service': 'Add New Shelter Service',
'Add New Shelter Type': 'Add New Shelter Type',
'Add New Site': 'Add New Site',
'Add New Skill': 'Thêm kỹ năng mới',
'Add New Skill Type': 'Add New Skill Type',
'Add New Solution': 'Add New Solution',
'Add New Staff': 'Add New Staff',
'Add New Staff Type': 'Add New Staff Type',
'Add New Storage Location': 'Thêm Vị trí kho lưu trữ mới',
'Add New Survey Answer': 'Add New Survey Answer',
'Add New Survey Question': 'Add New Survey Question',
'Add New Survey Section': 'Add New Survey Section',
'Add New Survey Series': 'Add New Survey Series',
'Add New Survey Template': 'Thêm mẫu khảo sát mới',
'Add New Task': 'Thêm một công việc mới',
'Add New Team': 'Thêm đội mới',
'Add New Theme': 'Add New Theme',
'Add New Ticket': 'Add New Ticket',
'Add New Track': 'Add New Track',
'Add New Unit': 'Thêm đơn vị mới',
'Add New User': 'Thêm người dùng mới',
'Add New User to Role': 'Add New User to Role',
'Add New Warehouse': 'Add New Warehouse',
'Add New Warehouse Item': 'Add New Warehouse Item',
'Add Office': 'Thêm thông tin văn phòng',
'Add Organization': 'Thêm Tổ chức',
'Add Peer': 'Add Peer',
'Add Person': 'Thêm cá nhân',
'Add Personal Effects': 'Add Personal Effects',
'Add Photo': 'Add Photo',
'Add Position': 'Add Position',
'Add Problem': 'Add Problem',
'Add Project': 'Thêm dự án',
'Add Projection': 'Add Projection',
'Add Question': 'Add Question',
'Add Rapid Assessment': 'Add Rapid Assessment',
'Add Recipient': 'Thêm người nhận viện trợ',
'Add Recipient Site': 'Add Recipient Site',
'Add Recipient Site.': 'Add Recipient Site.',
'Add Record': 'Add Record',
'Add Recovery Report': 'Add Recovery Report',
'Add Reference Document': 'Add Reference Document',
'Add Report': 'Add Report',
'Add Request': 'Thêm yêu cầu',
'Add Request Detail': 'thêm chi tiết yêu cầu',
'Add Request Item': 'Thêm yêu cầu hàng hóa',
'Add Resource': 'Thêm tại nguyên',
'Add Response': 'Add Response',
'Add River': 'Add River',
'Add Role': 'Add Role',
'Add Section': 'Add Section',
'Add Sector': 'Thêm lĩnh vực',
'Add Sender Organization': 'Thêm tổ chức gửi',
'Add Sender Site': 'Add Sender Site',
'Add Sender Site.': 'Add Sender Site.',
'Add Service Profile': 'Add Service Profile',
'Add Setting': 'Add Setting',
'Add Shelter': 'Add Shelter',
'Add Shelter Service': 'Add Shelter Service',
'Add Shelter Type': 'Add Shelter Type',
'Add Shipment Transit Log': 'Add Shipment Transit Log',
'Add Shipment/Way Bills': 'Add Shipment/Way Bills',
'Add Site': 'Add Site',
'Add Skill': 'Thêm kỹ năng',
'Add Skill Type': 'Thêm loại kỹ năng',
'Add Skill Types': 'Thêm loại kỹ năng',
'Add Solution': 'Add Solution',
'Add Staff': 'Add Staff',
'Add Staff Type': 'Add Staff Type',
'Add Status': 'Add Status',
'Add Storage Bin ': 'Add Storage Bin ',
'Add Storage Bin Type': 'Add Storage Bin Type',
'Add Storage Location': 'Add Storage Location',
'Add Storage Location ': 'Add Storage Location ',
'Add Sub-Category': 'Thêm danh mục cấp dưới',
'Add Subscription': 'Add Subscription',
'Add Survey Answer': 'Thêm trả lời khảo sát',
'Add Survey Question': 'Thêm câu hỏi khảo sát',
'Add Survey Section': 'Add Survey Section',
'Add Survey Series': 'Add Survey Series',
'Add Survey Template': 'Thêm mẫu khảo sát',
'Add Task': 'Add Task',
'Add Team': 'Thêm đội',
'Add Theme': 'Add Theme',
'Add Ticket': 'Add Ticket',
'Add Unit': 'Thêm đơn vị',
'Add User': 'Thêm người dùng',
'Add Volunteer': 'Add Volunteer',
'Add Volunteer Registration': 'Thêm Đăng ký tình nguyện viên',
'Add Warehouse': 'Add Warehouse',
'Add Warehouse Item': 'Add Warehouse Item',
'Add a Person': 'Add a Person',
'Add a Reference Document such as a file, URL or contact person to verify this data. If you do not enter a Reference Document, your email will be displayed instead.': 'Add a Reference Document such as a file, URL or contact person to verify this data. If you do not enter a Reference Document, your email will be displayed instead.',
'Add a Volunteer': 'Add a Volunteer',
'Add a new Site from where the Item is being sent.': 'Thêm Site nơi gửi hàng hóa đến ',
'Add a new Site where the Item is being sent to.': 'Add a new Site where the Item is being sent to.',
'Add an Photo.': 'Add an Photo.',
'Add main Item Category.': 'Add main Item Category.',
'Add main Item Sub-Category.': 'Add main Item Sub-Category.',
'Add new Group': 'Add new Group',
'Add new Individual': 'Add new Individual',
'Add new position.': 'Thêm địa điểm mới',
'Add new project.': 'Thêm dự án mới',
'Add new staff role.': 'Thêm vai trò nhân viên mới',
'Add the Storage Bin Type.': 'Add the Storage Bin Type.',
'Add the Storage Location where this bin is located.': 'Add the Storage Location where this bin is located.',
'Add the Storage Location where this this Bin belongs to.': 'Thêm vị trí kho lưu trữ chứa Bin này',
'Add the main Warehouse/Site information where this Bin belongs to.': 'Add the main Warehouse/Site information where this Bin belongs to.',
'Add the main Warehouse/Site information where this Item is to be added.': 'Thêm thông tin Nhà kho/Site chứa hàng hóa đã được nhập thông tin',
'Add the main Warehouse/Site information where this Storage location is.': 'Add the main Warehouse/Site information where this Storage location is.',
'Add the unit of measure if it doesnt exists already.': 'Add the unit of measure if it doesnt exists already.',
'Add to Bundle': 'Add to Bundle',
'Add to Catalog': 'Add to Catalog',
'Add to budget': 'Add to budget',
'Add/Edit/Remove Layers': 'Thêm/Sửa/Xóa các lớp',
'Additional Beds / 24hrs': 'Additional Beds / 24hrs',
'Additional Comments': 'Additional Comments',
"Additional quantity quantifier – e.g. '4x5'.": "Additional quantity quantifier – e.g. '4x5'.",
'Address': 'Địa chỉ',
'Address Details': 'Address Details',
'Address Type': 'Loại địa chỉ',
'Address added': 'Địa chỉ đã được thêm',
'Address deleted': 'Đã xóa địa chỉ',
'Address updated': 'Address updated',
'Addresses': 'Các địa chỉ',
'Adequate': 'Adequate',
'Adequate food and water available': 'Adequate food and water available',
'Adjust Item(s) Quantity': 'Adjust Item(s) Quantity',
'Adjust Items due to Theft/Loss': 'Adjust Items due to Theft/Loss',
'Admin': 'Quản trị viên',
'Admin Email': 'Email của quản trị viên',
'Admin Name': 'Tên quản trị viên',
'Admin Tel': 'Số điện thoại của Quản trị viên',
'Administration': 'Quản trị',
'Administrator': 'Quản trị viên',
'Admissions/24hrs': 'Admissions/24hrs',
'Adolescent (12-20)': 'Adolescent (12-20)',
'Adolescent participating in coping activities': 'Adolescent participating in coping activities',
'Adult (21-50)': 'Adult (21-50)',
'Adult ICU': 'Adult ICU',
'Adult Psychiatric': 'Bệnh nhân tâm thần',
'Adult female': 'Nữ giới',
'Adult male': 'Adult male',
'Adults in prisons': 'Adults in prisons',
'Advanced Bin Search': 'Advanced Bin Search',
'Advanced Catalog Search': 'Tìm kiếm danh mục nâng cao',
'Advanced Category Search': 'Tìm kiếm danh mục nâng cao',
'Advanced Item Search': 'Advanced Item Search',
'Advanced Location Search': 'Tìm kiếm vị trí nâng cao',
'Advanced Site Search': 'Advanced Site Search',
'Advanced Sub-Category Search': 'Advanced Sub-Category Search',
'Advanced Unit Search': 'Advanced Unit Search',
'Advanced:': 'Advanced:',
'Advisory': 'Advisory',
'After clicking on the button, a set of paired items will be shown one by one. Please select the one solution from each pair that you prefer over the other.': 'After clicking on the button, a set of paired items will be shown one by one. Please select the one solution from each pair that you prefer over the other.',
'Age Group': 'Nhóm tuổi',
'Age group': 'Age group',
'Age group does not match actual age.': 'Nhóm tuổi không phù hợp với tuổi thật',
'Aggravating factors': 'Aggravating factors',
'Aggregate Items': 'Aggregate Items',
'Agriculture': 'Agriculture',
'Aid Request': 'Yêu cầu cứu trợ',
'Aid Request Details': 'Chi tiết yêu cầu cứu trợ',
'Aid Request added': 'Đã thêm yêu cầu viện trợ',
'Aid Request deleted': 'Đã xóa yêu cầu cứu trợ',
'Aid Request updated': 'Đã cập nhật Yêu cầu cứu trợ',
'Aid Requests': 'yêu cầu cứu trợ',
'Air Transport Service': 'Air Transport Service',
'Air tajin': 'Air tajin',
'Aircraft Crash': 'Aircraft Crash',
'Aircraft Hijacking': 'Aircraft Hijacking',
'Airport Closure': 'Đóng cửa sân bay',
'Airspace Closure': 'Airspace Closure',
'Alcohol': 'Alcohol',
'Alert': 'Alert',
'All': 'All',
'All Inbound & Outbound Messages are stored here': 'All Inbound & Outbound Messages are stored here',
'All Locations': 'All Locations',
'All Requested Items': 'Hàng hóa được yêu cầu',
'All Resources': 'All Resources',
'All data provided by the Sahana Software Foundation from this site is licenced under a Creative Commons Attribution licence. However, not all data originates here. Please consult the source field of each entry.': 'All data provided by the Sahana Software Foundation from this site is licenced under a Creative Commons Attribution licence. However, not all data originates here. Please consult the source field of each entry.',
'Allowed to push': 'Cho phép bấm nút',
'Allows a Budget to be drawn up': 'Allows a Budget to be drawn up',
'Allows authorized users to control which layers are available to the situation map.': 'Cho phép người dùng đã đăng nhập kiểm soát layer nào phù hợp với bản đồ tình huống',
'Alternative infant nutrition in use': 'Alternative infant nutrition in use',
'Alternative places for studying': 'Alternative places for studying',
'Alternative places for studying available': 'Alternative places for studying available',
'Ambulance Service': 'Ambulance Service',
'An intake system, a warehouse management system, commodity tracking, supply chain management, procurement and other asset and resource management capabilities.': 'An intake system, a warehouse management system, commodity tracking, supply chain management, procurement and other asset and resource management capabilities.',
'Analysis of Completed Surveys': 'Analysis of Completed Surveys',
'Animal Die Off': 'Animal Die Off',
'Animal Feed': 'Animal Feed',
'Animals': 'Animals',
'Answer Choices (One Per Line)': 'Chọn câu trả lời',
'Anthropolgy': 'Anthropolgy',
'Antibiotics available': 'Antibiotics available',
'Antibiotics needed per 24h': 'Antibiotics needed per 24h',
'Any available Metadata in the files will be read automatically, such as Timestamp, Author, Latitude & Longitude.': 'Thông tin có sẵn trong file như Timestamp,Tác giả, Kinh độ, Vĩ độ sẽ được đọc tự động',
'Apparent Age': 'Apparent Age',
'Apparent Gender': 'Apparent Gender',
'Appropriate clothing available': 'Appropriate clothing available',
'Appropriate cooking equipment/materials in HH': 'Appropriate cooking equipment/materials in HH',
'Approx. number of cases/48h': 'Approx. number of cases/48h',
'Approximately how many children under 5 with diarrhea in the past 48 hours?': 'Approximately how many children under 5 with diarrhea in the past 48 hours?',
'Archive not Delete': 'Archive not Delete',
'Arctic Outflow': 'Arctic Outflow',
'Are basic medical supplies available for health services since the disaster?': 'Are basic medical supplies available for health services since the disaster?',
'Are breast milk substitutes being used here since the disaster?': 'Are breast milk substitutes being used here since the disaster?',
'Are the areas that children, older people, and people with disabilities live in, play in and walk through on a daily basis physically safe?': 'Are the areas that children, older people, and people with disabilities live in, play in and walk through on a daily basis physically safe?',
'Are the chronically ill receiving sufficient care and assistance?': 'Are the chronically ill receiving sufficient care and assistance?',
'Are there adults living in prisons in this area?': 'Are there adults living in prisons in this area?',
'Are there alternative places for studying?': 'Are there alternative places for studying?',
'Are there cases of diarrhea among children under the age of 5?': 'Are there cases of diarrhea among children under the age of 5?',
'Are there children living in adult prisons in this area?': 'Are there children living in adult prisons in this area?',
'Are there children living in boarding schools in this area?': 'Are there children living in boarding schools in this area?',
'Are there children living in homes for disabled children in this area?': 'Are there children living in homes for disabled children in this area?',
'Are there children living in juvenile detention in this area?': 'Are there children living in juvenile detention in this area?',
'Are there children living in orphanages in this area?': 'Are there children living in orphanages in this area?',
'Are there children with chronical illnesses in your community?': 'Are there children with chronical illnesses in your community?',
'Are there health services functioning for the community since the disaster?': 'Are there health services functioning for the community since the disaster?',
'Are there older people living in care homes in this area?': 'Are there older people living in care homes in this area?',
'Are there older people with chronical illnesses in your community?': 'Are there older people with chronical illnesses in your community?',
'Are there people with chronical illnesses in your community?': 'Are there people with chronical illnesses in your community?',
'Are there separate latrines for women and men available?': 'Are there separate latrines for women and men available?',
'Are there staff present and caring for the residents in these institutions?': 'Are there staff present and caring for the residents in these institutions?',
'Area': 'Area',
'Assessment': 'Assessment',
'Assessment Details': 'Assessment Details',
'Assessment Reported': 'Assessment Reported',
'Assessment Summaries': 'Assessment Summaries',
'Assessment Summary Details': 'Assessment Summary Details',
'Assessment Summary added': 'Assessment Summary added',
'Assessment Summary deleted': 'Assessment Summary deleted',
'Assessment Summary updated': 'Assessment Summary updated',
'Assessment Type': 'Assessment Type',
'Assessment added': 'Assessment added',
'Assessment deleted': 'Assessment deleted',
'Assessment updated': 'Đã cập nhật Trị giá tính thuế',
'Assessments': 'Assessments',
'Assessments Needs vs. Activities': 'Assessments Needs vs. Activities',
'Assessments and Activities': 'Assessments and Activities',
'Assessments are structured reports done by Professional Organizations': 'Assessments are structured reports done by Professional Organizations',
'Assessments are structured reports done by Professional Organizations - data includes WFP Assessments': 'Assessments are structured reports done by Professional Organizations - data includes WFP Assessments',
'Assessments:': 'Assessments:',
'Assessor': 'Assessor',
'Assign Storage Location': 'Assign Storage Location',
'Assign to Org.': 'Assign to Org.',
'Assigned': 'Assigned',
'Assigned To': 'Assigned To',
'Assigned to': 'Assigned to',
'Assistance for immediate repair/reconstruction of houses': 'Assistance for immediate repair/reconstruction of houses',
'Assistant': 'Assistant',
'At/Visited Location (not virtual)': 'At/Visited Location (not virtual)',
'Attend to information sources as described in <instruction>': 'Attend to information sources as described in <instruction>',
'Attribution': 'Attribution',
'Audit Read': 'Audit Read',
'Audit Write': 'Audit Write',
"Authenticate system's Twitter account": "Authenticate system's Twitter account",
'Author': 'Author',
'Automotive': 'Automotive',
'Available Beds': 'Available Beds',
'Available Messages': 'Available Messages',
'Available Records': 'Available Records',
'Available databases and tables': 'Available databases and tables',
'Available from': 'Available from',
'Available in Viewer?': 'Available in Viewer?',
'Available until': 'Sẵn sàng cho đến khi',
'Availablity': 'Availablity',
'Avalanche': 'Avalanche',
'Avoid the subject event as per the <instruction>': 'Avoid the subject event as per the <instruction>',
'Babies who are not being breastfed, what are they being fed on?': 'Babies who are not being breastfed, what are they being fed on?',
'Baby And Child Care': 'Chăm sóc trẻ em',
'Background Colour': 'Background Colour',
'Background Colour for Text blocks': 'Background Colour for Text blocks',
'Bahai': 'Bahai',
'Baldness': 'Baldness',
'Balochi': 'Balochi',
'Banana': 'Banana',
'Bank/micro finance': 'Tài chính Ngân hàng',
'Base Layer?': 'Base Layer?',
'Base Unit': 'Đơn vị cơ sở',
'Baseline Number of Beds': 'Baseline Number of Beds',
'Baseline Type': 'Baseline Type',
'Baseline Type Details': 'Baseline Type Details',
'Baseline Type added': 'Baseline Type added',
'Baseline Type deleted': 'Baseline Type deleted',
'Baseline Type updated': 'Baseline Type updated',
'Baseline Types': 'Baseline Types',
'Baseline added': 'Baseline added',
'Baseline deleted': 'Baseline deleted',
'Baseline number of beds of that type in this unit.': 'Baseline number of beds of that type in this unit.',
'Baseline updated': 'Baseline updated',
'Baselines': 'Baselines',
'Baselines Details': 'Baselines Details',
'Basic Assessment': 'Basic Assessment',
'Basic Assessment Reported': 'Basic Assessment Reported',
'Basic Details': 'Basic Details',
'Basic information on the requests and donations, such as category, the units, contact details and the status.': 'Thông tin cơ bản về các yêu cầu và quyên góp như thể loại, tên đơn vị, chi tiết liên lạc và tình trạng',
'Basic medical supplies available prior to disaster': 'Basic medical supplies available prior to disaster',
'Basic medical supplies available since disaster': 'Basic medical supplies available since disaster',
'Basic reports on the Shelter and drill-down by region': 'Báo cáo cơ bản về nơi cư trú và báo cáo chi tiết theo vùng',
'Baud': 'Bốt',
'Baud rate to use for your modem - The default is safe for most cases': 'Baud rate to use for your modem - The default is safe for most cases',
'Bed Capacity': 'Bed Capacity',
'Bed Capacity per Unit': 'Bed Capacity per Unit',
'Bed Type': 'Loại Giường',
'Bed type already registered': 'Bed type already registered',
'Bedding materials available': 'Bedding materials available',
'Beneficiary Type': 'Beneficiary Type',
'Biological Hazard': 'Biological Hazard',
'Biscuits': 'Biscuits',
'Blizzard': 'Blizzard',
'Blood Type (AB0)': 'Blood Type (AB0)',
'Blowing Snow': 'Gió tuyết đang thổi',
'Boat': 'Boat',
'Bodies found': 'Bodies found',
'Bodies recovered': 'Bodies recovered',
'Body': 'Body',
'Body Recovery Reports': 'Body Recovery Reports',
'Body Recovery Request': 'Body Recovery Request',
'Body Recovery Requests': 'Body Recovery Requests',
'Bomb': 'Bomb',
'Bomb Explosion': 'Nổ bom',
'Bomb Threat': 'Bomb Threat',
'Border Colour for Text blocks': 'Màu viền cho khối văn bản',
'Boys 13-18 yrs in affected area': 'Boys 13-18 yrs in affected area',
'Boys 13-18 yrs not attending school': 'Boys 13-18 yrs not attending school',
'Boys 6-12 yrs in affected area': 'Boys 6-12 yrs in affected area',
'Boys 6-12 yrs not attending school': 'Boys 6-12 yrs not attending school',
'Breast milk substitutes in use since disaster': 'Breast milk substitutes in use since disaster',
'Breast milk substitutes used prior to disaster': 'Breast milk substitutes used prior to disaster',
'Bricks': 'Bricks',
'Bridge Closed': 'Bridge Closed',
'Bucket': 'Bucket',
'Buddhist': 'Người theo đạo Phật',
'Budget': 'Ngân sách',
'Budget Details': 'Budget Details',
'Budget Updated': 'Budget Updated',
'Budget added': 'Budget added',
'Budget deleted': 'Budget deleted',
'Budget updated': 'Budget updated',
'Budgeting Module': 'Budgeting Module',
'Budgets': 'Ngân sách',
'Buffer': 'Buffer',
'Building Aide': 'Building Aide',
'Building Collapsed': 'Sập nhà',
'Built using the Template agreed by a group of NGOs working together as the': 'Built using the Template agreed by a group of NGOs working together as the',
'Bulk Uploader': 'Bulk Uploader',
'Bundle': 'Bundle',
'Bundle Contents': 'Bundle Contents',
'Bundle Details': 'Bundle Details',
'Bundle Updated': 'Cập nhật Bundle',
'Bundle added': 'Bundle added',
'Bundle deleted': 'Bundle deleted',
'Bundle updated': 'Bundle updated',
'Bundles': 'Bundles',
'Burn': 'Burn',
'Burn ICU': 'Burn ICU',
'Burned/charred': 'Burned/charred',
'Business damaged': 'Business damaged',
'By Warehouse': 'By Warehouse',
'CBA Women': 'CBA Women',
'CSS file %s not writable - unable to apply theme!': 'không viết được file CSS %s - không thể áp dụng chủ đề',
'Calculate': 'Tính toán',
'Camp': 'Camp',
'Camp Coordination/Management': 'Camp Coordination/Management',
'Can users register themselves for authenticated login access?': 'Can users register themselves for authenticated login access?',
"Can't import tweepy": "Can't import tweepy",
'Cancel': 'Cancel',
'Cancelled': 'Cancelled',
'Candidate Matches for Body %s': 'Candidate Matches for Body %s',
'Canned Fish': 'Canned Fish',
'Cannot be empty': 'Cannot be empty',
'Cannot delete whilst there are linked records. Please delete linked records first.': 'Không xóa được khi đang có bản thu liên quan.Hãy xóa bản thu trước',
'Capacity (Max Persons)': 'Capacity (Max Persons)',
'Capacity (W x D X H)': 'Capacity (W x D X H)',
'Capture Information on Disaster Victim groups (Tourists, Passengers, Families, etc.)': 'Nắm bắt thông tin của các nạn nhân chịu ảnh hưởng của thiên tai(Khách du lịch,Gia đình...)',
'Capture Information on each disaster victim': 'Capture Information on each disaster victim',
'Capturing organizational information of a relief organization and all the projects they have in the region': 'Capturing organizational information of a relief organization and all the projects they have in the region',
'Capturing the essential services each Volunteer is providing and where': 'Capturing the essential services each Volunteer is providing and where',
'Capturing the projects each organization is providing and where': 'Capturing the projects each organization is providing and where',
'Cardiology': 'Bệnh tim mạch',
'Cash available to restart business': 'Cash available to restart business',
'Cassava': 'Cassava',
'Casual Labor': 'Nhân công thời vụ',
'Catalog': 'Catalog',
'Catalog Item': 'Catalog Item',
'Catalog Item added': 'Catalog Item added',
'Catalog Item deleted': 'Catalog Item deleted',
'Catalog Item updated': 'Catalog Item updated',
'Catalog Items': 'Catalog Items',
'Catalog Name': 'Catalog Name',
'Category': 'Category',
'Category<>Sub-Category<>Catalog Relation': 'Category<>Sub-Category<>Catalog Relation',
'Category<>Sub-Category<>Catalog Relation added': 'Category<>Sub-Category<>Catalog Relation added',
'Category<>Sub-Category<>Catalog Relation deleted': 'Category<>Sub-Category<>Catalog Relation deleted',
'Category<>Sub-Category<>Catalog Relation updated': 'Category<>Sub-Category<>Catalog Relation updated',
'Central point to record details on People': 'Central point to record details on People',
'Change Password': 'Change Password',
'Check for errors in the URL, maybe the address was mistyped.': 'Kiểm tra lỗi URL, có lẽ địa chỉ URL bị gõ sai.',
'Check if the URL is pointing to a directory instead of a webpage.': 'Kiểm tra URL trỏ về thư mục hay trang web',
'Check outbox for the message status': 'Check outbox for the message status',
'Check to delete': 'Check to delete',
'Checklist': 'Checklist',
'Checklist created': 'Checklist created',
'Checklist deleted': 'Checklist deleted',
'Checklist of Operations': 'Checklist of Operations',
'Checklist updated': 'Checklist updated',
'Chemical Hazard': 'Chemical Hazard',
'Chemical, Biological, Radiological, Nuclear or High-Yield Explosive threat or attack': 'Chemical, Biological, Radiological, Nuclear or High-Yield Explosive threat or attack',
'Chicken': 'Chicken',
'Child': 'Child',
'Child (2-11)': 'Child (2-11)',
'Child (< 18 yrs)': 'Child (< 18 yrs)',
'Child Abduction Emergency': 'Child Abduction Emergency',
'Child headed households (<18 yrs)': 'Child headed households (<18 yrs)',
'Children (2-5 years)': 'Children (2-5 years)',
'Children (5-15 years)': 'Children (5-15 years)',
'Children (< 2 years)': 'Trẻ em (dưới 2 tuổi)',
'Children in adult prisons': 'Children in adult prisons',
'Children in boarding schools': 'Children in boarding schools',
'Children in homes for disabled children': 'Children in homes for disabled children',
'Children in juvenile detention': 'Children in juvenile detention',
'Children in orphanages': 'Children in orphanages',
'Children living on their own (without adults)': 'Children living on their own (without adults)',
'Children not enrolled in new school': 'Children not enrolled in new school',
'Children orphaned by the disaster': 'Children orphaned by the disaster',
'Children separated from their parents/caregivers': 'Children separated from their parents/caregivers',
'Children that have been sent to safe places': 'Children that have been sent to safe places',
'Children who have disappeared since the disaster': 'Children who have disappeared since the disaster',
'Children with chronical illnesses': 'Children with chronical illnesses',
'Chinese (Taiwan)': 'Chinese (Taiwan)',
'Cholera Treatment': 'Cholera Treatment',
'Cholera Treatment Capability': 'Cholera Treatment Capability',
'Cholera Treatment Center': 'Cholera Treatment Center',
'Cholera-Treatment-Center': 'Cholera-Treatment-Center',
'Choosing Skill and Resources of Volunteers': 'Choosing Skill and Resources of Volunteers',
'Christian': 'Christian',
'Church': 'Church',
'Circumstances of disappearance, other victims/witnesses who last saw the missing person alive.': 'Hoàn cảnh mất tích, những nhân chứng nhìn thấy lần gần đây nhất nạn nhân còn sống',
'City': 'City',
'Civil Emergency': 'Civil Emergency',
'Clear Selection': 'Clear Selection',
"Click on 'Pledge' button in the left-hand column to make a Pledge to match a request for aid.": "Click on 'Pledge' button in the left-hand column to make a Pledge to match a request for aid.",
'Click on the link ': 'Click on the link ',
'Client IP': 'Client IP',
'Clinical Laboratory': 'Clinical Laboratory',
'Clinical Operations': 'Clinical Operations',
'Clinical Status': 'Clinical Status',
'Close map': 'Close map',
'Closed': 'Đã đóng',
'Closure': 'Closure',
'Clothing': 'Clothing',
'Cluster': 'Cluster',
'Cluster Details': 'Cluster Details',
'Cluster Distance': 'Cluster Distance',
'Cluster Subsector': 'Cluster Subsector',
'Cluster Subsector Details': 'Cluster Subsector Details',
'Cluster Subsector added': 'Cluster Subsector added',
'Cluster Subsector deleted': 'Cluster Subsector deleted',
'Cluster Subsector updated': 'Cluster Subsector updated',
'Cluster Subsectors': 'Cluster Subsectors',
'Cluster Threshold': 'Cluster Threshold',
'Cluster added': 'Đã thêm cụm',
'Cluster deleted': 'Cluster deleted',
'Cluster updated': 'Cluster updated',
'Cluster(s)': 'Cluster(s)',
'Clusters': 'Clusters',
'Code': 'Mã',
'Cold Wave': 'Cold Wave',
'Collective center': 'Collective center',
'Colour for Underline of Subheadings': 'Colour for Underline of Subheadings',
'Colour of Buttons when hovering': 'Colour of Buttons when hovering',
'Colour of bottom of Buttons when not pressed': 'Colour of bottom of Buttons when not pressed',
'Colour of bottom of Buttons when pressed': 'Colour of bottom of Buttons when pressed',
'Colour of dropdown menus': 'Colour of dropdown menus',
'Colour of selected Input fields': 'Màu của trường đã được chọn',
'Colour of selected menu items': 'Colour of selected menu items',
'Column Choices (One Per Line': 'Chọn cột',
'Combined Method': 'Combined Method',
'Come back later.': 'Come back later.',
'Come back later. Everyone visiting this site is probably experiencing the same problem as you.': 'Come back later. Everyone visiting this site is probably experiencing the same problem as you.',
'Comments': 'Bình luận',
'Commiting a changed spreadsheet to the database': 'Commiting a changed spreadsheet to the database',
'Communication problems': 'Communication problems',
'Community Centre': 'Community Centre',
'Community Health Center': 'Trung tâm sức khỏe cộng đồng',
'Community Member': 'Thành viên cộng đồng',
'Complete Unit Label for e.g. meter for m.': 'hoàn thành các bản đơn vị, ví dụ đơn vị của mét là m',
'Completed': 'Completed',
'Complexion': 'Complexion',
'Compose': 'Compose',
'Compromised': 'Compromised',
'Config': 'Tùy chỉnh',
'Config added': 'Cấu hình đã được thêm',
'Config deleted': 'Config deleted',
'Config updated': 'Cập nhật tùy chỉnh',
'Configs': 'Configs',
'Configure Run-time Settings': 'Configure Run-time Settings',
'Confirmed': 'Confirmed',
'Confirmed Incidents': 'Confirmed Incidents',
'Conflict Details': 'Conflict Details',
'Conflict Resolution': 'Conflict Resolution',
'Consumable': 'Consumable',
'Contact': 'Contact',
'Contact Data': 'Dữ liệu liên lạc',
'Contact Details': 'Contact Details',
'Contact Information': 'Contact Information',
'Contact Method': 'Contact Method',
'Contact Person': 'Contact Person',
'Contact details': 'Contact details',
'Contact information added': 'Contact information added',
'Contact information deleted': 'Contact information deleted',
'Contact information updated': 'Contact information updated',
'Contact person in case of news or further questions (if different from reporting person). Include telephone number, address and email as available.': 'Contact person in case of news or further questions (if different from reporting person). Include telephone number, address and email as available.',
'Contact person(s) in case of news or further questions (if different from reporting person). Include telephone number, address and email as available.': 'Contact person(s) in case of news or further questions (if different from reporting person). Include telephone number, address and email as available.',
'Contact us': 'Contact us',
'Contacts': 'Contacts',
'Contents': 'Contents',
'Contradictory values!': 'Contradictory values!',
'Contributor': 'Người đóng góp',
'Conversion Tool': 'Conversion Tool',
'Cooking NFIs': 'Cooking NFIs',
'Cooking Oil': 'Cooking Oil',
'Coordinate Conversion': 'Coordinate Conversion',
'Copy': 'Copy',
'Copy any data from the one to be deleted into the one to keep': 'Copy any data from the one to be deleted into the one to keep',
'Corn': 'Corn',
'Cost Type': 'Cost Type',
'Cost per Megabyte': 'Cost per Megabyte',
'Cost per Minute': 'Cost per Minute',
"Couldn't import tweepy library": "Couldn't import tweepy library",
'Country': 'Country',
'Country of Residence': 'Country of Residence',
'Create & manage Distribution groups to receive Alerts': 'Create & manage Distribution groups to receive Alerts',
'Create Checklist': 'Create Checklist',
'Create Group Entry': 'Create Group Entry',
'Create Impact Assessment': 'Create Impact Assessment',
'Create Import Job': 'Create Import Job',
'Create Mobile Impact Assessment': 'Create Mobile Impact Assessment',
'Create New Import Job': 'Create New Import Job',
'Create Rapid Assessment': 'Create Rapid Assessment',
'Create Request': 'Khởi tạo yêu cầu',
'Create Task': 'Create Task',
'Create a group entry in the registry.': 'Create a group entry in the registry.',
'Create, enter, and manage surveys.': 'Create, enter, and manage surveys.',
'Creation of Surveys': 'Creation of Surveys',
'Crime': 'Tội phạm',
'Criteria': 'Criteria',
'Currency': 'Currency',
'Current Group Members': 'Nhóm thành viên hiện tại',
'Current Identities': 'Current Identities',
'Current Location': 'Current Location',
'Current Log Entries': 'Current Log Entries',
'Current Memberships': 'Thành viên hiện tại',
'Current Registrations': 'Current Registrations',
'Current Status': 'Current Status',
'Current Team Members': 'Current Team Members',
'Current Twitter account': 'Tài khoản Twitter hiện tại',
'Current greatest needs of vulnerable groups': 'Current greatest needs of vulnerable groups',
'Current main income sources': 'Current main income sources',
'Current major expenses': 'Current major expenses',
'Current number of patients': 'Current number of patients',
'Current problems, categories': 'Current problems, categories',
'Current problems, details': 'Current problems, details',
'Current request': 'Yêu cầu hiện tại',
'Current response': 'Current response',
'Current session': 'Current session',
'Current type of health problems, adults': 'Current type of health problems, adults',
'Current type of health problems, children': 'Current type of health problems, children',
'Current type of source for drinking water': 'Current type of source for drinking water',
'Current type of source for sanitary water': 'Current type of source for sanitary water',
'Custom Database Resource (e.g., anything defined as a resource in Sahana)': 'Custom Database Resource (e.g., anything defined as a resource in Sahana)',
'Customisable category of aid': 'Các tiêu chí cứu trợ có thể tùy chỉnh',
'DECISION': 'DECISION',
'DNA Profile': 'DNA Profile',
'DNA Profiling': 'DNA Profiling',
'Daily': 'Hàng ngày',
'Dam Overflow': 'Tràn đập',
'Dangerous Person': 'Người nguy hiểm',
'Data uploaded': 'Đã cập nhật dữ liệu',
'Database': 'Database',
'Date': 'Date',
'Date & Time': 'Date & Time',
'Date Requested': 'Date Requested',
'Date Required': 'Date Required',
'Date and Time': 'Date and Time',
'Date and Time of Goods receipt. By default shows the current time but can be modified by editing in the drop down list.': 'Ngày giờ nhận hàng hóa.Hiển thị thời gian theo mặc định nhưng vẫn có thể chỉnh sửa',
'Date and time this report relates to.': 'Date and time this report relates to.',
'Date of Birth': 'Date of Birth',
'Date of Latest Information on Beneficiaries Reached': 'Date of Latest Information on Beneficiaries Reached',
'Date of Report': 'Ngày báo cáo',
'Date/Time': 'Ngày/Giờ',
'Date/Time of Find': 'Ngày giờ tìm kiếm',
'Date/Time of disappearance': 'Date/Time of disappearance',
'De-duplicator': 'De-duplicator',
'Dead Body Details': 'Dead Body Details',
'Dead Body Reports': 'Báo cáo thiệt hại về người',
'Deaths in the past 24h': 'Deaths in the past 24h',
'Deaths/24hrs': 'Số người chết/24h',
'Debug': 'Debug',
'Decimal Degrees': 'Độ âm',
'Decomposed': 'Decomposed',
'Default Height of the map window. In Window layout the map maximises to fill the window, so no need to set a large value here.': 'Default Height of the map window. In Window layout the map maximises to fill the window, so no need to set a large value here.',
'Default Marker': 'Default Marker',
'Default Width of the map window. In Window layout the map maximises to fill the window, so no need to set a large value here.': 'Default Width of the map window. In Window layout the map maximises to fill the window, so no need to set a large value here.',
'Default synchronization policy': 'Chính sách đồng bộ hóa mặc định',
'Defaults': 'Defaults',
'Defaults updated': 'Defaults updated',
'Defecation area for animals': 'Defecation area for animals',
'Defines the icon used for display of features on handheld GPS.': 'Defines the icon used for display of features on handheld GPS.',
'Defines the icon used for display of features on interactive map & KML exports. A Marker assigned to an individual Location is set if there is a need to override the Marker assigned to the Feature Class. If neither are defined, then the Default Marker is used.': 'Defines the icon used for display of features on interactive map & KML exports. A Marker assigned to an individual Location is set if there is a need to override the Marker assigned to the Feature Class. If neither are defined, then the Default Marker is used.',
'Defines the marker used for display & the attributes visible in the popup.': 'Defines the marker used for display & the attributes visible in the popup.',
'Degrees must be a number between -180 and 180': 'Degrees must be a number between -180 and 180',
'Dehydration': 'Dehydration',
'Delete': 'Delete',
'Delete Aid Request': 'Xóa yêu cầu cứu trợ',
'Delete Assessment': 'Delete Assessment',
'Delete Assessment Summary': 'Delete Assessment Summary',
'Delete Baseline': 'Delete Baseline',
'Delete Baseline Type': 'Delete Baseline Type',
'Delete Budget': 'Delete Budget',
'Delete Bundle': 'Delete Bundle',
'Delete Catalog Item': 'Delete Catalog Item',
'Delete Cluster Subsector': 'Delete Cluster Subsector',
'Delete Config': 'Delete Config',
'Delete Distribution': 'Delete Distribution',
'Delete Distribution Item': 'Delete Distribution Item',
'Delete Document': 'Delete Document',
'Delete Donor': 'Delete Donor',
'Delete Entry': 'Delete Entry',
'Delete Feature Class': 'Delete Feature Class',
'Delete Feature Layer': 'Delete Feature Layer',
'Delete Group': 'Delete Group',
'Delete Hospital': 'Xóa Bệnh viện',
'Delete Image': 'Delete Image',
'Delete Impact': 'Delete Impact',
'Delete Impact Type': 'Delete Impact Type',
'Delete Incident': 'Delete Incident',
'Delete Incident Report': 'Delete Incident Report',
'Delete Inventory Item': 'Delete Inventory Item',
'Delete Inventory Store': 'Xóa kho lưu trữ',
'Delete Item': 'Xóa mục',
'Delete Item Category': 'Delete Item Category',
'Delete Item Packet': 'Delete Item Packet',
'Delete Key': 'Delete Key',
'Delete Kit': 'Delete Kit',
'Delete Layer': 'Xóa Layer',
'Delete Location': 'Xóa Vị trí',
'Delete Marker': 'Delete Marker',
'Delete Membership': 'Delete Membership',
'Delete Message': 'Delete Message',
'Delete Metadata': 'Delete Metadata',
'Delete Need': 'Delete Need',
'Delete Need Type': 'Delete Need Type',
'Delete Office': 'Delete Office',
'Delete Old': 'Delete Old',
'Delete Organization': 'Delete Organization',
'Delete Peer': 'Delete Peer',
'Delete Person': 'Delete Person',
'Delete Photo': 'Delete Photo',
'Delete Project': 'Delete Project',
'Delete Projection': 'Delete Projection',
'Delete Rapid Assessment': 'Delete Rapid Assessment',
'Delete Received Item': 'Delete Received Item',
'Delete Received Shipment': 'Delete Received Shipment',
'Delete Record': 'Delete Record',
'Delete Recovery Report': 'Delete Recovery Report',
'Delete Report': 'Delete Report',
'Delete Request': 'Xóa yêu cầu',
'Delete Request Item': 'Xóa yêu cầu hàng hóa',
'Delete Resource': 'Delete Resource',
'Delete Section': 'Delete Section',
'Delete Sector': 'Delete Sector',
'Delete Sent Item': 'Delete Sent Item',
'Delete Sent Shipment': 'Delete Sent Shipment',
'Delete Service Profile': 'Delete Service Profile',
'Delete Setting': 'Delete Setting',
'Delete Skill': 'Delete Skill',
'Delete Skill Type': 'Delete Skill Type',
'Delete Staff Type': 'Delete Staff Type',
'Delete Status': 'Delete Status',
'Delete Subscription': 'Delete Subscription',
'Delete Survey Answer': 'Delete Survey Answer',
'Delete Survey Question': 'Xóa câu hỏi khảo sát',
'Delete Survey Section': 'Delete Survey Section',
'Delete Survey Series': 'Delete Survey Series',
'Delete Survey Template': 'Xóa mẫu khảo sát',
'Delete Unit': 'Xóa đơn vị',
'Delete User': 'Delete User',
'Delete Volunteer': 'Delete Volunteer',
'Delete Warehouse': 'Delete Warehouse',
'Delete Warehouse Item': 'Delete Warehouse Item',
'Delete from Server?': 'Delete from Server?',
'Delivered': 'Delivered',
'Delphi Decision Maker': 'Delphi Decision Maker',
'Demographic': 'Ngành nhân khẩu học',
'Demonstrations': 'Biểu tình',
'Dental Examination': 'Khám nha khoa',
'Dental Profile': 'Dental Profile',
'Department/Unit Name': 'Department/Unit Name',
'Deployment': 'Deployment',
'Describe the condition of the roads to your hospital.': 'Mô tả tình trạng các con đường tới bệnh viện.',
'Describe the procedure which this record relates to (e.g. "medical examination")': 'Describe the procedure which this record relates to (e.g. "medical examination")',
'Description': 'Mô tả',
'Description of Bin Type': 'Description of Bin Type',
'Description of Contacts': 'Description of Contacts',
'Description of defecation area': 'Mo tả khu vực defecation',
'Description of drinking water source': 'Description of drinking water source',
'Description of sanitary water source': 'Description of sanitary water source',
'Description of water source before the disaster': 'Description of water source before the disaster',
'Descriptive Text (e.g., Prose, etc)': 'Descriptive Text (e.g., Prose, etc)',
'Designated for': 'Designated for',
'Desire to remain with family': 'Desire to remain with family',
'Destination': 'Điểm đích',
"Detailed address of the site for informational/logistics purpose. Please note that you can add GIS/Mapping data about this site in the 'Location' field mentioned below.": "Detailed address of the site for informational/logistics purpose. Please note that you can add GIS/Mapping data about this site in the 'Location' field mentioned below.",
'Details': 'Details',
'Dialysis': 'Dialysis',
'Diarrhea': 'Diarrhea',
'Diarrhea among children under 5': 'Diarrhea among children under 5',
'Dignitary Visit': 'Dignitary Visit',
'Dimensions of the storage bin. Input in the following format 1 x 2 x 3 for width x depth x height followed by choosing the unit from the drop down list.': 'Dimensions of the storage bin. Input in the following format 1 x 2 x 3 for width x depth x height followed by choosing the unit from the drop down list.',
'Dimensions of the storage location. Input in the following format 1 x 2 x 3 for width x depth x height followed by choosing the unit from the drop down list.': 'Dimensions of the storage location. Input in the following format 1 x 2 x 3 for width x depth x height followed by choosing the unit from the drop down list.',
'Direction': 'Hướng',
'Disabled': 'Disabled',
'Disabled participating in coping activities': 'Disabled participating in coping activities',
'Disabled?': 'Disabled?',
'Disaster Victim Identification': 'Disaster Victim Identification',
'Disaster Victim Registry': 'Disaster Victim Registry',
'Disaster clean-up/repairs': 'Disaster clean-up/repairs',
'Discharge (cusecs)': 'Discharge (cusecs)',
'Discharges/24hrs': 'Discharges/24hrs',
'Discussion Forum': 'Discussion Forum',
'Discussion Forum on item': 'Discussion Forum on item',
'Disease vectors': 'Disease vectors',
'Dispatch': 'Gửi hàng cứu trợ',
'Dispatch Items': 'Dispatch Items',
'Dispensary': 'Y tế dự phòng',
'Displaced': 'Displaced',
'Displaced Populations': 'Displaced Populations',
'Display Polygons?': 'Display Polygons?',
'Display Routes?': 'Display Routes?',
'Display Tracks?': 'Display Tracks?',
'Display Waypoints?': 'Display Waypoints?',
'Dispose': 'Dispose',
'Dispose Expired/Unusable Items': 'Dispose Expired/Unusable Items',
'Distance between defecation area and water source': 'Distance between defecation area and water source',
'Distance between latrines and temporary shelter in meters': 'Distance between latrines and temporary shelter in meters',
'Distance between shelter and latrines': 'Distance between shelter and latrines',
'Distance(Kms)': 'Distance(Kms)',
'Distribution': 'Distribution',
'Distribution Details': 'Distribution Details',
'Distribution Item': 'Hàng hóa đóng góp',
'Distribution Item Details': 'Distribution Item Details',
'Distribution Item added': 'Distribution Item added',
'Distribution Item deleted': 'Distribution Item deleted',
'Distribution Item updated': 'Distribution Item updated',
'Distribution Items': 'Distribution Items',
'Distribution added': 'Distribution added',
'Distribution deleted': 'Distribution deleted',
'Distribution groups': 'Distribution groups',
'Distribution updated': 'Distribution updated',
'Distributions': 'Distributions',
'District': 'Quận',
'Do adolescent and youth in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)': 'Do adolescent and youth in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)',
'Do households each have at least 2 containers (10-20 litres each) to hold water?': 'Do households each have at least 2 containers (10-20 litres each) to hold water?',
'Do households have appropriate equipment and materials to cook their food (stove, pots, dished plates, and a mug/drinking vessel, etc)?': 'Do households have appropriate equipment and materials to cook their food (stove, pots, dished plates, and a mug/drinking vessel, etc)?',
'Do households have bedding materials available (tarps, plastic mats, blankets)?': 'Do households have bedding materials available (tarps, plastic mats, blankets)?',
'Do households have household water storage containers?': 'Do households have household water storage containers?',
'Do minority members in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)': 'Do minority members in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)',
'Do older people in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)': 'Do older people in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)',
'Do people have at least 2 full sets of clothing (shirts, pants/sarong, underwear)?': 'Do people have at least 2 full sets of clothing (shirts, pants/sarong, underwear)?',
'Do people have reliable access to sufficient sanitation/hygiene items (bathing soap, laundry soap, shampoo, toothpaste and toothbrush)?': 'Do people have reliable access to sufficient sanitation/hygiene items (bathing soap, laundry soap, shampoo, toothpaste and toothbrush)?',
'Do people with disabilities in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)': 'Do people with disabilities in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)',
'Do women and girls have easy access to sanitary materials?': 'Do women and girls have easy access to sanitary materials?',
'Do women in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)': 'Do women in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)',
'Do you have access to cash to restart your business?': 'Do you have access to cash to restart your business?',
'Do you know of any incidents of violence?': 'Do you know of any incidents of violence?',
'Do you know of children living on their own (without adults)?': 'Do you know of children living on their own (without adults)?',
'Do you know of children separated from their parents or caregivers?': 'Do you know of children separated from their parents or caregivers?',
'Do you know of children that have been orphaned by the disaster?': 'Do you know of children that have been orphaned by the disaster?',
'Do you know of children that have been sent to safe places?': 'Do you know of children that have been sent to safe places?',
'Do you know of children that have disappeared without explanation in the period since the disaster?': 'Do you know of children that have disappeared without explanation in the period since the disaster?',
'Do you know of older people who are primary caregivers of children?': 'Do you know of older people who are primary caregivers of children?',
'Do you know of parents/caregivers missing children?': 'Do you know of parents/caregivers missing children?',
'Do you really want to delete these records?': 'Do you really want to delete these records?',
'Do you want to over-write the file metadata with new default values?': 'Bạn có muốn thay dữ liệu file bằng giá trị mặc định mới không?',
'Do you want to receive this shipment?': 'Do you want to receive this shipment?',
'Document': 'Document',
'Document Details': 'Chi tiết văn bản',
'Document Scan': 'Document Scan',
'Document added': 'Đã thêm tài liệu',
'Document deleted': 'Document deleted',
'Document updated': 'Document updated',
'Documents': 'Documents',
'Documents and Photos': 'Documents and Photos',
'Does this facility provide a cholera treatment center?': 'Does this facility provide a cholera treatment center?',
'Doing nothing (no structured activity)': 'Không làm gì (không có hoạt động theo kế hoạch',
'Dollars': 'Dollars',
'Domestic chores': 'Công việc nội trợ',
'Donation Phone #': 'Donation Phone #',
'Donor': 'Donor',
'Donor Details': 'Donor Details',
'Donor added': 'Đã thêm người quyên góp',
'Donor deleted': 'Donor deleted',
'Donor updated': 'Đã cập nhật người quyên góp',
'Donors': 'Donors',
'Donors Report': 'Báo cáo về tình hình quyên góp',
'Door frame': 'Door frame',
'Draft': 'Bản nháp',
'Drainage': 'Drainage',
'Drawing up a Budget for Staff & Equipment across various Locations.': 'Drawing up a Budget for Staff & Equipment across various Locations.',
'Drill Down by Group': 'Drill Down by Group',
'Drill Down by Incident': 'Drill Down by Incident',
'Drill Down by Shelter': 'Drill Down by Shelter',
'Driving License': 'Driving License',
'Drought': 'Drought',
'Drugs': 'Drugs',
'Dug Well': 'Dug Well',
'Duplicate?': 'Duplicate?',
'Duration': 'Duration',
'Dust Storm': 'Dust Storm',
'Dwellings': 'Dwellings',
'E-mail': 'E-mail',
'EMS Reason': 'EMS Reason',
'EMS Status': 'Tình trạng EMS',
'EMS Status Reason': 'EMS Status Reason',
'EMS Traffic Status': 'EMS Traffic Status',
'ER Status': 'ER Status',
'ER Status Reason': 'ER Status Reason',
'Early Recovery': 'Early Recovery',
'Earthquake': 'Động đất',
'Easy access to sanitation items for women/girls': 'Easy access to sanitation items for women/girls',
'Edit': 'Edit',
'Edit Activity': 'Edit Activity',
'Edit Address': 'Edit Address',
'Edit Aid Request': 'Chỉnh sửa Yêu cầu cứu trợ',
'Edit Application': 'Edit Application',
'Edit Assessment': 'Chỉnh sửa Đánh giá',
'Edit Assessment Summary': 'Edit Assessment Summary',
'Edit Baseline': 'Edit Baseline',
'Edit Baseline Type': 'Edit Baseline Type',
'Edit Budget': 'Edit Budget',
'Edit Bundle': 'Edit Bundle',
'Edit Catalog Item': 'Edit Catalog Item',
'Edit Category<>Sub-Category<>Catalog Relation': 'Edit Category<>Sub-Category<>Catalog Relation',
'Edit Cluster': 'Edit Cluster',
'Edit Cluster Subsector': 'Edit Cluster Subsector',
'Edit Config': 'Edit Config',
'Edit Contact': 'Edit Contact',
'Edit Contact Information': 'Chỉnh sửa thông tin liên lạc',
'Edit Contents': 'Edit Contents',
'Edit Defaults': 'Edit Defaults',
'Edit Description': 'Edit Description',
'Edit Details': 'Chỉnh sửa chi tiết',
'Edit Disaster Victims': 'Edit Disaster Victims',
'Edit Distribution': 'Chỉnh sửa Quyên góp',
'Edit Distribution Item': 'Edit Distribution Item',
'Edit Document': 'Chỉnh sửa tài liệu',
'Edit Donor': 'Edit Donor',
'Edit Email Settings': 'Edit Email Settings',
'Edit Feature Class': 'Edit Feature Class',
'Edit Feature Layer': 'Edit Feature Layer',
'Edit Flood Report': 'Edit Flood Report',
'Edit Gateway Settings': 'Edit Gateway Settings',
'Edit Group': 'Edit Group',
'Edit Hospital': 'Chỉnh sửa Bệnh viện',
'Edit Identification Report': 'Chỉnh sửa báo cáo định dạng',
'Edit Identity': 'Edit Identity',
'Edit Image': 'Edit Image',
'Edit Image Details': 'Edit Image Details',
'Edit Impact': 'Edit Impact',
'Edit Impact Type': 'Edit Impact Type',
'Edit Incident': 'Chỉnh sửa Các sự việc xảy ra',
'Edit Incident Report': 'Edit Incident Report',
'Edit Inventory Item': 'Edit Inventory Item',
'Edit Inventory Store': 'Edit Inventory Store',
'Edit Item': 'Edit Item',
'Edit Item Catalog': 'Edit Item Catalog',
'Edit Item Catalog Categories': 'Chỉnh sửa danh mục hàng hóa',
'Edit Item Category': 'Edit Item Category',
'Edit Item Packet': 'Edit Item Packet',
'Edit Item Sub-Categories': 'Edit Item Sub-Categories',
'Edit Key': 'Chỉnh sửa Key',
'Edit Kit': 'Edit Kit',
'Edit Layer': 'Edit Layer',
'Edit Location': 'Edit Location',
'Edit Log Entry': 'Edit Log Entry',
'Edit Map Services': 'Chỉnh sửa dịch vụ bản đồ',
'Edit Marker': 'Edit Marker',
'Edit Membership': 'Edit Membership',
'Edit Message': 'Edit Message',
'Edit Messaging Settings': 'Edit Messaging Settings',
'Edit Metadata': 'Chỉnh sửa dữ liệu',
'Edit Modem Settings': 'Edit Modem Settings',
'Edit Need': 'Edit Need',
'Edit Need Type': 'Edit Need Type',
'Edit Office': 'Edit Office',
'Edit Options': 'Edit Options',
'Edit Organization': 'Edit Organization',
'Edit Parameters': 'Edit Parameters',
'Edit Peer Details': 'Chỉnh sửa chi tiết nhóm người',
'Edit Person Details': 'Edit Person Details',
'Edit Personal Effects Details': 'Edit Personal Effects Details',
'Edit Photo': 'Edit Photo',
'Edit Pledge': 'Edit Pledge',
'Edit Position': 'Edit Position',
'Edit Problem': 'Chỉnh sửa Vấn đề',
'Edit Project': 'Edit Project',
'Edit Projection': 'Edit Projection',
'Edit Rapid Assessment': 'Edit Rapid Assessment',
'Edit Received Item': 'Edit Received Item',
'Edit Received Shipment': 'Edit Received Shipment',
'Edit Record': 'Edit Record',
'Edit Recovery Details': 'Chỉnh sửa chi tiết khôi phục',
'Edit Registration': 'Edit Registration',
'Edit Registration Details': 'Edit Registration Details',
'Edit Report': 'Chỉnh sửa báo cáo',
'Edit Request': 'Edit Request',
'Edit Request Item': 'Chỉnh sửa yêu cầu hàng hóa',
'Edit Resource': 'Edit Resource',
'Edit Response': 'Chỉnh sửa phản hồi',
'Edit River': 'Edit River',
'Edit Role': 'Chỉnh sửa Vai trò',
'Edit Sector': 'Edit Sector',
'Edit Sent Item': 'Edit Sent Item',
'Edit Sent Shipment': 'Edit Sent Shipment',
'Edit Setting': 'Chỉnh sửa cài đặt',
'Edit Settings': 'Edit Settings',
'Edit Shelter': 'Chỉnh sửa thông tin cư trú',
'Edit Shelter Service': 'Chỉnh sửa dịch vụ cư trú',
'Edit Shelter Type': 'Edit Shelter Type',
'Edit Shipment Transit Log': 'Edit Shipment Transit Log',
'Edit Shipment/Way Bills': 'Edit Shipment/Way Bills',
'Edit Shipment<>Item Relation': 'Edit Shipment<>Item Relation',
'Edit Site': 'Edit Site',
'Edit Skill': 'Chỉnh sửa kỹ năng',
'Edit Skill Type': 'Edit Skill Type',
'Edit Solution': 'Edit Solution',
'Edit Staff': 'Edit Staff',
'Edit Staff Type': 'Edit Staff Type',
'Edit Storage Bin Type(s)': 'Edit Storage Bin Type(s)',
'Edit Storage Bins': 'Edit Storage Bins',
'Edit Storage Location': 'Edit Storage Location',
'Edit Subscription': 'Edit Subscription',
'Edit Survey Answer': 'Chỉnh sửa trả lời khảo sát',
'Edit Survey Question': 'Edit Survey Question',
'Edit Survey Section': 'Edit Survey Section',
'Edit Survey Series': 'Edit Survey Series',
'Edit Survey Template': 'Edit Survey Template',
'Edit Task': 'Edit Task',
'Edit Team': 'Edit Team',
'Edit Theme': 'Edit Theme',
'Edit Themes': 'Edit Themes',
'Edit Ticket': 'Edit Ticket',
'Edit Track': 'Edit Track',
'Edit Tropo Settings': 'Edit Tropo Settings',
'Edit Unit': 'Edit Unit',
'Edit User': 'Edit User',
'Edit Volunteer Details': 'Edit Volunteer Details',
'Edit Volunteer Registration': 'Chỉnh sửa đăng ký tình nguyện viên',
'Edit Warehouse': 'Edit Warehouse',
'Edit Warehouse Item': 'Edit Warehouse Item',
'Edit current record': 'Chỉnh sửa bản thu hiện tại',
'Edit message': 'Edit message',
'Edit the Application': 'Chỉnh sửa ứng dụng',
'Editable?': 'Editable?',
'Education': 'Giáo dục',
'Education materials received': 'Đã nhận được tài liệu, dụng cụ phục vụ học tập',
'Education materials, source': 'Dụng cụ học tập, nguồn',
'Effects Inventory': 'Effects Inventory',
'Eggs': 'Eggs',
'Either a shelter or a location must be specified': 'Either a shelter or a location must be specified',
'Either file upload or document URL required.': 'Either file upload or document URL required.',
'Either file upload or image URL required.': 'yêu cầu upload file hoặc URL ảnh',
'Elderly person headed households (>60 yrs)': 'Elderly person headed households (>60 yrs)',
'Electrical': 'Electrical',
'Elevated': 'Nâng cao lên',
'Email': 'Email',
'Email Settings': 'Email Settings',
'Email address verified, however registration is still pending approval - please wait until confirmation received.': 'Địa chỉ email đã được xác nhận, tuy nhiên đăng ký vẫn còn chờ duyệt - hãy đợi đến khi nhận được phê chuẩn',
'Email settings updated': 'Email settings updated',
'Embalming': 'Embalming',
'Embassy': 'Embassy',
'Emergency Capacity Building project': 'Emergency Capacity Building project',
'Emergency Department': 'Bộ phận cấp cứu',
'Emergency Shelter': 'Emergency Shelter',
'Emergency Support Facility': 'Emergency Support Facility',
'Emergency Support Service': 'Emergency Support Service',
'Emergency Telecommunications': 'Emergency Telecommunications',
'Enable/Disable Layers': 'Kích hoạt/Tắt Layer',
'Enabled': 'Enabled',
'End date': 'Ngày kết thúc',
'End date should be after start date': 'End date should be after start date',
'End of Period': 'End of Period',
'English': 'English',
'Enter Coordinates in Deg Min Sec': 'Nhập tọa độ ở dạng Độ,Phút,Giây',
'Enter Coordinates:': 'Enter Coordinates:',
'Enter a GPS Coord': 'Enter a GPS Coord',
'Enter a date before': 'Enter a date before',
'Enter a location': 'Enter a location',
'Enter a name for the spreadsheet you are uploading (mandatory).': 'Nhập tên cho bảng tính bạn đang tải lên(bắt buộc)',
'Enter a new support request.': 'Nhập một yêu cầu hỗ trợ mới',
'Enter a summary of the request here.': 'Nhập tóm tắt các yêu cầu ở đây',
'Enter a unique label!': 'Enter a unique label!',
'Enter a valid email': 'Enter a valid email',
'Enter tags separated by commas.': 'Enter tags separated by commas.',
'Enter the same password as above': 'Enter the same password as above',
'Enter your firstname': 'Nhập tên của bạn',
'Entering a phone number is optional, but doing so allows you to subscribe to receive SMS messages.': 'Không bắt buộc phải nhập số điện thoại nhưng nếu nhập, bạn sẽ nhận được tin nhắn từ hệ thống',
'Entry deleted': 'Entry deleted',
'Equipment': 'Equipment',
'Error encountered while applying the theme.': 'Error encountered while applying the theme.',
'Error in message': 'Error in message',
'Error logs for "%(app)s"': 'Error logs for "%(app)s"',
'Errors': 'Lỗi',
'Estimated # of households who are affected by the emergency': 'Ước tính # số hộ chịu ảnh hưởng từ thiên tai',
'Estimated # of people who are affected by the emergency': 'Estimated # of people who are affected by the emergency',
'Estimated total number of people in institutions': 'Estimated total number of people in institutions',
'Euros': 'Euro',
'Evacuating': 'Evacuating',
'Evaluate the information in this message. (This value SHOULD NOT be used in public warning applications.)': 'Đánh giá thông tin trong thư. (giá trị này KHÔNG NÊN sử dụng trong các ứng dụng cảnh báo công cộng)',
'Event Time': 'Event Time',
'Event Type': 'Loại Sự kiện',
'Event type': 'Event type',
'Example': 'Example',
'Exceeded': 'Exceeded',
'Excreta disposal': 'Excreta disposal',
'Execute a pre-planned activity identified in <instruction>': 'Execute a pre-planned activity identified in <instruction>',
'Existing food stocks, main dishes': 'Existing food stocks, main dishes',
'Existing food stocks, side dishes': 'Existing food stocks, side dishes',
'Expected In': 'Expected In',
'Expected Out': 'Theo dự kiến',
'Expiry Time': 'Expiry Time',
'Explosive Hazard': 'Explosive Hazard',
'Export': 'Export',
'Export Data': 'Export Data',
'Export Database as CSV': 'Export Database as CSV',
'Export in GPX format': 'Export in GPX format',
'Export in KML format': 'Export in KML format',
'Export in OSM format': 'Export in OSM format',
'Export in PDF format': 'Export in PDF format',
'Export in RSS format': 'Export in RSS format',
'Export in XLS format': 'Export in XLS format',
'Eye Color': 'Màu mắt',
'Facebook': 'Facebook',
'Facial hair, color': 'Facial hair, color',
'Facial hair, type': 'Facial hair, type',
'Facial hear, length': 'Facial hear, length',
'Facility Operations': 'Facility Operations',
'Facility Status': 'Facility Status',
'Facility Type': 'Facility Type',
'Factors affecting school attendance': 'Factors affecting school attendance',
'Failed!': 'Failed!',
'Falling Object Hazard': 'Falling Object Hazard',
'Families/HH': 'Families/HH',
'Family': 'Family',
'Family tarpaulins received': 'Family tarpaulins received',
'Family tarpaulins, source': 'Family tarpaulins, source',
'Family/friends': 'Gia đình/Bạn bè',
'Farmland/fishing material assistance, Rank': 'Farmland/fishing material assistance, Rank',
'Fax': 'Fax',
'Feature Class': 'Feature Class',
'Feature Class Details': 'Feature Class Details',
'Feature Class added': 'Feature Class added',
'Feature Class deleted': 'Feature Class deleted',
'Feature Class updated': 'Feature Class updated',
'Feature Classes': 'Các mức phân loại tính năng',
'Feature Classes are collections of Locations (Features) of the same type': 'Các mức phân loại tính năng là tập hợp các vị trí (tính năng) cùng loại',
'Feature Layer Details': 'Feature Layer Details',
'Feature Layer added': 'Lớp đặc tính đã được thêm',
'Feature Layer deleted': 'Feature Layer deleted',
'Feature Layer updated': 'Cập nhật Layer tính năng',
'Feature Layers': 'Feature Layers',
'Feature Namespace': 'Feature Namespace',
'Feature Type': 'Loại tính năng',
'Features Include': 'Features Include',
'Female': 'Female',
'Female headed households': 'Female headed households',
'Few': 'Một vài',
'Field Hospital': 'Field Hospital',
'File': 'File',
'Fill in Latitude': 'Fill in Latitude',
'Fill in Longitude': 'Fill in Longitude',
'Filter': 'Filter',
'Filter Field': 'Filter Field',
'Filter Value': 'Giá trị lọc',
'Filtered search of aid pledges and requests': 'Filtered search of aid pledges and requests',
'Find': 'Tìm',
'Find Dead Body Report': 'Find Dead Body Report',
'Find Recovery Report': 'Tìm Báo cáo phục hồi',
'Find Volunteers': 'Find Volunteers',
'Find by Name': 'Find by Name',
'Finder': 'Finder',
'Fingerprint': 'Fingerprint',
'Fingerprinting': 'Dấu vân tay',
'Fingerprints': 'Fingerprints',
'Finish': 'Finish',
'Finished Jobs': 'Finished Jobs',
'Fire': 'Fire',
'Fire suppression and rescue': 'Fire suppression and rescue',
'First Name': 'First Name',
'First name': 'Tên',
'Fishing': 'Fishing',
'Flash Flood': 'Flash Flood',
'Flash Freeze': 'Flash Freeze',
'Fleet Management': 'Fleet Management',
'Flexible Impact Assessments': 'Flexible Impact Assessments',
'Flood': 'Lũ lụt',
'Flood Alerts': 'Flood Alerts',
'Flood Alerts show water levels in various parts of the country': 'Flood Alerts show water levels in various parts of the country',
'Flood Report': 'Flood Report',
'Flood Report Details': 'Chi tiết báo cáo tình hình lũ lụt',
'Flood Report added': 'Báo cáo lũ lụt đã được thêm',
'Flood Report deleted': 'Flood Report deleted',
'Flood Report updated': 'Flood Report updated',
'Flood Reports': 'Flood Reports',
'Flow Status': 'Flow Status',
'Focal Point': 'Tiêu điểm ',
'Fog': 'Fog',
'Food': 'Food',
'Food Supply': 'Food Supply',
'Food assistance available/expected': 'Food assistance available/expected',
'Footer': 'Footer',
'Footer file %s missing!': 'Footer file %s missing!',
'For POP-3 this is usually 110 (995 for SSL), for IMAP this is usually 143 (993 for IMAP).': 'For POP-3 this is usually 110 (995 for SSL), for IMAP this is usually 143 (993 for IMAP).',
'For a country this would be the ISO2 code, for a Town, it would be the Airport Locode.': 'For a country this would be the ISO2 code, for a Town, it would be the Airport Locode.',
'For each sync partner, there is a default sync job that runs after a specified interval of time. You can also set up more sync jobs which could be customized on your needs. Click the link on the right to get started.': 'Đối với mỗi đối tác đồng bộ, có một công việc đồng bộ mặc định chạy sau một khoảng thời gian nhất định. Bạn cũng có thể thiết lập thêm công việc đồng bộ hơn nữa để có thể tùy biến theo nhu cầu. Nhấp vào liên kết bên phải để bắt đầu',
'For enhanced security, you are recommended to enter a username and password, and notify administrators of other machines in your organization to add this username and password against your UUID in Synchronization -> Sync Partners': 'For enhanced security, you are recommended to enter a username and password, and notify administrators of other machines in your organization to add this username and password against your UUID in Synchronization -> Sync Partners',
'For live help from the Sahana community on using this application, go to': 'For live help from the Sahana community on using this application, go to',
'For messages that support alert network internal functions': 'For messages that support alert network internal functions',
'For more details on the Sahana Eden system, see the': 'For more details on the Sahana Eden system, see the',
'For more information, see ': 'For more information, see ',
'For:': 'For:',
'Forest Fire': 'Forest Fire',
'Formal camp': 'Trại chính thức',
'Format': 'Định dạng',
'Forms': 'Forms',
'Found': 'Found',
'Freezing Drizzle': 'Freezing Drizzle',
'Freezing Rain': 'Freezing Rain',
'Freezing Spray': 'Freezing Spray',
'French': 'French',
'Friday': 'Friday',
'From Location': 'From Location',
'From Warehouse': 'From Warehouse',
'Frost': 'Frost',
'Full': 'Full',
'Full beard': 'Full beard',
'Fullscreen Map': 'Fullscreen Map',
'Functional Tests': 'Functional Tests',
'Functions available': 'Functions available',
'Funding Organization': 'Funding Organization',
'Funeral': 'Funeral',
'GIS Reports of Shelter': 'GIS Reports of Shelter',
'GIS integration to view location details of the Shelter': 'GIS integration to view location details of the Shelter',
'GPS': 'GPS',
'GPS Marker': 'Đánh dấu GPS',
'GPS Track': 'GPS Track',
'GPS Track File': 'GPS Track File',
'GPX Track': 'GPX Track',
'Gale Wind': 'Gale Wind',
'Gap Analysis': 'Gap Analysis',
'Gap Analysis Map': 'Gap Analysis Map',
'Gap Analysis Report': 'Gap Analysis Report',
'Gap Map': 'Gap Map',
'Gap Report': 'Gap Report',
'Gateway Settings': 'Gateway Settings',
'Gateway settings updated': 'Gateway settings updated',
'Gender': 'Gender',
'General Medical/Surgical': 'General Medical/Surgical',
'General emergency and public safety': 'General emergency and public safety',
'Generator': 'Bộ sinh',
'Geocoder Selection': 'Geocoder Selection',
'Geometry Name': 'Geometry Name',
'Geophysical (inc. landslide)': 'Geophysical (inc. landslide)',
'Geraldo module not available within the running Python - this needs installing for PDF output!': 'Geraldo module not available within the running Python - this needs installing for PDF output!',
'Girls 13-18 yrs in affected area': 'Girls 13-18 yrs in affected area',
'Girls 13-18 yrs not attending school': 'Girls 13-18 yrs not attending school',
'Girls 6-12 yrs in affected area': 'Girls 6-12 yrs in affected area',
'Girls 6-12 yrs not attending school': 'Girls 6-12 yrs not attending school',
'Give a brief description of the image, e.g. what can be seen where on the picture (optional).': 'Give a brief description of the image, e.g. what can be seen where on the picture (optional).',
'Give information about where and when you have seen the person': 'Give information about where and when you have seen the person',
'Give information about where and when you have seen them': 'Give information about where and when you have seen them',
'Global Messaging Settings': 'Cài đặt hộp thư tin nhắn toàn cầu',
'Goatee': 'Goatee',
'Government': 'Government',
'Government UID': 'Government UID',
'Government building': 'Government building',
'Grade': 'Grade',
'Greek': 'Greek',
'Group': 'Group',
'Group Details': 'Group Details',
'Group ID': 'Group ID',
'Group Member added': 'Group Member added',
'Group Members': 'Group Members',
'Group Memberships': 'Group Memberships',
'Group Title': 'Group Title',
'Group Type': 'Loại nhóm',
'Group added': 'Đã thêm nhóm',
'Group deleted': 'Group deleted',
'Group description': 'Mô tả nhóm',
'Group name': 'Group name',
'Group type': 'Loại nhóm',
'Group updated': 'Group updated',
'Groups': 'Groups',
'Groups removed': 'Groups removed',
'Guest': 'Guest',
'Hail': 'Hail',
'Hair Color': 'Hair Color',
'Hair Length': 'Hair Length',
'Hair Style': 'Kiểu tóc',
'Has data from this Reference Document been entered into Sahana?': 'Has data from this Reference Document been entered into Sahana?',
'Has the safety and security of women and children in your community changed since the emergency?': 'Has the safety and security of women and children in your community changed since the emergency?',
'Has your business been damaged in the course of the disaster?': 'Has your business been damaged in the course of the disaster?',
'Have households received any shelter/NFI assistance or is assistance expected in the coming days?': 'Have households received any shelter/NFI assistance or is assistance expected in the coming days?',
'Have normal food sources been disrupted?': 'Have normal food sources been disrupted?',
'Have schools received or are expecting to receive any assistance?': 'Have schools received or are expecting to receive any assistance?',
'Have the people received or are you expecting any medical or food assistance in the coming days?': 'Have the people received or are you expecting any medical or food assistance in the coming days?',
'Hazard Pay': 'Hazard Pay',
'Hazardous Material': 'Hazardous Material',
'Hazardous Road Conditions': 'Hazardous Road Conditions',
'Header Background': 'Header Background',
'Header background file %s missing!': 'Header background file %s missing!',
'Headquarters': 'Headquarters',
'Health': 'Health',
'Health care assistance, Rank': 'Health care assistance, Rank',
'Health center': 'Trung tâm y tế',
'Health center with beds': 'Health center with beds',
'Health center without beds': 'Health center without beds',
'Health services functioning prior to disaster': 'Health services functioning prior to disaster',
'Health services functioning since disaster': 'Health services functioning since disaster',
'Healthcare Worker': 'Healthcare Worker',
'Heat Wave': 'Heat Wave',
'Heat and Humidity': 'Heat and Humidity',
'Height': 'Height',
'Height (cm)': 'Height (cm)',
'Help': 'Help',
'Helps to monitor status of hospitals': 'Hỗ trợ giám sát trạng thái các bệnh viện',
'Helps to report and search for Missing Persons': 'Hỗ trợ báo cáo và tìm kếm những người mất tích',
'Here are the solution items related to the problem.': 'Here are the solution items related to the problem.',
'High': 'High',
'High Water': 'High Water',
'Hindu': 'Hindu',
'History': 'Lịch sử',
'Hit the back button on your browser to try again.': 'Nhấp vào nút Back trên trình duyệt để tải lại',
'Holiday Address': 'Holiday Address',
'Home': 'Trang chủ',
'Home Address': 'Địa chỉ nhà',
'Home Country': 'Quê quán',
'Home Crime': 'Home Crime',
'Hospital': 'Bệnh viện',
'Hospital Details': 'Chi tiết thông tin bệnh viện',
'Hospital Status Report': 'Báo cáo tình trạng bệnh viện',
'Hospital information added': 'Đã thêm thông tin Bệnh viện',
'Hospital information deleted': 'Đã xóa thông tin bệnh viện',
'Hospital information updated': 'Đã cập nhật thông tin bệnh viện',
'Hospital status assessment.': 'Đánh giá trạng thái bệnh viện',
'Hospitals': 'Bệnh viện',
'Hot Spot': 'Điểm nóng',
'Hourly': 'Hourly',
'Household kits received': 'Household kits received',
'Household kits, source': 'Household kits, source',
'How did boys 13-17yrs spend most of their time prior to the disaster?': 'How did boys 13-17yrs spend most of their time prior to the disaster?',
'How did boys <12yrs spend most of their time prior to the disaster?': 'How did boys <12yrs spend most of their time prior to the disaster?',
'How did boys girls 13-17yrs spend most of their time prior to the disaster?': 'How did boys girls 13-17yrs spend most of their time prior to the disaster?',
'How did girls <12yrs spend most of their time prior to the disaster?': 'How did girls <12yrs spend most of their time prior to the disaster?',
'How do boys 13-17yrs spend most of their time now?': 'How do boys 13-17yrs spend most of their time now?',
'How do boys <12yrs spend most of their time now?': 'How do boys <12yrs spend most of their time now?',
'How do girls 13-17yrs spend most of their time now?': 'How do girls 13-17yrs spend most of their time now?',
'How do girls <12yrs spend most of their time now?': 'How do girls <12yrs spend most of their time now?',
'How does it work?': 'How does it work?',
'How is this person affected by the disaster? (Select all that apply)': 'How is this person affected by the disaster? (Select all that apply)',
'How long does it take you to reach the available water resources? Specify the time required to go there and back, including queuing time, by foot.': 'How long does it take you to reach the available water resources? Specify the time required to go there and back, including queuing time, by foot.',
'How long does it take you to walk to the health service?': 'How long does it take you to walk to the health service?',
'How long will the food last?': 'How long will the food last?',
'How long will this water resource last?': 'How long will this water resource last?',
'How many Boys (0-17 yrs) are Dead due to the crisis': 'How many Boys (0-17 yrs) are Dead due to the crisis',
'How many Boys (0-17 yrs) are Injured due to the crisis': 'How many Boys (0-17 yrs) are Injured due to the crisis',
'How many Boys (0-17 yrs) are Missing due to the crisis': 'Có bao nhiêu bé trai (0 đến 17 tuổi) bị mất tích do thiên tai',
'How many Girls (0-17 yrs) are Dead due to the crisis': 'How many Girls (0-17 yrs) are Dead due to the crisis',
'How many Girls (0-17 yrs) are Injured due to the crisis': 'How many Girls (0-17 yrs) are Injured due to the crisis',
'How many Girls (0-17 yrs) are Missing due to the crisis': 'How many Girls (0-17 yrs) are Missing due to the crisis',
'How many Men (18 yrs+) are Dead due to the crisis': 'Bao nhiêu người (trên 18 tuổi) chết trong thảm họa',
'How many Men (18 yrs+) are Injured due to the crisis': 'How many Men (18 yrs+) are Injured due to the crisis',
'How many Men (18 yrs+) are Missing due to the crisis': 'How many Men (18 yrs+) are Missing due to the crisis',
'How many Women (18 yrs+) are Dead due to the crisis': 'How many Women (18 yrs+) are Dead due to the crisis',
'How many Women (18 yrs+) are Injured due to the crisis': 'Số nạn nhân là nữ trên 18 tuổi chịu ảnh hưởng của cuộc khủng hoảng',
'How many Women (18 yrs+) are Missing due to the crisis': 'How many Women (18 yrs+) are Missing due to the crisis',
'How many days will the supplies last?': 'How many days will the supplies last?',
'How many doctors in the health centers are still actively working?': 'How many doctors in the health centers are still actively working?',
'How many houses are uninhabitable (uninhabitable = foundation and structure destroyed)?': 'How many houses are uninhabitable (uninhabitable = foundation and structure destroyed)?',
'How many houses suffered damage but remain usable (usable = windows broken, cracks in walls, roof slightly damaged)?': 'How many houses suffered damage but remain usable (usable = windows broken, cracks in walls, roof slightly damaged)?',
'How many latrines are available in the village/IDP centre/Camp?': 'How many latrines are available in the village/IDP centre/Camp?',
'How many midwives in the health centers are still actively working?': 'How many midwives in the health centers are still actively working?',
'How many new cases have been admitted to this facility in the past 24h?': 'How many new cases have been admitted to this facility in the past 24h?',
'How many nurses in the health centers are still actively working?': 'How many nurses in the health centers are still actively working?',
'How many of the patients with the disease died in the past 24h at this facility?': 'How many of the patients with the disease died in the past 24h at this facility?',
'How many of the primary school age boys (6-12) in the area are not attending school?': 'How many of the primary school age boys (6-12) in the area are not attending school?',
'How many of the primary school age girls (6-12) in the area are not attending school?': 'How many of the primary school age girls (6-12) in the area are not attending school?',
'How many of the primary/secondary schools are now open and running a regular schedule of class?': 'How many of the primary/secondary schools are now open and running a regular schedule of class?',
'How many of the secondary school age boys (13-18) in the area are not attending school?': 'How many of the secondary school age boys (13-18) in the area are not attending school?',
'How many of the secondary school age girls (13-18) in the area are not attending school?': 'How many of the secondary school age girls (13-18) in the area are not attending school?',
'How many patients with the disease are currently hospitalized at this facility?': 'How many patients with the disease are currently hospitalized at this facility?',
'How many primary school age boys (6-12) are in the affected area?': 'How many primary school age boys (6-12) are in the affected area?',
'How many primary school age girls (6-12) are in the affected area?': 'How many primary school age girls (6-12) are in the affected area?',
'How many primary/secondary schools were opening prior to the disaster?': 'How many primary/secondary schools were opening prior to the disaster?',
'How many secondary school age boys (13-18) are in the affected area?': 'How many secondary school age boys (13-18) are in the affected area?',
'How many secondary school age girls (13-18) are in the affected area?': 'How many secondary school age girls (13-18) are in the affected area?',
'How many teachers have been affected by the disaster (affected = unable to work)?': 'How many teachers have been affected by the disaster (affected = unable to work)?',
'How many teachers worked in the schools prior to the disaster?': 'How many teachers worked in the schools prior to the disaster?',
'How much detail is seen. A high Zoom level means lot of detail, but not a wide area. A low Zoom level means seeing a wide area, but not a high level of detail.': 'How much detail is seen. A high Zoom level means lot of detail, but not a wide area. A low Zoom level means seeing a wide area, but not a high level of detail.',
'Humanitarian NGO': 'Humanitarian NGO',
'Hurricane': 'Hurricane',
'Hurricane Force Wind': 'Hurricane Force Wind',
'Hygiene': 'Hygiene',
'Hygiene NFIs': 'Hygiene NFIs',
'Hygiene kits received': 'Hygiene kits received',
'Hygiene kits, source': 'Dụng cụ vệ sinh, nguồn',
'Hygiene practice': 'Hygiene practice',
'Hygiene problems': 'Hygiene problems',
'ID Label': 'ID Label',
'ID Tag': 'ID Tag',
'ID Tag Number': 'ID Tag Number',
'ID type': 'ID type',
'Ice Pressure': 'Áp suất băng',
'Iceberg': 'Iceberg',
'Ideally a full URL to the source file, otherwise just a note on where data came from.': 'Ideally a full URL to the source file, otherwise just a note on where data came from.',
'Identification': 'Identification',
'Identification Report': 'Identification Report',
'Identification Reports': 'Identification Reports',
'Identification Status': 'Identification Status',
'Identification label of the Storage bin.': 'Nhãn xác định Bin lưu trữ',
'Identified as': 'Identified as',
'Identified by': 'Identified by',
'Identity': 'Identity',
'Identity Details': 'Identity Details',
'Identity added': 'Identity added',
'Identity deleted': 'Identity deleted',
'Identity updated': 'Identity updated',
'If Unit = m, Base Unit = Km, then multiplicator is 0.0001 since 1m = 0.001 km.': 'If Unit = m, Base Unit = Km, then multiplicator is 0.0001 since 1m = 0.001 km.',
'If enabled then a log is maintained of all records a user accesses. If disabled then it can still be enabled on a per-module basis.': 'If enabled then a log is maintained of all records a user accesses. If disabled then it can still be enabled on a per-module basis.',
'If enabled then a log is maintained of all records a user edits. If disabled then it can still be enabled on a per-module basis.': 'If enabled then a log is maintained of all records a user edits. If disabled then it can still be enabled on a per-module basis.',
'If no marker defined then the system default marker is used': 'If no marker defined then the system default marker is used',
'If no, specify why': 'If no, specify why',
'If the location is a geographic area, then state at what level here.': 'If the location is a geographic area, then state at what level here.',
'If this is set to True then mails will be deleted from the server after downloading.': 'If this is set to True then mails will be deleted from the server after downloading.',
'If this record should be restricted then select which role is required to access the record here.': 'If this record should be restricted then select which role is required to access the record here.',
'If this record should be restricted then select which role(s) are permitted to access the record here.': 'If this record should be restricted then select which role(s) are permitted to access the record here.',
"If this setting is enabled then all deleted records are just flagged as deleted instead of being really deleted. They will appear in the raw database access but won't be visible to normal users.": "If this setting is enabled then all deleted records are just flagged as deleted instead of being really deleted. They will appear in the raw database access but won't be visible to normal users.",
'If yes, specify what and by whom': 'If yes, specify what and by whom',
'If yes, which and how': 'nếu có thì cái nào và như thế nào',
"If you cannot find the person you want to register as a volunteer, you can add them by clicking 'Add Person' below:": "If you cannot find the person you want to register as a volunteer, you can add them by clicking 'Add Person' below:",
"If you cannot find the person you want to report missing, you can add them by clicking 'Add Person' below:": "If you cannot find the person you want to report missing, you can add them by clicking 'Add Person' below:",
"If you cannot find the record of the person you want to report missing, you can add it by clicking 'Add Person' below:": "If you cannot find the record of the person you want to report missing, you can add it by clicking 'Add Person' below:",
'If you do not enter a Reference Document, your email will be displayed to allow this data to be verified.': 'If you do not enter a Reference Document, your email will be displayed to allow this data to be verified.',
'If you know what the Geonames ID of this location is then you can enter it here.': 'If you know what the Geonames ID of this location is then you can enter it here.',
'If you know what the OSM ID of this location is then you can enter it here.': 'If you know what the OSM ID of this location is then you can enter it here.',
'If you need to add a new document then you can click here to attach one.': 'Nếu cần thêm một tài liệu mới, nhấn vào đây để đính kèm',
'If you would like to help, then please': 'If you would like to help, then please',
'Illegal Immigrant': 'Illegal Immigrant',
'Image': 'Image',
'Image Details': 'Hình ảnh chi tiết',
'Image Tags': 'Image Tags',
'Image Type': 'Image Type',
'Image Upload': 'Image Upload',
'Image added': 'Image added',
'Image deleted': 'Image deleted',
'Image updated': 'Image updated',
'Image/Attachment': 'Image/Attachment',
'Image/Other Attachment': 'Image/Other Attachment',
'Imagery': 'Imagery',
'Images': 'Images',
'Immediate reconstruction assistance, Rank': 'Immediate reconstruction assistance, Rank',
'Impact Assessments': 'Impact Assessments',
'Impact Details': 'Impact Details',
'Impact Type': 'Impact Type',
'Impact Type Details': 'Impact Type Details',
'Impact Type added': 'Impact Type added',
'Impact Type deleted': 'Impact Type deleted',
'Impact Type updated': 'Impact Type updated',
'Impact Types': 'Impact Types',
'Impact added': 'Impact added',
'Impact deleted': 'Impact deleted',
'Impact updated': 'Impact updated',
'Impacts': 'Impacts',
'Import': 'Import',
'Import & Export Data': 'Import & Export Data',
'Import Data': 'Import Data',
'Import Job': 'Import Job',
'Import Jobs': 'Chuyển đổi nghề nghiệp',
'Import and Export': 'Import and Export',
'Import from Ushahidi Instance': 'Import from Ushahidi Instance',
'Import if Master': 'Import if Master',
'Import job created': 'Import job created',
'Import multiple tables as CSV': 'Chuyển đổi định dạng bảng sang CSV',
'Import/Export': 'Import/Export',
'Important': 'Quan trọng',
'Importantly where there are no aid services being provided': 'Importantly where there are no aid services being provided',
'Imported': 'Imported',
'Importing data from spreadsheets': 'Importing data from spreadsheets',
'Improper decontamination': 'Improper decontamination',
'Improper handling of dead bodies': 'Improper handling of dead bodies',
'In GeoServer, this is the Layer Name. Within the WFS getCapabilities, this is the FeatureType Name part after the colon(:).': 'Trong GeoServer, đây là tên lớp. Trong WFS getCapabilities, đây là tên FeatureType, phần sau dấu hai chấm (:).',
'In GeoServer, this is the Workspace Name. Within the WFS getCapabilities, this is the FeatureType Name part before the colon(:).': 'In GeoServer, this is the Workspace Name. Within the WFS getCapabilities, this is the FeatureType Name part before the colon(:).',
'In Inventories': 'In Inventories',
'In Process': 'In Process',
'In Progress': 'In Progress',
'In Transit': 'In Transit',
'In general, what are the greatest needs of older people, people with disabilities, children, youth and women in your community?': 'In general, what are the greatest needs of older people, people with disabilities, children, youth and women in your community?',
'Inbound Mail Settings': 'Inbound Mail Settings',
'Incident': 'Incident',
'Incident Categories': 'Incident Categories',
'Incident Details': 'Incident Details',
'Incident Report': 'Incident Report',
'Incident Report Details': 'Incident Report Details',
'Incident Report added': 'Incident Report added',
'Incident Report deleted': 'Incident Report deleted',
'Incident Report updated': 'Incident Report updated',
'Incident Reporting': 'Incident Reporting',
'Incident Reporting System': 'Incident Reporting System',
'Incident Reports': 'Incident Reports',
'Incident added': 'Incident added',
'Incident deleted': 'Incident deleted',
'Incident updated': 'Incident updated',
'Incidents': 'Incidents',
'Incomplete': 'Incomplete',
'Individuals': 'Individuals',
'Industrial Crime': 'Industrial Crime',
'Industry Fire': 'Industry Fire',
'Industry close to village/camp': 'Industry close to village/camp',
'Infant (0-1)': 'Trẻ sơ sinh',
'Infectious Disease': 'Infectious Disease',
'Infectious Diseases': 'Infectious Diseases',
'Infestation': 'Infestation',
'Informal Leader': 'Informal Leader',
'Informal camp': 'Informal camp',
'Information gaps': 'Information gaps',
'Infusion catheters available': 'Infusion catheters available',
'Infusion catheters need per 24h': 'Infusion catheters need per 24h',
'Infusion catheters needed per 24h': 'Infusion catheters needed per 24h',
'Infusions available': 'Infusions available',
'Infusions needed per 24h': 'Infusions needed per 24h',
'Input Job': 'Input Job',
'Instant Porridge': 'Instant Porridge',
"Instead of automatically syncing from other peers over the network, you can also sync from files, which is necessary where there's no network. You can use this page to import sync data from files and also export data to sync files. Click the link on the right to go to this page.": "Instead of automatically syncing from other peers over the network, you can also sync from files, which is necessary where there's no network. You can use this page to import sync data from files and also export data to sync files. Click the link on the right to go to this page.",
'Institution': 'Institution',
'Insufficient': 'Insufficient',
'Insufficient vars: Need module, resource, jresource, instance': 'Insufficient vars: Need module, resource, jresource, instance',
'Intake Items': 'Intake Items',
'Intergovernmental Organisation': 'Intergovernmental Organisation',
'Internal State': 'Internal State',
'International NGO': 'Tổ chức phi chính phủ quốc tế',
'International Organization': 'International Organization',
'International Staff': 'International Staff',
'Intervention': 'Intervention',
'Interview taking place at': 'Interview taking place at',
'Invalid': 'Invalid',
'Invalid Query': 'Truy vấn không hợp lệ',
'Invalid email': 'Invalid email',
'Invalid request!': 'Yêu cầu không hợp lệ',
'Invalid ticket': 'Ticket không hợp lệ',
'Inventories with Item': 'Inventories with Item',
'Inventory': 'Inventory',
'Inventory Item Details': 'Chi tiết hàng hóa trong kho',
'Inventory Item added': 'Bổ sung hàng hóa vào kho lưu trữ.',
'Inventory Item deleted': 'Inventory Item deleted',
'Inventory Item updated': 'Inventory Item updated',
'Inventory Items': 'Inventory Items',
'Inventory Management': 'Inventory Management',
'Inventory Store': 'Inventory Store',
'Inventory Store Details': 'Chi tiết kho lưu trữ',
'Inventory Store added': 'Inventory Store added',
'Inventory Store deleted': 'Inventory Store deleted',
'Inventory Store updated': 'Inventory Store updated',
'Inventory Stores': 'Inventory Stores',
'Inventory of Effects': 'Kho dự phòng',
'Inventory/Ledger': 'Inventory/Ledger',
'Is adequate food and water available for these institutions?': 'Is adequate food and water available for these institutions?',
'Is it safe to collect water?': 'Is it safe to collect water?',
'Is there any industrial or agro-chemical production close to the affected area/village?': 'Is there any industrial or agro-chemical production close to the affected area/village?',
'Issuing Authority': 'Issuing Authority',
'It is built using the Template agreed by a group of NGOs working together as the': 'It is built using the Template agreed by a group of NGOs working together as the',
'Item': 'Item',
'Item Catalog Categories': 'Item Catalog Categories',
'Item Catalog Category': 'Item Catalog Category',
'Item Catalog Category Details': 'Item Catalog Category Details',
'Item Catalog Category added': 'Item Catalog Category added',
'Item Catalog Category deleted': 'Item Catalog Category deleted',
'Item Catalog Category updated': 'Item Catalog Category updated',
'Item Catalog Details': 'Item Catalog Details',
'Item Catalog added': 'Item Catalog added',
'Item Catalog deleted': 'Đã xóa danh mục hàng hóa',
'Item Catalog updated': 'Item Catalog updated',
'Item Catalogs': 'Item Catalogs',
'Item Categories': 'Item Categories',
'Item Category': 'Item Category',
'Item Category Details': 'Item Category Details',
'Item Category added': 'Item Category added',
'Item Category deleted': 'Đã xóa Tiêu chí hàng hóa',
'Item Category updated': 'Item Category updated',
'Item Details': 'Item Details',
'Item Packet Details': 'Item Packet Details',
'Item Packet added': 'Item Packet added',
'Item Packet deleted': 'Item Packet deleted',
'Item Packet updated': 'Item Packet updated',
'Item Packets': 'Item Packets',
'Item Sub-Categories': 'Item Sub-Categories',
'Item Sub-Category': 'Item Sub-Category',
'Item Sub-Category Details': 'Item Sub-Category Details',
'Item Sub-Category added': 'Item Sub-Category added',
'Item Sub-Category deleted': 'Item Sub-Category deleted',
'Item Sub-Category updated': 'Đã cập nhật tiêu chí phụ của hàng hóa',
'Item added': 'Item added',
'Item already in Bundle!': 'Hàng đã có trong Bundle!',
'Item already in Kit!': 'Item already in Kit!',
'Item already in budget!': 'Item already in budget!',
'Item deleted': 'Item deleted',
'Item updated': 'Item updated',
'Items': 'Hàng hóa',
'Japanese': 'Japanese',
'Jerry can': 'Jerry can',
'Jew': 'Jew',
'Job Title': 'Job Title',
'Jobs': 'Jobs',
'Just Once': 'Just Once',
'KPIs': 'KPIs',
'Key': 'Key',
'Key Details': 'Key Details',
'Key added': 'Key added',
'Key deleted': 'Key deleted',
'Key updated': 'Key updated',
'Keys': 'Keys',
'Kit': 'Kit',
'Kit Contents': 'Kit Contents',
'Kit Details': 'Chi tiết Kit',
'Kit Updated': 'Kit Updated',
'Kit added': 'Kit added',
'Kit deleted': 'Đã xóa Kit',
'Kit updated': 'Kit updated',
'Kits': 'Kits',
'Known Identities': 'Known Identities',
'Known incidents of violence against women/girls': 'Known incidents of violence against women/girls',
'Known incidents of violence since disaster': 'Known incidents of violence since disaster',
'LICENCE': 'bản quyền',
'LICENSE': 'LICENSE',
'LMS Administration': 'Quản trị LMS',
'Label': 'Nhãn',
'Lack of material': 'Lack of material',
'Lack of school uniform': 'Lack of school uniform',
'Lack of supplies at school': 'Lack of supplies at school',
'Lack of transport to school': 'Lack of transport to school',
'Lactating women': 'Lactating women',
'Lahar': 'Lahar',
'Landslide': 'Landslide',
'Language': 'Language',
'Last Name': 'Last Name',
'Last known location': 'Last known location',
'Last name': 'Last name',
'Last synchronization time': 'Last synchronization time',
'Last updated by': 'Last updated by',
'Last updated on': 'Last updated on',
'Latitude': 'Latitude',
'Latitude & Longitude': 'Latitude & Longitude',
'Latitude is North-South (Up-Down). Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere.': 'Latitude is North-South (Up-Down). Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere.',
'Latitude should be between': 'Latitude should be between',
'Law enforcement, military, homeland and local/private security': 'Law enforcement, military, homeland and local/private security',
'Layer': 'Layer',
'Layer Details': 'Layer Details',
'Layer added': 'Layer added',
'Layer deleted': 'Đã xóa layer',
'Layer updated': 'Đã cập nhật Layer',
'Layers': 'Layers',
'Layers updated': 'Đã cập nhật Layer',
'Layout': 'Layout',
'Legend Format': 'Legend Format',
'Length': 'Độ dài',
'Level': 'Level',
"Level is higher than parent's": "Level is higher than parent's",
'Library support not available for OpenID': 'Library support not available for OpenID',
'Line': 'Line',
'Link Item & Shipment': 'Link Item & Shipment',
'Link an Item & Shipment': 'Link an Item & Shipment',
'Linked Records': 'Linked Records',
'Linked records': 'Linked records',
'List': 'List',
'List / Add Baseline Types': 'List / Add Baseline Types',
'List / Add Impact Types': 'List / Add Impact Types',
'List / Add Services': 'List / Add Services',
'List / Add Types': 'List / Add Types',
'List Activities': 'List Activities',
'List Aid Requests': 'Danh sách Yêu cầu cứu trợ',
'List All': 'List All',
'List All Entries': 'List All Entries',
'List All Memberships': 'Danh sách tất cả các thành viên',
'List Assessment Summaries': 'List Assessment Summaries',
'List Assessments': 'Danh sách Trị giá tính thuế',
'List Baseline Types': 'List Baseline Types',
'List Baselines': 'List Baselines',
'List Budgets': 'List Budgets',
'List Bundles': 'List Bundles',
'List Catalog Items': 'List Catalog Items',
'List Category<>Sub-Category<>Catalog Relation': 'List Category<>Sub-Category<>Catalog Relation',
'List Checklists': 'Danh sách Checklists ',
'List Cluster Subsectors': 'List Cluster Subsectors',
'List Clusters': 'List Clusters',
'List Configs': 'List Configs',
'List Conflicts': 'List Conflicts',
'List Contacts': 'List Contacts',
'List Distribution Items': 'List Distribution Items',
'List Distributions': 'Danh sách ủng hộ,quyên góp',
'List Documents': 'List Documents',
'List Donors': 'List Donors',
'List Feature Classes': 'List Feature Classes',
'List Feature Layers': 'List Feature Layers',
'List Flood Reports': 'List Flood Reports',
'List Groups': 'Danh sách Nhóm',
'List Groups/View Members': 'List Groups/View Members',
'List Hospitals': 'Danh sách Bệnh viện',
'List Identities': 'List Identities',
'List Images': 'List Images',
'List Impact Assessments': 'List Impact Assessments',
'List Impact Types': 'List Impact Types',
'List Impacts': 'List Impacts',
'List Incident Reports': 'List Incident Reports',
'List Incidents': 'List Incidents',
'List Inventory Items': 'List Inventory Items',
'List Inventory Stores': 'List Inventory Stores',
'List Item Catalog Categories': 'List Item Catalog Categories',
'List Item Catalogs': 'List Item Catalogs',
'List Item Categories': 'List Item Categories',
'List Item Packets': 'List Item Packets',
'List Item Sub-Categories': 'List Item Sub-Categories',
'List Items': 'List Items',
'List Keys': 'List Keys',
'List Kits': 'Danh sách Kit',
'List Layers': 'List Layers',
'List Locations': 'Danh sách Vị trí',
'List Log Entries': 'List Log Entries',
'List Markers': 'List Markers',
'List Members': 'List Members',
'List Memberships': 'Danh sách thành viên',
'List Messages': 'Danh sách tin nhắn ',
'List Metadata': 'Danh sách dữ liệu',
'List Missing Persons': 'Danh sách những người mất tích',
'List Need Types': 'List Need Types',
'List Needs': 'List Needs',
'List Offices': 'List Offices',
'List Organizations': 'List Organizations',
'List Peers': 'List Peers',
'List Personal Effects': 'List Personal Effects',
'List Persons': 'List Persons',
'List Photos': 'List Photos',
'List Positions': 'List Positions',
'List Problems': 'List Problems',
'List Projections': 'List Projections',
'List Projects': 'List Projects',
'List Rapid Assessments': 'List Rapid Assessments',
'List Received Items': 'List Received Items',
'List Received Shipments': 'List Received Shipments',
'List Records': 'List Records',
'List Registrations': 'List Registrations',
'List Reports': 'List Reports',
'List Request Items': 'Danh sách Hang hóa yêu cầu',
'List Requests': 'Danh sách yêu cầu',
'List Resources': 'Danh sách tài nguyên',
'List Responses': 'List Responses',
'List Rivers': 'Danh sách sông',
'List Roles': 'Danh sách Vai trò',
'List Sections': 'List Sections',
'List Sector': 'List Sector',
'List Sent Items': 'List Sent Items',
'List Sent Shipments': 'List Sent Shipments',
'List Service Profiles': 'List Service Profiles',
'List Settings': 'List Settings',
'List Shelter Services': 'List Shelter Services',
'List Shelter Types': 'List Shelter Types',
'List Shelters': 'List Shelters',
'List Shipment Transit Logs': 'List Shipment Transit Logs',
'List Shipment/Way Bills': 'Danh sách Đơn hàng/Phí đường bộ',
'List Shipment<>Item Relation': 'List Shipment<>Item Relation',
'List Sites': 'List Sites',
'List Skill Types': 'List Skill Types',
'List Skills': 'Danh sách kỹ năng',
'List Solutions': 'List Solutions',
'List Staff': 'Danh sách Nhân viên',
'List Staff Types': 'List Staff Types',
'List Status': 'List Status',
'List Storage Bin Type(s)': 'List Storage Bin Type(s)',
'List Storage Bins': 'List Storage Bins',
'List Storage Location': 'Danh sách vị trí kho lưu trữ',
'List Subscriptions': 'Danh sách Đăng ký',
'List Survey Answers': 'List Survey Answers',
'List Survey Questions': 'Danh sách câu hỏi khảo sát',
'List Survey Sections': 'List Survey Sections',
'List Survey Series': 'List Survey Series',
'List Survey Templates': 'List Survey Templates',
'List Tasks': 'List Tasks',
'List Teams': 'List Teams',
'List Themes': 'List Themes',
'List Tickets': 'Danh sách Ticket',
'List Tracks': 'List Tracks',
'List Units': 'Danh sách đơn vị',
'List Users': 'Danh sách người dùng',
'List Volunteers': 'List Volunteers',
'List Warehouse Items': 'List Warehouse Items',
'List Warehouses': 'List Warehouses',
'List all': 'Hiển thị tất cả',
'List of Items': 'List of Items',
'List of Missing Persons': 'Danh sách những người mất tích',
'List of Peers': 'List of Peers',
'List of Reports': 'List of Reports',
'List of Requests': 'Danh sách yêu cầu',
'List of Spreadsheets': 'List of Spreadsheets',
'List of Spreadsheets uploaded': 'List of Spreadsheets uploaded',
'List of Volunteers for this skills set': 'List of Volunteers for this skills set',
'List of addresses': 'Danh sách các địa chỉ',
'List unidentified': 'List unidentified',
'List/Add': 'Danh sách/Thêm',
'Lists "who is doing what & where". Allows relief agencies to coordinate their activities': 'Danh sách "Ai làm gì, ở đâu"Cho phép các tổ chức cứu trợ điều phối hoạt động của mình',
'Live Help': 'Trợ giúp',
'Livelihood': 'Livelihood',
'Load Cleaned Data into Database': 'Load Cleaned Data into Database',
'Load Details': 'Load Details',
'Load Raw File into Grid': 'Load Raw File into Grid',
'Load the details to help decide which is the best one to keep out of the 2.': 'Load the details to help decide which is the best one to keep out of the 2.',
'Loading Locations...': 'Loading Locations...',
'Local Name': 'Tên địa phương',
'Local Names': 'Local Names',
'Location': 'Location',
'Location 1': 'Location 1',
'Location 2': 'Location 2',
'Location De-duplicated': 'Location De-duplicated',
'Location Details': 'Location Details',
'Location added': 'Location added',
'Location deleted': 'Đã xóa vị trí',
'Location details': 'Location details',
'Location updated': 'Location updated',
'Location: ': 'Location: ',
'Locations': 'Locations',
'Locations De-duplicator': 'Locations De-duplicator',
'Locations of this level need to have a parent of level': 'Locations of this level need to have a parent of level',
'Locations should be different!': 'Locations should be different!',
'Lockdown': 'Lockdown',
'Log': 'Log',
'Log Entry Details': 'Log Entry Details',
'Log entry added': 'Log entry added',
'Log entry deleted': 'Xóa theo dõi đăng nhập',
'Log entry updated': 'Cập nhật theo dõi đăng nhập',
'Login': 'Đăng nhập',
'Logistics': 'Logistics',
'Logistics Management': 'Logistics Management',
'Logistics Management System': 'Logistics Management System',
'Logo': 'Logo',
'Logo file %s missing!': 'Logo file %s missing!',
'Logout': 'Logout',
'Long Text': 'Long Text',
'Longitude': 'Longitude',
'Longitude is West - East (sideways). Latitude is North-South (Up-Down). Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere. Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas. These need to be added in Decimal Degrees.': 'Longitude is West - East (sideways). Latitude is North-South (Up-Down). Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere. Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas. These need to be added in Decimal Degrees.',
'Longitude is West - East (sideways). Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.': 'Kinh độ trải dài theo hướng Đông-Tây. Kinh tuyến không nằm trên kinh tuyến gốc (Greenwich Mean Time) hướng về phía đông, vắt ngang châu Âu và châu Á.',
'Longitude should be between': 'Longitude should be between',
'Looting': 'Looting',
'Lost Password': 'Lost Password',
'Low': 'Low',
'Magnetic Storm': 'Magnetic Storm',
'Main cash source': 'Main cash source',
'Main income sources before disaster': 'Main income sources before disaster',
'Major outward damage': 'Major outward damage',
'Make Pledge': 'Make Pledge',
'Make Request': 'Make Request',
'Make a Request': 'Tạo yêu cầu',
'Make a Request for Aid': 'Tạo yêu cầu cứu trợ',
'Make preparations per the <instruction>': 'Make preparations per the <instruction>',
'Male': 'Male',
'Malnutrition present prior to disaster': 'Malnutrition present prior to disaster',
'Manage': 'Manage',
'Manage Category': 'Manage Category',
'Manage Item catalog': 'Manage Item catalog',
'Manage Kits': 'Manage Kits',
'Manage Relief Item Catalogue': 'Manage Relief Item Catalogue',
'Manage Sub-Category': 'Quản lý Tiêu chí phụ',
'Manage Users & Roles': 'Manage Users & Roles',
'Manage Warehouses/Sites': 'Manage Warehouses/Sites',
'Manage requests of hospitals for assistance.': 'Manage requests of hospitals for assistance.',
'Manage volunteers by capturing their skills, availability and allocation': 'Nắm bắt kỹ năng, khả năng và khu vực hoạt động của tình nguyện viên để quản lý',
'Manager': 'Manager',
'Managing Office': 'Managing Office',
'Managing, Storing and Distributing Catalog Items.': 'Managing, Storing and Distributing Catalog Items.',
'Managing, Storing and Distributing Relief Items': 'Quản lý, Lưu trữ và Quyên góp hàng cứu trợ',
'Mandatory. In GeoServer, this is the Layer Name. Within the WFS getCapabilities, this is the FeatureType Name part after the colon(:).': 'Mandatory. In GeoServer, this is the Layer Name. Within the WFS getCapabilities, this is the FeatureType Name part after the colon(:).',
'Mandatory. The URL to access the service.': 'Mandatory. The URL to access the service.',
'Manual': 'Manual',
'Manual Synchronization': 'Manual Synchronization',
'Many': 'Many',
'Map': 'Map',
'Map Height': 'Chiều cao bản đồ',
'Map Service Catalogue': 'Catalogue bản đồ dịch vụ',
'Map Settings': 'Cài đặt bản đồ',
'Map Viewing Client': 'Map Viewing Client',
'Map Width': 'Độ rộng bản đồ',
'Map of Hospitals': 'Bản đồ bệnh viện',
'Mapping': 'Mapping',
'Marine Security': 'Marine Security',
'Marital Status': 'Tình trạng hôn nhân',
'Marker': 'Marker',
'Marker Details': 'Chi tiết Đèn hiệu',
'Marker added': 'Marker added',
'Marker deleted': 'Marker deleted',
'Marker updated': 'Marker updated',
'Markers': 'Markers',
'Master Message Log': 'Master Message Log',
'Master Message Log to process incoming reports & requests': 'Kiểm soát log tin nhắn để xử lý báo cáo và yêu cầu gửi đến',
'Match Percentage': 'Match Percentage',
'Match percentage indicates the % match between these two records': 'Match percentage indicates the % match between these two records',
'Matching Records': 'Matching Records',
'Matrix of Choices (Multiple Answers)': 'Matrix of Choices (Multiple Answers)',
'Matrix of Choices (Only one answer)': 'Matrix of Choices (Only one answer)',
'Matrix of Text Fields': 'Matrix of Text Fields',
'Max Persons per Dwelling': 'Max Persons per Dwelling',
'Maximum Weight': 'Maximum Weight',
'Maximum weight capacity of the Storage Location followed by choosing the unit from the drop down list.': 'Maximum weight capacity of the Storage Location followed by choosing the unit from the drop down list.',
'Maximum weight capacity of the items the storage bin can contain. followed by choosing the unit from the drop down list.': 'Maximum weight capacity of the items the storage bin can contain. followed by choosing the unit from the drop down list.',
'Medical and public health': 'Medical and public health',
'Medicine': 'Medicine',
'Medium': 'Medium',
'Megabytes per Month': 'Megabytes per Month',
'Members': 'Members',
'Membership': 'Membership',
'Membership Details': 'Membership Details',
'Membership added': 'Đã thêm thành viên',
'Membership deleted': 'Membership deleted',
'Membership updated': 'Cập nhật thông tin thành viên',
'Memberships': 'Memberships',
'Message': 'Message',
'Message Details': 'Message Details',
'Message Variable': 'Message Variable',
'Message added': 'Đã thêm tin nhắn',
'Message deleted': 'Message deleted',
'Message sent to outbox': 'Message sent to outbox',
'Message updated': 'Message updated',
'Message variable': 'Message variable',
'Messages': 'Messages',
'Messaging': 'Messaging',
'Messaging settings updated': 'Messaging settings updated',
'Metadata': 'Metadata',
'Metadata Details': 'Metadata Details',
'Metadata added': 'Đã thêm dữ liệu',
'Metadata can be supplied here to be applied to all uploaded photos, if desired.': 'Metadata can be supplied here to be applied to all uploaded photos, if desired.',
'Metadata deleted': 'Metadata deleted',
'Metadata updated': 'Metadata updated',
'Meteorite': 'Meteorite',
'Meteorological (inc. flood)': 'Meteorological (inc. flood)',
'Method used': 'Method used',
'Micronutrient malnutrition prior to disaster': 'Micronutrient malnutrition prior to disaster',
'Middle Name': 'Middle Name',
'Migrants or ethnic minorities': 'Dân di cư hoặc dân tộc thiểu số',
'Military': 'Military',
'Minorities participating in coping activities': 'Minorities participating in coping activities',
'Minutes must be a number between 0 and 60': 'Minutes must be a number between 0 and 60',
'Minutes per Month': 'Minutes per Month',
'Minutes should be a number greater than 0 and less than 60': 'Minutes should be a number greater than 0 and less than 60',
'Miscellaneous': 'Miscellaneous',
'Missing': 'Missing',
'Missing Person': 'Người mất tích',
'Missing Person Details': 'Chi tiết về người mất tích',
'Missing Person Reports': 'Báo cáo số người mất tích',
'Missing Persons': 'Người mất tích',
'Missing Persons Registry': 'Missing Persons Registry',
'Missing Persons Report': 'Báo cáo số người mất tích',
'Missing Report': 'Missing Report',
'Missing Senior Citizen': 'Missing Senior Citizen',
'Missing Vulnerable Person': 'Missing Vulnerable Person',
'Mobile': 'Mobile',
'Mobile Assess': 'Mobile Assess',
'Mobile Assess.': 'Mobile Assess.',
'Mobile Basic Assessment': 'Mobile Basic Assessment',
'Mobile Phone': 'Mobile Phone',
'Mode': 'Mode',
'Modem Settings': 'Modem Settings',
'Modem settings updated': 'Modem settings updated',
'Moderator': 'Moderator',
'Modify Information on groups and individuals': 'Modify Information on groups and individuals',
'Modifying data in spreadsheet before importing it to the database': 'Modifying data in spreadsheet before importing it to the database',
'Module Administration': 'Quản trị Mô-đun',
'Module disabled!': 'Module disabled!',
'Module provides access to information on current Flood Levels.': 'Module provides access to information on current Flood Levels.',
'Module stores structured reports done by Professional Organizations - currently data includes WFP Assessments.': 'Module stores structured reports done by Professional Organizations - currently data includes WFP Assessments.',
'Monday': 'Thứ Hai',
'Monthly Cost': 'Monthly Cost',
'Monthly Salary': 'Monthly Salary',
'Months': 'Months',
'Morgue Status': 'Morgue Status',
'Morgue Units Available': 'Morgue Units Available',
'Mosque': 'Mosque',
'Motorcycle': 'Motorcycle',
'Moustache': 'Moustache',
'Movements (Filter In/Out/Lost)': 'Movements (Filter In/Out/Lost)',
'MultiPolygon': 'MultiPolygon',
'Multiple': 'Multiple',
'Multiple Choice (Multiple Answers)': 'Multiple Choice (Multiple Answers)',
'Multiple Choice (Only One Answer)': 'Multiple Choice (Only One Answer)',
'Multiple Text Fields': 'Multiple Text Fields',
'Multiplicator': 'Multiplicator',
'Muslim': 'Muslim',
'My Tasks': 'My Tasks',
'N/A': 'Không xác định',
"NB SMS requests are filtered to just those which are 'actionable', whilst the Tweet requests are unfiltered, so that is likely to be a good place to start Searching.": "NB SMS requests are filtered to just those which are 'actionable', whilst the Tweet requests are unfiltered, so that is likely to be a good place to start Searching.",
'Name': 'Tên',
'Name and/or ID': 'Name and/or ID',
'Name and/or ID Label': 'Name and/or ID Label',
'Name of Storage Bin Type.': 'Tên loại Bin lưu trữ',
'Name of the file (& optional sub-path) located in static which should be used for the background of the header.': 'Name of the file (& optional sub-path) located in static which should be used for the background of the header.',
'Name of the file (& optional sub-path) located in static which should be used for the top-left image.': 'Name of the file (& optional sub-path) located in static which should be used for the top-left image.',
'Name of the file (& optional sub-path) located in views which should be used for footer.': 'Name of the file (& optional sub-path) located in views which should be used for footer.',
'Name of the person in local language and script (optional).': 'Name of the person in local language and script (optional).',
'Name of the unit or department this report refers to. Leave empty if your hospital has no subdivisions.': 'Name of the unit or department this report refers to. Leave empty if your hospital has no subdivisions.',
'Names can be added in multiple languages': 'Names can be added in multiple languages',
'National ID Card': 'Chứng minh thư',
'National NGO': 'Các tổ chức phi chính phủ ',
'National Staff': 'National Staff',
'Nationality': 'Nationality',
'Nationality of the person.': 'Nationality of the person.',
'Nautical Accident': 'Nautical Accident',
'Nautical Hijacking': 'Nautical Hijacking',
'Need Type': 'Need Type',
'Need Type Details': 'Need Type Details',
'Need Type added': 'Need Type added',
'Need Type deleted': 'Need Type deleted',
'Need Type updated': 'Need Type updated',
'Need Types': 'Need Types',
"Need a 'url' argument!": "Need a 'url' argument!",
'Need added': 'Need added',
'Need deleted': 'Need deleted',
'Need to configure Twitter Authentication': 'Need to configure Twitter Authentication',
'Need to select 2 Locations': 'Need to select 2 Locations',
'Need to specify a Budget!': 'Need to specify a Budget!',
'Need to specify a Kit!': 'Need to specify a Kit!',
'Need to specify a Resource!': 'Need to specify a Resource!',
'Need to specify a bundle!': 'Need to specify a bundle!',
'Need to specify a group!': 'Need to specify a group!',
'Need to specify a location to search for.': 'Cần chọn địa điểm tìm kiếm',
'Need to specify a role!': 'Yêu cầu xác định vai trò',
'Need to specify a table!': 'Need to specify a table!',
'Need to specify a user!': 'Need to specify a user!',
'Need updated': 'Need updated',
'Needs': 'Needs',
'Needs Details': 'Needs Details',
'Needs to reduce vulnerability to violence': 'Needs to reduce vulnerability to violence',
'Negative Flow Isolation': 'Negative Flow Isolation',
'Neighbourhood': 'Neighbourhood',
'Neonatal ICU': 'Neonatal ICU',
'Neonatology': 'Neonatology',
'Network': 'Network',
'Neurology': 'Neurology',
'New': 'New',
'New Assessment reported from': 'New Assessment reported from',
'New Checklist': 'Checklist mới',
'New Peer': 'New Peer',
'New Record': 'New Record',
'New Report': 'New Report',
'New Request': 'Yêu cầu mới',
'New Solution Choice': 'New Solution Choice',
'New Synchronization Peer': 'New Synchronization Peer',
'New cases in the past 24h': 'New cases in the past 24h',
'Next': 'Next',
'No': 'No',
'No Activities Found': 'No Activities Found',
'No Addresses currently registered': 'Hiện tại chưa đăng ký Địa chỉ',
'No Aid Requests have been made yet': 'Chưa có yêu cầu cứu trợ nào được tạo',
'No Assessment Summaries currently registered': 'No Assessment Summaries currently registered',
'No Assessments currently registered': 'Chưa đăng ký trị giá tính thuế',
'No Baseline Types currently registered': 'No Baseline Types currently registered',
'No Baselines currently registered': 'No Baselines currently registered',
'No Budgets currently registered': 'No Budgets currently registered',
'No Bundles currently registered': 'No Bundles currently registered',
'No Catalog Items currently registered': 'No Catalog Items currently registered',
'No Category<>Sub-Category<>Catalog Relation currently registered': 'Hiện tại chưa có Category<>Sub-Category<>Catalog Relation được đăng ký',
'No Checklist available': 'No Checklist available',
'No Cluster Subsectors currently registered': 'No Cluster Subsectors currently registered',
'No Clusters currently registered': 'No Clusters currently registered',
'No Configs currently defined': 'No Configs currently defined',
'No Details currently registered': 'No Details currently registered',
'No Distribution Items currently registered': 'Chưa đăng ký danh sách hàng hóa đóng góp',
'No Distributions currently registered': 'No Distributions currently registered',
'No Documents found': 'No Documents found',
'No Donors currently registered': 'No Donors currently registered',
'No Feature Classes currently defined': 'No Feature Classes currently defined',
'No Feature Layers currently defined': 'No Feature Layers currently defined',
'No Flood Reports currently registered': 'Chưa đăng ký báo cáo lũ lụt',
'No Groups currently defined': 'Hiện tại không xác định được nhóm',
'No Groups currently registered': 'No Groups currently registered',
'No Hospitals currently registered': 'Chưa có bệnh viện nào đăng ký',
'No Identification Report Available': 'No Identification Report Available',
'No Identities currently registered': 'No Identities currently registered',
'No Image': 'Không có ảnh',
'No Images currently registered': 'Hiện tại không có ảnh nào được đăng ký',
'No Impact Types currently registered': 'No Impact Types currently registered',
'No Impacts currently registered': 'No Impacts currently registered',
'No Incident Reports currently registered': 'No Incident Reports currently registered',
'No Incidents currently registered': 'Chưa sự việc nào được đưa lên',
'No Inventory Items currently registered': 'No Inventory Items currently registered',
'No Inventory Stores currently registered': 'No Inventory Stores currently registered',
'No Item Catalog Category currently registered': 'No Item Catalog Category currently registered',
'No Item Catalog currently registered': 'No Item Catalog currently registered',
'No Item Categories currently registered': 'No Item Categories currently registered',
'No Item Packets currently registered': 'No Item Packets currently registered',
'No Item Sub-Category currently registered': 'No Item Sub-Category currently registered',
'No Item currently registered': 'No Item currently registered',
'No Items currently registered': 'No Items currently registered',
'No Items currently requested': 'Hiện tại không có hàng hóa nào được yêu cầu',
'No Keys currently defined': 'No Keys currently defined',
'No Kits currently registered': 'No Kits currently registered',
'No Locations currently available': 'No Locations currently available',
'No Locations currently registered': 'No Locations currently registered',
'No Markers currently available': 'Chưa đăng ký marker ',
'No Members currently registered': 'Chưa đăng ký thành viên',
'No Memberships currently defined': 'Chưa xác nhận đăng ký thành viên',
'No Memberships currently registered': 'Chưa có thông tin đăng ký thành viên',
'No Messages currently in Outbox': 'No Messages currently in Outbox',
'No Metadata currently defined': 'No Metadata currently defined',
'No Need Types currently registered': 'No Need Types currently registered',
'No Needs currently registered': 'No Needs currently registered',
'No Offices currently registered': 'No Offices currently registered',
'No Offices found!': 'No Offices found!',
'No Organizations currently registered': 'No Organizations currently registered',
'No People currently registered in this shelter': 'No People currently registered in this shelter',
'No Persons currently registered': 'No Persons currently registered',
'No Persons currently reported missing': 'No Persons currently reported missing',
'No Persons found': 'No Persons found',
'No Photos found': 'Không tìm thấy ảnh nào',
'No Presence Log Entries currently registered': 'No Presence Log Entries currently registered',
'No Problems currently defined': 'No Problems currently defined',
'No Projections currently defined': 'Hiện tại chưa xác định được kế hoạch dự phòng',
'No Projects currently registered': 'Chưa đăng ký dự án',
'No Rapid Assessments currently registered': 'No Rapid Assessments currently registered',
'No Received Items currently registered': 'No Received Items currently registered',
'No Received Shipments': 'No Received Shipments',
'No Records currently available': 'No Records currently available',
'No Records matching the query': 'No Records matching the query',
'No Request Items currently registered': 'No Request Items currently registered',
'No Request Shipments': 'No Request Shipments',
'No Requests have been made yet': 'No Requests have been made yet',
'No Requests match this criteria': 'No Requests match this criteria',
'No Responses currently registered': 'No Responses currently registered',
'No Rivers currently registered': 'No Rivers currently registered',
'No Roles currently defined': 'No Roles currently defined',
'No Sections currently registered': 'No Sections currently registered',
'No Sectors currently registered': 'No Sectors currently registered',
'No Sent Items currently registered': 'No Sent Items currently registered',
'No Sent Shipments': 'No Sent Shipments',
'No Settings currently defined': 'No Settings currently defined',
'No Shelter Services currently registered': 'No Shelter Services currently registered',
'No Shelter Types currently registered': 'No Shelter Types currently registered',
'No Shelters currently registered': 'Hiện tại chưa đăng ký nơi cư trú',
'No Shipment Transit Logs currently registered': 'No Shipment Transit Logs currently registered',
'No Shipment/Way Bills currently registered': 'No Shipment/Way Bills currently registered',
'No Shipment<>Item Relation currently registered': 'No Shipment<>Item Relation currently registered',
'No Sites currently registered': 'No Sites currently registered',
'No Skill Types currently set': 'Chưa cài đặt loại kỹ năng',
'No Solutions currently defined': 'No Solutions currently defined',
'No Staff Types currently registered': 'No Staff Types currently registered',
'No Staff currently registered': 'No Staff currently registered',
'No Storage Bin Type currently registered': 'Chưa đăng ký Loại Bin lưu trữ',
'No Storage Bins currently registered': 'No Storage Bins currently registered',
'No Storage Locations currently registered': 'No Storage Locations currently registered',
'No Subscription available': 'No Subscription available',
'No Survey Answers currently registered': 'No Survey Answers currently registered',
'No Survey Questions currently registered': 'No Survey Questions currently registered',
'No Survey Sections currently registered': 'No Survey Sections currently registered',
'No Survey Series currently registered': 'No Survey Series currently registered',
'No Survey Template currently registered': 'No Survey Template currently registered',
'No Tasks with Location Data': 'No Tasks with Location Data',
'No Tasks with Location Data!': 'No Tasks with Location Data!',
'No Themes currently defined': 'No Themes currently defined',
'No Tickets currently registered': 'Hiện tại chưa đăng ký Ticket ',
'No Tracks currently available': 'No Tracks currently available',
'No Units currently registered': 'Chưa đăng ký tên đơn vị',
'No Users currently registered': 'Chưa đăng ký người dùng',
'No Volunteers currently registered': 'No Volunteers currently registered',
'No Warehouse Items currently registered': 'No Warehouse Items currently registered',
'No Warehouses currently registered': 'No Warehouses currently registered',
'No Warehouses match this criteria': 'No Warehouses match this criteria',
'No access at all': 'Không truy cập',
'No access to this record!': 'No access to this record!',
'No action recommended': 'No action recommended',
'No conflicts logged': 'No conflicts logged',
'No contact information available': 'No contact information available',
'No contacts currently registered': 'Chưa đăng ký thông tin liên lạc',
'No data in this table - cannot create PDF!': 'Không có dữ liệu trong bảng - không thể tạo file PDF',
'No databases in this application': 'No databases in this application',
'No entries found': 'No entries found',
'No entries matching the query': 'No entries matching the query',
'No import jobs': 'No import jobs',
'No linked records': 'Không có bản thu liên quan',
'No location found': 'No location found',
'No location known for this person': 'No location known for this person',
'No location known for this team': 'No location known for this team',
'No locations registered at this level': 'No locations registered at this level',
'No log entries matching the query': 'No log entries matching the query',
'No matching records found.': 'No matching records found.',
'No messages in the system': 'No messages in the system',
'No peers currently registered': 'No peers currently registered',
'No pending registrations found': 'Không tìm thấy đăng ký đang chờ',
'No pending registrations matching the query': 'No pending registrations matching the query',
'No person record found for current user.': 'No person record found for current user.',
'No positions currently registered': 'No positions currently registered',
'No problem group defined yet': 'No problem group defined yet',
'No records matching the query': 'No records matching the query',
'No records to delete': 'Không có bản thu để xóa',
'No recovery reports available': 'No recovery reports available',
'No report available.': 'Không có báo cáo',
'No reports available.': 'No reports available.',
'No reports currently available': 'No reports currently available',
'No requests found': 'Không tìm thấy yêu cầu',
'No resources currently registered': 'No resources currently registered',
'No resources currently reported': 'No resources currently reported',
'No service profile available': 'No service profile available',
'No skills currently set': 'No skills currently set',
'No status information available': 'No status information available',
'No synchronization': 'Chưa đồng bộ hóa',
'No tasks currently registered': 'No tasks currently registered',
'No template found!': 'Không tìm thấy mẫu',
'No units currently registered': 'No units currently registered',
'No volunteer information registered': 'Chưa đăng ký thông tin tình nguyện viên',
'None': 'None',
'None (no such record)': 'None (no such record)',
'Noodles': 'Mì',
'Normal': 'Normal',
'Normal food sources disrupted': 'Normal food sources disrupted',
'Not Applicable': 'Not Applicable',
'Not Authorised!': 'Chưa đăng nhập',
'Not Possible': 'Not Possible',
'Not Set': 'Not Set',
'Not authorised!': 'Not authorised!',
'Not installed or incorrectly configured.': 'Chưa cài đặt hoặc tùy chỉnh chưa đúng',
'Note that this list only shows active volunteers. To see all people registered in the system, do a search from the home screen instead.': 'Note that this list only shows active volunteers. To see all people registered in the system, do a search from the home screen instead.',
'Notice to Airmen': 'Lưu ý đối với các phi công',
'Number': 'Số',
'Number of Columns': 'Number of Columns',
'Number of Patients': 'Number of Patients',
'Number of Rows': 'Số hàng',
'Number of Vehicles': 'Number of Vehicles',
'Number of additional beds of that type expected to become available in this unit within the next 24 hours.': 'Number of additional beds of that type expected to become available in this unit within the next 24 hours.',
'Number of alternative places for studying': 'Số địa điểm có thể dùng làm trường học tạm thời',
'Number of available/vacant beds of that type in this unit at the time of reporting.': 'Number of available/vacant beds of that type in this unit at the time of reporting.',
'Number of deaths during the past 24 hours.': 'Number of deaths during the past 24 hours.',
'Number of discharged patients during the past 24 hours.': 'Number of discharged patients during the past 24 hours.',
'Number of doctors': 'Number of doctors',
'Number of doctors actively working': 'Number of doctors actively working',
'Number of houses damaged, but usable': 'Number of houses damaged, but usable',
'Number of houses destroyed/uninhabitable': 'Number of houses destroyed/uninhabitable',
'Number of in-patients at the time of reporting.': 'Number of in-patients at the time of reporting.',
'Number of latrines': 'Number of latrines',
'Number of midwives actively working': 'Number of midwives actively working',
'Number of newly admitted patients during the past 24 hours.': 'Số lượng bệnh nhân tiếp nhận trong 24h qua',
'Number of non-medical staff': 'Number of non-medical staff',
'Number of nurses': 'Number of nurses',
'Number of nurses actively working': 'Number of nurses actively working',
'Number of private schools': 'Số lượng trường tư',
'Number of public schools': 'Number of public schools',
'Number of religious schools': 'Number of religious schools',
'Number of schools damaged but usable': 'Number of schools damaged but usable',
'Number of schools destroyed/uninhabitable': 'Number of schools destroyed/uninhabitable',
'Number of schools open before disaster': 'Number of schools open before disaster',
'Number of schools open now': 'Number of schools open now',
'Number of teachers affected by disaster': 'Number of teachers affected by disaster',
'Number of teachers before disaster': 'Number of teachers before disaster',
'Number of vacant/available beds in this hospital. Automatically updated from daily reports.': 'Số các giường bệnh trống trong bệnh viện. Tự động cập nhật từ các báo cáo hàng ngày.',
'Number of vacant/available units to which victims can be transported immediately.': 'Number of vacant/available units to which victims can be transported immediately.',
'Number or Label on the identification tag this person is wearing (if any).': 'Number or Label on the identification tag this person is wearing (if any).',
'Number/Percentage of affected population that is Female & Aged 0-5': 'Number/Percentage of affected population that is Female & Aged 0-5',
'Number/Percentage of affected population that is Female & Aged 13-17': 'Number/Percentage of affected population that is Female & Aged 13-17',
'Number/Percentage of affected population that is Female & Aged 18-25': 'Number/Percentage of affected population that is Female & Aged 18-25',
'Number/Percentage of affected population that is Female & Aged 26-60': 'Number/Percentage of affected population that is Female & Aged 26-60',
'Number/Percentage of affected population that is Female & Aged 6-12': 'Number/Percentage of affected population that is Female & Aged 6-12',
'Number/Percentage of affected population that is Female & Aged 61+': 'Number/Percentage of affected population that is Female & Aged 61+',
'Number/Percentage of affected population that is Male & Aged 0-5': 'Đối tượng nam trong độ tuổi 0-5 chịu ảnh hưởng từ thiên tai',
'Number/Percentage of affected population that is Male & Aged 13-17': 'Number/Percentage of affected population that is Male & Aged 13-17',
'Number/Percentage of affected population that is Male & Aged 18-25': 'Number/Percentage of affected population that is Male & Aged 18-25',
'Number/Percentage of affected population that is Male & Aged 26-60': 'Đối tượng là Nam giới và trong độ tuổi từ 26-60 chịu ảnh hưởng lớn từ thiên tai',
'Number/Percentage of affected population that is Male & Aged 6-12': 'Number/Percentage of affected population that is Male & Aged 6-12',
'Number/Percentage of affected population that is Male & Aged 61+': 'Number/Percentage of affected population that is Male & Aged 61+',
'Numbers Only': 'Chỉ dùng số',
'Nursery Beds': 'Nursery Beds',
'Nutrition': 'Dinh dưỡng',
'OK': 'OK',
'OR Reason': 'OR Reason',
'OR Status': 'OR Status',
'OR Status Reason': 'OR Status Reason',
'Observer': 'Observer',
'Obstetrics/Gynecology': 'Sản khoa/Phụ khoa',
'Office': 'Office',
'Office Address': 'Địa chỉ văn phòng',
'Office Details': 'Office Details',
'Office added': 'Đã thêm Văn phòng',
'Office deleted': 'Đã xóa Văn phòng',
'Office updated': 'Office updated',
'Offices': 'Offices',
'Offline Sync': 'Offline Sync',
'Offline Sync (from USB/File Backup)': 'Offline Sync (from USB/File Backup)',
'Old': 'Old',
'Older people as primary caregivers of children': 'Older people as primary caregivers of children',
'Older people in care homes': 'Older people in care homes',
'Older people participating in coping activities': 'Older people participating in coping activities',
'Older people with chronical illnesses': 'Older people with chronical illnesses',
'Older person (>60 yrs)': 'Older person (>60 yrs)',
'On by default?': 'Bật theo mặc định',
'On by default? (only applicable to Overlays)': 'On by default? (only applicable to Overlays)',
'One Time Cost': 'One Time Cost',
'One time cost': 'One time cost',
'One-time': 'One-time',
'One-time costs': 'One-time costs',
'Oops! Something went wrong...': 'Oops! Something went wrong...',
'Oops! something went wrong on our side.': 'Oops! something went wrong on our side.',
'Open': 'Open',
'Open Assessment': 'Open Assessment',
'Open area': 'Open area',
'Open recent': 'Open recent',
'Operating Rooms': 'Operating Rooms',
'Optional link to an Incident which this Assessment was triggered by.': 'Optional link to an Incident which this Assessment was triggered by.',
'Optional. In GeoServer, this is the Workspace Namespace URI. Within the WFS getCapabilities, this is the FeatureType Name part before the colon(:).': 'Optional. In GeoServer, this is the Workspace Namespace URI. Within the WFS getCapabilities, this is the FeatureType Name part before the colon(:).',
"Optional. The name of the geometry column. In PostGIS this defaults to 'the_geom'.": "Optional. The name of the geometry column. In PostGIS this defaults to 'the_geom'.",
'Options': 'Tùy chọn',
'Organisation': 'Organisation',
'Organization': 'Tổ chức',
'Organization Details': 'Chi tiết Tổ chức',
'Organization Registry': 'Đăng ký tổ chức',
'Organization added': 'Organization added',
'Organization deleted': 'Organization deleted',
'Organization updated': 'Organization updated',
'Organizations': 'Tổ chức',
'Origin': 'Origin',
'Origin of the separated children': 'Origin of the separated children',
'Other': 'Other',
'Other (describe)': 'Other (describe)',
'Other (specify)': 'Other (specify)',
'Other Evidence': 'Bằng chứng khác',
'Other Faucet/Piped Water': 'Các đường xả lũ khác',
'Other Isolation': 'Những vùng bị cô lập khác',
'Other Name': 'Other Name',
'Other activities of boys 13-17yrs': 'Các hoạt động khác của nam thanh niên từ 13-17 tuổi',
'Other activities of boys 13-17yrs before disaster': 'Other activities of boys 13-17yrs before disaster',
'Other activities of boys <12yrs': 'Other activities of boys <12yrs',
'Other activities of boys <12yrs before disaster': 'Các hoạt động khác của bé trai dưới 12 tuổi trước khi xảy ra thiên tai',
'Other activities of girls 13-17yrs': 'Other activities of girls 13-17yrs',
'Other activities of girls 13-17yrs before disaster': 'Other activities of girls 13-17yrs before disaster',
'Other activities of girls<12yrs': 'Other activities of girls<12yrs',
'Other activities of girls<12yrs before disaster': 'Other activities of girls<12yrs before disaster',
'Other alternative infant nutrition in use': 'Other alternative infant nutrition in use',
'Other alternative places for study': 'Những nơi có thể dùng làm trường học tạm thời',
'Other assistance needed': 'Các hỗ trợ cần thiết',
'Other assistance, Rank': 'Những sự hỗ trợ khác,thứ hạng',
'Other current health problems, adults': 'Other current health problems, adults',
'Other current health problems, children': 'Other current health problems, children',
'Other events': 'Other events',
'Other factors affecting school attendance': 'Những yếu tố khác ảnh hưởng đến việc đến trường',
'Other major expenses': 'Other major expenses',
'Other school assistance received': 'Other school assistance received',
'Other school assistance, details': 'Other school assistance, details',
'Other school assistance, source': 'Other school assistance, source',
'Other side dishes in stock': 'Other side dishes in stock',
'Other types of water storage containers': 'Other types of water storage containers',
'Other ways to obtain food': 'Other ways to obtain food',
'Outbound Mail settings are configured in models/000_config.py.': 'Outbound Mail settings are configured in models/000_config.py.',
'Outbox': 'Outbox',
'Outgoing SMS Handler': 'Outgoing SMS Handler',
'Outgoing SMS handler': 'Outgoing SMS handler',
'Overland Flow Flood': 'Overland Flow Flood',
'Owned Resources': 'Owned Resources',
'PDAM': 'PDAM',
'PIN': 'PIN',
'PIN number ': 'PIN number ',
'PL Women': 'PL Women',
'Packet': 'Packet',
'Parameters': 'Parameters',
'Parent': 'Parent',
'Parent Office': 'Parent Office',
"Parent level should be higher than this record's level. Parent level is": "Parent level should be higher than this record's level. Parent level is",
'Parent needs to be of the correct level': 'Parent needs to be of the correct level',
'Parent needs to be set': 'Parent needs to be set',
'Parent needs to be set for locations of level': 'Parent needs to be set for locations of level',
'Parents/Caregivers missing children': 'Parents/Caregivers missing children',
'Participant': 'Participant',
'Pashto': 'Pashto',
'Passport': 'Passport',
'Password': 'Password',
"Password fields don't match": "Password fields don't match",
'Pathology': 'Pathology',
'Patients': 'Bệnh nhân',
'Pediatric ICU': 'Pediatric ICU',
'Pediatric Psychiatric': 'Pediatric Psychiatric',
'Pediatrics': 'Khoa Nhi',
'Peer': 'Peer',
'Peer Details': 'Peer Details',
'Peer Registration': 'Peer Registration',
'Peer Registration Details': 'Peer Registration Details',
'Peer Registration Request': 'yêu cầu đăng ký',
'Peer Type': 'Peer Type',
'Peer UID': 'Peer UID',
'Peer added': 'Peer added',
'Peer deleted': 'Peer deleted',
'Peer not allowed to push': 'Peer not allowed to push',
'Peer registration request added': 'Đã thêm yêu cầu đăng ký',
'Peer registration request deleted': 'Peer registration request deleted',
'Peer registration request updated': 'Cập nhật yêu cẩu đăng ký',
'Peer updated': 'Peer updated',
'Peers': 'Peers',
'Pending Requests': 'yêu cầu đang chờ',
'People': 'People',
'People Needing Food': 'People Needing Food',
'People Needing Shelter': 'People Needing Shelter',
'People Needing Water': 'People Needing Water',
'People Trapped': 'People Trapped',
'People with chronical illnesses': 'People with chronical illnesses',
'Person': 'Cá nhân',
'Person 1': 'Person 1',
'Person 1, Person 2 are the potentially duplicate records': 'Person 1, Person 2 are the potentially duplicate records',
'Person 2': 'Person 2',
'Person Data': 'Person Data',
'Person De-duplicator': 'Person De-duplicator',
'Person Details': 'Chi tiết cá nhân',
'Person Finder': 'Person Finder',
'Person Registry': 'Person Registry',
'Person added': 'Person added',
'Person deleted': 'Person deleted',
'Person details updated': 'Person details updated',
'Person interviewed': 'Person interviewed',
'Person missing': 'Person missing',
'Person reporting': 'Person reporting',
'Person who has actually seen the person/group.': 'Person who has actually seen the person/group.',
'Person who is reporting about the presence.': 'Person who is reporting about the presence.',
'Person who observed the presence (if different from reporter).': 'Người quan sát tình hình (nếu khác với phóng viên)',
'Person/Group': 'Person/Group',
'Personal Data': 'Personal Data',
'Personal Effects': 'Personal Effects',
'Personal Effects Details': 'Chi tiết ảnh hưởng cá nhân',
'Personal impact of disaster': 'Personal impact of disaster',
'Persons': 'Cá nhân',
'Persons with disability (mental)': 'Người tàn tật (về tinh thần)',
'Persons with disability (physical)': 'Người tàn tật (về thể chất)',
'Phone': 'Phone',
'Phone 1': 'Điện thoại 1',
'Phone 2': 'Điện thoại 2',
"Phone number to donate to this organization's relief efforts.": 'Số điện thoại để ủng hộ cho nỗ lực cứu trợ của tổ chức này',
'Phone/Business': 'Phone/Business',
'Phone/Emergency': 'Phone/Emergency',
'Phone/Exchange': 'Phone/Exchange',
'Photo': 'Photo',
'Photo Details': 'Chi tiết ảnh',
'Photo added': 'Photo added',
'Photo deleted': 'Photo deleted',
'Photo updated': 'Photo updated',
'Photograph': 'Photograph',
'Photos': 'Photos',
'Physical Description': 'Physical Description',
'Picture upload and finger print upload facility': 'Picture upload and finger print upload facility',
'Place for solid waste disposal': 'Place for solid waste disposal',
'Place of Recovery': 'Place of Recovery',
'Places the children have been sent to': 'Places the children have been sent to',
'Playing': 'Playing',
"Please come back after sometime if that doesn't help.": "Please come back after sometime if that doesn't help.",
'Please correct all errors.': 'Please correct all errors.',
'Please enter a First Name': 'Please enter a First Name',
'Please enter a valid email address': 'Please enter a valid email address',
'Please enter the first few letters of the Person/Group for the autocomplete.': 'Please enter the first few letters of the Person/Group for the autocomplete.',
'Please enter the recipient': 'Please enter the recipient',
'Please fill this!': 'Please fill this!',
'Please report here where you are:': 'Please report here where you are:',
'Please select another level': 'Please select another level',
'Please specify any problems and obstacles with the proper handling of the disease, in detail (in numbers, where appropriate). You may also add suggestions the situation could be improved.': 'Please specify any problems and obstacles with the proper handling of the disease, in detail (in numbers, where appropriate). You may also add suggestions the situation could be improved.',
'Please use this field to record any additional information, including a history of the record if it is updated.': 'Trường này được dùng để lưu các thông tin thêm, bao gồm lịch sử theo dõi của hồ sơ nếu nó được cập nhật.',
'Please use this field to record any additional information, such as Ushahidi instance IDs. Include a history of the record if it is updated.': 'Please use this field to record any additional information, such as Ushahidi instance IDs. Include a history of the record if it is updated.',
'Pledge': 'Pledge',
'Pledge Aid': 'Pledge Aid',
'Pledge Aid to match these Requests': 'Pledge Aid to match these Requests',
'Pledge Status': 'Pledge Status',
'Pledge Support': 'Pledge Support',
'Pledged': 'Pledged',
'Pledges': 'Pledges',
'Point': 'Point',
'Poisoning': 'Poisoning',
'Poisonous Gas': 'Poisonous Gas',
'Police': 'Police',
'Pollution and other environmental': 'Ô nhiễm và các vấn đề môi trường khác',
'Polygon': 'Polygon',
'Population': 'Population',
'Porridge': 'Cháo yến mạch',
'Port': 'Port',
'Port Closure': 'Port Closure',
'Position Details': 'Position Details',
'Position added': 'Position added',
'Position deleted': 'Position deleted',
'Position type': 'Position type',
'Position updated': 'Position updated',
'Positions': 'Positions',
'Postcode': 'Postcode',
'Poultry': 'Poultry',
'Poultry restocking, Rank': 'Thu mua gia cầm, thứ hạng',
'Pounds': 'Pounds',
'Power Failure': 'Power Failure',
'Powered by Sahana Eden': 'Powered by Sahana Eden',
'Preferred Name': 'Preferred Name',
'Pregnant women': 'Pregnant women',
'Preliminary': 'Preliminary',
'Presence': 'Presence',
'Presence Condition': 'Presence Condition',
'Presence Log': 'Presence Log',
"Press the 'Delete Old' button to have all records which reference this one be repointed at the new one & then the old record will be deleted.": "Press the 'Delete Old' button to have all records which reference this one be repointed at the new one & then the old record will be deleted.",
'Previous': 'Previous',
'Primary Name': 'Primary Name',
'Priority': 'Ưu tiên',
'Priority Level': 'Priority Level',
'Private': 'Private',
'Problem': 'Problem',
'Problem Administration': 'Quản lý vấn đề',
'Problem Details': 'Problem Details',
'Problem Group': 'Problem Group',
'Problem Title': 'Problem Title',
'Problem added': 'Problem added',
'Problem deleted': 'Problem deleted',
'Problem updated': 'Đã cập nhật vấn đề',
'Problems': 'Vấn đề',
'Procedure': 'Procedure',
'Procurements': 'Procurements',
'Product Description': 'Product Description',
'Product Name': 'Product Name',
'Profile': 'Profile',
'Project': 'Project',
'Project Activities': 'Các hoạt động của dự án',
'Project Details': 'Project Details',
'Project Management': 'Project Management',
'Project Status': 'Project Status',
'Project Tracking': 'Project Tracking',
'Project added': 'Dự án đã được thêm',
'Project deleted': 'Project deleted',
'Project has no Lat/Lon': 'Project has no Lat/Lon',
'Project updated': 'Project updated',
'Projection': 'Projection',
'Projection Details': 'Projection Details',
'Projection added': 'Projection added',
'Projection deleted': 'Projection deleted',
'Projection updated': 'Đã cập nhật kế hoạch dự phòng',
'Projections': 'Projections',
'Projects': 'Projects',
'Protected resource': 'Protected resource',
'Protection': 'Protection',
'Provide Metadata for your media files': 'Provide Metadata for your media files',
'Provide a password': 'Provide a password',
'Province': 'Tỉnh/thành',
'Proxy-server': 'Proxy-server',
'Psychiatrics/Adult': 'Psychiatrics/Adult',
'Psychiatrics/Pediatric': 'Khoa thần kinh/Khoa nhi',
'Public': 'Public',
'Public Event': 'Public Event',
'Public and private transportation': 'Phương tiện vận chuyển công cộng và cá nhân',
'Pull tickets from external feed': 'Pull tickets from external feed',
'Punjabi': 'Punjabi',
'Push tickets to external system': 'Push tickets to external system',
'Put a choice in the box': 'Put a choice in the box',
'Pyroclastic Flow': 'Pyroclastic Flow',
'Pyroclastic Surge': 'Núi lửa phun',
'Python Serial module not available within the running Python - this needs installing to activate the Modem': 'Python Serial module not available within the running Python - this needs installing to activate the Modem',
'Quantity': 'Quantity',
'Quarantine': 'Quarantine',
'Queries': 'Queries',
'Query': 'Query',
'Queryable?': 'Queryable?',
'RECORD A': 'RECORD A',
'RECORD B': 'RECORD B',
'RESPONSE': 'RESPONSE',
'Race': 'Race',
'Radiological Hazard': 'Radiological Hazard',
'Radiology': 'Radiology',
'Railway Accident': 'Tại nạn đường sắt',
'Railway Hijacking': 'Railway Hijacking',
'Rain Fall': 'Rain Fall',
'Rapid Assessment': 'Rapid Assessment',
'Rapid Assessment Details': 'Rapid Assessment Details',
'Rapid Assessment added': 'Rapid Assessment added',
'Rapid Assessment deleted': 'Rapid Assessment deleted',
'Rapid Assessment updated': 'Rapid Assessment updated',
'Rapid Assessments': 'Rapid Assessments',
'Rapid Assessments & Flexible Impact Assessments': 'Rapid Assessments & Flexible Impact Assessments',
'Rapid Close Lead': 'Rapid Close Lead',
'Rating Scale': 'Rating Scale',
'Raw Database access': 'Raw Database access',
'Real World Arbitrary Units': 'Real World Arbitrary Units',
'Receive': 'Receive',
'Receive Items': 'Receive Items',
'Receive Shipment': 'Receive Shipment',
'Received': 'Received',
'Received By': 'Received By',
'Received Item Details': 'Received Item Details',
'Received Item added': 'Received Item added',
'Received Item deleted': 'Received Item deleted',
'Received Item updated': 'Received Item updated',
'Received Items': 'Received Items',
'Received Items added to Warehouse Items': 'Received Items added to Warehouse Items',
'Received Shipment Details': 'Received Shipment Details',
'Received Shipment canceled': 'Received Shipment canceled',
'Received Shipment updated': 'Received Shipment updated',
'Received Shipments': 'Received Shipments',
'Recipient': 'Recipient',
'Recipients': 'Người nhận viện trợ',
'Record Details': 'Record Details',
'Record ID': 'Record ID',
'Record Saved': 'Record Saved',
'Record added': 'Hồ sơ đã được thêm',
'Record deleted': 'Record deleted',
'Record last updated': 'Record last updated',
'Record not found!': 'Record not found!',
'Record updated': 'Record updated',
'Records': 'Records',
'Recovery': 'Recovery',
'Recovery Request': 'Phục hồi yêu cầu',
'Recovery Request added': 'Đã thêm yêu cầu phục hồi',
'Recovery Request deleted': 'phục hồi các yêu cầu bị xóa',
'Recovery Request updated': 'Cập nhật Yêu cầu phục hồi',
'Recovery Requests': 'Phục hồi yêu cầu',
'Recovery report added': 'Recovery report added',
'Recovery report deleted': 'Recovery report deleted',
'Recovery report updated': 'Recovery report updated',
'Recurring': 'Định kỳ',
'Recurring Cost': 'Recurring Cost',
'Recurring cost': 'Recurring cost',
'Recurring costs': 'Chi phí định kỳ',
'Reference Document': 'Reference Document',
'Regional': 'Địa phương',
'Register': 'Register',
'Register Person': 'Đăng ký Cá nhân',
'Register Person into this Shelter': 'Register Person into this Shelter',
'Register them as a volunteer': 'Register them as a volunteer',
'Registered People': 'Registered People',
'Registered users can': 'Người dùng đã đăng ký có thể',
'Registering ad-hoc volunteers willing to contribute': 'Registering ad-hoc volunteers willing to contribute',
'Registration': 'Registration',
'Registration Details': 'Registration Details',
'Registration added': 'Bản đăng ký đã được thêm',
'Registration entry deleted': 'Registration entry deleted',
'Registration key': 'Registration key',
'Registration updated': 'Registration updated',
'Registry keeps track of all the relief organizations working in the disaster region. It captures not only the places where they are active, but also captures information on the range of projects they are providing in each area.': 'Registry keeps track of all the relief organizations working in the disaster region. It captures not only the places where they are active, but also captures information on the range of projects they are providing in each area.',
'Rehabilitation/Long Term Care': 'Rehabilitation/Long Term Care',
'Reliable access to sanitation/hygiene items': 'Reliable access to sanitation/hygiene items',
'Relief': 'Relief',
'Relief Item Catalog': 'Relief Item Catalog',
'Relief Team': 'Relief Team',
'Religion': 'Religion',
'Religious Leader': 'Religious Leader',
'Relocate as instructed in the <instruction>': 'Relocate as instructed in the <instruction>',
'Remove': 'Remove',
'Repeat your password': 'Repeat your password',
'Replace': 'Replace',
'Replace if Master': 'Replace if Master',
'Replace if Newer': 'Thay thế nếu mới hơn',
'Report': 'Report',
'Report Another Assessment...': 'Report Another Assessment...',
'Report Details': 'Report Details',
'Report Resource': 'Report Resource',
'Report Type': 'Loại báo cáo',
'Report Types Include': 'Report Types Include',
'Report a Problem with the Software': 'báo cáo lỗi bằng phần mềm',
'Report added': 'Đã thêm báo cáo',
'Report deleted': 'Đã xóa báo cáo',
'Report my location': 'Report my location',
'Report that person missing': 'Report that person missing',
'Report the contributing factors for the current EMS status.': 'Báo cáo các nhân tố đóng góp cho tình trạng EMS hiện tại.',
'Report the contributing factors for the current OR status.': 'Report the contributing factors for the current OR status.',
'Report the person as found': 'Report the person as found',
'Report them as found': 'Report them as found',
'Report them missing': 'Report them missing',
'Report updated': 'Report updated',
'ReportLab module not available within the running Python - this needs installing for PDF output!': 'ReportLab module not available within the running Python - this needs installing for PDF output!',
'Reporter': 'Reporter',
'Reporter Name': 'Reporter Name',
'Reporting on the projects in the region': 'Reporting on the projects in the region',
'Reports': 'Reports',
'Request': 'Yêu cầu',
'Request Added': 'Request Added',
'Request Canceled': 'Request Canceled',
'Request Details': 'Chi tiết yêu cầu',
'Request Item': 'Request Item',
'Request Item Details': 'Chi tiết yêu cầu hàng hóa',
'Request Item added': 'Đã thêm yêu cầu hàng hóa',
'Request Item deleted': 'Xóa yêu cầu hàng hóa',
'Request Item updated': 'Đã cập nhật hàng hóa yêu cầu',
'Request Items': 'Yêu cầu hàng hóa',
'Request Type': 'Loại yêu cầu',
'Request Updated': 'Request Updated',
'Request added': 'Request added',
'Request deleted': 'Request deleted',
'Request for Role Upgrade': 'yêu cầu nâng cấp vai trò',
'Request updated': 'Request updated',
'Request, Response & Session': 'Yêu cầu, Phản hồi và Tương tác',
'Requested': 'Đã yêu cầu',
'Requested By Location': 'Requested By Location',
'Requested From Warehouse': 'Requested From Warehouse',
'Requested by': 'Yêu cầu bởi',
'Requested on': 'Requested on',
'Requester': 'Requester',
'Requestor': 'Người yêu cầu',
'Requests': 'Yêu cầu',
'Requests From': 'Requests From',
'Requests for Item': 'Yêu cầu hàng hóa',
'Requires Login!': 'Requires Login!',
'Requires login': 'Requires login',
'Rescue and recovery': 'Rescue and recovery',
'Reset': 'Reset',
'Reset Password': 'Đặt lại mật khẩu',
'Reset Password key': 'Reset Password key',
'Reset form': 'Đặt lại mẫu',
'Resolve': 'Resolve',
'Resolve Conflict': 'Resolve Conflict',
'Resolve link brings up a new screen which helps to resolve these duplicate records and update the database.': 'Resolve link brings up a new screen which helps to resolve these duplicate records and update the database.',
'Resource': 'Resource',
'Resource Details': 'Resource Details',
'Resource added': 'Resource added',
'Resource deleted': 'Resource deleted',
'Resource updated': 'Resource updated',
'Resources': 'Tài nguyên',
'Respiratory Infections': 'Respiratory Infections',
'Response': 'Response',
'Response Details': 'Response Details',
'Response added': 'Response added',
'Response deleted': 'Xóa phản hồi',
'Response updated': 'Response updated',
'Responses': 'Responses',
'Restricted Access': 'Restricted Access',
'Restrictions': 'Restrictions',
'Results': 'Results',
'Retail Crime': 'Retail Crime',
'Retrieve Password': 'Retrieve Password',
'Rice': 'Rice',
'Riot': 'Riot',
'River': 'River',
'River Details': 'Chi tiết Sông',
'River added': 'River added',
'River deleted': 'River deleted',
'River updated': 'River updated',
'Rivers': 'Rivers',
'Road Accident': 'Tai nạn giao thông đường bộ',
'Road Closed': 'Road Closed',
'Road Conditions': 'Road Conditions',
'Road Delay': 'Road Delay',
'Road Hijacking': 'Road Hijacking',
'Road Usage Condition': 'Điều kiện lưu thông đường bộ',
'Role': 'Role',
'Role Details': 'Chi tiết vai trò',
'Role Manager': 'Role Manager',
'Role Required': 'Role Required',
'Role Updated': 'Role Updated',
'Role added': 'Role added',
'Role deleted': 'Role deleted',
'Role updated': 'Role updated',
'Role-based': 'Role-based',
'Roles': 'Roles',
'Roles Permitted': 'Roles Permitted',
'Roof tile': 'Roof tile',
'Row Choices (One Per Line)': 'Row Choices (One Per Line)',
'Rows in table': 'Rows in table',
'Rows selected': 'Rows selected',
'Run Functional Tests': 'Kiểm thử chức năng',
'Run Interval': 'Run Interval',
'Running Cost': 'Running Cost',
'SITUATION': 'SITUATION',
'Safe environment for vulnerable groups': 'Safe environment for vulnerable groups',
'Safety of children and women affected by disaster': 'Safety of children and women affected by disaster',
'Sahana Administrator': 'Quản trị viên Sahana',
'Sahana Agasti': 'Sahana Agasti',
'Sahana Blue': 'Sahana Blue',
'Sahana Community Chat': 'Sahana Community Chat',
'Sahana Eden': 'Sahana Eden',
'Sahana Eden <=> Other': 'Sahana Eden <=> Other',
'Sahana Eden <=> Other (Sahana Agasti, Ushahidi, etc.)': 'Sahana Eden <=> Other (Sahana Agasti, Ushahidi, etc.)',
'Sahana Eden <=> Sahana Eden': 'Sahana Eden <=> Sahana Eden',
'Sahana Eden Disaster Management Platform': 'Sahana Eden Disaster Management Platform',
'Sahana Eden Open Source Disaster Management Platform': 'Sahana Eden Open Source Disaster Management Platform',
'Sahana Eden Website': 'Website Sahana Eden',
'Sahana Eden is a family of applications that provide solutions to coordination and collaboration for organisations working in disaster management.': 'Sahana Eden is a family of applications that provide solutions to coordination and collaboration for organisations working in disaster management.',
'Sahana FOSS Disaster Management System': 'Sahana FOSS Disaster Management System',
'Sahana Green': 'Sahana Green',
'Sahana Login Approval Pending': 'Chờ chấp nhận đăng nhập vào Sahana',
'Sahana Steel': 'Thép Sahana',
'Sahana access granted': 'Sahana access granted',
'Sahana: new request has been made. Please login to see if you can fulfil the request.': 'Sahana: new request has been made. Please login to see if you can fulfil the request.',
'Salted Fish': 'Salted Fish',
'Salvage material usable from destroyed houses': 'Salvage material usable from destroyed houses',
'Salvage material usable from destroyed schools': 'Salvage material usable from destroyed schools',
'Sanitation problems': 'Sanitation problems',
'Satellite': 'Vệ tinh',
'Satellite Office': 'Satellite Office',
'Saturday': 'Saturday',
'Save': 'Lưu',
'Save any Changes in the one you wish to keep': 'Lưu mọi thay đổi ở bất kỳ nơi nào bạn muốn',
'Saved.': 'Saved.',
'Saving...': 'Saving...',
'Scale of Results': 'Phạm vi của kết quả',
'Schedule': 'Lịch trình',
'School': 'School',
'School Closure': 'School Closure',
'School Lockdown': 'School Lockdown',
'School Reports': 'School Reports',
'School Teacher': 'School Teacher',
'School assistance received/expected': 'School assistance received/expected',
'School destroyed': 'School destroyed',
'School heavily damaged': 'School heavily damaged',
'School tents received': 'School tents received',
'School tents, source': 'School tents, source',
'School used for other purpose': 'School used for other purpose',
'School/studying': 'School/studying',
'Schools': 'Schools',
'Search': 'Tìm kiếm',
'Search & List Bin Types': 'Search & List Bin Types',
'Search & List Bins': 'Search & List Bins',
'Search & List Catalog': 'Tìm kiếm và liệt kê các danh mục',
'Search & List Category': 'Tìm và liệt kê danh mục',
'Search & List Items': 'Tìm kiếm và hiển thị danh sách hàng hóa',
'Search & List Locations': 'Tìm và liệt kê các địa điểm',
'Search & List Site': 'Search & List Site',
'Search & List Sub-Category': 'Tìm kiếm và lên danh sách Tiêu chí phụ',
'Search & List Unit': 'Search & List Unit',
'Search Activities': 'Tìm kiếm các hoạt động',
'Search Activity Report': 'Search Activity Report',
'Search Addresses': 'Search Addresses',
'Search Aid Requests': 'Tìm kiếm Yêu cầu cứu trợ',
'Search Assessment Summaries': 'Search Assessment Summaries',
'Search Assessments': 'Tìm kiếm các đánh giá',
'Search Baseline Type': 'Search Baseline Type',
'Search Baselines': 'Search Baselines',
'Search Budgets': 'Tìm kiếm các ngân sách',
'Search Bundles': 'Search Bundles',
'Search Catalog Items': 'Search Catalog Items',
'Search Category<>Sub-Category<>Catalog Relation': 'Search Category<>Sub-Category<>Catalog Relation',
'Search Checklists': 'Tìm kiếm Checklist',
'Search Cluster Subsectors': 'Search Cluster Subsectors',
'Search Clusters': 'Search Clusters',
'Search Configs': 'Search Configs',
'Search Contact Information': 'Tìm thông tin liên lạc',
'Search Contacts': 'Tìm kiếm các đầu mối liên lạc',
'Search Distribution Items': 'Search Distribution Items',
'Search Distributions': 'Tìm kiếm Quyên góp',
'Search Documents': 'Tìm kiếm các tài liệu',
'Search Donors': 'Tìm kiếm những người ủng hộ',
'Search Feature Class': 'Search Feature Class',
'Search Feature Layers': 'Tìm kiếm Layer chức năng',
'Search Flood Reports': 'Tìm các báo cáo về lũ lụt',
'Search Groups': 'Search Groups',
'Search Hospitals': 'Tìm kếm các bệnh viện',
'Search Identity': 'Search thông tin nhận dạng',
'Search Images': 'Tìm kếm các ảnh',
'Search Impact Type': 'Search Impact Type',
'Search Impacts': 'Search Impacts',
'Search Incident Reports': 'Search Incident Reports',
'Search Incidents': 'Search Incidents',
'Search Inventory Items': 'Search Inventory Items',
'Search Inventory Stores': 'Search Inventory Stores',
'Search Item Catalog Category(s)': 'Search Item Catalog Category(s)',
'Search Item Catalog(s)': 'Tìm kiếm Catalog hàng hóa',
'Search Item Categories': 'Search Item Categories',
'Search Item Packets': 'Search Item Packets',
'Search Item Sub-Category(s)': 'Search Item Sub-Category(s)',
'Search Items': 'Search Items',
'Search Keys': 'Tìm kiếm mã',
'Search Kits': 'Search Kits',
'Search Layers': 'Tìm kiếm các lớp',
'Search Locations': 'Tìm kiếm các địa điểm',
'Search Log Entry': 'Search Log Entry',
'Search Markers': 'Search Markers',
'Search Member': 'Tìm thành viên',
'Search Membership': 'Tìm kiếm thành viên',
'Search Memberships': 'Tim kiếm thành viên',
'Search Metadata': 'Tìm kiếm dữ liệu',
'Search Need Type': 'Search Need Type',
'Search Needs': 'Search Needs',
'Search Offices': 'Tìm các văn phòng',
'Search Organizations': 'Tìm kiếm các tổ chức',
'Search Peer': 'Search Peer',
'Search Personal Effects': 'Search Personal Effects',
'Search Persons': 'Tìm kiếm Cá nhân',
'Search Photos': 'Tìm kiếm ảnh',
'Search Positions': 'Search Positions',
'Search Problems': 'Search Problems',
'Search Projections': 'Search Projections',
'Search Projects': 'Tìm kiếm các dự án',
'Search Rapid Assessments': 'Search Rapid Assessments',
'Search Received Items': 'Search Received Items',
'Search Received Shipments': 'Search Received Shipments',
'Search Records': 'Tìm các hồ sơ',
'Search Recovery Reports': 'Search Recovery Reports',
'Search Registations': 'Tìm kiếm các đăng ký',
'Search Registration Request': 'Tìm kiếm Yêu cầu Đăng ký',
'Search Report': 'Tìm kiếm báo cáo',
'Search Reports': 'Tìm kiếm Báo cáo',
'Search Request': 'Tìm kiếm yêu cầu',
'Search Request Items': 'Tìm kiếm Yêu cầu hàng hóa',
'Search Requests': 'Search Requests',
'Search Resources': 'Tìm kiếm các nguồn lực',
'Search Responses': 'Search Responses',
'Search Rivers': 'Search Rivers',
'Search Roles': 'Tìm các vai trò',
'Search Sections': 'Search Sections',
'Search Sectors': 'Search Sectors',
'Search Sent Items': 'Search Sent Items',
'Search Sent Shipments': 'Search Sent Shipments',
'Search Service Profiles': 'Search Service Profiles',
'Search Settings': 'Search Settings',
'Search Shelter Services': 'Search Shelter Services',
'Search Shelter Types': 'Tìm kiếm Loại Cư trú',
'Search Shelters': 'Search Shelters',
'Search Shipment Transit Logs': 'Search Shipment Transit Logs',
'Search Shipment/Way Bills': 'Search Shipment/Way Bills',
'Search Shipment<>Item Relation': 'Search Shipment<>Item Relation',
'Search Site(s)': 'Search Site(s)',
'Search Skill Types': 'Search Skill Types',
'Search Skills': 'Search Skills',
'Search Solutions': 'Search Solutions',
'Search Staff': 'Search Staff',
'Search Staff Types': 'Search Staff Types',
'Search Status': 'Search Status',
'Search Storage Bin Type(s)': 'Search Storage Bin Type(s)',
'Search Storage Bin(s)': 'Search Storage Bin(s)',
'Search Storage Location(s)': 'Tìm kiếm kho lưu trữ',
'Search Subscriptions': 'Tìm kiếm danh sách, số tiền quyên góp',
'Search Tasks': 'Search Tasks',
'Search Teams': 'Tìm kiếm các đội',
'Search Themes': 'Tìm kiếm chủ đề',
'Search Tickets': 'Search Tickets',
'Search Tracks': 'Tìm kiếm dấu vết',
'Search Twitter Tags': 'Search Twitter Tags',
'Search Units': 'Search Units',
'Search Users': 'Search Users',
'Search Volunteer Registrations': 'Tìm kiếm Đăng ký tình nguyện viên',
'Search Volunteers': 'Search Volunteers',
'Search Warehouse Items': 'Search Warehouse Items',
'Search Warehouses': 'Search Warehouses',
'Search and Edit Group': 'Tìm và sửa thông tin nhóm',
'Search and Edit Individual': 'Tìm kiếm và chỉnh sửa cá nhân',
'Search by ID Tag': 'Search by ID Tag',
'Search by Skill Types': 'Search by Skill Types',
'Search for Items': 'Search for Items',
'Search for a Hospital': 'Tìm kiếm bệnh viện',
'Search for a Location': 'Tìm một địa điểm',
'Search for a Person': 'Tìm kiếm một người',
'Search for a Project': 'Tìm kiếm dự án',
'Search for a Request': 'Tìm kiếm một yêu cầu',
'Search here for a person in order to:': 'Search here for a person in order to:',
"Search here for a person's record in order to:": "Search here for a person's record in order to:",
'Search messages': 'Search messages',
'Searching for different groups and individuals': 'Searching for different groups and individuals',
'Secondary Server (Optional)': 'Secondary Server (Optional)',
'Seconds must be a number between 0 and 60': 'Giây phải là số từ 0 đến 60',
'Section Details': 'Chi tiết khu vực',
'Section deleted': 'Section deleted',
'Section updated': 'Section updated',
'Sections': 'Sections',
'Sector': 'Sector',
'Sector Details': 'Sector Details',
'Sector added': 'Sector added',
'Sector deleted': 'Sector deleted',
'Sector updated': 'Sector updated',
'Sectors': 'Sectors',
'Security Policy': 'Chính sách bảo mật',
'Security Status': 'Security Status',
'Security problems': 'Security problems',
'Seen': 'Seen',
'Select 2 potential locations from the dropdowns.': 'Select 2 potential locations from the dropdowns.',
'Select Photos': 'Select Photos',
'Select a location': 'Select a location',
"Select a person in charge for status 'assigned'": "Select a person in charge for status 'assigned'",
'Select a question from the list': 'Chọn một câu hỏi trong danh sách',
'Select all that apply': 'Chọn tất cả các áp dụng trên',
'Select an Organization to see a list of offices': 'Select an Organization to see a list of offices',
'Select the overlays for Assessments and Activities relating to each Need to identify the gap.': 'Select the overlays for Assessments and Activities relating to each Need to identify the gap.',
'Select the person assigned to this role for this project.': 'Select the person assigned to this role for this project.',
'Select the person associated with this scenario.': 'Select the person associated with this scenario.',
'Selects whether to use a Modem, Tropo or other Gateway for sending out SMS': 'Selects whether to use a Modem, Tropo or other Gateway for sending out SMS',
'Self Registration': 'Self Registration',
'Self-registration': 'Self-registration',
'Send': 'Send',
'Send Alerts using Email &/or SMS': 'Send Alerts using Email &/or SMS',
'Send Mail': 'Send Mail',
'Send Notification': 'Send Notification',
'Send Shipment': 'Send Shipment',
'Send message': 'Gửi tin nhắn',
'Send new message': 'Gửi tin nhắn mới',
'Sends & Receives Alerts via Email & SMS': 'Sends & Receives Alerts via Email & SMS',
'Senior (50+)': 'Senior (50+)',
'Sensitivity': 'Mức độ nhạy cảm',
'Sent': 'Sent',
'Sent Item': 'Sent Item',
'Sent Item Details': 'Sent Item Details',
'Sent Item added': 'Sent Item added',
'Sent Item deleted': 'Sent Item deleted',
'Sent Item updated': 'Sent Item updated',
'Sent Items': 'Sent Items',
'Sent Shipment Details': 'Sent Shipment Details',
'Sent Shipment canceled': 'Sent Shipment canceled',
'Sent Shipment updated': 'Sent Shipment updated',
'Sent Shipments': 'Sent Shipments',
'Separate latrines for women and men': 'Separate latrines for women and men',
'Seraiki': 'Seraiki',
'Series': 'Series',
'Server': 'Server',
'Service': 'Service',
'Service Catalogue': 'Service Catalogue',
'Service or Facility': 'Dịch vụ hoặc phương tiện',
'Service profile added': 'Đã thêm thông tin dịch vụ',
'Service profile deleted': 'Service profile deleted',
'Service profile updated': 'Service profile updated',
'Services': 'Dịch vụ',
'Services Available': 'Các dịch vụ đang triển khai',
'Setting Details': 'Setting Details',
'Setting added': 'Đã thêm cài đặt',
'Setting deleted': 'Setting deleted',
'Setting updated': 'Setting updated',
'Settings': 'Cài đặt',
'Settings updated': 'Settings updated',
'Settings were reset because authenticating with Twitter failed': 'Settings were reset because authenticating with Twitter failed',
'Severity': 'Severity',
'Severity:': 'Severity:',
'Share a common Marker (unless over-ridden at the Feature level)': 'Chia sẻ Đèn hiệu chung(nếu không vượt mức tính năng)',
'Shelter': 'Cư trú',
'Shelter & Essential NFIs': 'Shelter & Essential NFIs',
'Shelter Details': 'Shelter Details',
'Shelter Name': 'Shelter Name',
'Shelter Registry': 'Đăng ký tạm trú',
'Shelter Service': 'Shelter Service',
'Shelter Service Details': 'Chi tiết dịch vụ cư trú',
'Shelter Service added': 'Shelter Service added',
'Shelter Service deleted': 'Shelter Service deleted',
'Shelter Service updated': 'Shelter Service updated',
'Shelter Services': 'Dịch vụ cư trú',
'Shelter Type': 'Shelter Type',
'Shelter Type Details': 'Shelter Type Details',
'Shelter Type added': 'Shelter Type added',
'Shelter Type deleted': 'Shelter Type deleted',
'Shelter Type updated': 'Shelter Type updated',
'Shelter Types': 'Shelter Types',
'Shelter Types and Services': 'Shelter Types and Services',
'Shelter added': 'Đã thêm Thông tin cư trú',
'Shelter deleted': 'Shelter deleted',
'Shelter updated': 'Shelter updated',
'Shelter/NFI assistance received/expected': 'Shelter/NFI assistance received/expected',
'Shelters': 'Shelters',
'Shipment Received': 'Shipment Received',
'Shipment Sent': 'Shipment Sent',
'Shipment Transit Log Details': 'Shipment Transit Log Details',
'Shipment Transit Log added': 'Shipment Transit Log added',
'Shipment Transit Log deleted': 'Shipment Transit Log deleted',
'Shipment Transit Log updated': 'Shipment Transit Log updated',
'Shipment Transit Logs': 'Shipment Transit Logs',
'Shipment/Way Bill added': 'Shipment/Way Bill added',
'Shipment/Way Bills': 'Shipment/Way Bills',
'Shipment/Way Bills Details': 'Shipment/Way Bills Details',
'Shipment/Way Bills deleted': 'Shipment/Way Bills deleted',
'Shipment/Way Bills updated': 'Shipment/Way Bills updated',
'Shipment<>Item Relation added': 'Shipment<>Item Relation added',
'Shipment<>Item Relation deleted': 'Shipment<>Item Relation deleted',
'Shipment<>Item Relation updated': 'Shipment<>Item Relation updated',
'Shipment<>Item Relations': 'Shipment<>Item Relations',
'Shipment<>Item Relations Details': 'Shipment<>Item Relations Details',
'Shipments': 'Shipments',
'Shipments To': 'Shipments To',
'Shooting': 'Shooting',
'Short Assessment': 'Short Assessment',
'Short Description': 'Short Description',
'Show Checklist': 'Show Checklist',
'Show on map': 'Hiển thị trên bản đồ',
'Sindhi': 'Sindhi',
'Site': 'Địa điểm',
'Site Address': 'Site Address',
'Site Administration': 'Quản trị Site',
'Site Description': 'Site Description',
'Site Details': 'Site Details',
'Site ID': 'Site ID',
'Site Location Description': 'Site Location Description',
'Site Location Name': 'Site Location Name',
'Site Manager': 'Site Manager',
'Site Name': 'Site Name',
'Site added': 'Site added',
'Site deleted': 'Site deleted',
'Site updated': 'Site updated',
'Site/Warehouse': 'Site/Warehouse',
'Sites': 'Trang web',
'Situation Awareness & Geospatial Analysis': 'Nhận biết tình huống và phân tích tọa độ địa lý',
'Sketch': 'Sketch',
'Skill': 'Skill',
'Skill Details': 'Chi tiết kỹ năng',
'Skill Status': 'Skill Status',
'Skill Type Details': 'Skill Type Details',
'Skill Type added': 'Skill Type added',
'Skill Type deleted': 'Skill Type deleted',
'Skill Type updated': 'Skill Type updated',
'Skill Types': 'Skill Types',
'Skill added': 'Đã thêm kỹ năng',
'Skill deleted': 'Skill deleted',
'Skill updated': 'Skill updated',
'Skills': 'Skills',
'Skype ID': 'Skype ID',
'Small Trade': 'Small Trade',
'Smoke': 'Smoke',
'Snow Fall': 'Snow Fall',
'Snow Squall': 'Snow Squall',
'Solid waste': 'Solid waste',
'Solution': 'Solution',
'Solution Details': 'Solution Details',
'Solution Item': 'Solution Item',
'Solution added': 'Solution added',
'Solution deleted': 'Đã xóa giải pháp',
'Solution updated': 'Solution updated',
'Solutions': 'Solutions',
'Some': 'Some',
'Sorry - the server has a problem, please try again later.': 'Sorry - the server has a problem, please try again later.',
'Sorry that location appears to be outside the area of the Parent.': 'Sorry that location appears to be outside the area of the Parent.',
'Sorry that location appears to be outside the area supported by this deployment.': 'Sorry that location appears to be outside the area supported by this deployment.',
'Sorry, I could not understand your request': 'Xin lỗi, tôi không hiểu yêu cầu của bạn',
'Sorry, only users with the MapAdmin role are allowed to edit these locations': 'Sorry, only users with the MapAdmin role are allowed to edit these locations',
'Sorry, something went wrong.': 'Sorry, something went wrong.',
'Sorry, that page is forbidden for some reason.': 'Xin lỗi, trang này bị cấm vì một số lý do',
'Sorry, that service is temporary unavailable.': 'Xin lỗi, dịch vụ đó tạm thời không hoạt động',
'Sorry, there are no addresses to display': 'Sorry, there are no addresses to display',
"Sorry, things didn't get done on time.": "Sorry, things didn't get done on time.",
"Sorry, we couldn't find that page.": 'Xin lỗi, chúng tôi không tìm thấy trang đó',
'Source': 'Source',
'Source ID': 'Source ID',
'Source Time': 'Source Time',
'Source Type': 'Source Type',
'Space Debris': 'Space Debris',
'Spanish': 'Người Tây Ban Nha',
'Special Ice': 'Special Ice',
'Special Marine': 'Special Marine',
'Special needs': 'Nhu cầu đặc biệt',
'Specialized Hospital': 'Bệnh viện chuyên khoa',
'Specific Area (e.g. Building/Room) within the Location that this Person/Group is seen.': 'Specific Area (e.g. Building/Room) within the Location that this Person/Group is seen.',
'Specific locations need to have a parent of level': 'Specific locations need to have a parent of level',
'Specify a descriptive title for the image.': 'Specify a descriptive title for the image.',
'Specify the bed type of this unit.': 'Specify the bed type of this unit.',
'Specify the minimum sustainability in weeks or days.': 'Specify the minimum sustainability in weeks or days.',
'Specify the number of available sets': 'Specify the number of available sets',
'Specify the number of available units (adult doses)': 'Specify the number of available units (adult doses)',
'Specify the number of available units (litres) of Ringer-Lactate or equivalent solutions': 'Specify the number of available units (litres) of Ringer-Lactate or equivalent solutions',
'Specify the number of sets needed per 24h': 'Specify the number of sets needed per 24h',
'Specify the number of units (adult doses) needed per 24h': 'Specify the number of units (adult doses) needed per 24h',
'Specify the number of units (litres) of Ringer-Lactate or equivalent solutions needed per 24h': 'Specify the number of units (litres) of Ringer-Lactate or equivalent solutions needed per 24h',
'Spherical Mercator?': 'Spherical Mercator?',
'Spreadsheet Importer': 'Spreadsheet Importer',
'Spreadsheet uploaded': 'Spreadsheet uploaded',
'Spring': 'Spring',
'Squall': 'Squall',
'Staff': 'Staff',
'Staff 2': 'Staff 2',
'Staff Details': 'Staff Details',
'Staff Type Details': 'Staff Type Details',
'Staff Type added': 'Staff Type added',
'Staff Type deleted': 'Staff Type deleted',
'Staff Type updated': 'Staff Type updated',
'Staff Types': 'Staff Types',
'Staff added': 'Staff added',
'Staff deleted': 'Xóa tên nhân viên',
'Staff present and caring for residents': 'Staff present and caring for residents',
'Staff updated': 'Staff updated',
'Staffing': 'Staffing',
'Start date': 'Ngày bắt đầu',
'Start of Period': 'Start of Period',
'State': 'State',
'Stationery': 'Stationery',
'Status': 'Status',
'Status Report': 'Status Report',
'Status added': 'Status added',
'Status deleted': 'Status deleted',
'Status of clinical operation of the facility.': 'Status of clinical operation of the facility.',
'Status of general operation of the facility.': 'Status of general operation of the facility.',
'Status of morgue capacity.': 'Status of morgue capacity.',
'Status of operations of the emergency department of this hospital.': 'Tình trạng hoạt động của phòng cấp cứu tại bệnh viện này',
'Status of security procedures/access restrictions in the hospital.': 'Trạng thái của các giới hạn thủ tục/truy nhập an ninh trong bệnh viện',
'Status of the operating rooms of this hospital.': 'Trạng thái các phòng bệnh trong bệnh viện này',
'Status updated': 'Status updated',
'Storage Bin': 'Storage Bin',
'Storage Bin Details': 'Storage Bin Details',
'Storage Bin Number': 'Storage Bin Number',
'Storage Bin Type': 'Storage Bin Type',
'Storage Bin Type Details': 'Storage Bin Type Details',
'Storage Bin Type added': 'Storage Bin Type added',
'Storage Bin Type deleted': 'Storage Bin Type deleted',
'Storage Bin Type updated': 'Storage Bin Type updated',
'Storage Bin Types': 'Storage Bin Types',
'Storage Bin added': 'Storage Bin added',
'Storage Bin deleted': 'Storage Bin deleted',
'Storage Bin updated': 'Storage Bin updated',
'Storage Bins': 'Storage Bins',
'Storage Location': 'Storage Location',
'Storage Location Details': 'Storage Location Details',
'Storage Location ID': 'Storage Location ID',
'Storage Location Name': 'Storage Location Name',
'Storage Location added': 'Storage Location added',
'Storage Location deleted': 'Storage Location deleted',
'Storage Location updated': 'Storage Location updated',
'Storage Locations': 'Storage Locations',
'Store spreadsheets in the Eden database': 'Store spreadsheets in the Eden database',
'Storm Force Wind': 'Storm Force Wind',
'Storm Surge': 'Storm Surge',
'Stowaway': 'Stowaway',
'Street': 'Street',
'Street (continued)': 'Street (continued)',
'Street Address': 'Street Address',
'Strong Wind': 'Strong Wind',
'Sub Category': 'Sub Category',
'Sub-type': 'Sub-type',
'Subject': 'Subject',
'Submission successful - please wait': 'Submission successful - please wait',
'Submission successful - please wait...': 'Submission successful - please wait...',
'Subscription Details': 'Subscription Details',
'Subscription added': 'Subscription added',
'Subscription deleted': 'Subscription deleted',
'Subscription updated': 'Subscription updated',
'Subscriptions': 'Quyên góp',
'Subsistence Cost': 'Mức sống tối thiểu',
'Sufficient care/assistance for chronically ill': 'Sufficient care/assistance for chronically ill',
'Suggest not changing this field unless you know what you are doing.': 'Khuyến nghị bạn không thay đổi trường này khi chưa chắc chắn',
'Summary': 'Summary',
'Sunday': 'Sunday',
'Support Request': 'Hỗ trợ yêu cầu',
'Supports the decision making of large groups of Crisis Management Experts by helping the groups create ranked list.': 'Supports the decision making of large groups of Crisis Management Experts by helping the groups create ranked list.',
'Sure you want to delete this object?': 'Sure you want to delete this object?',
'Surgery': 'Surgery',
'Survey Answer': 'Survey Answer',
'Survey Answer Details': 'Survey Answer Details',
'Survey Answer added': 'Trả lời khảo sát đã được thêm',
'Survey Answer deleted': 'Survey Answer deleted',
'Survey Answer updated': 'Survey Answer updated',
'Survey Module': 'Survey Module',
'Survey Name': 'Tên khảo sát',
'Survey Question': 'Survey Question',
'Survey Question Details': 'Survey Question Details',
'Survey Question Display Name': 'Tên trên bảng câu hỏi khảo sát',
'Survey Question added': 'Survey Question added',
'Survey Question deleted': 'Survey Question deleted',
'Survey Question updated': 'Survey Question updated',
'Survey Section': 'Survey Section',
'Survey Section Details': 'Survey Section Details',
'Survey Section Display Name': 'Survey Section Display Name',
'Survey Section added': 'Đã thêm khu vực khảo sát',
'Survey Section deleted': 'Survey Section deleted',
'Survey Section updated': 'Cập nhật khu vực khảo sát',
'Survey Series': 'Survey Series',
'Survey Series Details': 'Survey Series Details',
'Survey Series Name': 'Survey Series Name',
'Survey Series added': 'Survey Series added',
'Survey Series deleted': 'Survey Series deleted',
'Survey Series updated': 'Đã cập nhật serie khảo sát',
'Survey Template': 'Survey Template',
'Survey Template Details': 'Survey Template Details',
'Survey Template added': 'Thêm mẫu Khảo sát',
'Survey Template deleted': 'Survey Template deleted',
'Survey Template updated': 'Survey Template updated',
'Survey Templates': 'Survey Templates',
'Switch this on to use individual CSS/Javascript files for diagnostics during development.': 'Switch this on to use individual CSS/Javascript files for diagnostics during development.',
'Symbology': 'Symbology',
'Sync Conflicts': 'Sync Conflicts',
'Sync History': 'Sync History',
'Sync Now': 'Đồng bộ hóa ngay bây giờ',
'Sync Partners': 'Sync Partners',
'Sync Partners are instances or peers (SahanaEden, SahanaAgasti, Ushahidi, etc.) that you want to sync information with. Click on the link on the right to go the page where you can add sync partners, search for sync partners and modify them.': 'Sync Partners are instances or peers (SahanaEden, SahanaAgasti, Ushahidi, etc.) that you want to sync information with. Click on the link on the right to go the page where you can add sync partners, search for sync partners and modify them.',
'Sync Pools': 'Sync Pools',
'Sync Schedule': 'Sync Schedule',
'Sync Settings': 'Sync Settings',
'Sync process already started on ': 'Sync process already started on ',
'Synchronisation': 'Synchronisation',
'Synchronization': 'Synchronization',
'Synchronization Conflicts': 'Synchronization Conflicts',
'Synchronization Details': 'Synchronization Details',
'Synchronization History': 'Synchronization History',
'Synchronization Peers': 'Synchronization Peers',
'Synchronization Settings': 'Synchronization Settings',
'Synchronization allows you to share data that you have with others and update your own database with latest data from other peers. This page provides you with information about how to use the synchronization features of Sahana Eden': 'Synchronization allows you to share data that you have with others and update your own database with latest data from other peers. This page provides you with information about how to use the synchronization features of Sahana Eden',
'Synchronization not configured.': 'Synchronization not configured.',
'Synchronization settings updated': 'Synchronization settings updated',
'Syncronisation History': 'Lịch sử đồng bộ hóa',
'System allows the General Public to Report Incidents & have these Tracked.': 'System allows the General Public to Report Incidents & have these Tracked.',
'System allows the tracking & discovery of Items stored in Locations.': 'System allows the tracking & discovery of Items stored in Locations.',
'System is a central online repository where all relief organizations, relief workers, government agents and camp sites for displaced personnel can coordinate the supply of aid with their demand. It allows users to allocate the available resources to fulfill the demands effectively and efficiently.': 'System is a central online repository where all relief organizations, relief workers, government agents and camp sites for displaced personnel can coordinate the supply of aid with their demand. It allows users to allocate the available resources to fulfill the demands effectively and efficiently.',
'System keeps track of all Volunteers working in the disaster region. It captures not only the places where they are active, but also captures information on the range of services they are providing in each area.': 'Hệ thống luôn theo sát quá trình làm việc của tất cả các tình nguyện viên trong khu vực bị thiên tai.Hệ thống nắm bắt không chỉ vị trí hoạt động của họ mà còn cả thông tin về các dịch vụ mà họ đang cung cấp ở mỗi khu vực.',
"System's Twitter account updated": 'Cập nhật tài khoản Twitter của hệ thống',
'Table name': 'Table name',
'Tags': 'Tags',
'Take shelter in place or per <instruction>': 'Take shelter in place or per <instruction>',
'Task Details': 'Task Details',
'Task List': 'Task List',
'Task Status': 'Task Status',
'Task added': 'Đã thêm Nhiệm vụ',
'Task deleted': 'Đã xóa Nhiệm vụ',
'Task status': 'Task status',
'Task updated': 'Đã cập nhật nhiệm vụ',
'Tasks': 'Tasks',
'Team': 'Team',
'Team Description': 'Team Description',
'Team Details': 'Team Details',
'Team Head': 'Team Head',
'Team Id': 'Team Id',
'Team Leader': 'Đội trưởng',
'Team Member added': 'Thành viên đội đã được thêm',
'Team Members': 'Team Members',
'Team Name': 'Team Name',
'Team Type': 'Loại Đội',
'Team added': 'Đội đã được thêm',
'Team deleted': 'Team deleted',
'Team updated': 'Team updated',
'Teams': 'Teams',
'Technical testing only, all recipients disregard': 'Technical testing only, all recipients disregard',
'Telecommunications': 'Telecommunications',
'Telephone': 'Telephone',
'Telephony': 'Đường điện thoại',
'Temp folder %s not writable - unable to apply theme!': 'Temp folder %s not writable - unable to apply theme!',
'Template file %s not readable - unable to apply theme!': 'Template file %s not readable - unable to apply theme!',
'Templates': 'Templates',
'Terrorism': 'Terrorism',
'Tertiary Server (Optional)': 'Tertiary Server (Optional)',
'Test Results': 'Test Results',
'Text': 'Văn bản',
'Text Colour for Text blocks': 'Màu vản bản cho khối văn bản',
'Text before each Text Field (One per line)': 'Text before each Text Field (One per line)',
'Text in Message': 'Text in Message',
'Text in Message: ': 'Text in Message: ',
'Thanks for your assistance': 'Thanks for your assistance',
'The': 'The',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1 == db.table2.field2" results in a SQL JOIN.': 'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1 == db.table2.field2" results in a SQL JOIN.',
'The Area which this Site is located within.': 'Xác định khu vực site này định vị trong đó',
'The Assessments module allows field workers to send in assessments.': 'The Assessments module allows field workers to send in assessments.',
'The Author of this Document (optional)': 'The Author of this Document (optional)',
'The Current Location of the Person, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'The Current Location of the Person, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.',
'The Current Location of the Person/Group, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'The Current Location of the Person/Group, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.',
'The District for this Report.': 'The District for this Report.',
"The Donor(s) for this project. Multiple values can be selected by holding down the 'Control' key.": "The Donor(s) for this project. Multiple values can be selected by holding down the 'Control' key.",
'The Group whose members can edit data in this record.': 'The Group whose members can edit data in this record.',
'The Location of this Site, which can be general (for Reporting) or precise (for displaying on a Map).': 'The Location of this Site, which can be general (for Reporting) or precise (for displaying on a Map).',
'The Location the Person has come from, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'The Location the Person has come from, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.',
'The Location the Person is going to, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'The Location the Person is going to, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.',
'The Office this record is associated with.': 'The Office this record is associated with.',
'The Organization this record is associated with.': 'The Organization this record is associated with.',
'The Organization which is funding this Activity.': 'The Organization which is funding this Activity.',
'The Project Tracking module allows the creation of Activities to meet Gaps in Needs Assessments.': 'The Project Tracking module allows the creation of Activities to meet Gaps in Needs Assessments.',
'The Rapid Assessments Module stores structured reports done by Professional Organizations.': 'The Rapid Assessments Module stores structured reports done by Professional Organizations.',
'The Request this record is associated with.': 'The Request this record is associated with.',
'The Role this person plays within this Office/Project.': 'The Role this person plays within this Office/Project.',
'The Role this person plays within this hospital.': 'Vai trò của người này trong bệnh viện',
"The Sector(s) this organization works in. Multiple values can be selected by holding down the 'Control' key.": "The Sector(s) this organization works in. Multiple values can be selected by holding down the 'Control' key.",
'The Shelter this Request is from (optional).': 'The Shelter this Request is from (optional).',
'The URL for the GetCapabilities of a WMS Service whose layers you want accessible via the Map.': 'The URL for the GetCapabilities of a WMS Service whose layers you want accessible via the Map.',
"The URL of the image file. If you don't upload an image file, then you must specify its location here.": "The URL of the image file. If you don't upload an image file, then you must specify its location here.",
'The URL of your web gateway without the post parameters': 'The URL of your web gateway without the post parameters',
'The URL to access the service.': 'The URL to access the service.',
'The Unique Identifier (UUID) as assigned to this facility by the government.': 'The Unique Identifier (UUID) as assigned to this facility by the government.',
'The attribute within the KML which is used for the title of popups.': 'The attribute within the KML which is used for the title of popups.',
'The attribute(s) within the KML which are used for the body of popups. (Use a space between attributes)': 'The attribute(s) within the KML which are used for the body of popups. (Use a space between attributes)',
'The body height (crown to heel) in cm.': 'The body height (crown to heel) in cm.',
'The category of the Item.': 'The category of the Item.',
'The contact person for this organization.': 'Người chịu trách nhiệm liên lạc cho tổ chức này',
'The country the person usually lives in.': 'The country the person usually lives in.',
'The duplicate record will be deleted': 'The duplicate record will be deleted',
'The entered unit links to this unit. For e.g. if you are entering m for meter then choose kilometer(if it exists) and enter the value 0.001 as multiplicator.': 'The entered unit links to this unit. For e.g. if you are entering m for meter then choose kilometer(if it exists) and enter the value 0.001 as multiplicator.',
'The first or only name of the person (mandatory).': 'The first or only name of the person (mandatory).',
'The following modules are available': 'The following modules are available',
'The hospital this record is associated with.': 'Bệnh viện lưu hồ sơ này',
'The item is designated to be sent for specific project, population, village or other earmarking of the donation such as a Grant Code.': 'The item is designated to be sent for specific project, population, village or other earmarking of the donation such as a Grant Code.',
'The language to use for notifications.': 'The language to use for notifications.',
'The last known location of the missing person before disappearance.': 'The last known location of the missing person before disappearance.',
'The list of Item categories are maintained by the Administrators.': 'Danh sách category hàng hóa được quản trị viên quản lý',
'The name to be used when calling for or directly addressing the person (optional).': 'The name to be used when calling for or directly addressing the person (optional).',
'The next screen will allow you to detail the number of people here & their needs.': 'The next screen will allow you to detail the number of people here & their needs.',
'The next screen will allow you to enter a detailed list of items and quantities, if appropriate...': 'The next screen will allow you to enter a detailed list of items and quantities, if appropriate...',
'The number of tiles around the visible map to download. Zero means that the 1st page loads faster, higher numbers mean subsequent panning is faster.': 'The number of tiles around the visible map to download. Zero means that the 1st page loads faster, higher numbers mean subsequent panning is faster.',
'The person at the location who is reporting this incident (optional)': 'The person at the location who is reporting this incident (optional)',
'The person reporting about the missing person.': 'Người báo cáo về người mất tích',
'The person reporting the missing person.': 'The person reporting the missing person.',
"The person's manager within this Office/Project.": 'Quản lý của một cá nhân trong Văn phòng/Dự án',
'The post variable containing the phone number': 'The post variable containing the phone number',
'The post variable on the URL used for sending messages': 'Bài viết thay đổi trên URL dùng để gửi tin nhắn',
'The post variables other than the ones containing the message and the phone number': 'The post variables other than the ones containing the message and the phone number',
'The serial port at which the modem is connected - /dev/ttyUSB0, etc on linux and com1, com2, etc on Windows': 'The serial port at which the modem is connected - /dev/ttyUSB0, etc on linux and com1, com2, etc on Windows',
'The server did not receive a timely response from another server that it was accessing to fill the request by the browser.': 'The server did not receive a timely response from another server that it was accessing to fill the request by the browser.',
'The server received an incorrect response from another server that it was accessing to fill the request by the browser.': 'The server received an incorrect response from another server that it was accessing to fill the request by the browser.',
'The simple policy allows anonymous users to Read & registered users to Edit. The full security policy allows the administrator to set permissions on individual tables or records - see models/zzz.py.': 'Các chính sách đơn giản cho phép người dùng ẩn danh đọc và đăng ký để chỉnh sửa. Các chính sách bảo mật đầy đủ cho phép quản trị viên thiết lập phân quyền trên các bảng cá nhân hay - xem mô hình / zzz.py.',
'The subject event no longer poses a threat or concern and any follow on action is described in <instruction>': 'The subject event no longer poses a threat or concern and any follow on action is described in <instruction>',
'The title of the WMS Browser panel in the Tools panel.': 'The title of the WMS Browser panel in the Tools panel.',
'The token associated with this application on': 'The token associated with this application on',
'The unique identifier which identifies this instance to other instances.': 'The unique identifier which identifies this instance to other instances.',
'The weight in kg.': 'The weight in kg.',
'Theme': 'Chủ đề',
'Theme Details': 'Theme Details',
'Theme added': 'Theme added',
'Theme deleted': 'Theme deleted',
'Theme updated': 'Theme updated',
'Themes': 'Themes',
'There are errors': 'There are errors',
'There was a problem, sorry, please try again later.': 'There was a problem, sorry, please try again later.',
'These are settings for Inbound Mail.': 'Đây là những cài đặt cho thư gửi vào',
'These are the Incident Categories visible to normal End-Users': 'These are the Incident Categories visible to normal End-Users',
'These are the default settings for all users. To change settings just for you, click ': 'These are the default settings for all users. To change settings just for you, click ',
'They': 'Người ta',
'This appears to be a duplicate of ': 'This appears to be a duplicate of ',
'This file already exists on the server as': 'This file already exists on the server as',
'This form allows the administrator to remove a duplicate location.': 'Mẫu này cho phép quản trị viên xóa bỏ các địa điểm trùng',
'This is the way to transfer data between machines as it maintains referential integrity.': 'Đây là cách truyền dữ liệu giữa các máy vì nó bảo toàn tham chiếu',
'This is the way to transfer data between machines as it maintains referential integrity...duplicate data should be removed manually 1st!': 'This is the way to transfer data between machines as it maintains referential integrity...duplicate data should be removed manually 1st!',
'This might be due to a temporary overloading or maintenance of the server.': 'Vấn đề này có thể do tình trạng quá tải hoặc máy chủ đang trong thời gian bảo trì',
'This page shows you logs of past syncs. Click on the link below to go to this page.': 'This page shows you logs of past syncs. Click on the link below to go to this page.',
'This screen allows you to upload a collection of photos to the server.': 'Màn hình cho phép bạn upload bộ sưu ảnh lên server',
'Thunderstorm': 'Thunderstorm',
'Thursday': 'Thursday',
'Ticket': 'Ticket',
'Ticket Details': 'Chi tiết Ticket',
'Ticket added': 'Ticket added',
'Ticket deleted': 'Đã xóa Ticket',
'Ticket updated': 'Ticket updated',
'Ticketing Module': 'Ticketing Module',
'Tickets': 'Tickets',
'Time needed to collect water': 'Time needed to collect water',
'Time of Request': 'Thời gian yêu cầu',
'Timestamp': 'Timestamp',
'Title': 'Title',
'To Location': 'To Location',
'To begin the sync process, click the button on the right => ': 'Nhấp chuột vào nút bên phải để kích hoạt quá trình đồng bộ',
'To begin the sync process, click this button => ': 'To begin the sync process, click this button => ',
'To delete': 'To delete',
'To edit OpenStreetMap, you need to edit the OpenStreetMap settings in models/000_config.py': 'To edit OpenStreetMap, you need to edit the OpenStreetMap settings in models/000_config.py',
"To search for a body, enter the ID label of the body. You may use % as wildcard. Press 'Search' without input to list all bodies.": "To search for a body, enter the ID label of the body. You may use % as wildcard. Press 'Search' without input to list all bodies.",
"To search for a body, enter the ID tag number of the body. You may use % as wildcard. Press 'Search' without input to list all bodies.": "To search for a body, enter the ID tag number of the body. You may use % as wildcard. Press 'Search' without input to list all bodies.",
"To search for a hospital, enter any of the names or IDs of the hospital, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all hospitals.": "To search for a hospital, enter any of the names or IDs of the hospital, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all hospitals.",
"To search for a hospital, enter any part of the name or ID. You may use % as wildcard. Press 'Search' without input to list all hospitals.": 'Để tìm kiếm một bệnh viện, nhập một phần tên hoặc ID. Có thể sử dụng % như một ký tự thay thế cho một nhóm ký tự. Nhấn "Tìm kiếm" mà không nhập thông tin, sẽ hiển thị toàn bộ các bệnh viện.',
"To search for a location, enter the name. You may use % as wildcard. Press 'Search' without input to list all locations.": 'Để tìm kiếm một địa điểm, nhập tên. Có thể sử dụng ký tự % để thay thế cho một nhóm ký tự. Nhấn "Tìm kiếm" mà không nhập thông tin sẽ hiển thị tất cả các địa điểm.',
"To search for a person, enter any of the first, middle or last names and/or an ID number of a person, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": 'Để tìm kiếm một người,bạn có thể nhập tên, tên đệm hay họ và/hoặc số chứng minh thư của người đó viết cách nhau.Bạn có thể dùng % a làm ký tự đại diện',
"To search for a request, enter some of the text that you are looking for. You may use % as wildcard. Press 'Search' without input to list all requests.": "To search for a request, enter some of the text that you are looking for. You may use % as wildcard. Press 'Search' without input to list all requests.",
'To submit a new job, use the': 'To submit a new job, use the',
'To variable': 'Thay đổi',
'Tools': 'Tools',
'Tornado': 'Lốc xoáy',
'Total # of Beneficiaries Reached ': 'Total # of Beneficiaries Reached ',
'Total # of Target Beneficiaries': 'Tổng số # đối tượng hưởng lợi',
'Total # of households of site visited': 'Total # of households of site visited',
'Total Beds': 'Total Beds',
'Total Beneficiaries': 'Total Beneficiaries',
'Total Cost per Megabyte': 'Tổng chi phí cho mỗi Megabyte',
'Total Cost per Minute': 'Total Cost per Minute',
'Total Households': 'Total Households',
'Total Monthly': 'Total Monthly',
'Total Monthly Cost': 'Total Monthly Cost',
'Total Monthly Cost: ': 'Total Monthly Cost: ',
'Total One-time Costs': 'Total One-time Costs',
'Total Persons': 'Total Persons',
'Total Recurring Costs': 'Tổng chi phí định kỳ',
'Total Unit Cost': 'Total Unit Cost',
'Total Unit Cost: ': 'Total Unit Cost: ',
'Total Units': 'Total Units',
'Total number of beds in this hospital. Automatically updated from daily reports.': 'Tổng số giường bệnh trong bệnh viện này. Tự động cập nhật từ các báo cáo hàng ngày.',
'Total number of houses in the area': 'Tổng số nóc nhà trong khu vực',
'Total number of schools in affected area': 'Total number of schools in affected area',
'Total population of site visited': 'Total population of site visited',
'Totals for Budget:': 'Totals for Budget:',
'Totals for Bundle:': 'Totals for Bundle:',
'Totals for Kit:': 'Totals for Kit:',
'Tourist Group': 'Tourist Group',
'Town': 'Town',
'Traces internally displaced people (IDPs) and their needs': 'Traces internally displaced people (IDPs) and their needs',
'Tracing': 'Đang tìm kiếm',
'Track': 'Dấu viết',
'Track Details': 'Track Details',
'Track deleted': 'Track deleted',
'Track updated': 'Track updated',
'Track uploaded': 'Track uploaded',
'Tracking of Projects, Activities and Tasks': 'Tracking of Projects, Activities and Tasks',
'Tracking of basic information on the location, facilities and size of the Shelters': 'Tracking of basic information on the location, facilities and size of the Shelters',
'Tracks': 'Tracks',
'Tracks requests for aid and matches them against donors who have pledged aid': 'Tracks requests for aid and matches them against donors who have pledged aid',
'Tracks the location, distibution, capacity and breakdown of victims in Shelters': 'Tracks the location, distibution, capacity and breakdown of victims in Shelters',
'Traffic Report': 'Traffic Report',
'Transit': 'Transit',
'Transition Effect': 'Transition Effect',
'Transparent?': 'Transparent?',
'Transportation assistance, Rank': 'Transportation assistance, Rank',
'Trauma Center': 'Trauma Center',
'Travel Cost': 'Travel Cost',
'Tree': 'Tree',
'Tropical Storm': 'Tropical Storm',
'Tropo Messaging Token': 'Tropo Messaging Token',
'Tropo Settings': 'Tropo Settings',
'Tropo Voice Token': 'Tropo Voice Token',
'Tropo settings updated': 'Cập nhật cài đặt Tropo',
'Truck': 'Xe tải',
'Try checking the URL for errors, maybe it was mistyped.': 'Thử kiểm tra lỗi trên URL, có thể do gõ sai',
'Try hitting refresh/reload button or trying the URL from the address bar again.': 'Thử bấm nút refresh/reload hoặc kiểm tra URL',
'Try refreshing the page or hitting the back button on your browser.': 'Try refreshing the page or hitting the back button on your browser.',
'Tsunami': 'Tsunami',
'Tuesday': 'Tuesday',
'Twitter': 'Twitter',
'Twitter ID or #hashtag': 'Twitter ID or #hashtag',
'Twitter Settings': 'Twitter Settings',
'Type': 'Type',
'Type of cause': 'Type of cause',
'Type of latrines': 'Type of latrines',
'Type of place for defecation': 'Type of place for defecation',
'Type of water source before the disaster': 'Type of water source before the disaster',
'Types of health services available': 'Types of health services available',
'Types of water storage containers available': 'Types of water storage containers available',
'UID': 'UID',
'URL': 'URL',
'UTC Offset': 'UTC Offset',
'Unable to parse CSV file!': 'Không thể đọc file CSV',
'Understaffed': 'Understaffed',
'Unidentified': 'Unidentified',
'Unit': 'Unit',
'Unit Bed Capacity': 'Unit Bed Capacity',
'Unit Cost': 'Unit Cost',
'Unit Details': 'Unit Details',
'Unit Name': 'Unit Name',
'Unit Set': 'Unit Set',
'Unit Short Code for e.g. m for meter.': 'Unit Short Code for e.g. m for meter.',
'Unit added': 'Đã thêm đơn vị',
'Unit deleted': 'Unit deleted',
'Unit updated': 'Đơn vị được cập nhật',
'Units': 'Units',
'Units of Measure': 'Units of Measure',
'Unknown': 'Unknown',
'Unknown Peer': 'Unknown Peer',
'Unknown type of facility': 'Unknown type of facility',
'Unresolved Conflicts': 'Unresolved Conflicts',
'Unselect to disable the modem': 'Unselect to disable the modem',
'Unsent': 'Unsent',
'Unsupported data format!': 'Unsupported data format!',
'Unsupported method!': 'Unsupported method!',
'Update': 'Update',
'Update Activity Report': 'Update Activity Report',
'Update Cholera Treatment Capability Information': 'Update Cholera Treatment Capability Information',
'Update Import Job': 'Update Import Job',
'Update Request': 'Cập nhật Yêu cầu',
'Update Service Profile': 'Update Service Profile',
'Update Task Status': 'Update Task Status',
'Update Unit': 'Update Unit',
'Update if Master': 'Update if Master',
'Update if Newer': 'Cập nhật nếu mới hơn',
'Update your current ordered list': 'Update your current ordered list',
'Upload': 'Upload',
'Upload Photos': 'Upload Photos',
'Upload Spreadsheet': 'Upload Spreadsheet',
'Upload Track': 'Upload Track',
'Upload a Spreadsheet': 'Tải một bảng tính lên',
"Upload an image file here. If you don't upload an image file, then you must specify its location in the URL field.": "Upload an image file here. If you don't upload an image file, then you must specify its location in the URL field.",
'Urban Fire': 'Urban Fire',
'Urban area': 'Urban area',
'Urdu': 'Urdu',
'Urgent': 'Urgent',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Dùng (...)&(...) thay cho VÀ(AND), (...)|(...) cho HOẶC(OR), and ~(...) cho KHÔNG (NOT) để .đưa ra những câu hỏi phúc tạp',
'Use default': 'Use default',
'Use these links to download data that is currently in the database.': 'Dùng liên kết này để tải dữ liệu hiện có trên cơ sở dữ liệu xuống',
'Use this space to add a description about the Bin Type.': 'Thêm thông tin mô tả loại Bin ở đây',
'Use this space to add a description about the site location.': 'Use this space to add a description about the site location.',
'Use this space to add a description about the warehouse/site.': 'Thêm mô tả nhà kho/site ở đây',
'Use this space to add additional comments and notes about the Site/Warehouse.': 'Viết bình luận và ghi chú về site/nhà kho ở đây',
'Used to import data from spreadsheets into the database': 'Used to import data from spreadsheets into the database',
'User': 'User',
'User Details': 'User Details',
'User ID': 'User ID',
'User Management': 'User Management',
'User Profile': 'User Profile',
'User Requests': 'Yêu cầu của người dùng',
'User Updated': 'Đã cập nhât người dùng',
'User added': 'User added',
'User already has this role': 'User already has this role',
'User deleted': 'Đã xóa người dùng',
'User updated': 'User updated',
'Username': 'Username',
'Users': 'Users',
'Users removed': 'Xóa người dùng',
'Ushahidi': 'Ushahidi',
'Usual food sources in the area': 'Usual food sources in the area',
'Utility, telecommunication, other non-transport infrastructure': 'Utility, telecommunication, other non-transport infrastructure',
'Various Reporting functionalities': 'Various Reporting functionalities',
'Vehicle': 'Vehicle',
'Vehicle Crime': 'Tai nạn giao thông',
'Vehicle Types': 'Loại phương tiện',
'Vendor': 'Vendor',
'Verified': 'Verified',
'Verified?': 'Đã xác nhận?',
'Verify password': 'Verify password',
'Version': 'Phiên bản',
'Very High': 'Very High',
'View Alerts received using either Email or SMS': 'Xem nhắc nhở gửi đến qua email hoặc sms',
'View Fullscreen Map': 'View Fullscreen Map',
'View Image': 'View Image',
'View On Map': 'Hiển thị trên bản đồ',
'View Outbox': 'View Outbox',
'View Requests for Aid': 'Xem Yêu cầu viện trợ',
'View Settings': 'View Settings',
'View Tickets': 'View Tickets',
"View and/or update details of the person's record": 'Xem và/hoặc cập nhật chi tiết mục ghi cá nhân',
'View and/or update their details': 'View and/or update their details',
'View or update the status of a hospital.': 'Xem hoặc cập nhật trạng thái của một bệnh viện',
'View pending requests and pledge support.': 'View pending requests and pledge support.',
'View the hospitals on a map.': 'Hiển thị bệnh viện trên bản đồ',
"View/Edit the Database directly (caution: doesn't respect the framework rules!)": "View/Edit the Database directly (caution: doesn't respect the framework rules!)",
'Village': 'Village',
'Village Leader': 'Village Leader',
'Visible?': 'Visible?',
'Visual Recognition': 'Visual Recognition',
'Volcanic Ash Cloud': 'Đám mây tro bụi từ núi lửa',
'Volcanic Event': 'Volcanic Event',
'Volume - Fluids': 'Volume - Fluids',
'Volume - Solids': 'Volume - Solids',
'Volume Capacity': 'Volume Capacity',
'Volume/Dimensions': 'Volume/Dimensions',
'Volunteer Data': 'Dữ liệu tình nguyện viên',
'Volunteer Details': 'Volunteer Details',
'Volunteer Management': 'Volunteer Management',
'Volunteer Project': 'Dự án tình nguyện',
'Volunteer Registration': 'Đăng ký tình nguyện viên',
'Volunteer Registrations': 'Đăng ký tình nguyện viên',
'Volunteer Request': 'Yêu cầu tình nguyện viên',
'Volunteer added': 'Volunteer added',
'Volunteer deleted': 'Volunteer deleted',
'Volunteer details updated': 'Volunteer details updated',
'Volunteer registration added': 'Đã thêm đăng ký tình nguyện viên',
'Volunteer registration deleted': 'Đã xóa đăng ký tình nguyện viên',
'Volunteer registration updated': 'Đã cập nhật đăng ký tình nguyện viên',
'Volunteers': 'Tình nguyện viên',
'Volunteers were notified!': 'Volunteers were notified!',
'Vote': 'Vote',
'Votes': 'Votes',
'WASH': 'WASH',
'WMS Browser Name': 'WMS Browser Name',
'WMS Browser URL': 'WMS Browser URL',
'Walking Only': 'Walking Only',
'Walking time to the health service': 'Walking time to the health service',
'Warehouse': 'Warehouse',
'Warehouse Details': 'Warehouse Details',
'Warehouse Item Details': 'Warehouse Item Details',
'Warehouse Item added': 'Warehouse Item added',
'Warehouse Item deleted': 'Warehouse Item deleted',
'Warehouse Item updated': 'Warehouse Item updated',
'Warehouse Items': 'Warehouse Items',
'Warehouse Management': 'Quản lý kho hàng',
'Warehouse added': 'Warehouse added',
'Warehouse deleted': 'Warehouse deleted',
'Warehouse updated': 'Warehouse updated',
'Warehouse/Sites Registry': 'Warehouse/Sites Registry',
'Warehouses': 'Warehouses',
'WatSan': 'WatSan',
'Water': 'Water',
'Water Sanitation Hygiene': 'Water Sanitation Hygiene',
'Water gallon': 'Ga-lông nước',
'Water storage containers available for HH': 'Water storage containers available for HH',
'Water storage containers sufficient per HH': 'Water storage containers sufficient per HH',
'Water supply': 'Water supply',
'Waterspout': 'Waterspout',
'Way Bill(s)': 'Hóa đơn thu phí đường bộ',
'We have tried': 'We have tried',
'Website': 'Website',
'Wednesday': 'Wednesday',
'Weekly': 'Weekly',
'Weight': 'Weight',
'Weight (kg)': 'Khối lượng',
'Welcome to the Sahana Eden Disaster Management System': 'Welcome to the Sahana Eden Disaster Management System',
'Welcome to the Sahana Portal at ': 'Welcome to the Sahana Portal at ',
'Well-Known Text': 'Well-Known Text',
'Were basic medical supplies available for health services prior to the disaster?': 'Were basic medical supplies available for health services prior to the disaster?',
'Were breast milk substitutes used prior to the disaster?': 'Were breast milk substitutes used prior to the disaster?',
'Were there cases of malnutrition in this area prior to the disaster?': 'Were there cases of malnutrition in this area prior to the disaster?',
'Were there health services functioning for the community prior to the disaster?': 'Were there health services functioning for the community prior to the disaster?',
'Were there reports or evidence of outbreaks of any micronutrient malnutrition disorders before the emergency?': 'Were there reports or evidence of outbreaks of any micronutrient malnutrition disorders before the emergency?',
'What are the factors affecting school attendance?': 'What are the factors affecting school attendance?',
"What are the people's normal ways to obtain food in this area?": "What are the people's normal ways to obtain food in this area?",
'What are your main sources of cash to restart your business?': 'What are your main sources of cash to restart your business?',
'What are your main sources of income now?': 'What are your main sources of income now?',
'What do you spend most of your income on now?': 'What do you spend most of your income on now?',
'What food stocks exist? (main dishes)': 'What food stocks exist? (main dishes)',
'What food stocks exist? (side dishes)': 'What food stocks exist? (side dishes)',
'What is the estimated total number of people in all of these institutions?': 'What is the estimated total number of people in all of these institutions?',
'What is your major source of clean water for daily use (ex: washing, cooking, bathing)?': 'What is your major source of clean water for daily use (ex: washing, cooking, bathing)?',
'What is your major source of drinking water?': 'What is your major source of drinking water?',
"What should be done to reduce women and children's vulnerability to violence?": "What should be done to reduce women and children's vulnerability to violence?",
'What type of latrines are available in the village/IDP centre/Camp?': 'What type of latrines are available in the village/IDP centre/Camp?',
'What type of salvage material can be used from destroyed houses?': 'What type of salvage material can be used from destroyed houses?',
'What type of salvage material can be used from destroyed schools?': 'What type of salvage material can be used from destroyed schools?',
'What types of health problems do children currently have?': 'What types of health problems do children currently have?',
'What types of health problems do people currently have?': 'What types of health problems do people currently have?',
'What types of health services are still functioning in the affected area?': 'What types of health services are still functioning in the affected area?',
'What types of household water storage containers are available?': 'What types of household water storage containers are available?',
'What were your main sources of income before the disaster?': 'What were your main sources of income before the disaster?',
'Wheat': 'Wheat',
"When syncing data with others, conflicts happen in cases when two (or more) parties want to sync information which both of them have modified, i.e. conflicting information. Sync module tries to resolve such conflicts automatically but in some cases it can't. In those cases, it is up to you to resolve those conflicts manually, click on the link on the right to go to this page.": "When syncing data with others, conflicts happen in cases when two (or more) parties want to sync information which both of them have modified, i.e. conflicting information. Sync module tries to resolve such conflicts automatically but in some cases it can't. In those cases, it is up to you to resolve those conflicts manually, click on the link on the right to go to this page.",
'Where are the alternative places for studying?': 'Where are the alternative places for studying?',
'Where are the separated children originally from?': 'Where are the separated children originally from?',
'Where do the majority of people defecate?': 'Where do the majority of people defecate?',
'Where have the children been sent?': 'Where have the children been sent?',
'Where is solid waste disposed in the village/camp?': 'Where is solid waste disposed in the village/camp?',
'Whiskers': 'Whiskers',
'Who is doing what and where': 'Who is doing what and where',
'Who usually collects water for the family?': 'Ai là người thường đi lấy nước cho cả gia đình',
'Width': 'Độ rộng',
'Wild Fire': 'Wild Fire',
'Wind Chill': 'Wind Chill',
'Window frame': 'Window frame',
'Winter Storm': 'Winter Storm',
'Without mentioning any names or indicating anyone, do you know of any incidents of violence against women or girls occuring since the disaster?': 'Without mentioning any names or indicating anyone, do you know of any incidents of violence against women or girls occuring since the disaster?',
'Women of Child Bearing Age': 'Women of Child Bearing Age',
'Women participating in coping activities': 'Women participating in coping activities',
'Women who are Pregnant or in Labour': 'Women who are Pregnant or in Labour',
'Womens Focus Groups': 'Womens Focus Groups',
'Wooden plank': 'Wooden plank',
'Wooden poles': 'Wooden poles',
'Working hours end': 'Hết giờ làm việc',
'Working hours start': 'Bắt đầu giờ làm việc',
'Working or other to provide money/food': 'Working or other to provide money/food',
'Would you like to display the photos on the map?': 'Would you like to display the photos on the map?',
'X-Ray': 'X-Ray',
'XMPP': 'XMPP',
'Yes': 'Yes',
'You are attempting to delete your own account - are you sure you want to proceed?': 'You are attempting to delete your own account - are you sure you want to proceed?',
'You are currently reported missing!': 'You are currently reported missing!',
'You can change the configuration of synchronization module in the Settings section. This configuration includes your UUID (unique identification number), sync schedules, beacon service and so on. Click the following link to go to the Sync Settings page.': 'You can change the configuration of synchronization module in the Settings section. This configuration includes your UUID (unique identification number), sync schedules, beacon service and so on. Click the following link to go to the Sync Settings page.',
'You can click on the map below to select the Lat/Lon fields:': 'You can click on the map below to select the Lat/Lon fields:',
'You can click on the map to select the Lat/Lon fields. Longitude is West - East (sideways). Latitude is North-South (Up-Down). Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere. Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas. This needs to be added in Decimal Degrees.': 'You can click on the map to select the Lat/Lon fields. Longitude is West - East (sideways). Latitude is North-South (Up-Down). Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere. Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas. This needs to be added in Decimal Degrees.',
'You can select the Draw tool (': 'You can select the Draw tool (',
'You can set the modem settings for SMS here.': 'Bạn có thể thiết lập cài đặt modem cho SMS ở đây',
'You can use the Conversion Tool to convert from either GPS coordinates or Degrees/Minutes/Seconds.': 'You can use the Conversion Tool to convert from either GPS coordinates or Degrees/Minutes/Seconds.',
"You have personalised settings, so changes made here won't be visible to you. To change your personalised settings, click ": 'Bạn đã thiết lập các cài đặt cá nhân, vì vậy bạn không xem được các thay đổi ở đây.Để thiết lập lại, nhấp chuột vào',
"You have unsaved changes. Click Cancel now, then 'Save' to save them. Click OK now to discard them.": "You have unsaved changes. Click Cancel now, then 'Save' to save them. Click OK now to discard them.",
"You haven't made any calculations": "You haven't made any calculations",
'You must be logged in to register volunteers.': 'You must be logged in to register volunteers.',
'You must be logged in to report persons missing or found.': 'You must be logged in to report persons missing or found.',
'You must provide a series id to proceed.': 'Bạn phải nhập số id của serie để thao tác tiếp',
'You should edit Twitter settings in models/000_config.py': 'Bạn có thể chỉnh sửa cài đặt Twitter tại models/000_config.py',
'Your action is required. Please approve user %s asap: ': 'Your action is required. Please approve user %s asap: ',
'Your current ordered list of solution items is shown below. You can change it by voting again.': 'Your current ordered list of solution items is shown below. You can change it by voting again.',
'Your post was added successfully.': 'Bạn đã gửi thông tin thành công',
'Your system has been assigned a unique identification (UUID), which other computers around you can use to identify you. To view your UUID, you may go to Synchronization -> Sync Settings. You can also see other settings on that page.': 'Your system has been assigned a unique identification (UUID), which other computers around you can use to identify you. To view your UUID, you may go to Synchronization -> Sync Settings. You can also see other settings on that page.',
'ZIP/Postcode': 'ZIP/Postcode',
'Zinc roof': 'Zinc roof',
'Zoom': 'Zoom',
'Zoom Levels': 'Zoom Levels',
'act': 'act',
'active': 'đang hoạt động',
'added': 'added',
'all records': 'all records',
'allows a budget to be developed based on staff & equipment costs, including any admin overheads.': 'allows a budget to be developed based on staff & equipment costs, including any admin overheads.',
'allows for creation and management of surveys to assess the damage following a natural disaster.': 'allows for creation and management of surveys to assess the damage following a natural disaster.',
'an individual/team to do in 1-2 days': 'an individual/team to do in 1-2 days',
'approved': 'approved',
'assigned': 'đã phân công',
'average': 'trung bình',
'black': 'màu đen',
'blond': 'blond',
'blue': 'blue',
'brown': 'brown',
'c/o Name': 'c/o Name',
'can be used to extract data from spreadsheets and put them into database tables.': 'có thể dùng để trích xuất dữ liệu từ bẳng tính đưa vào cơ sở dữ liệu',
'cancelled': 'cancelled',
'caucasoid': 'caucasoid',
'check all': 'check all',
'click for more details': 'click for more details',
'collateral event': 'collateral event',
'completed': 'completed',
'consider': 'consider',
'constraint_id': 'constraint_id',
'criminal intent': 'criminal intent',
'crud': 'crud',
'curly': 'curly',
'currently registered': 'currently registered',
'daily': 'hàng ngày',
'dark': 'dark',
'data uploaded': 'đã upload dữ liệu',
'database': 'database',
'database %s select': 'chọn cơ sở dữ liệu %s',
'db': 'db',
'delete all checked': 'delete all checked',
'deleted': 'deleted',
'denied': 'denied',
'description': 'description',
'design': 'design',
'diseased': 'diseased',
'displaced': 'displaced',
'divorced': 'divorced',
'done!': 'done!',
'edit': 'edit',
'editor': 'người biên tập',
'embedded': 'embedded',
'enclosed area': 'enclosed area',
'export as csv file': 'chuyển đổi file csv',
'fat': 'fat',
'feedback': 'phản hồi',
'female': 'female',
'final report': 'final report',
'flush latrine with septic tank': 'flush latrine with septic tank',
'follow-up assessment': 'follow-up assessment',
'forehead': 'forehead',
'form data': 'form data',
'from Twitter': 'from Twitter',
'from_id': 'from_id',
'full': 'full',
'getting': 'getting',
'green': 'green',
'grey': 'grey',
'here': 'ở đây',
'high': 'high',
'hourly': 'hourly',
'households': 'households',
'human error': 'human error',
'identified': 'identified',
'ignore': 'ignore',
'immediately': 'immediately',
'in Deg Min Sec format': 'in Deg Min Sec format',
'in GPS format': 'Ở định dạng GPS',
'inactive': 'inactive',
'initial assessment': 'initial assessment',
'injured': 'injured',
'insert new': 'chèn mới',
'insert new %s': 'insert new %s',
'invalid request': 'yêu cầu không hợp lệ',
'is a central online repository where information on all the disaster victims and families, especially identified casualties, evacuees and displaced people can be stored. Information like name, age, contact number, identity card number, displaced location, and other details are captured. Picture and finger print details of the people can be uploaded to the system. People can also be captured by group for efficiency and convenience.': 'là trung tâm thông tin trực tuyến, nơi lưu trữ thông tin về các nạn nhân và gia đình chịu ảnh hưởng của thiên tai, đặc biệt là xác định con số thương vong và lượng người sơ tán.Thông tin như tên, tuổi, số điện thoại, số CMND, nơi sơ tán và các thông tin khác cũng được lưu lại.Ảnh và dấu vân tay cũng có thể tải lên hệ thống.Để hiệu quả và tiện lợi hơn có thể quản lý theo nhóm',
'keeps track of all incoming tickets allowing them to be categorised & routed to the appropriate place for actioning.': 'theo dõi ticket gửi đến cho phép người ta phân loại và điều phương tiện cứu trợ tới các nơi hợp lý ',
'kilogram': 'kilogram',
'kit': 'kit',
'latrines': 'latrines',
'legend URL': 'legend URL',
'light': 'light',
'liter': 'liter',
'login': 'Đăng nhập',
'long': 'long',
'long>12cm': 'long>12cm',
'low': 'low',
'male': 'male',
'manual': 'manual',
'married': 'married',
'maxExtent': 'maxExtent',
'maxResolution': 'Độ phân giải tối đa',
'medium': 'medium',
'medium<12cm': 'trung bình dưới 12cm',
'menu item': 'menu item',
'message_id': 'message_id',
'meter': 'meter',
'meter cubed': 'meter cubed',
'meters': 'meters',
'module allows the site administrator to configure various options.': 'Mô-đun cho phép người quản trị site cấu hình các tùy chọn khác nhau',
'module helps monitoring the status of hospitals.': 'module giúp theo dõi tình trạng bệnh viện',
'module provides a mechanism to collaboratively provide an overview of the developing disaster, using online mapping (GIS).': 'module provides a mechanism to collaboratively provide an overview of the developing disaster, using online mapping (GIS).',
'mongoloid': 'mongoloid',
'more': 'more',
'n/a': 'n/a',
'natural hazard': 'thảm họa thiên nhiên',
'negroid': 'negroid',
'never': 'không bao giờ',
'new': 'Mới',
'new record inserted': 'new record inserted',
'next 100 rows': 'next 100 rows',
'no': 'no',
'none': 'none',
'normal': 'bình thường',
'not needed': 'not needed',
'not specified': 'không xác định',
'num Zoom Levels': 'num Zoom Levels',
'once': 'once',
'open defecation': 'open defecation',
'operational intent': 'operational intent',
'or import from csv file': 'or import from csv file',
'other': 'other',
'over one hour': 'hơn một tiếng',
'pack of 10': 'pack of 10',
'pending': 'pending',
'people': 'con người',
'piece': 'piece',
'pit': 'pit',
'pit latrine': 'pit latrine',
'postponed': 'postponed',
'preliminary template or draft, not actionable in its current form': 'preliminary template or draft, not actionable in its current form',
'previous 100 rows': 'previous 100 rows',
'primary incident': 'primary incident',
'problem connecting to twitter.com - please refresh': 'problem connecting to twitter.com - please refresh',
'provides a catalogue of digital media.': 'cung cấp danh mục các phương tiện truyền thông kỹ thuật số',
'record does not exist': 'record does not exist',
'record id': 'record id',
'records deleted': 'records deleted',
'red': 'red',
'reports successfully imported.': 'import báo cáo thành công',
'retired': 'retired',
'retry': 'retry',
'river': 'river',
'sack 20kg': 'sack 20kg',
'sack 50kg': 'sack 50kg',
'secondary effect': 'secondary effect',
'see comment': 'see comment',
'selected': 'selected',
'separated': 'separated',
'separated from family': 'thất lạc gia đình',
'shaved': 'shaved',
'shift_end': 'shift_end',
'shift_start': 'shift_start',
'short': 'short',
'short<6cm': 'short<6cm',
'sides': 'sides',
'sign-up now': 'sign-up now',
'simple': 'simple',
'single': 'single',
'slim': 'slim',
'state': 'state',
'straight': 'straight',
'suffered financial losses': 'thiệt hại về tài chính',
'table': 'table',
'table_name': 'table_name',
'tall': 'chiều cao',
'technical failure': 'technical failure',
'this': 'this',
'times and it is still not working. We give in. Sorry.': 'times and it is still not working. We give in. Sorry.',
'to access the system': 'to access the system',
'to reset your password': 'to reset your password',
'to verify your email': 'to verify your email',
'to_id': 'to_id',
'ton': 'ton',
'tonsure': 'tonsure',
'total': 'total',
'tracks all shelters and stores basic details regarding them. It collaborates with other modules to track people associated with a shelter, the services available etc.': 'tracks all shelters and stores basic details regarding them. It collaborates with other modules to track people associated with a shelter, the services available etc.',
'tweepy module not available within the running Python - this needs installing for non-Tropo Twitter support!': 'tweepy module not available within the running Python - this needs installing for non-Tropo Twitter support!',
'unable to parse csv file': 'unable to parse csv file',
'unapproved': 'unapproved',
'uncheck all': 'uncheck all',
'unidentified': 'unidentified',
'uninhabitable = foundation and structure destroyed': 'uninhabitable = foundation and structure destroyed',
'unknown': 'unknown',
'unspecified': 'unspecified',
'updated': 'đã cập nhật',
'updates only': 'updates only',
'urgent': 'khẩn cấp',
'vm_action': 'vm_action',
'wavy': 'wavy',
'weekly': 'weekly',
'white': 'white',
'wider area, longer term, usually contain multiple Activities': 'wider area, longer term, usually contain multiple Activities',
'widowed': 'widowed',
'window': 'window',
'windows broken, cracks in walls, roof slightly damaged': 'windows broken, cracks in walls, roof slightly damaged',
'within human habitat': 'trong khu dân cư',
'xlwt module not available within the running Python - this needs installing for XLS output!': 'xlwt module not available within the running Python - this needs installing for XLS output!',
'yes': 'có',
}
| dotskapes/dotSkapes | languages/vi.py | Python | mit | 250,282 |
""" The file is responsable for cart in flask-webpage """
from flask import current_app as app
from flask_seguro.products import Products
class Cart(object):
    """Shopping cart: holds selected products and keeps running totals."""

    def __init__(self, cart_dict=None):
        """Create a cart, optionally restoring state from a dict.

        :param cart_dict: previously serialized cart (see ``to_dict``),
            or ``None``/``{}`` for a fresh, empty cart.
        """
        cart_dict = cart_dict or {}
        if cart_dict == {}:
            self.total = 0
            self.subtotal = 0
            self.items = []
        else:
            self.total = cart_dict["total"]
            self.subtotal = cart_dict["subtotal"]
            self.items = cart_dict["items"]
        # Flat fee added on top of the subtotal (e.g. shipping/handling),
        # taken from the Flask app configuration.
        self.extra_amount = float(app.config['EXTRA_AMOUNT'])

    def to_dict(self):
        """Serialize the cart state to a plain dict (inverse of __init__)."""
        return {
            "total": self.total,
            "subtotal": self.subtotal,
            "items": self.items,
            "extra_amount": self.extra_amount
        }

    def change_item(self, item_id, operation):
        """Add or remove a product and refresh the totals.

        :param item_id: product id looked up in the catalog.
        :param operation: either ``'add'`` or ``'remove'``.
        :return: True on success; False if the product id is unknown or,
            for ``'remove'``, the product is not in the cart.
        """
        product = Products().get_one(item_id)
        if not product:
            return False
        if operation == 'add':
            self.items.append(product)
        elif operation == 'remove':
            matches = [x for x in self.items if x['id'] == product['id']]
            if not matches:
                # Nothing to remove; previously this raised IndexError.
                return False
            self.items.remove(matches[0])
        self.update()
        return True

    def update(self):
        """Recompute ``subtotal`` and ``total`` from the current items.

        ``extra_amount`` is only charged when the cart is non-empty.
        """
        subtotal = float(0)
        total = float(0)
        for product in self.items:
            subtotal += float(product["price"])
        if subtotal > 0:
            total = subtotal + self.extra_amount
        self.subtotal = subtotal
        self.total = total
| rochacbruno/python-pagseguro | examples/flask/flask_seguro/cart.py | Python | mit | 1,715 |
# coding=utf-8
class _Users:
def __init__(self, client=None):
self.client = client
def get_favorites_for_user(self, user_gid, params=None, **options):
"""Get a user's favorites
:param str user_gid: (required) A string identifying a user. This can either be the string \"me\", an email, or the gid of a user.
:param Object params: Parameters for the request
- resource_type {str}: (required) The resource type of favorites to be returned.
- workspace {str}: (required) The workspace in which to get favorites.
:param **options
- opt_fields {list[str]}: Defines fields to return. Some requests return *compact* representations of objects in order to conserve resources and complete the request more efficiently. Other times requests return more information than you may need. This option allows you to list the exact set of fields that the API should be sure to return for the objects. The field names should be provided as paths, described below. The id of included objects will always be returned, regardless of the field options.
- opt_pretty {bool}: Provides “pretty” output. Provides the response in a “pretty” format. In the case of JSON this means doing proper line breaking and indentation to make it readable. This will take extra time and increase the response size so it is advisable only to use this during debugging.
:return: Object
"""
if params is None:
params = {}
path = "/users/{user_gid}/favorites".replace("{user_gid}", user_gid)
return self.client.get_collection(path, params, **options)
def get_user(self, user_gid, params=None, **options):
"""Get a user
:param str user_gid: (required) A string identifying a user. This can either be the string \"me\", an email, or the gid of a user.
:param Object params: Parameters for the request
:param **options
- opt_fields {list[str]}: Defines fields to return. Some requests return *compact* representations of objects in order to conserve resources and complete the request more efficiently. Other times requests return more information than you may need. This option allows you to list the exact set of fields that the API should be sure to return for the objects. The field names should be provided as paths, described below. The id of included objects will always be returned, regardless of the field options.
- opt_pretty {bool}: Provides “pretty” output. Provides the response in a “pretty” format. In the case of JSON this means doing proper line breaking and indentation to make it readable. This will take extra time and increase the response size so it is advisable only to use this during debugging.
:return: Object
"""
if params is None:
params = {}
path = "/users/{user_gid}".replace("{user_gid}", user_gid)
return self.client.get(path, params, **options)
def get_users(self, params=None, **options):
"""Get multiple users
:param Object params: Parameters for the request
- workspace {str}: The workspace or organization ID to filter users on.
- team {str}: The team ID to filter users on.
:param **options
- offset {str}: Offset token. An offset to the next page returned by the API. A pagination request will return an offset token, which can be used as an input parameter to the next request. If an offset is not passed in, the API will return the first page of results. 'Note: You can only pass in an offset that was returned to you via a previously paginated request.'
- limit {int}: Results per page. The number of objects to return per page. The value must be between 1 and 100.
- opt_fields {list[str]}: Defines fields to return. Some requests return *compact* representations of objects in order to conserve resources and complete the request more efficiently. Other times requests return more information than you may need. This option allows you to list the exact set of fields that the API should be sure to return for the objects. The field names should be provided as paths, described below. The id of included objects will always be returned, regardless of the field options.
- opt_pretty {bool}: Provides “pretty” output. Provides the response in a “pretty” format. In the case of JSON this means doing proper line breaking and indentation to make it readable. This will take extra time and increase the response size so it is advisable only to use this during debugging.
:return: Object
"""
if params is None:
params = {}
path = "/users"
return self.client.get_collection(path, params, **options)
def get_users_for_team(self, team_gid, params=None, **options):
"""Get users in a team
:param str team_gid: (required) Globally unique identifier for the team.
:param Object params: Parameters for the request
:param **options
- offset {str}: Offset token. An offset to the next page returned by the API. A pagination request will return an offset token, which can be used as an input parameter to the next request. If an offset is not passed in, the API will return the first page of results. 'Note: You can only pass in an offset that was returned to you via a previously paginated request.'
- opt_fields {list[str]}: Defines fields to return. Some requests return *compact* representations of objects in order to conserve resources and complete the request more efficiently. Other times requests return more information than you may need. This option allows you to list the exact set of fields that the API should be sure to return for the objects. The field names should be provided as paths, described below. The id of included objects will always be returned, regardless of the field options.
- opt_pretty {bool}: Provides “pretty” output. Provides the response in a “pretty” format. In the case of JSON this means doing proper line breaking and indentation to make it readable. This will take extra time and increase the response size so it is advisable only to use this during debugging.
:return: Object
"""
if params is None:
params = {}
path = "/teams/{team_gid}/users".replace("{team_gid}", team_gid)
return self.client.get_collection(path, params, **options)
def get_users_for_workspace(self, workspace_gid, params=None, **options):
"""Get users in a workspace or organization
:param str workspace_gid: (required) Globally unique identifier for the workspace or organization.
:param Object params: Parameters for the request
:param **options
- offset {str}: Offset token. An offset to the next page returned by the API. A pagination request will return an offset token, which can be used as an input parameter to the next request. If an offset is not passed in, the API will return the first page of results. 'Note: You can only pass in an offset that was returned to you via a previously paginated request.'
- opt_fields {list[str]}: Defines fields to return. Some requests return *compact* representations of objects in order to conserve resources and complete the request more efficiently. Other times requests return more information than you may need. This option allows you to list the exact set of fields that the API should be sure to return for the objects. The field names should be provided as paths, described below. The id of included objects will always be returned, regardless of the field options.
- opt_pretty {bool}: Provides “pretty” output. Provides the response in a “pretty” format. In the case of JSON this means doing proper line breaking and indentation to make it readable. This will take extra time and increase the response size so it is advisable only to use this during debugging.
:return: Object
"""
if params is None:
params = {}
path = "/workspaces/{workspace_gid}/users".replace("{workspace_gid}", workspace_gid)
return self.client.get_collection(path, params, **options)
| Asana/python-asana | asana/resources/gen/users.py | Python | mit | 8,331 |
from mk2.plugins import Plugin
from mk2.events import ServerOutput, StatPlayerCount, ServerStop, ServerEvent, Event
class Check(object):
    """A single health probe that escalates after minutes of silence.

    Each minute ``step()`` runs: if the check was not marked alive since
    the last step, the silent-minute counter advances and the check emits
    console warnings, a warning event at ``warn`` minutes, and a server
    restart at ``timeout`` minutes.
    """

    # class-level defaults; overridden per instance via **kw
    alive = True
    timeout = 0
    time = 0
    warn = 0

    def __init__(self, parent, **kw):
        self.dispatch = parent.dispatch
        self.console = parent.console
        for name, value in kw.items():
            setattr(self, name, value)

    def check(self):
        """Consume the liveness flag; True if it was set since last step."""
        if not self.alive:
            return False
        self.alive = False
        return True

    def step(self):
        """Advance one minute, warning or restarting as thresholds hit."""
        if self.check():
            return
        self.time += 1
        hit_timeout = bool(self.timeout) and self.time == self.timeout
        hit_warn = bool(self.warn) and self.time == self.warn
        if hit_timeout:
            timeout = "{0} minutes".format(self.timeout)
            self.console("{0} -- restarting.".format(self.message.format(timeout=timeout)))
            self.dispatch(ServerEvent(cause="server/error/" + self.event[0],
                                      data="REBOOTING SERVER: " + self.event[1].format(timeout=timeout),
                                      priority=1))
            self.dispatch(ServerStop(reason=self.stop_reason, respawn=ServerStop.RESTART))
            return
        # every non-restart minute prints the same warning line
        if self.timeout:
            self.console("{0} -- auto restart in {1} minutes".format(self.warning, self.timeout - self.time))
        else:
            self.console(self.warning)
        if hit_warn:
            time = "{0} minutes".format(self.warn)
            self.dispatch(ServerEvent(cause="server/warning/" + self.event[0],
                                      data="WARNING: " + self.event[1].format(timeout=time),
                                      priority=1))

    def reset(self):
        """Mark the check alive and zero the silent-minute counter."""
        self.alive = True
        self.time = 0
class Monitor(Plugin):
crash_enabled = Plugin.Property(default=True)
crash_timeout = Plugin.Property(default=3)
crash_warn = Plugin.Property(default=0)
crash_unknown_cmd_message = Plugin.Property(default="Unknown command.*")
crash_check_command = Plugin.Property(default="")
oom_enabled = Plugin.Property(default=True)
crash_report_enabled = Plugin.Property(default=True)
ping_enabled = Plugin.Property(default=True)
ping_timeout = Plugin.Property(default=3)
ping_warn = Plugin.Property(default=0)
pcount_enabled = Plugin.Property(default=False)
pcount_timeout = Plugin.Property(default=3)
pcount_warn = Plugin.Property(default=0)
def setup(self):
do_step = False
self.checks = {}
if self.oom_enabled:
self.register(self.handle_oom, ServerOutput, level='SEVERE', pattern='java\.lang\.OutOfMemoryError.*')
if self.crash_report_enabled:
self.register(self.handle_unknown_crash, ServerOutput, level='ERROR', pattern='This crash report has been saved to.*')
if self.crash_enabled:
do_step = True
self.checks['crash'] = Check(self, name="crash",
timeout=self.crash_timeout,
warn=self.crash_warn,
message="server might have crashed: not accepting accepting console commands or crash-unknown-cmd-message is not set",
warning="server might have crashed",
event=("hang", "server didn't respond for {timeout}"),
stop_reason="crashed")
if self.ping_enabled:
self.register(self.handle_ping, StatPlayerCount)
do_step = True
self.checks['ping'] = Check(self, name="ping",
timeout=self.ping_timeout,
warn=self.ping_warn,
message="server might have crashed: not accepting connections or wrong port is being pinged.",
warning="server might have stopped accepting connections",
event=("ping", "server didn't respond for {timeout}"),
stop_reason="not accepting connections")
if self.pcount_enabled:
self.register(self.handle_pcount, StatPlayerCount)
do_step = True
self.checks['pcount'] = Check(self, name="pcount",
timeout=self.pcount_timeout,
warn=self.pcount_warn,
message="server might have crashed: has had 0 players for {timeout}",
warning="server has 0 players, might be inaccessible",
event=("player-count", "server had 0 players for {timeout}"),
stop_reason="zero players")
self.do_step = do_step
def server_started(self, event):
self.reset_counts()
if self.do_step:
self.repeating_task(self.step, 60)
def load_state(self, state):
self.server_started(None)
def step(self, *a):
    """Periodic tick: advance every check's timer, then probe the console.

    Sends a deliberately unknown command; handle_crash_ok() clears the
    crash check when the server replies with its 'unknown command' line.
    """
    for c in self.checks.values():
        c.step()
    if self.crash_enabled:
        # One-shot handler: unregisters itself after the first matching line.
        self.register(self.handle_crash_ok, ServerOutput,
                      pattern=self.crash_unknown_cmd_message,
                      track=False)
        self.send(self.crash_check_command)  # Blank command to trigger 'Unknown command'
def reset_counts(self):
    """Mark every registered watchdog check as alive again."""
    for check in self.checks.values():
        check.reset()
### handlers
# crash
def handle_crash_ok(self, event):
    # Console answered the probe command: the server is alive. Eat the
    # output line and unregister so this fires at most once per probe.
    self.checks["crash"].reset()
    return Event.EAT | Event.UNREGISTER
# out of memory
def handle_oom(self, event):
    """The JVM logged OutOfMemoryError: announce it and restart the server."""
    self.console('server out of memory, restarting...')
    self.dispatch(ServerEvent(cause='server/error/oom',
                              data="server ran out of memory",
                              priority=1))
    self.dispatch(ServerStop(reason='out of memory', respawn=ServerStop.RESTART))
# unknown crash
def handle_unknown_crash(self, event):
    """A crash report was written: announce the crash and restart."""
    self.console('server crashed for unknown reason, restarting...')
    self.dispatch(ServerEvent(cause='server/error/unknown',
                              data="server crashed for unknown reason",
                              priority=1))
    self.dispatch(ServerStop(reason='unknown reason', respawn=ServerStop.RESTART))
# ping
def handle_ping(self, event):
    """A player-count stat sourced from a ping clears the ping watchdog."""
    if event.source != 'ping':
        return
    self.checks["ping"].reset()
# pcount
def handle_pcount(self, event):
    """Player-count watchdog: reset while anyone is online, otherwise
    mark the check as no longer alive."""
    check = self.checks["pcount"]
    if event.players_current > 0:
        check.reset()
    else:
        check.alive = False
| SupaHam/mark2 | mk2/plugins/monitor.py | Python | mit | 7,104 |
# -*- coding: utf-8 -*-
"""
Configuration data for the system.
"""
import os
data_dir = None
FILES = {}
def get_config_paths(directory):
    """Return a dict mapping resource keys to absolute file paths inside
    *directory* (the data directory holding the trained model files).

    Raises AssertionError if *directory* is not an existing directory.
    (The original docstring said "Sets the data directory" — this function
    only builds and returns the mapping; set_data_dir() does the setting.)
    """
    assert os.path.isdir(directory), 'Invalid data directory'
    return { key: os.path.join(directory, value) for key, value in [
        # cross-task data
        ('.', '.'), #for data_dir access
        # vocabulary file used as a fallback if a reader doesn't have a specific one
        ('vocabulary' , 'vocabulary.txt'),
        ('type_features' , 'types-features.npy'),
        ('termvectors' , 'termvectors.txt'),

        # POS
        ('network_pos' , 'pos-network.npz'),
        ('pos_tags' , 'pos-tags.txt'),
        ('pos_tag_dict' , 'pos-tags.txt'),
        ('suffix' , 'suffixes.txt'),
        ('suffixes' , 'suffixes.txt'),
        ('prefix' , 'prefixes.txt'),
        ('prefixes' , 'prefixes.txt'),
        ('metadata_pos' , 'metadata-pos.pickle'),
        ('type_features_pos' , 'types-features-pos.npy'),
        ('caps_features_pos' , 'caps-features-pos.npy'),
        ('suffix_features_pos' , 'suffix-features-pos.npy'),
        ('prefix_features_pos' , 'prefix-features-pos.npy'),
        ('vocabulary_pos' , 'vocabulary-pos.txt'),

        # dependency
        ('network_labeled_dependency', 'ldep-network.npz'),
        ('type_features_labeled_dependency', 'types-features-ldep.npy'),
        ('caps_features_labeled_dependency', 'caps-features-ldep.npy'),
        ('pos_features_labeled_dependency', 'pos-features-ldep.npy'),
        ('metadata_labeled_dependency', 'metadata-ldep.pickle'),
        ('dependency_tag_dict', 'dependency-tags.txt'),
        ('labeled_dependency_tag_dict', 'dependency-tags.txt'),
        ('vocabulary_labeled_dependency', 'vocabulary-ldep.txt'),
        ('dependency_pos_tags', 'dep-pos-tags.txt'),
        ('network_unlabeled_dependency', 'udep-network.npz'),
        ('type_features_unlabeled_dependency', 'types-features-udep.npy'),
        ('caps_features_unlabeled_dependency', 'caps-features-udep.npy'),
        ('pos_features_unlabeled_dependency', 'pos-features-udep.npy'),
        ('metadata_unlabeled_dependency', 'metadata-udep.pickle'),
        ('vocabulary_unlabeled_dependency', 'vocabulary-udep.txt'),

        # chunk
        ('chunk_tag_dict' , 'chunk-tag-dict.pickle'),
        ('chunk_tags' , 'chunk-tags.txt'),

        # SRL
        ('network_srl' , 'srl-network.npz'),
        ('network_srl_boundary' , 'srl-id-network.npz'),
        ('network_srl_classify' , 'srl-class-network.npz'),
        ('network_srl_predicates' , 'srl-class-predicates.npz'),
        ('srl_iob_tag_dict' , 'srl-tags.txt'),
        ('srl_iob_tags' , 'srl-tags.txt'),
        ('srl_tags' , 'srl-tags.txt'),
        ('srl_classify_tag_dict' , 'srl-tags.txt'),
        ('srl_classify_tags' , 'srl-tags.txt'),
        ('srl_predicates_tag_dict' , 'srl-predicates-tags.txt'),
        ('srl_predicates_tags' , 'srl-predicates-tags.txt'),
        ('type_features_boundary' , 'types-features-id.npy'),
        ('caps_features_boundary' , 'caps-features-id.npy'),
        ('pos_features_boundary' , 'pos-features-id.npy'),
        ('chunk_features_boundary' , 'chunk-features-id.npy'),
        ('type_features_classify' , 'types-features-class.npy'),
        ('caps_features_classify' , 'caps-features-class.npy'),
        ('pos_features_classify' , 'pos-features-class.npy'),
        ('chunk_features_classify' , 'chunk-features-class.npy'),
        ('type_features_1step' , 'types-features-1step.npy'),
        ('caps_features_1step' , 'caps-features-1step.npy'),
        ('pos_features_1step' , 'pos-features-1step.npy'),
        ('chunk_features_1step' , 'chunk-features-1step.npy'),
        ('type_features_srl_predicates', 'types-features-preds.npy'),
        ('caps_features_srl_predicates', 'caps-features-preds.npy'),
        ('pos_features_srl_predicates' , 'pos-features-preds.npy'),
        ('metadata_srl' , 'metadata-srl.pickle'),
        ('metadata_srl_boundary' , 'metadata-srl-boundary.pickle'),
        ('metadata_srl_classify' , 'metadata-srl-classify.pickle'),
        ('metadata_srl_predicates' , 'metadata-srl-predicates.pickle'),
        ('vocabulary_srl', 'vocabulary-srl.txt'),
        ('vocabulary_srl_boundary', 'vocabulary-srl-boundary.txt'),
        ('vocabulary_srl_classify', 'vocabulary-srl-classify.txt'),
        ('vocabulary_srl_predicates', 'vocabulary-srl-predicates.txt'),
        ]
    }
def set_data_dir(directory):
    """Point the module at *directory* and rebuild the global FILES table."""
    global data_dir, FILES
    data_dir = directory
    FILES = get_config_paths(directory)
| papower1/nlpnet-for-korean | nlpnet/config.py | Python | mit | 5,209 |
import os
import random
import sqlalchemy
import sqlalchemy.orm
import sqlalchemy.ext
import sqlalchemy.ext.declarative
SqlAlchemyBase = sqlalchemy.ext.declarative.declarative_base()
class Measurement(SqlAlchemyBase):
    # One (x, y) -> value sample persisted in the 'Measurement' table.
    __tablename__ = 'Measurement'
    # Surrogate primary key, assigned by the database.
    id = sqlalchemy.Column(sqlalchemy.Integer,
                           primary_key=True, autoincrement=True)
    x = sqlalchemy.Column(sqlalchemy.Integer)
    y = sqlalchemy.Column(sqlalchemy.Integer)
    value = sqlalchemy.Column(sqlalchemy.Float)
# run this code only once per process assuming 1 database
# SQLite file lives next to this module.
db_file = os.path.join(
    os.path.dirname(__file__),
    'slicing_db.sqlite')
conn_str = 'sqlite:///' + db_file
engine = sqlalchemy.create_engine(conn_str, echo=True)
session_factory = sqlalchemy.orm.sessionmaker(bind=engine)
# Create the Measurement table if it does not exist yet.
SqlAlchemyBase.metadata.create_all(engine)

session = session_factory()
count = session.query(Measurement).count()
# Seed the table with 100 random samples on first run only.
if not count:
    print("No data, adding test data")
    for _ in range(100):
        m = Measurement()
        m.value = random.random()
        m.x = random.randint(0, 100)
        m.y = random.randint(0, 100)
        session.add(m)

    session.commit()
| mikeckennedy/write-pythonic-code-demos | code/ch_04_collections/_03_slice_support.py | Python | mit | 1,180 |
from __future__ import print_function, unicode_literals
from django.core.management.base import BaseCommand
from django.apps import apps
from optparse import make_option
from resize.fields import ResizedImageField
from resize.utils import resize_image
class Command(BaseCommand):
    # Walks every installed model, finds ResizedImageField columns, and
    # resizes each stored image to every configured resolution.
    help = 'Ensures that all ResizedImageFields are available in all required resolutions.'

    # NOTE(review): option_list was removed in Django 1.10; newer projects
    # should override add_arguments() — confirm the targeted Django version.
    option_list = BaseCommand.option_list + (
        make_option('--fail-loud',
            action='store_true',
            dest='fails',
            default=False,
            help='Fail on IOErrors',
        ),
    )

    def handle(self, *args, **options):
        """Entry point: resize every image of every ResizedImageField.

        With --fail-loud, a missing source file (IOError) aborts the run;
        otherwise it is reported and skipped.
        """
        print('Looking for resized fields')
        for Model in apps.get_models():
            print(' {}.{}'.format(Model._meta.app_label, Model._meta.model_name))

            # Collect this model's ResizedImageField instances.
            resized_fields = []
            for field in Model._meta.fields:
                if isinstance(field, ResizedImageField):
                    resized_fields.append(field)

            for field in resized_fields:
                print(' ', field.name)
                # Non-null, non-empty stored file names for this field.
                images = (
                    Model
                    .objects
                    .filter(**{'{}__isnull'.format(field.name): False})
                    .exclude(**{field.name: ''})
                    .values_list(field.name, flat=True)
                )
                for resolution in field.resolutions:
                    print(' resizing {} images to {}'.format(
                        len(images),
                        resolution,
                    ))
                    for image in images:
                        try:
                            resize_image(field.storage.open(image), resolution)
                        except IOError:
                            print(' Image does not exist', image)
                            if options['fails']:
                                raise

        print('Resizing complete')
| defrex/django-resize | resize/management/commands/resize_fields.py | Python | mit | 1,954 |
import time
from slackclient import SlackClient
from slackBot.recommender import recommend, query_item
# starterbot's ID as an environment variable
# BOT_ID = os.environ.get("BOT_ID")
BOT_ID = "ID"

# constants
AT_BOT = "<@" + BOT_ID + ">"
# Recognised command prefixes (checked against the start of a message).
EXAMPLE_COMMAND = "!do"
CHECK = "!check"
LAKSHMI = "!lakshmi"
CODE = "!code"
THINK = "!think"
RECOMMEND = "!recommend"
# Module-level defaults; handle_command assigns its own locals with
# the same names, so these are effectively unused.
PRODUCT = ""
COUNT = 0

# instantiate Slack & Twilio clients
# slack_client = SlackClient(os.environ.get('xoxb-117640426146-oCZScxy8XJKzpHPJ0IALFoL5'))
# SECURITY NOTE(review): a Slack API token is hard-coded below. It should be
# read from the environment (see the commented-out line) and the committed
# token revoked.
slack_client = SlackClient('xoxb-117640426146-oCZScxy8XJKzpHPJ0IALFoL5')
def handle_command(command, channel):
    """
    Receives commands directed at the bot and determines if they
    are valid commands. If so, then acts on the commands. If not,
    returns back what it needs for clarification.
    """
    # Default reply for anything unrecognised.
    response = "Not sure what you mean. Use the *" + EXAMPLE_COMMAND + \
               "* command with numbers, delimited by spaces."
    # elif chain: the command prefixes are mutually exclusive, so only one
    # branch can match (the original ran every `if`, relying on that).
    if command.startswith(EXAMPLE_COMMAND):
        response = "Sure...write some more code then I can do that!"
    elif command.startswith(CHECK):
        response = "Hi! What do you want me to check?"
    elif command.startswith(LAKSHMI):
        response = "Hi Chunnu! :3"
    elif command.startswith(CODE):
        response = "```This is a code snippet```"
    elif command.startswith(THINK):
        response = ":thinking_face:"
    elif command.startswith(RECOMMEND):
        # Expected form: "!recommend <product_id> <count>". The original
        # indexed split()[1]/[2] unguarded and crashed with IndexError or
        # ValueError on malformed input.
        parts = command.split()
        if len(parts) >= 3 and parts[1].isdigit() and parts[2].isdigit():
            product_id, count = int(parts[1]), int(parts[2])
            response = (str(count) + " products similar to " +
                        query_item(product_id) + " : \n" +
                        recommend(product_id, count))
        else:
            response = "Usage: " + RECOMMEND + " <product_id> <count>"
    slack_client.api_call("chat.postMessage", channel=channel,
                          text=response, as_user=True)
def parse_slack_output(slack_rtm_output):
    """
    Scan a batch of Slack RTM events and return (command_text, channel)
    for the first message mentioning the bot; (None, None) otherwise.
    """
    for output in slack_rtm_output or []:
        if output and 'text' in output and AT_BOT in output['text']:
            # Text after the @-mention, stripped and lower-cased.
            command = output['text'].split(AT_BOT)[1].strip().lower()
            return command, output['channel']
    return None, None
if __name__ == "__main__":
    READ_WEBSOCKET_DELAY = 1  # 1 second delay between reading from firehose
    if slack_client.rtm_connect():
        print("StarterBot connected and running!")
        # Poll the RTM firehose forever, dispatching any bot mentions.
        while True:
            command, channel = parse_slack_output(slack_client.rtm_read())
            if command and channel:
                handle_command(command, channel)
            time.sleep(READ_WEBSOCKET_DELAY)
    else:
        print("Connection failed. Invalid Slack token or bot ID?")
| kanishkamisra/something-like | slackBot/starterbot.py | Python | mit | 2,886 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Abstract class with common functions
#
# Copyright (c) 2012-2014 Alan Aguiar [email protected]
# Copyright (c) 2012-2014 Butiá Team [email protected]
# Butia is a free and open robotic platform
# www.fing.edu.uy/inco/proyectos/butia
# Facultad de Ingeniería - Universidad de la República - Uruguay
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
class ButiaFunctions:
    """High-level convenience wrappers around callModule() for the Butiá robot.

    Every method forwards to self.callModule(module, board, port, call, args),
    which a concrete subclass must provide.
    """

    def isPresent(self, module_name):
        """
        Check if module: module_name is present
        """
        module_list = self.getModulesList()
        return (module_name in module_list)

    def loopBack(self, data, board='0'):
        """
        LoopBack command: send data to the board and get the result. If all is ok
        the return must be exactly of the data parameter
        """
        msg = [str(data)]
        return self.callModule('lback', str(board), '0', 'send', msg, ret_type=str)

    ############################## Movement calls ##############################

    def set2MotorSpeed(self, leftSense='0', leftSpeed='0', rightSense='0', rightSpeed='0', board='0'):
        """
        Set the speed of 2 motors. The sense is 0 or 1, and the speed is
        between 0 and 1023
        """
        msg = [str(leftSense), str(leftSpeed), str(rightSense), str(rightSpeed)]
        return self.callModule('motors', str(board), '0', 'setvel2mtr', msg)

    def setMotorSpeed(self, idMotor='0', sense='0', speed='0', board='0'):
        """
        Set the speed of one motor. idMotor = 0 for left motor and 1 for the
        right motor. The sense is 0 or 1, and the speed is between 0 and 1023
        """
        msg = [str(idMotor), str(sense), str(speed)]
        return self.callModule('motors', str(board), '0', 'setvelmtr', msg)

    def getMotorType(self, board='0'):
        """
        If AX-12 motors present returns 1. If there are a shield "cc" returns 2
        """
        return self.callModule('motors', str(board), '0', 'getType')

    ##################### Operations for ax.lua driver #########################

    def writeInfo(self, idMotor, regstart, value, board='0'):
        """
        Writes the motor: idMotor in the registry: regstart with value: value
        """
        msg = [str(idMotor), str(regstart), str(value)]
        return self.callModule('ax', str(board), '0', 'writeInfo', msg)

    def readInfo(self, idMotor, regstart, length='1', board='0'):
        """
        Reads the motor: idMotor in the registry: regstart
        """
        msg = [str(idMotor), str(regstart), str(length)]
        # BUG FIX: this previously invoked 'writeInfo', so every read
        # performed a register write instead of a read.
        return self.callModule('ax', str(board), '0', 'readInfo', msg)

    def sendPacket(self, msg, board='0'):
        """
        Send a raw packet to ax module
        """
        msg_s = [str(i) for i in msg]
        return self.callModule('ax', str(board), '0', 'sendPacket', msg_s, ret_type=str)

    def wheelMode(self, idMotor='0', board='0'):
        """
        Sets the motor: idMotor in wheel mode (continuos rotation)
        """
        msg = [str(idMotor)]
        return self.callModule('ax', str(board), '0', 'wheelMode', msg)

    def jointMode(self, idMotor='0', _min='0', _max='1023', board='0'):
        """
        Sets the motor: idMotor in servo mode
        """
        msg = [str(idMotor), str(_min), str(_max)]
        return self.callModule('ax', str(board), '0', 'jointMode', msg)

    def setPosition(self, idMotor='0', pos='0', board='0'):
        """
        Sets the position: pos of the motor: idMotor
        """
        msg = [str(idMotor), str(pos)]
        return self.callModule('ax', str(board), '0', 'setPosition', msg)

    def getPosition(self, idMotor='0', board='0'):
        """
        Gets the position of motor: idMotor
        """
        msg = [str(idMotor)]
        return self.callModule('ax', str(board), '0', 'getPosition', msg)

    def setSpeed(self, idMotor='0', speed='0', board='0'):
        """
        Set the speed: speed to the motor: idMotor
        """
        msg = [str(idMotor), str(speed)]
        return self.callModule('ax', str(board), '0', 'setSpeed', msg)

    ############################### General calls ##############################

    def getBatteryCharge(self, board='0'):
        """
        Gets the battery level charge
        """
        return self.callModule('butia', str(board), '0', 'getVolt', ret_type=float)

    def getVersion(self, board='0'):
        """
        Gets the version of Butiá module. 22 for new version
        """
        return self.callModule('butia', str(board), '0', 'getVersion')

    def getFirmwareVersion(self, board='0'):
        """
        Gets the version of the Firmware
        """
        return self.callModule('admin', str(board), '0', 'getVersion')

    ############################### Sensors calls ###############################

    def getButton(self, port, board='0'):
        """
        Gets the value of the button connected in port
        """
        return self.callModule('button', str(board), str(port), 'getValue')

    def getLight(self, port, board='0'):
        """
        Gets the value of the light sensor connected in port
        """
        return self.callModule('light', str(board), str(port), 'getValue')

    def getDistance(self, port, board='0'):
        """
        Gets the value of the distance sensor connected in port
        """
        return self.callModule('distanc', str(board), str(port), 'getValue')

    def getGray(self, port, board='0'):
        """
        Gets the value of the gray sensor connected in port
        """
        return self.callModule('grey', str(board), str(port), 'getValue')

    def getResistance(self, port, board='0'):
        """
        Gets the value of the resistance sensor connected in port
        """
        return self.callModule('res', str(board), str(port), 'getValue', ret_type=float)

    def getVoltage(self, port, board='0'):
        """
        Gets the value of the voltage sensor connected in port
        """
        return self.callModule('volt', str(board), str(port), 'getValue', ret_type=float)

    def getTemperature(self, port, board='0'):
        """
        Gets the value of the temperature sensor connected in port
        """
        return self.callModule('temp', str(board), str(port), 'getValue', ret_type=float)

    def setLed(self, port, on_off, board='0'):
        """
        Sets on or off the LED connected in port (0 is off, 1 is on)
        """
        return self.callModule('led', str(board), str(port), 'turn', [str(on_off)])

    ################################ Extras ################################

    def setModeHack(self, pin, mode, board='0'):
        """
        Sets the mode of hack pin. If mode 0 = output, mode 1 = input
        """
        msg = [str(pin), str(mode)]
        return self.callModule('hackp', str(board), '0', 'setMode', msg)

    def getModeHack(self, pin, board='0'):
        """
        Get the mode of hack pin. If mode 0 = output, mode 1 = input
        """
        return self.callModule('hackp', str(board), '0', 'getMode', [str(pin)])

    def setHack(self, pin, value, board='0'):
        """
        Sets the value of hack pin configured as output. Value is 0 or 1
        """
        msg = [str(pin), str(value)]
        return self.callModule('hackp', str(board), '0', 'write', msg)

    def getHack(self, pin, board='0'):
        """
        Gets the value of hack pin configured as input. Returns 0 or 1
        """
        return self.callModule('hackp', str(board), '0', 'read', [str(pin)])

    ############################# Generic modules #############################

    def getModuleA(self, port, board='0'):
        """
        Gets the value of the generic sensor A connected in port
        """
        return self.callModule('modSenA', str(board), str(port), 'getValue')

    def getModuleB(self, port, board='0'):
        """
        Gets the value of the generic sensor B connected in port
        """
        return self.callModule('modSenB', str(board), str(port), 'getValue')

    def getModuleC(self, port, board='0'):
        """
        Gets the value of the generic sensor C connected in port
        """
        return self.callModule('modSenC', str(board), str(port), 'getValue')

    def setModuleA(self, port, on_off, board='0'):
        """
        Sets on or off the generic actuator module A
        """
        return self.callModule('modActA', str(board), str(port), 'turn', [str(on_off)])

    def setModuleB(self, port, on_off, board='0'):
        """
        Sets on or off the generic actuator module B
        """
        return self.callModule('modActB', str(board), str(port), 'turn', [str(on_off)])

    def setModuleC(self, port, on_off, board='0'):
        """
        Sets on or off the generic actuator module C
        """
        return self.callModule('modActC', str(board), str(port), 'turn', [str(on_off)])

    ############################# Useful functions #############################

    def _split_module(self, mbn):
        """
        Split a modulename: module@board:port to (number, modulename, board)
        """
        board = '0'
        number = '0'
        if mbn.count('@') > 0:
            modulename, bn = mbn.split('@')
            if bn.count(':') > 0:
                board, number = bn.split(':')
            else:
                board = bn
        else:
            if mbn.count(':') > 0:
                modulename, number = mbn.split(':')
            else:
                modulename = mbn
        return (number, modulename, board)
| nvazquez/Turtlebots | plugins/butia/pybot/functions.py | Python | mit | 10,197 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# @date Dec 13 2015
# @brief
#
import sys
import threading
import logging
from logging.handlers import (
TimedRotatingFileHandler,
SocketHandler,
DatagramHandler,
SysLogHandler,
SMTPHandler,
HTTPHandler,
)
from logging import LoggerAdapter
try:
    from six import with_metaclass
except ImportError:
    # six is optional: fall back to a local copy of its helper.
    # FIX: the original used a bare `except:`, which also swallowed
    # KeyboardInterrupt/SystemExit and any unrelated error.
    def with_metaclass(meta, *bases):
        """Create a base class with a metaclass. copy from six """
        class metaclass(meta):
            def __new__(cls, name, this_bases, d):
                return meta(name, bases, d)
        return type.__new__(metaclass, 'temporary_class', (), {})
from .util import (
read_from_yaml,
read_from_etcd,
parse_config,
)
PY2 = sys.version_info[0] == 2
class IntField(int):
    """An int subclass that also carries arbitrary named attributes
    (at minimum ``name``), so constants stay usable as plain ints."""
    def __new__(cls, value=0, name=None, **kwargs):
        instance = int.__new__(cls, value)
        instance.__dict__.update(dict(kwargs, name=name))
        return instance
class StrField(str):
    """A str subclass that also carries arbitrary named attributes
    (at minimum ``name``).

    NOTE(review): the default ``value=0`` produces the string "0" rather
    than "" (mirroring IntField) — confirm this quirk is intentional.
    """
    def __new__(cls, value=0, name="", **kwargs):
        instance = str.__new__(cls, value)
        instance.__dict__.update(dict(kwargs, name=name))
        return instance
class ConstMetaClass(type):
    """Metaclass that wraps UPPERCASE int/str class attributes in
    IntField/StrField (attaching the attribute name as ``name``) and
    collects them all into a FIELD_DICT class attribute."""
    def __new__(mcs, name, bases, namespace):
        fields = {}
        for attr, value in list(namespace.items()):
            if not (attr.isupper() and isinstance(value, (int, str))):
                continue
            if isinstance(value, int) and not isinstance(value, IntField):
                # plain int: wrap it, defaulting the field name to the attr
                namespace[attr] = IntField(value, name=attr)
            elif isinstance(value, str) and not isinstance(value, StrField):
                namespace[attr] = StrField(value, name=attr)
            fields[attr] = namespace[attr]
        namespace["FIELD_DICT"] = fields
        return type.__new__(mcs, name, bases, namespace)
class _Const(with_metaclass(ConstMetaClass)):
    # Base class for constant containers; subclass attributes are wrapped
    # and FIELD_DICT is filled in by ConstMetaClass.
    FIELD_DICT = NotImplemented
class Logger(LoggerAdapter):
    """
    Inspired by log4j2.
    Technical terms:
        Java        Python
        Appender -> Handler
        Layout   -> Formatter
        Logger   -> Logger
        Layout   -> format
    """
    # [ 16.6.7. LogRecord attributes -Python docs ](https://docs.python.org/3/library/logging.html#logrecord-attributes)
    DEFAULT_FORMAT = '[%(levelname)s %(process)d:%(asctime)s:%(funcName)s:%(lineno)d] %(message)s'
    DEFAULT_DATE_FORMAT = '%Y%m%d %H:%M:%S' # display as local zone

    class RotateMode(_Const):
        """
        Valid ``when`` values for TimedRotatingFileHandler, see
        https://docs.python.org/3/library/logging.handlers.html
        Weekday values 'W0' - 'W6' are deliberately not supported.
        """
        SECONDS = 'S'
        MINUTES = 'M'
        HOURS = 'H'
        DAYS = 'D'
        MIDNIGHT = 'midnight'

    def __init__(self, logger, formatter, extra=None):
        """
        :param logger: the underlying logging.Logger being wrapped
        :param formatter: logging.Formatter applied to every handler added
        :param extra:
            for logging.LoggerAdapter specify contextual
        :return:
        """
        self._formatter = formatter
        super(Logger, self).__init__(logger, extra=extra)

    def set_extra(self, extra):
        # Replace the adapter's contextual dict wholesale.
        if not isinstance(extra, dict):
            raise TypeError("extra not dict")
        self.extra = extra

    def update_extra(self, update_extra):
        # Merge new contextual keys into the existing extra dict.
        if not isinstance(update_extra, dict):
            raise TypeError("update_extra not dict")
        self.extra.update(update_extra)

    def config_file_handler(self, filename, level=None, rotate_mode=RotateMode.DAYS):
        # Time-rotated file output; rotation boundaries are computed in UTC.
        self.add_handler(TimedRotatingFileHandler(filename, when=rotate_mode, utc=True), level=level)

    def config_tcp_handler(self, host, port, level=None):
        self.add_handler(SocketHandler(host, port), level=level)

    def config_udp_handler(self, host, port, level=None):
        self.add_handler(DatagramHandler(host, port), level=level)

    def config_syslog_handler(self, *args, **kwargs):
        # should known SysLogHandler params
        level = kwargs.pop('level', None)
        self.add_handler(SysLogHandler(*args, **kwargs), level=level)

    def config_smtp_handler(self, *args, **kwargs):
        # should known SMTPHandler params
        level = kwargs.pop('level', None)
        self.add_handler(SMTPHandler(*args, **kwargs), level=level)

    def config_http_handler(self, *args, **kwargs):
        # should known HTTPHandler params
        level = kwargs.pop('level', None)
        self.add_handler(HTTPHandler(*args, **kwargs), level=level)

    def add_handler(self, handler, **kwargs):
        # Attach the shared formatter, honour an optional per-handler level,
        # and register the handler on the wrapped logger.
        level = kwargs.get('level')
        handler.setFormatter(self._formatter)
        if level:
            handler.setLevel(level)
        self.logger.addHandler(handler)
class LogManager(object):
    # Guards mutation of the class-level registry and meta config.
    _lock = threading.RLock()
    # name -> Logger; presumably filled by parse_config() — TODO confirm.
    _REGISTERED_LOGGER_DICT = {}

    # config options:
    CONFIG_YAML = 1
    CONFIG_ETCD = 2
    CONFIG_READ_HANDLER_DICT = {
        CONFIG_YAML: read_from_yaml,
        CONFIG_ETCD: read_from_etcd,
    }
    _META_CONFIG = NotImplemented

    @classmethod
    def register_meta_config(cls, config_type, **kwargs):
        """
        Register your meta config:
        1. tell LogManager which source you want to read from
        2. give that source the parameters it needs to read the config data
        :param config_type: one of the CONFIG_* constants above
        :param kwargs: parameters forwarded to the config read handler
        """
        # NOTE(review): the error message mentions "LogManager.ConfigType",
        # but the valid values are the CONFIG_* class attributes — confirm.
        if config_type not in cls.CONFIG_READ_HANDLER_DICT.keys():
            raise ValueError("no support config_type= {0} it should be defined in LogManager.ConfigType".format(
                config_type))
        with cls._lock:
            cls._META_CONFIG = {'type': config_type, 'kwargs': kwargs}

    @classmethod
    def load(cls):
        """
        Read the registered meta config and apply it via parse_config().
        Recommendation:
            just load once at the startup of the process/app
        eg:
            LogManager.register_meta_config(LogManager.ConfigType.YAML, host="127.0.0.1", port=2379)
            LogManager.load()

            # your app start running
            app.run()
        """
        with cls._lock:
            config_type = cls._META_CONFIG['type']
            read_handler = cls.CONFIG_READ_HANDLER_DICT[config_type]
            config_data = read_handler(**cls._META_CONFIG['kwargs'])
            parse_config(cls, config_data)

    @staticmethod
    def get_root_logger():
        return logging.getLogger()

    @staticmethod
    def create_logger(
            name=None, level=logging.INFO, propagate=True,
            date_fmt=Logger.DEFAULT_DATE_FORMAT, fmt=Logger.DEFAULT_FORMAT
    ):
        """
        Build a Logger adapter around logging.getLogger(name).
        :param name: default None
        :param level: default logging.INFO
        :param propagate: default True
        :param date_fmt:
        :param fmt:
        :return: Logger instance
        """
        logger = logging.getLogger(name)
        formatter = logging.Formatter(datefmt=date_fmt, fmt=fmt)
        logger.setLevel(level)
        logger.propagate = propagate
        return Logger(logger, formatter)

    @classmethod
    def get_logger(cls, name):
        # Look up a logger registered via config; fall back to the root
        # logger (with a warning) when the name is unknown.
        registered_logger = cls._REGISTERED_LOGGER_DICT.get(name)
        if registered_logger:
            return registered_logger
        else:
            root_logger = cls.get_root_logger()
            root_logger.warning("not found logger by name= {0}".format(name))
            return root_logger
| kaka19ace/kklogger | kklogger/logger.py | Python | mit | 7,452 |
# coding: utf-8
from __future__ import absolute_import
from google.appengine.ext import ndb
from api import fields
import model
class Artist(model.Base):
    # Datastore (ndb) model for a musical artist.
    name = ndb.StringProperty()
    artist_type = ndb.StringProperty()
    gender = ndb.StringProperty(default='none', choices=['male', 'female', 'none'])
    website = ndb.StringProperty()
    image_url = ndb.StringProperty()

    # API serialisation schema, merged with the base model's fields.
    FIELDS = {
        'name': fields.String,
        'artist_type': fields.String,
        'gender': fields.String,
        'website': fields.String,
        'image_url': fields.String,
    }
    FIELDS.update(model.Base.FIELDS)
| ssxenon01/music-app | main/model/artist.py | Python | mit | 618 |
import pandas as pd
import numpy as np
from sqlalchemy import create_engine
import utils
import plotly.graph_objs as go
from plotly import tools
from scipy.stats import pearsonr
# main database
db = create_engine('sqlite:///data/processed.db')
def get_params(dataset):
    """Return (usable feature columns, near-constant columns) for *dataset*.

    A column is considered near-constant when its variance is below 1e-5;
    the target (last column) is excluded from consideration.
    """
    table = pd.read_sql_table(dataset, db, index_col = 'ID')
    features = table.iloc[:, :-1]
    # remove low var columns
    low_var_cols = features.columns[features.var() < 1e-5]
    usable = [name for name in features.columns if name not in low_var_cols]
    return (usable, low_var_cols)
def scatter_plot(dataset, varx, vary):
    """Bubble scatter of *varx* vs *vary* for *dataset*; marker diameter
    encodes the min-max-normalised target (last column), scaled to [20, 60].
    Returns the plot as an HTML div string."""
    # read data
    tb = pd.read_sql_table(dataset, db, index_col = 'ID')
    x = tb[varx].values.tolist()
    y = tb[vary].values.tolist()
    z = np.array(tb[tb.columns[-1]].values.tolist())
    # normalise target to [0.5, 1.5], then scale into bubble diameters
    z = ((z - z.min()) / (z.max() - z.min()) + 0.5) * 40
    # plot
    trace = go.Scatter(x = x, y = y, mode = 'markers',
                       marker=dict(color = 'rgb(93, 164, 214)',
                                   opacity = 0.35,
                                   size = z,
                                   sizemode = 'diameter',))
    layout= go.Layout(margin = go.Margin(l = 100, r = 100, b = 50,
                                         t = 30, pad = 0),
                      width = 550, height = 500,
                      xaxis = dict(title = varx),
                      yaxis = dict(title = vary))
    fig = go.Figure(data = [trace], layout = layout)
    return utils.plot_to_div(fig)
def rlasso_scores_plot(names, scores):
    """Bar chart of randomized-lasso selection scores per feature name;
    returns the plot as an HTML div string."""
    trace = go.Bar(x = names, y = scores, opacity = 0.7)
    layout= go.Layout(margin = go.Margin(l = 50, r = 80,
                                         b = 70, t = 10, pad = 0),
                      height = 200,
                      yaxis = dict(title = 'Score'))
    fig = go.Figure(data = [trace], layout = layout)
    return utils.plot_to_div(fig)
def correlation_analysis(dataset):
    """Rank features of *dataset* by |Pearson r| against the target and
    plot the nine strongest as a 3x3 grid of scatter subplots.

    Returns (feature names in descending |r| order, plot div HTML).
    """
    # read
    tb = pd.read_sql_table(dataset, db, index_col = 'ID')
    X = tb.iloc[:, :-1]; y = tb.iloc[:, -1]
    # compute correlation (near-constant columns dropped first)
    X.drop(X.columns[X.var() < 1e-5], axis = 1, inplace = True)
    # FIX: .ix was deprecated and removed from pandas; .iloc is the
    # positional equivalent and works on both old and new versions.
    r = np.array([pearsonr(X.iloc[:, i], y) for i in range(X.shape[1])])
    rank = np.abs(r[:, 0]).argsort()[::-1]
    # plot top ones
    N = 9
    top = rank[:N]
    traces = []
    names = []
    for (i, c) in enumerate(X.columns[top]):
        names.append('{}<br>(r={:0.2g} p={:0.2g})'.format(
            c, r[top[i], 0], r[top[i], 1]))
        traces.append(go.Scatter(x = X[c].values.tolist(),
                                 y = y.values.tolist(),
                                 mode = 'markers',
                                 showlegend = False))
    fig = tools.make_subplots(rows = 3, cols = 3,
                              subplot_titles = names,
                              vertical_spacing = 0.1,
                              horizontal_spacing = 0.1)
    for (i, p) in enumerate(traces):
        fig.append_trace(p, i // 3 + 1, i % 3 + 1)
    fig['layout'].update(height = 700, width = 1100)
    fig['layout'].update(margin = go.Margin(l = 50, r = 50, b = 50,
                                            t = 50, pad = 0))
    for a in fig.layout.annotations:
        a['font'].update(size = 14)
    return (X.columns[rank], utils.plot_to_div(fig))
# RFE random forest for feature importance ranking
from sklearn.cross_validation import ShuffleSplit
from sklearn.grid_search import GridSearchCV
from sklearn.ensemble import RandomForestRegressor
def rfe_with_grid_search(X, y, estimator, params, n_iter = 10,
                         test_size = 0.2):
    """Iterative feature ranking by importance.

    Each round grid-searches *estimator* over *params* with shuffle-split
    CV, records the best R^2, takes the estimator's most important feature,
    then removes that column and repeats until no features remain.

    Returns (rank, R2): `rank` lists original column indices from most to
    least important; R2[i] is the CV score before the i-th removal.
    """
    R2 = []; rank = []
    # `features` maps current column positions back to original indices.
    features = list(range(X.shape[1]))
    # make a copy of X
    Xc = X.copy()
    while features:
        print("{} features are left.".format(len(features)))
        # find the best parameter with cross validation
        ss = ShuffleSplit(Xc.shape[0], n_iter, test_size)
        reg = GridSearchCV(estimator, params, cv = ss)
        reg.fit(Xc, y)
        # record the best score
        R2.append(reg.best_score_)
        # find the best feature
        best = reg.best_estimator_.feature_importances_.argmax()
        rank.append(features[best])
        # drop the selected (most important) column so the next round
        # ranks the remaining features
        del features[best]
        Xc = np.delete(Xc, best, axis = 1)
    # descending order
    return (rank, R2)
# RandomizedLasso for feature selection
from sklearn.linear_model import RandomizedLasso, LassoCV
def lassocv_n_random_lasso(X, y, n_iter = 30, test_size = 0.2,
                           max_iter = 50000, n_resampling = 2000):
    """Stability selection: choose alpha with LassoCV, then score features
    with RandomizedLasso at that alpha.

    Returns (rank, scores): column indices sorted by descending selection
    score, and the scores in that order.
    NOTE(review): RandomizedLasso was removed from recent scikit-learn;
    this file also imports sklearn.cross_validation, so it requires an old
    pinned sklearn version — confirm the dependency.
    """
    # find a good alpha using cv
    ss = ShuffleSplit(X.shape[0], n_iter, test_size)
    reg = LassoCV(normalize = True, cv = ss, max_iter = max_iter)
    reg.fit(X, y)
    reg = RandomizedLasso(alpha = reg.alpha_,
                          n_resampling = n_resampling,
                          max_iter = max_iter, normalize = True)
    reg.fit(X, y)
    rank = reg.scores_.argsort()[::-1]
    return (rank, reg.scores_[rank])
# Rank the features
def rank(dataset, force_overwrite = False):
    """Rank features of *dataset* by (a) iterative random-forest importance
    and (b) randomized lasso, caching the results in '<dataset>_result'.

    Returns four pandas Series: (rf_names, R2, lasso_names, scores).
    """
    # name of the result table
    res_tb = dataset + '_result'
    # read even when cached. column names are extracted from X
    tb = pd.read_sql_table(dataset, db, index_col = 'ID')
    X = tb.iloc[:, :-1]; y = tb.iloc[:, -1]
    # check if it is cached
    if res_tb in db.table_names() and not force_overwrite:
        # yes, get it
        res = pd.read_sql_table(res_tb, db, index_col = 'index')
    else:
        # no, compute it
        # remove low var columns
        low_var_cols = X.columns[X.var() < 1e-5]
        X.drop(low_var_cols, axis = 1, inplace = True)
        # rank
        (rank1, R2) = rfe_with_grid_search(X.values, y,
                                           RandomForestRegressor(n_jobs = -1),
                                           [{'n_estimators': [5, 10, 30],
                                             'max_features': [1.0]}])
        (rank2, scores) = lassocv_n_random_lasso(X, y)
        res = pd.DataFrame(np.array([X.columns[rank1], R2,
                                     X.columns[rank2], scores]).T,
                           columns = ['rfe_random_forest',
                                      'R2',
                                      'randomized_lasso',
                                      'scores'])
        # cache for subsequent calls
        res.to_sql(res_tb, db, if_exists = 'replace')
    return (res['rfe_random_forest'], res['R2'],
            res['randomized_lasso'], res['scores'])
if __name__ == '__main__':
    # Precompute (and overwrite) cached feature rankings for every dataset.
    datasets = ['dat_full_with_common_params',
                'dat_full_with_lysate_params',
                'dat_full_with_base_flow_params',
                'dat_full_with_lysate_and_base_flow_params',
                'dat_nb_with_common_params',
                'dat_nb_with_scaleup_params',
                'dat_nb_with_common_params_base_flow_params',
                'dat_ref_with_common_params',
                'dat_ref_with_lysate_params',
                'dat_ref_with_base_flow_params']
    for dataset in datasets:
        rank(dataset, force_overwrite = True)
| lzlarryli/limelight | app/parameter_importance.py | Python | mit | 7,053 |
import cv2
import numpy as np
import sys
from collections import OrderedDict
import matplotlib.pyplot as plt
from gaussian_pyramid import *
from lucas_kanade import *
def backwarp(img, flow):
    """Backward-warp *img* along *flow*: output pixel (x, y) samples
    img at (x, y) - flow[y, x], with bilinear interpolation."""
    h, w = flow.shape[:2]
    # Turn the (negated) relative flow into an absolute sampling map.
    flow_map = -flow.copy()
    flow_map[:,:,0] += np.arange(w)                  # x coordinates
    flow_map[:,:,1] += np.arange(h)[:,np.newaxis]    # y coordinates
    warped = cv2.remap(img, flow_map, None, cv2.INTER_LINEAR)
    return warped
def lk_level1(images, levels, winSize):
    """Single-level Lucas-Kanade pass over a frame sequence.

    Returns (flows, warps, diffs): the per-pair optical flows, each later
    frame warped back onto its predecessor, and the BGR frame differences.
    """
    # Gaussian pyramids of every frame.  NOTE(review): this list is built
    # but never consumed below -- kept for behavioural parity.
    gau_pyrs = []
    for frame in images:
        gau_pyrs += gaussian(frame, levels)
    print('Generating Flows')
    # One (negated) flow field per consecutive frame pair.
    flows = [-lk_flow(prev, nxt, winSize)
             for prev, nxt in zip(images[:-1], images[1:])]
    print('Generating Warps')
    # Pull each later frame back along its flow.
    warps = [backwarp(frame, field) for frame, field in zip(images[1:], flows)]
    print('Generating Differences')
    # Residual between each frame and the warped successor.
    diffs = [cv2.subtract(frame, warped) for frame, warped in zip(images, warps)]
    # Convert the grayscale residuals to BGR for saving/plotting.
    diffs_bgr = [cv2.cvtColor(diff, cv2.COLOR_GRAY2BGR) for diff in diffs]
    return flows, warps, diffs_bgr
def draw_flows(images, flows, filename):
    """Save a figure showing each image with its flow field as a quiver plot.

    Parameters:
        images: sequence of grayscale frames.
        flows: sequence of (H, W, 2) flow fields, one per plotted frame.
        filename: path the composite figure is written to.
    """
    # Splitting the images in equal rows and cols
    cols = 2
    rows = int(np.ceil(len(flows) / cols))
    # Plotting all images
    plt.figure(1)
    for i, (img, flow) in enumerate(zip(images, flows)):
        # Fixed: the original packed the position into a single integer
        # (rows*100 + cols*10 + 1 + i).  That form only supports grids up
        # to 9x9, and `rows` was a numpy float (np.ceil), which modern
        # matplotlib rejects.  The three-argument form has neither limit.
        plt.subplot(rows, cols, i + 1)
        plt.imshow(img, cmap='gray', interpolation='bicubic')
        # Sample the flow every `step` pixels; clamp to 1 so images shorter
        # than 30 rows don't produce a zero slice step.
        step = max(1, int(img.shape[0] / 30))
        x = np.arange(0, img.shape[1], 1)
        y = np.arange(0, img.shape[0], 1)
        x, y = np.meshgrid(x, y)
        plt.quiver(x[::step, ::step], y[::step, ::step], flow[::step, ::step, 0], flow[::step, ::step, 1], color='r', pivot='middle', headwidth=5, headlength=5)
    plt.savefig(filename)
    plt.clf()
def warp():
    """Run the LK warping pipeline on DataSeq1 and DataSeq2.

    For each sequence: load three grayscale frames, compute flows, warps
    and difference images, and write the visualisations under results/.
    """
    def process(header, path, name, levels, win_size):
        # One full pipeline run for a single three-frame sequence.
        print(header)
        images = np.array([cv2.imread(path(0), cv2.IMREAD_GRAYSCALE)])
        for idx in range(1, 3):
            frame = cv2.imread(path(idx), cv2.IMREAD_GRAYSCALE)
            images = np.append(images, np.array([frame]), axis=0)
        flows, warps, diffs = lk_level1(images, levels, win_size)
        draw_flows(images, flows, 'results/' + name + '-flowDir.png')
        save_images(diffs, 'results/' + name + '-difference.png')
        print('Flow and Difference images saved successfully')
        cv2.imwrite('results/' + name + '-img1-warped.png', warps[0])
        cv2.imwrite('results/' + name + '-img2-warped.png', warps[1])
        print('Warped Images saved successfully')
        for idx, flow in enumerate(flows):
            cv2.imwrite('results/' + name + '-flows-img' + str(idx + 1) + '.jpg', draw_flow(flow))
    # Sequence 1: Yosemite frames, single pyramid level, 15px window.
    process('Warping Data Sequence 1',
            lambda i: 'resources/DataSeq1/yos_img_0' + str(i + 1) + '.jpg',
            'DataSeq1', 1, 15)
    # Sequence 2: three pyramid levels, 7px window.
    process('\nWarping Data Sequence 2',
            lambda i: 'resources/DataSeq2/' + str(i) + '.png',
            'DataSeq2', 3, 7)
# Run the full warping pipeline when executed as a script.
if __name__ == '__main__':
    warp()
import mobula as M
import mobula.operators as O
import numpy as np
def test_exp():
    """Exp operator: forward matches np.exp, backward applies exp(x)*dY."""
    shape = (2, 3, 4, 5)
    data = np.random.random(shape)
    op = O.Exp(data)
    out = op.eval()
    op.dY = np.random.random(op.Y.shape)
    op.backward()
    expected = np.exp(data)
    assert np.allclose(out, expected)
    # d/dx exp(x) = exp(x), chained with the upstream gradient dY.
    assert np.allclose(op.dX, expected * op.dY)
def test_log():
    """Log operator: forward matches np.log, backward applies (1/x)*dY."""
    shape = (2, 3, 4, 5)
    data = np.random.random(shape)
    # np.random.random may produce exact zeros; log(0) is undefined.
    data[data == 0] = 1.0
    op = O.Log(data)
    out = op.eval()
    op.dY = np.random.random(op.Y.shape)
    op.backward()
    assert np.allclose(out, np.log(data))
    # d/dx log(x) = 1/x, chained with the upstream gradient dY.
    assert np.allclose(op.dX, (1.0 / data) * op.dY)
def test_exp_op():
    """Functional M.exp wrapper agrees with np.exp elementwise."""
    data = np.random.random((2, 3, 4, 5))
    assert np.allclose(M.exp(data).eval(), np.exp(data))
def test_log_op():
    """Functional M.log wrapper agrees with np.log elementwise."""
    data = np.random.random((2, 3, 4, 5))
    # Guard against exact zeros from np.random.random; log(0) is undefined.
    data[data == 0] = 1.0
    assert np.allclose(M.log(data).eval(), np.log(data))
| wkcn/mobula | tests/test_ops/test_exp_log.py | Python | mit | 895 |
# -*- coding: utf-8 -*-
from django.conf import settings
# Number of articles shown per list page; override by defining
# ARTICLE_PER_PAGE in the project's Django settings (defaults to 10).
ARTICLE_PER_PAGE = getattr(settings, 'ARTICLE_PER_PAGE', 10)
| indexofire/gork | src/gork/application/article/settings.py | Python | mit | 120 |
#!/usr/bin/env python
#
# Electrum - lightweight Fujicoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import re
from decimal import Decimal
from PyQt5.QtGui import QFontMetrics
from electrum import bitcoin
from electrum.util import bfh
from electrum.transaction import TxOutput, push_script
from electrum.bitcoin import opcodes
from electrum.logging import Logger
from .qrtextedit import ScanQRTextEdit
from .completion_text_edit import CompletionTextEdit
from . import util
# Matches alias entries of the form "<description> <alphanumeric-key>",
# e.g. "some.alias <address>"; group 2 captures the key/address part.
RE_ALIAS = r'(.*?)\s*\<([0-9A-Za-z]{1,})\>'
# Stylesheets toggled by PayToEdit.setFrozen().
frozen_style = "QWidget { background-color:none; border:none;}"
normal_style = "QPlainTextEdit { }"
class PayToEdit(CompletionTextEdit, ScanQRTextEdit, Logger):
    """Multi-line "Pay to" input widget.

    Accepts a single address or raw script, a "fujicoin:" URI, an
    OpenAlias-style alias, or several "destination, amount" lines
    (pay-to-many).  Parsed outputs are cached in ``self.outputs``; lines
    that fail to parse are recorded in ``self.errors`` as
    (line_number, line_text) pairs.
    """
    def __init__(self, win):
        CompletionTextEdit.__init__(self)
        ScanQRTextEdit.__init__(self)
        Logger.__init__(self)
        self.win = win
        self.amount_edit = win.amount_e
        # re-fit the widget height whenever the document changes
        self.document().contentsChanged.connect(self.update_size)
        self.heightMin = 0
        self.heightMax = 150
        self.c = None
        self.textChanged.connect(self.check_text)
        self.outputs = []  # parsed TxOutput list (pay-to-many mode)
        self.errors = []  # (line_number, line_text) parse failures
        self.is_pr = False  # True while displaying a payment request
        self.is_alias = False  # True once an alias has been resolved
        self.scan_f = win.pay_to_URI  # handler for scanned/pasted URIs
        self.update_size()
        self.payto_address = None  # (type, destination) in single-line mode
        self.previous_payto = ''
    def setFrozen(self, b):
        """Make the field read-only (or editable again) and hide/show its
        side buttons accordingly."""
        self.setReadOnly(b)
        self.setStyleSheet(frozen_style if b else normal_style)
        # self.buttons presumably comes from ScanQRTextEdit -- TODO confirm
        for button in self.buttons:
            button.setHidden(b)
    def setGreen(self):
        """Colour the field green (e.g. validated OpenAlias)."""
        self.setStyleSheet(util.ColorScheme.GREEN.as_stylesheet(True))
    def setExpired(self):
        """Colour the field red (e.g. unvalidated alias / expired request)."""
        self.setStyleSheet(util.ColorScheme.RED.as_stylesheet(True))
    def parse_address_and_amount(self, line):
        """Parse one "destination, amount" line into a TxOutput."""
        x, y = line.split(',')
        out_type, out = self.parse_output(x)
        amount = self.parse_amount(y)
        return TxOutput(out_type, out, amount)
    def parse_output(self, x):
        """Parse a destination: try an address first, fall back to a raw
        script.  Returns (TYPE_ADDRESS, addr) or (TYPE_SCRIPT, script)."""
        try:
            address = self.parse_address(x)
            return bitcoin.TYPE_ADDRESS, address
        except:
            script = self.parse_script(x)
            return bitcoin.TYPE_SCRIPT, script
    def parse_script(self, x):
        """Assemble a hex script from whitespace-separated OP_* names and
        hex data pushes.  Raises on unknown opcodes or non-hex data."""
        script = ''
        for word in x.split():
            if word[0:3] == 'OP_':
                opcode_int = opcodes[word]
                assert opcode_int < 256  # opcode is single-byte
                script += bitcoin.int_to_hex(opcode_int)
            else:
                bfh(word)  # to test it is hex data
                script += push_script(word)
        return script
    def parse_amount(self, x):
        """Parse an amount string into satoshi-like integer units; the
        literal '!' is the "send max" sentinel and is passed through."""
        if x.strip() == '!':
            return '!'
        p = pow(10, self.amount_edit.decimal_point())
        return int(p * Decimal(x.strip()))
    def parse_address(self, line):
        """Extract and validate an address, accepting either a bare address
        or an "alias <address>" form (RE_ALIAS)."""
        r = line.strip()
        m = re.match('^'+RE_ALIAS+'$', r)
        address = str(m.group(2) if m else r)
        assert bitcoin.is_address(address)
        return address
    def check_text(self):
        """Re-parse the widget contents on every text change.

        Single line: dispatch fujicoin: URIs to scan_f, otherwise try to
        parse a lone destination (amount stays editable).  Multiple lines:
        parse each as "destination, amount", accumulate outputs/errors,
        and lock the amount field to the computed total.
        """
        self.errors = []
        if self.is_pr:
            return
        # filter out empty lines
        lines = [i for i in self.lines() if i]
        outputs = []
        total = 0
        self.payto_address = None
        if len(lines) == 1:
            data = lines[0]
            if data.startswith("fujicoin:"):
                self.scan_f(data)
                return
            try:
                self.payto_address = self.parse_output(data)
            except:
                pass
            if self.payto_address:
                self.win.lock_amount(False)
                return
        is_max = False
        for i, line in enumerate(lines):
            try:
                output = self.parse_address_and_amount(line)
            except:
                self.errors.append((i, line.strip()))
                continue
            outputs.append(output)
            if output.value == '!':
                is_max = True
            else:
                total += output.value
        self.win.max_button.setChecked(is_max)
        self.outputs = outputs
        self.payto_address = None
        if self.win.max_button.isChecked():
            self.win.do_update_fee()
        else:
            self.amount_edit.setAmount(total if outputs else None)
            self.win.lock_amount(total or len(lines)>1)
    def get_errors(self):
        """Return the (line_number, line_text) parse failures."""
        return self.errors
    def get_recipient(self):
        """Return the single-line (type, destination) tuple, or None."""
        return self.payto_address
    def get_outputs(self, is_max):
        """Return a copy of the output list.

        In single-destination mode the amount comes from the amount field
        ('!' when *is_max* is set); in pay-to-many mode the outputs cached
        by check_text() are returned unchanged.
        """
        if self.payto_address:
            if is_max:
                amount = '!'
            else:
                amount = self.amount_edit.get_amount()
            _type, addr = self.payto_address
            self.outputs = [TxOutput(_type, addr, amount)]
        return self.outputs[:]
    def lines(self):
        """Return the widget text split into lines."""
        return self.toPlainText().split('\n')
    def is_multiline(self):
        """True when more than one line has been entered."""
        return len(self.lines()) > 1
    def paytomany(self):
        """Switch to pay-to-many mode by pre-filling several blank lines."""
        self.setText("\n\n\n")
        self.update_size()
    def update_size(self):
        """Grow/shrink the widget to fit its content between heightMin
        and heightMax, hiding the scrollbar."""
        lineHeight = QFontMetrics(self.document().defaultFont()).height()
        docHeight = self.document().size().height()
        # +11: extra margin so the last line isn't clipped -- presumably
        # accounts for widget padding/borders
        h = docHeight * lineHeight + 11
        h = min(max(h, self.heightMin), self.heightMax)
        self.setMinimumHeight(h)
        self.setMaximumHeight(h)
        self.verticalScrollBar().hide()
    def qr_input(self):
        """Handle a scanned QR code; fujicoin: URIs go through scan_f."""
        data = super(PayToEdit,self).qr_input()
        if data.startswith("fujicoin:"):
            self.scan_f(data)
            # TODO: update fee
    def resolve(self):
        """Try to resolve the entered text as an OpenAlias-style contact.

        Only runs for a single-line, non-payment-request entry that looks
        like a domain (contains '.', no '<' or spaces) and is not already
        an address.  On success the field is replaced with
        "alias <address>", frozen, added to contacts, and coloured by
        validation state.
        """
        self.is_alias = False
        if self.hasFocus():
            return
        if self.is_multiline():  # only supports single line entries atm
            return
        if self.is_pr:
            return
        key = str(self.toPlainText())
        key = key.strip()  # strip whitespaces
        # avoid re-resolving the same text on every call
        if key == self.previous_payto:
            return
        self.previous_payto = key
        if not (('.' in key) and (not '<' in key) and (not ' ' in key)):
            return
        parts = key.split(sep=',')  # assuming single line
        if parts and len(parts) > 0 and bitcoin.is_address(parts[0]):
            return
        try:
            data = self.win.contacts.resolve(key)
        except Exception as e:
            self.logger.info(f'error resolving address/alias: {repr(e)}')
            return
        if not data:
            return
        self.is_alias = True
        address = data.get('address')
        name = data.get('name')
        new_url = key + ' <' + address + '>'
        self.setText(new_url)
        self.previous_payto = new_url
        #if self.win.config.get('openalias_autoadd') == 'checked':
        self.win.contacts[key] = ('openalias', name)
        self.win.contact_list.update()
        self.setFrozen(True)
        if data.get('type') == 'openalias':
            self.validated = data.get('validated')
            if self.validated:
                self.setGreen()
            else:
                self.setExpired()
        else:
            self.validated = None
| fujicoin/electrum-fjc | electrum/gui/qt/paytoedit.py | Python | mit | 8,129 |
# Problem:
# Read an integer n from the console and print the powers of two
# from 2^0 up to and including 2^n, one per line.
n = int(input())
for exponent in range(n + 1):
    print(2 ** exponent)
| YaniLozanov/Software-University | Python/PyCharm/07.Advanced Loops/03. Powers of Two.py | Python | mit | 176 |
# -*- coding: utf-8 *-*
from redis._compat import b, iteritems, iterkeys
class TestListCommands(object):
    """Integration tests for the redis list commands (LPUSH/RPOP/BLPOP/...),
    run against a live server through the ``r`` (default) and ``sr``
    (strict) client fixtures supplied by the test harness."""
    def test_binary_lists(self, r):
        # keys and values with whitespace/control bytes must round-trip
        mapping = {
            b('foo bar'): [b('1'), b('2'), b('3')],
            b('foo\r\nbar\r\n'): [b('4'), b('5'), b('6')],
            b('foo\tbar\x07'): [b('7'), b('8'), b('9')],
        }
        # fill in lists
        for key, value in iteritems(mapping):
            r.rpush(key, *value)
        # check that KEYS returns all the keys as they are
        assert sorted(r.keys('*')) == sorted(list(iterkeys(mapping)))
        # check that it is possible to get list content by key name
        for key, value in iteritems(mapping):
            assert r.lrange(key, 0, -1) == value
    def test_blpop(self, r):
        # blocking pop consumes the listed keys in priority order
        r.rpush('a', '1', '2')
        r.rpush('b', '3', '4')
        assert r.blpop(['b', 'a'], timeout=1) == (b('b'), b('3'))
        assert r.blpop(['b', 'a'], timeout=1) == (b('b'), b('4'))
        assert r.blpop(['b', 'a'], timeout=1) == (b('a'), b('1'))
        assert r.blpop(['b', 'a'], timeout=1) == (b('a'), b('2'))
        assert r.blpop(['b', 'a'], timeout=1) is None
        r.rpush('c', '1')
        assert r.blpop('c', timeout=1) == (b('c'), b('1'))
    def test_brpop(self, r):
        r.rpush('a', '1', '2')
        r.rpush('b', '3', '4')
        assert r.brpop(['b', 'a'], timeout=1) == (b('b'), b('4'))
        assert r.brpop(['b', 'a'], timeout=1) == (b('b'), b('3'))
        assert r.brpop(['b', 'a'], timeout=1) == (b('a'), b('2'))
        assert r.brpop(['b', 'a'], timeout=1) == (b('a'), b('1'))
        assert r.brpop(['b', 'a'], timeout=1) is None
        r.rpush('c', '1')
        assert r.brpop('c', timeout=1) == (b('c'), b('1'))
    def test_brpoplpush(self, r):
        r.rpush('a', '1', '2')
        r.rpush('b', '3', '4')
        assert r.brpoplpush('a', 'b') == b('2')
        assert r.brpoplpush('a', 'b') == b('1')
        assert r.brpoplpush('a', 'b', timeout=1) is None
        assert r.lrange('a', 0, -1) == []
        assert r.lrange('b', 0, -1) == [b('1'), b('2'), b('3'), b('4')]
    def test_brpoplpush_empty_string(self, r):
        # the empty string is a valid element, distinct from a None timeout
        r.rpush('a', '')
        assert r.brpoplpush('a', 'b') == b('')
    def test_lindex(self, r):
        r.rpush('a', '1', '2', '3')
        assert r.lindex('a', '0') == b('1')
        assert r.lindex('a', '1') == b('2')
        assert r.lindex('a', '2') == b('3')
    def test_linsert(self, r):
        r.rpush('a', '1', '2', '3')
        assert r.linsert('a', 'after', '2', '2.5') == 4
        assert r.lrange('a', 0, -1) == [b('1'), b('2'), b('2.5'), b('3')]
        assert r.linsert('a', 'before', '2', '1.5') == 5
        assert r.lrange('a', 0, -1) == \
            [b('1'), b('1.5'), b('2'), b('2.5'), b('3')]
    def test_llen(self, r):
        r.rpush('a', '1', '2', '3')
        assert r.llen('a') == 3
    def test_lpop(self, r):
        r.rpush('a', '1', '2', '3')
        assert r.lpop('a') == b('1')
        assert r.lpop('a') == b('2')
        assert r.lpop('a') == b('3')
        assert r.lpop('a') is None
    def test_lpush(self, r):
        assert r.lpush('a', '1') == 1
        assert r.lpush('a', '2') == 2
        assert r.lpush('a', '3', '4') == 4
        assert r.lrange('a', 0, -1) == [b('4'), b('3'), b('2'), b('1')]
    def test_lpushx(self, r):
        # LPUSHX is a no-op when the key does not exist
        assert r.lpushx('a', '1') == 0
        assert r.lrange('a', 0, -1) == []
        r.rpush('a', '1', '2', '3')
        assert r.lpushx('a', '4') == 4
        assert r.lrange('a', 0, -1) == [b('4'), b('1'), b('2'), b('3')]
    def test_lrange(self, r):
        r.rpush('a', '1', '2', '3', '4', '5')
        assert r.lrange('a', 0, 2) == [b('1'), b('2'), b('3')]
        assert r.lrange('a', 2, 10) == [b('3'), b('4'), b('5')]
        assert r.lrange('a', 0, -1) == [b('1'), b('2'), b('3'), b('4'), b('5')]
    def test_lrem(self, r):
        r.rpush('a', '1', '1', '1', '1')
        assert r.lrem('a', '1', 1) == 1
        assert r.lrange('a', 0, -1) == [b('1'), b('1'), b('1')]
        assert r.lrem('a', '1') == 3
        assert r.lrange('a', 0, -1) == []
    def test_strict_lrem(self, sr):
        # the strict client uses the server's (count, value) argument order
        sr.rpush('a', 'a1', 'a2', 'a3', 'a1')
        sr.lrem('a', 0, 'a1')
        assert sr.lrange('a', 0, -1) == [b('a2'), b('a3')]
    def test_lset(self, r):
        r.rpush('a', '1', '2', '3')
        assert r.lrange('a', 0, -1) == [b('1'), b('2'), b('3')]
        assert r.lset('a', 1, '4')
        assert r.lrange('a', 0, 2) == [b('1'), b('4'), b('3')]
    def test_ltrim(self, r):
        r.rpush('a', '1', '2', '3')
        assert r.ltrim('a', 0, 1)
        assert r.lrange('a', 0, -1) == [b('1'), b('2')]
    def test_rpop(self, r):
        r.rpush('a', '1', '2', '3')
        assert r.rpop('a') == b('3')
        assert r.rpop('a') == b('2')
        assert r.rpop('a') == b('1')
        assert r.rpop('a') is None
    def test_rpoplpush(self, r):
        r.rpush('a', 'a1', 'a2', 'a3')
        r.rpush('b', 'b1', 'b2', 'b3')
        assert r.rpoplpush('a', 'b') == b('a3')
        assert r.lrange('a', 0, -1) == [b('a1'), b('a2')]
        assert r.lrange('b', 0, -1) == [b('a3'), b('b1'), b('b2'), b('b3')]
    def test_rpush(self, r):
        assert r.rpush('a', '1') == 1
        assert r.rpush('a', '2') == 2
        assert r.rpush('a', '3', '4') == 4
        assert r.lrange('a', 0, -1) == [b('1'), b('2'), b('3'), b('4')]
    def test_rpushx(self, r):
        # RPUSHX is a no-op when the key does not exist
        assert r.rpushx('a', 'b') == 0
        assert r.lrange('a', 0, -1) == []
        r.rpush('a', '1', '2', '3')
        assert r.rpushx('a', '4') == 4
        assert r.lrange('a', 0, -1) == [b('1'), b('2'), b('3'), b('4')]
| katakumpo/niceredis | tests/commands/test_list.py | Python | mit | 5,629 |
from setuptools import setup, find_packages
# Package metadata for the BioModels entry MODEL1110130000.
setup(name='MODEL1110130000',
      # setuptools expects the version as a string; the original passed
      # the bare integer 20140916.
      version='20140916',
      description='MODEL1110130000 from BioModels',
      url='http://www.ebi.ac.uk/biomodels-main/MODEL1110130000',
      maintainer='Stanley Gu',
      # Fixed: the email address was passed through `maintainer_url`,
      # which is not a setup() keyword; the correct one is
      # `maintainer_email`.
      maintainer_email='[email protected]',
      packages=find_packages(),
      # ship the model XML and README found in each package directory
      package_data={'': ['*.xml', 'README.md']},
      )
Subsets and Splits