commit stringlengths 40-40 | old_file stringlengths 5-117 | new_file stringlengths 5-117 | old_contents stringlengths 0-1.93k | new_contents stringlengths 19-3.3k | subject stringlengths 17-320 | message stringlengths 18-3.28k | lang stringclasses 1 value | license stringclasses 13 values | repos stringlengths 7-42.4k | completion stringlengths 152-6.66k | prompt stringlengths 21-3.65k |
---|---|---|---|---|---|---|---|---|---|---|---|
0ebac1925b3d4b32188a6f2c9e40760b21d933ce | backend/uclapi/dashboard/app_helpers.py | backend/uclapi/dashboard/app_helpers.py | from binascii import hexlify
import os
def generate_api_token():
key = hexlify(os.urandom(30)).decode()
dashes_key = ""
for idx, char in enumerate(key):
if idx % 15 == 0 and idx != len(key)-1:
dashes_key += "-"
else:
dashes_key += char
final = "uclapi" + dashes_key
return final
def generate_app_id():
key = hexlify(os.urandom(5)).decode()
final = "A" + key
return final
| from binascii import hexlify
from random import choice
import os
import string
def generate_api_token():
key = hexlify(os.urandom(30)).decode()
dashes_key = ""
for idx, char in enumerate(key):
if idx % 15 == 0 and idx != len(key)-1:
dashes_key += "-"
else:
dashes_key += char
final = "uclapi" + dashes_key
return final
def generate_app_id():
key = hexlify(os.urandom(5)).decode()
final = "A" + key
return final
def generate_app_client_id():
client_id = ''.join(random.choice(string.digits, k=16))
client_id += "."
client_id += ''.join(random.choice(string.digits, k=16))
return client_id
def generate_app_client_secret():
client_secret = ''.join(random.choice(string.ascii_lowercase + string.digits, k=64))
return client_secret | Add helpers to the dashboard code to generate OAuth keys | Add helpers to the dashboard code to generate OAuth keys
| Python | mit | uclapi/uclapi,uclapi/uclapi,uclapi/uclapi,uclapi/uclapi | <REPLACE_OLD> hexlify
import os
def <REPLACE_NEW> hexlify
from random import choice
import os
import string
def <REPLACE_END> <REPLACE_OLD> final
<REPLACE_NEW> final
def generate_app_client_id():
client_id = ''.join(random.choice(string.digits, k=16))
client_id += "."
client_id += ''.join(random.choice(string.digits, k=16))
return client_id
def generate_app_client_secret():
client_secret = ''.join(random.choice(string.ascii_lowercase + string.digits, k=64))
return client_secret <REPLACE_END> <|endoftext|> from binascii import hexlify
from random import choice
import os
import string
def generate_api_token():
key = hexlify(os.urandom(30)).decode()
dashes_key = ""
for idx, char in enumerate(key):
if idx % 15 == 0 and idx != len(key)-1:
dashes_key += "-"
else:
dashes_key += char
final = "uclapi" + dashes_key
return final
def generate_app_id():
key = hexlify(os.urandom(5)).decode()
final = "A" + key
return final
def generate_app_client_id():
client_id = ''.join(random.choice(string.digits, k=16))
client_id += "."
client_id += ''.join(random.choice(string.digits, k=16))
return client_id
def generate_app_client_secret():
client_secret = ''.join(random.choice(string.ascii_lowercase + string.digits, k=64))
return client_secret | Add helpers to the dashboard code to generate OAuth keys
from binascii import hexlify
import os
def generate_api_token():
key = hexlify(os.urandom(30)).decode()
dashes_key = ""
for idx, char in enumerate(key):
if idx % 15 == 0 and idx != len(key)-1:
dashes_key += "-"
else:
dashes_key += char
final = "uclapi" + dashes_key
return final
def generate_app_id():
key = hexlify(os.urandom(5)).decode()
final = "A" + key
return final
|
6602471252a7c8e3dd3ab94db54e45fccfc6e62f | yarn_api_client/__init__.py | yarn_api_client/__init__.py | # -*- coding: utf-8 -*-
__version__ = '0.3.6'
__all__ = ['ApplicationMaster', 'HistoryServer', 'NodeManager', 'ResourceManager']
from .application_master import ApplicationMaster
from .history_server import HistoryServer
from .node_manager import NodeManager
from .resource_manager import ResourceManager
| # -*- coding: utf-8 -*-
__version__ = '0.3.7.dev'
__all__ = ['ApplicationMaster', 'HistoryServer', 'NodeManager', 'ResourceManager']
from .application_master import ApplicationMaster
from .history_server import HistoryServer
from .node_manager import NodeManager
from .resource_manager import ResourceManager
| Prepare for next development iteration | Prepare for next development iteration
| Python | bsd-3-clause | toidi/hadoop-yarn-api-python-client | <REPLACE_OLD> '0.3.6'
__all__ <REPLACE_NEW> '0.3.7.dev'
__all__ <REPLACE_END> <|endoftext|> # -*- coding: utf-8 -*-
__version__ = '0.3.7.dev'
__all__ = ['ApplicationMaster', 'HistoryServer', 'NodeManager', 'ResourceManager']
from .application_master import ApplicationMaster
from .history_server import HistoryServer
from .node_manager import NodeManager
from .resource_manager import ResourceManager
| Prepare for next development iteration
# -*- coding: utf-8 -*-
__version__ = '0.3.6'
__all__ = ['ApplicationMaster', 'HistoryServer', 'NodeManager', 'ResourceManager']
from .application_master import ApplicationMaster
from .history_server import HistoryServer
from .node_manager import NodeManager
from .resource_manager import ResourceManager
|
9baf9ede15fa988b5da711605b67cc5bbbbc5b36 | wanorlan/wanorlan.py | wanorlan/wanorlan.py | import time
import datetime
import subprocess
import json
import sys
import re
PING_RTT_REGEX = re.compile('rtt.+=\s*([\d.]+)')
def get_status(ip, timeout):
t0 = time.time()
error = subprocess.call(['ping', '-c', '1', '-W', str(timeout), ip],
stdout=sys.stderr.fileno(),
stderr=sys.stderr.fileno())
delay = time.time() - t0
return None if error else delay
def log_status(hosts, timeout):
now = datetime.datetime.now().isoformat(timespec='seconds')
processes = []
for host in hosts:
processes.append(subprocess.Popen(
['ping', '-qnc', '1', '-W', str(timeout), host],
stdout=subprocess.PIPE))
results = dict(time=now)
for host, process in zip(hosts, processes):
if process.wait():
results[host] = None
else:
last_line = list(process.stdout)[-1].strip().decode('utf8')
results[host] = float(PING_RTT_REGEX.match(last_line).group(1))
return results
TIMEOUT = 2
INTERVAL = 15
HOSTS = ['localhost', '192.168.1.1', '1.1.1.1', '8.8.8.8', 'www.google.co.uk']
if __name__ == '__main__':
t0 = time.time()
while True:
time.sleep(max(0, t0 + INTERVAL - time.time()))
t0 = time.time()
print(json.dumps(log_status(HOSTS, timeout=TIMEOUT)), flush=True)
| Add simple script for logging to diagnose WAN vs LAN connection issues | Add simple script for logging to diagnose WAN vs LAN connection issues
| Python | mit | DouglasOrr/Snippets,DouglasOrr/Snippets,DouglasOrr/Snippets,DouglasOrr/Snippets,DouglasOrr/Snippets,DouglasOrr/Snippets,DouglasOrr/Snippets,DouglasOrr/Snippets | <REPLACE_OLD> <REPLACE_NEW> import time
import datetime
import subprocess
import json
import sys
import re
PING_RTT_REGEX = re.compile('rtt.+=\s*([\d.]+)')
def get_status(ip, timeout):
t0 = time.time()
error = subprocess.call(['ping', '-c', '1', '-W', str(timeout), ip],
stdout=sys.stderr.fileno(),
stderr=sys.stderr.fileno())
delay = time.time() - t0
return None if error else delay
def log_status(hosts, timeout):
now = datetime.datetime.now().isoformat(timespec='seconds')
processes = []
for host in hosts:
processes.append(subprocess.Popen(
['ping', '-qnc', '1', '-W', str(timeout), host],
stdout=subprocess.PIPE))
results = dict(time=now)
for host, process in zip(hosts, processes):
if process.wait():
results[host] = None
else:
last_line = list(process.stdout)[-1].strip().decode('utf8')
results[host] = float(PING_RTT_REGEX.match(last_line).group(1))
return results
TIMEOUT = 2
INTERVAL = 15
HOSTS = ['localhost', '192.168.1.1', '1.1.1.1', '8.8.8.8', 'www.google.co.uk']
if __name__ == '__main__':
t0 = time.time()
while True:
time.sleep(max(0, t0 + INTERVAL - time.time()))
t0 = time.time()
print(json.dumps(log_status(HOSTS, timeout=TIMEOUT)), flush=True)
<REPLACE_END> <|endoftext|> import time
import datetime
import subprocess
import json
import sys
import re
PING_RTT_REGEX = re.compile('rtt.+=\s*([\d.]+)')
def get_status(ip, timeout):
t0 = time.time()
error = subprocess.call(['ping', '-c', '1', '-W', str(timeout), ip],
stdout=sys.stderr.fileno(),
stderr=sys.stderr.fileno())
delay = time.time() - t0
return None if error else delay
def log_status(hosts, timeout):
now = datetime.datetime.now().isoformat(timespec='seconds')
processes = []
for host in hosts:
processes.append(subprocess.Popen(
['ping', '-qnc', '1', '-W', str(timeout), host],
stdout=subprocess.PIPE))
results = dict(time=now)
for host, process in zip(hosts, processes):
if process.wait():
results[host] = None
else:
last_line = list(process.stdout)[-1].strip().decode('utf8')
results[host] = float(PING_RTT_REGEX.match(last_line).group(1))
return results
TIMEOUT = 2
INTERVAL = 15
HOSTS = ['localhost', '192.168.1.1', '1.1.1.1', '8.8.8.8', 'www.google.co.uk']
if __name__ == '__main__':
t0 = time.time()
while True:
time.sleep(max(0, t0 + INTERVAL - time.time()))
t0 = time.time()
print(json.dumps(log_status(HOSTS, timeout=TIMEOUT)), flush=True)
| Add simple script for logging to diagnose WAN vs LAN connection issues
|
|
bef2796fc1df98d15c7198ee26b2526f42150b59 | infrastructure/migrations/0016_auto_20210907_0131.py | infrastructure/migrations/0016_auto_20210907_0131.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-09-06 23:31
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('infrastructure', '0015_financialyear_active'),
]
operations = [
migrations.CreateModel(
name='AnnualSpendFile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('document', models.FileField(upload_to='annual/')),
('status', models.IntegerField(default=3)),
('financial_year', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='infrastructure.FinancialYear')),
],
),
migrations.AlterField(
model_name='budgetphase',
name='code',
field=models.CharField(blank=True, max_length=10),
),
]
| Add migration for annual spend changes | Add migration for annual spend changes
| Python | mit | Code4SA/municipal-data,Code4SA/municipal-data,Code4SA/municipal-data,Code4SA/municipal-data | <REPLACE_OLD> <REPLACE_NEW> # -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-09-06 23:31
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('infrastructure', '0015_financialyear_active'),
]
operations = [
migrations.CreateModel(
name='AnnualSpendFile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('document', models.FileField(upload_to='annual/')),
('status', models.IntegerField(default=3)),
('financial_year', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='infrastructure.FinancialYear')),
],
),
migrations.AlterField(
model_name='budgetphase',
name='code',
field=models.CharField(blank=True, max_length=10),
),
]
<REPLACE_END> <|endoftext|> # -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-09-06 23:31
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('infrastructure', '0015_financialyear_active'),
]
operations = [
migrations.CreateModel(
name='AnnualSpendFile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('document', models.FileField(upload_to='annual/')),
('status', models.IntegerField(default=3)),
('financial_year', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='infrastructure.FinancialYear')),
],
),
migrations.AlterField(
model_name='budgetphase',
name='code',
field=models.CharField(blank=True, max_length=10),
),
]
| Add migration for annual spend changes
|
|
842007194a9a5736d8e33d6152cd1bfe934e24bc | smashcache/cache/filler.py | smashcache/cache/filler.py | # Copyright (c) 2015 Sachi King
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests
def getHeaders(url):
r = requests.head(url)
if r.status_code != 200:
print("Server returned" + r.status_code)
return None
return r.headers
def fetchRangeToFile(url, byte_range, destination_path):
print("Fetching: %s range: %s to: %s" %
(url, byte_range, destination_path))
headers = {'Range': ("bytes=%s-%s" %
(byte_range[0], byte_range[1]))}
r = requests.get(url, headers=headers, stream=True)
with open(destination_path, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
f.flush()
| # Copyright (c) 2015 Sachi King
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests
def getHeaders(url):
r = requests.head(url)
if r.status_code != 200:
print("Server returned %s" % r.status_code)
return None
return r.headers
def fetchRangeToFile(url, byte_range, destination_path):
print("Fetching: %s range: %s to: %s" %
(url, byte_range, destination_path))
headers = {'Range': ("bytes=%s-%s" %
(byte_range[0], byte_range[1]))}
r = requests.get(url, headers=headers, stream=True)
with open(destination_path, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
f.flush()
| Fix print with subsition instead of concat | Fix print with subsition instead of concat
| Python | apache-2.0 | nakato/smashcache | <REPLACE_OLD> returned" + <REPLACE_NEW> returned %s" % <REPLACE_END> <|endoftext|> # Copyright (c) 2015 Sachi King
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests
def getHeaders(url):
r = requests.head(url)
if r.status_code != 200:
print("Server returned %s" % r.status_code)
return None
return r.headers
def fetchRangeToFile(url, byte_range, destination_path):
print("Fetching: %s range: %s to: %s" %
(url, byte_range, destination_path))
headers = {'Range': ("bytes=%s-%s" %
(byte_range[0], byte_range[1]))}
r = requests.get(url, headers=headers, stream=True)
with open(destination_path, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
f.flush()
| Fix print with subsition instead of concat
# Copyright (c) 2015 Sachi King
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests
def getHeaders(url):
r = requests.head(url)
if r.status_code != 200:
print("Server returned" + r.status_code)
return None
return r.headers
def fetchRangeToFile(url, byte_range, destination_path):
print("Fetching: %s range: %s to: %s" %
(url, byte_range, destination_path))
headers = {'Range': ("bytes=%s-%s" %
(byte_range[0], byte_range[1]))}
r = requests.get(url, headers=headers, stream=True)
with open(destination_path, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
f.flush()
|
d5049edc8567cebf936bb07847906c5400f9a6d9 | ceph_deploy/tests/unit/hosts/test_suse.py | ceph_deploy/tests/unit/hosts/test_suse.py | from ceph_deploy.hosts import suse
class TestSuseInit(object):
def setup(self):
self.host = suse
def test_choose_init_default(self):
self.host.release = None
init_type = self.host.choose_init()
assert init_type == "sysvinit"
def test_choose_init_SLE_11(self):
self.host.release = '11'
init_type = self.host.choose_init()
assert init_type == "sysvinit"
def test_choose_init_SLE_12(self):
self.host.release = '12'
init_type = self.host.choose_init()
assert init_type == "systemd"
def test_choose_init_openSUSE_13_1(self):
self.host.release = '13.1'
init_type = self.host.choose_init()
assert init_type == "systemd"
| from ceph_deploy.hosts import suse
from ceph_deploy.hosts.suse.install import map_components
class TestSuseInit(object):
def setup(self):
self.host = suse
def test_choose_init_default(self):
self.host.release = None
init_type = self.host.choose_init()
assert init_type == "sysvinit"
def test_choose_init_SLE_11(self):
self.host.release = '11'
init_type = self.host.choose_init()
assert init_type == "sysvinit"
def test_choose_init_SLE_12(self):
self.host.release = '12'
init_type = self.host.choose_init()
assert init_type == "systemd"
def test_choose_init_openSUSE_13_1(self):
self.host.release = '13.1'
init_type = self.host.choose_init()
assert init_type == "systemd"
class TestSuseMapComponents(object):
def test_valid(self):
pkgs = map_components(['ceph-osd', 'ceph-common', 'ceph-radosgw'])
assert 'ceph' in pkgs
assert 'ceph-common' in pkgs
assert 'ceph-radosgw' in pkgs
assert 'ceph-osd' not in pkgs
def test_invalid(self):
pkgs = map_components(['not-provided', 'ceph-mon'])
assert 'not-provided' not in pkgs
assert 'ceph' in pkgs
| Add tests for component to SUSE package mapping | Add tests for component to SUSE package mapping
Signed-off-by: David Disseldorp <[email protected]>
| Python | mit | zhouyuan/ceph-deploy,shenhequnying/ceph-deploy,ceph/ceph-deploy,ghxandsky/ceph-deploy,zhouyuan/ceph-deploy,imzhulei/ceph-deploy,SUSE/ceph-deploy,Vicente-Cheng/ceph-deploy,ceph/ceph-deploy,branto1/ceph-deploy,trhoden/ceph-deploy,trhoden/ceph-deploy,osynge/ceph-deploy,ghxandsky/ceph-deploy,SUSE/ceph-deploy,branto1/ceph-deploy,codenrhoden/ceph-deploy,isyippee/ceph-deploy,isyippee/ceph-deploy,Vicente-Cheng/ceph-deploy,shenhequnying/ceph-deploy,osynge/ceph-deploy,imzhulei/ceph-deploy,codenrhoden/ceph-deploy | <REPLACE_OLD>
class <REPLACE_NEW>
from ceph_deploy.hosts.suse.install import map_components
class <REPLACE_END> <REPLACE_OLD> "systemd"
<REPLACE_NEW> "systemd"
class TestSuseMapComponents(object):
def test_valid(self):
pkgs = map_components(['ceph-osd', 'ceph-common', 'ceph-radosgw'])
assert 'ceph' in pkgs
assert 'ceph-common' in pkgs
assert 'ceph-radosgw' in pkgs
assert 'ceph-osd' not in pkgs
def test_invalid(self):
pkgs = map_components(['not-provided', 'ceph-mon'])
assert 'not-provided' not in pkgs
assert 'ceph' in pkgs
<REPLACE_END> <|endoftext|> from ceph_deploy.hosts import suse
from ceph_deploy.hosts.suse.install import map_components
class TestSuseInit(object):
def setup(self):
self.host = suse
def test_choose_init_default(self):
self.host.release = None
init_type = self.host.choose_init()
assert init_type == "sysvinit"
def test_choose_init_SLE_11(self):
self.host.release = '11'
init_type = self.host.choose_init()
assert init_type == "sysvinit"
def test_choose_init_SLE_12(self):
self.host.release = '12'
init_type = self.host.choose_init()
assert init_type == "systemd"
def test_choose_init_openSUSE_13_1(self):
self.host.release = '13.1'
init_type = self.host.choose_init()
assert init_type == "systemd"
class TestSuseMapComponents(object):
def test_valid(self):
pkgs = map_components(['ceph-osd', 'ceph-common', 'ceph-radosgw'])
assert 'ceph' in pkgs
assert 'ceph-common' in pkgs
assert 'ceph-radosgw' in pkgs
assert 'ceph-osd' not in pkgs
def test_invalid(self):
pkgs = map_components(['not-provided', 'ceph-mon'])
assert 'not-provided' not in pkgs
assert 'ceph' in pkgs
| Add tests for component to SUSE package mapping
Signed-off-by: David Disseldorp <[email protected]>
from ceph_deploy.hosts import suse
class TestSuseInit(object):
def setup(self):
self.host = suse
def test_choose_init_default(self):
self.host.release = None
init_type = self.host.choose_init()
assert init_type == "sysvinit"
def test_choose_init_SLE_11(self):
self.host.release = '11'
init_type = self.host.choose_init()
assert init_type == "sysvinit"
def test_choose_init_SLE_12(self):
self.host.release = '12'
init_type = self.host.choose_init()
assert init_type == "systemd"
def test_choose_init_openSUSE_13_1(self):
self.host.release = '13.1'
init_type = self.host.choose_init()
assert init_type == "systemd"
|
b74be667803abed58c08a298d5a806692d2fab74 | setup.py | setup.py | #!/usr/bin/env python
# -*- coding: utf8 -*-
import os.path
from setuptools import setup, find_packages
def get_version():
"""
Loads the current module version from version.py and returns
it.
:returns: module version identifier.
:rtype: str
"""
local_results = {}
version_file_path = os.path.join('pytextql', 'version.py')
# This is compatible with py3k which removed execfile.
with open(version_file_path, 'rb') as fin:
# Compiling instead of passing the text straight to exec
# associates any errors with the correct file name.
code = compile(fin.read(), version_file_path, 'exec')
exec(code, {}, local_results)
return local_results['__version__']
if __name__ == '__main__':
setup(
name='pytextql',
version=get_version(),
long_description=__doc__,
packages=find_packages(),
include_package_data=True,
install_requires=[
'docopt'
],
scripts=[
'pytextql/pytextql'
]
)
| #!/usr/bin/env python
# -*- coding: utf8 -*-
import os.path
from setuptools import setup, find_packages
def get_version():
"""
Loads the current module version from version.py and returns
it.
:returns: module version identifier.
:rtype: str
"""
local_results = {}
version_file_path = os.path.join('pytextql', 'version.py')
# This is compatible with py3k which removed execfile.
with open(version_file_path, 'rb') as fin:
# Compiling instead of passing the text straight to exec
# associates any errors with the correct file name.
code = compile(fin.read(), version_file_path, 'exec')
exec(code, {}, local_results)
return local_results['__version__']
if __name__ == '__main__':
with open('README.md', 'rb') as readme:
long_description = readme.read()
setup(
name='pytextql',
version=get_version(),
long_description=long_description,
packages=find_packages(),
include_package_data=True,
install_requires=[
'docopt'
],
scripts=[
'pytextql/pytextql'
]
)
| Use README.md for the long description. | Use README.md for the long description.
| Python | mit | TkTech/pytextql | <INSERT> with open('README.md', 'rb') as readme:
long_description = readme.read()
<INSERT_END> <REPLACE_OLD> long_description=__doc__,
<REPLACE_NEW> long_description=long_description,
<REPLACE_END> <|endoftext|> #!/usr/bin/env python
# -*- coding: utf8 -*-
import os.path
from setuptools import setup, find_packages
def get_version():
"""
Loads the current module version from version.py and returns
it.
:returns: module version identifier.
:rtype: str
"""
local_results = {}
version_file_path = os.path.join('pytextql', 'version.py')
# This is compatible with py3k which removed execfile.
with open(version_file_path, 'rb') as fin:
# Compiling instead of passing the text straight to exec
# associates any errors with the correct file name.
code = compile(fin.read(), version_file_path, 'exec')
exec(code, {}, local_results)
return local_results['__version__']
if __name__ == '__main__':
with open('README.md', 'rb') as readme:
long_description = readme.read()
setup(
name='pytextql',
version=get_version(),
long_description=long_description,
packages=find_packages(),
include_package_data=True,
install_requires=[
'docopt'
],
scripts=[
'pytextql/pytextql'
]
)
| Use README.md for the long description.
#!/usr/bin/env python
# -*- coding: utf8 -*-
import os.path
from setuptools import setup, find_packages
def get_version():
"""
Loads the current module version from version.py and returns
it.
:returns: module version identifier.
:rtype: str
"""
local_results = {}
version_file_path = os.path.join('pytextql', 'version.py')
# This is compatible with py3k which removed execfile.
with open(version_file_path, 'rb') as fin:
# Compiling instead of passing the text straight to exec
# associates any errors with the correct file name.
code = compile(fin.read(), version_file_path, 'exec')
exec(code, {}, local_results)
return local_results['__version__']
if __name__ == '__main__':
setup(
name='pytextql',
version=get_version(),
long_description=__doc__,
packages=find_packages(),
include_package_data=True,
install_requires=[
'docopt'
],
scripts=[
'pytextql/pytextql'
]
)
|
e2ce9ad697cd686e91b546f6f3aa7b24b5e9266f | masters/master.tryserver.chromium.angle/master_site_config.py | masters/master.tryserver.chromium.angle/master_site_config.py | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""ActiveMaster definition."""
from config_bootstrap import Master
class TryServerANGLE(Master.Master4a):
project_name = 'ANGLE Try Server'
master_port = 21403
slave_port = 31403
master_port_alt = 41403
buildbot_url = 'http://build.chromium.org/p/tryserver.chromium.angle/'
gerrit_host = 'https://chromium-review.googlesource.com'
| # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""ActiveMaster definition."""
from config_bootstrap import Master
class TryServerANGLE(Master.Master4a):
project_name = 'ANGLE Try Server'
master_port = 21403
slave_port = 31403
master_port_alt = 41403
buildbot_url = 'http://build.chromium.org/p/tryserver.chromium.angle/'
gerrit_host = 'https://chromium-review.googlesource.com'
service_account_file = 'service-account-chromium-tryserver.json'
buildbucket_bucket = 'master.tryserver.chromium.linux'
| Add buildbucket service account to Angle master. | Add buildbucket service account to Angle master.
BUG=577560
[email protected]
Review URL: https://codereview.chromium.org/1624703003
git-svn-id: 239fca9b83025a0b6f823aeeca02ba5be3d9fd76@298368 0039d316-1c4b-4281-b951-d872f2087c98
| Python | bsd-3-clause | eunchong/build,eunchong/build,eunchong/build,eunchong/build | <INSERT> service_account_file = 'service-account-chromium-tryserver.json'
buildbucket_bucket = 'master.tryserver.chromium.linux'
<INSERT_END> <|endoftext|> # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""ActiveMaster definition."""
from config_bootstrap import Master
class TryServerANGLE(Master.Master4a):
project_name = 'ANGLE Try Server'
master_port = 21403
slave_port = 31403
master_port_alt = 41403
buildbot_url = 'http://build.chromium.org/p/tryserver.chromium.angle/'
gerrit_host = 'https://chromium-review.googlesource.com'
service_account_file = 'service-account-chromium-tryserver.json'
buildbucket_bucket = 'master.tryserver.chromium.linux'
| Add buildbucket service account to Angle master.
BUG=577560
[email protected]
Review URL: https://codereview.chromium.org/1624703003
git-svn-id: 239fca9b83025a0b6f823aeeca02ba5be3d9fd76@298368 0039d316-1c4b-4281-b951-d872f2087c98
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""ActiveMaster definition."""
from config_bootstrap import Master
class TryServerANGLE(Master.Master4a):
project_name = 'ANGLE Try Server'
master_port = 21403
slave_port = 31403
master_port_alt = 41403
buildbot_url = 'http://build.chromium.org/p/tryserver.chromium.angle/'
gerrit_host = 'https://chromium-review.googlesource.com'
|
528759e6ba579de185616190e3e514938989a54e | tests/console/asciimatics/widgets/testcheckbox.py | tests/console/asciimatics/widgets/testcheckbox.py |
from scriptcore.testing.testcase import TestCase
from scriptcore.console.asciimatics.widgets.checkbox import CheckBox
from asciimatics.widgets import CheckBox as ACheckBox
class TestCheckBox(TestCase):
def test_checkbox(self):
"""
Test the checkbox
:return: void
"""
changed_checkbox = []
def change_handler(checkbox):
changed_checkbox.append(checkbox)
checkbox = CheckBox(self.rand_str(), on_change=change_handler)
self.assert_is_instance(checkbox, ACheckBox)
for value in [True, False, True]:
previous_count = len(changed_checkbox)
checkbox.value = value
self.assert_equal(previous_count + 1, len(changed_checkbox))
self.assert_equal(checkbox, changed_checkbox[-1])
|
from scriptcore.testing.testcase import TestCase
from scriptcore.console.asciimatics.widgets.checkbox import CheckBox
from asciimatics.widgets import CheckBox as ACheckBox
class TestCheckBox(TestCase):
def test_checkbox(self):
"""
Test the checkbox
:return: void
"""
changed_checkbox = []
def change_handler(checkbox):
changed_checkbox.append(checkbox)
checkbox = CheckBox(self.rand_str(), on_change=change_handler)
self.assert_is_instance(checkbox, ACheckBox)
for value in [True, False, True]:
previous_count = len(changed_checkbox)
checkbox.value = value
self.assert_equal(value, checkbox.value)
self.assert_equal(previous_count + 1, len(changed_checkbox))
self.assert_equal(checkbox, changed_checkbox[-1])
| Check if checkbox value has updated. | Check if checkbox value has updated.
| Python | apache-2.0 | LowieHuyghe/script-core | <INSERT> self.assert_equal(value, checkbox.value)
<INSERT_END> <|endoftext|>
from scriptcore.testing.testcase import TestCase
from scriptcore.console.asciimatics.widgets.checkbox import CheckBox
from asciimatics.widgets import CheckBox as ACheckBox
class TestCheckBox(TestCase):
def test_checkbox(self):
"""
Test the checkbox
:return: void
"""
changed_checkbox = []
def change_handler(checkbox):
changed_checkbox.append(checkbox)
checkbox = CheckBox(self.rand_str(), on_change=change_handler)
self.assert_is_instance(checkbox, ACheckBox)
for value in [True, False, True]:
previous_count = len(changed_checkbox)
checkbox.value = value
self.assert_equal(value, checkbox.value)
self.assert_equal(previous_count + 1, len(changed_checkbox))
self.assert_equal(checkbox, changed_checkbox[-1])
| Check if checkbox value has updated.
from scriptcore.testing.testcase import TestCase
from scriptcore.console.asciimatics.widgets.checkbox import CheckBox
from asciimatics.widgets import CheckBox as ACheckBox
class TestCheckBox(TestCase):
def test_checkbox(self):
"""
Test the checkbox
:return: void
"""
changed_checkbox = []
def change_handler(checkbox):
changed_checkbox.append(checkbox)
checkbox = CheckBox(self.rand_str(), on_change=change_handler)
self.assert_is_instance(checkbox, ACheckBox)
for value in [True, False, True]:
previous_count = len(changed_checkbox)
checkbox.value = value
self.assert_equal(previous_count + 1, len(changed_checkbox))
self.assert_equal(checkbox, changed_checkbox[-1])
|
32ea27cfa3994984d8d8f8db09522f6c31b0524f | every_election/apps/organisations/migrations/0059_add_sennedd_to_org_types.py | every_election/apps/organisations/migrations/0059_add_sennedd_to_org_types.py | # Generated by Django 2.2.16 on 2020-12-18 09:12
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("organisations", "0058_remove_division_organisation_fk"),
]
operations = [
migrations.AlterField(
model_name="organisation",
name="organisation_type",
field=models.CharField(
choices=[
("combined-authority", "combined-authority"),
("sp", "sp"),
("gla", "gla"),
("local-authority", "local-authority"),
("naw", "naw"),
("senedd", "senedd"),
("nia", "nia"),
("parl", "parl"),
("police-area", "police-area"),
("sp", "sp"),
("europarl", "europarl"),
],
default="local-authority",
max_length=255,
),
),
]
| Add 'senedd' to choices for organisation_type | Add 'senedd' to choices for organisation_type
| Python | bsd-3-clause | DemocracyClub/EveryElection,DemocracyClub/EveryElection,DemocracyClub/EveryElection | <REPLACE_OLD> <REPLACE_NEW> # Generated by Django 2.2.16 on 2020-12-18 09:12
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("organisations", "0058_remove_division_organisation_fk"),
]
operations = [
migrations.AlterField(
model_name="organisation",
name="organisation_type",
field=models.CharField(
choices=[
("combined-authority", "combined-authority"),
("sp", "sp"),
("gla", "gla"),
("local-authority", "local-authority"),
("naw", "naw"),
("senedd", "senedd"),
("nia", "nia"),
("parl", "parl"),
("police-area", "police-area"),
("sp", "sp"),
("europarl", "europarl"),
],
default="local-authority",
max_length=255,
),
),
]
<REPLACE_END> <|endoftext|> # Generated by Django 2.2.16 on 2020-12-18 09:12
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("organisations", "0058_remove_division_organisation_fk"),
]
operations = [
migrations.AlterField(
model_name="organisation",
name="organisation_type",
field=models.CharField(
choices=[
("combined-authority", "combined-authority"),
("sp", "sp"),
("gla", "gla"),
("local-authority", "local-authority"),
("naw", "naw"),
("senedd", "senedd"),
("nia", "nia"),
("parl", "parl"),
("police-area", "police-area"),
("sp", "sp"),
("europarl", "europarl"),
],
default="local-authority",
max_length=255,
),
),
]
| Add 'senedd' to choices for organisation_type
|
|
536bdc4e3ca9c68621d518cdaea8b119301f2dc3 | plugins/linux/lxde_set_wallpaper.py | plugins/linux/lxde_set_wallpaper.py | import os
import sys
from .. import SetWallpaper
class LXDESetWallpaper(SetWallpaper):
def __init__(self, config):
super(LXDESetWallpaper, self).__init__(config)
def platform_check(self):
return sys.platform == 'linux2' and self.config['linux.desktop-environment'] == 'lxde'
def set(self):
os.system('pcmanfm --set-wallpaper=%s')
if not self.config['keep']:
logging.warning("LXDESetWallpaper works better if you configure "
"the keep plugin (e.g. --keep=2)")
| import sys
from .. import SetWallpaper
class LXDESetWallpaper(SetWallpaper):
def __init__(self, config):
super(LXDESetWallpaper, self).__init__(config)
self.cycle = 0
def platform_check(self):
return sys.platform == 'linux2' and self.config['linux.desktop-environment'] == 'lxde'
def set(self):
import os
# TODO: Find a better solution than cycling
# - Cause: --set-wallpaper doesn't reload the file if the arguments are the same
os.system(\
'pcmanfm --set-wallpaper=%s --wallpaper-mode=%s' % (self.config['wallpaper'], 'fit' if self.cycle % 2 else 'center'))
self.cycle += 1
| Revert "Conforming LXDESetWallpaper plugin to conform with keep option (DarwinSetWallpaper), also FastForward merge" | Revert "Conforming LXDESetWallpaper plugin to conform with keep option (DarwinSetWallpaper), also FastForward merge"
This reverts commit 7212d223fe95d3042348bb29d9bd353308be2347.
I really should learn to test before I push to github.
| Python | mit | loktacar/wallpapermaker | <DELETE> os
import <DELETE_END> <REPLACE_OLD> self).__init__(config)
<REPLACE_NEW> self).__init__(config)
self.cycle = 0
<REPLACE_END> <REPLACE_OLD> os.system('pcmanfm --set-wallpaper=%s')
<REPLACE_NEW> import os
<REPLACE_END> <REPLACE_OLD> if not self.config['keep']:
<REPLACE_NEW> # TODO: Find a better solution than cycling
<REPLACE_END> <INSERT> # <INSERT_END> <REPLACE_OLD> logging.warning("LXDESetWallpaper works better <REPLACE_NEW> - Cause: --set-wallpaper doesn't reload the file <REPLACE_END> <REPLACE_OLD> you configure "
<REPLACE_NEW> the arguments are the same
os.system(\
<REPLACE_END> <INSERT> 'pcmanfm --set-wallpaper=%s --wallpaper-mode=%s' % (self.config['wallpaper'], 'fit' if self.cycle % 2 else 'center'))
<INSERT_END> <REPLACE_OLD> "the keep plugin (e.g. --keep=2)")
<REPLACE_NEW> self.cycle += 1
<REPLACE_END> <|endoftext|> import sys
from .. import SetWallpaper
class LXDESetWallpaper(SetWallpaper):
def __init__(self, config):
super(LXDESetWallpaper, self).__init__(config)
self.cycle = 0
def platform_check(self):
return sys.platform == 'linux2' and self.config['linux.desktop-environment'] == 'lxde'
def set(self):
import os
# TODO: Find a better solution than cycling
# - Cause: --set-wallpaper doesn't reload the file if the arguments are the same
os.system(\
'pcmanfm --set-wallpaper=%s --wallpaper-mode=%s' % (self.config['wallpaper'], 'fit' if self.cycle % 2 else 'center'))
self.cycle += 1
| Revert "Conforming LXDESetWallpaper plugin to conform with keep option (DarwinSetWallpaper), also FastForward merge"
This reverts commit 7212d223fe95d3042348bb29d9bd353308be2347.
I really should learn to test before I push to github.
import os
import sys
from .. import SetWallpaper
class LXDESetWallpaper(SetWallpaper):
def __init__(self, config):
super(LXDESetWallpaper, self).__init__(config)
def platform_check(self):
return sys.platform == 'linux2' and self.config['linux.desktop-environment'] == 'lxde'
def set(self):
os.system('pcmanfm --set-wallpaper=%s')
if not self.config['keep']:
logging.warning("LXDESetWallpaper works better if you configure "
"the keep plugin (e.g. --keep=2)")
|
3581c3c71bdf3ff84961df4b328f0bfc2adf0bc7 | apps/provider/urls.py | apps/provider/urls.py | from __future__ import absolute_import
from __future__ import unicode_literals
from django.conf.urls import patterns, include, url
from .views import *
urlpatterns = patterns('',
url(r'^pjson/push$', pjson_provider_push, name="pjson_provider_push"),
url(r'^fhir/practitioner/push$', fhir_practitioner_push, name="fhir_practitioner_push"),
url(r'^fhir/organization/push$', fhir_organization_push, name="fhir_organization_push"),
)
| from __future__ import absolute_import
from __future__ import unicode_literals
from django.conf.urls import patterns, include, url
from .views import *
urlpatterns = patterns('',
url(r'^pjson/push$', pjson_provider_push, name="pjson_provider_push"),
url(r'^fhir/practitioner/push$', fhir_practitioner_push, name="fhir_practitioner_push"),
url(r'^fhir/organization/push$', fhir_organization_push, name="fhir_organization_push"),
url(r'^fhir/practitioner/update$', fhir_practitioner_update, name="fhir_practitioner_update"),
url(r'^fhir/organization/update$', fhir_organization_update, name="fhir_organization_update"),
)
| Add url for update vs. push for pract and org | Add url for update vs. push for pract and org
| Python | apache-2.0 | TransparentHealth/hhs_oauth_client,TransparentHealth/hhs_oauth_client,TransparentHealth/hhs_oauth_client,TransparentHealth/hhs_oauth_client | <REPLACE_OLD> name="fhir_organization_push"),
)
<REPLACE_NEW> name="fhir_organization_push"),
url(r'^fhir/practitioner/update$', fhir_practitioner_update, name="fhir_practitioner_update"),
url(r'^fhir/organization/update$', fhir_organization_update, name="fhir_organization_update"),
)
<REPLACE_END> <|endoftext|> from __future__ import absolute_import
from __future__ import unicode_literals
from django.conf.urls import patterns, include, url
from .views import *
urlpatterns = patterns('',
url(r'^pjson/push$', pjson_provider_push, name="pjson_provider_push"),
url(r'^fhir/practitioner/push$', fhir_practitioner_push, name="fhir_practitioner_push"),
url(r'^fhir/organization/push$', fhir_organization_push, name="fhir_organization_push"),
url(r'^fhir/practitioner/update$', fhir_practitioner_update, name="fhir_practitioner_update"),
url(r'^fhir/organization/update$', fhir_organization_update, name="fhir_organization_update"),
)
| Add url for update vs. push for pract and org
from __future__ import absolute_import
from __future__ import unicode_literals
from django.conf.urls import patterns, include, url
from .views import *
urlpatterns = patterns('',
url(r'^pjson/push$', pjson_provider_push, name="pjson_provider_push"),
url(r'^fhir/practitioner/push$', fhir_practitioner_push, name="fhir_practitioner_push"),
url(r'^fhir/organization/push$', fhir_organization_push, name="fhir_organization_push"),
)
|
e059af57acec9c077ddb348ac6dd84ff58d312fe | setup.py | setup.py | #!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name='blanc-basic-pages',
version='0.2.1',
description='Blanc Basic Pages for Django',
long_description=open('README.rst').read(),
url='https://github.com/blancltd/blanc-basic-pages',
maintainer='Alex Tomkins',
maintainer_email='[email protected]',
platforms=['any'],
install_requires=[
'django-mptt>=0.6.0',
'django-mptt-admin==0.1.8',
],
packages=find_packages(),
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
],
license='BSD',
)
| #!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name='blanc-basic-pages',
version='0.2.1',
description='Blanc Basic Pages for Django',
long_description=open('README.rst').read(),
url='https://github.com/blancltd/blanc-basic-pages',
maintainer='Alex Tomkins',
maintainer_email='[email protected]',
platforms=['any'],
install_requires=[
'django-mptt>=0.6.1',
'django-mptt-admin>=0.1.8',
],
packages=find_packages(),
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
],
license='BSD',
)
| Fix dependencies for Django 1.7 | Fix dependencies for Django 1.7
Older versions of django-mptt will generate warnings
| Python | bsd-3-clause | blancltd/blanc-basic-pages | <REPLACE_OLD> 'django-mptt>=0.6.0',
'django-mptt-admin==0.1.8',
<REPLACE_NEW> 'django-mptt>=0.6.1',
'django-mptt-admin>=0.1.8',
<REPLACE_END> <|endoftext|> #!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name='blanc-basic-pages',
version='0.2.1',
description='Blanc Basic Pages for Django',
long_description=open('README.rst').read(),
url='https://github.com/blancltd/blanc-basic-pages',
maintainer='Alex Tomkins',
maintainer_email='[email protected]',
platforms=['any'],
install_requires=[
'django-mptt>=0.6.1',
'django-mptt-admin>=0.1.8',
],
packages=find_packages(),
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
],
license='BSD',
)
| Fix dependencies for Django 1.7
Older versions of django-mptt will generate warnings
#!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name='blanc-basic-pages',
version='0.2.1',
description='Blanc Basic Pages for Django',
long_description=open('README.rst').read(),
url='https://github.com/blancltd/blanc-basic-pages',
maintainer='Alex Tomkins',
maintainer_email='[email protected]',
platforms=['any'],
install_requires=[
'django-mptt>=0.6.0',
'django-mptt-admin==0.1.8',
],
packages=find_packages(),
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
],
license='BSD',
)
|
bc7bf2a09fe430bb2048842626ecbb476bc6b40c | script/generate_amalgamation.py | script/generate_amalgamation.py | #!/usr/bin/env python
import sys
from os.path import basename, dirname, join
import re
INCLUDE_PATTERN = re.compile(r'^\s*#include "([\w.]+)"')
seen_files = set()
out = sys.stdout
def add_file(filename):
bname = basename(filename)
# Only include each file at most once.
if bname in seen_files:
return
seen_files.add(bname)
path = dirname(filename)
out.write('// Begin file "{0}"\n'.format(filename))
with open(filename, 'r') as f:
for line in f:
m = INCLUDE_PATTERN.match(line)
if m:
add_file(join(path, m.group(1)))
else:
out.write(line)
out.write('// End file "{0}"\n'.format(filename))
for f in sys.argv[1:]:
add_file(f)
| #!/usr/bin/env python
import sys
from os.path import basename, dirname, join
import re
INCLUDE_PATTERN = re.compile(r'^\s*#include "([\w.]+)"')
WREN_DIR = dirname(dirname(realpath(__file__)))
seen_files = set()
out = sys.stdout
# Prints a plain text file, adding comment markers.
def add_comment_file(filename):
with open(filename, 'r') as f:
for line in f:
out.write('// ')
out.write(line)
# Prints the given C source file, recursively resolving local #includes.
def add_file(filename):
bname = basename(filename)
# Only include each file at most once.
if bname in seen_files:
return
seen_files.add(bname)
path = dirname(filename)
out.write('// Begin file "{0}"\n'.format(filename))
with open(filename, 'r') as f:
for line in f:
m = INCLUDE_PATTERN.match(line)
if m:
add_file(join(path, m.group(1)))
else:
out.write(line)
out.write('// End file "{0}"\n'.format(filename))
# Print license on top.
add_comment_file(join(WREN_DIR, 'LICENSE'))
out.write('\n')
# Source files.
for f in sys.argv[1:]:
add_file(f)
| Print LICENSE on top of the amalgamation | Print LICENSE on top of the amalgamation
| Python | mit | Rohansi/wren,Nelarius/wren,minirop/wren,foresterre/wren,munificent/wren,Nave-Neel/wren,foresterre/wren,Nave-Neel/wren,Nelarius/wren,minirop/wren,Nelarius/wren,Nelarius/wren,foresterre/wren,foresterre/wren,bigdimboom/wren,minirop/wren,bigdimboom/wren,munificent/wren,Rohansi/wren,munificent/wren,bigdimboom/wren,munificent/wren,Rohansi/wren,Nave-Neel/wren,Rohansi/wren,bigdimboom/wren,foresterre/wren,Nelarius/wren,minirop/wren,minirop/wren,munificent/wren,munificent/wren,Nave-Neel/wren | <REPLACE_OLD> "([\w.]+)"')
seen_files <REPLACE_NEW> "([\w.]+)"')
WREN_DIR = dirname(dirname(realpath(__file__)))
seen_files <REPLACE_END> <REPLACE_OLD> sys.stdout
def <REPLACE_NEW> sys.stdout
# Prints a plain text file, adding comment markers.
def add_comment_file(filename):
with open(filename, 'r') as f:
for line in f:
out.write('// ')
out.write(line)
# Prints the given C source file, recursively resolving local #includes.
def <REPLACE_END> <REPLACE_OLD> "{0}"\n'.format(filename))
for <REPLACE_NEW> "{0}"\n'.format(filename))
# Print license on top.
add_comment_file(join(WREN_DIR, 'LICENSE'))
out.write('\n')
# Source files.
for <REPLACE_END> <|endoftext|> #!/usr/bin/env python
import sys
from os.path import basename, dirname, join
import re
INCLUDE_PATTERN = re.compile(r'^\s*#include "([\w.]+)"')
WREN_DIR = dirname(dirname(realpath(__file__)))
seen_files = set()
out = sys.stdout
# Prints a plain text file, adding comment markers.
def add_comment_file(filename):
with open(filename, 'r') as f:
for line in f:
out.write('// ')
out.write(line)
# Prints the given C source file, recursively resolving local #includes.
def add_file(filename):
bname = basename(filename)
# Only include each file at most once.
if bname in seen_files:
return
seen_files.add(bname)
path = dirname(filename)
out.write('// Begin file "{0}"\n'.format(filename))
with open(filename, 'r') as f:
for line in f:
m = INCLUDE_PATTERN.match(line)
if m:
add_file(join(path, m.group(1)))
else:
out.write(line)
out.write('// End file "{0}"\n'.format(filename))
# Print license on top.
add_comment_file(join(WREN_DIR, 'LICENSE'))
out.write('\n')
# Source files.
for f in sys.argv[1:]:
add_file(f)
| Print LICENSE on top of the amalgamation
#!/usr/bin/env python
import sys
from os.path import basename, dirname, join
import re
INCLUDE_PATTERN = re.compile(r'^\s*#include "([\w.]+)"')
seen_files = set()
out = sys.stdout
def add_file(filename):
bname = basename(filename)
# Only include each file at most once.
if bname in seen_files:
return
seen_files.add(bname)
path = dirname(filename)
out.write('// Begin file "{0}"\n'.format(filename))
with open(filename, 'r') as f:
for line in f:
m = INCLUDE_PATTERN.match(line)
if m:
add_file(join(path, m.group(1)))
else:
out.write(line)
out.write('// End file "{0}"\n'.format(filename))
for f in sys.argv[1:]:
add_file(f)
|
b03dff0d6964d886f122936d097c3d4acc0582db | proper_parens.py | proper_parens.py | #!/usr/bin/env python
from __future__ import print_function
from __future__ import unicode_literals
def safe_input(prompt):
"""Return user input after catching KeyboardInterrupt and EOFError"""
try:
reply = raw_input(prompt)
except (EOFError, KeyboardInterrupt):
quit()
else:
return reply.decode('utf-8') # Convert input to unicode
prompt = "Input a Lisp style statement '(test)': "
reply = safe_input(prompt)
def check_statement(value):
where_open = value.find("(")
where_close = value.find(")")
if ((where_open == -1) and where_close != -1) or (where_open > where_close):
return -1
| #!/usr/bin/env python
from __future__ import print_function
from __future__ import unicode_literals
def safe_input(prompt):
"""Return user input after catching KeyboardInterrupt and EOFError"""
try:
reply = raw_input(prompt)
except (EOFError, KeyboardInterrupt):
quit()
else:
return reply.decode('utf-8') # Convert input to unicode
prompt = "Input a Lisp style statement '(test)': "
reply = safe_input(prompt)
def check_statement(value):
open_index = [i for i, val in enumerate(reply) if val == "("]
close_index = [i for i, val in enumerate(reply) if val == ")"]
paren_total_broken = [a < b for a, b in zip(open_index, close_index)]
if paren_total_broken.find(False):
return -1
else:
return 0
| Add function for broken and groundwork for other objectives | Add function for broken and groundwork for other objectives
| Python | mit | constanthatz/data-structures | <REPLACE_OLD> where_open <REPLACE_NEW> open_index <REPLACE_END> <REPLACE_OLD> value.find("(")
<REPLACE_NEW> [i for i, val in enumerate(reply) if val == "("]
<REPLACE_END> <REPLACE_OLD> where_close <REPLACE_NEW> close_index <REPLACE_END> <REPLACE_OLD> value.find(")")
<REPLACE_NEW> [i for i, val in enumerate(reply) if val == ")"]
paren_total_broken = [a < b for a, b in zip(open_index, close_index)]
<REPLACE_END> <REPLACE_OLD> ((where_open == -1) and where_close != -1) or (where_open > where_close):
<REPLACE_NEW> paren_total_broken.find(False):
<REPLACE_END> <REPLACE_OLD>
<REPLACE_NEW> else:
return 0
<REPLACE_END> <|endoftext|> #!/usr/bin/env python
from __future__ import print_function
from __future__ import unicode_literals
def safe_input(prompt):
"""Return user input after catching KeyboardInterrupt and EOFError"""
try:
reply = raw_input(prompt)
except (EOFError, KeyboardInterrupt):
quit()
else:
return reply.decode('utf-8') # Convert input to unicode
prompt = "Input a Lisp style statement '(test)': "
reply = safe_input(prompt)
def check_statement(value):
open_index = [i for i, val in enumerate(reply) if val == "("]
close_index = [i for i, val in enumerate(reply) if val == ")"]
paren_total_broken = [a < b for a, b in zip(open_index, close_index)]
if paren_total_broken.find(False):
return -1
else:
return 0
| Add function for broken and groundwork for other objectives
#!/usr/bin/env python
from __future__ import print_function
from __future__ import unicode_literals
def safe_input(prompt):
"""Return user input after catching KeyboardInterrupt and EOFError"""
try:
reply = raw_input(prompt)
except (EOFError, KeyboardInterrupt):
quit()
else:
return reply.decode('utf-8') # Convert input to unicode
prompt = "Input a Lisp style statement '(test)': "
reply = safe_input(prompt)
def check_statement(value):
where_open = value.find("(")
where_close = value.find(")")
if ((where_open == -1) and where_close != -1) or (where_open > where_close):
return -1
|
44d5974fafdddb09a684882fc79662ae4c509f57 | names/__init__.py | names/__init__.py | from os.path import abspath, join, dirname
import random
__title__ = 'names'
__version__ = '0.2'
__author__ = 'Trey Hunner'
__license__ = 'MIT'
full_path = lambda filename: abspath(join(dirname(__file__), filename))
FILES = {
'first:male': full_path('dist.male.first'),
'first:female': full_path('dist.female.first'),
'last': full_path('dist.all.last'),
}
def get_name(filename):
selected = random.random() * 90
with open(filename) as name_file:
for line in name_file:
name, _, cummulative, _ = line.split()
if float(cummulative) > selected:
return name
def get_first_name(gender=None):
if gender not in ('male', 'female'):
gender = random.choice(('male', 'female'))
return get_name(FILES['first:%s' % gender]).capitalize()
def get_last_name():
return get_name(FILES['last']).capitalize()
def get_full_name(gender=None):
return u"%s %s" % (get_first_name(gender), get_last_name())
| from os.path import abspath, join, dirname
import random
__title__ = 'names'
__version__ = '0.2'
__author__ = 'Trey Hunner'
__license__ = 'MIT'
full_path = lambda filename: abspath(join(dirname(__file__), filename))
FILES = {
'first:male': full_path('dist.male.first'),
'first:female': full_path('dist.female.first'),
'last': full_path('dist.all.last'),
}
def get_name(filename):
selected = random.random() * 90
with open(filename) as name_file:
for line in name_file:
name, _, cummulative, _ = line.split()
if float(cummulative) > selected:
return name
def get_first_name(gender=None):
if gender not in ('male', 'female'):
gender = random.choice(('male', 'female'))
return get_name(FILES['first:%s' % gender]).capitalize()
def get_last_name():
return get_name(FILES['last']).capitalize()
def get_full_name(gender=None):
return unicode("%s %s").format(get_first_name(gender), get_last_name())
| Fix unicode string syntax for Python 3 | Fix unicode string syntax for Python 3
| Python | mit | treyhunner/names,treyhunner/names | <REPLACE_OLD> u"%s %s" % (get_first_name(gender), <REPLACE_NEW> unicode("%s %s").format(get_first_name(gender), <REPLACE_END> <|endoftext|> from os.path import abspath, join, dirname
import random
__title__ = 'names'
__version__ = '0.2'
__author__ = 'Trey Hunner'
__license__ = 'MIT'
full_path = lambda filename: abspath(join(dirname(__file__), filename))
FILES = {
'first:male': full_path('dist.male.first'),
'first:female': full_path('dist.female.first'),
'last': full_path('dist.all.last'),
}
def get_name(filename):
selected = random.random() * 90
with open(filename) as name_file:
for line in name_file:
name, _, cummulative, _ = line.split()
if float(cummulative) > selected:
return name
def get_first_name(gender=None):
if gender not in ('male', 'female'):
gender = random.choice(('male', 'female'))
return get_name(FILES['first:%s' % gender]).capitalize()
def get_last_name():
return get_name(FILES['last']).capitalize()
def get_full_name(gender=None):
return unicode("%s %s").format(get_first_name(gender), get_last_name())
| Fix unicode string syntax for Python 3
from os.path import abspath, join, dirname
import random
__title__ = 'names'
__version__ = '0.2'
__author__ = 'Trey Hunner'
__license__ = 'MIT'
full_path = lambda filename: abspath(join(dirname(__file__), filename))
FILES = {
'first:male': full_path('dist.male.first'),
'first:female': full_path('dist.female.first'),
'last': full_path('dist.all.last'),
}
def get_name(filename):
selected = random.random() * 90
with open(filename) as name_file:
for line in name_file:
name, _, cummulative, _ = line.split()
if float(cummulative) > selected:
return name
def get_first_name(gender=None):
if gender not in ('male', 'female'):
gender = random.choice(('male', 'female'))
return get_name(FILES['first:%s' % gender]).capitalize()
def get_last_name():
return get_name(FILES['last']).capitalize()
def get_full_name(gender=None):
return u"%s %s" % (get_first_name(gender), get_last_name())
|
e31099e964f809a8a6ebcb071c7c2b57e17248c2 | reviewboard/changedescs/evolutions/changedesc_user.py | reviewboard/changedescs/evolutions/changedesc_user.py | from __future__ import unicode_literals
from django_evolution.mutations import AddField
from django.db import models
MUTATIONS = [
AddField('ChangeDescription', 'user', models.ForeignKey, blank=True,
related_model='auth.User'),
]
| from __future__ import unicode_literals
from django_evolution.mutations import AddField
from django.db import models
MUTATIONS = [
AddField('ChangeDescription', 'user', models.ForeignKey, null=True,
related_model='auth.User'),
]
| Fix evolution for change description users | Fix evolution for change description users
Trivial change
| Python | mit | sgallagher/reviewboard,chipx86/reviewboard,chipx86/reviewboard,chipx86/reviewboard,sgallagher/reviewboard,davidt/reviewboard,brennie/reviewboard,reviewboard/reviewboard,sgallagher/reviewboard,davidt/reviewboard,reviewboard/reviewboard,reviewboard/reviewboard,sgallagher/reviewboard,brennie/reviewboard,brennie/reviewboard,davidt/reviewboard,brennie/reviewboard,chipx86/reviewboard,reviewboard/reviewboard,davidt/reviewboard | <REPLACE_OLD> blank=True,
<REPLACE_NEW> null=True,
<REPLACE_END> <|endoftext|> from __future__ import unicode_literals
from django_evolution.mutations import AddField
from django.db import models
MUTATIONS = [
AddField('ChangeDescription', 'user', models.ForeignKey, null=True,
related_model='auth.User'),
]
| Fix evolution for change description users
Trivial change
from __future__ import unicode_literals
from django_evolution.mutations import AddField
from django.db import models
MUTATIONS = [
AddField('ChangeDescription', 'user', models.ForeignKey, blank=True,
related_model='auth.User'),
]
|
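A sketch of the model-side field the corrected evolution above corresponds to: null=True is what permits NULL at the database level, while blank=True only relaxes form validation. The on_delete argument is an assumption added for modern Django versions; early Django did not require it.

from django.db import models
from django.contrib.auth.models import User

class ChangeDescription(models.Model):
    # Nullable at the database level, matching AddField(..., null=True) above.
    user = models.ForeignKey(User, null=True, blank=True,
                             on_delete=models.SET_NULL)
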
00e4663940ed1d22e768b3de3d1c645c8649aecc | src/WhiteLibrary/keywords/items/textbox.py | src/WhiteLibrary/keywords/items/textbox.py | from TestStack.White.UIItems import TextBox
from WhiteLibrary.keywords.librarycomponent import LibraryComponent
from WhiteLibrary.keywords.robotlibcore import keyword
class TextBoxKeywords(LibraryComponent):
@keyword
def input_text_to_textbox(self, locator, input):
"""
Writes text to a textbox.
``locator`` is the locator of the text box.
``input`` is the text to write.
"""
textBox = self.state._get_typed_item_by_locator(TextBox, locator)
textBox.Text = input
@keyword
def verify_text_in_textbox(self, locator, expected):
"""
Verifies text in a text box.
``locator`` is the locator of the text box.
``expected`` is the expected text of the text box.
"""
textbox = self.state._get_typed_item_by_locator(TextBox, locator)
self.state._verify_value(expected, textbox.Text)
@keyword
def get_text_from_textbox(self, locator):
"""
Gets text from text box.
``locator`` is the locator of the text box.
"""
textbox = self.state._get_typed_item_by_locator(TextBox, locator)
return textbox.Text
| from TestStack.White.UIItems import TextBox
from WhiteLibrary.keywords.librarycomponent import LibraryComponent
from WhiteLibrary.keywords.robotlibcore import keyword
class TextBoxKeywords(LibraryComponent):
@keyword
def input_text_to_textbox(self, locator, input_value):
"""
Writes text to a textbox.
``locator`` is the locator of the text box.
``input_value`` is the text to write.
"""
textBox = self.state._get_typed_item_by_locator(TextBox, locator)
textBox.Text = input_value
@keyword
def verify_text_in_textbox(self, locator, expected):
"""
Verifies text in a text box.
``locator`` is the locator of the text box.
``expected`` is the expected text of the text box.
"""
textbox = self.state._get_typed_item_by_locator(TextBox, locator)
self.state._verify_value(expected, textbox.Text)
@keyword
def get_text_from_textbox(self, locator):
"""
Gets text from text box.
``locator`` is the locator of the text box.
"""
textbox = self.state._get_typed_item_by_locator(TextBox, locator)
return textbox.Text
| Change to better argument name | Change to better argument name
| Python | apache-2.0 | Omenia/robotframework-whitelibrary,Omenia/robotframework-whitelibrary | <REPLACE_OLD> input):
<REPLACE_NEW> input_value):
<REPLACE_END> <REPLACE_OLD> ``input`` <REPLACE_NEW> ``input_value`` <REPLACE_END> <REPLACE_OLD> input
<REPLACE_NEW> input_value
<REPLACE_END> <|endoftext|> from TestStack.White.UIItems import TextBox
from WhiteLibrary.keywords.librarycomponent import LibraryComponent
from WhiteLibrary.keywords.robotlibcore import keyword
class TextBoxKeywords(LibraryComponent):
@keyword
def input_text_to_textbox(self, locator, input_value):
"""
Writes text to a textbox.
``locator`` is the locator of the text box.
``input_value`` is the text to write.
"""
textBox = self.state._get_typed_item_by_locator(TextBox, locator)
textBox.Text = input_value
@keyword
def verify_text_in_textbox(self, locator, expected):
"""
Verifies text in a text box.
``locator`` is the locator of the text box.
``expected`` is the expected text of the text box.
"""
textbox = self.state._get_typed_item_by_locator(TextBox, locator)
self.state._verify_value(expected, textbox.Text)
@keyword
def get_text_from_textbox(self, locator):
"""
Gets text from text box.
``locator`` is the locator of the text box.
"""
textbox = self.state._get_typed_item_by_locator(TextBox, locator)
return textbox.Text
| Change to better argument name
from TestStack.White.UIItems import TextBox
from WhiteLibrary.keywords.librarycomponent import LibraryComponent
from WhiteLibrary.keywords.robotlibcore import keyword
class TextBoxKeywords(LibraryComponent):
@keyword
def input_text_to_textbox(self, locator, input):
"""
Writes text to a textbox.
``locator`` is the locator of the text box.
``input`` is the text to write.
"""
textBox = self.state._get_typed_item_by_locator(TextBox, locator)
textBox.Text = input
@keyword
def verify_text_in_textbox(self, locator, expected):
"""
Verifies text in a text box.
``locator`` is the locator of the text box.
``expected`` is the expected text of the text box.
"""
textbox = self.state._get_typed_item_by_locator(TextBox, locator)
self.state._verify_value(expected, textbox.Text)
@keyword
def get_text_from_textbox(self, locator):
"""
Gets text from text box.
``locator`` is the locator of the text box.
"""
textbox = self.state._get_typed_item_by_locator(TextBox, locator)
return textbox.Text
|
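The rename in input_text_to_textbox above avoids shadowing Python's input builtin inside the keyword body. A minimal, self-contained illustration of the hazard (function names here are illustrative):

def shadowed(input):
    # The parameter hides the builtin input() for the whole function body,
    # so the builtin cannot be called here without workarounds.
    return input

def renamed(input_value):
    # With the rename, the builtin input() would remain reachable if needed.
    return input_value

print(shadowed("x"), renamed("y"))
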
76600b63940da9322673ce6cd436129a7d65f10d | scripts/ec2/terminate_all.py | scripts/ec2/terminate_all.py | #!/usr/bin/env python
##########################################################################
# scripts/ec2/terminate_all.py
#
# Part of Project Thrill - http://project-thrill.org
#
# Copyright (C) 2015 Timo Bingmann <[email protected]>
#
# All rights reserved. Published under the BSD-2 license in the LICENSE file.
##########################################################################
import boto3
from subprocess import call
ec2 = boto3.resource('ec2')
filters = [{'Name': 'instance-state-name', 'Values': ['running']}]
if "EC2_KEY_NAME" in os.environ:
filters.append({'Name': 'key-name', 'Values': [os.environ['EC2_KEY_NAME']]})
instances = ec2.instances.filter(Filters=filters)
ids = [instance.id for instance in instances]
print("Terminating:", ids)
ec2.instances.filter(InstanceIds=ids).terminate()
##########################################################################
| #!/usr/bin/env python
##########################################################################
# scripts/ec2/terminate_all.py
#
# Part of Project Thrill - http://project-thrill.org
#
# Copyright (C) 2015 Timo Bingmann <[email protected]>
#
# All rights reserved. Published under the BSD-2 license in the LICENSE file.
##########################################################################
import boto3
import os
from subprocess import call
ec2 = boto3.resource('ec2')
filters = [{'Name': 'instance-state-name', 'Values': ['running']}]
if "EC2_KEY_NAME" in os.environ:
filters.append({'Name': 'key-name', 'Values': [os.environ['EC2_KEY_NAME']]})
instances = ec2.instances.filter(Filters=filters)
ids = [instance.id for instance in instances]
print("Terminating:", ids)
ec2.instances.filter(InstanceIds=ids).terminate()
##########################################################################
| Add import statement for os | Add import statement for os | Python | bsd-2-clause | manpen/thrill,manpen/thrill,manpen/thrill,manpen/thrill,manpen/thrill | <REPLACE_OLD> boto3
from <REPLACE_NEW> boto3
import os
from <REPLACE_END> <|endoftext|> #!/usr/bin/env python
##########################################################################
# scripts/ec2/terminate_all.py
#
# Part of Project Thrill - http://project-thrill.org
#
# Copyright (C) 2015 Timo Bingmann <[email protected]>
#
# All rights reserved. Published under the BSD-2 license in the LICENSE file.
##########################################################################
import boto3
import os
from subprocess import call
ec2 = boto3.resource('ec2')
filters = [{'Name': 'instance-state-name', 'Values': ['running']}]
if "EC2_KEY_NAME" in os.environ:
filters.append({'Name': 'key-name', 'Values': [os.environ['EC2_KEY_NAME']]})
instances = ec2.instances.filter(Filters=filters)
ids = [instance.id for instance in instances]
print("Terminating:", ids)
ec2.instances.filter(InstanceIds=ids).terminate()
##########################################################################
| Add import statement for os
#!/usr/bin/env python
##########################################################################
# scripts/ec2/terminate_all.py
#
# Part of Project Thrill - http://project-thrill.org
#
# Copyright (C) 2015 Timo Bingmann <[email protected]>
#
# All rights reserved. Published under the BSD-2 license in the LICENSE file.
##########################################################################
import boto3
from subprocess import call
ec2 = boto3.resource('ec2')
filters = [{'Name': 'instance-state-name', 'Values': ['running']}]
if "EC2_KEY_NAME" in os.environ:
filters.append({'Name': 'key-name', 'Values': [os.environ['EC2_KEY_NAME']]})
instances = ec2.instances.filter(Filters=filters)
ids = [instance.id for instance in instances]
print("Terminating:", ids)
ec2.instances.filter(InstanceIds=ids).terminate()
##########################################################################
|
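The fixed pattern from terminate_all.py above, in isolation: without the added import, the os.environ check raised NameError. The environment key is the script's own; the snippet is runnable standalone.

import os

filters = [{'Name': 'instance-state-name', 'Values': ['running']}]
if "EC2_KEY_NAME" in os.environ:
    filters.append({'Name': 'key-name', 'Values': [os.environ['EC2_KEY_NAME']]})
print(filters)
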
72796a97a24c512cf43fd9559d6e6b47d2f72e72 | preferences/models.py | preferences/models.py | import uuid
from django.db import models
from django.contrib.auth.models import User
from opencivicdata.models.people_orgs import Person
from django.contrib.auth.models import User
class Preferences(models.Model):
user = models.OneToOneField(User, related_name='preferences')
address = models.CharField(max_length=100, blank=True)
lat = models.FloatField(null=True, blank=True)
lon = models.FloatField(null=True, blank=True)
apikey = models.UUIDField(default=uuid.uuid4)
class PersonFollow(models.Model):
user = models.ForeignKey(User, related_name='person_follows')
person = models.ForeignKey(Person, related_name='follows')
class TopicFollow(models.Model):
user = models.ForeignKey(User, related_name='topic_follows')
topic = models.CharField(max_length=100)
class LocationFollow(models.Model):
user = models.ForeignKey(User, related_name='location_follows')
location = models.CharField(max_length=100)
| import uuid
from django.db import models
from django.contrib.auth.models import User
from opencivicdata.models.people_orgs import Person
class Preferences(models.Model):
user = models.OneToOneField(User, related_name='preferences')
address = models.CharField(max_length=100, blank=True, null=True)
lat = models.FloatField(null=True, blank=True)
lon = models.FloatField(null=True, blank=True)
apikey = models.UUIDField(default=uuid.uuid4)
class PersonFollow(models.Model):
user = models.ForeignKey(User, related_name='person_follows')
person = models.ForeignKey(Person, related_name='follows')
class TopicFollow(models.Model):
user = models.ForeignKey(User, related_name='topic_follows')
topic = models.CharField(max_length=100)
class LocationFollow(models.Model):
user = models.ForeignKey(User, related_name='location_follows')
location = models.CharField(max_length=100)
| Allow address to be null | Allow address to be null
| Python | mit | jamesturk/tot,jamesturk/tot,jamesturk/tot,jamesturk/tot,jamesturk/tot | <REPLACE_OLD> Person
from django.contrib.auth.models import User
class <REPLACE_NEW> Person
class <REPLACE_END> <REPLACE_OLD> blank=True)
<REPLACE_NEW> blank=True, null=True)
<REPLACE_END> <|endoftext|> import uuid
from django.db import models
from django.contrib.auth.models import User
from opencivicdata.models.people_orgs import Person
class Preferences(models.Model):
user = models.OneToOneField(User, related_name='preferences')
address = models.CharField(max_length=100, blank=True, null=True)
lat = models.FloatField(null=True, blank=True)
lon = models.FloatField(null=True, blank=True)
apikey = models.UUIDField(default=uuid.uuid4)
class PersonFollow(models.Model):
user = models.ForeignKey(User, related_name='person_follows')
person = models.ForeignKey(Person, related_name='follows')
class TopicFollow(models.Model):
user = models.ForeignKey(User, related_name='topic_follows')
topic = models.CharField(max_length=100)
class LocationFollow(models.Model):
user = models.ForeignKey(User, related_name='location_follows')
location = models.CharField(max_length=100)
| Allow address to be null
import uuid
from django.db import models
from django.contrib.auth.models import User
from opencivicdata.models.people_orgs import Person
from django.contrib.auth.models import User
class Preferences(models.Model):
user = models.OneToOneField(User, related_name='preferences')
address = models.CharField(max_length=100, blank=True)
lat = models.FloatField(null=True, blank=True)
lon = models.FloatField(null=True, blank=True)
apikey = models.UUIDField(default=uuid.uuid4)
class PersonFollow(models.Model):
user = models.ForeignKey(User, related_name='person_follows')
person = models.ForeignKey(Person, related_name='follows')
class TopicFollow(models.Model):
user = models.ForeignKey(User, related_name='topic_follows')
topic = models.CharField(max_length=100)
class LocationFollow(models.Model):
user = models.ForeignKey(User, related_name='location_follows')
location = models.CharField(max_length=100)
|
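Usage sketch for the relaxed address field above: with null=True the column now accepts NULL, whereas blank=True alone only relaxed form validation. The user objects are assumed to already exist; this is illustrative, not project code.

# Both succeed after the change; before it, address=None violated NOT NULL.
Preferences.objects.create(user=some_user, address=None)
Preferences.objects.create(user=other_user, address="")
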
6b5ab66b7fb3d514c05bf3cf69023b1e119e1797 | stock_picking_list/9.0.1.0.0/post-migration.py | stock_picking_list/9.0.1.0.0/post-migration.py | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenUpgrade module for Odoo
# @copyright 2015-Today: Odoo Community Association
# @author: Stephane LE CORNEC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openupgradelib import openupgrade
@openupgrade.migrate()
def migrate(cr, version):
# because path of report has changed, we need to reload it
openupgrade.load_data(
cr, 'stock_picking_list', 'report/report_data.xml')
| ADD mig scripts for picking list | ADD mig scripts for picking list
| Python | agpl-3.0 | ingadhoc/stock | <INSERT> # -*- coding: utf-8 -*-
##############################################################################
#
# <INSERT_END> <INSERT> OpenUpgrade module for Odoo
# @copyright 2015-Today: Odoo Community Association
# @author: Stephane LE CORNEC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openupgradelib import openupgrade
@openupgrade.migrate()
def migrate(cr, version):
# because path of report has changed, we need to reload it
openupgrade.load_data(
cr, 'stock_picking_list', 'report/report_data.xml')
<INSERT_END> <|endoftext|> # -*- coding: utf-8 -*-
##############################################################################
#
# OpenUpgrade module for Odoo
# @copyright 2015-Today: Odoo Community Association
# @author: Stephane LE CORNEC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openupgradelib import openupgrade
@openupgrade.migrate()
def migrate(cr, version):
# because path of report has changed, we need to reload it
openupgrade.load_data(
cr, 'stock_picking_list', 'report/report_data.xml')
| ADD mig scripts for picking list
|
|
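A rough sketch of what the openupgrade.migrate() decorator adds to the migration above, as I understand openupgradelib: it supplies cr/version handling and skips the body on fresh installs where no prior version exists. Simplified and illustrative only:

def migrate_plain(cr, version):
    if not version:
        return  # fresh install: nothing to migrate
    openupgrade.load_data(cr, 'stock_picking_list', 'report/report_data.xml')
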
09c2e6fff38e5c47391c0f8e948089e3efd26337 | serfnode/handler/file_utils.py | serfnode/handler/file_utils.py | import os
from tempfile import mkstemp
import time
class atomic_write(object):
"""Perform an atomic write to a file.
Use as::
with atomic_write('/my_file') as f:
f.write('foo')
"""
def __init__(self, filepath):
"""
:type filepath: str
"""
self.filepath = filepath
def __enter__(self):
"""
:rtype: File
"""
_, self.temp = mkstemp(dir=os.getcwd())
self.f = open(self.temp, 'w')
return self.f
def __exit__(self, exc_type, exc_val, exc_tb):
self.f.close()
if exc_type is None:
os.rename(self.temp, self.filepath)
def wait_for_file(filepath, sleep_interval=0.1):
"""Wait for the existence of a file.
Warning: use ``atomic_write`` to write the file, since this function
doesn't check that the file is complete.
:type filepath: str
:type sleep_interval: float
:rtype: None
"""
while not os.path.exists(filepath):
time.sleep(sleep_interval)
| import os
from tempfile import mkstemp
import time
class atomic_write(object):
"""Perform an atomic write to a file.
Use as::
with atomic_write('/my_file') as f:
f.write('foo')
"""
def __init__(self, filepath):
"""
:type filepath: str
"""
self.filepath = filepath
def __enter__(self):
"""
:rtype: File
"""
_, self.temp = mkstemp(dir=os.getcwd())
self.f = open(self.temp, 'w')
return self.f
def __exit__(self, exc_type, exc_val, exc_tb):
self.f.close()
if exc_type is None:
os.rename(self.temp, self.filepath)
def wait_for_files(*filepath, sleep_interval=0.1):
"""Wait for the existence of files.
Warning: use ``atomic_write`` to write the file, since this function
doesn't check that the file is complete.
:type filepath: str
:type sleep_interval: float
:rtype: None
"""
while not all(os.path.exists(f) for f in filepath):
time.sleep(sleep_interval)
| Allow waiting for multiple files | Allow waiting for multiple files | Python | mit | waltermoreira/serfnode,waltermoreira/serfnode,waltermoreira/serfnode | <REPLACE_OLD> wait_for_file(filepath, <REPLACE_NEW> wait_for_files(*filepath, <REPLACE_END> <REPLACE_OLD> a file.
<REPLACE_NEW> files.
<REPLACE_END> <REPLACE_OLD> os.path.exists(filepath):
<REPLACE_NEW> all(os.path.exists(f) for f in filepath):
<REPLACE_END> <|endoftext|> import os
from tempfile import mkstemp
import time
class atomic_write(object):
"""Perform an atomic write to a file.
Use as::
with atomic_write('/my_file') as f:
f.write('foo')
"""
def __init__(self, filepath):
"""
:type filepath: str
"""
self.filepath = filepath
def __enter__(self):
"""
:rtype: File
"""
_, self.temp = mkstemp(dir=os.getcwd())
self.f = open(self.temp, 'w')
return self.f
def __exit__(self, exc_type, exc_val, exc_tb):
self.f.close()
if exc_type is None:
os.rename(self.temp, self.filepath)
def wait_for_files(*filepath, sleep_interval=0.1):
"""Wait for the existence of files.
Warning: use ``atomic_write`` to write the file, since this function
doesn't check that the file is complete.
:type filepath: str
:type sleep_interval: float
:rtype: None
"""
while not all(os.path.exists(f) for f in filepath):
time.sleep(sleep_interval)
| Allow waiting for multiple files
import os
from tempfile import mkstemp
import time
class atomic_write(object):
"""Perform an atomic write to a file.
Use as::
with atomic_write('/my_file') as f:
f.write('foo')
"""
def __init__(self, filepath):
"""
:type filepath: str
"""
self.filepath = filepath
def __enter__(self):
"""
:rtype: File
"""
_, self.temp = mkstemp(dir=os.getcwd())
self.f = open(self.temp, 'w')
return self.f
def __exit__(self, exc_type, exc_val, exc_tb):
self.f.close()
if exc_type is None:
os.rename(self.temp, self.filepath)
def wait_for_file(filepath, sleep_interval=0.1):
"""Wait for the existence of a file.
Warning: use ``atomic_write`` to write the file, since this function
doesn't check that the file is complete.
:type filepath: str
:type sleep_interval: float
:rtype: None
"""
while not os.path.exists(filepath):
time.sleep(sleep_interval)
|
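Usage sketch for the variadic wait_for_files above; note that a keyword-only argument after *filepath is Python 3 syntax. The paths are illustrative.

wait_for_files('/tmp/a.ready', '/tmp/b.ready')                  # default polling
wait_for_files('/tmp/a.ready', '/tmp/b.ready', sleep_interval=0.5)
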
d1504f3c3129c926bd9897a6660669f146e64c38 | cachupy/cachupy.py | cachupy/cachupy.py | import datetime
class Cache:
EXPIRE_IN = 'expire_in'
def __init__(self):
self.store = {}
def get(self, key):
"""Gets a value based upon a key."""
self._check_expiry(key)
return self.store[key]['value']
def set(self, dictionary, expire_in):
"""Sets a dictionary to the cache with a timedelta expiration."""
for key in dictionary.keys():
self.store[key] = {
Cache.EXPIRE_IN: datetime.datetime.now() + expire_in,
'value': dictionary[key]
}
def has(self, key):
"""Returns whether a key is in the cache."""
self._check_expiry(key)
return key in self.store
def _check_expiry(self, key):
"""Removes a key/value pair if it's expired."""
if key in self.store and datetime.datetime.now() > self.store[key][Cache.EXPIRE_IN]:
self.store.pop(key, None)
| import datetime
class Cache:
EXPIRE_IN = 'expire_in'
VALUE = 'value'
def __init__(self):
self.lock = False
self.store = {}
def get(self, key):
"""Gets a value based upon a key."""
self._check_expiry(key)
return self.store[key][Cache.VALUE]
def set(self, expire_in, *args):
"""Sets a dictionary to the cache with a timedelta expiration."""
for arg in args:
if isinstance(arg, dict):
for k,v in arg.items():
self._set(k, v, expire_in)
else:
for v in arg:
self._set(v[0], v[1], expire_in)
def has(self, key):
"""Returns whether a key is in the cache."""
self._check_expiry(key)
return key in self.store
def _set(self, key, value, expire_in):
self.store[key] = {
Cache.EXPIRE_IN: datetime.datetime.now() + expire_in,
Cache.VALUE: value
}
def _check_expiry(self, key):
"""Removes a key/value pair if it's expired."""
if not self.lock and key in self.store and datetime.datetime.now() > self.store[key][Cache.EXPIRE_IN]:
self.store.pop(key, None)
| Change signature of set() method. | Change signature of set() method.
| Python | mit | patrickbird/cachupy | <REPLACE_OLD> 'expire_in'
<REPLACE_NEW> 'expire_in'
VALUE = 'value'
<REPLACE_END> <INSERT> self.lock = False
<INSERT_END> <REPLACE_OLD> self.store[key]['value']
<REPLACE_NEW> self.store[key][Cache.VALUE]
<REPLACE_END> <REPLACE_OLD> dictionary, expire_in):
<REPLACE_NEW> expire_in, *args):
<REPLACE_END> <INSERT> arg in args:
if isinstance(arg, dict):
for k,v in arg.items():
self._set(k, v, expire_in)
else:
for v in arg:
self._set(v[0], v[1], expire_in)
def has(self, key):
"""Returns whether a <INSERT_END> <REPLACE_OLD> in dictionary.keys():
<REPLACE_NEW> is in the cache."""
self._check_expiry(key)
return key in self.store
def _set(self, key, value, expire_in):
<REPLACE_END> <DELETE> <DELETE_END> <REPLACE_OLD> 'value': dictionary[key]
}
def has(self, key):
"""Returns whether a key is in the cache."""
self._check_expiry(key)
return key in self.store
<REPLACE_NEW> Cache.VALUE: value
}
<REPLACE_END> <INSERT> not self.lock and <INSERT_END> <|endoftext|> import datetime
class Cache:
EXPIRE_IN = 'expire_in'
VALUE = 'value'
def __init__(self):
self.lock = False
self.store = {}
def get(self, key):
"""Gets a value based upon a key."""
self._check_expiry(key)
return self.store[key][Cache.VALUE]
def set(self, expire_in, *args):
"""Sets a dictionary to the cache with a timedelta expiration."""
for arg in args:
if isinstance(arg, dict):
for k,v in arg.items():
self._set(k, v, expire_in)
else:
for v in arg:
self._set(v[0], v[1], expire_in)
def has(self, key):
"""Returns whether a key is in the cache."""
self._check_expiry(key)
return key in self.store
def _set(self, key, value, expire_in):
self.store[key] = {
Cache.EXPIRE_IN: datetime.datetime.now() + expire_in,
Cache.VALUE: value
}
def _check_expiry(self, key):
"""Removes a key/value pair if it's expired."""
if not self.lock and key in self.store and datetime.datetime.now() > self.store[key][Cache.EXPIRE_IN]:
self.store.pop(key, None)
| Change signature of set() method.
import datetime
class Cache:
EXPIRE_IN = 'expire_in'
def __init__(self):
self.store = {}
def get(self, key):
"""Gets a value based upon a key."""
self._check_expiry(key)
return self.store[key]['value']
def set(self, dictionary, expire_in):
"""Sets a dictionary to the cache with a timedelta expiration."""
for key in dictionary.keys():
self.store[key] = {
Cache.EXPIRE_IN: datetime.datetime.now() + expire_in,
'value': dictionary[key]
}
def has(self, key):
"""Returns whether a key is in the cache."""
self._check_expiry(key)
return key in self.store
def _check_expiry(self, key):
"""Removes a key/value pair if it's expired."""
if key in self.store and datetime.datetime.now() > self.store[key][Cache.EXPIRE_IN]:
self.store.pop(key, None)
|
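Usage sketch for the new Cache.set() signature above: the expiry comes first, followed by any mix of dicts and (key, value) pair sequences. Keys and values here are illustrative.

from datetime import timedelta

cache = Cache()
cache.set(timedelta(seconds=30), {'a': 1, 'b': 2})        # dict form
cache.set(timedelta(seconds=30), [('c', 3), ('d', 4)])    # pair-sequence form
print(cache.get('a'), cache.get('c'))
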
73bf27a95944f67feb254d90b90cfa31165dc4cb | tests/UselessSymbolsRemove/CycleTest.py | tests/UselessSymbolsRemove/CycleTest.py | #!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 19.08.2017 16:13
:Licence GNUv3
Part of grammpy-transforms
"""
from unittest import main, TestCase
from grammpy import *
from grammpy_transforms import *
class S(Nonterminal): pass
class A(Nonterminal): pass
class B(Nonterminal): pass
class C(Nonterminal): pass
class D(Nonterminal): pass
class Rules(Rule):
rules = [([S], [A, B]),
([S], [C]),
([A], ['a', A]),
([A], ['a']),
([B], ['b', B]),
([C], ['c']),
([D], ['b', 'c'])]
class CycleTest(TestCase):
def test_cycleTest(self):
g = Grammar(terminals=['a', 'b', 'c'],
nonterminals=[S, A, B, C, D],
rules=[Rules],
start_symbol=S)
self.assertEqual(g.rules_count(), 7)
com = ContextFree.remove_useless_symbols(g)
self.assertTrue(com.have_term('c'))
self.assertFalse(com.have_term('a'))
self.assertFalse(com.have_term('b'))
self.assertTrue(com.have_nonterm([S, C]))
self.assertFalse(com.have_nonterm(A))
self.assertFalse(com.have_nonterm(B))
self.assertFalse(com.have_nonterm(D))
self.assertEqual(com.rules_count(), 2)
def test_cycleTestShouldNotChange(self):
g = Grammar(terminals=['a', 'b', 'c'],
nonterminals=[S, A, B, C, D],
rules=[Rules],
start_symbol=S)
self.assertEqual(g.rules_count(), 7)
ContextFree.remove_useless_symbols(g)
self.assertTrue(g.have_term(['a', 'b', 'c']))
self.assertTrue(g.have_nonterm([S, A, B, C, D]))
self.assertEqual(g.rules_count(), 7)
def test_cycleTestShouldChange(self):
g = Grammar(terminals=['a', 'b', 'c'],
nonterminals=[S, A, B, C, D],
rules=[Rules],
start_symbol=S)
self.assertEqual(g.rules_count(), 7)
ContextFree.remove_useless_symbols(g, transform_grammar=True)
self.assertTrue(g.have_term('c'))
self.assertFalse(g.have_term('a'))
self.assertFalse(g.have_term('b'))
self.assertTrue(g.have_nonterm([S, C]))
self.assertFalse(g.have_nonterm(A))
self.assertFalse(g.have_nonterm(B))
self.assertFalse(g.have_nonterm(D))
self.assertEqual(g.rules_count(), 2)
| Add cycle test of remove useless symbols | Add cycle test of remove useless symbols
| Python | mit | PatrikValkovic/grammpy | <REPLACE_OLD> <REPLACE_NEW> #!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 19.08.2017 16:13
:Licence GNUv3
Part of grammpy-transforms
"""
from unittest import main, TestCase
from grammpy import *
from grammpy_transforms import *
class S(Nonterminal): pass
class A(Nonterminal): pass
class B(Nonterminal): pass
class C(Nonterminal): pass
class D(Nonterminal): pass
class Rules(Rule):
rules = [([S], [A, B]),
([S], [C]),
([A], ['a', A]),
([A], ['a']),
([B], ['b', B]),
([C], ['c']),
([D], ['b', 'c'])]
class CycleTest(TestCase):
def test_cycleTest(self):
g = Grammar(terminals=['a', 'b', 'c'],
nonterminals=[S, A, B, C, D],
rules=[Rules],
start_symbol=S)
self.assertEqual(g.rules_count(), 7)
com = ContextFree.remove_useless_symbols(g)
self.assertTrue(com.have_term('c'))
self.assertFalse(com.have_term('a'))
self.assertFalse(com.have_term('b'))
self.assertTrue(com.have_nonterm([S, C]))
self.assertFalse(com.have_nonterm(A))
self.assertFalse(com.have_nonterm(B))
self.assertFalse(com.have_nonterm(D))
self.assertEqual(com.rules_count(), 2)
def test_cycleTestShouldNotChange(self):
g = Grammar(terminals=['a', 'b', 'c'],
nonterminals=[S, A, B, C, D],
rules=[Rules],
start_symbol=S)
self.assertEqual(g.rules_count(), 7)
ContextFree.remove_useless_symbols(g)
self.assertTrue(g.have_term(['a', 'b', 'c']))
self.assertTrue(g.have_nonterm([S, A, B, C, D]))
self.assertEqual(g.rules_count(), 7)
def test_cycleTestShouldChange(self):
g = Grammar(terminals=['a', 'b', 'c'],
nonterminals=[S, A, B, C, D],
rules=[Rules],
start_symbol=S)
self.assertEqual(g.rules_count(), 7)
ContextFree.remove_useless_symbols(g, transform_grammar=True)
self.assertTrue(g.have_term('c'))
self.assertFalse(g.have_term('a'))
self.assertFalse(g.have_term('b'))
self.assertTrue(g.have_nonterm([S, C]))
self.assertFalse(g.have_nonterm(A))
self.assertFalse(g.have_nonterm(B))
self.assertFalse(g.have_nonterm(D))
self.assertEqual(g.rules_count(), 2)
<REPLACE_END> <|endoftext|> #!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 19.08.2017 16:13
:Licence GNUv3
Part of grammpy-transforms
"""
from unittest import main, TestCase
from grammpy import *
from grammpy_transforms import *
class S(Nonterminal): pass
class A(Nonterminal): pass
class B(Nonterminal): pass
class C(Nonterminal): pass
class D(Nonterminal): pass
class Rules(Rule):
rules = [([S], [A, B]),
([S], [C]),
([A], ['a', A]),
([A], ['a']),
([B], ['b', B]),
([C], ['c']),
([D], ['b', 'c'])]
class CycleTest(TestCase):
def test_cycleTest(self):
g = Grammar(terminals=['a', 'b', 'c'],
nonterminals=[S, A, B, C, D],
rules=[Rules],
start_symbol=S)
self.assertEqual(g.rules_count(), 7)
com = ContextFree.remove_useless_symbols(g)
self.assertTrue(com.have_term('c'))
self.assertFalse(com.have_term('a'))
self.assertFalse(com.have_term('b'))
self.assertTrue(com.have_nonterm([S, C]))
self.assertFalse(com.have_nonterm(A))
self.assertFalse(com.have_nonterm(B))
self.assertFalse(com.have_nonterm(D))
self.assertEqual(com.rules_count(), 2)
def test_cycleTestShouldNotChange(self):
g = Grammar(terminals=['a', 'b', 'c'],
nonterminals=[S, A, B, C, D],
rules=[Rules],
start_symbol=S)
self.assertEqual(g.rules_count(), 7)
ContextFree.remove_useless_symbols(g)
self.assertTrue(g.have_term(['a', 'b', 'c']))
self.assertTrue(g.have_nonterm([S, A, B, C, D]))
self.assertEqual(g.rules_count(), 7)
def test_cycleTestShouldChange(self):
g = Grammar(terminals=['a', 'b', 'c'],
nonterminals=[S, A, B, C, D],
rules=[Rules],
start_symbol=S)
self.assertEqual(g.rules_count(), 7)
ContextFree.remove_useless_symbols(g, transform_grammar=True)
self.assertTrue(g.have_term('c'))
self.assertFalse(g.have_term('a'))
self.assertFalse(g.have_term('b'))
self.assertTrue(g.have_nonterm([S, C]))
self.assertFalse(g.have_nonterm(A))
self.assertFalse(g.have_nonterm(B))
self.assertFalse(g.have_nonterm(D))
self.assertEqual(g.rules_count(), 2)
| Add cycle test of remove useless symbols
|
|
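The behaviour the new cycle tests above pin down, restated as direct usage (sketch; g is a Grammar built as in the test):

pruned = ContextFree.remove_useless_symbols(g)                  # g left unchanged
ContextFree.remove_useless_symbols(g, transform_grammar=True)   # g mutated in place
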
52982c735f729ddf0a9c020d495906c4a4899462 | txircd/modules/rfc/umode_i.py | txircd/modules/rfc/umode_i.py | from twisted.plugin import IPlugin
from txircd.module_interface import IMode, IModuleData, Mode, ModuleData
from txircd.utils import ModeType
from zope.interface import implements
class InvisibleMode(ModuleData, Mode):
implements(IPlugin, IModuleData, IMode)
name = "InvisibleMode"
core = True
affectedActions = [ "showchanneluser" ]
def actions(self):
return [ ("modeactioncheck-user-i-showchanneluser", 1, self.isInvisible) ]
def userModes(self):
return [ ("i", ModeType.NoParam, self) ]
def isInvisible(self, user, channel, fromUser, userSeeing):
if "i" in user.modes:
return True
return None
def apply(self, actionName, user, param, channel, fromUser, sameUser):
if user != sameUser:
return None
if not channel or fromUser not in channel.users:
return False
return None
invisibleMode = InvisibleMode() | from twisted.plugin import IPlugin
from txircd.module_interface import IMode, IModuleData, Mode, ModuleData
from txircd.utils import ModeType
from zope.interface import implements
class InvisibleMode(ModuleData, Mode):
implements(IPlugin, IModuleData, IMode)
name = "InvisibleMode"
core = True
affectedActions = [ "showchanneluser", "showuser" ]
def actions(self):
return [ ("modeactioncheck-user-i-showchanneluser", 1, self.isInvisibleChan),
("modeactioncheck-user-i-showuser", 1, self.isInvisibleUser) ]
def userModes(self):
return [ ("i", ModeType.NoParam, self) ]
def isInvisibleChan(self, user, channel, fromUser, userSeeing):
if "i" in user.modes:
return True
return None
def isInvisibleUser(self, user, fromUser, userSeeing):
if "i" in user.modes:
return True
return None
def apply(self, actionName, user, param, *params):
if actionName == "showchanneluser":
return self.applyChannels(user, *params)
return self.applyUsers(user, *params)
def applyChannels(self, user, channel, fromUser, sameUser):
if user != sameUser:
return None
if not channel or fromUser not in channel.users:
return False
return None
def applyUsers(self, user, fromUser, sameUser):
if user != sameUser:
return None
for channel in fromUser.channels:
if user in channel.users:
return None
return False
invisibleMode = InvisibleMode() | Make the invisible check action not necessarily require an accompanying channel | Make the invisible check action not necessarily require an accompanying channel
| Python | bsd-3-clause | Heufneutje/txircd,ElementalAlchemist/txircd | <REPLACE_OLD> "showchanneluser" <REPLACE_NEW> "showchanneluser", "showuser" <REPLACE_END> <REPLACE_OLD> self.isInvisible) <REPLACE_NEW> self.isInvisibleChan),
("modeactioncheck-user-i-showuser", 1, self.isInvisibleUser) <REPLACE_END> <REPLACE_OLD> isInvisible(self, <REPLACE_NEW> isInvisibleChan(self, <REPLACE_END> <INSERT> isInvisibleUser(self, user, fromUser, userSeeing):
if "i" in user.modes:
return True
return None
def <INSERT_END> <INSERT> *params):
if actionName == "showchanneluser":
return self.applyChannels(user, *params)
return self.applyUsers(user, *params)
def applyChannels(self, user, <INSERT_END> <REPLACE_OLD> None
invisibleMode <REPLACE_NEW> None
def applyUsers(self, user, fromUser, sameUser):
if user != sameUser:
return None
for channel in fromUser.channels:
if user in channel.users:
return None
return False
invisibleMode <REPLACE_END> <|endoftext|> from twisted.plugin import IPlugin
from txircd.module_interface import IMode, IModuleData, Mode, ModuleData
from txircd.utils import ModeType
from zope.interface import implements
class InvisibleMode(ModuleData, Mode):
implements(IPlugin, IModuleData, IMode)
name = "InvisibleMode"
core = True
affectedActions = [ "showchanneluser", "showuser" ]
def actions(self):
return [ ("modeactioncheck-user-i-showchanneluser", 1, self.isInvisibleChan),
("modeactioncheck-user-i-showuser", 1, self.isInvisibleUser) ]
def userModes(self):
return [ ("i", ModeType.NoParam, self) ]
def isInvisibleChan(self, user, channel, fromUser, userSeeing):
if "i" in user.modes:
return True
return None
def isInvisibleUser(self, user, fromUser, userSeeing):
if "i" in user.modes:
return True
return None
def apply(self, actionName, user, param, *params):
if actionName == "showchanneluser":
return self.applyChannels(user, *params)
return self.applyUsers(user, *params)
def applyChannels(self, user, channel, fromUser, sameUser):
if user != sameUser:
return None
if not channel or fromUser not in channel.users:
return False
return None
def applyUsers(self, user, fromUser, sameUser):
if user != sameUser:
return None
for channel in fromUser.channels:
if user in channel.users:
return None
return False
invisibleMode = InvisibleMode() | Make the invisible check action not necessarily require an accompanying channel
from twisted.plugin import IPlugin
from txircd.module_interface import IMode, IModuleData, Mode, ModuleData
from txircd.utils import ModeType
from zope.interface import implements
class InvisibleMode(ModuleData, Mode):
implements(IPlugin, IModuleData, IMode)
name = "InvisibleMode"
core = True
affectedActions = [ "showchanneluser" ]
def actions(self):
return [ ("modeactioncheck-user-i-showchanneluser", 1, self.isInvisible) ]
def userModes(self):
return [ ("i", ModeType.NoParam, self) ]
def isInvisible(self, user, channel, fromUser, userSeeing):
if "i" in user.modes:
return True
return None
def apply(self, actionName, user, param, channel, fromUser, sameUser):
if user != sameUser:
return None
if not channel or fromUser not in channel.users:
return False
return None
invisibleMode = InvisibleMode() |
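Condensed sketch of the user-level rule the umode_i split above introduces: a +i user is visible only to viewers sharing at least one channel. The helper name is mine and the objects are illustrative.

def visible_to(user, viewer):
    if "i" not in user.modes:
        return True
    return any(user in channel.users for channel in viewer.channels)
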
0388ab2bb8ad50aa40716a1c5f83f5e1f400bb32 | scripts/start_baxter.py | scripts/start_baxter.py | #!/usr/bin/python
from baxter_myo.arm_controller import ArmController
from baxter_myo.config_reader import ConfigReader
def main():
c = ConfigReader("demo_config")
c.parse_all()
s = ArmController('right', c.right_angles, c.push_thresh)
s.move_loop()
if __name__ == "__main__":
main()
| #!/usr/bin/python
import time
import rospy
from baxter_myo.arm_controller import ArmController
from baxter_myo.config_reader import ConfigReader
def main():
c = ConfigReader("demo_config")
c.parse_all()
s = ArmController('right', c.right_angles, c.push_thresh)
while not rospy.is_shutdown():
s.step()
if __name__ == "__main__":
main()
| Enable ctrl-c control with rospy | Enable ctrl-c control with rospy
| Python | mit | ipab-rad/baxter_myo,ipab-rad/myo_baxter_pc,ipab-rad/myo_baxter_pc,ipab-rad/baxter_myo | <REPLACE_OLD> #!/usr/bin/python
from <REPLACE_NEW> #!/usr/bin/python
import time
import rospy
from <REPLACE_END> <REPLACE_OLD> s.move_loop()
if <REPLACE_NEW> while not rospy.is_shutdown():
s.step()
if <REPLACE_END> <|endoftext|> #!/usr/bin/python
import time
import rospy
from baxter_myo.arm_controller import ArmController
from baxter_myo.config_reader import ConfigReader
def main():
c = ConfigReader("demo_config")
c.parse_all()
s = ArmController('right', c.right_angles, c.push_thresh)
while not rospy.is_shutdown():
s.step()
if __name__ == "__main__":
main()
| Enable ctrl-c control with rospy
#!/usr/bin/python
from baxter_myo.arm_controller import ArmController
from baxter_myo.config_reader import ConfigReader
def main():
c = ConfigReader("demo_config")
c.parse_all()
s = ArmController('right', c.right_angles, c.push_thresh)
s.move_loop()
if __name__ == "__main__":
main()
|
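A common variant of the shutdown-aware loop adopted above, pacing steps with rospy.Rate; the 100 Hz figure is illustrative and this assumes a ROS node is already initialized.

rate = rospy.Rate(100)  # Hz, illustrative control rate
while not rospy.is_shutdown():
    s.step()
    rate.sleep()
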
8be551ad39f3aedff5ea0ceb536378ea0e851864 | src/waldur_auth_openid/management/commands/import_openid_accounts.py | src/waldur_auth_openid/management/commands/import_openid_accounts.py | from __future__ import unicode_literals
from django.conf import settings
from django.contrib.auth import get_user_model
from django.db import transaction
from waldur_core.core.utils import DryRunCommand
User = get_user_model()
class Command(DryRunCommand):
help_text = 'Append civil number with country code for OpenID users.'
def handle(self, dry_run, *args, **options):
conf = settings.WALDUR_AUTH_OPENID
country_code = conf['COUNTRY_CODE']
registration_method = conf['NAME']
with transaction.atomic():
users = User.objects.filter(registration_method=registration_method)\
.exclude(civil_number__startswith=country_code)\
.exclude(civil_number='') \
.exclude(civil_number=None)
count = users.count()
if not dry_run:
for user in users:
user.civil_number = '%s%s' % (country_code, user.civil_number)
user.save(update_fields=['civil_number'])
self.stdout.write(self.style.SUCCESS('Civil numbers have been updated for %s users.' % count))
| from __future__ import unicode_literals
from django.conf import settings
from django.contrib.auth import get_user_model
from django.db import transaction
from waldur_core.core.utils import DryRunCommand
User = get_user_model()
class Command(DryRunCommand):
help_text = 'Append civil number with country code for OpenID users.'
def handle(self, dry_run, *args, **options):
conf = settings.WALDUR_AUTH_OPENID
country_code = conf['COUNTRY_CODE']
registration_method = conf['NAME']
with transaction.atomic():
users = User.objects.filter(registration_method=registration_method)\
.exclude(civil_number__startswith=country_code)\
.exclude(civil_number='') \
.exclude(civil_number=None)
count = users.count()
for user in users:
new_civil_number = '%s%s' % (country_code, user.civil_number)
self.stdout.write('Username: %s, before: %s, after: %s' % (
user.username, user.civil_number, new_civil_number))
if not dry_run:
user.civil_number = new_civil_number
user.save(update_fields=['civil_number'])
self.stdout.write(self.style.SUCCESS('Civil numbers have been updated for %s users.' % count))
| Print out civil_number before and after | Print out civil_number before and after [WAL-2172]
| Python | mit | opennode/waldur-mastermind,opennode/waldur-mastermind,opennode/nodeconductor-assembly-waldur,opennode/waldur-mastermind,opennode/nodeconductor-assembly-waldur,opennode/nodeconductor-assembly-waldur,opennode/waldur-mastermind | <DELETE> if not dry_run:
<DELETE_END> <REPLACE_OLD> user.civil_number <REPLACE_NEW> new_civil_number <REPLACE_END> <INSERT> self.stdout.write('Username: %s, before: %s, after: %s' % (
user.username, user.civil_number, new_civil_number))
if not dry_run:
user.civil_number = new_civil_number
<INSERT_END> <|endoftext|> from __future__ import unicode_literals
from django.conf import settings
from django.contrib.auth import get_user_model
from django.db import transaction
from waldur_core.core.utils import DryRunCommand
User = get_user_model()
class Command(DryRunCommand):
help_text = 'Append civil number with country code for OpenID users.'
def handle(self, dry_run, *args, **options):
conf = settings.WALDUR_AUTH_OPENID
country_code = conf['COUNTRY_CODE']
registration_method = conf['NAME']
with transaction.atomic():
users = User.objects.filter(registration_method=registration_method)\
.exclude(civil_number__startswith=country_code)\
.exclude(civil_number='') \
.exclude(civil_number=None)
count = users.count()
for user in users:
new_civil_number = '%s%s' % (country_code, user.civil_number)
self.stdout.write('Username: %s, before: %s, after: %s' % (
user.username, user.civil_number, new_civil_number))
if not dry_run:
user.civil_number = new_civil_number
user.save(update_fields=['civil_number'])
self.stdout.write(self.style.SUCCESS('Civil numbers have been updated for %s users.' % count))
| Print out civil_number before and after [WAL-2172]
from __future__ import unicode_literals
from django.conf import settings
from django.contrib.auth import get_user_model
from django.db import transaction
from waldur_core.core.utils import DryRunCommand
User = get_user_model()
class Command(DryRunCommand):
help_text = 'Append civil number with country code for OpenID users.'
def handle(self, dry_run, *args, **options):
conf = settings.WALDUR_AUTH_OPENID
country_code = conf['COUNTRY_CODE']
registration_method = conf['NAME']
with transaction.atomic():
users = User.objects.filter(registration_method=registration_method)\
.exclude(civil_number__startswith=country_code)\
.exclude(civil_number='') \
.exclude(civil_number=None)
count = users.count()
if not dry_run:
for user in users:
user.civil_number = '%s%s' % (country_code, user.civil_number)
user.save(update_fields=['civil_number'])
self.stdout.write(self.style.SUCCESS('Civil numbers have been updated for %s users.' % count))
|
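The audit line the dry run above now emits, shown standalone with made-up values:

country_code, civil_number = 'EE', '38001010000'
print('Username: %s, before: %s, after: %s' % (
    'alice', civil_number, country_code + civil_number))
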
9e413449f6f85e0cf9465762e31e8f251e14c23e | spacy/tests/regression/test_issue1537.py | spacy/tests/regression/test_issue1537.py | '''Test Span.as_doc() doesn't segfault'''
from ...tokens import Doc
from ...vocab import Vocab
from ... import load as load_spacy
def test_issue1537():
string = 'The sky is blue . The man is pink . The dog is purple .'
doc = Doc(Vocab(), words=string.split())
doc[0].sent_start = True
for word in doc[1:]:
if word.nbor(-1).text == '.':
word.sent_start = True
else:
word.sent_start = False
sents = list(doc.sents)
sent0 = sents[0].as_doc()
sent1 = sents[1].as_doc()
assert isinstance(sent0, Doc)
assert isinstance(sent1, Doc)
# Currently segfaulting, due to l_edge and r_edge misalignment
#def test_issue1537_model():
# nlp = load_spacy('en')
# doc = nlp(u'The sky is blue. The man is pink. The dog is purple.')
# sents = [s.as_doc() for s in doc.sents]
# print(list(sents[0].noun_chunks))
# print(list(sents[1].noun_chunks))
| '''Test Span.as_doc() doesn't segfault'''
from __future__ import unicode_literals
from ...tokens import Doc
from ...vocab import Vocab
from ... import load as load_spacy
def test_issue1537():
string = 'The sky is blue . The man is pink . The dog is purple .'
doc = Doc(Vocab(), words=string.split())
doc[0].sent_start = True
for word in doc[1:]:
if word.nbor(-1).text == '.':
word.sent_start = True
else:
word.sent_start = False
sents = list(doc.sents)
sent0 = sents[0].as_doc()
sent1 = sents[1].as_doc()
assert isinstance(sent0, Doc)
assert isinstance(sent1, Doc)
# Currently segfaulting, due to l_edge and r_edge misalignment
#def test_issue1537_model():
# nlp = load_spacy('en')
# doc = nlp(u'The sky is blue. The man is pink. The dog is purple.')
# sents = [s.as_doc() for s in doc.sents]
# print(list(sents[0].noun_chunks))
# print(list(sents[1].noun_chunks))
| Fix unicode error in new test | Fix unicode error in new test
| Python | mit | spacy-io/spaCy,explosion/spaCy,explosion/spaCy,spacy-io/spaCy,recognai/spaCy,spacy-io/spaCy,explosion/spaCy,explosion/spaCy,honnibal/spaCy,honnibal/spaCy,aikramer2/spaCy,recognai/spaCy,honnibal/spaCy,recognai/spaCy,explosion/spaCy,recognai/spaCy,aikramer2/spaCy,recognai/spaCy,aikramer2/spaCy,honnibal/spaCy,aikramer2/spaCy,spacy-io/spaCy,spacy-io/spaCy,aikramer2/spaCy,aikramer2/spaCy,spacy-io/spaCy,explosion/spaCy,recognai/spaCy | <INSERT> __future__ import unicode_literals
from <INSERT_END> <|endoftext|> '''Test Span.as_doc() doesn't segfault'''
from __future__ import unicode_literals
from ...tokens import Doc
from ...vocab import Vocab
from ... import load as load_spacy
def test_issue1537():
string = 'The sky is blue . The man is pink . The dog is purple .'
doc = Doc(Vocab(), words=string.split())
doc[0].sent_start = True
for word in doc[1:]:
if word.nbor(-1).text == '.':
word.sent_start = True
else:
word.sent_start = False
sents = list(doc.sents)
sent0 = sents[0].as_doc()
sent1 = sents[1].as_doc()
assert isinstance(sent0, Doc)
assert isinstance(sent1, Doc)
# Currently segfaulting, due to l_edge and r_edge misalignment
#def test_issue1537_model():
# nlp = load_spacy('en')
# doc = nlp(u'The sky is blue. The man is pink. The dog is purple.')
# sents = [s.as_doc() for s in doc.sents]
# print(list(sents[0].noun_chunks))
# print(list(sents[1].noun_chunks))
| Fix unicode error in new test
'''Test Span.as_doc() doesn't segfault'''
from ...tokens import Doc
from ...vocab import Vocab
from ... import load as load_spacy
def test_issue1537():
string = 'The sky is blue . The man is pink . The dog is purple .'
doc = Doc(Vocab(), words=string.split())
doc[0].sent_start = True
for word in doc[1:]:
if word.nbor(-1).text == '.':
word.sent_start = True
else:
word.sent_start = False
sents = list(doc.sents)
sent0 = sents[0].as_doc()
sent1 = sents[1].as_doc()
assert isinstance(sent0, Doc)
assert isinstance(sent1, Doc)
# Currently segfaulting, due to l_edge and r_edge misalignment
#def test_issue1537_model():
# nlp = load_spacy('en')
# doc = nlp(u'The sky is blue. The man is pink. The dog is purple.')
# sents = [s.as_doc() for s in doc.sents]
# print(list(sents[0].noun_chunks))
# print(list(sents[1].noun_chunks))
|
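Why the added __future__ import above matters on Python 2: plain string literals in the test become unicode, matching what spaCy's API expects. Minimal sketch:

from __future__ import unicode_literals
text = 'The sky is blue .'
assert isinstance(text, type(u''))  # holds on Python 2 and 3 alike
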
321924fff843896fc67d3a4594d635546cf90bec | mycli/packages/expanded.py | mycli/packages/expanded.py | from .tabulate import _text_type
def pad(field, total, char=u" "):
return field + (char * (total - len(field)))
def get_separator(num, header_len, data_len):
sep = u"***************************[ %d. row ]***************************\n" % (num + 1)
return sep
def expanded_table(rows, headers):
header_len = max([len(x) for x in headers])
max_row_len = 0
results = []
padded_headers = [pad(x, header_len) + u" |" for x in headers]
header_len += 2
for row in rows:
row_len = max([len(_text_type(x)) for x in row])
row_result = []
if row_len > max_row_len:
max_row_len = row_len
for header, value in zip(padded_headers, row):
row_result.append(u"%s %s" % (header, value))
results.append('\n'.join(row_result))
output = []
for i, result in enumerate(results):
output.append(get_separator(i, header_len, max_row_len))
output.append(result)
output.append('\n')
return ''.join(output)
| from .tabulate import _text_type
def pad(field, total, char=u" "):
return field + (char * (total - len(field)))
def get_separator(num, header_len, data_len):
sep = u"***************************[ %d. row ]***************************\n" % (num + 1)
return sep
def expanded_table(rows, headers):
header_len = max([len(x) for x in headers])
max_row_len = 0
results = []
padded_headers = [pad(x, header_len) + u" |" for x in headers]
header_len += 2
for row in rows:
row_len = max([len(_text_type(x)) for x in row])
row_result = []
if row_len > max_row_len:
max_row_len = row_len
for header, value in zip(padded_headers, row):
if value is None: value = '<null>'
row_result.append(u"%s %s" % (header, value))
results.append('\n'.join(row_result))
output = []
for i, result in enumerate(results):
output.append(get_separator(i, header_len, max_row_len))
output.append(result)
output.append('\n')
return ''.join(output)
| Make the null value consistent between vertical and tabular output. | Make the null value consistent between vertical and tabular output.
| Python | bsd-3-clause | MnO2/rediscli,martijnengler/mycli,qbdsoft/mycli,j-bennet/mycli,D-e-e-m-o/mycli,mdsrosa/mycli,martijnengler/mycli,mattn/mycli,jinstrive/mycli,webwlsong/mycli,brewneaux/mycli,danieljwest/mycli,thanatoskira/mycli,ksmaheshkumar/mycli,brewneaux/mycli,MnO2/rediscli,evook/mycli,webwlsong/mycli,j-bennet/mycli,evook/mycli,suzukaze/mycli,chenpingzhao/mycli,shoma/mycli,ZuoGuocai/mycli,tkuipers/mycli,ksmaheshkumar/mycli,mattn/mycli,ZuoGuocai/mycli,douglasvegas/mycli,oguzy/mycli,nkhuyu/mycli,oguzy/mycli,douglasvegas/mycli,shoma/mycli,qbdsoft/mycli,tkuipers/mycli,danieljwest/mycli,suzukaze/mycli,mdsrosa/mycli,jinstrive/mycli,chenpingzhao/mycli,nkhuyu/mycli,D-e-e-m-o/mycli,thanatoskira/mycli | <INSERT> if value is None: value = '<null>'
<INSERT_END> <|endoftext|> from .tabulate import _text_type
def pad(field, total, char=u" "):
return field + (char * (total - len(field)))
def get_separator(num, header_len, data_len):
sep = u"***************************[ %d. row ]***************************\n" % (num + 1)
return sep
def expanded_table(rows, headers):
header_len = max([len(x) for x in headers])
max_row_len = 0
results = []
padded_headers = [pad(x, header_len) + u" |" for x in headers]
header_len += 2
for row in rows:
row_len = max([len(_text_type(x)) for x in row])
row_result = []
if row_len > max_row_len:
max_row_len = row_len
for header, value in zip(padded_headers, row):
if value is None: value = '<null>'
row_result.append(u"%s %s" % (header, value))
results.append('\n'.join(row_result))
output = []
for i, result in enumerate(results):
output.append(get_separator(i, header_len, max_row_len))
output.append(result)
output.append('\n')
return ''.join(output)
| Make the null value consistent between vertical and tabular output.
from .tabulate import _text_type
def pad(field, total, char=u" "):
return field + (char * (total - len(field)))
def get_separator(num, header_len, data_len):
sep = u"***************************[ %d. row ]***************************\n" % (num + 1)
return sep
def expanded_table(rows, headers):
header_len = max([len(x) for x in headers])
max_row_len = 0
results = []
padded_headers = [pad(x, header_len) + u" |" for x in headers]
header_len += 2
for row in rows:
row_len = max([len(_text_type(x)) for x in row])
row_result = []
if row_len > max_row_len:
max_row_len = row_len
for header, value in zip(padded_headers, row):
row_result.append(u"%s %s" % (header, value))
results.append('\n'.join(row_result))
output = []
for i, result in enumerate(results):
output.append(get_separator(i, header_len, max_row_len))
output.append(result)
output.append('\n')
return ''.join(output)
|
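Usage sketch for the expanded_table change above; the column names are illustrative.

print(expanded_table([(1, None)], ['id', 'name']))
# The None field now renders as <null> instead of the string "None",
# matching the tabular output.
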
239f24cc5dc5c0f25436ca1bfcfc536e30d62587 | menu_generator/templatetags/menu_generator.py | menu_generator/templatetags/menu_generator.py | from django import template
from django.conf import settings
from .utils import get_menu_from_apps
from .. import defaults
from ..menu import generate_menu
register = template.Library()
@register.assignment_tag(takes_context=True)
def get_menu(context, menu_name):
"""
Returns a consumable menu list for a given menu_name found in settings.py.
Else it returns an empty list.
Update, March 18 2017: Now the function get the menu list from settings and append more items if found on the
menus.py's 'MENUS' dict.
:param context: Template context
:param menu_name: String, name of the menu to be found
:return: Generated menu
"""
menu_list = getattr(settings, menu_name, defaults.MENU_NOT_FOUND)
menu_from_apps = get_menu_from_apps(menu_name)
# If there isn't a menu on settings but there is menu from apps we built menu from apps
if menu_list == defaults.MENU_NOT_FOUND and menu_from_apps:
menu_list = menu_from_apps
# It there is a menu on settings and also on apps we merge both menus
elif menu_list != defaults.MENU_NOT_FOUND and menu_from_apps:
menu_list += menu_from_apps
return generate_menu(context['request'], menu_list)
| from django import template
from django.conf import settings
from .utils import get_menu_from_apps
from .. import defaults
from ..menu import generate_menu
register = template.Library()
@register.simple_tag(takes_context=True)
def get_menu(context, menu_name):
"""
Returns a consumable menu list for a given menu_name found in settings.py.
Else it returns an empty list.
Update, March 18 2017: Now the function get the menu list from settings and append more items if found on the
menus.py's 'MENUS' dict.
:param context: Template context
:param menu_name: String, name of the menu to be found
:return: Generated menu
"""
menu_list = getattr(settings, menu_name, defaults.MENU_NOT_FOUND)
menu_from_apps = get_menu_from_apps(menu_name)
# If there isn't a menu on settings but there is menu from apps we built menu from apps
if menu_list == defaults.MENU_NOT_FOUND and menu_from_apps:
menu_list = menu_from_apps
# It there is a menu on settings and also on apps we merge both menus
elif menu_list != defaults.MENU_NOT_FOUND and menu_from_apps:
menu_list += menu_from_apps
return generate_menu(context['request'], menu_list)
| Use simple_tag instead of assignment_tag | Use simple_tag instead of assignment_tag
The assignment_tag is depraceted and in django-2.0 removed.
Signed-off-by: Frantisek Lachman <[email protected]>
| Python | mit | yamijuan/django-menu-generator,RADYConsultores/django-menu-generator | <REPLACE_OLD> template.Library()
@register.assignment_tag(takes_context=True)
def <REPLACE_NEW> template.Library()
@register.simple_tag(takes_context=True)
def <REPLACE_END> <|endoftext|> from django import template
from django.conf import settings
from .utils import get_menu_from_apps
from .. import defaults
from ..menu import generate_menu
register = template.Library()
@register.simple_tag(takes_context=True)
def get_menu(context, menu_name):
"""
Returns a consumable menu list for a given menu_name found in settings.py.
Else it returns an empty list.
Update, March 18 2017: Now the function get the menu list from settings and append more items if found on the
menus.py's 'MENUS' dict.
:param context: Template context
:param menu_name: String, name of the menu to be found
:return: Generated menu
"""
menu_list = getattr(settings, menu_name, defaults.MENU_NOT_FOUND)
menu_from_apps = get_menu_from_apps(menu_name)
    # If there isn't a menu on settings but there is a menu from apps we build the menu from apps
if menu_list == defaults.MENU_NOT_FOUND and menu_from_apps:
menu_list = menu_from_apps
    # If there is a menu on settings and also on apps we merge both menus
elif menu_list != defaults.MENU_NOT_FOUND and menu_from_apps:
menu_list += menu_from_apps
return generate_menu(context['request'], menu_list)
| Use simple_tag instead of assignment_tag
The assignment_tag is deprecated and removed in django-2.0.
Signed-off-by: Frantisek Lachman <[email protected]>
from django import template
from django.conf import settings
from .utils import get_menu_from_apps
from .. import defaults
from ..menu import generate_menu
register = template.Library()
@register.assignment_tag(takes_context=True)
def get_menu(context, menu_name):
"""
Returns a consumable menu list for a given menu_name found in settings.py.
Else it returns an empty list.
    Update, March 18 2017: Now the function gets the menu list from settings and appends more items if found in the
menus.py's 'MENUS' dict.
:param context: Template context
:param menu_name: String, name of the menu to be found
:return: Generated menu
"""
menu_list = getattr(settings, menu_name, defaults.MENU_NOT_FOUND)
menu_from_apps = get_menu_from_apps(menu_name)
    # If there isn't a menu on settings but there is a menu from apps we build the menu from apps
if menu_list == defaults.MENU_NOT_FOUND and menu_from_apps:
menu_list = menu_from_apps
    # If there is a menu on settings and also on apps we merge both menus
elif menu_list != defaults.MENU_NOT_FOUND and menu_from_apps:
menu_list += menu_from_apps
return generate_menu(context['request'], menu_list)
|
342e6134a63c5b575ae8e4348a54f61350bca2da | parser/crimeparser/pipelinesEnricher.py | parser/crimeparser/pipelinesEnricher.py | from geopy import Nominatim
from geopy.extra.rate_limiter import RateLimiter
class GeoCodePipeline(object):
def open_spider(self, spider):
geolocator = Nominatim(timeout=5)
self.__geocodeFunc = RateLimiter(geolocator.geocode, min_delay_seconds=2)
def process_item(self, item, spider):
for crime in item["crimes"]:
place = crime["place"]
latitude, longitude = self.__geocode_address(place)
crime["latitude"] = latitude
crime["longitude"] = longitude
return item
def __geocode_address(self, place):
if place is None:
return None, None
location = self.__geocodeFunc(place)
if location is not None:
return location.latitude, location.longitude
else:
return None, None
| from geopy import Nominatim, Photon
from geopy.extra.rate_limiter import RateLimiter
class GeoCodePipeline(object):
def open_spider(self, spider):
geolocator = Photon(timeout=5)
self.__geocodeFunc = RateLimiter(geolocator.geocode, min_delay_seconds=2)
def process_item(self, item, spider):
for crime in item["crimes"]:
place = crime["place"]
latitude, longitude = self.__geocode_address(place)
crime["latitude"] = latitude
crime["longitude"] = longitude
return item
def __geocode_address(self, place):
if place is None:
return None, None
location = self.__geocodeFunc(place)
if location is not None:
return location.latitude, location.longitude
else:
return None, None
| Use Photon instead of Nominatim for geo coding | Use Photon instead of Nominatim for geo coding
Photon is more fault tolerant to spelling mistakes.
| Python | mit | aberklotz/crimereport,aberklotz/crimereport,aberklotz/crimereport | <REPLACE_OLD> Nominatim
from <REPLACE_NEW> Nominatim, Photon
from <REPLACE_END> <REPLACE_OLD> Nominatim(timeout=5)
<REPLACE_NEW> Photon(timeout=5)
<REPLACE_END> <|endoftext|> from geopy import Nominatim, Photon
from geopy.extra.rate_limiter import RateLimiter
class GeoCodePipeline(object):
def open_spider(self, spider):
geolocator = Photon(timeout=5)
self.__geocodeFunc = RateLimiter(geolocator.geocode, min_delay_seconds=2)
def process_item(self, item, spider):
for crime in item["crimes"]:
place = crime["place"]
latitude, longitude = self.__geocode_address(place)
crime["latitude"] = latitude
crime["longitude"] = longitude
return item
def __geocode_address(self, place):
if place is None:
return None, None
location = self.__geocodeFunc(place)
if location is not None:
return location.latitude, location.longitude
else:
return None, None
| Use Phonon instead of Nominatim for geo coding
Phonon is more fault tolerant to spelling mistakes.
from geopy import Nominatim
from geopy.extra.rate_limiter import RateLimiter
class GeoCodePipeline(object):
def open_spider(self, spider):
geolocator = Nominatim(timeout=5)
self.__geocodeFunc = RateLimiter(geolocator.geocode, min_delay_seconds=2)
def process_item(self, item, spider):
for crime in item["crimes"]:
place = crime["place"]
latitude, longitude = self.__geocode_address(place)
crime["latitude"] = latitude
crime["longitude"] = longitude
return item
def __geocode_address(self, place):
if place is None:
return None, None
location = self.__geocodeFunc(place)
if location is not None:
return location.latitude, location.longitude
else:
return None, None
|
80a1912ce69fd356d6c54bb00f946fbc7874a9ce | bluecanary/set_cloudwatch_alarm.py | bluecanary/set_cloudwatch_alarm.py | import boto3
from bluecanary.exceptions import NamespaceError
from bluecanary.utilities import throttle
@throttle()
def set_cloudwatch_alarm(identifier, **kwargs):
if not kwargs.get('Dimensions'):
kwargs['Dimensions'] = _get_dimensions(identifier, **kwargs)
if not kwargs.get('AlarmName'):
kwargs['AlarmName'] = '{}_{}'.format(identifier, kwargs.get('MetricName'))
cloudwatch_client = boto3.client('cloudwatch')
return cloudwatch_client.put_metric_alarm(**kwargs)
def _get_dimensions(identifier, **kwargs):
base_dimensions = {
'AWS/ELB': [{u'Name': 'LoadBalancerName', u'Value': identifier}],
'AWS/EC2': [{u'Name': 'InstanceId', u'Value': identifier}],
}
try:
return base_dimensions[kwargs.get('Namespace')]
except KeyError:
message = ('Namespace "{}" is not supported by Blue Canary. '
'If you are using a plugin that supports this Namespace '
'please ensure that the plugin alarm class does not return '
'None when calling the "get_dimensions" method.'
.format(kwargs.get('Namespace')))
raise NamespaceError(message)
| import boto3
from bluecanary.exceptions import NamespaceError
from bluecanary.utilities import throttle
@throttle()
def set_cloudwatch_alarm(identifier, **kwargs):
if not kwargs.get('Dimensions'):
kwargs['Dimensions'] = _get_dimensions(identifier, **kwargs)
if not kwargs.get('AlarmName'):
kwargs['AlarmName'] = '{}_{}'.format(identifier,
kwargs.get('MetricName'))
if kwargs.get('AlarmNameModifier'):
kwargs['AlarmName'] = '{}_{}'.format(kwargs.get('AlarmName'),
kwargs.get('AlarmNameModifier'))
del(kwargs['AlarmNameModifier'])
cloudwatch_client = boto3.client('cloudwatch')
return cloudwatch_client.put_metric_alarm(**kwargs)
def _get_dimensions(identifier, **kwargs):
base_dimensions = {
'AWS/ELB': [{u'Name': 'LoadBalancerName', u'Value': identifier}],
'AWS/EC2': [{u'Name': 'InstanceId', u'Value': identifier}],
}
try:
return base_dimensions[kwargs.get('Namespace')]
except KeyError:
message = ('Namespace "{}" is not supported by Blue Canary. '
'If you are using a plugin that supports this Namespace '
'please ensure that the plugin alarm class does not return '
'None when calling the "get_dimensions" method.'
.format(kwargs.get('Namespace')))
raise NamespaceError(message)
| Allow multiple alarms for same metric type | Allow multiple alarms for same metric type
| Python | mit | voxy/bluecanary | <REPLACE_OLD> '{}_{}'.format(identifier, <REPLACE_NEW> '{}_{}'.format(identifier,
<REPLACE_END> <INSERT> if kwargs.get('AlarmNameModifier'):
kwargs['AlarmName'] = '{}_{}'.format(kwargs.get('AlarmName'),
kwargs.get('AlarmNameModifier'))
del(kwargs['AlarmNameModifier'])
<INSERT_END> <|endoftext|> import boto3
from bluecanary.exceptions import NamespaceError
from bluecanary.utilities import throttle
@throttle()
def set_cloudwatch_alarm(identifier, **kwargs):
if not kwargs.get('Dimensions'):
kwargs['Dimensions'] = _get_dimensions(identifier, **kwargs)
if not kwargs.get('AlarmName'):
kwargs['AlarmName'] = '{}_{}'.format(identifier,
kwargs.get('MetricName'))
if kwargs.get('AlarmNameModifier'):
kwargs['AlarmName'] = '{}_{}'.format(kwargs.get('AlarmName'),
kwargs.get('AlarmNameModifier'))
del(kwargs['AlarmNameModifier'])
cloudwatch_client = boto3.client('cloudwatch')
return cloudwatch_client.put_metric_alarm(**kwargs)
def _get_dimensions(identifier, **kwargs):
base_dimensions = {
'AWS/ELB': [{u'Name': 'LoadBalancerName', u'Value': identifier}],
'AWS/EC2': [{u'Name': 'InstanceId', u'Value': identifier}],
}
try:
return base_dimensions[kwargs.get('Namespace')]
except KeyError:
message = ('Namespace "{}" is not supported by Blue Canary. '
'If you are using a plugin that supports this Namespace '
'please ensure that the plugin alarm class does not return '
'None when calling the "get_dimensions" method.'
.format(kwargs.get('Namespace')))
raise NamespaceError(message)
| Allow multiple alarms for same metric type
import boto3
from bluecanary.exceptions import NamespaceError
from bluecanary.utilities import throttle
@throttle()
def set_cloudwatch_alarm(identifier, **kwargs):
if not kwargs.get('Dimensions'):
kwargs['Dimensions'] = _get_dimensions(identifier, **kwargs)
if not kwargs.get('AlarmName'):
kwargs['AlarmName'] = '{}_{}'.format(identifier, kwargs.get('MetricName'))
cloudwatch_client = boto3.client('cloudwatch')
return cloudwatch_client.put_metric_alarm(**kwargs)
def _get_dimensions(identifier, **kwargs):
base_dimensions = {
'AWS/ELB': [{u'Name': 'LoadBalancerName', u'Value': identifier}],
'AWS/EC2': [{u'Name': 'InstanceId', u'Value': identifier}],
}
try:
return base_dimensions[kwargs.get('Namespace')]
except KeyError:
message = ('Namespace "{}" is not supported by Blue Canary. '
'If you are using a plugin that supports this Namespace '
'please ensure that the plugin alarm class does not return '
'None when calling the "get_dimensions" method.'
.format(kwargs.get('Namespace')))
raise NamespaceError(message)
|
51965dc3b26abfd0f9fb730c3076ee16b13612bc | dadi/__init__.py | dadi/__init__.py | """
For examples of dadi's usage, see the examples directory in the source
distribution.
Documentation of all methods can be found in doc/api/index.html of the source
distribution.
"""
import logging
logging.basicConfig()
import Demographics1D
import Demographics2D
import Inference
import Integration
import Misc
import Numerics
import PhiManip
# Protect import of Plotting in case matplotlib not installed.
try:
import Plotting
except ImportError:
pass
# We do it this way so it's easier to reload.
import Spectrum_mod
Spectrum = Spectrum_mod.Spectrum
try:
# This is to try and ensure we have a nice __SVNVERSION__ attribute, so
# when we get bug reports, we know what version they were using. The
# svnversion file is created by setup.py.
import os
_directory = os.path.dirname(Integration.__file__)
_svn_file = os.path.join(_directory, 'svnversion')
__SVNVERSION__ = file(_svn_file).read().strip()
except:
__SVNVERSION__ = 'Unknown'
| """
For examples of dadi's usage, see the examples directory in the source
distribution.
Documentation of all methods can be found in doc/api/index.html of the source
distribution.
"""
import logging
logging.basicConfig()
import Demographics1D
import Demographics2D
import Inference
import Integration
import Misc
import Numerics
import PhiManip
# Protect import of Plotting in case matplotlib not installed.
try:
import Plotting
except ImportError:
pass
# We do it this way so it's easier to reload.
import Spectrum_mod
Spectrum = Spectrum_mod.Spectrum
try:
# This is to try and ensure we have a nice __SVNVERSION__ attribute, so
# when we get bug reports, we know what version they were using. The
# svnversion file is created by setup.py.
import os
_directory = os.path.dirname(Integration.__file__)
_svn_file = os.path.join(_directory, 'svnversion')
__SVNVERSION__ = file(_svn_file).read().strip()
except:
__SVNVERSION__ = 'Unknown'
# When doing arithmetic with Spectrum objects (which are masked arrays), we
# often have masked values which generate annoying arithmetic warnings. Here
# we tell numpy to ignore such warnings. This puts greater onus on the user to
# check results, but for our use case I think it's the better default.
import numpy
numpy.seterr(all='ignore')
| Hide spurious numpy warnings about divide by zeros and nans. | Hide spurious numpy warnings about divide by zeros and nans.
git-svn-id: 4c7b13231a96299fde701bb5dec4bd2aaf383fc6@429 979d6bd5-6d4d-0410-bece-f567c23bd345
| Python | bsd-3-clause | yangjl/dadi,ChenHsiang/dadi,cheese1213/dadi,ChenHsiang/dadi,RyanGutenkunst/dadi,cheese1213/dadi,niuhuifei/dadi,paulirish/dadi,beni55/dadi,yangjl/dadi,RyanGutenkunst/dadi,niuhuifei/dadi,beni55/dadi,paulirish/dadi | <REPLACE_OLD> 'Unknown'
<REPLACE_NEW> 'Unknown'
# When doing arithmetic with Spectrum objects (which are masked arrays), we
# often have masked values which generate annoying arithmetic warnings. Here
# we tell numpy to ignore such warnings. This puts greater onus on the user to
# check results, but for our use case I think it's the better default.
import numpy
numpy.seterr(all='ignore')
<REPLACE_END> <|endoftext|> """
For examples of dadi's usage, see the examples directory in the source
distribution.
Documentation of all methods can be found in doc/api/index.html of the source
distribution.
"""
import logging
logging.basicConfig()
import Demographics1D
import Demographics2D
import Inference
import Integration
import Misc
import Numerics
import PhiManip
# Protect import of Plotting in case matplotlib not installed.
try:
import Plotting
except ImportError:
pass
# We do it this way so it's easier to reload.
import Spectrum_mod
Spectrum = Spectrum_mod.Spectrum
try:
# This is to try and ensure we have a nice __SVNVERSION__ attribute, so
# when we get bug reports, we know what version they were using. The
# svnversion file is created by setup.py.
import os
_directory = os.path.dirname(Integration.__file__)
_svn_file = os.path.join(_directory, 'svnversion')
__SVNVERSION__ = file(_svn_file).read().strip()
except:
__SVNVERSION__ = 'Unknown'
# When doing arithmetic with Spectrum objects (which are masked arrays), we
# often have masked values which generate annoying arithmetic warnings. Here
# we tell numpy to ignore such warnings. This puts greater onus on the user to
# check results, but for our use case I think it's the better default.
import numpy
numpy.seterr(all='ignore')
| Hide spurious numpy warnings about divide by zeros and nans.
git-svn-id: 4c7b13231a96299fde701bb5dec4bd2aaf383fc6@429 979d6bd5-6d4d-0410-bece-f567c23bd345
"""
For examples of dadi's usage, see the examples directory in the source
distribution.
Documentation of all methods can be found in doc/api/index.html of the source
distribution.
"""
import logging
logging.basicConfig()
import Demographics1D
import Demographics2D
import Inference
import Integration
import Misc
import Numerics
import PhiManip
# Protect import of Plotting in case matplotlib not installed.
try:
import Plotting
except ImportError:
pass
# We do it this way so it's easier to reload.
import Spectrum_mod
Spectrum = Spectrum_mod.Spectrum
try:
# This is to try and ensure we have a nice __SVNVERSION__ attribute, so
# when we get bug reports, we know what version they were using. The
# svnversion file is created by setup.py.
import os
_directory = os.path.dirname(Integration.__file__)
_svn_file = os.path.join(_directory, 'svnversion')
__SVNVERSION__ = file(_svn_file).read().strip()
except:
__SVNVERSION__ = 'Unknown'
|
23675e41656cac48f390d97f065b36de39e27d58 | duckbot.py | duckbot.py | import discord
import duckbot_settings
import random
from discord.ext import commands
_DESCRIPTION = '''quack'''
bot = commands.Bot(command_prefix='/', description=_DESCRIPTION)
@bot.event
async def on_ready():
print('logged in: %s (%s)' % (bot.user.name, bot.user.id))
oauth_url = discord.utils.oauth_url(duckbot_settings.CLIENT_ID, permissions=discord.Permissions.text())
print('invite me: %s' % oauth_url)
print('Channels:')
channels = bot.get_all_channels()
for channel in channels:
print('%s (%s)' % (channel.name, channel.id))
if channel.name == 'botspam':
await bot.send_message(channel, 'quack!! (ready to roll)')
@bot.command()
async def roll():
await bot.say('pretending to roll')
bot.run(duckbot_settings.TOKEN)
| import discord
import duckbot_settings
import random
from discord.ext import commands
_DESCRIPTION = '''quack'''
bot = commands.Bot(command_prefix='/', description=_DESCRIPTION)
rand = random.SystemRandom()
@bot.event
async def on_ready():
print('logged in: %s (%s)' % (bot.user.name, bot.user.id))
oauth_url = discord.utils.oauth_url(duckbot_settings.CLIENT_ID, permissions=discord.Permissions.text())
print('invite me: %s' % oauth_url)
print('Channels:')
channels = bot.get_all_channels()
for channel in channels:
print('%s (%s)' % (channel.name, channel.id))
if channel.name == 'botspam':
await bot.send_message(channel, 'quack!! (ready to roll)')
@bot.command()
async def roll():
lower_bound = 1
    upper_bound = 6
await bot.say('🎲 (%d-%d): %d' % (lower_bound, upper_bound, rand.randint(lower_bound, upper_bound)))
bot.run(duckbot_settings.TOKEN)
| Add a real roll command | Add a real roll command
| Python | mit | andrewlin16/duckbot,andrewlin16/duckbot | <REPLACE_OLD> description=_DESCRIPTION)
@bot.event
async <REPLACE_NEW> description=_DESCRIPTION)
rand = random.SystemRandom()
@bot.event
async <REPLACE_END> <REPLACE_OLD> roll():
await bot.say('pretending to roll')
bot.run(duckbot_settings.TOKEN)
<REPLACE_NEW> roll():
lower_bound = 1
    upper_bound = 6
await bot.say('🎲 (%d-%d): %d' % (lower_bound, upper_bound, rand.randint(lower_bound, upper_bound)))
bot.run(duckbot_settings.TOKEN)
<REPLACE_END> <|endoftext|> import discord
import duckbot_settings
import random
from discord.ext import commands
_DESCRIPTION = '''quack'''
bot = commands.Bot(command_prefix='/', description=_DESCRIPTION)
rand = random.SystemRandom()
@bot.event
async def on_ready():
print('logged in: %s (%s)' % (bot.user.name, bot.user.id))
oauth_url = discord.utils.oauth_url(duckbot_settings.CLIENT_ID, permissions=discord.Permissions.text())
print('invite me: %s' % oauth_url)
print('Channels:')
channels = bot.get_all_channels()
for channel in channels:
print('%s (%s)' % (channel.name, channel.id))
if channel.name == 'botspam':
await bot.send_message(channel, 'quack!! (ready to roll)')
@bot.command()
async def roll():
lower_bound = 1
    upper_bound = 6
await bot.say('🎲 (%d-%d): %d' % (lower_bound, upper_bound, rand.randint(lower_bound, upper_bound)))
bot.run(duckbot_settings.TOKEN)
| Add a real roll command
import discord
import duckbot_settings
import random
from discord.ext import commands
_DESCRIPTION = '''quack'''
bot = commands.Bot(command_prefix='/', description=_DESCRIPTION)
@bot.event
async def on_ready():
print('logged in: %s (%s)' % (bot.user.name, bot.user.id))
oauth_url = discord.utils.oauth_url(duckbot_settings.CLIENT_ID, permissions=discord.Permissions.text())
print('invite me: %s' % oauth_url)
print('Channels:')
channels = bot.get_all_channels()
for channel in channels:
print('%s (%s)' % (channel.name, channel.id))
if channel.name == 'botspam':
await bot.send_message(channel, 'quack!! (ready to roll)')
@bot.command()
async def roll():
await bot.say('pretending to roll')
bot.run(duckbot_settings.TOKEN)
|
5cfcf2615e46fc3ef550159e38dc51c7362543af | readux/books/management/commands/web_export.py | readux/books/management/commands/web_export.py | from eulfedora.server import Repository
from django.core.management.base import BaseCommand
import shutil
from readux.books import annotate, export
from readux.books.models import Volume
class Command(BaseCommand):
help = 'Construct web export of an annotated volume'
def add_arguments(self, parser):
parser.add_argument('pid', nargs='+', type=str)
parser.add_argument('--static', action='store_true', default=False,
help='Generate built (static) site instead of jekyll site')
def handle(self, *args, **options):
print args
repo = Repository()
for pid in options['pid']:
vol = repo.get_object(pid, type=Volume)
tei = annotate.annotated_tei(vol.generate_volume_tei(),
vol.annotations())
zipfile = export.website(vol, tei, static=options['static'])
zipfilename = '%s-annotated-site.zip' % pid
shutil.copyfile(zipfile.name, zipfilename)
print 'Export for %s complete, zipfile is %s' % (vol.noid, zipfilename)
| from eulfedora.server import Repository
from eulxml.xmlmap import load_xmlobject_from_file
from django.core.management.base import BaseCommand
import shutil
from readux.books import annotate, export
from readux.books.models import Volume
from readux.books.tei import Facsimile
class Command(BaseCommand):
help = 'Construct web export of an annotated volume'
def add_arguments(self, parser):
parser.add_argument('pid', nargs='+', type=str)
parser.add_argument('--static', action='store_true', default=False,
help='Generate built (static) site instead of jekyll site')
parser.add_argument('--tei',
help='Use the specified TEI file instead of generating it')
def handle(self, *args, **options):
print args
repo = Repository()
for pid in options['pid']:
vol = repo.get_object(pid, type=Volume)
if options['tei']:
tei = load_xmlobject_from_file(options['tei'], Facsimile)
else:
tei = annotate.annotated_tei(vol.generate_volume_tei(),
vol.annotations())
zipfile = export.website(vol, tei, static=options['static'])
zipfilename = '%s-annotated-site.zip' % vol.noid
shutil.copyfile(zipfile.name, zipfilename)
print 'Export for %s complete, zipfile is %s' % (vol.noid, zipfilename)
| Add an option to pass in generated TEI, for speed & troubleshooting | Add an option to pass in generated TEI, for speed & troubleshooting
| Python | apache-2.0 | emory-libraries/readux,emory-libraries/readux,emory-libraries/readux | <INSERT> eulxml.xmlmap import load_xmlobject_from_file
from <INSERT_END> <REPLACE_OLD> Volume
class <REPLACE_NEW> Volume
from readux.books.tei import Facsimile
class <REPLACE_END> <REPLACE_OLD> site')
<REPLACE_NEW> site')
parser.add_argument('--tei',
help='Use the specified TEI file instead of generating it')
<REPLACE_END> <INSERT> if options['tei']:
<INSERT_END> <INSERT> load_xmlobject_from_file(options['tei'], Facsimile)
else:
tei = <INSERT_END> <INSERT> <INSERT_END> <REPLACE_OLD> pid
<REPLACE_NEW> vol.noid
<REPLACE_END> <|endoftext|> from eulfedora.server import Repository
from eulxml.xmlmap import load_xmlobject_from_file
from django.core.management.base import BaseCommand
import shutil
from readux.books import annotate, export
from readux.books.models import Volume
from readux.books.tei import Facsimile
class Command(BaseCommand):
help = 'Construct web export of an annotated volume'
def add_arguments(self, parser):
parser.add_argument('pid', nargs='+', type=str)
parser.add_argument('--static', action='store_true', default=False,
help='Generate built (static) site instead of jekyll site')
parser.add_argument('--tei',
help='Use the specified TEI file instead of generating it')
def handle(self, *args, **options):
print args
repo = Repository()
for pid in options['pid']:
vol = repo.get_object(pid, type=Volume)
if options['tei']:
tei = load_xmlobject_from_file(options['tei'], Facsimile)
else:
tei = annotate.annotated_tei(vol.generate_volume_tei(),
vol.annotations())
zipfile = export.website(vol, tei, static=options['static'])
zipfilename = '%s-annotated-site.zip' % vol.noid
shutil.copyfile(zipfile.name, zipfilename)
print 'Export for %s complete, zipfile is %s' % (vol.noid, zipfilename)
| Add an option to pass in generated TEI, for speed & troubleshooting
from eulfedora.server import Repository
from django.core.management.base import BaseCommand
import shutil
from readux.books import annotate, export
from readux.books.models import Volume
class Command(BaseCommand):
help = 'Construct web export of an annotated volume'
def add_arguments(self, parser):
parser.add_argument('pid', nargs='+', type=str)
parser.add_argument('--static', action='store_true', default=False,
help='Generate built (static) site instead of jekyll site')
def handle(self, *args, **options):
print args
repo = Repository()
for pid in options['pid']:
vol = repo.get_object(pid, type=Volume)
tei = annotate.annotated_tei(vol.generate_volume_tei(),
vol.annotations())
zipfile = export.website(vol, tei, static=options['static'])
zipfilename = '%s-annotated-site.zip' % pid
shutil.copyfile(zipfile.name, zipfilename)
print 'Export for %s complete, zipfile is %s' % (vol.noid, zipfilename)
|
19cd84480a739f9550258dc959637fe85f43af50 | fedora/release.py | fedora/release.py | '''
Information about this python-fedora release
'''
from fedora import _
NAME = 'python-fedora'
VERSION = '0.3.6'
DESCRIPTION = _('Python modules for interacting with Fedora services')
LONG_DESCRIPTION = _('''
The Fedora Project runs many different services. These services help us to
package software, develop new programs, and generally put together the distro.
This package contains software that helps us do that.
''')
AUTHOR = 'Toshio Kuratomi, Luke Macken'
EMAIL = '[email protected]'
COPYRIGHT = '2007-2008 Red Hat, Inc.'
URL = 'https://fedorahosted.org/python-fedora'
DOWNLOAD_URL = 'https://fedorahosted.org/releases/p/y/python-fedora/'
LICENSE = 'GPLv2'
| '''
Information about this python-fedora release
'''
from fedora import _
NAME = 'python-fedora'
VERSION = '0.3.6'
DESCRIPTION = _('Python modules for interacting with Fedora Services')
LONG_DESCRIPTION = _('''
The Fedora Project runs many different services. These services help us to
package software, develop new programs, and generally put together the distro.
This package contains software that helps us do that.
''')
AUTHOR = 'Toshio Kuratomi, Luke Macken'
EMAIL = '[email protected]'
COPYRIGHT = '2007-2008 Red Hat, Inc.'
URL = 'https://fedorahosted.org/python-fedora'
DOWNLOAD_URL = 'https://fedorahosted.org/releases/p/y/python-fedora/'
LICENSE = 'GPLv2'
| Correct minor typo in a string. | Correct minor typo in a string.
| Python | lgpl-2.1 | fedora-infra/python-fedora | <REPLACE_OLD> services')
LONG_DESCRIPTION <REPLACE_NEW> Services')
LONG_DESCRIPTION <REPLACE_END> <|endoftext|> '''
Information about this python-fedora release
'''
from fedora import _
NAME = 'python-fedora'
VERSION = '0.3.6'
DESCRIPTION = _('Python modules for interacting with Fedora Services')
LONG_DESCRIPTION = _('''
The Fedora Project runs many different services. These services help us to
package software, develop new programs, and generally put together the distro.
This package contains software that helps us do that.
''')
AUTHOR = 'Toshio Kuratomi, Luke Macken'
EMAIL = '[email protected]'
COPYRIGHT = '2007-2008 Red Hat, Inc.'
URL = 'https://fedorahosted.org/python-fedora'
DOWNLOAD_URL = 'https://fedorahosted.org/releases/p/y/python-fedora/'
LICENSE = 'GPLv2'
| Correct minor typo in a string.
'''
Information about this python-fedora release
'''
from fedora import _
NAME = 'python-fedora'
VERSION = '0.3.6'
DESCRIPTION = _('Python modules for interacting with Fedora services')
LONG_DESCRIPTION = _('''
The Fedora Project runs many different services. These services help us to
package software, develop new programs, and generally put together the distro.
This package contains software that helps us do that.
''')
AUTHOR = 'Toshio Kuratomi, Luke Macken'
EMAIL = '[email protected]'
COPYRIGHT = '2007-2008 Red Hat, Inc.'
URL = 'https://fedorahosted.org/python-fedora'
DOWNLOAD_URL = 'https://fedorahosted.org/releases/p/y/python-fedora/'
LICENSE = 'GPLv2'
|
14fb663019038b80d42f212e0ad8169cd0d37e84 | neutron_lib/exceptions/address_group.py | neutron_lib/exceptions/address_group.py | # All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib._i18n import _
from neutron_lib import exceptions
class AddressGroupNotFound(exceptions.NotFound):
message = _("Address group %(address_group_id)s could not be found.")
class AddressesNotFound(exceptions.NotFound):
message = _("Addresses %(addresses)s not found in the address group "
"%(address_group_id)s.")
class AddressesAlreadyExist(exceptions.BadRequest):
message = _("Addresses %(addresses)s already exist in the "
"address group %(address_group_id)s.")
| # All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib._i18n import _
from neutron_lib import exceptions
class AddressGroupNotFound(exceptions.NotFound):
message = _("Address group %(address_group_id)s could not be found.")
class AddressGroupInUse(exceptions.InUse):
message = _("Address group %(address_group_id)s is in use on one or more "
"security group rules.")
class AddressesNotFound(exceptions.NotFound):
message = _("Addresses %(addresses)s not found in the address group "
"%(address_group_id)s.")
class AddressesAlreadyExist(exceptions.BadRequest):
message = _("Addresses %(addresses)s already exist in the "
"address group %(address_group_id)s.")
| Add address group in use exception | Add address group in use exception
Related change: https://review.opendev.org/#/c/751110/
Change-Id: I2a9872890ca4d5e59a9e266c1dcacd3488a3265c
| Python | apache-2.0 | openstack/neutron-lib,openstack/neutron-lib,openstack/neutron-lib,openstack/neutron-lib | <INSERT> AddressGroupInUse(exceptions.InUse):
message = _("Address group %(address_group_id)s is in use on one or more "
"security group rules.")
class <INSERT_END> <|endoftext|> # All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib._i18n import _
from neutron_lib import exceptions
class AddressGroupNotFound(exceptions.NotFound):
message = _("Address group %(address_group_id)s could not be found.")
class AddressGroupInUse(exceptions.InUse):
message = _("Address group %(address_group_id)s is in use on one or more "
"security group rules.")
class AddressesNotFound(exceptions.NotFound):
message = _("Addresses %(addresses)s not found in the address group "
"%(address_group_id)s.")
class AddressesAlreadyExist(exceptions.BadRequest):
message = _("Addresses %(addresses)s already exist in the "
"address group %(address_group_id)s.")
| Add address group in use exception
Related change: https://review.opendev.org/#/c/751110/
Change-Id: I2a9872890ca4d5e59a9e266c1dcacd3488a3265c
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib._i18n import _
from neutron_lib import exceptions
class AddressGroupNotFound(exceptions.NotFound):
message = _("Address group %(address_group_id)s could not be found.")
class AddressesNotFound(exceptions.NotFound):
message = _("Addresses %(addresses)s not found in the address group "
"%(address_group_id)s.")
class AddressesAlreadyExist(exceptions.BadRequest):
message = _("Addresses %(addresses)s already exist in the "
"address group %(address_group_id)s.")
|
32116cf93b30fc63394379b49e921f9e0ab2f652 | django_filepicker/widgets.py | django_filepicker/widgets.py | from django.conf import settings
from django.forms import widgets
#JS_URL is the url to the filepicker.io javascript library
JS_VERSION = 0
JS_URL = "//api.filepicker.io/v%d/filepicker.js" % (JS_VERSION)
if hasattr(settings, 'FILEPICKER_INPUT_TYPE'):
INPUT_TYPE = settings.FILEPICKER_INPUT_TYPE
else:
INPUT_TYPE = 'filepicker-dragdrop'
class FPFileWidget(widgets.Input):
input_type = INPUT_TYPE
needs_multipart_form = False
def value_from_datadict_old(self, data, files, name):
#If we are using the middleware, then the data will already be
#in FILES, if not it will be in POST
if name not in data:
return super(FPFileWidget, self).value_from_datadict(
data, files, name)
return data
class Media:
js = (JS_URL,)
| from django.conf import settings
from django.forms import widgets
#JS_URL is the url to the filepicker.io javascript library
JS_VERSION = 1
JS_URL = "//api.filepicker.io/v%d/filepicker.js" % (JS_VERSION)
if hasattr(settings, 'FILEPICKER_INPUT_TYPE'):
INPUT_TYPE = settings.FILEPICKER_INPUT_TYPE
else:
INPUT_TYPE = 'filepicker-dragdrop'
class FPFileWidget(widgets.Input):
input_type = INPUT_TYPE
needs_multipart_form = False
def value_from_datadict_old(self, data, files, name):
#If we are using the middleware, then the data will already be
#in FILES, if not it will be in POST
if name not in data:
return super(FPFileWidget, self).value_from_datadict(
data, files, name)
return data
class Media:
js = (JS_URL,)
| Use version 1 of Filepicker.js | Use version 1 of Filepicker.js | Python | mit | filepicker/filepicker-django,filepicker/filepicker-django,FundedByMe/filepicker-django,FundedByMe/filepicker-django | <REPLACE_OLD> 0
JS_URL <REPLACE_NEW> 1
JS_URL <REPLACE_END> <|endoftext|> from django.conf import settings
from django.forms import widgets
#JS_URL is the url to the filepicker.io javascript library
JS_VERSION = 1
JS_URL = "//api.filepicker.io/v%d/filepicker.js" % (JS_VERSION)
if hasattr(settings, 'FILEPICKER_INPUT_TYPE'):
INPUT_TYPE = settings.FILEPICKER_INPUT_TYPE
else:
INPUT_TYPE = 'filepicker-dragdrop'
class FPFileWidget(widgets.Input):
input_type = INPUT_TYPE
needs_multipart_form = False
def value_from_datadict_old(self, data, files, name):
#If we are using the middleware, then the data will already be
#in FILES, if not it will be in POST
if name not in data:
return super(FPFileWidget, self).value_from_datadict(
data, files, name)
return data
class Media:
js = (JS_URL,)
| Use version 1 of Filepicker.js
from django.conf import settings
from django.forms import widgets
#JS_URL is the url to the filepicker.io javascript library
JS_VERSION = 0
JS_URL = "//api.filepicker.io/v%d/filepicker.js" % (JS_VERSION)
if hasattr(settings, 'FILEPICKER_INPUT_TYPE'):
INPUT_TYPE = settings.FILEPICKER_INPUT_TYPE
else:
INPUT_TYPE = 'filepicker-dragdrop'
class FPFileWidget(widgets.Input):
input_type = INPUT_TYPE
needs_multipart_form = False
def value_from_datadict_old(self, data, files, name):
#If we are using the middleware, then the data will already be
#in FILES, if not it will be in POST
if name not in data:
return super(FPFileWidget, self).value_from_datadict(
data, files, name)
return data
class Media:
js = (JS_URL,)
|
c5fba0cc8acb482a0bc1c49ae5187ebc1232dba3 | tests/test_directions.py | tests/test_directions.py | import unittest
from shapely.geometry import LineString, Point
from directions.base import _parse_points
class DirectionsTest(unittest.TestCase):
def setUp(self):
self.p = [(1,2), (3,4), (5,6), (7,8)]
self.line = LineString(self.p)
def test_origin_dest(self):
result = _parse_points(self.p[0], self.p[-1])
self.assertEqual([self.p[0], self.p[-1]], result)
def test_origin_dest_waypoints(self):
result = _parse_points(self.p[0], self.p[-1], self.p[1:-1])
self.assertEqual(self.p, result)
def test_line(self):
result = _parse_points(self.line)
self.assertEqual(self.p, result)
def test_points(self):
p0 = Point(self.line.coords[0])
p1 = Point(self.line.coords[-1])
result = _parse_points(p0, p1)
self.assertEqual([self.p[0], self.p[-1]], result)
def test_points_array(self):
p0 = Point(self.p[0])
p1 = Point(self.p[-1])
result = _parse_points([p0, p1])
self.assertEqual([self.p[0], self.p[-1]], result)
def test_mixed_types(self):
origin = 'blah'
destination = Point(self.p[-1])
points = self.p[1:-1]
expected = list(self.p) # Copy it
expected[0] = 'blah'
result = _parse_points(origin, destination, points)
self.assertEqual(expected, result)
def test_no_dest_waypoints(self):
# Can't specify waypoints without destination
with self.assertRaises(ValueError):
_parse_points('origin', waypoints=['p1'])
def test_bad_input(self):
# Test points not length 2
with self.assertRaises(ValueError):
_parse_points(self.p[0], (1.0, 2.0, 3.0))
| Add tests for the different input variations. | Add tests for the different input variations.
| Python | bsd-3-clause | asfaltboy/directions.py,jwass/directions.py,samtux/directions.py | <REPLACE_OLD> <REPLACE_NEW> import unittest
from shapely.geometry import LineString, Point
from directions.base import _parse_points
class DirectionsTest(unittest.TestCase):
def setUp(self):
self.p = [(1,2), (3,4), (5,6), (7,8)]
self.line = LineString(self.p)
def test_origin_dest(self):
result = _parse_points(self.p[0], self.p[-1])
self.assertEqual([self.p[0], self.p[-1]], result)
def test_origin_dest_waypoints(self):
result = _parse_points(self.p[0], self.p[-1], self.p[1:-1])
self.assertEqual(self.p, result)
def test_line(self):
result = _parse_points(self.line)
self.assertEqual(self.p, result)
def test_points(self):
p0 = Point(self.line.coords[0])
p1 = Point(self.line.coords[-1])
result = _parse_points(p0, p1)
self.assertEqual([self.p[0], self.p[-1]], result)
def test_points_array(self):
p0 = Point(self.p[0])
p1 = Point(self.p[-1])
result = _parse_points([p0, p1])
self.assertEqual([self.p[0], self.p[-1]], result)
def test_mixed_types(self):
origin = 'blah'
destination = Point(self.p[-1])
points = self.p[1:-1]
expected = list(self.p) # Copy it
expected[0] = 'blah'
result = _parse_points(origin, destination, points)
self.assertEqual(expected, result)
def test_no_dest_waypoints(self):
# Can't specify waypoints without destination
with self.assertRaises(ValueError):
_parse_points('origin', waypoints=['p1'])
def test_bad_input(self):
# Test points not length 2
with self.assertRaises(ValueError):
_parse_points(self.p[0], (1.0, 2.0, 3.0))
<REPLACE_END> <|endoftext|> import unittest
from shapely.geometry import LineString, Point
from directions.base import _parse_points
class DirectionsTest(unittest.TestCase):
def setUp(self):
self.p = [(1,2), (3,4), (5,6), (7,8)]
self.line = LineString(self.p)
def test_origin_dest(self):
result = _parse_points(self.p[0], self.p[-1])
self.assertEqual([self.p[0], self.p[-1]], result)
def test_origin_dest_waypoints(self):
result = _parse_points(self.p[0], self.p[-1], self.p[1:-1])
self.assertEqual(self.p, result)
def test_line(self):
result = _parse_points(self.line)
self.assertEqual(self.p, result)
def test_points(self):
p0 = Point(self.line.coords[0])
p1 = Point(self.line.coords[-1])
result = _parse_points(p0, p1)
self.assertEqual([self.p[0], self.p[-1]], result)
def test_points_array(self):
p0 = Point(self.p[0])
p1 = Point(self.p[-1])
result = _parse_points([p0, p1])
self.assertEqual([self.p[0], self.p[-1]], result)
def test_mixed_types(self):
origin = 'blah'
destination = Point(self.p[-1])
points = self.p[1:-1]
expected = list(self.p) # Copy it
expected[0] = 'blah'
result = _parse_points(origin, destination, points)
self.assertEqual(expected, result)
def test_no_dest_waypoints(self):
# Can't specify waypoints without destination
with self.assertRaises(ValueError):
_parse_points('origin', waypoints=['p1'])
def test_bad_input(self):
# Test points not length 2
with self.assertRaises(ValueError):
_parse_points(self.p[0], (1.0, 2.0, 3.0))
| Add tests for the different input variations.
|
|
0ff0a3137ea938b7db8167d132b08b9e8620e864 | contrib/internal/run-pyflakes.py | contrib/internal/run-pyflakes.py | #!/usr/bin/env python
#
# Utility script to run pyflakes with the modules we care about and
# exclude errors we know to be fine.
import os
import subprocess
import sys
module_exclusions = [
'djblets',
'django_evolution',
'dist',
'ez_setup.py',
'settings_local.py',
'ReviewBoard.egg-info',
]
def scan_for_modules():
return [entry
for entry in os.listdir(os.getcwd())
if ((os.path.isdir(entry) or entry.endswith(".py")) and
entry not in module_exclusions)]
def main():
cur_dir = os.path.dirname(__file__)
os.chdir(os.path.join(cur_dir, "..", ".."))
modules = sys.argv[1:]
if not modules:
# The user didn't specify anything specific. Scan for modules.
modules = scan_for_modules()
p = subprocess.Popen(['pyflakes'] + modules,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
close_fds=True)
contents = p.stdout.readlines()
# Read in the exclusions file
exclusions = {}
fp = open(os.path.join(cur_dir, "pyflakes.exclude"), "r")
for line in fp.readlines():
exclusions[line.rstrip()] = 1
fp.close()
    # Now filter them
for line in contents:
line = line.rstrip()
if line not in exclusions:
print line
if __name__ == "__main__":
main()
| #!/usr/bin/env python
#
# Utility script to run pyflakes with the modules we care about and
# exclude errors we know to be fine.
import os
import subprocess
import sys
module_exclusions = [
'djblets',
'django_evolution',
'dist',
'ez_setup.py',
'htdocs',
'settings_local.py',
'ReviewBoard.egg-info',
]
def scan_for_modules():
return [entry
for entry in os.listdir(os.getcwd())
if ((os.path.isdir(entry) or entry.endswith(".py")) and
entry not in module_exclusions)]
def main():
cur_dir = os.path.dirname(__file__)
os.chdir(os.path.join(cur_dir, "..", ".."))
modules = sys.argv[1:]
if not modules:
# The user didn't specify anything specific. Scan for modules.
modules = scan_for_modules()
p = subprocess.Popen(['pyflakes'] + modules,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
close_fds=True)
contents = p.stdout.readlines()
# Read in the exclusions file
exclusions = {}
fp = open(os.path.join(cur_dir, "pyflakes.exclude"), "r")
for line in fp.readlines():
exclusions[line.rstrip()] = 1
fp.close()
    # Now filter them
for line in contents:
line = line.rstrip()
if line not in exclusions:
print line
if __name__ == "__main__":
main()
| Exclude htdocs, because that just takes way too long to scan. | Exclude htdocs, because that just takes way too long to scan.
| Python | mit | atagar/ReviewBoard,atagar/ReviewBoard,sgallagher/reviewboard,chazy/reviewboard,chipx86/reviewboard,Khan/reviewboard,custode/reviewboard,atagar/ReviewBoard,Khan/reviewboard,chazy/reviewboard,atagar/ReviewBoard,bkochendorfer/reviewboard,davidt/reviewboard,asutherland/opc-reviewboard,Khan/reviewboard,Khan/reviewboard,beol/reviewboard,beol/reviewboard,Khan/reviewboard,sgallagher/reviewboard,Khan/reviewboard,brennie/reviewboard,chipx86/reviewboard,brennie/reviewboard,atagar/ReviewBoard,KnowNo/reviewboard,atagar/ReviewBoard,1tush/reviewboard,1tush/reviewboard,chazy/reviewboard,atagar/ReviewBoard,asutherland/opc-reviewboard,chazy/reviewboard,chipx86/reviewboard,asutherland/opc-reviewboard,Khan/reviewboard,chazy/reviewboard,beol/reviewboard,chipx86/reviewboard,reviewboard/reviewboard,custode/reviewboard,bkochendorfer/reviewboard,KnowNo/reviewboard,Khan/reviewboard,1tush/reviewboard,chazy/reviewboard,atagar/ReviewBoard,asutherland/opc-reviewboard,brennie/reviewboard,sgallagher/reviewboard,custode/reviewboard,sgallagher/reviewboard,KnowNo/reviewboard,atagar/ReviewBoard,beol/reviewboard,brennie/reviewboard,Khan/reviewboard,KnowNo/reviewboard,1tush/reviewboard,bkochendorfer/reviewboard,1tush/reviewboard,davidt/reviewboard,1tush/reviewboard,reviewboard/reviewboard,custode/reviewboard,reviewboard/reviewboard,reviewboard/reviewboard,bkochendorfer/reviewboard,chazy/reviewboard,chazy/reviewboard,davidt/reviewboard,1tush/reviewboard,1tush/reviewboard,1tush/reviewboard,chazy/reviewboard,davidt/reviewboard | <INSERT> 'htdocs',
<INSERT_END> <|endoftext|> #!/usr/bin/env python
#
# Utility script to run pyflakes with the modules we care about and
# exclude errors we know to be fine.
import os
import subprocess
import sys
module_exclusions = [
'djblets',
'django_evolution',
'dist',
'ez_setup.py',
'htdocs',
'settings_local.py',
'ReviewBoard.egg-info',
]
def scan_for_modules():
return [entry
for entry in os.listdir(os.getcwd())
if ((os.path.isdir(entry) or entry.endswith(".py")) and
entry not in module_exclusions)]
def main():
cur_dir = os.path.dirname(__file__)
os.chdir(os.path.join(cur_dir, "..", ".."))
modules = sys.argv[1:]
if not modules:
# The user didn't specify anything specific. Scan for modules.
modules = scan_for_modules()
p = subprocess.Popen(['pyflakes'] + modules,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
close_fds=True)
contents = p.stdout.readlines()
# Read in the exclusions file
exclusions = {}
fp = open(os.path.join(cur_dir, "pyflakes.exclude"), "r")
for line in fp.readlines():
exclusions[line.rstrip()] = 1
fp.close()
    # Now filter them
for line in contents:
line = line.rstrip()
if line not in exclusions:
print line
if __name__ == "__main__":
main()
| Exclude htdocs, because that just takes way too long to scan.
#!/usr/bin/env python
#
# Utility script to run pyflakes with the modules we care about and
# exclude errors we know to be fine.
import os
import subprocess
import sys
module_exclusions = [
'djblets',
'django_evolution',
'dist',
'ez_setup.py',
'settings_local.py',
'ReviewBoard.egg-info',
]
def scan_for_modules():
return [entry
for entry in os.listdir(os.getcwd())
if ((os.path.isdir(entry) or entry.endswith(".py")) and
entry not in module_exclusions)]
def main():
cur_dir = os.path.dirname(__file__)
os.chdir(os.path.join(cur_dir, "..", ".."))
modules = sys.argv[1:]
if not modules:
# The user didn't specify anything specific. Scan for modules.
modules = scan_for_modules()
p = subprocess.Popen(['pyflakes'] + modules,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
close_fds=True)
contents = p.stdout.readlines()
# Read in the exclusions file
exclusions = {}
fp = open(os.path.join(cur_dir, "pyflakes.exclude"), "r")
for line in fp.readlines():
exclusions[line.rstrip()] = 1
fp.close()
    # Now filter them
for line in contents:
line = line.rstrip()
if line not in exclusions:
print line
if __name__ == "__main__":
main()
|
e1b0222c8a3ed39bf76af10484a94aa4cfe5adc8 | googlesearch/templatetags/search_tags.py | googlesearch/templatetags/search_tags.py | import math
from django import template
from ..conf import settings
register = template.Library()
@register.inclusion_tag('googlesearch/_pagination.html', takes_context=True)
def show_pagination(context, pages_to_show=10):
max_pages = int(math.ceil(context['total_results'] /
settings.GOOGLE_SEARCH_RESULTS_PER_PAGE))
last_page = int(context['current_page']) + pages_to_show - 1
last_page = max_pages if last_page > max_pages else last_page
prev_page = context['current_page'] - 1
next_page = context['current_page'] + 1
context.update({
'pages': range(1, max_pages + 1),
'prev_page': prev_page if context['current_page'] - 1 > 0 else None,
'next_page': next_page if next_page < max_pages else None,
})
return context
| import math
from django import template
from ..conf import settings
register = template.Library()
@register.inclusion_tag('googlesearch/_pagination.html', takes_context=True)
def show_pagination(context, pages_to_show=10):
max_pages = int(math.ceil(context['total_results'] /
settings.GOOGLE_SEARCH_RESULTS_PER_PAGE))
prev_page = context['current_page'] - 1
next_page = context['current_page'] + 1
context.update({
'pages': range(1, max_pages + 1),
'prev_page': prev_page if context['current_page'] - 1 > 0 else None,
'next_page': next_page if next_page < max_pages else None,
})
return context
| Remove last_page, not needed anymore. | Remove last_page, not needed anymore.
| Python | mit | hzdg/django-google-search,hzdg/django-google-search | <DELETE> last_page = int(context['current_page']) + pages_to_show - 1
last_page = max_pages if last_page > max_pages else last_page
<DELETE_END> <|endoftext|> import math
from django import template
from ..conf import settings
register = template.Library()
@register.inclusion_tag('googlesearch/_pagination.html', takes_context=True)
def show_pagination(context, pages_to_show=10):
max_pages = int(math.ceil(context['total_results'] /
settings.GOOGLE_SEARCH_RESULTS_PER_PAGE))
prev_page = context['current_page'] - 1
next_page = context['current_page'] + 1
context.update({
'pages': range(1, max_pages + 1),
'prev_page': prev_page if context['current_page'] - 1 > 0 else None,
'next_page': next_page if next_page < max_pages else None,
})
return context
| Remove last_page, not needed anymore.
import math
from django import template
from ..conf import settings
register = template.Library()
@register.inclusion_tag('googlesearch/_pagination.html', takes_context=True)
def show_pagination(context, pages_to_show=10):
max_pages = int(math.ceil(context['total_results'] /
settings.GOOGLE_SEARCH_RESULTS_PER_PAGE))
last_page = int(context['current_page']) + pages_to_show - 1
last_page = max_pages if last_page > max_pages else last_page
prev_page = context['current_page'] - 1
next_page = context['current_page'] + 1
context.update({
'pages': range(1, max_pages + 1),
'prev_page': prev_page if context['current_page'] - 1 > 0 else None,
'next_page': next_page if next_page < max_pages else None,
})
return context
|
96fe288cbd4c4399c83b4c3d56da6e427aaad0f9 | spicedham/digitdestroyer.py | spicedham/digitdestroyer.py | from spicedham.basewrapper import BaseWrapper
class DigitDestroyer(BaseWrapper):
def train(*args):
pass
def classify(self, response):
if all(map(unicode.isdigit, response)):
return 1
else:
return 0.5
| from spicedham.basewrapper import BaseWrapper
class DigitDestroyer(object):
def train(*args):
pass
def classify(self, response):
if all(map(unicode.isdigit, response)):
return 1
else:
return None
| Fix inheritance error and return value | Fix inheritance error and return value
It shouldn't inherit from BaseWrapper, but merely object.
It should return None instead of 0.5 so it will have no effect on the average.
| Python | mpl-2.0 | mozilla/spicedham,mozilla/spicedham | <REPLACE_OLD> DigitDestroyer(BaseWrapper):
<REPLACE_NEW> DigitDestroyer(object):
<REPLACE_END> <REPLACE_OLD> 0.5
<REPLACE_NEW> None
<REPLACE_END> <|endoftext|> from spicedham.basewrapper import BaseWrapper
class DigitDestroyer(object):
def train(*args):
pass
def classify(self, response):
if all(map(unicode.isdigit, response)):
return 1
else:
return None
| Fix inheritance error and return value
It shouldn't inherit from BaseWrapper, but merely object.
It should return None instead of 0.5 so it will have no effect on the average.
from spicedham.basewrapper import BaseWrapper
class DigitDestroyer(BaseWrapper):
def train(*args):
pass
def classify(self, response):
if all(map(unicode.isdigit, response)):
return 1
else:
return 0.5
|
e8f8b08ffb011ed705701f40c6a1a952c13d7c41 | analytics/test_analytics.py | analytics/test_analytics.py | # -*- encoding: utf-8
import pytest
from reports import NGINX_LOG_REGEX
@pytest.mark.parametrize('log_line', [
# Unusual methods
'0.0.0.0 - - [01/Jan/2001:00:00:00 +0000] "HEAD /example HTTP/1.0" 200 0 "http://referrer.org" "Example user agent" "1.2.3.4"',
'0.0.0.0 - - [01/Jan/2001:00:00:00 +0000] "OPTIONS /example HTTP/1.0" 200 0 "http://referrer.org" "Example user agent" "1.2.3.4"',
# Referrer is empty
'0.0.0.0 - - [01/Jan/2001:00:00:00 +0000] "GET /example HTTP/1.0" 200 0 "" "Example user agent" "1.2.3.4"',
])
def test_nginx_regex(log_line):
assert NGINX_LOG_REGEX.match(log_line) is not None
| Add a few tests for the analytics code | Add a few tests for the analytics code
| Python | mit | alexwlchan/alexwlchan.net,alexwlchan/alexwlchan.net,alexwlchan/alexwlchan.net,alexwlchan/alexwlchan.net,alexwlchan/alexwlchan.net | <INSERT> # -*- encoding: utf-8
import pytest
from reports import NGINX_LOG_REGEX
@pytest.mark.parametrize('log_line', [
<INSERT_END> <INSERT> # Unusual methods
'0.0.0.0 - - [01/Jan/2001:00:00:00 +0000] "HEAD /example HTTP/1.0" 200 0 "http://referrer.org" "Example user agent" "1.2.3.4"',
'0.0.0.0 - - [01/Jan/2001:00:00:00 +0000] "OPTIONS /example HTTP/1.0" 200 0 "http://referrer.org" "Example user agent" "1.2.3.4"',
# Referrer is empty
'0.0.0.0 - - [01/Jan/2001:00:00:00 +0000] "GET /example HTTP/1.0" 200 0 "" "Example user agent" "1.2.3.4"',
])
def test_nginx_regex(log_line):
assert NGINX_LOG_REGEX.match(log_line) is not None
<INSERT_END> <|endoftext|> # -*- encoding: utf-8
import pytest
from reports import NGINX_LOG_REGEX
@pytest.mark.parametrize('log_line', [
# Unusual methods
'0.0.0.0 - - [01/Jan/2001:00:00:00 +0000] "HEAD /example HTTP/1.0" 200 0 "http://referrer.org" "Example user agent" "1.2.3.4"',
'0.0.0.0 - - [01/Jan/2001:00:00:00 +0000] "OPTIONS /example HTTP/1.0" 200 0 "http://referrer.org" "Example user agent" "1.2.3.4"',
# Referrer is empty
'0.0.0.0 - - [01/Jan/2001:00:00:00 +0000] "GET /example HTTP/1.0" 200 0 "" "Example user agent" "1.2.3.4"',
])
def test_nginx_regex(log_line):
assert NGINX_LOG_REGEX.match(log_line) is not None
| Add a few tests for the analytics code
|
|
03f4ccf4168cdd39d3b8516346a31c4c3ac0ba49 | sieve/sieve.py | sieve/sieve.py | def sieve(n):
if n < 2:
return []
not_prime = set()
prime = [2]
for i in range(3, n+1, 2):
if i not in not_prime:
prime.append(i)
not_prime.update(range(i*i, n, i))
return prime
| def sieve(n):
if n < 2:
return []
not_prime = set()
prime = [2]
for i in range(3, n+1, 2):
if i not in not_prime:
prime.append(i)
not_prime.update(range(i*i, n+1, i))
return prime
| Fix bug where n is the square of a prime | Fix bug where n is the square of a prime
| Python | agpl-3.0 | CubicComet/exercism-python-solutions | <REPLACE_OLD> n, <REPLACE_NEW> n+1, <REPLACE_END> <|endoftext|> def sieve(n):
if n < 2:
return []
not_prime = set()
prime = [2]
for i in range(3, n+1, 2):
if i not in not_prime:
prime.append(i)
not_prime.update(range(i*i, n+1, i))
return prime
| Fix bug where n is the square of a prime
def sieve(n):
if n < 2:
return []
not_prime = set()
prime = [2]
for i in range(3, n+1, 2):
if i not in not_prime:
prime.append(i)
not_prime.update(range(i*i, n, i))
return prime
|
9353deefa7cc31fc4e9d01f29f7dab8c37b73a78 | setup.py | setup.py | from setuptools import setup, find_packages
# Dynamically calculate the version based on dbsettings.VERSION
version_tuple = (0, 4, None)
if version_tuple[2] is not None:
version = "%d.%d_%s" % version_tuple
else:
version = "%d.%d" % version_tuple[:2]
setup(
name='django-dbsettings',
version=version,
description='Application settings whose values can be updated while a project is up and running.',
long_description=open('README.rst').read(),
author='Samuel Cormier-Iijima',
author_email='[email protected]',
maintainer='Jacek Tomaszewski',
maintainer_email='[email protected]',
url='http://github.com/zlorf/django-dbsettings',
packages=find_packages(),
include_package_data=True,
license='BSD',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Utilities'
],
)
| from setuptools import setup, find_packages
# Dynamically calculate the version based on dbsettings.VERSION
version_tuple = (0, 4, None)
if version_tuple[2] is not None:
if type(version_tuple[2]) == int:
version = "%d.%d.%s" % version_tuple
else:
version = "%d.%d_%s" % version_tuple
else:
version = "%d.%d" % version_tuple[:2]
setup(
name='django-dbsettings',
version=version,
description='Application settings whose values can be updated while a project is up and running.',
long_description=open('README.rst').read(),
author='Samuel Cormier-Iijima',
author_email='[email protected]',
maintainer='Jacek Tomaszewski',
maintainer_email='[email protected]',
url='http://github.com/zlorf/django-dbsettings',
packages=find_packages(),
include_package_data=True,
license='BSD',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Utilities'
],
)
| Allow version to have subrevision. | Allow version to have subrevision.
| Python | bsd-3-clause | helber/django-dbsettings,sciyoshi/django-dbsettings,zlorf/django-dbsettings,helber/django-dbsettings,DjangoAdminHackers/django-dbsettings,winfieldco/django-dbsettings,MiriamSexton/django-dbsettings,nwaxiomatic/django-dbsettings,DjangoAdminHackers/django-dbsettings,nwaxiomatic/django-dbsettings,zlorf/django-dbsettings,johnpaulett/django-dbsettings,sciyoshi/django-dbsettings,winfieldco/django-dbsettings,johnpaulett/django-dbsettings | <INSERT> if type(version_tuple[2]) == int:
version = "%d.%d.%s" % version_tuple
else:
<INSERT_END> <|endoftext|> from setuptools import setup, find_packages
# Dynamically calculate the version based on dbsettings.VERSION
version_tuple = (0, 4, None)
if version_tuple[2] is not None:
if type(version_tuple[2]) == int:
version = "%d.%d.%s" % version_tuple
else:
version = "%d.%d_%s" % version_tuple
else:
version = "%d.%d" % version_tuple[:2]
setup(
name='django-dbsettings',
version=version,
description='Application settings whose values can be updated while a project is up and running.',
long_description=open('README.rst').read(),
author='Samuel Cormier-Iijima',
author_email='[email protected]',
maintainer='Jacek Tomaszewski',
maintainer_email='[email protected]',
url='http://github.com/zlorf/django-dbsettings',
packages=find_packages(),
include_package_data=True,
license='BSD',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Utilities'
],
)
| Allow version to have subrevision.
from setuptools import setup, find_packages
# Dynamically calculate the version based on dbsettings.VERSION
version_tuple = (0, 4, None)
if version_tuple[2] is not None:
version = "%d.%d_%s" % version_tuple
else:
version = "%d.%d" % version_tuple[:2]
setup(
name='django-dbsettings',
version=version,
description='Application settings whose values can be updated while a project is up and running.',
long_description=open('README.rst').read(),
author='Samuel Cormier-Iijima',
author_email='[email protected]',
maintainer='Jacek Tomaszewski',
maintainer_email='[email protected]',
url='http://github.com/zlorf/django-dbsettings',
packages=find_packages(),
include_package_data=True,
license='BSD',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Utilities'
],
)
|
a135e5a0919f64984b1348c3956bd95dc183e874 | scripts/test_deployment.py | scripts/test_deployment.py | import os
import pytest
import requests
@pytest.fixture
def url():
return os.getenv("SITE", "http://localhost:5000")
def test_post_images(expect, url):
params = {"key": "iw", "lines": ["test", "deployment"]}
response = requests.post(f"{url}/api/images", json=params)
expect(response.status_code) == 201
expect(response.json()["url"]).endswith("/api/images/iw/test/deployment.png")
def test_get_image(expect, url):
response = requests.get(f"{url}/iw/tests_code/in_production.jpg")
expect(response.status_code) == 200
expect(response.headers["Content-Type"]) == "image/jpeg"
| import os
import pytest
import requests
@pytest.fixture
def url():
return os.getenv("SITE", "http://localhost:5000")
def test_post_images(expect, url):
params = {"key": "iw", "lines": ["test", "deployment"]}
response = requests.post(f"{url}/api/images", json=params)
expect(response.status_code) == 201
expect(response.json()["url"]).endswith("/api/images/iw/test/deployment.png")
def test_get_image(expect, url):
response = requests.get(f"{url}/iw/tests_code/in_production.jpg")
expect(response.status_code) == 200
expect(response.headers["Content-Type"]) == "image/jpeg"
def test_get_image_custom(expect, url):
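# custom templates take the background image URL through the alt query parameter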
response = requests.get(
f"{url}/custom/test.png?alt=https://www.gstatic.com/webp/gallery/1.jpg"
)
expect(response.status_code) == 200
expect(response.headers["Content-Type"]) == "image/png"
| Add test for custom images | Add test for custom images
| Python | mit | jacebrowning/memegen,jacebrowning/memegen | <REPLACE_OLD> "image/jpeg"
<REPLACE_NEW> "image/jpeg"
def test_get_image_custom(expect, url):
response = requests.get(
f"{url}/custom/test.png?alt=https://www.gstatic.com/webp/gallery/1.jpg"
)
expect(response.status_code) == 200
expect(response.headers["Content-Type"]) == "image/png"
<REPLACE_END> <|endoftext|> import os
import pytest
import requests
@pytest.fixture
def url():
return os.getenv("SITE", "http://localhost:5000")
def test_post_images(expect, url):
params = {"key": "iw", "lines": ["test", "deployment"]}
response = requests.post(f"{url}/api/images", json=params)
expect(response.status_code) == 201
expect(response.json()["url"]).endswith("/api/images/iw/test/deployment.png")
def test_get_image(expect, url):
response = requests.get(f"{url}/iw/tests_code/in_production.jpg")
expect(response.status_code) == 200
expect(response.headers["Content-Type"]) == "image/jpeg"
def test_get_image_custom(expect, url):
response = requests.get(
f"{url}/custom/test.png?alt=https://www.gstatic.com/webp/gallery/1.jpg"
)
expect(response.status_code) == 200
expect(response.headers["Content-Type"]) == "image/png"
| Add test for custom images
import os
import pytest
import requests
@pytest.fixture
def url():
return os.getenv("SITE", "http://localhost:5000")
def test_post_images(expect, url):
params = {"key": "iw", "lines": ["test", "deployment"]}
response = requests.post(f"{url}/api/images", json=params)
expect(response.status_code) == 201
expect(response.json()["url"]).endswith("/api/images/iw/test/deployment.png")
def test_get_image(expect, url):
response = requests.get(f"{url}/iw/tests_code/in_production.jpg")
expect(response.status_code) == 200
expect(response.headers["Content-Type"]) == "image/jpeg"
|
0451c608525ee81e27c8f8ec78d31e50f0bed9d2 | box/models.py | box/models.py | BASE_URL = 'https://api.box.com/2.0'
FOLDERS_URL = '{}/folders/{{}}/items'.format(BASE_URL)
MAX_FOLDERS = 1000
class Client(object):
def __init__(self, provider_logic):
"""
Box client constructor
:param provider_logic: oauthclient ProviderLogic instance
:return:
"""
self.provider_logic = provider_logic
def folders(self, parent=None, limit=100, offset=0):
if parent:
folder_id = parent['id']
else:
folder_id = 0
url = FOLDERS_URL.format(folder_id)
count = 0
while count < limit:
params = {
# this is the request limit, not the number of folders we actually want
'limit': 100,
'offset': offset+count,
}
response = self.provider_logic.get(url, params=params)
json_data = response.json()
# if we hit the total number of entries, we have to be done
total_count = json_data['total_count']
if count >= total_count:
break
# determine how many more entries to get from the result set
entry_count = limit - count
entries = json_data['entries'][:entry_count]
for entry in entries:
yield entry
# increment the count by the number of entries
count += len(entries)
| BASE_URL = 'https://api.box.com/2.0'
FOLDERS_URL = '{}/folders/{{}}/items'.format(BASE_URL)
MAX_FOLDERS = 1000
class Client(object):
def __init__(self, provider_logic):
"""
Box client constructor
:param provider_logic: oauthclient ProviderLogic instance
:return:
"""
self.provider_logic = provider_logic
def folders(self, parent=None, limit=100, offset=0):
if parent:
folder_id = parent['id']
else:
folder_id = 0
url = FOLDERS_URL.format(folder_id)
count = 0
while count < limit:
params = {
# this is the request limit, not the number of folders we actually want
'limit': 100,
'offset': offset+count,
}
response = self.provider_logic.get(url, params=params)
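# fail fast on non-2xx responses instead of trying to parse an error body as JSON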
response.raise_for_status()
json_data = response.json()
# if we hit the total number of entries, we have to be done
total_count = json_data['total_count']
if count >= total_count:
break
# determine how many more entries to get from the result set
entry_count = limit - count
entries = json_data['entries'][:entry_count]
for entry in entries:
yield entry
# increment the count by the number of entries
count += len(entries)
| Raise an exception when the API response is not successful | Raise an exception when the API response is not successful
| Python | apache-2.0 | rca/box | <REPLACE_OLD> params=params)
<REPLACE_NEW> params=params)
response.raise_for_status()
<REPLACE_END> <|endoftext|> BASE_URL = 'https://api.box.com/2.0'
FOLDERS_URL = '{}/folders/{{}}/items'.format(BASE_URL)
MAX_FOLDERS = 1000
class Client(object):
def __init__(self, provider_logic):
"""
Box client constructor
:param provider_logic: oauthclient ProviderLogic instance
:return:
"""
self.provider_logic = provider_logic
def folders(self, parent=None, limit=100, offset=0):
if parent:
folder_id = parent['id']
else:
folder_id = 0
url = FOLDERS_URL.format(folder_id)
count = 0
while count < limit:
params = {
# this is the request limit, not the number of folders we actually want
'limit': 100,
'offset': offset+count,
}
response = self.provider_logic.get(url, params=params)
response.raise_for_status()
json_data = response.json()
# if we hit the total number of entries, we have to be done
total_count = json_data['total_count']
if count >= total_count:
break
# determine how many more entries to get from the result set
entry_count = limit - count
entries = json_data['entries'][:entry_count]
for entry in entries:
yield entry
# increment the count by the number of entries
count += len(entries)
| Raise an exception when the API response is not successful
BASE_URL = 'https://api.box.com/2.0'
FOLDERS_URL = '{}/folders/{{}}/items'.format(BASE_URL)
MAX_FOLDERS = 1000
class Client(object):
def __init__(self, provider_logic):
"""
Box client constructor
:param provider_logic: oauthclient ProviderLogic instance
:return:
"""
self.provider_logic = provider_logic
def folders(self, parent=None, limit=100, offset=0):
if parent:
folder_id = parent['id']
else:
folder_id = 0
url = FOLDERS_URL.format(folder_id)
count = 0
while count < limit:
params = {
# this is the request limit, not the number of folders we actually want
'limit': 100,
'offset': offset+count,
}
response = self.provider_logic.get(url, params=params)
json_data = response.json()
# if we hit the total number of entries, we have to be done
total_count = json_data['total_count']
if count >= total_count:
break
# determine how many more entries to get from the result set
entry_count = limit - count
entries = json_data['entries'][:entry_count]
for entry in entries:
yield entry
# increment the count by the number of entries
count += len(entries)
|
8c05cb85c47db892dd13abbd91b3948c09b9a954 | statsmodels/tools/__init__.py | statsmodels/tools/__init__.py | from tools import add_constant, categorical
from datautils import Dataset
from statsmodels import NoseWrapper as Tester
test = Tester().test
| from tools import add_constant, categorical
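# the datautils module (home of Dataset) moved elsewhere, so it is no longer imported here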
from statsmodels import NoseWrapper as Tester
test = Tester().test
| Remove import of moved file | REF: Remove import of moved file
| Python | bsd-3-clause | josef-pkt/statsmodels,adammenges/statsmodels,saketkc/statsmodels,DonBeo/statsmodels,edhuckle/statsmodels,saketkc/statsmodels,wkfwkf/statsmodels,wzbozon/statsmodels,huongttlan/statsmodels,kiyoto/statsmodels,astocko/statsmodels,musically-ut/statsmodels,bsipocz/statsmodels,wwf5067/statsmodels,jstoxrocky/statsmodels,cbmoore/statsmodels,statsmodels/statsmodels,jseabold/statsmodels,bzero/statsmodels,yl565/statsmodels,ChadFulton/statsmodels,nguyentu1602/statsmodels,saketkc/statsmodels,astocko/statsmodels,bert9bert/statsmodels,DonBeo/statsmodels,Averroes/statsmodels,gef756/statsmodels,edhuckle/statsmodels,jseabold/statsmodels,waynenilsen/statsmodels,hainm/statsmodels,bashtage/statsmodels,nvoron23/statsmodels,huongttlan/statsmodels,detrout/debian-statsmodels,yarikoptic/pystatsmodels,bavardage/statsmodels,wzbozon/statsmodels,YihaoLu/statsmodels,phobson/statsmodels,rgommers/statsmodels,YihaoLu/statsmodels,bavardage/statsmodels,wwf5067/statsmodels,bsipocz/statsmodels,edhuckle/statsmodels,statsmodels/statsmodels,nvoron23/statsmodels,adammenges/statsmodels,wwf5067/statsmodels,yl565/statsmodels,alekz112/statsmodels,waynenilsen/statsmodels,bert9bert/statsmodels,detrout/debian-statsmodels,alekz112/statsmodels,wzbozon/statsmodels,jseabold/statsmodels,rgommers/statsmodels,ChadFulton/statsmodels,bashtage/statsmodels,YihaoLu/statsmodels,ChadFulton/statsmodels,bavardage/statsmodels,musically-ut/statsmodels,nguyentu1602/statsmodels,bsipocz/statsmodels,waynenilsen/statsmodels,nvoron23/statsmodels,huongttlan/statsmodels,bashtage/statsmodels,Averroes/statsmodels,adammenges/statsmodels,hlin117/statsmodels,wwf5067/statsmodels,gef756/statsmodels,cbmoore/statsmodels,hainm/statsmodels,wdurhamh/statsmodels,wdurhamh/statsmodels,josef-pkt/statsmodels,phobson/statsmodels,alekz112/statsmodels,bsipocz/statsmodels,saketkc/statsmodels,wdurhamh/statsmodels,musically-ut/statsmodels,kiyoto/statsmodels,phobson/statsmodels,wdurhamh/statsmodels,gef756/statsmodels,kiyoto/statsmodels,statsmodels/statsmodels,rgommers/statsmodels,josef-pkt/statsmodels,hainm/statsmodels,edhuckle/statsmodels,hlin117/statsmodels,gef756/statsmodels,josef-pkt/statsmodels,cbmoore/statsmodels,gef756/statsmodels,YihaoLu/statsmodels,statsmodels/statsmodels,hlin117/statsmodels,bert9bert/statsmodels,edhuckle/statsmodels,hlin117/statsmodels,jstoxrocky/statsmodels,bzero/statsmodels,yarikoptic/pystatsmodels,yl565/statsmodels,saketkc/statsmodels,kiyoto/statsmodels,huongttlan/statsmodels,Averroes/statsmodels,nvoron23/statsmodels,astocko/statsmodels,wzbozon/statsmodels,bzero/statsmodels,detrout/debian-statsmodels,wkfwkf/statsmodels,bzero/statsmodels,ChadFulton/statsmodels,bert9bert/statsmodels,yl565/statsmodels,bashtage/statsmodels,jseabold/statsmodels,kiyoto/statsmodels,phobson/statsmodels,bzero/statsmodels,cbmoore/statsmodels,musically-ut/statsmodels,ChadFulton/statsmodels,DonBeo/statsmodels,yarikoptic/pystatsmodels,nguyentu1602/statsmodels,jstoxrocky/statsmodels,alekz112/statsmodels,adammenges/statsmodels,bavardage/statsmodels,wkfwkf/statsmodels,josef-pkt/statsmodels,bashtage/statsmodels,jstoxrocky/statsmodels,yl565/statsmodels,bashtage/statsmodels,astocko/statsmodels,nguyentu1602/statsmodels,wkfwkf/statsmodels,Averroes/statsmodels,nvoron23/statsmodels,DonBeo/statsmodels,wzbozon/statsmodels,rgommers/statsmodels,YihaoLu/statsmodels,rgommers/statsmodels,ChadFulton/statsmodels,josef-pkt/statsmodels,bavardage/statsmodels,DonBeo/statsmodels,bert9bert/statsmodels,statsmodels/statsmodels,hainm/statsmodels,statsmodels/statsmodels,jseabold/statsmodels,
wkfwkf/statsmodels,cbmoore/statsmodels,waynenilsen/statsmodels,detrout/debian-statsmodels,phobson/statsmodels,wdurhamh/statsmodels | <REPLACE_OLD> categorical
from datautils import Dataset
from <REPLACE_NEW> categorical
from <REPLACE_END> <|endoftext|> from tools import add_constant, categorical
from statsmodels import NoseWrapper as Tester
test = Tester().test
| REF: Remove import of moved file
from tools import add_constant, categorical
from datautils import Dataset
from statsmodels import NoseWrapper as Tester
test = Tester().test
|
de3aa6eed0fca73f3949ee5c584bcc79e1b98109 | setup.py | setup.py | from setuptools import setup, find_packages
setup(name='mutube',
version='0.1',
description='Scrape YouTube links from 4chan threads.',
url='https://github.com/AP-e/mutube',
license='unlicense',
classifiers=['Development Status :: 2 - Pre-Alpha',
'Programming Language :: Python :: 3 :: Only'],
keywords='4chan youtube',
packages=find_packages(),
install_requires=['bs4'])
| from setuptools import setup, find_packages
setup(name='mutube',
version='0.1',
description='Scrape YouTube links from 4chan threads.',
url='https://github.com/AP-e/mutube',
license='unlicense',
classifiers=['Development Status :: 2 - Pre-Alpha',
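# the ":: 3 :: Only" qualifier was removed so the package is no longer marked Python 3 only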
'Programming Language :: Python'],
keywords='4chan youtube',
packages=find_packages(),
install_requires=['bs4'])
| Remove Python 3 only classifier | Remove Python 3 only classifier
| Python | unlicense | AP-e/mutube | <REPLACE_OLD> Python :: 3 :: Only'],
<REPLACE_NEW> Python'],
<REPLACE_END> <|endoftext|> from setuptools import setup, find_packages
setup(name='mutube',
version='0.1',
description='Scrape YouTube links from 4chan threads.',
url='https://github.com/AP-e/mutube',
license='unlicense',
classifiers=['Development Status :: 2 - Pre-Alpha',
'Programming Language :: Python'],
keywords='4chan youtube',
packages=find_packages(),
install_requires=['bs4'])
| Remove Python 3 only classifier
from setuptools import setup, find_packages
setup(name='mutube',
version='0.1',
description='Scrape YouTube links from 4chan threads.',
url='https://github.com/AP-e/mutube',
license='unlicense',
classifiers=['Development Status :: 2 - Pre-Alpha',
'Programming Language :: Python :: 3 :: Only'],
keywords='4chan youtube',
packages=find_packages(),
install_requires=['bs4'])
|
01a86c09b768f6cc4e5bf9b389d09512f9e56ceb | sample_agent.py | sample_agent.py | import numpy as np
import matplotlib.pyplot as plt
class Agent(object):
def __init__(self, dim_action):
self.dim_action = dim_action
def act(self, ob, reward, done, vision):
#print("ACT!")
# Get an Observation from the environment.
# Each observation vectors are numpy array.
# focus, opponents, track sensors are scaled into [0, 1]. When the agent
# is out of the road, sensor variables return -1/200.
# rpm, wheelSpinVel are raw values and then needed to be preprocessed.
# vision is given as a tensor with size of (3, 64, 64) <-- rgb
# and values are in [0, 255]
if vision is False:
focus, speedX, speedY, speedZ, opponents, rpm, track, wheelSpinVel = ob
else:
focus, speedX, speedY, speedZ, opponents, rpm, track, wheelSpinVel, vision = ob
""" The code below is for checking the vision input. This is very heavy for real-time Control
So you may need to remove.
"""
"""
img = np.ndarray((64,64,3))
for i in range(3):
img[:, :, i] = 255 - vision[i]
plt.imshow(img, origin='lower')
plt.draw()
plt.pause(0.001)
"""
return np.tanh(np.random.randn(self.dim_action)) # random action
| import numpy as np
import matplotlib.pyplot as plt
class Agent(object):
def __init__(self, dim_action):
self.dim_action = dim_action
def act(self, ob, reward, done, vision_on):
#print("ACT!")
# Get an Observation from the environment.
# Each observation vectors are numpy array.
# focus, opponents, track sensors are scaled into [0, 1]. When the agent
# is out of the road, sensor variables return -1/200.
# rpm, wheelSpinVel are raw values and then needed to be preprocessed.
# vision is given as a tensor with size of (64*64, 3) = (4096, 3) <-- rgb
# and values are in [0, 255]
if vision_on is False:
focus, speedX, speedY, speedZ, opponents, rpm, track, wheelSpinVel = ob
else:
focus, speedX, speedY, speedZ, opponents, rpm, track, wheelSpinVel, vision = ob
""" The code below is for checking the vision input. This is very heavy for real-time Control
So you may need to remove.
"""
print(vision.shape)
"""
img = np.ndarray((64,64,3))
for i in range(3):
img[:, :, i] = 255 - vision[:, i].reshape((64, 64))
plt.imshow(img, origin='lower')
plt.draw()
plt.pause(0.001)
"""
return np.tanh(np.random.randn(self.dim_action)) # random action
| Update to follow the new observation format (follow the vision input of OpenAI ATARI environment) | Update to follow the new observation format
(follow the vision input of OpenAI ATARI environment)
| Python | mit | travistang/late_fyt,travistang/late_fyt,ugo-nama-kun/gym_torcs,travistang/late_fyt,ugo-nama-kun/gym_torcs,ugo-nama-kun/gym_torcs,travistang/late_fyt,travistang/late_fyt,ugo-nama-kun/gym_torcs,ugo-nama-kun/gym_torcs,travistang/late_fyt,ugo-nama-kun/gym_torcs,travistang/late_fyt,ugo-nama-kun/gym_torcs | <REPLACE_OLD> vision):
<REPLACE_NEW> vision_on):
<REPLACE_END> <REPLACE_OLD> (3, 64, 64) <REPLACE_NEW> (64*64, 3) = (4096, 3) <REPLACE_END> <REPLACE_OLD> vision <REPLACE_NEW> vision_on <REPLACE_END> <INSERT> print(vision.shape)
<INSERT_END> <REPLACE_OLD> vision[i]
<REPLACE_NEW> vision[:, i].reshape((64, 64))
<REPLACE_END> <|endoftext|> import numpy as np
import matplotlib.pyplot as plt
class Agent(object):
def __init__(self, dim_action):
self.dim_action = dim_action
def act(self, ob, reward, done, vision_on):
#print("ACT!")
# Get an Observation from the environment.
# Each observation vector is a numpy array.
# focus, opponents, track sensors are scaled into [0, 1]. When the agent
# is out of the road, sensor variables return -1/200.
# rpm, wheelSpinVel are raw values that need to be preprocessed.
# vision is given as a tensor with size of (64*64, 3) = (4096, 3) <-- rgb
# and values are in [0, 255]
if vision_on is False:
focus, speedX, speedY, speedZ, opponents, rpm, track, wheelSpinVel = ob
else:
focus, speedX, speedY, speedZ, opponents, rpm, track, wheelSpinVel, vision = ob
""" The code below is for checking the vision input. This is very heavy for real-time Control
So you may need to remove it.
"""
print(vision.shape)
"""
img = np.ndarray((64,64,3))
for i in range(3):
img[:, :, i] = 255 - vision[:, i].reshape((64, 64))
plt.imshow(img, origin='lower')
plt.draw()
plt.pause(0.001)
"""
return np.tanh(np.random.randn(self.dim_action)) # random action
| Update to follow the new observation format
(follow the vision input of OpenAI ATARI environment)
import numpy as np
import matplotlib.pyplot as plt
class Agent(object):
def __init__(self, dim_action):
self.dim_action = dim_action
def act(self, ob, reward, done, vision):
#print("ACT!")
# Get an Observation from the environment.
# Each observation vector is a numpy array.
# focus, opponents, track sensors are scaled into [0, 1]. When the agent
# is out of the road, sensor variables return -1/200.
# rpm, wheelSpinVel are raw values that need to be preprocessed.
# vision is given as a tensor with size of (3, 64, 64) <-- rgb
# and values are in [0, 255]
if vision is False:
focus, speedX, speedY, speedZ, opponents, rpm, track, wheelSpinVel = ob
else:
focus, speedX, speedY, speedZ, opponents, rpm, track, wheelSpinVel, vision = ob
""" The code below is for checking the vision input. This is very heavy for real-time Control
So you may need to remove it.
"""
"""
img = np.ndarray((64,64,3))
for i in range(3):
img[:, :, i] = 255 - vision[i]
plt.imshow(img, origin='lower')
plt.draw()
plt.pause(0.001)
"""
return np.tanh(np.random.randn(self.dim_action)) # random action
|
ab4ae040895c50da6cb0827f6461d1733c7fe30a | tests/test_plugin_states.py | tests/test_plugin_states.py | from contextlib import contextmanager
from os import path
from unittest import TestCase
from canaryd_packages import six
from dictdiffer import diff
from jsontest import JsonTest
from mock import patch
from canaryd.plugin import get_plugin_by_name
@six.add_metaclass(JsonTest)
class TestPluginStates(TestCase):
jsontest_files = path.join('tests/plugins')
@contextmanager
def patch_commands(self, commands):
def handle_command(command, *args, **kwargs):
command = command[0]
if command not in commands:
raise ValueError(
'Broken tests: {0} not in commands'.format(command),
)
return '\n'.join(commands[command])
check_output_patch = patch(
'canaryd.subprocess.check_output',
handle_command,
)
check_output_patch.start()
yield
check_output_patch.stop()
def jsontest_function(self, test_name, test_data):
plugin = get_plugin_by_name(test_data['plugin'])
with self.patch_commands(test_data['commands']):
state = plugin.get_state({})
try:
self.assertEqual(state, test_data['state'])
except AssertionError:
print(list(diff(test_data['state'], state)))
raise
| from contextlib import contextmanager
from os import path
from unittest import TestCase
from dictdiffer import diff
from jsontest import JsonTest
from mock import patch
from canaryd_packages import six
from canaryd.plugin import get_plugin_by_name
class TestPluginRealStates(TestCase):
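# these plugins work on any host without mocked commands, so exercise them for real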
def run_plugin(self, plugin_name):
plugin = get_plugin_by_name(plugin_name)
plugin.get_state({})
def test_meta_plugin(self):
self.run_plugin('meta')
def test_services_plugin(self):
self.run_plugin('services')
def test_containers_plugin(self):
self.run_plugin('containers')
@six.add_metaclass(JsonTest)
class TestPluginStates(TestCase):
jsontest_files = path.join('tests/plugins')
@contextmanager
def patch_commands(self, commands):
def handle_command(command, *args, **kwargs):
command = command[0]
if command not in commands:
raise ValueError('Broken tests: {0} not in commands: {1}'.format(
command, commands.keys(),
))
return '\n'.join(commands[command])
check_output_patch = patch(
'canaryd.subprocess.check_output',
handle_command,
)
check_output_patch.start()
yield
check_output_patch.stop()
def jsontest_function(self, test_name, test_data):
plugin = get_plugin_by_name(test_data['plugin'])
with self.patch_commands(test_data['commands']):
state = plugin.get_state({})
try:
self.assertEqual(state, test_data['state'])
except AssertionError:
print(list(diff(test_data['state'], state)))
raise
| Add real plugin state tests for plugins that always work (meta, containers, services). | Add real plugin state tests for plugins that always work (meta, containers, services).
| Python | mit | Oxygem/canaryd,Oxygem/canaryd | <DELETE> canaryd_packages import six
from <DELETE_END> <INSERT> canaryd_packages import six
from <INSERT_END> <REPLACE_OLD> get_plugin_by_name
@six.add_metaclass(JsonTest)
class <REPLACE_NEW> get_plugin_by_name
class TestPluginRealStates(TestCase):
def run_plugin(self, plugin_name):
plugin = get_plugin_by_name(plugin_name)
plugin.get_state({})
def test_meta_plugin(self):
self.run_plugin('meta')
def test_services_plugin(self):
self.run_plugin('services')
def test_containers_plugin(self):
self.run_plugin('containers')
@six.add_metaclass(JsonTest)
class <REPLACE_END> <REPLACE_OLD> ValueError(
'Broken <REPLACE_NEW> ValueError('Broken <REPLACE_END> <REPLACE_OLD> commands'.format(command),
)
<REPLACE_NEW> commands: {1}'.format(
command, commands.keys(),
))
<REPLACE_END> <|endoftext|> from contextlib import contextmanager
from os import path
from unittest import TestCase
from dictdiffer import diff
from jsontest import JsonTest
from mock import patch
from canaryd_packages import six
from canaryd.plugin import get_plugin_by_name
class TestPluginRealStates(TestCase):
def run_plugin(self, plugin_name):
plugin = get_plugin_by_name(plugin_name)
plugin.get_state({})
def test_meta_plugin(self):
self.run_plugin('meta')
def test_services_plugin(self):
self.run_plugin('services')
def test_containers_plugin(self):
self.run_plugin('containers')
@six.add_metaclass(JsonTest)
class TestPluginStates(TestCase):
jsontest_files = path.join('tests/plugins')
@contextmanager
def patch_commands(self, commands):
def handle_command(command, *args, **kwargs):
command = command[0]
if command not in commands:
raise ValueError('Broken tests: {0} not in commands: {1}'.format(
command, commands.keys(),
))
return '\n'.join(commands[command])
check_output_patch = patch(
'canaryd.subprocess.check_output',
handle_command,
)
check_output_patch.start()
yield
check_output_patch.stop()
def jsontest_function(self, test_name, test_data):
plugin = get_plugin_by_name(test_data['plugin'])
with self.patch_commands(test_data['commands']):
state = plugin.get_state({})
try:
self.assertEqual(state, test_data['state'])
except AssertionError:
print(list(diff(test_data['state'], state)))
raise
| Add real plugin state tests for plugins that always work (meta, containers, services).
from contextlib import contextmanager
from os import path
from unittest import TestCase
from canaryd_packages import six
from dictdiffer import diff
from jsontest import JsonTest
from mock import patch
from canaryd.plugin import get_plugin_by_name
@six.add_metaclass(JsonTest)
class TestPluginStates(TestCase):
jsontest_files = path.join('tests/plugins')
@contextmanager
def patch_commands(self, commands):
def handle_command(command, *args, **kwargs):
command = command[0]
if command not in commands:
raise ValueError(
'Broken tests: {0} not in commands'.format(command),
)
return '\n'.join(commands[command])
check_output_patch = patch(
'canaryd.subprocess.check_output',
handle_command,
)
check_output_patch.start()
yield
check_output_patch.stop()
def jsontest_function(self, test_name, test_data):
plugin = get_plugin_by_name(test_data['plugin'])
with self.patch_commands(test_data['commands']):
state = plugin.get_state({})
try:
self.assertEqual(state, test_data['state'])
except AssertionError:
print(list(diff(test_data['state'], state)))
raise
|
bac71c099f0196d5ab74a8ec08cedc32ab008e1c | graphite/post-setup-graphite-web.py | graphite/post-setup-graphite-web.py | #!/usr/bin/env python
import os
import random
import string
import sys
from django.utils.crypto import get_random_string
## Check if the script was already executed
flag_filename = '/opt/graphite/post-setup-complete'
if os.path.isfile(flag_filename):
sys.exit(0)
## Add SECRET_KEY to local_settings.py
settings_filename = '/opt/graphite/webapp/graphite/local_settings.py'
secret_key = get_random_string(
50, 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)')
with open(settings_filename, 'a') as f:
f.write("SECRET_KEY = '%s'\n" % (secret_key, ))
if '--debug' in sys.argv:
f.write('DEBUG = True\n')
import django
django.setup()
from django.contrib.auth.models import User
## Create Superuser
password = ''.join(
(random.choice(string.letters + string.digits + string.punctuation))
for x in range(20))
User.objects.create_superuser('admin', 'admin@localhost', password)
print '[graphite-web] Superuser: admin, Password: %s' % (password, )
## Set flag file
with open(flag_filename, 'w'):
os.utime(flag_filename, None)
| #!/usr/bin/env python
import os
import random
import string
import sys
from django.utils.crypto import get_random_string
## Check if the script was already executed
flag_filename = '/opt/graphite/post-setup-complete'
if os.path.isfile(flag_filename):
sys.exit(0)
## Add SECRET_KEY to local_settings.py
settings_filename = '/opt/graphite/webapp/graphite/local_settings.py'
secret_key = get_random_string(
50, 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)')
with open(settings_filename, 'a') as f:
f.write("SECRET_KEY = '%s'\n" % (secret_key, ))
if '--debug' in sys.argv:
f.write('DEBUG = True\n')
import django
django.setup()
from django.contrib.auth.models import User
## Create Superuser
password = ''.join(
(random.choice(string.letters + string.digits + string.punctuation))
for x in range(20))
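# only create the superuser on the first run; creating the same username twice would fail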
if not User.objects.filter(username='admin').exists():
User.objects.create_superuser('admin', 'admin@localhost', password)
print '[graphite-web] Superuser: admin, Password: %s' % (password, )
## Set flag file
with open(flag_filename, 'w'):
os.utime(flag_filename, None)
| Check if admin user exists before creating one | Check if admin user exists before creating one
| Python | mit | rvernica/Dockerfile,rvernica/docker-library,rvernica/docker-library | <REPLACE_OLD> range(20))
User.objects.create_superuser('admin', <REPLACE_NEW> range(20))
if not User.objects.filter(username='admin').exists():
User.objects.create_superuser('admin', <REPLACE_END> <REPLACE_OLD> password)
print <REPLACE_NEW> password)
print <REPLACE_END> <|endoftext|> #!/usr/bin/env python
import os
import random
import string
import sys
from django.utils.crypto import get_random_string
## Check if the script was already executed
flag_filename = '/opt/graphite/post-setup-complete'
if os.path.isfile(flag_filename):
sys.exit(0)
## Add SECRET_KEY to local_settings.py
settings_filename = '/opt/graphite/webapp/graphite/local_settings.py'
secret_key = get_random_string(
50, 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)')
with open(settings_filename, 'a') as f:
f.write("SECRET_KEY = '%s'\n" % (secret_key, ))
if '--debug' in sys.argv:
f.write('DEBUG = True\n')
import django
django.setup()
from django.contrib.auth.models import User
## Create Superuser
password = ''.join(
(random.choice(string.letters + string.digits + string.punctuation))
for x in range(20))
if not User.objects.filter(username='admin').exists():
User.objects.create_superuser('admin', 'admin@localhost', password)
print '[graphite-web] Superuser: admin, Password: %s' % (password, )
## Set flag file
with open(flag_filename, 'w'):
os.utime(flag_filename, None)
| Check if admin user exists before creating one
#!/usr/bin/env python
import os
import random
import string
import sys
from django.utils.crypto import get_random_string
## Check if the script was already executed
flag_filename = '/opt/graphite/post-setup-complete'
if os.path.isfile(flag_filename):
sys.exit(0)
## Add SECRET_KEY to local_settings.py
settings_filename = '/opt/graphite/webapp/graphite/local_settings.py'
secret_key = get_random_string(
50, 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)')
with open(settings_filename, 'a') as f:
f.write("SECRET_KEY = '%s'\n" % (secret_key, ))
if '--debug' in sys.argv:
f.write('DEBUG = True\n')
import django
django.setup()
from django.contrib.auth.models import User
## Create Superuser
password = ''.join(
(random.choice(string.letters + string.digits + string.punctuation))
for x in range(20))
User.objects.create_superuser('admin', 'admin@localhost', password)
print '[graphite-web] Superuser: admin, Password: %s' % (password, )
## Set flag file
with open(flag_filename, 'w'):
os.utime(flag_filename, None)
|
210581cfef3d54b055ec9f9b1dc6d19b757a4d6e | cli/cli.py | cli/cli.py | import argparse
parser = argparse.ArgumentParser(prog='moocx', description='EdX MOOC Data Analysis')
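# the built-in 'version' action prints the version string and exits immediately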
parser.add_argument('-v', '--version', action='version', version='0.1.0')
parser.parse_args()
| Add cmd for getting version | Add cmd for getting version
| Python | mit | McGillX/edx_data_research,McGillX/edx_data_research,McGillX/edx_data_research | <REPLACE_OLD> <REPLACE_NEW> import argparse
parser = argparse.ArgumentParser(prog='moocx', description='EdX MOOC Data Analysis')
parser.add_argument('-v', '--version', action='version', version='0.1.0')
parser.parse_args()
<REPLACE_END> <|endoftext|> import argparse
parser = argparse.ArgumentParser(prog='moocx', description='EdX MOOC Data Analysis')
parser.add_argument('-v', '--version', action='version', version='0.1.0')
parser.parse_args()
| Add cmd for getting version
|
|
d2ad097e08b8c5e9d318968f0a6f859f03f7c07a | mycli/packages/special/dbcommands.py | mycli/packages/special/dbcommands.py | import logging
from .main import special_command, RAW_QUERY, PARSED_QUERY
log = logging.getLogger(__name__)
@special_command('\\dt', '\\dt', 'List or describe tables.', arg_type=PARSED_QUERY, case_sensitive=True)
def list_tables(cur, arg=None, arg_type=PARSED_QUERY):
if arg:
query = 'SHOW FIELDS FROM {0}'.format(arg)
else:
query = 'SHOW TABLES'
log.debug(query)
cur.execute(query)
if cur.description:
headers = [x[0] for x in cur.description]
return [(None, cur, headers, '')]
else:
return [(None, None, None, '')]
@special_command('\\l', '\\l', 'List databases.', arg_type=RAW_QUERY, case_sensitive=True)
def list_databases(cur, **_):
query = 'SHOW DATABASES'
log.debug(query)
cur.execute(query)
if cur.description:
headers = [x[0] for x in cur.description]
return [(None, cur, headers, '')]
else:
return [(None, None, None, '')]
| import logging
from .main import special_command, RAW_QUERY, PARSED_QUERY
log = logging.getLogger(__name__)
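# \dt alone lists tables; \dt <table> shows the fields of that table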
@special_command('\\dt', '\\dt [table]', 'List or describe tables.', arg_type=PARSED_QUERY, case_sensitive=True)
def list_tables(cur, arg=None, arg_type=PARSED_QUERY):
if arg:
query = 'SHOW FIELDS FROM {0}'.format(arg)
else:
query = 'SHOW TABLES'
log.debug(query)
cur.execute(query)
if cur.description:
headers = [x[0] for x in cur.description]
return [(None, cur, headers, '')]
else:
return [(None, None, None, '')]
@special_command('\\l', '\\l', 'List databases.', arg_type=RAW_QUERY, case_sensitive=True)
def list_databases(cur, **_):
query = 'SHOW DATABASES'
log.debug(query)
cur.execute(query)
if cur.description:
headers = [x[0] for x in cur.description]
return [(None, cur, headers, '')]
else:
return [(None, None, None, '')]
| Change \dt syntax to add an optional table name. | Change \dt syntax to add an optional table name.
| Python | bsd-3-clause | mdsrosa/mycli,jinstrive/mycli,j-bennet/mycli,martijnengler/mycli,martijnengler/mycli,mdsrosa/mycli,danieljwest/mycli,j-bennet/mycli,jinstrive/mycli,shoma/mycli,danieljwest/mycli,shoma/mycli | <REPLACE_OLD> '\\dt', <REPLACE_NEW> '\\dt [table]', <REPLACE_END> <|endoftext|> import logging
from .main import special_command, RAW_QUERY, PARSED_QUERY
log = logging.getLogger(__name__)
@special_command('\\dt', '\\dt [table]', 'List or describe tables.', arg_type=PARSED_QUERY, case_sensitive=True)
def list_tables(cur, arg=None, arg_type=PARSED_QUERY):
if arg:
query = 'SHOW FIELDS FROM {0}'.format(arg)
else:
query = 'SHOW TABLES'
log.debug(query)
cur.execute(query)
if cur.description:
headers = [x[0] for x in cur.description]
return [(None, cur, headers, '')]
else:
return [(None, None, None, '')]
@special_command('\\l', '\\l', 'List databases.', arg_type=RAW_QUERY, case_sensitive=True)
def list_databases(cur, **_):
query = 'SHOW DATABASES'
log.debug(query)
cur.execute(query)
if cur.description:
headers = [x[0] for x in cur.description]
return [(None, cur, headers, '')]
else:
return [(None, None, None, '')]
| Change \dt syntax to add an optional table name.
import logging
from .main import special_command, RAW_QUERY, PARSED_QUERY
log = logging.getLogger(__name__)
@special_command('\\dt', '\\dt', 'List or describe tables.', arg_type=PARSED_QUERY, case_sensitive=True)
def list_tables(cur, arg=None, arg_type=PARSED_QUERY):
if arg:
query = 'SHOW FIELDS FROM {0}'.format(arg)
else:
query = 'SHOW TABLES'
log.debug(query)
cur.execute(query)
if cur.description:
headers = [x[0] for x in cur.description]
return [(None, cur, headers, '')]
else:
return [(None, None, None, '')]
@special_command('\\l', '\\l', 'List databases.', arg_type=RAW_QUERY, case_sensitive=True)
def list_databases(cur, **_):
query = 'SHOW DATABASES'
log.debug(query)
cur.execute(query)
if cur.description:
headers = [x[0] for x in cur.description]
return [(None, cur, headers, '')]
else:
return [(None, None, None, '')]
|
9d105a62b29f0cd170343705bbe20c509d523e46 | osf_tests/test_handlers.py | osf_tests/test_handlers.py | import pytest
from nose.tools import assert_raises
from framework.celery_tasks import handlers
from website.project.tasks import on_node_updated
class TestCeleryHandlers:
@pytest.fixture()
def queue(self):
return handlers.queue()
def test_get_task_from_queue_not_there(self):
task = handlers.get_task_from_queue(
'website.project.tasks.on_node_updated',
predicate=lambda task: task.kwargs['node_id'] == 'woop'
)
assert task is False
def test_get_task_from_queue(self, queue):
handlers.queue().append(
on_node_updated.s(node_id='woop', user_id='heyyo', first_save=False, saved_fields={'contributors'})
)
task = handlers.get_task_from_queue(
'website.project.tasks.on_node_updated',
predicate=lambda task: task.kwargs['node_id'] == 'woop'
)
assert task
def test_get_task_from_queue_errors_with_two_tasks(self, queue):
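# with two queued tasks matching the predicate, a single task cannot be returned unambiguously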
tasks = [
on_node_updated.s(node_id='woop', user_id='heyyo', first_save=False, saved_fields={'title'}),
on_node_updated.s(node_id='woop', user_id='heyyo', first_save=False, saved_fields={'contributors'})
]
queue += tasks
with assert_raises(ValueError):
handlers.get_task_from_queue(
'website.project.tasks.on_node_updated',
predicate=lambda task: task.kwargs['node_id'] == 'woop'
)
| Add tests for new get_task_from_queue celery helper | Add tests for new get_task_from_queue celery helper
| Python | apache-2.0 | cslzchen/osf.io,caseyrollins/osf.io,mattclark/osf.io,aaxelb/osf.io,Johnetordoff/osf.io,pattisdr/osf.io,erinspace/osf.io,erinspace/osf.io,sloria/osf.io,pattisdr/osf.io,mfraezz/osf.io,caseyrollins/osf.io,cslzchen/osf.io,mattclark/osf.io,cslzchen/osf.io,felliott/osf.io,felliott/osf.io,brianjgeiger/osf.io,Johnetordoff/osf.io,mfraezz/osf.io,adlius/osf.io,saradbowman/osf.io,baylee-d/osf.io,cslzchen/osf.io,adlius/osf.io,CenterForOpenScience/osf.io,icereval/osf.io,aaxelb/osf.io,HalcyonChimera/osf.io,baylee-d/osf.io,mattclark/osf.io,CenterForOpenScience/osf.io,sloria/osf.io,CenterForOpenScience/osf.io,erinspace/osf.io,Johnetordoff/osf.io,HalcyonChimera/osf.io,baylee-d/osf.io,CenterForOpenScience/osf.io,saradbowman/osf.io,pattisdr/osf.io,brianjgeiger/osf.io,adlius/osf.io,Johnetordoff/osf.io,caseyrollins/osf.io,HalcyonChimera/osf.io,aaxelb/osf.io,mfraezz/osf.io,aaxelb/osf.io,felliott/osf.io,icereval/osf.io,felliott/osf.io,sloria/osf.io,HalcyonChimera/osf.io,mfraezz/osf.io,brianjgeiger/osf.io,adlius/osf.io,brianjgeiger/osf.io,icereval/osf.io | <REPLACE_OLD> <REPLACE_NEW> import pytest
from nose.tools import assert_raises
from framework.celery_tasks import handlers
from website.project.tasks import on_node_updated
class TestCeleryHandlers:
@pytest.fixture()
def queue(self):
return handlers.queue()
def test_get_task_from_queue_not_there(self):
task = handlers.get_task_from_queue(
'website.project.tasks.on_node_updated',
predicate=lambda task: task.kwargs['node_id'] == 'woop'
)
assert task is False
def test_get_task_from_queue(self, queue):
handlers.queue().append(
on_node_updated.s(node_id='woop', user_id='heyyo', first_save=False, saved_fields={'contributors'})
)
task = handlers.get_task_from_queue(
'website.project.tasks.on_node_updated',
predicate=lambda task: task.kwargs['node_id'] == 'woop'
)
assert task
def test_get_task_from_queue_errors_with_two_tasks(self, queue):
tasks = [
on_node_updated.s(node_id='woop', user_id='heyyo', first_save=False, saved_fields={'title'}),
on_node_updated.s(node_id='woop', user_id='heyyo', first_save=False, saved_fields={'contributors'})
]
queue += tasks
with assert_raises(ValueError):
handlers.get_task_from_queue(
'website.project.tasks.on_node_updated',
predicate=lambda task: task.kwargs['node_id'] == 'woop'
)
<REPLACE_END> <|endoftext|> import pytest
from nose.tools import assert_raises
from framework.celery_tasks import handlers
from website.project.tasks import on_node_updated
class TestCeleryHandlers:
@pytest.fixture()
def queue(self):
return handlers.queue()
def test_get_task_from_queue_not_there(self):
task = handlers.get_task_from_queue(
'website.project.tasks.on_node_updated',
predicate=lambda task: task.kwargs['node_id'] == 'woop'
)
assert task is False
def test_get_task_from_queue(self, queue):
handlers.queue().append(
on_node_updated.s(node_id='woop', user_id='heyyo', first_save=False, saved_fields={'contributors'})
)
task = handlers.get_task_from_queue(
'website.project.tasks.on_node_updated',
predicate=lambda task: task.kwargs['node_id'] == 'woop'
)
assert task
def test_get_task_from_queue_errors_with_two_tasks(self, queue):
tasks = [
on_node_updated.s(node_id='woop', user_id='heyyo', first_save=False, saved_fields={'title'}),
on_node_updated.s(node_id='woop', user_id='heyyo', first_save=False, saved_fields={'contributors'})
]
queue += tasks
with assert_raises(ValueError):
handlers.get_task_from_queue(
'website.project.tasks.on_node_updated',
predicate=lambda task: task.kwargs['node_id'] == 'woop'
)
| Add tests for new get_task_from_queue celery helper
|
|
54f27f507820b5c9a7e832c46eb2a5ba3d918a2f | scripts/task/solver.py | scripts/task/solver.py | import numpy as np
from eigen3 import toEigen
import rbdyn as rbd
class WLSSolver(object):
def __init__(self):
self.tasks = []
def addTask(self, task, weight):
t = [task, weight]
self.tasks.append(t)
return t
def rmTask(self, taskDef):
self.tasks.remove(taskDef)
def solve(self, mb, mbc):
err = np.mat(np.empty((0, 1)))
jac = np.mat(np.empty((0, mb.nrDof())))
for t in self.tasks:
t[0].update(mb, mbc)
err = np.vstack((err, t[1]*t[0].error()))
jac = np.vstack((jac, t[1]*t[0].jacobian()))
#alpha1 = np.linalg.lstsq(jac, err)[0]
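# solve the stacked task system in the least-squares sense via the pseudoinverse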
alpha2 = np.linalg.pinv(jac)*err
mbc.alpha = rbd.vectorToDof(mb, toEigen(alpha2))
| import numpy as np
from eigen3 import toEigenX
import rbdyn as rbd
class WLSSolver(object):
def __init__(self):
self.tasks = []
def addTask(self, task, weight):
t = [task, weight]
self.tasks.append(t)
return t
def rmTask(self, taskDef):
self.tasks.remove(taskDef)
def solve(self, mb, mbc):
err = np.mat(np.empty((0, 1)))
jac = np.mat(np.empty((0, mb.nrDof())))
for t in self.tasks:
t[0].update(mb, mbc)
err = np.vstack((err, t[1]*t[0].error()))
jac = np.vstack((jac, t[1]*t[0].jacobian()))
#alpha1 = np.linalg.lstsq(jac, err)[0]
alpha2 = np.linalg.pinv(jac)*err
mbc.alpha = rbd.vectorToDof(mb, toEigenX(alpha2))
| Fix a bad eigen vector cast. | Fix a bad eigen vector cast.
| Python | bsd-2-clause | jrl-umi3218/RBDyn,gergondet/RBDyn,gergondet/RBDyn,gergondet/RBDyn,jrl-umi3218/RBDyn,jrl-umi3218/RBDyn,jrl-umi3218/RBDyn,gergondet/RBDyn,gergondet/RBDyn | <REPLACE_OLD> toEigen
import <REPLACE_NEW> toEigenX
import <REPLACE_END> <REPLACE_OLD> toEigen(alpha2))
<REPLACE_NEW> toEigenX(alpha2))
<REPLACE_END> <|endoftext|> import numpy as np
from eigen3 import toEigenX
import rbdyn as rbd
class WLSSolver(object):
def __init__(self):
self.tasks = []
def addTask(self, task, weight):
t = [task, weight]
self.tasks.append(t)
return t
def rmTask(self, taskDef):
self.tasks.remove(taskDef)
def solve(self, mb, mbc):
err = np.mat(np.empty((0, 1)))
jac = np.mat(np.empty((0, mb.nrDof())))
for t in self.tasks:
t[0].update(mb, mbc)
err = np.vstack((err, t[1]*t[0].error()))
jac = np.vstack((jac, t[1]*t[0].jacobian()))
#alpha1 = np.linalg.lstsq(jac, err)[0]
alpha2 = np.linalg.pinv(jac)*err
mbc.alpha = rbd.vectorToDof(mb, toEigenX(alpha2))
| Fix a bad eigen vector cast.
import numpy as np
from eigen3 import toEigen
import rbdyn as rbd
class WLSSolver(object):
def __init__(self):
self.tasks = []
def addTask(self, task, weight):
t = [task, weight]
self.tasks.append(t)
return t
def rmTask(self, taskDef):
self.tasks.remove(taskDef)
def solve(self, mb, mbc):
err = np.mat(np.empty((0, 1)))
jac = np.mat(np.empty((0, mb.nrDof())))
for t in self.tasks:
t[0].update(mb, mbc)
err = np.vstack((err, t[1]*t[0].error()))
jac = np.vstack((jac, t[1]*t[0].jacobian()))
#alpha1 = np.linalg.lstsq(jac, err)[0]
alpha2 = np.linalg.pinv(jac)*err
mbc.alpha = rbd.vectorToDof(mb, toEigen(alpha2))
|
9e067b8f53c8ee8afae63996e725614e5766059f | tests/aggregate/test_many_to_many_relationships.py | tests/aggregate/test_many_to_many_relationships.py | import sqlalchemy as sa
from sqlalchemy_utils.aggregates import aggregated
from tests import TestCase
class TestAggregatesWithManyToManyRelationships(TestCase):
dns = 'postgres://postgres@localhost/sqlalchemy_utils_test'
def create_models(self):
user_group = sa.Table('user_group', self.Base.metadata,
sa.Column('user_id', sa.Integer, sa.ForeignKey('user.id')),
sa.Column('group_id', sa.Integer, sa.ForeignKey('group.id'))
)
class User(self.Base):
__tablename__ = 'user'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.Unicode(255))
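# group_count is stored on the user table and recalculated whenever the groups relation changes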
@aggregated('groups', sa.Column(sa.Integer, default=0))
def group_count(self):
return sa.func.count('1')
groups = sa.orm.relationship(
'Group',
backref='users',
secondary=user_group
)
class Group(self.Base):
__tablename__ = 'group'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.Unicode(255))
self.User = User
self.Group = Group
def test_assigns_aggregates_on_insert(self):
user = self.User(
name=u'John Matrix'
)
self.session.add(user)
self.session.commit()
group = self.Group(
name=u'Some group',
users=[user]
)
self.session.add(group)
self.session.commit()
self.session.refresh(user)
assert user.group_count == 1
def test_updates_aggregates_on_delete(self):
user = self.User(
name=u'John Matrix'
)
self.session.add(user)
self.session.commit()
group = self.Group(
name=u'Some group',
users=[user]
)
self.session.add(group)
self.session.commit()
self.session.refresh(user)
user.groups = []
self.session.commit()
self.session.refresh(user)
assert user.group_count == 0
| Add tests for many to many aggregates | Add tests for many to many aggregates
| Python | bsd-3-clause | JackWink/sqlalchemy-utils,rmoorman/sqlalchemy-utils,marrybird/sqlalchemy-utils,konstantinoskostis/sqlalchemy-utils,cheungpat/sqlalchemy-utils,joshfriend/sqlalchemy-utils,tonyseek/sqlalchemy-utils,tonyseek/sqlalchemy-utils,joshfriend/sqlalchemy-utils,spoqa/sqlalchemy-utils | <REPLACE_OLD> <REPLACE_NEW> import sqlalchemy as sa
from sqlalchemy_utils.aggregates import aggregated
from tests import TestCase
class TestAggregatesWithManyToManyRelationships(TestCase):
dns = 'postgres://postgres@localhost/sqlalchemy_utils_test'
def create_models(self):
user_group = sa.Table('user_group', self.Base.metadata,
sa.Column('user_id', sa.Integer, sa.ForeignKey('user.id')),
sa.Column('group_id', sa.Integer, sa.ForeignKey('group.id'))
)
class User(self.Base):
__tablename__ = 'user'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.Unicode(255))
@aggregated('groups', sa.Column(sa.Integer, default=0))
def group_count(self):
return sa.func.count('1')
groups = sa.orm.relationship(
'Group',
backref='users',
secondary=user_group
)
class Group(self.Base):
__tablename__ = 'group'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.Unicode(255))
self.User = User
self.Group = Group
def test_assigns_aggregates_on_insert(self):
user = self.User(
name=u'John Matrix'
)
self.session.add(user)
self.session.commit()
group = self.Group(
name=u'Some group',
users=[user]
)
self.session.add(group)
self.session.commit()
self.session.refresh(user)
assert user.group_count == 1
def test_updates_aggregates_on_delete(self):
user = self.User(
name=u'John Matrix'
)
self.session.add(user)
self.session.commit()
group = self.Group(
name=u'Some group',
users=[user]
)
self.session.add(group)
self.session.commit()
self.session.refresh(user)
user.groups = []
self.session.commit()
self.session.refresh(user)
assert user.group_count == 0
<REPLACE_END> <|endoftext|> import sqlalchemy as sa
from sqlalchemy_utils.aggregates import aggregated
from tests import TestCase
class TestAggregatesWithManyToManyRelationships(TestCase):
dns = 'postgres://postgres@localhost/sqlalchemy_utils_test'
def create_models(self):
user_group = sa.Table('user_group', self.Base.metadata,
sa.Column('user_id', sa.Integer, sa.ForeignKey('user.id')),
sa.Column('group_id', sa.Integer, sa.ForeignKey('group.id'))
)
class User(self.Base):
__tablename__ = 'user'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.Unicode(255))
@aggregated('groups', sa.Column(sa.Integer, default=0))
def group_count(self):
return sa.func.count('1')
groups = sa.orm.relationship(
'Group',
backref='users',
secondary=user_group
)
class Group(self.Base):
__tablename__ = 'group'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.Unicode(255))
self.User = User
self.Group = Group
def test_assigns_aggregates_on_insert(self):
user = self.User(
name=u'John Matrix'
)
self.session.add(user)
self.session.commit()
group = self.Group(
name=u'Some group',
users=[user]
)
self.session.add(group)
self.session.commit()
self.session.refresh(user)
assert user.group_count == 1
def test_updates_aggregates_on_delete(self):
user = self.User(
name=u'John Matrix'
)
self.session.add(user)
self.session.commit()
group = self.Group(
name=u'Some group',
users=[user]
)
self.session.add(group)
self.session.commit()
self.session.refresh(user)
user.groups = []
self.session.commit()
self.session.refresh(user)
assert user.group_count == 0
| Add tests for many to many aggregates
|
|
bf7dd8bb6ff3e4a8f6412122ca23829c35554082 | contrib/examples/sensors/echo_flask_app.py | contrib/examples/sensors/echo_flask_app.py | from flask import request, Flask
from st2reactor.sensor.base import Sensor
class EchoFlaskSensor(Sensor):
def __init__(self, sensor_service, config):
super(EchoFlaskSensor, self).__init__(
sensor_service=sensor_service,
config=config
)
self._host = '127.0.0.1'
self._port = '5000'
self._path = '/echo'
self._log = self._sensor_service.get_logger(__name__)
self._app = Flask(__name__)
def setup(self):
pass
def run(self):
@self._app.route(self._path, methods=['POST'])
def echo():
payload = request.get_json(force=True)
self._sensor_service.dispatch(trigger="examples.echo_flask",
payload=payload)
return request.data
self._log.info('Listening for payload on http://%s:%s%s' %
(self._host, self._port, self._path))
self._app.run(host=self._host, port=self._port, threaded=True)
def cleanup(self):
pass
def add_trigger(self, trigger):
# This method is called when trigger is created
pass
def update_trigger(self, trigger):
# This method is called when trigger is updated
pass
def remove_trigger(self, trigger):
# This method is called when trigger is deleted
pass
| from flask import request, Flask
from st2reactor.sensor.base import Sensor
class EchoFlaskSensor(Sensor):
def __init__(self, sensor_service, config):
super(EchoFlaskSensor, self).__init__(
sensor_service=sensor_service,
config=config
)
self._host = '127.0.0.1'
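# the port must be an int for app.run below, hence 5000 rather than '5000'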
self._port = 5000
self._path = '/echo'
self._log = self._sensor_service.get_logger(__name__)
self._app = Flask(__name__)
def setup(self):
pass
def run(self):
@self._app.route(self._path, methods=['POST'])
def echo():
payload = request.get_json(force=True)
self._sensor_service.dispatch(trigger="examples.echo_flask",
payload=payload)
return request.data
self._log.info('Listening for payload on http://{}:{}{}'.format(
self._host, self._port, self._path))
self._app.run(host=self._host, port=self._port, threaded=True)
def cleanup(self):
pass
def add_trigger(self, trigger):
# This method is called when trigger is created
pass
def update_trigger(self, trigger):
# This method is called when trigger is updated
pass
def remove_trigger(self, trigger):
# This method is called when trigger is deleted
pass
| Update the port to be an integer. | Update the port to be an integer.
Fix the port to be an integer. Use the format function for string formatting. | Python | apache-2.0 | StackStorm/st2,StackStorm/st2,nzlosh/st2,StackStorm/st2,StackStorm/st2,Plexxi/st2,nzlosh/st2,Plexxi/st2,Plexxi/st2,nzlosh/st2,nzlosh/st2,Plexxi/st2 | <REPLACE_OLD> EchoFlaskSensor(Sensor):
<REPLACE_NEW> EchoFlaskSensor(Sensor):
<REPLACE_END> <REPLACE_OLD> '5000'
<REPLACE_NEW> 5000
<REPLACE_END> <REPLACE_OLD> http://%s:%s%s' %
(self._host, <REPLACE_NEW> http://{}:{}{}'.format(
self._host, <REPLACE_END> <|endoftext|> from flask import request, Flask
from st2reactor.sensor.base import Sensor
class EchoFlaskSensor(Sensor):
def __init__(self, sensor_service, config):
super(EchoFlaskSensor, self).__init__(
sensor_service=sensor_service,
config=config
)
self._host = '127.0.0.1'
self._port = 5000
self._path = '/echo'
self._log = self._sensor_service.get_logger(__name__)
self._app = Flask(__name__)
def setup(self):
pass
def run(self):
@self._app.route(self._path, methods=['POST'])
def echo():
payload = request.get_json(force=True)
self._sensor_service.dispatch(trigger="examples.echo_flask",
payload=payload)
return request.data
self._log.info('Listening for payload on http://{}:{}{}'.format(
self._host, self._port, self._path))
self._app.run(host=self._host, port=self._port, threaded=True)
def cleanup(self):
pass
def add_trigger(self, trigger):
# This method is called when trigger is created
pass
def update_trigger(self, trigger):
# This method is called when trigger is updated
pass
def remove_trigger(self, trigger):
# This method is called when trigger is deleted
pass
| Update the port to be an integer.
Fix the port to be an integer. Use the format function for string formatting.
from flask import request, Flask
from st2reactor.sensor.base import Sensor
class EchoFlaskSensor(Sensor):
def __init__(self, sensor_service, config):
super(EchoFlaskSensor, self).__init__(
sensor_service=sensor_service,
config=config
)
self._host = '127.0.0.1'
self._port = '5000'
self._path = '/echo'
self._log = self._sensor_service.get_logger(__name__)
self._app = Flask(__name__)
def setup(self):
pass
def run(self):
@self._app.route(self._path, methods=['POST'])
def echo():
payload = request.get_json(force=True)
self._sensor_service.dispatch(trigger="examples.echo_flask",
payload=payload)
return request.data
self._log.info('Listening for payload on http://%s:%s%s' %
(self._host, self._port, self._path))
self._app.run(host=self._host, port=self._port, threaded=True)
def cleanup(self):
pass
def add_trigger(self, trigger):
# This method is called when trigger is created
pass
def update_trigger(self, trigger):
# This method is called when trigger is updated
pass
def remove_trigger(self, trigger):
# This method is called when trigger is deleted
pass
|
7b0bd58c359f5ea21af907cb90234171a6cfca5c | photobox/photobox.py | photobox/photobox.py | from photofolder import Photofolder
from folder import RealFolder
from gphotocamera import Gphoto
from main import Photobox
from rcswitch import RCSwitch
##########
# config #
##########
photodirectory = '/var/www/html/'
cheesepicfolder = '/home/pi/cheesepics/'
windowwidth = 1024
windowheight = 768
camera = Gphoto()
switch = RCSwitch(2352753, 2352754, "NOT_IMPLEMENTED")
##########
filesystemFolder = RealFolder(photodirectory)
cheesepicFolder = RealFolder(cheesepicfolder)
photofolder = Photofolder(filesystemFolder)
photobox = Photobox((windowwidth, windowheight), photofolder, camera, switch)
photobox.start()
| from cheesefolder import Cheesefolder
from photofolder import Photofolder
from folder import RealFolder
from gphotocamera import Gphoto
from main import Photobox
from rcswitch import RCSwitch
##########
# config #
##########
photodirectory = '/var/www/html/'
cheesepicpath = '/home/pi/cheesepics/'
windowwidth = 1024
windowheight = 768
camera = Gphoto()
switch = RCSwitch(2352753, 2352754, "NOT_IMPLEMENTED")
##########
filesystemFolder = RealFolder(photodirectory)
cheesepicFolder = RealFolder(cheesepicpath)
cheesef = Cheesefolder(cheesepicFolder)
photofolder = Photofolder(filesystemFolder)
photobox = Photobox((windowwidth, windowheight), photofolder, camera, switch, cheesef)
photobox.start()
 | Use the correct cheesefolder objects | Use the correct cheesefolder objects
| Python | mit | MarkusAmshove/Photobox | <INSERT> cheesefolder import Cheesefolder
from <INSERT_END> <REPLACE_OLD> '/var/www/html/'
cheesepicfolder <REPLACE_NEW> '/var/www/html/'
cheesepicpath <REPLACE_END> <REPLACE_OLD> RealFolder(cheesepicfolder)
photofolder <REPLACE_NEW> RealFolder(cheesepicpath)
cheesef = Cheesefolder(cheesepicFolder)
photofolder <REPLACE_END> <REPLACE_OLD> switch)
photobox.start()
<REPLACE_NEW> switch, cheesef)
photobox.start()
<REPLACE_END> <|endoftext|> from cheesefolder import Cheesefolder
from photofolder import Photofolder
from folder import RealFolder
from gphotocamera import Gphoto
from main import Photobox
from rcswitch import RCSwitch
##########
# config #
##########
photodirectory = '/var/www/html/'
cheesepicpath = '/home/pi/cheesepics/'
windowwidth = 1024
windowheight = 768
camera = Gphoto()
switch = RCSwitch(2352753, 2352754, "NOT_IMPLEMENTED")
##########
filesystemFolder = RealFolder(photodirectory)
cheesepicFolder = RealFolder(cheesepicpath)
cheesef = Cheesefolder(cheesepicFolder)
photofolder = Photofolder(filesystemFolder)
photobox = Photobox((windowwidth, windowheight), photofolder, camera, switch, cheesef)
photobox.start()
 | Use the correct cheesefolder objects
from photofolder import Photofolder
from folder import RealFolder
from gphotocamera import Gphoto
from main import Photobox
from rcswitch import RCSwitch
##########
# config #
##########
photodirectory = '/var/www/html/'
cheesepicfolder = '/home/pi/cheesepics/'
windowwidth = 1024
windowheight = 768
camera = Gphoto()
switch = RCSwitch(2352753, 2352754, "NOT_IMPLEMENTED")
##########
filesystemFolder = RealFolder(photodirectory)
cheesepicFolder = RealFolder(cheesepicfolder)
photofolder = Photofolder(filesystemFolder)
photobox = Photobox((windowwidth, windowheight), photofolder, camera, switch)
photobox.start()
|
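# Illustrative aside (hedged sketch, not part of the photobox record above):
# the commit wires its new Cheesefolder collaborator at the entry point
# instead of constructing it inside Photobox. Generic constructor-injection
# shape, with stand-in classes only:
class Folder(object):
    def __init__(self, path):
        self.path = path
class App(object):
    def __init__(self, folder):
        self.folder = folder  # collaborator passed in, not built here
app = App(Folder('/home/pi/cheesepics/'))
print(app.folder.path)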
dbfa14401c0b50eb1a3cac413652cb975ee9d41f | ocw-ui/backend/tests/test_directory_helpers.py | ocw-ui/backend/tests/test_directory_helpers.py | import os
import unittest
from webtest import TestApp
from ..run_webservices import app
from ..directory_helpers import _get_clean_directory_path
test_app = TestApp(app)
class TestDirectoryPathCleaner(unittest.TestCase):
PATH_LEADER = '/tmp/foo'
VALID_CLEAN_DIR = '/tmp/foo/bar'
if not os.path.exists(PATH_LEADER): os.mkdir(PATH_LEADER)
if not os.path.exists(VALID_CLEAN_DIR): os.mkdir(VALID_CLEAN_DIR)
def test_valid_directory_path(self):
clean_path = _get_clean_directory_path(self.PATH_LEADER, '/bar')
self.assertEquals(clean_path, self.VALID_CLEAN_DIR)
| Add valid directory cleaner helper test | Add valid directory cleaner helper test
git-svn-id: https://svn.apache.org/repos/asf/incubator/climate/trunk@1563517 13f79535-47bb-0310-9956-ffa450edef68
Former-commit-id: a23ba7556854cb30faaa0dfc19fdbcc6cb67382c | Python | apache-2.0 | huikyole/climate,agoodm/climate,MJJoyce/climate,MBoustani/climate,agoodm/climate,MJJoyce/climate,kwhitehall/climate,MBoustani/climate,lewismc/climate,agoodm/climate,pwcberry/climate,MBoustani/climate,Omkar20895/climate,MJJoyce/climate,agoodm/climate,kwhitehall/climate,lewismc/climate,pwcberry/climate,huikyole/climate,riverma/climate,jarifibrahim/climate,apache/climate,Omkar20895/climate,jarifibrahim/climate,apache/climate,pwcberry/climate,huikyole/climate,jarifibrahim/climate,kwhitehall/climate,lewismc/climate,MBoustani/climate,kwhitehall/climate,agoodm/climate,riverma/climate,MJJoyce/climate,Omkar20895/climate,pwcberry/climate,riverma/climate,huikyole/climate,riverma/climate,pwcberry/climate,Omkar20895/climate,jarifibrahim/climate,lewismc/climate,huikyole/climate,Omkar20895/climate,apache/climate,MBoustani/climate,riverma/climate,apache/climate,MJJoyce/climate,apache/climate,jarifibrahim/climate,lewismc/climate | <INSERT> import os
import unittest
from webtest import TestApp
from ..run_webservices import app
from ..directory_helpers import _get_clean_directory_path
test_app = TestApp(app)
class TestDirectoryPathCleaner(unittest.TestCase):
<INSERT_END> <INSERT> PATH_LEADER = '/tmp/foo'
VALID_CLEAN_DIR = '/tmp/foo/bar'
if not os.path.exists(PATH_LEADER): os.mkdir(PATH_LEADER)
if not os.path.exists(VALID_CLEAN_DIR): os.mkdir(VALID_CLEAN_DIR)
def test_valid_directory_path(self):
clean_path = _get_clean_directory_path(self.PATH_LEADER, '/bar')
self.assertEquals(clean_path, self.VALID_CLEAN_DIR)
<INSERT_END> <|endoftext|> import os
import unittest
from webtest import TestApp
from ..run_webservices import app
from ..directory_helpers import _get_clean_directory_path
test_app = TestApp(app)
class TestDirectoryPathCleaner(unittest.TestCase):
PATH_LEADER = '/tmp/foo'
VALID_CLEAN_DIR = '/tmp/foo/bar'
if not os.path.exists(PATH_LEADER): os.mkdir(PATH_LEADER)
if not os.path.exists(VALID_CLEAN_DIR): os.mkdir(VALID_CLEAN_DIR)
def test_valid_directory_path(self):
clean_path = _get_clean_directory_path(self.PATH_LEADER, '/bar')
self.assertEquals(clean_path, self.VALID_CLEAN_DIR)
| Add valid directory cleaner helper test
git-svn-id: https://svn.apache.org/repos/asf/incubator/climate/trunk@1563517 13f79535-47bb-0310-9956-ffa450edef68
Former-commit-id: a23ba7556854cb30faaa0dfc19fdbcc6cb67382c
|
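# Illustrative aside (hedged sketch, not part of the climate record above):
# the test module guards each os.mkdir with an existence check, the Python 2
# idiom. On Python 3.2+ the same intent is a single call:
import os
os.makedirs('/tmp/foo/bar', exist_ok=True)  # no error if it already exists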
|
67b243915ef95ff1b9337bc67053d18df372e79d | unitypack/enums.py | unitypack/enums.py | from enum import IntEnum
class RuntimePlatform(IntEnum):
OSXEditor = 0
OSXPlayer = 1
WindowsPlayer = 2
OSXWebPlayer = 3
OSXDashboardPlayer = 4
WindowsWebPlayer = 5
WindowsEditor = 7
IPhonePlayer = 8
PS3 = 9
XBOX360 = 10
Android = 11
NaCl = 12
LinuxPlayer = 13
FlashPlayer = 15
WebGLPlayer = 17
MetroPlayerX86 = 18
WSAPlayerX86 = 18
MetroPlayerX64 = 19
WSAPlayerX64 = 19
MetroPlayerARM = 20
WSAPlayerARM = 20
WP8Player = 21
BB10Player = 22
BlackBerryPlayer = 22
TizenPlayer = 23
PSP2 = 24
PS4 = 25
PSM = 26
XboxOne = 27
| from enum import IntEnum
class RuntimePlatform(IntEnum):
OSXEditor = 0
OSXPlayer = 1
WindowsPlayer = 2
OSXWebPlayer = 3
OSXDashboardPlayer = 4
WindowsWebPlayer = 5
WindowsEditor = 7
IPhonePlayer = 8
PS3 = 9
XBOX360 = 10
Android = 11
NaCl = 12
LinuxPlayer = 13
FlashPlayer = 15
WebGLPlayer = 17
MetroPlayerX86 = 18
WSAPlayerX86 = 18
MetroPlayerX64 = 19
WSAPlayerX64 = 19
MetroPlayerARM = 20
WSAPlayerARM = 20
WP8Player = 21
BB10Player = 22
BlackBerryPlayer = 22
TizenPlayer = 23
PSP2 = 24
PS4 = 25
PSM = 26
PSMPlayer = 26
XboxOne = 27
SamsungTVPlayer = 28
| Add PSMPlayer and SamsungTVPlayer platforms | Add PSMPlayer and SamsungTVPlayer platforms
| Python | mit | andburn/python-unitypack | <INSERT> 26
PSMPlayer = <INSERT_END> <REPLACE_OLD> 27
<REPLACE_NEW> 27
SamsungTVPlayer = 28
<REPLACE_END> <|endoftext|> from enum import IntEnum
class RuntimePlatform(IntEnum):
OSXEditor = 0
OSXPlayer = 1
WindowsPlayer = 2
OSXWebPlayer = 3
OSXDashboardPlayer = 4
WindowsWebPlayer = 5
WindowsEditor = 7
IPhonePlayer = 8
PS3 = 9
XBOX360 = 10
Android = 11
NaCl = 12
LinuxPlayer = 13
FlashPlayer = 15
WebGLPlayer = 17
MetroPlayerX86 = 18
WSAPlayerX86 = 18
MetroPlayerX64 = 19
WSAPlayerX64 = 19
MetroPlayerARM = 20
WSAPlayerARM = 20
WP8Player = 21
BB10Player = 22
BlackBerryPlayer = 22
TizenPlayer = 23
PSP2 = 24
PS4 = 25
PSM = 26
PSMPlayer = 26
XboxOne = 27
SamsungTVPlayer = 28
| Add PSMPlayer and SamsungTVPlayer platforms
from enum import IntEnum
class RuntimePlatform(IntEnum):
OSXEditor = 0
OSXPlayer = 1
WindowsPlayer = 2
OSXWebPlayer = 3
OSXDashboardPlayer = 4
WindowsWebPlayer = 5
WindowsEditor = 7
IPhonePlayer = 8
PS3 = 9
XBOX360 = 10
Android = 11
NaCl = 12
LinuxPlayer = 13
FlashPlayer = 15
WebGLPlayer = 17
MetroPlayerX86 = 18
WSAPlayerX86 = 18
MetroPlayerX64 = 19
WSAPlayerX64 = 19
MetroPlayerARM = 20
WSAPlayerARM = 20
WP8Player = 21
BB10Player = 22
BlackBerryPlayer = 22
TizenPlayer = 23
PSP2 = 24
PS4 = 25
PSM = 26
XboxOne = 27
|
fc105f413e6683980c5d2fcc93a471ebbc9fecba | utils/files_provider.py | utils/files_provider.py | from string import Template
__author__ = 'maa'
templates_folder = 'file_templates_folder\\'
def create_and_full_fill_file(template_file_name, destination_file_path, file_name, kwargs):
template_file = open(templates_folder + template_file_name, 'r')
file_content = template_file.read()
template_file.close()
template = Template(file_content)
final_content = template.substitute(kwargs)
final_file = open(destination_file_path + '\\' + file_name, 'w')
final_file.write(final_content)
final_file.close()
| from string import Template
__author__ = 'maa'
templates_folder = 'file_templates_folder\\'
def create_and_full_fill_file(template_file_name, destination_file_path, kwargs):
template_file = open(template_file_name, 'r')
file_content = template_file.read()
template_file.close()
template = Template(file_content)
final_content = template.substitute(kwargs)
final_file = open(destination_file_path, 'w')
final_file.write(final_content)
final_file.close()
| Change method params because they are not necessary. | [dev] Change method params because they are not necessary.
| Python | apache-2.0 | amatkivskiy/baidu,amatkivskiy/baidu | <DELETE> file_name, <DELETE_END> <REPLACE_OLD> open(templates_folder + template_file_name, <REPLACE_NEW> open(template_file_name, <REPLACE_END> <REPLACE_OLD> open(destination_file_path + '\\' + file_name, <REPLACE_NEW> open(destination_file_path, <REPLACE_END> <|endoftext|> from string import Template
__author__ = 'maa'
templates_folder = 'file_templates_folder\\'
def create_and_full_fill_file(template_file_name, destination_file_path, kwargs):
template_file = open(template_file_name, 'r')
file_content = template_file.read()
template_file.close()
template = Template(file_content)
final_content = template.substitute(kwargs)
final_file = open(destination_file_path, 'w')
final_file.write(final_content)
final_file.close()
| [dev] Change method params because they are not necessary.
from string import Template
__author__ = 'maa'
templates_folder = 'file_templates_folder\\'
def create_and_full_fill_file(template_file_name, destination_file_path, file_name, kwargs):
template_file = open(templates_folder + template_file_name, 'r')
file_content = template_file.read()
template_file.close()
template = Template(file_content)
final_content = template.substitute(kwargs)
final_file = open(destination_file_path + '\\' + file_name, 'w')
final_file.write(final_content)
final_file.close()
|
f876c410ab39bd348f79ed2a256c09edd4225c56 | odo/backends/tests/test_dask_array.py | odo/backends/tests/test_dask_array.py | from __future__ import absolute_import, division, print_function
from odo.backends.dask_array import append, Array, merge
from dask.array.core import insert_to_ooc
from dask import core
from odo import convert, into
from odo.utils import tmpfile
import numpy as np
import bcolz
def eq(a, b):
c = a == b
if isinstance(c, np.ndarray):
c = c.all()
return c
def test_convert():
x = np.arange(600).reshape((20, 30))
d = convert(Array, x, blockshape=(4, 5))
assert isinstance(d, Array)
def test_convert_to_numpy_array():
x = np.arange(600).reshape((20, 30))
d = convert(Array, x, blockshape=(4, 5))
x2 = convert(np.ndarray, d)
assert eq(x, x2)
def test_append_to_array():
x = np.arange(600).reshape((20, 30))
a = into(Array, x, blockshape=(4, 5))
b = bcolz.zeros(shape=(0, 30), dtype=x.dtype)
append(b, a)
assert eq(b[:], x)
with tmpfile('hdf5') as fn:
h = into(fn+'::/data', a)
assert eq(h[:], x)
h.file.close()
def test_into_inplace():
x = np.arange(600).reshape((20, 30))
a = into(Array, x, blockshape=(4, 5))
b = bcolz.zeros(shape=(20, 30), dtype=x.dtype)
append(b, a, inplace=True)
assert eq(b[:], x)
def test_insert_to_ooc():
x = np.arange(600).reshape((20, 30))
y = np.empty(shape=x.shape, dtype=x.dtype)
a = convert(Array, x, blockshape=(4, 5))
dsk = insert_to_ooc(y, a)
core.get(merge(dsk, a.dask), list(dsk.keys()))
assert eq(y, x)
def test__array__():
x = np.arange(600).reshape((20, 30))
d = convert(Array, x, blockshape=(4, 5))
assert eq(x, np.array(d))
| Migrate tests for dask array conversions from dask package. | Migrate tests for dask array conversions from dask package.
| Python | bsd-3-clause | Dannnno/odo,Dannnno/odo,ywang007/odo,ContinuumIO/odo,ContinuumIO/odo,ywang007/odo,cpcloud/odo,blaze/odo,cowlicks/odo,blaze/odo,alexmojaki/odo,quantopian/odo,alexmojaki/odo,cpcloud/odo,cowlicks/odo,quantopian/odo | <REPLACE_OLD> <REPLACE_NEW> from __future__ import absolute_import, division, print_function
from odo.backends.dask_array import append, Array, merge
from dask.array.core import insert_to_ooc
from dask import core
from odo import convert, into
from odo.utils import tmpfile
import numpy as np
import bcolz
def eq(a, b):
c = a == b
if isinstance(c, np.ndarray):
c = c.all()
return c
def test_convert():
x = np.arange(600).reshape((20, 30))
d = convert(Array, x, blockshape=(4, 5))
assert isinstance(d, Array)
def test_convert_to_numpy_array():
x = np.arange(600).reshape((20, 30))
d = convert(Array, x, blockshape=(4, 5))
x2 = convert(np.ndarray, d)
assert eq(x, x2)
def test_append_to_array():
x = np.arange(600).reshape((20, 30))
a = into(Array, x, blockshape=(4, 5))
b = bcolz.zeros(shape=(0, 30), dtype=x.dtype)
append(b, a)
assert eq(b[:], x)
with tmpfile('hdf5') as fn:
h = into(fn+'::/data', a)
assert eq(h[:], x)
h.file.close()
def test_into_inplace():
x = np.arange(600).reshape((20, 30))
a = into(Array, x, blockshape=(4, 5))
b = bcolz.zeros(shape=(20, 30), dtype=x.dtype)
append(b, a, inplace=True)
assert eq(b[:], x)
def test_insert_to_ooc():
x = np.arange(600).reshape((20, 30))
y = np.empty(shape=x.shape, dtype=x.dtype)
a = convert(Array, x, blockshape=(4, 5))
dsk = insert_to_ooc(y, a)
core.get(merge(dsk, a.dask), list(dsk.keys()))
assert eq(y, x)
def test__array__():
x = np.arange(600).reshape((20, 30))
d = convert(Array, x, blockshape=(4, 5))
assert eq(x, np.array(d))
<REPLACE_END> <|endoftext|> from __future__ import absolute_import, division, print_function
from odo.backends.dask_array import append, Array, merge
from dask.array.core import insert_to_ooc
from dask import core
from odo import convert, into
from odo.utils import tmpfile
import numpy as np
import bcolz
def eq(a, b):
c = a == b
if isinstance(c, np.ndarray):
c = c.all()
return c
def test_convert():
x = np.arange(600).reshape((20, 30))
d = convert(Array, x, blockshape=(4, 5))
assert isinstance(d, Array)
def test_convert_to_numpy_array():
x = np.arange(600).reshape((20, 30))
d = convert(Array, x, blockshape=(4, 5))
x2 = convert(np.ndarray, d)
assert eq(x, x2)
def test_append_to_array():
x = np.arange(600).reshape((20, 30))
a = into(Array, x, blockshape=(4, 5))
b = bcolz.zeros(shape=(0, 30), dtype=x.dtype)
append(b, a)
assert eq(b[:], x)
with tmpfile('hdf5') as fn:
h = into(fn+'::/data', a)
assert eq(h[:], x)
h.file.close()
def test_into_inplace():
x = np.arange(600).reshape((20, 30))
a = into(Array, x, blockshape=(4, 5))
b = bcolz.zeros(shape=(20, 30), dtype=x.dtype)
append(b, a, inplace=True)
assert eq(b[:], x)
def test_insert_to_ooc():
x = np.arange(600).reshape((20, 30))
y = np.empty(shape=x.shape, dtype=x.dtype)
a = convert(Array, x, blockshape=(4, 5))
dsk = insert_to_ooc(y, a)
core.get(merge(dsk, a.dask), list(dsk.keys()))
assert eq(y, x)
def test__array__():
x = np.arange(600).reshape((20, 30))
d = convert(Array, x, blockshape=(4, 5))
assert eq(x, np.array(d))
| Migrate tests for dask array conversions from dask package.
|
|
598911ebd93085926602a26e9bbf835df0bea0b6 | test/test_rcsparse.py | test/test_rcsparse.py | import unittest
from rcsparse import rcsfile
from os.path import dirname, join
REV_NUMBER = 0
REV_STATE = 3
class Test(unittest.TestCase):
def test_rcsfile(self):
f = rcsfile(join(dirname(__file__), 'data', 'patch-copyin_c,v'))
self.assertEquals(f.head, '1.1')
self.assertEquals(f.revs[f.head][REV_NUMBER], '1.1')
self.assertEquals(f.revs[f.head][REV_STATE], 'dead')
if __name__ == '__main__':
unittest.main()
| Add a test case for Simon Schubert's rcsparse library | Add a test case for Simon Schubert's rcsparse library
| Python | isc | ustuehler/git-cvs,ustuehler/git-cvs | <INSERT> import unittest
from rcsparse import rcsfile
from os.path import dirname, join
REV_NUMBER = 0
REV_STATE = 3
class Test(unittest.TestCase):
<INSERT_END> <INSERT> def test_rcsfile(self):
f = rcsfile(join(dirname(__file__), 'data', 'patch-copyin_c,v'))
self.assertEquals(f.head, '1.1')
self.assertEquals(f.revs[f.head][REV_NUMBER], '1.1')
self.assertEquals(f.revs[f.head][REV_STATE], 'dead')
if __name__ == '__main__':
unittest.main()
<INSERT_END> <|endoftext|> import unittest
from rcsparse import rcsfile
from os.path import dirname, join
REV_NUMBER = 0
REV_STATE = 3
class Test(unittest.TestCase):
def test_rcsfile(self):
f = rcsfile(join(dirname(__file__), 'data', 'patch-copyin_c,v'))
self.assertEquals(f.head, '1.1')
self.assertEquals(f.revs[f.head][REV_NUMBER], '1.1')
self.assertEquals(f.revs[f.head][REV_STATE], 'dead')
if __name__ == '__main__':
unittest.main()
| Add a test case for Simon Schubert's rcsparse library
|
|
6cfd296a86c1b475101c179a45a7453b76dcbfd5 | riak/util.py | riak/util.py | import collections
def quacks_like_dict(object):
"""Check if object is dict-like"""
return isinstance(object, collections.Mapping)
def deep_merge(a, b):
"""Merge two deep dicts non-destructively
Uses a stack to avoid maximum recursion depth exceptions
>>> a = {'a': 1, 'b': {1: 1, 2: 2}, 'd': 6}
>>> b = {'c': 3, 'b': {2: 7}, 'd': {'z': [1, 2, 3]}}
>>> c = merge(a, b)
>>> from pprint import pprint; pprint(c)
{'a': 1, 'b': {1: 1, 2: 7}, 'c': 3, 'd': {'z': [1, 2, 3]}}
"""
assert quacks_like_dict(a), quacks_like_dict(b)
dst = a.copy()
stack = [(dst, b)]
while stack:
current_dst, current_src = stack.pop()
for key in current_src:
if key not in current_dst:
current_dst[key] = current_src[key]
else:
if quacks_like_dict(current_src[key]) and quacks_like_dict(current_dst[key]) :
stack.append((current_dst[key], current_src[key]))
else:
current_dst[key] = current_src[key]
return dst
| try:
from collections import Mapping
except ImportError:
# compatibility with Python 2.5
Mapping = dict
def quacks_like_dict(object):
"""Check if object is dict-like"""
return isinstance(object, Mapping)
def deep_merge(a, b):
"""Merge two deep dicts non-destructively
Uses a stack to avoid maximum recursion depth exceptions
>>> a = {'a': 1, 'b': {1: 1, 2: 2}, 'd': 6}
>>> b = {'c': 3, 'b': {2: 7}, 'd': {'z': [1, 2, 3]}}
>>> c = merge(a, b)
>>> from pprint import pprint; pprint(c)
{'a': 1, 'b': {1: 1, 2: 7}, 'c': 3, 'd': {'z': [1, 2, 3]}}
"""
assert quacks_like_dict(a), quacks_like_dict(b)
dst = a.copy()
stack = [(dst, b)]
while stack:
current_dst, current_src = stack.pop()
for key in current_src:
if key not in current_dst:
current_dst[key] = current_src[key]
else:
if quacks_like_dict(current_src[key]) and quacks_like_dict(current_dst[key]) :
stack.append((current_dst[key], current_src[key]))
else:
current_dst[key] = current_src[key]
return dst
| Adjust for compatibility with Python 2.5 | Adjust for compatibility with Python 2.5
| Python | apache-2.0 | basho/riak-python-client,GabrielNicolasAvellaneda/riak-python-client,GabrielNicolasAvellaneda/riak-python-client,bmess/riak-python-client,basho/riak-python-client,basho/riak-python-client,bmess/riak-python-client | <INSERT> try:
from collections <INSERT_END> <REPLACE_OLD> collections
def <REPLACE_NEW> Mapping
except ImportError:
# compatibility with Python 2.5
Mapping = dict
def <REPLACE_END> <REPLACE_OLD> collections.Mapping)
<REPLACE_NEW> Mapping)
<REPLACE_END> <|endoftext|> try:
from collections import Mapping
except ImportError:
# compatibility with Python 2.5
Mapping = dict
def quacks_like_dict(object):
"""Check if object is dict-like"""
return isinstance(object, Mapping)
def deep_merge(a, b):
"""Merge two deep dicts non-destructively
Uses a stack to avoid maximum recursion depth exceptions
>>> a = {'a': 1, 'b': {1: 1, 2: 2}, 'd': 6}
>>> b = {'c': 3, 'b': {2: 7}, 'd': {'z': [1, 2, 3]}}
>>> c = merge(a, b)
>>> from pprint import pprint; pprint(c)
{'a': 1, 'b': {1: 1, 2: 7}, 'c': 3, 'd': {'z': [1, 2, 3]}}
"""
assert quacks_like_dict(a), quacks_like_dict(b)
dst = a.copy()
stack = [(dst, b)]
while stack:
current_dst, current_src = stack.pop()
for key in current_src:
if key not in current_dst:
current_dst[key] = current_src[key]
else:
if quacks_like_dict(current_src[key]) and quacks_like_dict(current_dst[key]) :
stack.append((current_dst[key], current_src[key]))
else:
current_dst[key] = current_src[key]
return dst
| Adjust for compatibility with Python 2.5
import collections
def quacks_like_dict(object):
"""Check if object is dict-like"""
return isinstance(object, collections.Mapping)
def deep_merge(a, b):
"""Merge two deep dicts non-destructively
Uses a stack to avoid maximum recursion depth exceptions
>>> a = {'a': 1, 'b': {1: 1, 2: 2}, 'd': 6}
>>> b = {'c': 3, 'b': {2: 7}, 'd': {'z': [1, 2, 3]}}
>>> c = merge(a, b)
>>> from pprint import pprint; pprint(c)
{'a': 1, 'b': {1: 1, 2: 7}, 'c': 3, 'd': {'z': [1, 2, 3]}}
"""
assert quacks_like_dict(a), quacks_like_dict(b)
dst = a.copy()
stack = [(dst, b)]
while stack:
current_dst, current_src = stack.pop()
for key in current_src:
if key not in current_dst:
current_dst[key] = current_src[key]
else:
if quacks_like_dict(current_src[key]) and quacks_like_dict(current_dst[key]) :
stack.append((current_dst[key], current_src[key]))
else:
current_dst[key] = current_src[key]
return dst
|
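# Illustrative aside (hedged sketch, not part of the riak record above): the
# change uses the guarded-import pattern to stay compatible with older
# interpreters. The same shape with a modern pair of module paths:
try:
    from collections.abc import Mapping  # Python 3.3+
except ImportError:
    from collections import Mapping      # older Pythons
print(isinstance({}, Mapping))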
5d278330812618d55ba4efafcb097e3f5ee6db04 | project/category/views.py | project/category/views.py | from flask import render_template, Blueprint, url_for, \
redirect, flash, request
from project.models import Category, Webinar
category_blueprint = Blueprint('category', __name__,)
@category_blueprint.route('/categories')
def index():
categories = Category.query.all()
return render_template('category/categories.html', categories=categories)
| from flask import render_template, Blueprint, url_for, \
redirect, flash, request
from project.models import Category, Webinar
category_blueprint = Blueprint('category', __name__,)
@category_blueprint.route('/categories')
def index():
categories = Category.query.all()
return render_template('category/categories.html', categories=categories)
@category_blueprint.route('/category/<category_slug>')
def show(category_slug):
category = Category.query.filter_by(slug=category_slug)
return render_template('category/category.html', category=category)
| Add basic view to see category show page | Add basic view to see category show page
| Python | mit | dylanshine/streamschool,dylanshine/streamschool | <REPLACE_OLD> categories=categories)
<REPLACE_NEW> categories=categories)
@category_blueprint.route('/category/<category_slug>')
def show(category_slug):
category = Category.query.filter_by(slug=category_slug)
return render_template('category/category.html', category=category)
<REPLACE_END> <|endoftext|> from flask import render_template, Blueprint, url_for, \
redirect, flash, request
from project.models import Category, Webinar
category_blueprint = Blueprint('category', __name__,)
@category_blueprint.route('/categories')
def index():
categories = Category.query.all()
return render_template('category/categories.html', categories=categories)
@category_blueprint.route('/category/<category_slug>')
def show(category_slug):
category = Category.query.filter_by(slug=category_slug)
return render_template('category/category.html', category=category)
| Add basic view to see category show page
from flask import render_template, Blueprint, url_for, \
redirect, flash, request
from project.models import Category, Webinar
category_blueprint = Blueprint('category', __name__,)
@category_blueprint.route('/categories')
def index():
categories = Category.query.all()
return render_template('category/categories.html', categories=categories)
|
0aa757955d631df9fb8e6cbe3e372dcae56e2255 | django_mailbox/transports/imap.py | django_mailbox/transports/imap.py | from imaplib import IMAP4, IMAP4_SSL
from .base import EmailTransport, MessageParseError
class ImapTransport(EmailTransport):
def __init__(self, hostname, port=None, ssl=False, archive=''):
self.hostname = hostname
self.port = port
self.archive = archive
if ssl:
self.transport = IMAP4_SSL
if not self.port:
self.port = 993
else:
self.transport = IMAP4
if not self.port:
self.port = 143
def connect(self, username, password):
self.server = self.transport(self.hostname, self.port)
typ, msg = self.server.login(username, password)
self.server.select()
def get_message(self):
typ, inbox = self.server.search(None, 'ALL')
if not inbox[0]:
return
if self.archive:
typ, folders = self.server.list(pattern=self.archive)
if folders[0] == None:
self.archive = False
for key in inbox[0].split():
try:
typ, msg_contents = self.server.fetch(key, '(RFC822)')
message = self.get_email_from_bytes(msg_contents[0][1])
yield message
except MessageParseError:
continue
if self.archive:
self.server.copy(key, self.archive)
self.server.store(key, "+FLAGS", "\\Deleted")
self.server.expunge()
return
| from imaplib import IMAP4, IMAP4_SSL
from .base import EmailTransport, MessageParseError
class ImapTransport(EmailTransport):
def __init__(self, hostname, port=None, ssl=False, archive=''):
self.hostname = hostname
self.port = port
self.archive = archive
if ssl:
self.transport = IMAP4_SSL
if not self.port:
self.port = 993
else:
self.transport = IMAP4
if not self.port:
self.port = 143
def connect(self, username, password):
self.server = self.transport(self.hostname, self.port)
typ, msg = self.server.login(username, password)
self.server.select()
def get_message(self):
typ, inbox = self.server.search(None, 'ALL')
if not inbox[0]:
return
if self.archive:
typ, folders = self.server.list(pattern=self.archive)
if folders[0] is None:
# If the archive folder does not exist, create it
self.server.create(self.archive)
for key in inbox[0].split():
try:
typ, msg_contents = self.server.fetch(key, '(RFC822)')
message = self.get_email_from_bytes(msg_contents[0][1])
yield message
except MessageParseError:
continue
if self.archive:
self.server.copy(key, self.archive)
self.server.store(key, "+FLAGS", "\\Deleted")
self.server.expunge()
return
| Create archive folder if it does not exist. | Create archive folder if it does not exist.
| Python | mit | coddingtonbear/django-mailbox,ad-m/django-mailbox,Shekharrajak/django-mailbox,leifurhauks/django-mailbox | <REPLACE_OLD> == <REPLACE_NEW> is <REPLACE_END> <REPLACE_OLD> self.archive = False
<REPLACE_NEW> # If the archive folder does not exist, create it
self.server.create(self.archive)
<REPLACE_END> <|endoftext|> from imaplib import IMAP4, IMAP4_SSL
from .base import EmailTransport, MessageParseError
class ImapTransport(EmailTransport):
def __init__(self, hostname, port=None, ssl=False, archive=''):
self.hostname = hostname
self.port = port
self.archive = archive
if ssl:
self.transport = IMAP4_SSL
if not self.port:
self.port = 993
else:
self.transport = IMAP4
if not self.port:
self.port = 143
def connect(self, username, password):
self.server = self.transport(self.hostname, self.port)
typ, msg = self.server.login(username, password)
self.server.select()
def get_message(self):
typ, inbox = self.server.search(None, 'ALL')
if not inbox[0]:
return
if self.archive:
typ, folders = self.server.list(pattern=self.archive)
if folders[0] is None:
# If the archive folder does not exist, create it
self.server.create(self.archive)
for key in inbox[0].split():
try:
typ, msg_contents = self.server.fetch(key, '(RFC822)')
message = self.get_email_from_bytes(msg_contents[0][1])
yield message
except MessageParseError:
continue
if self.archive:
self.server.copy(key, self.archive)
self.server.store(key, "+FLAGS", "\\Deleted")
self.server.expunge()
return
| Create archive folder if it does not exist.
from imaplib import IMAP4, IMAP4_SSL
from .base import EmailTransport, MessageParseError
class ImapTransport(EmailTransport):
def __init__(self, hostname, port=None, ssl=False, archive=''):
self.hostname = hostname
self.port = port
self.archive = archive
if ssl:
self.transport = IMAP4_SSL
if not self.port:
self.port = 993
else:
self.transport = IMAP4
if not self.port:
self.port = 143
def connect(self, username, password):
self.server = self.transport(self.hostname, self.port)
typ, msg = self.server.login(username, password)
self.server.select()
def get_message(self):
typ, inbox = self.server.search(None, 'ALL')
if not inbox[0]:
return
if self.archive:
typ, folders = self.server.list(pattern=self.archive)
if folders[0] == None:
self.archive = False
for key in inbox[0].split():
try:
typ, msg_contents = self.server.fetch(key, '(RFC822)')
message = self.get_email_from_bytes(msg_contents[0][1])
yield message
except MessageParseError:
continue
if self.archive:
self.server.copy(key, self.archive)
self.server.store(key, "+FLAGS", "\\Deleted")
self.server.expunge()
return
|
e5af653b2133b493c7888bb305488e932acb2274 | doc/examples/special/plot_hinton.py | doc/examples/special/plot_hinton.py | """
==============
Hinton diagram
==============
Hinton diagrams are useful for visualizing the values of a 2D array. Positive
and negative values represented by white and black squares, respectively, and
the size of each square represents the magnitude of each value.
The `special.hinton` function is based off of the Hinton demo in the matplotlib
gallery [1]_. This implementation, however, uses a `RegularPolyCollection` to
draw squares, which is much more efficient than drawing individual rectangles.
Obscure example use: For my Ph.D., I wrote a numerical solver using
finite-differences and writing down Jacobian matrices analytically, was
incredibly-prone to bugs. To debug my code, I calculated the numerical Jacobian
(calculated using `scipy.optimize.slsqp.approx_jacobian`) and plotted the
Hinton diagram for for the difference of the numerical and analytical results.
You could, of course, use `pcolor` or `imshow` in a similar situation.
"""
import numpy as np
import matplotlib.pyplot as plt
from mpltools import special
A = np.random.uniform(-1, 1, size=(20, 20))
special.hinton(A)
plt.show()
| """
==============
Hinton diagram
==============
Hinton diagrams are useful for visualizing the values of a 2D array: Positive
and negative values are represented by white and black squares, respectively,
and the size of each square represents the magnitude of each value.
``special.hinton`` is based off of the `Hinton demo`_ in the matplotlib gallery. This implementation, however, uses a ``RegularPolyCollection`` to draw
squares, which is much more efficient than drawing individual rectangles.
Obscure example use: For my Ph.D., I wrote a numerical solver using
finite-differences. For speed, the Jacobian matrices were calculated
analytically, which was incredibly-prone to bugs. To debug my code,
I calculated the numerical Jacobian (calculated using
``scipy.optimize.slsqp.approx_jacobian``) and plotted the Hinton diagram for
the difference of the numerical and analytical results. That allowed me to
narrow down where the bugs were (boundary conditions!) instead of blindly
checking every equation. You could, of course, use ``pcolor`` or ``imshow`` in
a similar situation.
.. _Hinton demo: http://matplotlib.sourceforge.net/examples/api/hinton_demo.html
"""
import numpy as np
import matplotlib.pyplot as plt
from mpltools import special
A = np.random.uniform(-1, 1, size=(20, 20))
special.hinton(A)
plt.show()
| Clean up hinton example text. | DOC: Clean up hinton example text.
| Python | bsd-3-clause | tonysyu/mpltools,matteoicardi/mpltools | <REPLACE_OLD> array. <REPLACE_NEW> array: <REPLACE_END> <INSERT> are <INSERT_END> <REPLACE_OLD> respectively, and
the <REPLACE_NEW> respectively,
and the <REPLACE_END> <REPLACE_OLD> value.
The `special.hinton` function <REPLACE_NEW> value.
``special.hinton`` <REPLACE_END> <REPLACE_OLD> Hinton demo <REPLACE_NEW> `Hinton demo`_ <REPLACE_END> <REPLACE_OLD> matplotlib
gallery [1]_. <REPLACE_NEW> matplotlib gallery. <REPLACE_END> <REPLACE_OLD> `RegularPolyCollection` to
draw squares, <REPLACE_NEW> ``RegularPolyCollection`` to draw
squares, <REPLACE_END> <REPLACE_OLD> using
finite-differences and writing down <REPLACE_NEW> using
finite-differences. For speed, the <REPLACE_END> <REPLACE_OLD> analytically, was
incredibly-prone <REPLACE_NEW> were calculated
analytically, which was incredibly-prone <REPLACE_END> <REPLACE_OLD> code, I <REPLACE_NEW> code,
I <REPLACE_END> <REPLACE_OLD> Jacobian
(calculated using `scipy.optimize.slsqp.approx_jacobian`) <REPLACE_NEW> Jacobian (calculated using
``scipy.optimize.slsqp.approx_jacobian``) <REPLACE_END> <REPLACE_OLD> the
Hinton <REPLACE_NEW> the Hinton <REPLACE_END> <REPLACE_OLD> for for the <REPLACE_NEW> for
the <REPLACE_END> <REPLACE_OLD> results.
You <REPLACE_NEW> results. That allowed me to
narrow down where the bugs were (boundary conditions!) instead of blindly
checking every equation. You <REPLACE_END> <REPLACE_OLD> `pcolor` <REPLACE_NEW> ``pcolor`` <REPLACE_END> <REPLACE_OLD> `imshow` in a <REPLACE_NEW> ``imshow`` in
a <REPLACE_END> <REPLACE_OLD> situation.
"""
import <REPLACE_NEW> situation.
.. _Hinton demo: http://matplotlib.sourceforge.net/examples/api/hinton_demo.html
"""
import <REPLACE_END> <|endoftext|> """
==============
Hinton diagram
==============
Hinton diagrams are useful for visualizing the values of a 2D array: Positive
and negative values are represented by white and black squares, respectively,
and the size of each square represents the magnitude of each value.
``special.hinton`` is based off of the `Hinton demo`_ in the matplotlib gallery. This implementation, however, uses a ``RegularPolyCollection`` to draw
squares, which is much more efficient than drawing individual rectangles.
Obscure example use: For my Ph.D., I wrote a numerical solver using
finite-differences. For speed, the Jacobian matrices were calculated
analytically, which was incredibly-prone to bugs. To debug my code,
I calculated the numerical Jacobian (calculated using
``scipy.optimize.slsqp.approx_jacobian``) and plotted the Hinton diagram for
the difference of the numerical and analytical results. That allowed me to
narrow down where the bugs were (boundary conditions!) instead of blindly
checking every equation. You could, of course, use ``pcolor`` or ``imshow`` in
a similar situation.
.. _Hinton demo: http://matplotlib.sourceforge.net/examples/api/hinton_demo.html
"""
import numpy as np
import matplotlib.pyplot as plt
from mpltools import special
A = np.random.uniform(-1, 1, size=(20, 20))
special.hinton(A)
plt.show()
| DOC: Clean up hinton example text.
"""
==============
Hinton diagram
==============
Hinton diagrams are useful for visualizing the values of a 2D array. Positive
and negative values represented by white and black squares, respectively, and
the size of each square represents the magnitude of each value.
The `special.hinton` function is based off of the Hinton demo in the matplotlib
gallery [1]_. This implementation, however, uses a `RegularPolyCollection` to
draw squares, which is much more efficient than drawing individual rectangles.
Obscure example use: For my Ph.D., I wrote a numerical solver using
finite-differences and writing down Jacobian matrices analytically, was
incredibly-prone to bugs. To debug my code, I calculated the numerical Jacobian
(calculated using `scipy.optimize.slsqp.approx_jacobian`) and plotted the
Hinton diagram for for the difference of the numerical and analytical results.
You could, of course, use `pcolor` or `imshow` in a similar situation.
"""
import numpy as np
import matplotlib.pyplot as plt
from mpltools import special
A = np.random.uniform(-1, 1, size=(20, 20))
special.hinton(A)
plt.show()
|
1ef1d7a973ce44943fc59315d1f962ed59f06e33 | seacucumber/backend.py | seacucumber/backend.py | """
This module contains the SESBackend class, which is what you'll want to set in
your settings.py::
EMAIL_BACKEND = 'seacucumber.backend.SESBackend'
"""
from django.core.mail.backends.base import BaseEmailBackend
from seacucumber.tasks import SendEmailTask
class SESBackend(BaseEmailBackend):
"""
A Django Email backend that uses Amazon's Simple Email Service.
"""
def send_messages(self, email_messages):
"""
Sends one or more EmailMessage objects and returns the number of
email messages sent.
:param EmailMessage email_messages: A list of Django's EmailMessage
object instances.
:rtype: int
:returns: The number of EmailMessage objects that were successfully
queued up. Note that these are not in a state where we can
guarantee delivery just yet.
"""
num_sent = 0
for message in email_messages:
# Hand this off to a celery task.
SendEmailTask.delay(
message.from_email,
message.recipients(),
message.message().as_string(),
)
num_sent += 1
return num_sent
| """
This module contains the SESBackend class, which is what you'll want to set in
your settings.py::
EMAIL_BACKEND = 'seacucumber.backend.SESBackend'
"""
from django.core.mail.backends.base import BaseEmailBackend
from seacucumber.tasks import SendEmailTask
class SESBackend(BaseEmailBackend):
"""
A Django Email backend that uses Amazon's Simple Email Service.
"""
def send_messages(self, email_messages):
"""
Sends one or more EmailMessage objects and returns the number of
email messages sent.
:param EmailMessage email_messages: A list of Django's EmailMessage
object instances.
:rtype: int
:returns: The number of EmailMessage objects that were successfully
queued up. Note that these are not in a state where we can
guarantee delivery just yet.
"""
num_sent = 0
for message in email_messages:
# Hand this off to a celery task.
SendEmailTask.delay(
message.from_email,
message.recipients(),
message.message().as_string().decode('utf8'),
)
num_sent += 1
return num_sent
| Patch to send mails with UTF8 encoding | Patch to send mails with UTF8 encoding
Just a temp fix
| Python | mit | makielab/sea-cucumber,duointeractive/sea-cucumber | <REPLACE_OLD> SendEmailTask
class <REPLACE_NEW> SendEmailTask
class <REPLACE_END> <REPLACE_OLD> sent.
<REPLACE_NEW> sent.
<REPLACE_END> <REPLACE_OLD> message.message().as_string(),
<REPLACE_NEW> message.message().as_string().decode('utf8'),
<REPLACE_END> <REPLACE_OLD> num_sent
<REPLACE_NEW> num_sent
<REPLACE_END> <|endoftext|> """
This module contains the SESBackend class, which is what you'll want to set in
your settings.py::
EMAIL_BACKEND = 'seacucumber.backend.SESBackend'
"""
from django.core.mail.backends.base import BaseEmailBackend
from seacucumber.tasks import SendEmailTask
class SESBackend(BaseEmailBackend):
"""
A Django Email backend that uses Amazon's Simple Email Service.
"""
def send_messages(self, email_messages):
"""
Sends one or more EmailMessage objects and returns the number of
email messages sent.
:param EmailMessage email_messages: A list of Django's EmailMessage
object instances.
:rtype: int
:returns: The number of EmailMessage objects that were successfully
queued up. Note that these are not in a state where we can
guarantee delivery just yet.
"""
num_sent = 0
for message in email_messages:
# Hand this off to a celery task.
SendEmailTask.delay(
message.from_email,
message.recipients(),
message.message().as_string().decode('utf8'),
)
num_sent += 1
return num_sent
| Patch to send mails with UTF8 encoding
Just a temp fix
"""
This module contains the SESBackend class, which is what you'll want to set in
your settings.py::
EMAIL_BACKEND = 'seacucumber.backend.SESBackend'
"""
from django.core.mail.backends.base import BaseEmailBackend
from seacucumber.tasks import SendEmailTask
class SESBackend(BaseEmailBackend):
"""
A Django Email backend that uses Amazon's Simple Email Service.
"""
def send_messages(self, email_messages):
"""
Sends one or more EmailMessage objects and returns the number of
email messages sent.
:param EmailMessage email_messages: A list of Django's EmailMessage
object instances.
:rtype: int
:returns: The number of EmailMessage objects that were successfully
queued up. Note that these are not in a state where we can
guarantee delivery just yet.
"""
num_sent = 0
for message in email_messages:
# Hand this off to a celery task.
SendEmailTask.delay(
message.from_email,
message.recipients(),
message.message().as_string(),
)
num_sent += 1
return num_sent
|
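# Illustrative aside (hedged sketch, not part of the sea-cucumber record
# above): the patch decodes the serialized message to text before queueing
# it. The equivalent bytes-to-text step, written with Python 3 literals:
raw = b'caf\xc3\xa9'        # UTF-8 encoded bytes
print(raw.decode('utf8'))   # -> café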
e3d0bcb91f59616eb0aa8cc56f72315c362493cf | utils/webhistory/epiphany-history-to-ttl.py | utils/webhistory/epiphany-history-to-ttl.py | import xml.dom.minidom
from xml.dom.minidom import Node
import time
import sys, os
PROPERTIES = {2: ("nie:title", str),
3: ("nfo:uri", str),
4: ("nie:usageCounter", int),
6: ("nie:lastRefreshed", time.struct_time)}
# Use time.struct_time as type for dates, even when the format is not that!
def get_text (node):
text = ""
for subnode in node.childNodes:
if subnode.nodeType == Node.TEXT_NODE:
text += subnode.data
return text.encode ('utf8').replace ('"', '') # Use a safer method!
def process_file (filename):
doc = xml.dom.minidom.parse(filename)
for node in doc.getElementsByTagName ("node"):
print "<uri:uuid:epiphany-webhistory-%s> a nfo:WebHistory" % (node.getAttribute ("id")),
for prop in node.getElementsByTagName ("property"):
prop_id = int(prop.getAttribute ("id"))
if (PROPERTIES.has_key (prop_id)):
prop_name, prop_type = PROPERTIES [prop_id]
if (prop_type == str):
print ';\n\t%s "%s"' % (prop_name, get_text (prop)),
elif (prop_type == int):
print ';\n\t%s %s' % (prop_name, get_text (prop)),
elif (prop_type == time.struct_time):
print ';\n\t%s "%s"' % (prop_name, time.strftime ("%Y%m%dT%H:%m:%S",time.localtime (int(get_text (prop))))),
print ".\n"
def print_headers ():
print "@prefix nfo: <http://www.semanticdesktop.org/ontologies/2007/03/22/nfo#>."
print "@prefix nie: <http://www.semanticdesktop.org/ontologies/2007/01/19/nie#> ."
if __name__ == "__main__":
epi_history = os.path.join (os.getenv ("HOME"), ".gnome2", "epiphany", "ephy-history.xml")
print >> sys.stderr, "Scanning", epi_history
print_headers ()
if (os.path.exists (epi_history)):
process_file (epi_history)
| Add util to generate real webhistory | Add util to generate real webhistory
Added program that reads epiphany web browsing history and print it
in turtle format.
| Python | lgpl-2.1 | hoheinzollern/tracker,hoheinzollern/tracker,outofbits/tracker,outofbits/tracker,outofbits/tracker,hoheinzollern/tracker,outofbits/tracker,hoheinzollern/tracker,outofbits/tracker,hoheinzollern/tracker,outofbits/tracker,hoheinzollern/tracker,hoheinzollern/tracker,outofbits/tracker | <REPLACE_OLD> <REPLACE_NEW> import xml.dom.minidom
from xml.dom.minidom import Node
import time
import sys, os
PROPERTIES = {2: ("nie:title", str),
3: ("nfo:uri", str),
4: ("nie:usageCounter", int),
6: ("nie:lastRefreshed", time.struct_time)}
# Use time.struct_time as type for dates, even when the format is not that!
def get_text (node):
text = ""
for subnode in node.childNodes:
if subnode.nodeType == Node.TEXT_NODE:
text += subnode.data
return text.encode ('utf8').replace ('"', '') # Use a safer method!
def process_file (filename):
doc = xml.dom.minidom.parse(filename)
for node in doc.getElementsByTagName ("node"):
print "<uri:uuid:epiphany-webhistory-%s> a nfo:WebHistory" % (node.getAttribute ("id")),
for prop in node.getElementsByTagName ("property"):
prop_id = int(prop.getAttribute ("id"))
if (PROPERTIES.has_key (prop_id)):
prop_name, prop_type = PROPERTIES [prop_id]
if (prop_type == str):
print ';\n\t%s "%s"' % (prop_name, get_text (prop)),
elif (prop_type == int):
print ';\n\t%s %s' % (prop_name, get_text (prop)),
elif (prop_type == time.struct_time):
print ';\n\t%s "%s"' % (prop_name, time.strftime ("%Y%m%dT%H:%m:%S",time.localtime (int(get_text (prop))))),
print ".\n"
def print_headers ():
print "@prefix nfo: <http://www.semanticdesktop.org/ontologies/2007/03/22/nfo#>."
print "@prefix nie: <http://www.semanticdesktop.org/ontologies/2007/01/19/nie#> ."
if __name__ == "__main__":
epi_history = os.path.join (os.getenv ("HOME"), ".gnome2", "epiphany", "ephy-history.xml")
print >> sys.stderr, "Scanning", epi_history
print_headers ()
if (os.path.exists (epi_history)):
process_file (epi_history)
<REPLACE_END> <|endoftext|> import xml.dom.minidom
from xml.dom.minidom import Node
import time
import sys, os
PROPERTIES = {2: ("nie:title", str),
3: ("nfo:uri", str),
4: ("nie:usageCounter", int),
6: ("nie:lastRefreshed", time.struct_time)}
# Use time.struct_time as type for dates, even when the format is not that!
def get_text (node):
text = ""
for subnode in node.childNodes:
if subnode.nodeType == Node.TEXT_NODE:
text += subnode.data
return text.encode ('utf8').replace ('"', '') # Use a safer method!
def process_file (filename):
doc = xml.dom.minidom.parse(filename)
for node in doc.getElementsByTagName ("node"):
print "<uri:uuid:epiphany-webhistory-%s> a nfo:WebHistory" % (node.getAttribute ("id")),
for prop in node.getElementsByTagName ("property"):
prop_id = int(prop.getAttribute ("id"))
if (PROPERTIES.has_key (prop_id)):
prop_name, prop_type = PROPERTIES [prop_id]
if (prop_type == str):
print ';\n\t%s "%s"' % (prop_name, get_text (prop)),
elif (prop_type == int):
print ';\n\t%s %s' % (prop_name, get_text (prop)),
elif (prop_type == time.struct_time):
print ';\n\t%s "%s"' % (prop_name, time.strftime ("%Y%m%dT%H:%m:%S",time.localtime (int(get_text (prop))))),
print ".\n"
def print_headers ():
print "@prefix nfo: <http://www.semanticdesktop.org/ontologies/2007/03/22/nfo#>."
print "@prefix nie: <http://www.semanticdesktop.org/ontologies/2007/01/19/nie#> ."
if __name__ == "__main__":
epi_history = os.path.join (os.getenv ("HOME"), ".gnome2", "epiphany", "ephy-history.xml")
print >> sys.stderr, "Scanning", epi_history
print_headers ()
if (os.path.exists (epi_history)):
process_file (epi_history)
| Add util to generate real webhistory
Added program that reads epiphany web browsing history and print it
in turtle format.
|
|
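# Illustrative aside (hedged sketch, not part of the tracker record above):
# the epiphany converter collects element text by walking child text nodes by
# hand. A standalone version of that helper:
import xml.dom.minidom
doc = xml.dom.minidom.parseString('<p>hello <b>world</b></p>')
text = ''.join(n.data for n in doc.documentElement.childNodes
               if n.nodeType == n.TEXT_NODE)
print(text)  # -> 'hello ' (only direct text children are collected)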
5f5f26a9d31c5c647d69e0400e381abd0ec103b0 | lwr/managers/util/env.py | lwr/managers/util/env.py |
RAW_VALUE_BY_DEFAULT = False
def env_to_statement(env):
''' Return the abstraction description of an environment variable definition
into a statement for shell script.
>>> env_to_statement(dict(name='X', value='Y'))
'X="Y"; export X'
>>> env_to_statement(dict(name='X', value='Y', raw=True))
'X=Y; export X'
>>> env_to_statement(dict(name='X', value='"A","B","C"'))
'X="\\\\"A\\\\",\\\\"B\\\\",\\\\"C\\\\""; export X'
'''
name = env['name']
value = env['value']
raw = env.get('raw', RAW_VALUE_BY_DEFAULT)
if not raw:
value = '"' + value.replace('"', '\\"') + '"'
return '%s=%s; export %s' % (name, value, name)
| Add missing file from previous commit (thanks Izzet Fatih). | Add missing file from previous commit (thanks Izzet Fatih).
| Python | apache-2.0 | jmchilton/pulsar,ssorgatem/pulsar,ssorgatem/pulsar,natefoo/pulsar,jmchilton/pulsar,galaxyproject/pulsar,jmchilton/lwr,natefoo/pulsar,jmchilton/lwr,galaxyproject/pulsar | <INSERT>
RAW_VALUE_BY_DEFAULT = False
def env_to_statement(env):
<INSERT_END> <INSERT> ''' Return the abstraction description of an environment variable definition
into a statement for shell script.
>>> env_to_statement(dict(name='X', value='Y'))
'X="Y"; export X'
>>> env_to_statement(dict(name='X', value='Y', raw=True))
'X=Y; export X'
>>> env_to_statement(dict(name='X', value='"A","B","C"'))
'X="\\\\"A\\\\",\\\\"B\\\\",\\\\"C\\\\""; export X'
'''
name = env['name']
value = env['value']
raw = env.get('raw', RAW_VALUE_BY_DEFAULT)
if not raw:
value = '"' + value.replace('"', '\\"') + '"'
return '%s=%s; export %s' % (name, value, name)
<INSERT_END> <|endoftext|>
RAW_VALUE_BY_DEFAULT = False
def env_to_statement(env):
''' Return the abstraction description of an environment variable definition
into a statement for shell script.
>>> env_to_statement(dict(name='X', value='Y'))
'X="Y"; export X'
>>> env_to_statement(dict(name='X', value='Y', raw=True))
'X=Y; export X'
>>> env_to_statement(dict(name='X', value='"A","B","C"'))
'X="\\\\"A\\\\",\\\\"B\\\\",\\\\"C\\\\""; export X'
'''
name = env['name']
value = env['value']
raw = env.get('raw', RAW_VALUE_BY_DEFAULT)
if not raw:
value = '"' + value.replace('"', '\\"') + '"'
return '%s=%s; export %s' % (name, value, name)
| Add missing file from previous commit (thanks Izzet Fatih).
|
|
b11bd211a117b695f2a1a2aa09763f4332e37ace | tests/ratings/test_rating_signals.py | tests/ratings/test_rating_signals.py | import pytest
from django.core.exceptions import ObjectDoesNotExist
from adhocracy4.ratings import models
@pytest.mark.django_db
def test_delete_of_content_object(rating):
question = rating.content_object
question.delete()
with pytest.raises(ObjectDoesNotExist):
models.Rating.objects.get(id=rating.id)
| Add test for rating signals | Add test for rating signals
| Python | agpl-3.0 | liqd/adhocracy4,liqd/adhocracy4,liqd/adhocracy4,liqd/adhocracy4 | <INSERT> import pytest
from django.core.exceptions import ObjectDoesNotExist
from adhocracy4.ratings import models
@pytest.mark.django_db
def test_delete_of_content_object(rating):
<INSERT_END> <INSERT> question = rating.content_object
question.delete()
with pytest.raises(ObjectDoesNotExist):
models.Rating.objects.get(id=rating.id)
<INSERT_END> <|endoftext|> import pytest
from django.core.exceptions import ObjectDoesNotExist
from adhocracy4.ratings import models
@pytest.mark.django_db
def test_delete_of_content_object(rating):
question = rating.content_object
question.delete()
with pytest.raises(ObjectDoesNotExist):
models.Rating.objects.get(id=rating.id)
| Add test for rating signals
|
|
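# Illustrative aside (hedged sketch, not part of the adhocracy4 record
# above): the test relies on pytest.raises as a context manager. The bare
# pattern, with a stand-in exception type:
import pytest
def test_raises_example():
    with pytest.raises(ZeroDivisionError):
        1 / 0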
07800eb26817458d2d12afeb3f670a2119533639 | setup.py | setup.py | from __future__ import unicode_literals
import re
from setuptools import find_packages, setup
def get_version(filename):
content = open(filename).read()
metadata = dict(re.findall("__([a-z]+)__ = '([^']+)'", content))
return metadata['version']
setup(
name='Mopidy-MusicBox-Webclient',
version=get_version('mopidy_musicbox_webclient/__init__.py'),
url='https://github.com/woutervanwijk/mopidy-musicbox-webclient',
license='Apache License, Version 2.0',
author='Wouter van Wijk',
author_email='[email protected]',
description='Mopidy MusicBox web extension',
long_description=open('README.rst').read(),
packages=find_packages(exclude=['tests', 'tests.*']),
zip_safe=False,
include_package_data=True,
install_requires=[
'setuptools',
'Mopidy >= 1.1.0',
],
entry_points={
'mopidy.ext': [
'musicbox_webclient = mopidy_musicbox_webclient:Extension',
],
},
classifiers=[
'Environment :: No Input/Output (Daemon)',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Topic :: Multimedia :: Sound/Audio :: Players',
],
)
| from __future__ import unicode_literals
import re
from setuptools import find_packages, setup
def get_version(filename):
content = open(filename).read()
metadata = dict(re.findall("__([a-z]+)__ = '([^']+)'", content))
return metadata['version']
setup(
name='Mopidy-MusicBox-Webclient',
version=get_version('mopidy_musicbox_webclient/__init__.py'),
url='https://github.com/pimusicbox/mopidy-musicbox-webclient',
license='Apache License, Version 2.0',
author='Wouter van Wijk',
author_email='[email protected]',
description='Mopidy MusicBox web extension',
long_description=open('README.rst').read(),
packages=find_packages(exclude=['tests', 'tests.*']),
zip_safe=False,
include_package_data=True,
install_requires=[
'setuptools',
'Mopidy >= 1.1.0',
],
entry_points={
'mopidy.ext': [
'musicbox_webclient = mopidy_musicbox_webclient:Extension',
],
},
classifiers=[
'Environment :: No Input/Output (Daemon)',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Topic :: Multimedia :: Sound/Audio :: Players',
],
)
| Update URL to github repository. | Update URL to github repository.
| Python | apache-2.0 | pimusicbox/mopidy-musicbox-webclient,woutervanwijk/Mopidy-MusicBox-Webclient,woutervanwijk/Mopidy-MusicBox-Webclient,pimusicbox/mopidy-musicbox-webclient,woutervanwijk/Mopidy-MusicBox-Webclient,pimusicbox/mopidy-musicbox-webclient | <REPLACE_OLD> url='https://github.com/woutervanwijk/mopidy-musicbox-webclient',
<REPLACE_NEW> url='https://github.com/pimusicbox/mopidy-musicbox-webclient',
<REPLACE_END> <|endoftext|> from __future__ import unicode_literals
import re
from setuptools import find_packages, setup
def get_version(filename):
content = open(filename).read()
metadata = dict(re.findall("__([a-z]+)__ = '([^']+)'", content))
return metadata['version']
setup(
name='Mopidy-MusicBox-Webclient',
version=get_version('mopidy_musicbox_webclient/__init__.py'),
url='https://github.com/pimusicbox/mopidy-musicbox-webclient',
license='Apache License, Version 2.0',
author='Wouter van Wijk',
author_email='[email protected]',
description='Mopidy MusicBox web extension',
long_description=open('README.rst').read(),
packages=find_packages(exclude=['tests', 'tests.*']),
zip_safe=False,
include_package_data=True,
install_requires=[
'setuptools',
'Mopidy >= 1.1.0',
],
entry_points={
'mopidy.ext': [
'musicbox_webclient = mopidy_musicbox_webclient:Extension',
],
},
classifiers=[
'Environment :: No Input/Output (Daemon)',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Topic :: Multimedia :: Sound/Audio :: Players',
],
)
| Update URL to github repository.
from __future__ import unicode_literals
import re
from setuptools import find_packages, setup
def get_version(filename):
content = open(filename).read()
metadata = dict(re.findall("__([a-z]+)__ = '([^']+)'", content))
return metadata['version']
setup(
name='Mopidy-MusicBox-Webclient',
version=get_version('mopidy_musicbox_webclient/__init__.py'),
url='https://github.com/woutervanwijk/mopidy-musicbox-webclient',
license='Apache License, Version 2.0',
author='Wouter van Wijk',
author_email='[email protected]',
description='Mopidy MusicBox web extension',
long_description=open('README.rst').read(),
packages=find_packages(exclude=['tests', 'tests.*']),
zip_safe=False,
include_package_data=True,
install_requires=[
'setuptools',
'Mopidy >= 1.1.0',
],
entry_points={
'mopidy.ext': [
'musicbox_webclient = mopidy_musicbox_webclient:Extension',
],
},
classifiers=[
'Environment :: No Input/Output (Daemon)',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Topic :: Multimedia :: Sound/Audio :: Players',
],
)
|
9f82e6b96bf4702901f86374e8a05c3d550091e7 | app/soc/logic/helper/convert_db.py | app/soc/logic/helper/convert_db.py | #!/usr/bin/python2.5
#
# Copyright 2008 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Converts the DB from an old scheme to a new one.
"""
__authors__ = [
'"Sverre Rabbelier" <[email protected]>',
]
from google.appengine.api import users
from django import http
from soc.models import user as user_model
from soc.logic import accounts
from soc.logic.models.user import logic as user_logic
def convert_user_accounts(*args, **kwargs):
"""Converts all current user accounts to normalized form.
"""
data = user_logic.getAll(user_model.User.all())
for user in data:
normalized = accounts.normalizeAccount(user.account)
if user.account != normalized:
user.account = normalized
user.put()
return http.HttpResponse('Done')
| Add a script to normalize user accounts | Add a script to normalize user accounts
Patch by: Sverre Rabbelier
| Python | apache-2.0 | MatthewWilkes/mw4068-packaging,MatthewWilkes/mw4068-packaging,MatthewWilkes/mw4068-packaging,MatthewWilkes/mw4068-packaging | <INSERT> #!/usr/bin/python2.5
#
# Copyright 2008 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# <INSERT_END> <INSERT> http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Converts the DB from an old scheme to a new one.
"""
__authors__ = [
'"Sverre Rabbelier" <[email protected]>',
]
from google.appengine.api import users
from django import http
from soc.models import user as user_model
from soc.logic import accounts
from soc.logic.models.user import logic as user_logic
def convert_user_accounts(*args, **kwargs):
"""Converts all current user accounts to normalized form.
"""
data = user_logic.getAll(user_model.User.all())
for user in data:
normalized = accounts.normalizeAccount(user.account)
if user.account != normalized:
user.account = normalized
user.put()
return http.HttpResponse('Done')
<INSERT_END> <|endoftext|> #!/usr/bin/python2.5
#
# Copyright 2008 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Converts the DB from an old scheme to a new one.
"""
__authors__ = [
'"Sverre Rabbelier" <[email protected]>',
]
from google.appengine.api import users
from django import http
from soc.models import user as user_model
from soc.logic import accounts
from soc.logic.models.user import logic as user_logic
def convert_user_accounts(*args, **kwargs):
"""Converts all current user accounts to normalized form.
"""
data = user_logic.getAll(user_model.User.all())
for user in data:
normalized = accounts.normalizeAccount(user.account)
if user.account != normalized:
user.account = normalized
user.put()
return http.HttpResponse('Done')
| Add a script to normalize user accounts
Patch by: Sverre Rabbelier
|
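The loop in the record above issues one datastore RPC per changed user; on App Engine the same pass can batch its writes, since db.put accepts a list. A sketch of that variant, reusing data and accounts from the script above:

from google.appengine.ext import db

changed = []
for user in data:
  normalized = accounts.normalizeAccount(user.account)
  if user.account != normalized:
    user.account = normalized
    changed.append(user)
db.put(changed)  # one batched RPC instead of one put() per entity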
|
a87d927acc42ba2fe4a82004ce919882024039a9 | kboard/board/forms.py | kboard/board/forms.py | from django import forms
from django.forms.utils import ErrorList
from django_summernote.widgets import SummernoteWidget
from .models import Post
EMPTY_TITLE_ERROR = "제목을 입력하세요"
EMPTY_CONTENT_ERROR = "내용을 입력하세요"
class DivErrorList(ErrorList):
def __str__(self):
return self.as_divs()
def as_divs(self):
if not self:
return ''
return '<div class="form-group has-error">%s</div>' % ''.join(['<div class="help-block">%s</div>' % e for e in self])
class PostForm(forms.ModelForm):
class Meta:
model = Post
fields = ('title', 'content', 'file')
widgets = {
'title': forms.TextInput(attrs={'id': 'id_post_title', 'class': 'form-control', 'name': 'post_title_text', 'placeholder': 'Insert Title'}),
'content': SummernoteWidget(),
}
error_messages = {
'title': {'required': EMPTY_TITLE_ERROR},
'content': {'required': EMPTY_CONTENT_ERROR}
}
def __init__(self, *args, **kwargs):
kwargs_new = {'error_class': DivErrorList}
kwargs_new.update(kwargs)
super(PostForm, self).__init__(*args, **kwargs_new)
self.fields['file'].required = False
| from django import forms
from django.forms.utils import ErrorList
from django_summernote.widgets import SummernoteWidget
from .models import Post
EMPTY_TITLE_ERROR = "제목을 입력하세요"
EMPTY_CONTENT_ERROR = "내용을 입력하세요"
class DivErrorList(ErrorList):
def __str__(self):
return self.as_divs()
def as_divs(self):
if not self:
return ''
return '<div class="form-group has-error">%s</div>' % ''.join(['<div class="help-block">%s</div>' % e for e in self])
class PostForm(forms.ModelForm):
class Meta:
model = Post
fields = ('title', 'content', 'file')
widgets = {
'title': forms.TextInput(attrs={'id': 'id_post_title', 'class': 'form-control', 'placeholder': 'Insert Title'}),
'content': SummernoteWidget(),
}
error_messages = {
'title': {'required': EMPTY_TITLE_ERROR},
'content': {'required': EMPTY_CONTENT_ERROR}
}
def __init__(self, *args, **kwargs):
kwargs_new = {'error_class': DivErrorList}
kwargs_new.update(kwargs)
super(PostForm, self).__init__(*args, **kwargs_new)
self.fields['file'].required = False
| Remove unnecessary attrs 'name' in title | Remove unnecessary attrs 'name' in title
| Python | mit | hyesun03/k-board,cjh5414/kboard,guswnsxodlf/k-board,hyesun03/k-board,darjeeling/k-board,cjh5414/kboard,kboard/kboard,kboard/kboard,kboard/kboard,guswnsxodlf/k-board,hyesun03/k-board,cjh5414/kboard,guswnsxodlf/k-board | <DELETE> 'name': 'post_title_text', <DELETE_END> <|endoftext|> from django import forms
from django.forms.utils import ErrorList
from django_summernote.widgets import SummernoteWidget
from .models import Post
EMPTY_TITLE_ERROR = "제목을 입력하세요"
EMPTY_CONTENT_ERROR = "내용을 입력하세요"
class DivErrorList(ErrorList):
def __str__(self):
return self.as_divs()
def as_divs(self):
if not self:
return ''
return '<div class="form-group has-error">%s</div>' % ''.join(['<div class="help-block">%s</div>' % e for e in self])
class PostForm(forms.ModelForm):
class Meta:
model = Post
fields = ('title', 'content', 'file')
widgets = {
'title': forms.TextInput(attrs={'id': 'id_post_title', 'class': 'form-control', 'placeholder': 'Insert Title'}),
'content': SummernoteWidget(),
}
error_messages = {
'title': {'required': EMPTY_TITLE_ERROR},
'content': {'required': EMPTY_CONTENT_ERROR}
}
def __init__(self, *args, **kwargs):
kwargs_new = {'error_class': DivErrorList}
kwargs_new.update(kwargs)
super(PostForm, self).__init__(*args, **kwargs_new)
self.fields['file'].required = False
| Remove unnecessary attrs 'name' in title
from django import forms
from django.forms.utils import ErrorList
from django_summernote.widgets import SummernoteWidget
from .models import Post
EMPTY_TITLE_ERROR = "제목을 입력하세요"
EMPTY_CONTENT_ERROR = "내용을 입력하세요"
class DivErrorList(ErrorList):
def __str__(self):
return self.as_divs()
def as_divs(self):
if not self:
return ''
return '<div class="form-group has-error">%s</div>' % ''.join(['<div class="help-block">%s</div>' % e for e in self])
class PostForm(forms.ModelForm):
class Meta:
model = Post
fields = ('title', 'content', 'file')
widgets = {
'title': forms.TextInput(attrs={'id': 'id_post_title', 'class': 'form-control', 'name': 'post_title_text', 'placeholder': 'Insert Title'}),
'content': SummernoteWidget(),
}
error_messages = {
'title': {'required': EMPTY_TITLE_ERROR},
'content': {'required': EMPTY_CONTENT_ERROR}
}
def __init__(self, *args, **kwargs):
kwargs_new = {'error_class': DivErrorList}
kwargs_new.update(kwargs)
super(PostForm, self).__init__(*args, **kwargs_new)
self.fields['file'].required = False
|
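The attrs entry deleted above was redundant because Django supplies the input's name from the form field at render time; depending on the Django version, a widget-level 'name' is either overwritten or risks clashing with the key the form actually reads from request.POST. A minimal illustration (the field name here is chosen for the example):

from django import forms

class DemoForm(forms.Form):
    title = forms.CharField(widget=forms.TextInput(attrs={'class': 'form-control'}))

# str(DemoForm()['title']) renders <input ... name="title" ...>:
# the name comes from the field itself, so attrs never needs to set it.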
3a5a6db3b869841cf5c55eed2f5ec877a443a571 | chrome/test/functional/chromeos_html_terminal.py | chrome/test/functional/chromeos_html_terminal.py | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import pyauto_functional # must be imported before pyauto
import pyauto
class ChromeosHTMLTerminalTest(pyauto.PyUITest):
"""Basic tests for ChromeOS HTML Terminal.
Requires ChromeOS to be logged in.
"""
def _GetExtensionInfoById(self, extensions, id):
for x in extensions:
if x['id'] == id:
return x
return None
def testInstallHTMLTerminal(self):
"""Basic installation test for HTML Terminal on ChromeOS."""
crx_file_path = os.path.abspath(
os.path.join(self.DataDir(), 'pyauto_private', 'apps',
'SecureShell-dev-0.7.9.3.crx'))
ext_id = self.InstallExtension(crx_file_path)
self.assertTrue(ext_id, 'Failed to install extension.')
extension = self._GetExtensionInfoById(self.GetExtensionsInfo(), ext_id)
self.assertTrue(extension['is_enabled'],
msg='Extension was not enabled on installation.')
self.assertFalse(extension['allowed_in_incognito'],
msg='Extension was allowed in incognito on installation.')
if __name__ == '__main__':
pyauto_functional.Main()
| #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import pyauto_functional # must be imported before pyauto
import pyauto
class ChromeosHTMLTerminalTest(pyauto.PyUITest):
"""Basic tests for ChromeOS HTML Terminal.
Requires ChromeOS to be logged in.
"""
def _GetExtensionInfoById(self, extensions, id):
for x in extensions:
if x['id'] == id:
return x
return None
def testInstallAndUninstallSecureShellExt(self):
"""Basic installation test for HTML Terminal on ChromeOS."""
crx_file_path = os.path.abspath(
os.path.join(self.DataDir(), 'pyauto_private', 'apps',
'SecureShell-dev-0.7.9.3.crx'))
ext_id = self.InstallExtension(crx_file_path)
self.assertTrue(ext_id, 'Failed to install extension.')
extension = self._GetExtensionInfoById(self.GetExtensionsInfo(), ext_id)
self.assertTrue(extension['is_enabled'],
msg='Extension was not enabled on installation.')
self.assertFalse(extension['allowed_in_incognito'],
msg='Extension was allowed in incognito on installation.')
# Uninstall HTML Terminal extension
self.assertTrue(self.UninstallExtensionById(ext_id),
msg='Failed to uninstall extension.')
if __name__ == '__main__':
pyauto_functional.Main()
| Add uninstall HTML Terminal extension | Add uninstall HTML Terminal extension
BUG=
TEST=This is a test to uninstall HTML terminal extension
Review URL: https://chromiumcodereview.appspot.com/10332227
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@137790 0039d316-1c4b-4281-b951-d872f2087c98
| Python | bsd-3-clause | hgl888/chromium-crosswalk,markYoungH/chromium.src,hujiajie/pa-chromium,hujiajie/pa-chromium,TheTypoMaster/chromium-crosswalk,zcbenz/cefode-chromium,Chilledheart/chromium,mohamed--abdel-maksoud/chromium.src,pozdnyakov/chromium-crosswalk,bright-sparks/chromium-spacewalk,ChromiumWebApps/chromium,junmin-zhu/chromium-rivertrail,M4sse/chromium.src,Chilledheart/chromium,Pluto-tv/chromium-crosswalk,zcbenz/cefode-chromium,Chilledheart/chromium,mogoweb/chromium-crosswalk,axinging/chromium-crosswalk,Just-D/chromium-1,ltilve/chromium,hgl888/chromium-crosswalk-efl,jaruba/chromium.src,dednal/chromium.src,Chilledheart/chromium,keishi/chromium,littlstar/chromium.src,Fireblend/chromium-crosswalk,nacl-webkit/chrome_deps,axinging/chromium-crosswalk,timopulkkinen/BubbleFish,zcbenz/cefode-chromium,M4sse/chromium.src,Jonekee/chromium.src,zcbenz/cefode-chromium,Pluto-tv/chromium-crosswalk,Pluto-tv/chromium-crosswalk,Fireblend/chromium-crosswalk,bright-sparks/chromium-spacewalk,dushu1203/chromium.src,mogoweb/chromium-crosswalk,dushu1203/chromium.src,junmin-zhu/chromium-rivertrail,ondra-novak/chromium.src,hgl888/chromium-crosswalk,ChromiumWebApps/chromium,jaruba/chromium.src,M4sse/chromium.src,PeterWangIntel/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,timopulkkinen/BubbleFish,TheTypoMaster/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,markYoungH/chromium.src,Jonekee/chromium.src,anirudhSK/chromium,dednal/chromium.src,patrickm/chromium.src,chuan9/chromium-crosswalk,zcbenz/cefode-chromium,littlstar/chromium.src,Fireblend/chromium-crosswalk,nacl-webkit/chrome_deps,ondra-novak/chromium.src,Chilledheart/chromium,nacl-webkit/chrome_deps,nacl-webkit/chrome_deps,dushu1203/chromium.src,Just-D/chromium-1,jaruba/chromium.src,jaruba/chromium.src,hgl888/chromium-crosswalk-efl,TheTypoMaster/chromium-crosswalk,Pluto-tv/chromium-crosswalk,markYoungH/chromium.src,keishi/chromium,Fireblend/chromium-crosswalk,axinging/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,bright-sparks/chromium-spacewalk,Just-D/chromium-1,ltilve/chromium,dushu1203/chromium.src,chuan9/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,keishi/chromium,PeterWangIntel/chromium-crosswalk,keishi/chromium,jaruba/chromium.src,patrickm/chromium.src,krieger-od/nwjs_chromium.src,anirudhSK/chromium,zcbenz/cefode-chromium,hgl888/chromium-crosswalk-efl,hgl888/chromium-crosswalk-efl,junmin-zhu/chromium-rivertrail,markYoungH/chromium.src,patrickm/chromium.src,ChromiumWebApps/chromium,Just-D/chromium-1,M4sse/chromium.src,axinging/chromium-crosswalk,timopulkkinen/BubbleFish,ondra-novak/chromium.src,pozdnyakov/chromium-crosswalk,anirudhSK/chromium,hgl888/chromium-crosswalk,Just-D/chromium-1,Just-D/chromium-1,Just-D/chromium-1,dednal/chromium.src,krieger-od/nwjs_chromium.src,dushu1203/chromium.src,bright-sparks/chromium-spacewalk,hgl888/chromium-crosswalk-efl,pozdnyakov/chromium-crosswalk,anirudhSK/chromium,jaruba/chromium.src,mohamed--abdel-maksoud/chromium.src,ChromiumWebApps/chromium,ChromiumWebApps/chromium,dushu1203/chromium.src,M4sse/chromium.src,timopulkkinen/BubbleFish,Fireblend/chromium-crosswalk,mogoweb/chromium-crosswalk,ondra-novak/chromium.src,M4sse/chromium.src,timopulkkinen/BubbleFish,ChromiumWebApps/chromium,keishi/chromium,mogoweb/chromium-crosswalk,hgl888/chromium-crosswalk,ltilve/chromium,hgl888/chromium-crosswalk,hgl888/chromium-crosswalk,Jonekee/chromium.src,axinging/chromium-crosswalk,nacl-webkit/chrome_deps,M4sse/chromium.src,junmin-zhu/chromium-rivertrail,Jonekee/chromium.src,chuan9/chromium-crosswalk,Jonekee/chromium.src,krieger-od/nwjs_chromium.src,junmin-zhu/chromium-rivertrail,zcbenz/cefode-chromium,dednal/chromium.src,mogoweb/chromium-crosswalk,ChromiumWebApps/chromium,krieger-od/nwjs_chromium.src,axinging/chromium-crosswalk,anirudhSK/chromium,TheTypoMaster/chromium-crosswalk,ltilve/chromium,patrickm/chromium.src,bright-sparks/chromium-spacewalk,PeterWangIntel/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,ltilve/chromium,fujunwei/chromium-crosswalk,keishi/chromium,Fireblend/chromium-crosswalk,chuan9/chromium-crosswalk,jaruba/chromium.src,dushu1203/chromium.src,Jonekee/chromium.src,mogoweb/chromium-crosswalk,axinging/chromium-crosswalk,markYoungH/chromium.src,bright-sparks/chromium-spacewalk,hgl888/chromium-crosswalk,chuan9/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,krieger-od/nwjs_chromium.src,zcbenz/cefode-chromium,hujiajie/pa-chromium,ltilve/chromium,Fireblend/chromium-crosswalk,timopulkkinen/BubbleFish,keishi/chromium,littlstar/chromium.src,pozdnyakov/chromium-crosswalk,nacl-webkit/chrome_deps,Fireblend/chromium-crosswalk,krieger-od/nwjs_chromium.src,chuan9/chromium-crosswalk,junmin-zhu/chromium-rivertrail,pozdnyakov/chromium-crosswalk,hgl888/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,dednal/chromium.src,markYoungH/chromium.src,M4sse/chromium.src,crosswalk-project/chromium-crosswalk-efl,anirudhSK/chromium,keishi/chromium,anirudhSK/chromium,fujunwei/chromium-crosswalk,hujiajie/pa-chromium,zcbenz/cefode-chromium,timopulkkinen/BubbleFish,hujiajie/pa-chromium,nacl-webkit/chrome_deps,fujunwei/chromium-crosswalk,Chilledheart/chromium,nacl-webkit/chrome_deps,keishi/chromium,Jonekee/chromium.src,PeterWangIntel/chromium-crosswalk,Pluto-tv/chromium-crosswalk,PeterWangIntel/chromium-crosswalk | <REPLACE_OLD> testInstallHTMLTerminal(self):
<REPLACE_NEW> testInstallAndUninstallSecureShellExt(self):
<REPLACE_END> <REPLACE_OLD> installation.')
if <REPLACE_NEW> installation.')
# Uninstall HTML Terminal extension
self.assertTrue(self.UninstallExtensionById(ext_id),
msg='Failed to uninstall extension.')
if <REPLACE_END> <|endoftext|> #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import pyauto_functional # must be imported before pyauto
import pyauto
class ChromeosHTMLTerminalTest(pyauto.PyUITest):
"""Basic tests for ChromeOS HTML Terminal.
Requires ChromeOS to be logged in.
"""
def _GetExtensionInfoById(self, extensions, id):
for x in extensions:
if x['id'] == id:
return x
return None
def testInstallAndUninstallSecureShellExt(self):
"""Basic installation test for HTML Terminal on ChromeOS."""
crx_file_path = os.path.abspath(
os.path.join(self.DataDir(), 'pyauto_private', 'apps',
'SecureShell-dev-0.7.9.3.crx'))
ext_id = self.InstallExtension(crx_file_path)
self.assertTrue(ext_id, 'Failed to install extension.')
extension = self._GetExtensionInfoById(self.GetExtensionsInfo(), ext_id)
self.assertTrue(extension['is_enabled'],
msg='Extension was not enabled on installation.')
self.assertFalse(extension['allowed_in_incognito'],
msg='Extension was allowed in incognito on installation.')
# Uninstall HTML Terminal extension
self.assertTrue(self.UninstallExtensionById(ext_id),
msg='Failed to uninstall extension.')
if __name__ == '__main__':
pyauto_functional.Main()
| Add uninstall HTML Terminal extension
BUG=
TEST=This is a test to uninstall HTML terminal extension
Review URL: https://chromiumcodereview.appspot.com/10332227
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@137790 0039d316-1c4b-4281-b951-d872f2087c98
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import pyauto_functional # must be imported before pyauto
import pyauto
class ChromeosHTMLTerminalTest(pyauto.PyUITest):
"""Basic tests for ChromeOS HTML Terminal.
Requires ChromeOS to be logged in.
"""
def _GetExtensionInfoById(self, extensions, id):
for x in extensions:
if x['id'] == id:
return x
return None
def testInstallHTMLTerminal(self):
"""Basic installation test for HTML Terminal on ChromeOS."""
crx_file_path = os.path.abspath(
os.path.join(self.DataDir(), 'pyauto_private', 'apps',
'SecureShell-dev-0.7.9.3.crx'))
ext_id = self.InstallExtension(crx_file_path)
self.assertTrue(ext_id, 'Failed to install extension.')
extension = self._GetExtensionInfoById(self.GetExtensionsInfo(), ext_id)
self.assertTrue(extension['is_enabled'],
msg='Extension was not enabled on installation.')
self.assertFalse(extension['allowed_in_incognito'],
msg='Extension was allowed in incognito on installation.')
if __name__ == '__main__':
pyauto_functional.Main()
|
ec8d7b035617f9239a0a52be346d8611cf77cb6f | integration-tests/features/src/utils.py | integration-tests/features/src/utils.py | """Unsorted utility functions used in integration tests."""
import requests
def download_file_from_url(url):
"""Download file from the given URL and do basic check of response."""
assert url
response = requests.get(url)
assert response.status_code == 200
assert response.text is not None
return response.text
def split_comma_separated_list(l):
"""Split the list into elements separated by commas."""
return [i.strip() for i in l.split(',')]
| """Unsorted utility functions used in integration tests."""
import requests
import subprocess
def download_file_from_url(url):
"""Download file from the given URL and do basic check of response."""
assert url
response = requests.get(url)
assert response.status_code == 200
assert response.text is not None
return response.text
def split_comma_separated_list(l):
"""Split the list into elements separated by commas."""
return [i.strip() for i in l.split(',')]
def oc_login(url, username, password, tls_verify=True):
"""Wrapper around `oc login`.
:param url: str, OpenShift URL
:param username: str, username
:param password: str, password
:param tls_verify: bool, verify server's certificate?; default: True
:return: None on success, raises `subprocess.CalledProcessError` on error
"""
command = ['oc', 'login', url, '--username', username, '--password', password]
if not tls_verify:
command.extend(['--insecure-skip-tls-verify=true'])
try:
subprocess.check_call(command)
except subprocess.CalledProcessError as e:
# replace password with '***' so somebody will not accidentally leak it in CI logs
e.cmd = [x if x != password else '***' for x in e.cmd]
raise e
def oc_delete_pods(selector, namespace=None):
"""Wrapper around `oc delete`.
Selector determines which pods will be deleted.
More on selectors:
https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
Note k8s/OpenShift will immediately restart deleted pods,
to match desired number of replicas for given deployment.
The expectation is that the user is already logged in
and has permissions to delete pods.
Example usage:
oc_delete_pods('service=bayesian-pgbouncer'
:param selector: str, selector identifying pods that will be deleted
:param namespace: str, namespace in which `oc delete` command should be executed,
default: currently selected namespace
:return: None on success, raises `subprocess.CalledProcessError` on error
"""
command = ['oc', 'delete', 'pods', '--selector=', selector]
if namespace:
command.extend(['--namespace', namespace])
subprocess.check_call(command)
| Add few oc wrappers for future resiliency testing | Add few oc wrappers for future resiliency testing
Not used anywhere yet.
| Python | apache-2.0 | jpopelka/fabric8-analytics-common,tisnik/fabric8-analytics-common,jpopelka/fabric8-analytics-common,tisnik/fabric8-analytics-common,tisnik/fabric8-analytics-common,jpopelka/fabric8-analytics-common | <REPLACE_OLD> requests
def <REPLACE_NEW> requests
import subprocess
def <REPLACE_END> <REPLACE_OLD> l.split(',')]
<REPLACE_NEW> l.split(',')]
def oc_login(url, username, password, tls_verify=True):
"""Wrapper around `oc login`.
:param url: str, OpenShift URL
:param username: str, username
:param password: str, password
:param tls_verify: bool, verify server's certificate?; default: True
:return: None on success, raises `subprocess.CalledProcessError` on error
"""
command = ['oc', 'login', url, '--username', username, '--password', password]
if not tls_verify:
command.extend(['--insecure-skip-tls-verify=true'])
try:
subprocess.check_call(command)
except subprocess.CalledProcessError as e:
# replace password with '***' so somebody will not accidentally leak it in CI logs
e.cmd = [x if x != password else '***' for x in e.cmd]
raise e
def oc_delete_pods(selector, namespace=None):
"""Wrapper around `oc delete`.
Selector determines which pods will be deleted.
More on selectors:
https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
Note k8s/OpenShift will immediately restart deleted pods,
to match desired number of replicas for given deployment.
The expectation is that the user is already logged in
and has permissions to delete pods.
Example usage:
oc_delete_pods('service=bayesian-pgbouncer'
:param selector: str, selector identifying pods that will be deleted
:param namespace: str, namespace in which `oc delete` command should be executed,
default: currently selected namespace
:return: None on success, raises `subprocess.CalledProcessError` on error
"""
command = ['oc', 'delete', 'pods', '--selector=', selector]
if namespace:
command.extend(['--namespace', namespace])
subprocess.check_call(command)
<REPLACE_END> <|endoftext|> """Unsorted utility functions used in integration tests."""
import requests
import subprocess
def download_file_from_url(url):
"""Download file from the given URL and do basic check of response."""
assert url
response = requests.get(url)
assert response.status_code == 200
assert response.text is not None
return response.text
def split_comma_separated_list(l):
"""Split the list into elements separated by commas."""
return [i.strip() for i in l.split(',')]
def oc_login(url, username, password, tls_verify=True):
"""Wrapper around `oc login`.
:param url: str, OpenShift URL
:param username: str, username
:param password: str, password
:param tls_verify: bool, verify server's certificate?; default: True
:return: None on success, raises `subprocess.CalledProcessError` on error
"""
command = ['oc', 'login', url, '--username', username, '--password', password]
if not tls_verify:
command.extend(['--insecure-skip-tls-verify=true'])
try:
subprocess.check_call(command)
except subprocess.CalledProcessError as e:
# replace password with '***' so somebody will not accidentally leak it in CI logs
e.cmd = [x if x != password else '***' for x in e.cmd]
raise e
def oc_delete_pods(selector, namespace=None):
"""Wrapper around `oc delete`.
Selector determines which pods will be deleted.
More on selectors:
https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
Note k8s/OpenShift will immediately restart deleted pods,
to match desired number of replicas for given deployment.
The expectation is that the user is already logged in
and has permissions to delete pods.
Example usage:
oc_delete_pods('service=bayesian-pgbouncer'
:param selector: str, selector identifying pods that will be deleted
:param namespace: str, namespace in which `oc delete` command should be executed,
default: currently selected namespace
:return: None on success, raises `subprocess.CalledProcessError` on error
"""
command = ['oc', 'delete', 'pods', '--selector=', selector]
if namespace:
command.extend(['--namespace', namespace])
subprocess.check_call(command)
| Add few oc wrappers for future resiliency testing
Not used anywhere yet.
"""Unsorted utility functions used in integration tests."""
import requests
def download_file_from_url(url):
"""Download file from the given URL and do basic check of response."""
assert url
response = requests.get(url)
assert response.status_code == 200
assert response.text is not None
return response.text
def split_comma_separated_list(l):
"""Split the list into elements separated by commas."""
return [i.strip() for i in l.split(',')]
|
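One flag worth noting in the oc_delete_pods helper recorded above: ['oc', 'delete', 'pods', '--selector=', selector] passes '--selector=' and the selector value as two separate argv entries, so oc parses an empty selector plus a stray positional argument (the docstring's example call is also missing its closing parenthesis). A corrected sketch, assuming label-selector deletion was the intent:

import subprocess

def oc_delete_pods(selector, namespace=None):
    # Join flag and value into a single argument,
    # e.g. --selector=service=bayesian-pgbouncer
    command = ['oc', 'delete', 'pods', '--selector=' + selector]
    if namespace:
        command.extend(['--namespace', namespace])
    subprocess.check_call(command)

# Usage, after a successful oc_login():
# oc_delete_pods('service=bayesian-pgbouncer', namespace='bayesian')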
7e0f1552ebdadb8f2023167afcd557bdc09b06f9 | scripts/analysis/plot_velocity_based_position_controller_data.py | scripts/analysis/plot_velocity_based_position_controller_data.py | import numpy as np
import matplotlib.pyplot as plt
import sys
if sys.argc < 2:
print "Usage ./plot_velocity_based_position_controller.py [File_name]"
sys.exit(-1)
data = np.genfromtxt(sys.argv[-1], delimiter=',')
# 0 for x, 1 for y 2 for z and 3 for yaw
plot_axis = 3;
ts = (data[:,0] - data[0, 0])/1e9
plt.figure(1+plot_axis)
plt.subplot(2,1,1)
plt.plot(ts, data[:, 1+plot_axis]);
plt.ylabel('Error')
plt.subplot(2,1,2)
plt.plot(ts, data[:, 5+plot_axis]);
plt.ylabel('Cumulative Error')
plt.xlabel('Time (seconds)')
plt.show()
plt.figure(2+plot_axis)
plt.plot(ts, data[:, 9+plot_axis])
plt.ylabel('Control')
plt.xlabel('Time (seconds)')
| Add plotting script for analyzing velocity based position controller | Add plotting script for analyzing velocity based position controller
| Python | mpl-2.0 | jhu-asco/aerial_autonomy,jhu-asco/aerial_autonomy,jhu-asco/aerial_autonomy,jhu-asco/aerial_autonomy,jhu-asco/aerial_autonomy,jhu-asco/aerial_autonomy | <INSERT> import numpy as np
import matplotlib.pyplot as plt
import sys
if sys.argc < 2:
<INSERT_END> <INSERT> print "Usage ./plot_velocity_based_position_controller.py [File_name]"
sys.exit(-1)
data = np.genfromtxt(sys.argv[-1], delimiter=',')
# 0 for x, 1 for y 2 for z and 3 for yaw
plot_axis = 3;
ts = (data[:,0] - data[0, 0])/1e9
plt.figure(1+plot_axis)
plt.subplot(2,1,1)
plt.plot(ts, data[:, 1+plot_axis]);
plt.ylabel('Error')
plt.subplot(2,1,2)
plt.plot(ts, data[:, 5+plot_axis]);
plt.ylabel('Cumulative Error')
plt.xlabel('Time (seconds)')
plt.show()
plt.figure(2+plot_axis)
plt.plot(ts, data[:, 9+plot_axis])
plt.ylabel('Control')
plt.xlabel('Time (seconds)')
<INSERT_END> <|endoftext|> import numpy as np
import matplotlib.pyplot as plt
import sys
if sys.argc < 2:
print "Usage ./plot_velocity_based_position_controller.py [File_name]"
sys.exit(-1)
data = np.genfromtxt(sys.argv[-1], delimiter=',')
# 0 for x, 1 for y 2 for z and 3 for yaw
plot_axis = 3;
ts = (data[:,0] - data[0, 0])/1e9
plt.figure(1+plot_axis)
plt.subplot(2,1,1)
plt.plot(ts, data[:, 1+plot_axis]);
plt.ylabel('Error')
plt.subplot(2,1,2)
plt.plot(ts, data[:, 5+plot_axis]);
plt.ylabel('Cumulative Error')
plt.xlabel('Time (seconds)')
plt.show()
plt.figure(2+plot_axis)
plt.plot(ts, data[:, 9+plot_axis])
plt.ylabel('Control')
plt.xlabel('Time (seconds)')
| Add plotting script for analyzing velocity based position controller
|
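As recorded, the script's argument guard cannot run: the sys module has no argc attribute, and the bare print statement is Python 2 syntax. A working guard, assuming the script takes exactly one log-file argument:

import sys

if len(sys.argv) < 2:
    print("Usage: ./plot_velocity_based_position_controller.py [File_name]")
    sys.exit(-1)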
|
51566a873372b23b5c05d376d346dab063f87437 | photutils/utils/tests/test_quantity_helpers.py | photutils/utils/tests/test_quantity_helpers.py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for the _quantity_helpers module.
"""
import astropy.units as u
import numpy as np
from numpy.testing import assert_equal
import pytest
from .._quantity_helpers import process_quantities
@pytest.mark.parametrize('all_units', (False, True))
def test_units(all_units):
if all_units:
unit = u.Jy
else:
unit = 1.0
arrs = (np.ones(3) * unit, np.ones(3) * unit, np.ones(3) * unit)
names = ('a', 'b', 'c')
arrs2, unit2 = process_quantities(arrs, names)
if all_units:
assert unit2 == unit
for (arr, arr2) in zip(arrs, arrs2):
assert_equal(arr.value, arr2)
else:
assert unit2 is None
assert arrs2 == arrs
def test_mixed_units():
arrs = (np.ones(3) * u.Jy, np.ones(3) * u.km)
names = ('a', 'b')
with pytest.raises(ValueError):
_, _ = process_quantities(arrs, names)
arrs = (np.ones(3) * u.Jy, np.ones(3))
names = ('a', 'b')
with pytest.raises(ValueError):
_, _ = process_quantities(arrs, names)
unit = u.Jy
arrs = (np.ones(3) * unit, np.ones(3), np.ones(3) * unit)
names = ('a', 'b', 'c')
with pytest.raises(ValueError):
_, _ = process_quantities(arrs, names)
unit = u.Jy
arrs = (np.ones(3) * unit, np.ones(3), np.ones(3) * u.km)
names = ('a', 'b', 'c')
with pytest.raises(ValueError):
_, _ = process_quantities(arrs, names)
| Add tests for quantity helpers | Add tests for quantity helpers
| Python | bsd-3-clause | larrybradley/photutils,astropy/photutils | <REPLACE_OLD> <REPLACE_NEW> # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for the _quantity_helpers module.
"""
import astropy.units as u
import numpy as np
from numpy.testing import assert_equal
import pytest
from .._quantity_helpers import process_quantities
@pytest.mark.parametrize('all_units', (False, True))
def test_units(all_units):
if all_units:
unit = u.Jy
else:
unit = 1.0
arrs = (np.ones(3) * unit, np.ones(3) * unit, np.ones(3) * unit)
names = ('a', 'b', 'c')
arrs2, unit2 = process_quantities(arrs, names)
if all_units:
assert unit2 == unit
for (arr, arr2) in zip(arrs, arrs2):
assert_equal(arr.value, arr2)
else:
assert unit2 is None
assert arrs2 == arrs
def test_mixed_units():
arrs = (np.ones(3) * u.Jy, np.ones(3) * u.km)
names = ('a', 'b')
with pytest.raises(ValueError):
_, _ = process_quantities(arrs, names)
arrs = (np.ones(3) * u.Jy, np.ones(3))
names = ('a', 'b')
with pytest.raises(ValueError):
_, _ = process_quantities(arrs, names)
unit = u.Jy
arrs = (np.ones(3) * unit, np.ones(3), np.ones(3) * unit)
names = ('a', 'b', 'c')
with pytest.raises(ValueError):
_, _ = process_quantities(arrs, names)
unit = u.Jy
arrs = (np.ones(3) * unit, np.ones(3), np.ones(3) * u.km)
names = ('a', 'b', 'c')
with pytest.raises(ValueError):
_, _ = process_quantities(arrs, names)
<REPLACE_END> <|endoftext|> # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for the _quantity_helpers module.
"""
import astropy.units as u
import numpy as np
from numpy.testing import assert_equal
import pytest
from .._quantity_helpers import process_quantities
@pytest.mark.parametrize('all_units', (False, True))
def test_units(all_units):
if all_units:
unit = u.Jy
else:
unit = 1.0
arrs = (np.ones(3) * unit, np.ones(3) * unit, np.ones(3) * unit)
names = ('a', 'b', 'c')
arrs2, unit2 = process_quantities(arrs, names)
if all_units:
assert unit2 == unit
for (arr, arr2) in zip(arrs, arrs2):
assert_equal(arr.value, arr2)
else:
assert unit2 is None
assert arrs2 == arrs
def test_mixed_units():
arrs = (np.ones(3) * u.Jy, np.ones(3) * u.km)
names = ('a', 'b')
with pytest.raises(ValueError):
_, _ = process_quantities(arrs, names)
arrs = (np.ones(3) * u.Jy, np.ones(3))
names = ('a', 'b')
with pytest.raises(ValueError):
_, _ = process_quantities(arrs, names)
unit = u.Jy
arrs = (np.ones(3) * unit, np.ones(3), np.ones(3) * unit)
names = ('a', 'b', 'c')
with pytest.raises(ValueError):
_, _ = process_quantities(arrs, names)
unit = u.Jy
arrs = (np.ones(3) * unit, np.ones(3), np.ones(3) * u.km)
names = ('a', 'b', 'c')
with pytest.raises(ValueError):
_, _ = process_quantities(arrs, names)
| Add tests for quantity helpers
|
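The helper exercised by these tests is not part of the record; from the assertions its contract can be inferred: strip a common unit from the inputs, return (values, unit) with unit None when nothing carries a unit, and raise ValueError for mixed or partial units. A sketch consistent with the tests (an inference, not photutils' actual implementation):

def process_quantities(values, names):
    # Every input must carry the same unit, or none may carry one.
    units = [getattr(v, 'unit', None) for v in values]
    unit = units[0]
    if any(other != unit for other in units):
        raise ValueError('{} must all have the same units'.format(
            ', '.join(names)))
    if unit is None:
        return values, None
    return tuple(v.value for v in values), unit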
|
a635a8d58e46cf4ef1bc225f8824d73984971fee | countVowels.py | countVowels.py | """ Q6- Write a program that counts up the number of vowels contained in the string s. Valid vowels are: 'a', 'e', 'i',
'o', and 'u'. For example, if s = 'azcbobobegghakl', your program should print: Number of vowels: 5
"""
# Using the isVowel function from isVowel.py module (Answer of fifth question of Assignment 3)
def isVowel( char ):
# Converting the letter to lowercase for our convenience and hence, we do not need to check character's case and hence, simplifies the problem
# str.lower( char )
# The above function has been commented out since this is not required in this problem.. But, the above built-in function might be useful in normal cases.
# Splitting the condition: 'a' or 'e' or 'i' or 'o' or 'u' to make it more readable and easier to understand.
is_char_a = char == 'a'
is_char_e = char == 'e'
is_char_i = char == 'i'
is_char_o = char == 'o'
is_char_u = char == 'u'
is_char_vowel = is_char_a or is_char_e or is_char_i or is_char_o or is_char_u
return is_char_vowel
def countVowels( string ):
if str.islower( string ):
count = 0 # Counts the number of vowels
for letter in string:
if isVowel( letter ):
count += 1
print( "Number of vowels: " + str( count ) )
else:
if len( string ):
print( "Error: All the characters in the string should be in LOWERCASE." )
else:
print( "Error: The string is EMPTY." )
string = input( "Enter the string: " )
countVowels( string ) | Add the answer to the sixth question of Assignment 3 | Add the answer to the sixth question of Assignment 3
| Python | mit | SuyashD95/python-assignments | <INSERT> """ Q6- Write a program that counts up the number of vowels contained in the string s. Valid vowels are: 'a', 'e', 'i',
'o', and 'u'. For example, if s = 'azcbobobegghakl', your program should print: Number of vowels: 5
"""
# Using the isVowel function from isVowel.py module (Answer of fifth question of Assignment 3)
def isVowel( char ):
# Converting the letter to lowercase for our convenience and hence, we do not need to check character's case and hence, simplifies the problem
# str.lower( char )
# The above function has been commented out since this is not required in this problem.. But, the above built-in function might be useful in normal cases.
# Splitting the condition: 'a' or 'e' <INSERT_END> <INSERT> or 'i' or 'o' or 'u' to make it more readable and easier to understand.
is_char_a = char == 'a'
is_char_e = char == 'e'
is_char_i = char == 'i'
is_char_o = char == 'o'
is_char_u = char == 'u'
is_char_vowel = is_char_a or is_char_e or is_char_i or is_char_o or is_char_u
return is_char_vowel
def countVowels( string ):
if str.islower( string ):
count = 0 # Counts the number of vowels
for letter in string:
if isVowel( letter ):
count += 1
print( "Number of vowels: " + str( count ) )
else:
if len( string ):
print( "Error: All the characters in the string should be in LOWERCASE." )
else:
print( "Error: The string is EMPTY." )
string = input( "Enter the string: " )
countVowels( string ) <INSERT_END> <|endoftext|> """ Q6- Write a program that counts up the number of vowels contained in the string s. Valid vowels are: 'a', 'e', 'i',
'o', and 'u'. For example, if s = 'azcbobobegghakl', your program should print: Number of vowels: 5
"""
# Using the isVowel function from isVowel.py module (Answer of fifth question of Assignment 3)
def isVowel( char ):
# Converting the letter to lowercase for our convenience and hence, we do not need to check character's case and hence, simplifies the problem
# str.lower( char )
# The above function has been commented out since this is not required in this problem.. But, the above built-in function might be useful in normal cases.
# Splitting the condition: 'a' or 'e' or 'i' or 'o' or 'u' to make it more readable and easier to understand.
is_char_a = char == 'a'
is_char_e = char == 'e'
is_char_i = char == 'i'
is_char_o = char == 'o'
is_char_u = char == 'u'
is_char_vowel = is_char_a or is_char_e or is_char_i or is_char_o or is_char_u
return is_char_vowel
def countVowels( string ):
if str.islower( string ):
count = 0 # Counts the number of vowels
for letter in string:
if isVowel( letter ):
count += 1
print( "Number of vowels: " + str( count ) )
else:
if len( string ):
print( "Error: All the characters in the string should be in LOWERCASE." )
else:
print( "Error: The string is EMPTY." )
string = input( "Enter the string: " )
countVowels( string ) | Add the answer to the sixth question of Assignment 3
|
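For comparison, the five chained comparisons in isVowel reduce to a membership test, and the whole count to a single pass:

def count_vowels(s):
    # Equivalent count for lowercase input.
    return sum(ch in 'aeiou' for ch in s)

assert count_vowels('azcbobobegghakl') == 5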
|
4ac37e35396e2393a9bbe2e954674537747e384b | setup.py | setup.py | #!/usr/bin/python
import time
from datetime import date
from setuptools import setup
from pagekite.common import APPVER
import os
try:
# This borks sdist.
os.remove('.SELF')
except:
pass
setup(
name="pagekite",
version=APPVER.replace('github', 'dev%d' % (120*int(time.time()/120))),
license="AGPLv3+",
author="Bjarni R. Einarsson",
author_email="[email protected]",
url="http://pagekite.org/",
description="""PageKite makes localhost servers visible to the world.""",
long_description="""\
PageKite is a system for running publicly visible servers (generally
web servers) on machines without a direct connection to the Internet,
such as mobile devices or computers behind restrictive firewalls.
PageKite works around NAT, firewalls and IP-address limitations by
using a combination of tunnels and reverse proxies.
Natively supported protocols: HTTP, HTTPS
Any other TCP-based service, including SSH and VNC, may be exposed
as well to clients supporting HTTP Proxies.
""",
packages=['pagekite', 'pagekite.ui', 'pagekite.proto'],
scripts=['scripts/pagekite', 'scripts/lapcat', 'scripts/vipagekite'],
install_requires=['SocksipyChain >= 2.0.15']
)
| #!/usr/bin/python
import time
from datetime import date
from setuptools import setup
from pagekite.common import APPVER
import os
try:
# This borks sdist.
os.remove('.SELF')
except:
pass
setup(
name="pagekite",
version=os.getenv(
'PAGEKITE_VERSION',
APPVER.replace('github', 'dev%d' % (120*int(time.time()/120)))),
license="AGPLv3+",
author="Bjarni R. Einarsson",
author_email="[email protected]",
url="http://pagekite.org/",
description="""PageKite makes localhost servers visible to the world.""",
long_description="""\
PageKite is a system for running publicly visible servers (generally
web servers) on machines without a direct connection to the Internet,
such as mobile devices or computers behind restrictive firewalls.
PageKite works around NAT, firewalls and IP-address limitations by
using a combination of tunnels and reverse proxies.
Natively supported protocols: HTTP, HTTPS
Any other TCP-based service, including SSH and VNC, may be exposed
as well to clients supporting HTTP Proxies.
""",
packages=['pagekite', 'pagekite.ui', 'pagekite.proto'],
scripts=['scripts/pagekite', 'scripts/lapcat', 'scripts/vipagekite'],
install_requires=['SocksipyChain >= 2.0.15']
)
| Make it possible to manually override version numbers | Make it possible to manually override version numbers
| Python | agpl-3.0 | pagekite/PyPagekite,pagekite/PyPagekite,pagekite/PyPagekite | <REPLACE_OLD> version=APPVER.replace('github', <REPLACE_NEW> version=os.getenv(
'PAGEKITE_VERSION',
APPVER.replace('github', <REPLACE_END> <REPLACE_OLD> (120*int(time.time()/120))),
<REPLACE_NEW> (120*int(time.time()/120)))),
<REPLACE_END> <|endoftext|> #!/usr/bin/python
import time
from datetime import date
from setuptools import setup
from pagekite.common import APPVER
import os
try:
# This borks sdist.
os.remove('.SELF')
except:
pass
setup(
name="pagekite",
version=os.getenv(
'PAGEKITE_VERSION',
APPVER.replace('github', 'dev%d' % (120*int(time.time()/120)))),
license="AGPLv3+",
author="Bjarni R. Einarsson",
author_email="[email protected]",
url="http://pagekite.org/",
description="""PageKite makes localhost servers visible to the world.""",
long_description="""\
PageKite is a system for running publicly visible servers (generally
web servers) on machines without a direct connection to the Internet,
such as mobile devices or computers behind restrictive firewalls.
PageKite works around NAT, firewalls and IP-address limitations by
using a combination of tunnels and reverse proxies.
Natively supported protocols: HTTP, HTTPS
Any other TCP-based service, including SSH and VNC, may be exposed
as well to clients supporting HTTP Proxies.
""",
packages=['pagekite', 'pagekite.ui', 'pagekite.proto'],
scripts=['scripts/pagekite', 'scripts/lapcat', 'scripts/vipagekite'],
install_requires=['SocksipyChain >= 2.0.15']
)
| Make it possible to manually override version numbers
#!/usr/bin/python
import time
from datetime import date
from setuptools import setup
from pagekite.common import APPVER
import os
try:
# This borks sdist.
os.remove('.SELF')
except:
pass
setup(
name="pagekite",
version=APPVER.replace('github', 'dev%d' % (120*int(time.time()/120))),
license="AGPLv3+",
author="Bjarni R. Einarsson",
author_email="[email protected]",
url="http://pagekite.org/",
description="""PageKite makes localhost servers visible to the world.""",
long_description="""\
PageKite is a system for running publicly visible servers (generally
web servers) on machines without a direct connection to the Internet,
such as mobile devices or computers behind restrictive firewalls.
PageKite works around NAT, firewalls and IP-address limitations by
using a combination of tunnels and reverse proxies.
Natively supported protocols: HTTP, HTTPS
Any other TCP-based service, including SSH and VNC, may be exposed
as well to clients supporting HTTP Proxies.
""",
packages=['pagekite', 'pagekite.ui', 'pagekite.proto'],
scripts=['scripts/pagekite', 'scripts/lapcat', 'scripts/vipagekite'],
install_requires=['SocksipyChain >= 2.0.15']
)
|
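With this change a release script can pin the version from the environment instead of editing setup.py; the dev-timestamp fallback is still evaluated on every run, which is harmless here. Typical use (the version value below is illustrative):

import os
import subprocess

env = dict(os.environ, PAGEKITE_VERSION='1.5.0')
subprocess.check_call(['python', 'setup.py', 'sdist'], env=env)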
5207550b9d19ff6823fb641e86e4851106ebd7f1 | bench/run-paper-nums.py | bench/run-paper-nums.py | #!/usr/bin/env python
devices = [
("hdd", "/dev/sdc1"),
("ssd-sam", "/dev/sdb1"),
("sdd-intel", "/dev/sdd2"),
("ram", "/dev/loop0"),
]
benches = [
("smallfile", "./smallfile /tmp/ft"),
("smallsync", "./smallsync /tmp/ft"),
("largefile", "./largefile /tmp/ft"),
("mailbench", "./mailbench.sh /home/alex/sv6 /tmp/ft"),
("app-bench", "./app-bench.sh /home/alex/xv6 /tmp/ft"),
("sqlite", "./sqlitebench.sh /tmp/ft"),
]
benches = [x for x in benches if x[0] == "mailbench"]
import os
import sys
for d, dev in devices:
for b, bench in benches:
for i in range(1, 6):
name = "{}-{}-{}".format(b, d, i)
cmd = "perflock ./run-bench.sh {0} '{1}' '{2}' > {1}.log".format(dev, name, bench)
print(cmd)
status = os.system(cmd)
if status != 0:
print("failed:", cmd, file=sys.stderr)
| Add script to run benchmarks for paper | Add script to run benchmarks for paper
| Python | mit | mit-pdos/fscq-impl,mit-pdos/fscq-impl,mit-pdos/fscq-impl,mit-pdos/fscq-impl,mit-pdos/fscq-impl | <REPLACE_OLD> <REPLACE_NEW> #!/usr/bin/env python
devices = [
("hdd", "/dev/sdc1"),
("ssd-sam", "/dev/sdb1"),
("sdd-intel", "/dev/sdd2"),
("ram", "/dev/loop0"),
]
benches = [
("smallfile", "./smallfile /tmp/ft"),
("smallsync", "./smallsync /tmp/ft"),
("largefile", "./largefile /tmp/ft"),
("mailbench", "./mailbench.sh /home/alex/sv6 /tmp/ft"),
("app-bench", "./app-bench.sh /home/alex/xv6 /tmp/ft"),
("sqlite", "./sqlitebench.sh /tmp/ft"),
]
benches = [x for x in benches if x[0] == "mailbench"]
import os
import sys
for d, dev in devices:
for b, bench in benches:
for i in range(1, 6):
name = "{}-{}-{}".format(b, d, i)
cmd = "perflock ./run-bench.sh {0} '{1}' '{2}' > {1}.log".format(dev, name, bench)
print(cmd)
status = os.system(cmd)
if status != 0:
print("failed:", cmd, file=sys.stderr)
<REPLACE_END> <|endoftext|> #!/usr/bin/env python
devices = [
("hdd", "/dev/sdc1"),
("ssd-sam", "/dev/sdb1"),
("sdd-intel", "/dev/sdd2"),
("ram", "/dev/loop0"),
]
benches = [
("smallfile", "./smallfile /tmp/ft"),
("smallsync", "./smallsync /tmp/ft"),
("largefile", "./largefile /tmp/ft"),
("mailbench", "./mailbench.sh /home/alex/sv6 /tmp/ft"),
("app-bench", "./app-bench.sh /home/alex/xv6 /tmp/ft"),
("sqlite", "./sqlitebench.sh /tmp/ft"),
]
benches = [x for x in benches if x[0] == "mailbench"]
import os
import sys
for d, dev in devices:
for b, bench in benches:
for i in range(1, 6):
name = "{}-{}-{}".format(b, d, i)
cmd = "perflock ./run-bench.sh {0} '{1}' '{2}' > {1}.log".format(dev, name, bench)
print(cmd)
status = os.system(cmd)
if status != 0:
print("failed:", cmd, file=sys.stderr)
| Add script to run benchmarks for paper
|
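os.system is adequate for the failure check above, since any nonzero wait status flags a failed run, but on Unix it returns the raw wait status rather than the exit code; subprocess yields the code directly. A drop-in for the loop body (cmd is the loop variable from the script above):

import subprocess
import sys

status = subprocess.run(cmd, shell=True).returncode
if status != 0:
    print("failed:", cmd, file=sys.stderr)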
|
be0d4b9e2e62490cab62a39499e570bdab1ac2f5 | cmp_imgs.py | cmp_imgs.py | #!/bin/env python3
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import imread
def rgb2gray(img):
return np.dot(img, [0.299, 0.587, 0.114])
if __name__ == "__main__":
img_name = "1920x1080.jpg"
img = imread(img_name)
gray_img = rgb2gray(img)
plt.imshow(gray_img, cmap=plt.cm.gray)
plt.show()
resized = np.ndarray((64, 64))
for (i,j),_ in np.ndenumerate(resized):
f1, f2 = gray_img.shape[0] // 64, gray_img.shape[1] // 64
slisse = gray_img[i*f1:(i+1)*f1,j*f2:(j+1)*f2]
resized[i,j] = np.max(slisse)
plt.imshow(resized, cmap=plt.cm.gray)
plt.show() | Convert an image to grayscale and resize it. | Convert an image to grayscale and resize it.
| Python | mit | HKervadec/cmp_imgs | <INSERT> #!/bin/env python3
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import imread
def rgb2gray(img):
return np.dot(img, [0.299, 0.587, 0.114])
if __name__ == "__main__":
img_name = "1920x1080.jpg"
img = imread(img_name)
gray_img = rgb2gray(img)
plt.imshow(gray_img, cmap=plt.cm.gray)
plt.show()
resized = np.ndarray((64, 64))
for (i,j),_ in np.ndenumerate(resized):
f1, f2 = gray_img.shape[0] // 64, gray_img.shape[1] // 64
slisse = <INSERT_END> <INSERT> gray_img[i*f1:(i+1)*f1,j*f2:(j+1)*f2]
resized[i,j] = np.max(slisse)
plt.imshow(resized, cmap=plt.cm.gray)
plt.show() <INSERT_END> <|endoftext|> #!/bin/env python3
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import imread
def rgb2gray(img):
return np.dot(img, [0.299, 0.587, 0.114])
if __name__ == "__main__":
img_name = "1920x1080.jpg"
img = imread(img_name)
gray_img = rgb2gray(img)
plt.imshow(gray_img, cmap=plt.cm.gray)
plt.show()
resized = np.ndarray((64, 64))
for (i,j),_ in np.ndenumerate(resized):
f1, f2 = gray_img.shape[0] // 64, gray_img.shape[1] // 64
slisse = gray_img[i*f1:(i+1)*f1,j*f2:(j+1)*f2]
resized[i,j] = np.max(slisse)
plt.imshow(resized, cmap=plt.cm.gray)
plt.show() | Convert an image to grayscale and resize it.
|
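Two portability notes on this script: scipy.ndimage.imread was deprecated in SciPy 1.0 and removed in 1.2, and the 3-vector dot product assumes an RGB image, so an RGBA file needs its alpha channel dropped first. A loading path that avoids both, with imageio assumed as the reader:

import imageio
import numpy as np

img = np.asarray(imageio.imread('1920x1080.jpg'))[..., :3]  # drop alpha if present
gray_img = img @ [0.299, 0.587, 0.114]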
|
fea9c44be08719f0fcca98a1d531a83c9db4c6af | tests/test_urls.py | tests/test_urls.py | import pytest
from django.conf import settings
from pytest_django_test.compat import force_text
pytestmark = pytest.mark.urls('pytest_django_test.urls_overridden')
try:
from django.core.urlresolvers import is_valid_path
except ImportError:
from django.core.urlresolvers import resolve, Resolver404
def is_valid_path(path, urlconf=None):
"""Return True if path resolves against default URL resolver
This is a convenience method to make working with "is this a
match?" cases easier, avoiding unnecessarily indented
try...except blocks.
"""
try:
resolve(path, urlconf)
return True
except Resolver404:
return False
def test_urls():
assert settings.ROOT_URLCONF == 'pytest_django_test.urls_overridden'
assert is_valid_path('/overridden_url/')
def test_urls_client(client):
response = client.get('/overridden_url/')
assert force_text(response.content) == 'Overridden urlconf works!'
| import pytest
from django.conf import settings
from pytest_django_test.compat import force_text
try:
from django.core.urlresolvers import is_valid_path
except ImportError:
from django.core.urlresolvers import resolve, Resolver404
def is_valid_path(path, urlconf=None):
"""Return True if path resolves against default URL resolver
This is a convenience method to make working with "is this a
match?" cases easier, avoiding unnecessarily indented
try...except blocks.
"""
try:
resolve(path, urlconf)
return True
except Resolver404:
return False
@pytest.mark.urls('pytest_django_test.urls_overridden')
def test_urls():
assert settings.ROOT_URLCONF == 'pytest_django_test.urls_overridden'
assert is_valid_path('/overridden_url/')
@pytest.mark.urls('pytest_django_test.urls_overridden')
def test_urls_client(client):
response = client.get('/overridden_url/')
assert force_text(response.content) == 'Overridden urlconf works!'
def test_urls_cache_is_cleared(testdir):
testdir.makepyfile(myurls="""
from django.conf.urls import patterns, url
def fake_view(request):
pass
urlpatterns = patterns('', url(r'first/$', fake_view, name='first'))
""")
testdir.makepyfile("""
from django.core.urlresolvers import reverse, NoReverseMatch
import pytest
@pytest.mark.urls('myurls')
def test_something():
reverse('first')
def test_something_else():
with pytest.raises(NoReverseMatch):
reverse('first')
""")
result = testdir.runpytest()
assert result.ret == 0
| Add test to confirm url cache is cleared | Add test to confirm url cache is cleared
| Python | bsd-3-clause | pombredanne/pytest_django,thedrow/pytest-django,ktosiek/pytest-django,tomviner/pytest-django | <REPLACE_OLD> force_text
pytestmark = pytest.mark.urls('pytest_django_test.urls_overridden')
try:
<REPLACE_NEW> force_text
try:
<REPLACE_END> <REPLACE_OLD> False
def <REPLACE_NEW> False
@pytest.mark.urls('pytest_django_test.urls_overridden')
def <REPLACE_END> <REPLACE_OLD> is_valid_path('/overridden_url/')
def <REPLACE_NEW> is_valid_path('/overridden_url/')
@pytest.mark.urls('pytest_django_test.urls_overridden')
def <REPLACE_END> <REPLACE_OLD> works!'
<REPLACE_NEW> works!'
def test_urls_cache_is_cleared(testdir):
testdir.makepyfile(myurls="""
from django.conf.urls import patterns, url
def fake_view(request):
pass
urlpatterns = patterns('', url(r'first/$', fake_view, name='first'))
""")
testdir.makepyfile("""
from django.core.urlresolvers import reverse, NoReverseMatch
import pytest
@pytest.mark.urls('myurls')
def test_something():
reverse('first')
def test_something_else():
with pytest.raises(NoReverseMatch):
reverse('first')
""")
result = testdir.runpytest()
assert result.ret == 0
<REPLACE_END> <|endoftext|> import pytest
from django.conf import settings
from pytest_django_test.compat import force_text
try:
from django.core.urlresolvers import is_valid_path
except ImportError:
from django.core.urlresolvers import resolve, Resolver404
def is_valid_path(path, urlconf=None):
"""Return True if path resolves against default URL resolver
This is a convenience method to make working with "is this a
match?" cases easier, avoiding unnecessarily indented
try...except blocks.
"""
try:
resolve(path, urlconf)
return True
except Resolver404:
return False
@pytest.mark.urls('pytest_django_test.urls_overridden')
def test_urls():
assert settings.ROOT_URLCONF == 'pytest_django_test.urls_overridden'
assert is_valid_path('/overridden_url/')
@pytest.mark.urls('pytest_django_test.urls_overridden')
def test_urls_client(client):
response = client.get('/overridden_url/')
assert force_text(response.content) == 'Overridden urlconf works!'
def test_urls_cache_is_cleared(testdir):
testdir.makepyfile(myurls="""
from django.conf.urls import patterns, url
def fake_view(request):
pass
urlpatterns = patterns('', url(r'first/$', fake_view, name='first'))
""")
testdir.makepyfile("""
from django.core.urlresolvers import reverse, NoReverseMatch
import pytest
@pytest.mark.urls('myurls')
def test_something():
reverse('first')
def test_something_else():
with pytest.raises(NoReverseMatch):
reverse('first')
""")
result = testdir.runpytest()
assert result.ret == 0
| Add test to confirm url cache is cleared
import pytest
from django.conf import settings
from pytest_django_test.compat import force_text
pytestmark = pytest.mark.urls('pytest_django_test.urls_overridden')
try:
from django.core.urlresolvers import is_valid_path
except ImportError:
from django.core.urlresolvers import resolve, Resolver404
def is_valid_path(path, urlconf=None):
"""Return True if path resolves against default URL resolver
This is a convenience method to make working with "is this a
match?" cases easier, avoiding unnecessarily indented
try...except blocks.
"""
try:
resolve(path, urlconf)
return True
except Resolver404:
return False
def test_urls():
assert settings.ROOT_URLCONF == 'pytest_django_test.urls_overridden'
assert is_valid_path('/overridden_url/')
def test_urls_client(client):
response = client.get('/overridden_url/')
assert force_text(response.content) == 'Overridden urlconf works!'
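Editor's note: the new test above relies on Django flushing its URL-resolver cache when a urls override is removed. A minimal sketch of that mechanism, assuming the clear_url_caches helper Django ships (django.urls on newer versions):
from django.conf import settings
from django.core.urlresolvers import clear_url_caches

def use_urlconf(urlconf):
    # Swap the root urlconf and drop cached resolvers; without the cache
    # flush, reverse() keeps answering from the stale pattern set.
    settings.ROOT_URLCONF = urlconf
    clear_url_caches()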
|
a094b0978034869a32be1c541a4d396843819cfe | project/management/commands/generatesecretkey.py | project/management/commands/generatesecretkey.py | from django.core.management.templates import BaseCommand
from django.utils.crypto import get_random_string
import fileinput
from django.conf import settings
class Command(BaseCommand):
help = ("Replaces the SECRET_KEY VALUE in settings.py with a new one.")
def handle(self, *args, **options):
# Create a random SECRET_KEY hash to put it in the main settings.
chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'
secret_key = get_random_string(50, chars)
file_path = "{}/settings.py".format(settings.PROJECT_DIR)
for line in fileinput.input(file_path, inplace=True):
if line.startswith("SECRET_KEY = "):
print("SECRET_KEY = '{}'".format(secret_key))
else:
print(line, end='')
| Add management command to generate a random secret key | Add management command to generate a random secret key
| Python | mit | Angoreher/xcero,Angoreher/xcero,Angoreher/xcero,magnet-cl/django-project-template-py3,magnet-cl/django-project-template-py3,Angoreher/xcero,magnet-cl/django-project-template-py3,magnet-cl/django-project-template-py3 | <INSERT> from django.core.management.templates import BaseCommand
from django.utils.crypto import get_random_string
import fileinput
from django.conf import settings
class Command(BaseCommand):
<INSERT_END> <INSERT> help = ("Replaces the SECRET_KEY VALUE in settings.py with a new one.")
def handle(self, *args, **options):
# Create a random SECRET_KEY hash to put it in the main settings.
chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'
secret_key = get_random_string(50, chars)
file_path = "{}/settings.py".format(settings.PROJECT_DIR)
for line in fileinput.input(file_path, inplace=True):
if line.startswith("SECRET_KEY = "):
print("SECRET_KEY = '{}'".format(secret_key))
else:
print(line, end='')
<INSERT_END> <|endoftext|> from django.core.management.templates import BaseCommand
from django.utils.crypto import get_random_string
import fileinput
from django.conf import settings
class Command(BaseCommand):
help = ("Replaces the SECRET_KEY VALUE in settings.py with a new one.")
def handle(self, *args, **options):
# Create a random SECRET_KEY hash to put it in the main settings.
chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'
secret_key = get_random_string(50, chars)
file_path = "{}/settings.py".format(settings.PROJECT_DIR)
for line in fileinput.input(file_path, inplace=True):
if line.startswith("SECRET_KEY = "):
print("SECRET_KEY = '{}'".format(secret_key))
else:
print(line, end='')
| Add management command to generate a random secret key
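Editor's note: the command above hinges on fileinput's in-place mode, where print() output replaces the line just read. The trick in isolation, on a hypothetical notes.txt:
import fileinput

for line in fileinput.input("notes.txt", inplace=True):
    # stdout is redirected into notes.txt while iterating, so every
    # printed line overwrites the corresponding input line.
    if line.startswith("TODO"):
        print("DONE" + line[len("TODO"):], end='')
    else:
        print(line, end='')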
|
|
46a69b1795a5946c815c16a7d910d8c680e1ed7f | setup.py | setup.py | from setuptools import setup, find_packages
from io import open
setup(
name='django-debug-toolbar',
version='1.3.2',
description='A configurable set of panels that display various debug '
'information about the current request/response.',
long_description=open('README.rst', encoding='utf-8').read(),
author='Rob Hudson',
author_email='[email protected]',
url='https://github.com/django-debug-toolbar/django-debug-toolbar',
download_url='https://pypi.python.org/pypi/django-debug-toolbar',
license='BSD',
packages=find_packages(exclude=('tests.*', 'tests', 'example')),
install_requires=[
'django>=1.4.2',
'sqlparse',
],
include_package_data=True,
zip_safe=False, # because we're including static files
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
| from setuptools import setup, find_packages
from io import open
setup(
name='django-debug-toolbar',
version='1.3.2',
description='A configurable set of panels that display various debug '
'information about the current request/response.',
long_description=open('README.rst', encoding='utf-8').read(),
author='Rob Hudson',
author_email='[email protected]',
url='https://github.com/django-debug-toolbar/django-debug-toolbar',
download_url='https://pypi.python.org/pypi/django-debug-toolbar',
license='BSD',
packages=find_packages(exclude=('tests.*', 'tests', 'example')),
install_requires=[
'Django>=1.4.2',
'sqlparse',
],
include_package_data=True,
zip_safe=False, # because we're including static files
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
| Correct spelling of Django in requirements | Correct spelling of Django in requirements
It seems that using 'django' instead of 'Django' has the consequence that "pip install django_debug_toolbar" has the consequence of installing the latest version of Django, even if you already have Django installed. | Python | bsd-3-clause | megcunningham/django-debug-toolbar,jazzband/django-debug-toolbar,pevzi/django-debug-toolbar,Endika/django-debug-toolbar,barseghyanartur/django-debug-toolbar,peap/django-debug-toolbar,tim-schilling/django-debug-toolbar,tim-schilling/django-debug-toolbar,barseghyanartur/django-debug-toolbar,jazzband/django-debug-toolbar,barseghyanartur/django-debug-toolbar,django-debug-toolbar/django-debug-toolbar,spookylukey/django-debug-toolbar,seperman/django-debug-toolbar,peap/django-debug-toolbar,Endika/django-debug-toolbar,peap/django-debug-toolbar,spookylukey/django-debug-toolbar,pevzi/django-debug-toolbar,seperman/django-debug-toolbar,calvinpy/django-debug-toolbar,calvinpy/django-debug-toolbar,pevzi/django-debug-toolbar,calvinpy/django-debug-toolbar,megcunningham/django-debug-toolbar,tim-schilling/django-debug-toolbar,django-debug-toolbar/django-debug-toolbar,Endika/django-debug-toolbar,jazzband/django-debug-toolbar,django-debug-toolbar/django-debug-toolbar,spookylukey/django-debug-toolbar,megcunningham/django-debug-toolbar,seperman/django-debug-toolbar | <REPLACE_OLD> 'django>=1.4.2',
<REPLACE_NEW> 'Django>=1.4.2',
<REPLACE_END> <|endoftext|> from setuptools import setup, find_packages
from io import open
setup(
name='django-debug-toolbar',
version='1.3.2',
description='A configurable set of panels that display various debug '
'information about the current request/response.',
long_description=open('README.rst', encoding='utf-8').read(),
author='Rob Hudson',
author_email='[email protected]',
url='https://github.com/django-debug-toolbar/django-debug-toolbar',
download_url='https://pypi.python.org/pypi/django-debug-toolbar',
license='BSD',
packages=find_packages(exclude=('tests.*', 'tests', 'example')),
install_requires=[
'Django>=1.4.2',
'sqlparse',
],
include_package_data=True,
zip_safe=False, # because we're including static files
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
| Correct spelling of Django in requirements
It seems that using 'django' instead of 'Django' means that "pip install django_debug_toolbar" installs the latest version of Django, even if you already have Django installed.
from setuptools import setup, find_packages
from io import open
setup(
name='django-debug-toolbar',
version='1.3.2',
description='A configurable set of panels that display various debug '
'information about the current request/response.',
long_description=open('README.rst', encoding='utf-8').read(),
author='Rob Hudson',
author_email='[email protected]',
url='https://github.com/django-debug-toolbar/django-debug-toolbar',
download_url='https://pypi.python.org/pypi/django-debug-toolbar',
license='BSD',
packages=find_packages(exclude=('tests.*', 'tests', 'example')),
install_requires=[
'django>=1.4.2',
'sqlparse',
],
include_package_data=True,
zip_safe=False, # because we're including static files
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
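Editor's note: the 'django' vs 'Django' distinction only bites tooling that skips PEP 503 name normalization, which is what this one-character fix works around. The normalization rule itself, for reference:
import re

def normalize(name):
    # PEP 503: runs of '-', '_' and '.' collapse to '-', compared lowercase.
    return re.sub(r"[-_.]+", "-", name).lower()

assert normalize("Django") == normalize("django") == "django"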
|
7f6cd8f5444d92644642cadb84d7f958e0b6fce1 | examples/image_test.py | examples/image_test.py | import sys
import os
import pyglet.window
from pyglet.gl import *
from pyglet import clock
from pyglet.ext.scene2d import Image2d
from ctypes import *
if len(sys.argv) != 2:
print 'Usage: %s <PNG/JPEG filename>'%sys.argv[0]
sys.exit()
window = pyglet.window.Window(width=400, height=400)
image = Image2d.load(sys.argv[1])
s = max(image.width, image.height)
c = clock.Clock(60)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(60., 1., 1., 100.)
glEnable(GL_COLOR_MATERIAL)
glMatrixMode(GL_MODELVIEW)
glClearColor(0, 0, 0, 0)
glColor4f(1, 1, 1, 1)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
glEnable(GL_BLEND)
while not window.has_exit:
c.tick()
window.dispatch_events()
glClear(GL_COLOR_BUFFER_BIT)
glLoadIdentity()
glScalef(1./s, 1./s, 1.)
glTranslatef(-image.width/2, -image.height/2, -1.)
image.draw()
window.flip()
| import sys
import os
import ctypes
import pyglet.window
from pyglet.gl import *
from pyglet import clock
from pyglet import image
if len(sys.argv) != 2:
print 'Usage: %s <PNG/JPEG filename>'%sys.argv[0]
sys.exit()
window = pyglet.window.Window(width=400, height=400)
image = image.load(sys.argv[1])
imx = imy = 0
@window.event
def on_mouse_drag(x, y, dx, dy, buttons, modifiers):
global imx, imy
imx += dx
imy += dy
clock.set_fps_limit(30)
while not window.has_exit:
clock.tick()
window.dispatch_events()
glClear(GL_COLOR_BUFFER_BIT)
image.blit(imx, imy, 0)
window.flip()
| Use the core, make example more useful. | Use the core, make example more useful.
git-svn-id: d4fdfcd4de20a449196f78acc655f735742cd30d@874 14d46d22-621c-0410-bb3d-6f67920f7d95
| Python | bsd-3-clause | regular/pyglet-avbin-optimizations,regular/pyglet-avbin-optimizations,regular/pyglet-avbin-optimizations,regular/pyglet-avbin-optimizations | <REPLACE_OLD> os
import <REPLACE_NEW> os
import ctypes
import <REPLACE_END> <REPLACE_OLD> pyglet.ext.scene2d <REPLACE_NEW> pyglet <REPLACE_END> <REPLACE_OLD> Image2d
from ctypes import *
if <REPLACE_NEW> image
if <REPLACE_END> <REPLACE_OLD> height=400)
image <REPLACE_NEW> height=400)
image <REPLACE_END> <REPLACE_OLD> Image2d.load(sys.argv[1])
s <REPLACE_NEW> image.load(sys.argv[1])
imx <REPLACE_END> <REPLACE_OLD> max(image.width, image.height)
c <REPLACE_NEW> imy <REPLACE_END> <REPLACE_OLD> clock.Clock(60)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(60., 1., 1., 100.)
glEnable(GL_COLOR_MATERIAL)
glMatrixMode(GL_MODELVIEW)
glClearColor(0, 0, 0, 0)
glColor4f(1, 1, 1, 1)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
glEnable(GL_BLEND)
while <REPLACE_NEW> 0
@window.event
def on_mouse_drag(x, y, dx, dy, buttons, modifiers):
global imx, imy
imx += dx
imy += dy
clock.set_fps_limit(30)
while <REPLACE_END> <REPLACE_OLD> c.tick()
<REPLACE_NEW> clock.tick()
<REPLACE_END> <REPLACE_OLD> window.dispatch_events()
<REPLACE_NEW> window.dispatch_events()
<REPLACE_END> <REPLACE_OLD> glLoadIdentity()
glScalef(1./s, 1./s, 1.)
glTranslatef(-image.width/2, -image.height/2, -1.)
image.draw()
<REPLACE_NEW> image.blit(imx, imy, 0)
<REPLACE_END> <|endoftext|> import sys
import os
import ctypes
import pyglet.window
from pyglet.gl import *
from pyglet import clock
from pyglet import image
if len(sys.argv) != 2:
print 'Usage: %s <PNG/JPEG filename>'%sys.argv[0]
sys.exit()
window = pyglet.window.Window(width=400, height=400)
image = image.load(sys.argv[1])
imx = imy = 0
@window.event
def on_mouse_drag(x, y, dx, dy, buttons, modifiers):
global imx, imy
imx += dx
imy += dy
clock.set_fps_limit(30)
while not window.has_exit:
clock.tick()
window.dispatch_events()
glClear(GL_COLOR_BUFFER_BIT)
image.blit(imx, imy, 0)
window.flip()
| Use the core, make example more useful.
git-svn-id: d4fdfcd4de20a449196f78acc655f735742cd30d@874 14d46d22-621c-0410-bb3d-6f67920f7d95
import sys
import os
import pyglet.window
from pyglet.gl import *
from pyglet import clock
from pyglet.ext.scene2d import Image2d
from ctypes import *
if len(sys.argv) != 2:
print 'Usage: %s <PNG/JPEG filename>'%sys.argv[0]
sys.exit()
window = pyglet.window.Window(width=400, height=400)
image = Image2d.load(sys.argv[1])
s = max(image.width, image.height)
c = clock.Clock(60)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(60., 1., 1., 100.)
glEnable(GL_COLOR_MATERIAL)
glMatrixMode(GL_MODELVIEW)
glClearColor(0, 0, 0, 0)
glColor4f(1, 1, 1, 1)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
glEnable(GL_BLEND)
while not window.has_exit:
c.tick()
window.dispatch_events()
glClear(GL_COLOR_BUFFER_BIT)
glLoadIdentity()
glScalef(1./s, 1./s, 1.)
glTranslatef(-image.width/2, -image.height/2, -1.)
image.draw()
window.flip()
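Editor's note: the @window.event decorator in the new example registers a handler under its function name (here on_mouse_drag). A stripped-down sketch of that dispatch pattern — a hypothetical stand-in, not pyglet's real implementation:
class Window(object):
    def __init__(self):
        self._handlers = {}

    def event(self, fn):
        self._handlers[fn.__name__] = fn  # e.g. 'on_mouse_drag'
        return fn

    def dispatch_event(self, name, *args):
        handler = self._handlers.get(name)
        if handler is not None:
            handler(*args)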
|
0323189a504f27f14d60c8c3ebdb40ea160d7f79 | source/clique/collection.py | source/clique/collection.py | # :coding: utf-8
# :copyright: Copyright (c) 2013 Martin Pengelly-Phillips
# :license: See LICENSE.txt.
import re
class Collection(object):
'''Represent group of items that differ only by numerical component.'''
def __init__(self, head, tail, padding, indexes=None):
'''Initialise collection.
*head* is the leading common part whilst *tail* is the trailing
common part.
*padding* specifies the "width" of the numerical component. An index
will be padded with zeros to fill this width. A *padding* of zero
implies no padding and width may be any size so long as no leading
zeros are present.
*indexes* can specify a set of numerical indexes to initially populate
the collection with.
'''
super(Collection, self).__init__()
self.head = head
self.tail = tail
self.padding = padding
self.indexes = set()
if indexes is not None:
self.indexes.update(indexes)
self._pattern = re.compile('^{0}(?P<index>(?P<padding>0*)\d+?){1}$'
.format(self.head, self.tail))
def __iter__(self):
'''Return iterator over items in collection.'''
def __contains__(self, item):
'''Return whether *item* is present in collection.'''
def match(self, item):
'''Return whether *item* matches this collection pattern.
If a match is successful return data about the match otherwise return
None.
'''
def add(self, item):
'''Add *item* to collection.
raise :py:class:`~clique.error.CollectionError` if *item* cannot be
added to the collection.
'''
def remove(self, item):
'''Remove *item* from collection.
raise :py:class:`~clique.error.CollectionError` if *item* cannot be
removed from the collection.
'''
def format(self, pattern):
'''Return string representation as specified by *pattern*.'''
def is_contiguous(self):
'''Return whether entire collection is contiguous.'''
def holes(self):
'''Return holes in collection.
Return as list of :py:class:`~clique.collection.Collection` instances.
'''
def merge(self, collection):
'''Merge *collection* into this collection.
If the *collection* is compatible with this collection then update
indexes with all indexes in *collection*.
'''
def separate(self):
'''Return contiguous parts of collection as separate collections.
Return as list of :py:class:`~clique.collection.Collection` instances.
'''
| Add initial interface for Collection class with stubs for methods. | Add initial interface for Collection class with stubs for methods.
A Collection will represent a group of items with a common numerical
component.
| Python | apache-2.0 | 4degrees/clique | <REPLACE_OLD> <REPLACE_NEW> # :coding: utf-8
# :copyright: Copyright (c) 2013 Martin Pengelly-Phillips
# :license: See LICENSE.txt.
import re
class Collection(object):
'''Represent group of items that differ only by numerical component.'''
def __init__(self, head, tail, padding, indexes=None):
'''Initialise collection.
*head* is the leading common part whilst *tail* is the trailing
common part.
*padding* specifies the "width" of the numerical component. An index
will be padded with zeros to fill this width. A *padding* of zero
implies no padding and width may be any size so long as no leading
zeros are present.
*indexes* can specify a set of numerical indexes to initially populate
the collection with.
'''
super(Collection, self).__init__()
self.head = head
self.tail = tail
self.padding = padding
self.indexes = set()
if indexes is not None:
self.indexes.update(indexes)
self._pattern = re.compile('^{0}(?P<index>(?P<padding>0*)\d+?){1}$'
.format(self.head, self.tail))
def __iter__(self):
'''Return iterator over items in collection.'''
def __contains__(self, item):
'''Return whether *item* is present in collection.'''
def match(self, item):
'''Return whether *item* matches this collection pattern.
If a match is successful return data about the match otherwise return
None.
'''
def add(self, item):
'''Add *item* to collection.
raise :py:class:`~clique.error.CollectionError` if *item* cannot be
added to the collection.
'''
def remove(self, item):
'''Remove *item* from collection.
raise :py:class:`~clique.error.CollectionError` if *item* cannot be
removed from the collection.
'''
def format(self, pattern):
'''Return string representation as specified by *pattern*.'''
def is_contiguous(self):
'''Return whether entire collection is contiguous.'''
def holes(self):
'''Return holes in collection.
Return as list of :py:class:`~clique.collection.Collection` instances.
'''
def merge(self, collection):
'''Merge *collection* into this collection.
If the *collection* is compatible with this collection then update
indexes with all indexes in *collection*.
'''
def separate(self):
'''Return contiguous parts of collection as separate collections.
Return as list of :py:class:`~clique.collection.Collection` instances.
'''
<REPLACE_END> <|endoftext|> # :coding: utf-8
# :copyright: Copyright (c) 2013 Martin Pengelly-Phillips
# :license: See LICENSE.txt.
import re
class Collection(object):
'''Represent group of items that differ only by numerical component.'''
def __init__(self, head, tail, padding, indexes=None):
'''Initialise collection.
*head* is the leading common part whilst *tail* is the trailing
common part.
*padding* specifies the "width" of the numerical component. An index
will be padded with zeros to fill this width. A *padding* of zero
implies no padding and width may be any size so long as no leading
zeros are present.
*indexes* can specify a set of numerical indexes to initially populate
the collection with.
'''
super(Collection, self).__init__()
self.head = head
self.tail = tail
self.padding = padding
self.indexes = set()
if indexes is not None:
self.indexes.update(indexes)
self._pattern = re.compile('^{0}(?P<index>(?P<padding>0*)\d+?){1}$'
.format(self.head, self.tail))
def __iter__(self):
'''Return iterator over items in collection.'''
def __contains__(self, item):
'''Return whether *item* is present in collection.'''
def match(self, item):
'''Return whether *item* matches this collection pattern.
If a match is successful return data about the match otherwise return
None.
'''
def add(self, item):
'''Add *item* to collection.
raise :py:class:`~clique.error.CollectionError` if *item* cannot be
added to the collection.
'''
def remove(self, item):
'''Remove *item* from collection.
raise :py:class:`~clique.error.CollectionError` if *item* cannot be
removed from the collection.
'''
def format(self, pattern):
'''Return string representation as specified by *pattern*.'''
def is_contiguous(self):
'''Return whether entire collection is contiguous.'''
def holes(self):
'''Return holes in collection.
Return as list of :py:class:`~clique.collection.Collection` instances.
'''
def merge(self, collection):
'''Merge *collection* into this collection.
If the *collection* is compatible with this collection then update
indexes with all indexes in *collection*.
'''
def separate(self):
'''Return contiguous parts of collection as separate collections.
Return as list of :py:class:`~clique.collection.Collection` instances.
'''
| Add initial interface for Collection class with stubs for methods.
A Collection will represent a group of items with a common numerical
component.
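Editor's note: a concrete taste of the pattern those stubs are built around; the head/tail values here are hypothetical, not from the commit:
import re

pattern = re.compile(r'^file\.(?P<index>(?P<padding>0*)\d+?)\.exr$')
match = pattern.match('file.0042.exr')
assert match.group('index') == '0042'
assert match.group('padding') == '00'  # captured zeros reveal the padding width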
|
|
e4f0fc2cdd209bbadffae9f3da83b0585a64143f | accelerator/migrations/0077_add_program_overview_link_field_to_a_program.py | accelerator/migrations/0077_add_program_overview_link_field_to_a_program.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-05-15 14:16
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accelerator', '0076_change_description_to_textfield'),
]
operations = [
migrations.AddField(
model_name='program',
name='program_overview_link',
field=models.URLField(
blank=True,
null=True,
help_text=('URL of the program overview page, '
'ex: https://masschallenge.org/programs-boston'),
max_length=255,
)
)
]
 | Add migration file for program overview link | [AC-6989] Add migration file for program overview link
| Python | mit | masschallenge/django-accelerator,masschallenge/django-accelerator | <REPLACE_OLD> <REPLACE_NEW> # -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-05-15 14:16
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accelerator', '0076_change_description_to_textfield'),
]
operations = [
migrations.AddField(
model_name='program',
name='program_overview_link',
field=models.URLField(
blank=True,
null=True,
help_text=('URL of the program overview page, '
'ex: https://masschallenge.org/programs-boston'),
max_length=255,
)
)
]
<REPLACE_END> <|endoftext|> # -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-05-15 14:16
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accelerator', '0076_change_description_to_textfield'),
]
operations = [
migrations.AddField(
model_name='program',
name='program_overview_link',
field=models.URLField(
blank=True,
null=True,
help_text=('URL of the program overview page, '
'ex: https://masschallenge.org/programs-boston'),
max_length=255,
)
)
]
 | [AC-6989] Add migration file for program overview link
|
|
5ac176fafd35bfa675e1718b74a8c6ef4dc74629 | skoleintra/pgWeekplans.py | skoleintra/pgWeekplans.py | #
# -*- encoding: utf-8 -*-
#
import re
import config
import surllib
import semail
import datetime
import urllib
URL_PREFIX = 'http://%s/Infoweb/Fi/' % config.HOSTNAME
URL_MAIN = URL_PREFIX + 'Ugeplaner.asp'
def docFindWeekplans(bs):
trs = bs.findAll('tr')
for line in trs:
if not line.has_key('class'):
continue
if not [c for c in line['class'].split() if c.startswith('linje')]:
continue
links = line.findAll('a')
assert(len(links) >= 1)
# find week date
title = links[0].text
# find url
url = links[0]['href']
url = URL_PREFIX + urllib.quote(url.encode('iso-8859-1'), safe=':/?=&%')
bs = surllib.skoleGetURL(url, True)
msg = semail.Message('weekplans', bs)
msg.setTitle(u'%s' % title)
msg.updatePersonDate()
msg.maybeSend()
def skoleWeekplans():
global bs
# surllib.skoleLogin()
config.log(u'Kigger efter nye ugeplaner')
# read the initial page
bs = surllib.skoleGetURL(URL_MAIN, True, True)
docFindWeekplans(bs)
if __name__ == '__main__':
# test
skoleWeekplans()
| #
# -*- encoding: utf-8 -*-
#
import re
import config
import surllib
import semail
import datetime
import urllib
URL_PREFIX = 'http://%s/Infoweb/Fi/' % config.HOSTNAME
URL_MAIN = URL_PREFIX + 'Ugeplaner.asp'
def docFindWeekplans(bs):
trs = bs.findAll('tr')
for line in trs:
if not line.has_key('class'):
continue
if not [c for c in line['class'].split() if c.startswith('linje')]:
continue
links = line.findAll('a')
assert(len(links) >= 1)
# find week date
title = links[0].text
# find url
url = links[0]['href']
url = url.encode('iso-8859-1')
url = URL_PREFIX + urllib.quote(url, safe=':/?=&%')
bs = surllib.skoleGetURL(url, True)
msg = semail.Message('weekplans', bs)
msg.setTitle(u'%s' % title)
msg.updatePersonDate()
msg.maybeSend()
def skoleWeekplans():
global bs
# surllib.skoleLogin()
config.log(u'Kigger efter nye ugeplaner')
# read the initial page
bs = surllib.skoleGetURL(URL_MAIN, True, True)
docFindWeekplans(bs)
if __name__ == '__main__':
# test
skoleWeekplans()
 | Make code comply with PEP8 | Make code comply with PEP8
| Python | bsd-2-clause | bennyslbs/fskintra | <REPLACE_OLD> 'Ugeplaner.asp'
def <REPLACE_NEW> 'Ugeplaner.asp'
def <REPLACE_END> <INSERT> url.encode('iso-8859-1')
url = <INSERT_END> <REPLACE_OLD> urllib.quote(url.encode('iso-8859-1'), <REPLACE_NEW> urllib.quote(url, <REPLACE_END> <|endoftext|> #
# -*- encoding: utf-8 -*-
#
import re
import config
import surllib
import semail
import datetime
import urllib
URL_PREFIX = 'http://%s/Infoweb/Fi/' % config.HOSTNAME
URL_MAIN = URL_PREFIX + 'Ugeplaner.asp'
def docFindWeekplans(bs):
trs = bs.findAll('tr')
for line in trs:
if not line.has_key('class'):
continue
if not [c for c in line['class'].split() if c.startswith('linje')]:
continue
links = line.findAll('a')
assert(len(links) >= 1)
# find week date
title = links[0].text
# find url
url = links[0]['href']
url = url.encode('iso-8859-1')
url = URL_PREFIX + urllib.quote(url, safe=':/?=&%')
bs = surllib.skoleGetURL(url, True)
msg = semail.Message('weekplans', bs)
msg.setTitle(u'%s' % title)
msg.updatePersonDate()
msg.maybeSend()
def skoleWeekplans():
global bs
# surllib.skoleLogin()
config.log(u'Kigger efter nye ugeplaner')
# read the initial page
bs = surllib.skoleGetURL(URL_MAIN, True, True)
docFindWeekplans(bs)
if __name__ == '__main__':
# test
skoleWeekplans()
 | Make code comply with PEP8
#
# -*- encoding: utf-8 -*-
#
import re
import config
import surllib
import semail
import datetime
import urllib
URL_PREFIX = 'http://%s/Infoweb/Fi/' % config.HOSTNAME
URL_MAIN = URL_PREFIX + 'Ugeplaner.asp'
def docFindWeekplans(bs):
trs = bs.findAll('tr')
for line in trs:
if not line.has_key('class'):
continue
if not [c for c in line['class'].split() if c.startswith('linje')]:
continue
links = line.findAll('a')
assert(len(links) >= 1)
# find week date
title = links[0].text
# find url
url = links[0]['href']
url = URL_PREFIX + urllib.quote(url.encode('iso-8859-1'), safe=':/?=&%')
bs = surllib.skoleGetURL(url, True)
msg = semail.Message('weekplans', bs)
msg.setTitle(u'%s' % title)
msg.updatePersonDate()
msg.maybeSend()
def skoleWeekplans():
global bs
# surllib.skoleLogin()
config.log(u'Kigger efter nye ugeplaner')
# read the initial page
bs = surllib.skoleGetURL(URL_MAIN, True, True)
docFindWeekplans(bs)
if __name__ == '__main__':
# test
skoleWeekplans()
|
42339932811493bdd398fda4f7a2322a94bdc2e9 | saleor/shipping/migrations/0018_default_zones_countries.py | saleor/shipping/migrations/0018_default_zones_countries.py | # Generated by Django 3.0.6 on 2020-06-05 14:35
from django.db import migrations
from ..utils import get_countries_without_shipping_zone
def assign_countries_in_default_shipping_zone(apps, schema_editor):
ShippingZone = apps.get_model("shipping", "ShippingZone")
qs = ShippingZone.objects.filter(default=True)
if qs.exists():
default_zone = qs[0]
if not default_zone.countries:
default_zone.countries = get_countries_without_shipping_zone()
default_zone.save(update_fields=["countries"])
class Migration(migrations.Migration):
dependencies = [
("shipping", "0017_django_price_2"),
]
operations = [
migrations.RunPython(
assign_countries_in_default_shipping_zone, migrations.RunPython.noop
)
]
| # Generated by Django 3.0.6 on 2020-06-05 14:35
from django.db import migrations
from django_countries import countries
def get_countries_without_shipping_zone(ShippingZone):
"""Return countries that are not assigned to any shipping zone."""
covered_countries = set()
for zone in ShippingZone.objects.all():
covered_countries.update({c.code for c in zone.countries})
return (country[0] for country in countries if country[0] not in covered_countries)
def assign_countries_in_default_shipping_zone(apps, schema_editor):
ShippingZone = apps.get_model("shipping", "ShippingZone")
qs = ShippingZone.objects.filter(default=True)
if qs.exists():
default_zone = qs[0]
if not default_zone.countries:
default_zone.countries = get_countries_without_shipping_zone(ShippingZone)
default_zone.save(update_fields=["countries"])
class Migration(migrations.Migration):
dependencies = [
("shipping", "0017_django_price_2"),
]
operations = [
migrations.RunPython(
assign_countries_in_default_shipping_zone, migrations.RunPython.noop
)
]
| Move utility function to migration | Move utility function to migration
| Python | bsd-3-clause | mociepka/saleor,mociepka/saleor,mociepka/saleor | <REPLACE_OLD> migrations
from ..utils <REPLACE_NEW> migrations
from django_countries <REPLACE_END> <REPLACE_OLD> get_countries_without_shipping_zone
def <REPLACE_NEW> countries
def get_countries_without_shipping_zone(ShippingZone):
"""Return countries that are not assigned to any shipping zone."""
covered_countries = set()
for zone in ShippingZone.objects.all():
covered_countries.update({c.code for c in zone.countries})
return (country[0] for country in countries if country[0] not in covered_countries)
def <REPLACE_END> <REPLACE_OLD> get_countries_without_shipping_zone()
<REPLACE_NEW> get_countries_without_shipping_zone(ShippingZone)
<REPLACE_END> <|endoftext|> # Generated by Django 3.0.6 on 2020-06-05 14:35
from django.db import migrations
from django_countries import countries
def get_countries_without_shipping_zone(ShippingZone):
"""Return countries that are not assigned to any shipping zone."""
covered_countries = set()
for zone in ShippingZone.objects.all():
covered_countries.update({c.code for c in zone.countries})
return (country[0] for country in countries if country[0] not in covered_countries)
def assign_countries_in_default_shipping_zone(apps, schema_editor):
ShippingZone = apps.get_model("shipping", "ShippingZone")
qs = ShippingZone.objects.filter(default=True)
if qs.exists():
default_zone = qs[0]
if not default_zone.countries:
default_zone.countries = get_countries_without_shipping_zone(ShippingZone)
default_zone.save(update_fields=["countries"])
class Migration(migrations.Migration):
dependencies = [
("shipping", "0017_django_price_2"),
]
operations = [
migrations.RunPython(
assign_countries_in_default_shipping_zone, migrations.RunPython.noop
)
]
| Move utility function to migration
# Generated by Django 3.0.6 on 2020-06-05 14:35
from django.db import migrations
from ..utils import get_countries_without_shipping_zone
def assign_countries_in_default_shipping_zone(apps, schema_editor):
ShippingZone = apps.get_model("shipping", "ShippingZone")
qs = ShippingZone.objects.filter(default=True)
if qs.exists():
default_zone = qs[0]
if not default_zone.countries:
default_zone.countries = get_countries_without_shipping_zone()
default_zone.save(update_fields=["countries"])
class Migration(migrations.Migration):
dependencies = [
("shipping", "0017_django_price_2"),
]
operations = [
migrations.RunPython(
assign_countries_in_default_shipping_zone, migrations.RunPython.noop
)
]
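Editor's note: inlining the helper keeps the migration self-contained, and pairing it with apps.get_model keeps it on historical model state. The general shape of that convention, as a sketch:
def forwards(apps, schema_editor):
    # Resolve the model from migration state; importing shipping.models
    # directly would break once the live model drifts from this migration.
    ShippingZone = apps.get_model('shipping', 'ShippingZone')
    default_zone = ShippingZone.objects.filter(default=True).first()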
|
0241e253c68ca6862a3da26d29a649f65c27ae36 | demos/chatroom/experiment.py | demos/chatroom/experiment.py | """Coordination chatroom game."""
import dallinger as dlgr
from dallinger.config import get_config
try:
unicode = unicode
except NameError: # Python 3
unicode = str
config = get_config()
def extra_settings():
config.register('network', unicode)
config.register('n', int)
class CoordinationChatroom(dlgr.experiments.Experiment):
"""Define the structure of the experiment."""
def __init__(self, session):
"""Initialize the experiment."""
super(CoordinationChatroom, self).__init__(session)
self.experiment_repeats = 1
self.num_participants = config.get('n')
self.initial_recruitment_size = self.num_participants
self.quorum = self.num_participants
self.config = config
if not self.config.ready:
self.config.load_config()
self.setup()
def create_network(self):
"""Create a new network by reading the configuration file."""
class_ = getattr(
dlgr.networks,
self.config.get('network')
)
return class_(max_size=self.num_participants)
def info_post_request(self, node, info):
"""Run when a request to create an info is complete."""
for agent in node.neighbors():
node.transmit(what=info, to_whom=agent)
def create_node(self, participant, network):
"""Create a node for a participant."""
return dlgr.nodes.Agent(network=network, participant=participant)
| """Coordination chatroom game."""
import dallinger as dlgr
from dallinger.compat import unicode
from dallinger.config import get_config
config = get_config()
def extra_settings():
config.register('network', unicode)
config.register('n', int)
class CoordinationChatroom(dlgr.experiments.Experiment):
"""Define the structure of the experiment."""
def __init__(self, session):
"""Initialize the experiment."""
super(CoordinationChatroom, self).__init__(session)
self.experiment_repeats = 1
self.num_participants = config.get('n')
self.initial_recruitment_size = self.num_participants
self.quorum = self.num_participants
self.config = config
if not self.config.ready:
self.config.load_config()
self.setup()
def create_network(self):
"""Create a new network by reading the configuration file."""
class_ = getattr(
dlgr.networks,
self.config.get('network')
)
return class_(max_size=self.num_participants)
def info_post_request(self, node, info):
"""Run when a request to create an info is complete."""
for agent in node.neighbors():
node.transmit(what=info, to_whom=agent)
def create_node(self, participant, network):
"""Create a node for a participant."""
return dlgr.nodes.Agent(network=network, participant=participant)
| Use compat for unicode import | Use compat for unicode import
| Python | mit | Dallinger/Dallinger,jcpeterson/Dallinger,jcpeterson/Dallinger,jcpeterson/Dallinger,Dallinger/Dallinger,Dallinger/Dallinger,jcpeterson/Dallinger,Dallinger/Dallinger,Dallinger/Dallinger,jcpeterson/Dallinger | <INSERT> dallinger.compat import unicode
from <INSERT_END> <REPLACE_OLD> get_config
try:
unicode = unicode
except NameError: # Python 3
unicode = str
config <REPLACE_NEW> get_config
config <REPLACE_END> <|endoftext|> """Coordination chatroom game."""
import dallinger as dlgr
from dallinger.compat import unicode
from dallinger.config import get_config
config = get_config()
def extra_settings():
config.register('network', unicode)
config.register('n', int)
class CoordinationChatroom(dlgr.experiments.Experiment):
"""Define the structure of the experiment."""
def __init__(self, session):
"""Initialize the experiment."""
super(CoordinationChatroom, self).__init__(session)
self.experiment_repeats = 1
self.num_participants = config.get('n')
self.initial_recruitment_size = self.num_participants
self.quorum = self.num_participants
self.config = config
if not self.config.ready:
self.config.load_config()
self.setup()
def create_network(self):
"""Create a new network by reading the configuration file."""
class_ = getattr(
dlgr.networks,
self.config.get('network')
)
return class_(max_size=self.num_participants)
def info_post_request(self, node, info):
"""Run when a request to create an info is complete."""
for agent in node.neighbors():
node.transmit(what=info, to_whom=agent)
def create_node(self, participant, network):
"""Create a node for a participant."""
return dlgr.nodes.Agent(network=network, participant=participant)
| Use compat for unicode import
"""Coordination chatroom game."""
import dallinger as dlgr
from dallinger.config import get_config
try:
unicode = unicode
except NameError: # Python 3
unicode = str
config = get_config()
def extra_settings():
config.register('network', unicode)
config.register('n', int)
class CoordinationChatroom(dlgr.experiments.Experiment):
"""Define the structure of the experiment."""
def __init__(self, session):
"""Initialize the experiment."""
super(CoordinationChatroom, self).__init__(session)
self.experiment_repeats = 1
self.num_participants = config.get('n')
self.initial_recruitment_size = self.num_participants
self.quorum = self.num_participants
self.config = config
if not self.config.ready:
self.config.load_config()
self.setup()
def create_network(self):
"""Create a new network by reading the configuration file."""
class_ = getattr(
dlgr.networks,
self.config.get('network')
)
return class_(max_size=self.num_participants)
def info_post_request(self, node, info):
"""Run when a request to create an info is complete."""
for agent in node.neighbors():
node.transmit(what=info, to_whom=agent)
def create_node(self, participant, network):
"""Create a node for a participant."""
return dlgr.nodes.Agent(network=network, participant=participant)
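Editor's note: the dallinger.compat module this commit switches to presumably centralizes the same shim the file used to carry inline — roughly:
try:
    unicode = unicode   # Python 2: the builtin exists
except NameError:
    unicode = str       # Python 3: str is already text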
|
469d73255365392a821d701b4df9098d97b7546a | judge/toyojjudge/taskrunner.py | judge/toyojjudge/taskrunner.py | import asyncio
import logging
logger = logging.getLogger(__name__)
class TaskRunner:
def __init__(self, sandbox_pool, languages, checkers):
self.sandbox_pool = sandbox_pool
self.languages = languages
self.checkers = checkers
async def run(self, task):
async with self.sandbox_pool.acquire() as box:
logger.debug("Running %s", task)
lang = self.languages[task.submission.language_name]
check = self.checkers[task.testcase.checker_name]
await lang.run_task(box, task)
if task.verdict is not None:
task.accepted = False
else:
await check.check(box, task)
| import asyncio
import logging
logger = logging.getLogger(__name__)
class TaskRunner:
def __init__(self, sandbox_pool, languages, checkers):
self.sandbox_pool = sandbox_pool
self.languages = languages
self.checkers = checkers
async def run(self, task):
async with self.sandbox_pool.acquire() as box:
language_name = task.submission.language_name
checker_name = task.testcase.checker_name
logger.info("Running %s, language %s, checker %s",
task, language_name, checker_name)
lang = self.languages[language_name]
check = self.checkers[checker_name]
await lang.run_task(box, task)
if task.verdict is not None:
task.accepted = False
else:
await check.check(box, task)
| Print running task, language and checker as INFO | judge: Print running task, language and checker as INFO
| Python | agpl-3.0 | johnchen902/toyoj,johnchen902/toyoj,johnchen902/toyoj,johnchen902/toyoj,johnchen902/toyoj,johnchen902/toyoj | <REPLACE_OLD> logger.debug("Running %s", task)
<REPLACE_NEW> language_name = task.submission.language_name
checker_name = task.testcase.checker_name
logger.info("Running %s, language %s, checker %s",
task, language_name, checker_name)
<REPLACE_END> <REPLACE_OLD> self.languages[task.submission.language_name]
<REPLACE_NEW> self.languages[language_name]
<REPLACE_END> <REPLACE_OLD> self.checkers[task.testcase.checker_name]
<REPLACE_NEW> self.checkers[checker_name]
<REPLACE_END> <|endoftext|> import asyncio
import logging
logger = logging.getLogger(__name__)
class TaskRunner:
def __init__(self, sandbox_pool, languages, checkers):
self.sandbox_pool = sandbox_pool
self.languages = languages
self.checkers = checkers
async def run(self, task):
async with self.sandbox_pool.acquire() as box:
language_name = task.submission.language_name
checker_name = task.testcase.checker_name
logger.info("Running %s, language %s, checker %s",
task, language_name, checker_name)
lang = self.languages[language_name]
check = self.checkers[checker_name]
await lang.run_task(box, task)
if task.verdict is not None:
task.accepted = False
else:
await check.check(box, task)
| judge: Print running task, language and checker as INFO
import asyncio
import logging
logger = logging.getLogger(__name__)
class TaskRunner:
def __init__(self, sandbox_pool, languages, checkers):
self.sandbox_pool = sandbox_pool
self.languages = languages
self.checkers = checkers
async def run(self, task):
async with self.sandbox_pool.acquire() as box:
logger.debug("Running %s", task)
lang = self.languages[task.submission.language_name]
check = self.checkers[task.testcase.checker_name]
await lang.run_task(box, task)
if task.verdict is not None:
task.accepted = False
else:
await check.check(box, task)
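Editor's note: `async with self.sandbox_pool.acquire() as box` implies the pool hands out async context managers. One minimal way such a pool can look, built on asyncio.Queue (a sketch, not the project's code):
import asyncio

class SandboxPool:
    def __init__(self, boxes):
        self._queue = asyncio.Queue()
        for box in boxes:
            self._queue.put_nowait(box)

    def acquire(self):
        queue = self._queue

        class _Lease:
            async def __aenter__(self):
                self.box = await queue.get()   # wait for a free sandbox
                return self.box

            async def __aexit__(self, *exc_info):
                queue.put_nowait(self.box)     # return it on exit

        return _Lease()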
|
8842bbf45ffe2a76832075e053dce90a95964bcd | Bookie/bookie/tests/__init__.py | Bookie/bookie/tests/__init__.py | import ConfigParser
import os
import unittest
from pyramid.config import Configurator
from pyramid import testing
global_config = {}
ini = ConfigParser.ConfigParser()
ini.read('test.ini')
settings = dict(ini.items('app:bookie'))
def setup_db(settings):
""" We need to create the test sqlite database to run our tests against
If the db exists, remove it
We're using the SA-Migrations API to create the db and catch it up to the
latest migration level for testing
In theory, we could use this API to do version specific testing as well if
we needed to.
If we want to run any tests with a fresh db we can call this function
"""
from migrate.versioning import api as mig
sa_url = settings['sqlalchemy.url']
migrate_repository = 'migrations'
# we're hackish here since we're going to assume the test db is whatever is
# after the last slash of the SA url sqlite:///somedb.db
db_name = sa_url[sa_url.rindex('/') + 1:]
try:
os.remove(db_name)
except:
pass
open(db_name, 'w').close()
mig.version_control(sa_url, migrate_repository)
mig.upgrade(sa_url, migrate_repository)
setup_db(settings)
| import ConfigParser
import os
import unittest
from pyramid.config import Configurator
from pyramid import testing
global_config = {}
ini = ConfigParser.ConfigParser()
# we need to pull the right ini for the test we want to run
# by default pullup test.ini, but we might want to test mysql, pgsql, etc
test_ini = os.environ.get('BOOKIE_TEST_INI', None)
if test_ini:
ini.read(test_ini)
else:
ini.read('test.ini')
settings = dict(ini.items('app:bookie'))
def setup_db(settings):
""" We need to create the test sqlite database to run our tests against
If the db exists, remove it
We're using the SA-Migrations API to create the db and catch it up to the
latest migration level for testing
In theory, we could use this API to do version specific testing as well if
we needed to.
If we want to run any tests with a fresh db we can call this function
"""
from migrate.versioning import api as mig
sa_url = settings['sqlalchemy.url']
migrate_repository = 'migrations'
# we're hackish here since we're going to assume the test db is whatever is
# after the last slash of the SA url sqlite:///somedb.db
db_name = sa_url[sa_url.rindex('/') + 1:]
try:
os.remove(db_name)
except:
pass
open(db_name, 'w').close()
mig.version_control(sa_url, migrate_repository)
mig.upgrade(sa_url, migrate_repository)
setup_db(settings)
| Add ability to set test ini via env variable | Add ability to set test ini via env variable
| Python | agpl-3.0 | charany1/Bookie,teodesson/Bookie,skmezanul/Bookie,teodesson/Bookie,skmezanul/Bookie,adamlincoln/Bookie,adamlincoln/Bookie,adamlincoln/Bookie,GreenLunar/Bookie,bookieio/Bookie,pombredanne/Bookie,wangjun/Bookie,adamlincoln/Bookie,pombredanne/Bookie,pombredanne/Bookie,skmezanul/Bookie,bookieio/Bookie,charany1/Bookie,GreenLunar/Bookie,wangjun/Bookie,bookieio/Bookie,bookieio/Bookie,GreenLunar/Bookie,charany1/Bookie,skmezanul/Bookie,GreenLunar/Bookie,teodesson/Bookie,wangjun/Bookie,wangjun/Bookie,teodesson/Bookie | <REPLACE_OLD> ConfigParser.ConfigParser()
ini.read('test.ini')
settings <REPLACE_NEW> ConfigParser.ConfigParser()
# we need to pull the right ini for the test we want to run
# by default pullup test.ini, but we might want to test mysql, pgsql, etc
test_ini = os.environ.get('BOOKIE_TEST_INI', None)
if test_ini:
ini.read(test_ini)
else:
ini.read('test.ini')
settings <REPLACE_END> <|endoftext|> import ConfigParser
import os
import unittest
from pyramid.config import Configurator
from pyramid import testing
global_config = {}
ini = ConfigParser.ConfigParser()
# we need to pull the right ini for the test we want to run
# by default pullup test.ini, but we might want to test mysql, pgsql, etc
test_ini = os.environ.get('BOOKIE_TEST_INI', None)
if test_ini:
ini.read(test_ini)
else:
ini.read('test.ini')
settings = dict(ini.items('app:bookie'))
def setup_db(settings):
""" We need to create the test sqlite database to run our tests against
If the db exists, remove it
We're using the SA-Migrations API to create the db and catch it up to the
latest migration level for testing
In theory, we could use this API to do version specific testing as well if
we needed to.
If we want to run any tests with a fresh db we can call this function
"""
from migrate.versioning import api as mig
sa_url = settings['sqlalchemy.url']
migrate_repository = 'migrations'
# we're hackish here since we're going to assume the test db is whatever is
# after the last slash of the SA url sqlite:///somedb.db
db_name = sa_url[sa_url.rindex('/') + 1:]
try:
os.remove(db_name)
except:
pass
open(db_name, 'w').close()
mig.version_control(sa_url, migrate_repository)
mig.upgrade(sa_url, migrate_repository)
setup_db(settings)
| Add ability to set test ini via env variable
import ConfigParser
import os
import unittest
from pyramid.config import Configurator
from pyramid import testing
global_config = {}
ini = ConfigParser.ConfigParser()
ini.read('test.ini')
settings = dict(ini.items('app:bookie'))
def setup_db(settings):
""" We need to create the test sqlite database to run our tests against
If the db exists, remove it
We're using the SA-Migrations API to create the db and catch it up to the
latest migration level for testing
In theory, we could use this API to do version specific testing as well if
we needed to.
If we want to run any tests with a fresh db we can call this function
"""
from migrate.versioning import api as mig
sa_url = settings['sqlalchemy.url']
migrate_repository = 'migrations'
# we're hackish here since we're going to assume the test db is whatever is
# after the last slash of the SA url sqlite:///somedb.db
db_name = sa_url[sa_url.rindex('/') + 1:]
try:
os.remove(db_name)
except:
pass
open(db_name, 'w').close()
mig.version_control(sa_url, migrate_repository)
mig.upgrade(sa_url, migrate_repository)
setup_db(settings)
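Editor's note: the added if/else is the long form of a one-call idiom; os.environ.get with a default collapses both branches (equivalent sketch):
import os
import ConfigParser

ini = ConfigParser.ConfigParser()
ini.read(os.environ.get('BOOKIE_TEST_INI', 'test.ini'))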
|
6fcc041c45dc426d570aa4c44e48c3fc9d8fd5c0 | settings/settings.py | settings/settings.py | # This file contains the project wide settings. It is not
# part of version control and it should be adapted to
# suit each deployment.
from os import environ
# Use the absolute path to the directory that stores the data.
# This can differ per deployment
DATA_DIRECTORY = "/cheshire3/clic/dbs/dickens/data/"
#TODO: make the cache settings imported in api.py
CACHE_DIR = ""
CACHE_LOCK = ""
# Check whether there are local settings.
# If there are, then overwrite the above settings
# with the specific settings defined in the local settings
try:
environ['CLIC_SETTINGS'] == 'local'
from local_settings import *
print 'Using the local settings (local_settings.py)'
except KeyError:
print 'Using the standard settings file (settings.py)'
| # This file contains the project wide settings. It is not
# part of version control and it should be adapted to
# suit each deployment.
from os import environ
# Use the absolute path to the directory that stores the data.
# This can differ per deployment
DATA_DIRECTORY = "/home/vagrant/code/clic-project/clic/dbs/dickens/data/"
#TODO: make the cache settings imported in api.py
CACHE_DIR = ""
CACHE_LOCK = ""
# Check whether there are local settings.
# If there are, then overwrite the above settings
# with the specific settings defined in the local settings
try:
environ['CLIC_SETTINGS'] == 'local'
from local_settings import *
print 'Using the local settings (local_settings.py)'
except KeyError:
print 'Using the standard settings file (settings.py)'
| Update the setting DATA_DIRECTORY to match the vagrant setup | Update the setting DATA_DIRECTORY to match the vagrant setup
| Python | mit | CentreForCorpusResearch/clic,CentreForCorpusResearch/clic,CentreForResearchInAppliedLinguistics/clic,CentreForResearchInAppliedLinguistics/clic,CentreForResearchInAppliedLinguistics/clic,CentreForCorpusResearch/clic | <REPLACE_OLD> "/cheshire3/clic/dbs/dickens/data/"
#TODO: <REPLACE_NEW> "/home/vagrant/code/clic-project/clic/dbs/dickens/data/"
#TODO: <REPLACE_END> <|endoftext|> # This file contains the project wide settings. It is not
# part of version control and it should be adapted to
# suit each deployment.
from os import environ
# Use the absolute path to the directory that stores the data.
# This can differ per deployment
DATA_DIRECTORY = "/home/vagrant/code/clic-project/clic/dbs/dickens/data/"
#TODO: make the cache settings imported in api.py
CACHE_DIR = ""
CACHE_LOCK = ""
# Check whether there are local settings.
# If there are, then overwrite the above settings
# with the specific settings defined in the local settings
try:
environ['CLIC_SETTINGS'] == 'local'
from local_settings import *
print 'Using the local settings (local_settings.py)'
except KeyError:
print 'Using the standard settings file (settings.py)'
| Update the setting DATA_DIRECTORY to match the vagrant setup
# This file contains the project wide settings. It is not
# part of version control and it should be adapted to
# suit each deployment.
from os import environ
# Use the absolute path to the directory that stores the data.
# This can differ per deployment
DATA_DIRECTORY = "/cheshire3/clic/dbs/dickens/data/"
#TODO: make the cache settings imported in api.py
CACHE_DIR = ""
CACHE_LOCK = ""
# Check whether there are local settings.
# If there are, then overwrite the above settings
# with the specific settings defined in the local settings
try:
environ['CLIC_SETTINGS'] == 'local'
from local_settings import *
print 'Using the local settings (local_settings.py)'
except KeyError:
print 'Using the standard settings file (settings.py)'
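Editor's note: the try/except above only tests that CLIC_SETTINGS exists — the result of the == 'local' comparison is discarded. If the value is meant to matter, an explicit form would be:
import os

if os.environ.get('CLIC_SETTINGS') == 'local':
    from local_settings import *
    print 'Using the local settings (local_settings.py)'
else:
    print 'Using the standard settings file (settings.py)'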
|
1648cec8667611aa7fec4bff12f873f8e6294f82 | scripts/bodyconf.py | scripts/bodyconf.py | #!/usr/bin/python2
# -*- coding: utf-8 -*-
pixval = {
'hair': 10,
'head': 20,
'upper': 30,
'arms': 40,
'lower': 50,
'legs': 60,
'feet': 70
}
groups = [
[10, 20],
[30, 40],
[50, 60],
[70]
]
| #!/usr/bin/python2
# -*- coding: utf-8 -*-
pixval = {
'hair': 10,
'head': 20,
'upper': 30,
'arms': 40,
'lower': 50,
'legs': 60,
'feet': 70
}
groups = [
[10, 20],
[30, 40],
[50, 60],
[70],
[0,10,20,30,40,50,60,70]
]
| Add whole image as an input | Add whole image as an input
| Python | mit | Cysu/Person-Reid,Cysu/Person-Reid,Cysu/Person-Reid,Cysu/Person-Reid,Cysu/Person-Reid | <REPLACE_OLD> [70]
]
<REPLACE_NEW> [70],
[0,10,20,30,40,50,60,70]
]
<REPLACE_END> <|endoftext|> #!/usr/bin/python2
# -*- coding: utf-8 -*-
pixval = {
'hair': 10,
'head': 20,
'upper': 30,
'arms': 40,
'lower': 50,
'legs': 60,
'feet': 70
}
groups = [
[10, 20],
[30, 40],
[50, 60],
[70],
[0,10,20,30,40,50,60,70]
]
| Add whole image as an input
#!/usr/bin/python2
# -*- coding: utf-8 -*-
pixval = {
'hair': 10,
'head': 20,
'upper': 30,
'arms': 40,
'lower': 50,
'legs': 60,
'feet': 70
}
groups = [
[10, 20],
[30, 40],
[50, 60],
[70]
]
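Editor's note: the new catch-all group repeats the label values by hand; it could equally be derived from pixval (sketch, assuming member order is irrelevant; values() returns a list on this file's Python 2):
groups.append(sorted([0] + pixval.values()))  # [0, 10, 20, 30, 40, 50, 60, 70]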
|
93d0f11658c7417371ec2e040397c7a572559585 | django_remote_submission/consumers.py | django_remote_submission/consumers.py | """Manage websocket connections."""
# -*- coding: utf-8 -*-
import json
from channels import Group
from channels.auth import channel_session_user_from_http, channel_session_user
from .models import Job
@channel_session_user_from_http
def ws_connect(message):
message.reply_channel.send({
'accept': True,
})
Group('job-user-{}'.format(message.user.username)).add(
message.reply_channel,
)
@channel_session_user
def ws_disconnect(message):
Group('job-user-{}'.format(message.user.username)).discard(
message.reply_channel,
)
| """Manage websocket connections."""
# -*- coding: utf-8 -*-
import json
from channels import Group
from channels.auth import channel_session_user_from_http, channel_session_user
from .models import Job
import json
@channel_session_user_from_http
def ws_connect(message):
last_jobs = message.user.jobs.order_by('-modified')[:10]
for job in last_jobs:
message.reply_channel.send({
'text': json.dumps({
'job_id': job.id,
'title': job.title,
'status': job.status,
}),
})
Group('job-user-{}'.format(message.user.username)).add(
message.reply_channel,
)
@channel_session_user
def ws_disconnect(message):
Group('job-user-{}'.format(message.user.username)).discard(
message.reply_channel,
)
| Send last jobs on initial connection | Send last jobs on initial connection
| Python | isc | ornl-ndav/django-remote-submission,ornl-ndav/django-remote-submission,ornl-ndav/django-remote-submission | <REPLACE_OLD> Job
@channel_session_user_from_http
def <REPLACE_NEW> Job
import json
@channel_session_user_from_http
def <REPLACE_END> <INSERT> last_jobs = message.user.jobs.order_by('-modified')[:10]
for job in last_jobs:
<INSERT_END> <REPLACE_OLD> 'accept': True,
<REPLACE_NEW> 'text': json.dumps({
'job_id': job.id,
'title': job.title,
'status': job.status,
}),
<REPLACE_END> <|endoftext|> """Manage websocket connections."""
# -*- coding: utf-8 -*-
import json
from channels import Group
from channels.auth import channel_session_user_from_http, channel_session_user
from .models import Job
import json
@channel_session_user_from_http
def ws_connect(message):
last_jobs = message.user.jobs.order_by('-modified')[:10]
for job in last_jobs:
message.reply_channel.send({
'text': json.dumps({
'job_id': job.id,
'title': job.title,
'status': job.status,
}),
})
Group('job-user-{}'.format(message.user.username)).add(
message.reply_channel,
)
@channel_session_user
def ws_disconnect(message):
Group('job-user-{}'.format(message.user.username)).discard(
message.reply_channel,
)
| Send last jobs on initial connection
"""Manage websocket connections."""
# -*- coding: utf-8 -*-
import json
from channels import Group
from channels.auth import channel_session_user_from_http, channel_session_user
from .models import Job
@channel_session_user_from_http
def ws_connect(message):
message.reply_channel.send({
'accept': True,
})
Group('job-user-{}'.format(message.user.username)).add(
message.reply_channel,
)
@channel_session_user
def ws_disconnect(message):
Group('job-user-{}'.format(message.user.username)).discard(
message.reply_channel,
)
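Editor's note: the frames sent on connect reach only this socket via reply_channel; later updates presumably arrive through the per-user group. Sending to that group looks like this in channels 1.x (username hypothetical):
import json
from channels import Group

Group('job-user-alice').send({
    'text': json.dumps({'job_id': 42, 'title': 'fit-run', 'status': 'success'}),
})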
|
903b33db0df2562df108f827177cb1dc0f39ed24 | setup.py | setup.py | #!/usr/bin/env python
import setuptools
setuptools.setup(
name='systemd-minecraft',
description='A systemd service file for one or more vanilla Minecraft servers',
author='Wurstmineberg',
author_email='[email protected]',
py_modules=['minecraft'],
install_requires=[
'docopt',
'loops',
'mcrcon',
'more-itertools',
'requests'
],
dependency_links=[
'git+https://github.com/fenhl/python-loops.git#egg=loops'
]
)
| #!/usr/bin/env python
import setuptools
setuptools.setup(
name='systemd-minecraft',
description='A systemd service file for one or more vanilla Minecraft servers',
author='Wurstmineberg',
author_email='[email protected]',
py_modules=['minecraft'],
install_requires=[
'docopt',
'loops',
'mcrcon',
'more-itertools',
'requests'
],
dependency_links=[
'git+https://github.com/fenhl/python-loops.git#egg=loops',
'git+https://github.com/wurstmineberg/MCRcon.git#egg=mcrcon'
]
)
| Add dependency link for mcrcon | Add dependency link for mcrcon
| Python | mit | wurstmineberg/systemd-minecraft | <REPLACE_OLD> 'git+https://github.com/fenhl/python-loops.git#egg=loops'
<REPLACE_NEW> 'git+https://github.com/fenhl/python-loops.git#egg=loops',
'git+https://github.com/wurstmineberg/MCRcon.git#egg=mcrcon'
<REPLACE_END> <|endoftext|> #!/usr/bin/env python
import setuptools
setuptools.setup(
name='systemd-minecraft',
description='A systemd service file for one or more vanilla Minecraft servers',
author='Wurstmineberg',
author_email='[email protected]',
py_modules=['minecraft'],
install_requires=[
'docopt',
'loops',
'mcrcon',
'more-itertools',
'requests'
],
dependency_links=[
'git+https://github.com/fenhl/python-loops.git#egg=loops',
'git+https://github.com/wurstmineberg/MCRcon.git#egg=mcrcon'
]
)
| Add dependency link for mcrcon
#!/usr/bin/env python
import setuptools
setuptools.setup(
name='systemd-minecraft',
description='A systemd service file for one or more vanilla Minecraft servers',
author='Wurstmineberg',
author_email='[email protected]',
py_modules=['minecraft'],
install_requires=[
'docopt',
'loops',
'mcrcon',
'more-itertools',
'requests'
],
dependency_links=[
'git+https://github.com/fenhl/python-loops.git#egg=loops'
]
)
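
Both setup.py variants above rely on dependency_links, which pip honored only behind --process-dependency-links and removed outright in pip 19.0. A hedged modern equivalent (an assumption for illustration, not part of this commit) inlines the git sources as PEP 508 direct references, which pip resolves natively from 18.1 on:

import setuptools

setuptools.setup(
    name='systemd-minecraft',
    py_modules=['minecraft'],
    install_requires=[
        'docopt',
        'loops @ git+https://github.com/fenhl/python-loops.git',
        'mcrcon @ git+https://github.com/wurstmineberg/MCRcon.git',
        'more-itertools',
        'requests',
    ],
)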
|
fcde68e954eab9f1b158928f9d30633523d41d94 | corehq/apps/userreports/management/commands/resave_couch_forms_and_cases.py | corehq/apps/userreports/management/commands/resave_couch_forms_and_cases.py | from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import csv
import datetime
from django.core.management.base import BaseCommand
from corehq.util.couch import IterDB
from corehq.util.log import with_progress_bar
from couchforms.models import XFormInstance
from dimagi.utils.couch.database import iter_docs
class Command(BaseCommand):
help = ("Save a bunch of couch documents so they are re-sent to kafka. "
"Pass in a file with one doc id per line")
def add_arguments(self, parser):
parser.add_argument('ids_file')
def handle(self, ids_file, **options):
with open(ids_file) as f:
doc_ids = [line.strip() for line in f]
db = XFormInstance.get_db() # Both forms and cases are in here
with IterDB(db) as iter_db:
for doc in iter_docs(db, with_progress_bar(doc_ids)):
iter_db.save(doc)
print("{} docs saved".format(len(iter_db.saved_ids)))
print("{} docs errored".format(len(iter_db.error_ids)))
not_found = len(doc_ids) - len(iter_db.saved_ids) - len(iter_db.error_ids)
print("{} docs not found".format(not_found))
        filename = '{}_{}.csv'.format(ids_file, datetime.datetime.now())
with open(filename, 'w') as f:
writer = csv.writer(f)
writer.writerow(['doc_id', 'status'])
for doc_id in doc_ids:
if doc_id in iter_db.saved_ids:
status = "saved"
                elif doc_id in iter_db.error_ids:
status = "errored"
else:
status = "not_found"
writer.writerow([doc_id, status])
print("Saved results to {}".format(filename))
| Add mgmt cmd to re-save a list of form/case IDs | Add mgmt cmd to re-save a list of form/case IDs
https://manage.dimagi.com/default.asp?263644
| Python | bsd-3-clause | dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq | <REPLACE_OLD> <REPLACE_NEW> from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import csv
import datetime
from django.core.management.base import BaseCommand
from corehq.util.couch import IterDB
from corehq.util.log import with_progress_bar
from couchforms.models import XFormInstance
from dimagi.utils.couch.database import iter_docs
class Command(BaseCommand):
help = ("Save a bunch of couch documents so they are re-sent to kafka. "
"Pass in a file with one doc id per line")
def add_arguments(self, parser):
parser.add_argument('ids_file')
def handle(self, ids_file, **options):
with open(ids_file) as f:
doc_ids = [line.strip() for line in f]
db = XFormInstance.get_db() # Both forms and cases are in here
with IterDB(db) as iter_db:
for doc in iter_docs(db, with_progress_bar(doc_ids)):
iter_db.save(doc)
print("{} docs saved".format(len(iter_db.saved_ids)))
print("{} docs errored".format(len(iter_db.error_ids)))
not_found = len(doc_ids) - len(iter_db.saved_ids) - len(iter_db.error_ids)
print("{} docs not found".format(not_found))
        filename = '{}_{}.csv'.format(ids_file, datetime.datetime.now())
with open(filename, 'w') as f:
writer = csv.writer(f)
writer.writerow(['doc_id', 'status'])
for doc_id in doc_ids:
if doc_id in iter_db.saved_ids:
status = "saved"
                elif doc_id in iter_db.error_ids:
status = "errored"
else:
status = "not_found"
writer.writerow([doc_id, status])
print("Saved results to {}".format(filename))
<REPLACE_END> <|endoftext|> from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import csv
import datetime
from django.core.management.base import BaseCommand
from corehq.util.couch import IterDB
from corehq.util.log import with_progress_bar
from couchforms.models import XFormInstance
from dimagi.utils.couch.database import iter_docs
class Command(BaseCommand):
help = ("Save a bunch of couch documents so they are re-sent to kafka. "
"Pass in a file with one doc id per line")
def add_arguments(self, parser):
parser.add_argument('ids_file')
def handle(self, ids_file, **options):
with open(ids_file) as f:
doc_ids = [line.strip() for line in f]
db = XFormInstance.get_db() # Both forms and cases are in here
with IterDB(db) as iter_db:
for doc in iter_docs(db, with_progress_bar(doc_ids)):
iter_db.save(doc)
print("{} docs saved".format(len(iter_db.saved_ids)))
print("{} docs errored".format(len(iter_db.error_ids)))
not_found = len(doc_ids) - len(iter_db.saved_ids) - len(iter_db.error_ids)
print("{} docs not found".format(not_found))
        filename = '{}_{}.csv'.format(ids_file, datetime.datetime.now())
with open(filename, 'w') as f:
writer = csv.writer(f)
writer.writerow(['doc_id', 'status'])
for doc_id in doc_ids:
if doc_id in iter_db.saved_ids:
status = "saved"
                elif doc_id in iter_db.error_ids:
status = "errored"
else:
status = "not_found"
writer.writerow([doc_id, status])
print("Saved results to {}".format(filename))
| Add mgmt cmd to re-save a list of form/case IDs
https://manage.dimagi.com/default.asp?263644
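
The command above leans on IterDB's saved_ids/error_ids bookkeeping: re-saving an unchanged document still bumps its _rev, which is what pushes it back onto the change feed for kafka. A rough sketch of the contract the command assumes (an illustration, not the real corehq.util.couch implementation):

class IterDBSketch(object):
    """Buffer couch docs and flush them in bulk, tracking outcomes."""

    def __init__(self, db, chunksize=100):
        self.db = db
        self.chunksize = chunksize
        self.to_save = []
        self.saved_ids = set()
        self.error_ids = set()

    def __enter__(self):
        return self

    def save(self, doc):
        self.to_save.append(doc)
        if len(self.to_save) >= self.chunksize:
            self._commit()

    def _commit(self):
        # couchdbkit-style bulk_save returns one result dict per doc,
        # carrying 'error' on conflicts instead of a new 'rev'.
        for result in self.db.bulk_save(self.to_save):
            if 'error' in result:
                self.error_ids.add(result['id'])
            else:
                self.saved_ids.add(result['id'])
        self.to_save = []

    def __exit__(self, exc_type, exc_val, exc_tb):
        self._commit()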
|
|
fa8b40b8ebc088f087ff76c36068fea67dae0824 | rnacentral/portal/management/commands/update_coordinate_names.py | rnacentral/portal/management/commands/update_coordinate_names.py | """
Copyright [2009-2017] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.core.management.base import BaseCommand
from django.db import connection
class Command(BaseCommand):
"""
Usage:
python manage.py update_coordinate_names
"""
def handle(self, *args, **options):
"""
Main function, called by django.
"""
sql = """
UPDATE rnc_coordinates a
SET
name = b.ensembl_name,
primary_start = local_start,
primary_end = local_end
FROM ensembl_insdc_mapping b
WHERE
a.primary_accession = b.insdc
AND a.name IS NULL
"""
with connection.cursor() as cursor:
cursor.execute(sql)
| Add management command for updating genome coordinate names using Ensembl-INSDC mapping | Add management command for updating genome coordinate names using Ensembl-INSDC mapping
| Python | apache-2.0 | RNAcentral/rnacentral-webcode,RNAcentral/rnacentral-webcode,RNAcentral/rnacentral-webcode,RNAcentral/rnacentral-webcode | <REPLACE_OLD> <REPLACE_NEW> """
Copyright [2009-2017] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.core.management.base import BaseCommand
from django.db import connection
class Command(BaseCommand):
"""
Usage:
python manage.py update_coordinate_names
"""
def handle(self, *args, **options):
"""
Main function, called by django.
"""
sql = """
UPDATE rnc_coordinates a
SET
name = b.ensembl_name,
primary_start = local_start,
primary_end = local_end
FROM ensembl_insdc_mapping b
WHERE
a.primary_accession = b.insdc
AND a.name IS NULL
"""
with connection.cursor() as cursor:
cursor.execute(sql)
<REPLACE_END> <|endoftext|> """
Copyright [2009-2017] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.core.management.base import BaseCommand
from django.db import connection
class Command(BaseCommand):
"""
Usage:
python manage.py update_coordinate_names
"""
def handle(self, *args, **options):
"""
Main function, called by django.
"""
sql = """
UPDATE rnc_coordinates a
SET
name = b.ensembl_name,
primary_start = local_start,
primary_end = local_end
FROM ensembl_insdc_mapping b
WHERE
a.primary_accession = b.insdc
AND a.name IS NULL
"""
with connection.cursor() as cursor:
cursor.execute(sql)
| Add management command for updating genome coordinate names using Ensembl-INSDC mapping
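
The UPDATE ... FROM join in the command above is PostgreSQL-specific syntax for updating one table from another. Before mutating anything, a read-only dry run with the same join predicate shows how many coordinate rows would be renamed; this sketch reuses the record's own tables and is not code from the commit:

from django.db import connection

DRY_RUN_SQL = """
SELECT count(*)
FROM rnc_coordinates a
JOIN ensembl_insdc_mapping b ON a.primary_accession = b.insdc
WHERE a.name IS NULL
"""

with connection.cursor() as cursor:
    cursor.execute(DRY_RUN_SQL)
    print(cursor.fetchone()[0])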
|