seq_id
string | text
string | repo_name
string | sub_path
string | file_name
string | file_ext
string | file_size_in_byte
int64 | program_lang
string | lang
string | doc_type
string | stars
int64 | dataset
string | pt
string | api
list |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
182084307
|
import sys
import json
import os
import datetime
try:
from unittest import mock
except ImportError:
import mock
try:
from io import StringIO
except ImportError:
from StringIO import StringIO
import py
import pytest
import requests
import arrow
from dateutil.tz.tz import tzutc
from click import get_app_dir
from watson import Watson, WatsonError
from watson.watson import ConfigurationError, ConfigParser
from watson.utils import get_start_time_for_period
# Directory holding on-disk test fixtures, resolved relative to this module.
TEST_FIXTURE_DIR = py.path.local(
    os.path.dirname(
        os.path.realpath(__file__)
    )
) / 'resources'

# The builtins module is named differently on Python 2; ``builtins`` is used
# throughout to build mock targets such as '%s.open' % builtins.
PY2 = sys.version_info[0] == 2
if not PY2:
    builtins = 'builtins'
else:
    builtins = '__builtin__'
def mock_datetime(dt, dt_module):
    """Patch ``dt_module.datetime`` so now()/utcnow()/today() return *dt*.

    The replacement class is built through a custom metaclass whose
    ``__instancecheck__`` delegates to the real ``datetime.datetime``, so
    ``isinstance(obj, datetime.datetime)`` checks keep passing inside the
    patched code. Returns the ``mock.patch`` context manager.
    """
    class DateTimeMeta(type):

        @classmethod
        def __instancecheck__(mcs, obj):
            # Keep isinstance() working against the real datetime type.
            return isinstance(obj, datetime.datetime)

    class BaseMockedDateTime(datetime.datetime):

        @classmethod
        def now(cls, tz=None):
            # Attach the requested tz to the fixed instant instead of
            # reading the wall clock.
            return dt.replace(tzinfo=tz)

        @classmethod
        def utcnow(cls):
            return dt

        @classmethod
        def today(cls):
            return dt

    # Rebuild the subclass through the metaclass so the instance check above
    # is actually consulted.
    MockedDateTime = DateTimeMeta('datetime', (BaseMockedDateTime,), {})
    return mock.patch.object(dt_module, 'datetime', MockedDateTime)
@pytest.fixture
def config_dir(tmpdir):
    """Provide the path of a fresh, per-test configuration directory."""
    directory = tmpdir.mkdir('config')
    return str(directory)
def mock_read(content):
    """Build a ``ConfigParser.read`` replacement that parses *content*.

    The returned callable has the ``read(self, name)`` signature and feeds
    the given string to the parser via ``self._read`` instead of opening a
    file.
    """
    def _read_from_string(self, name):
        return self._read(StringIO(content), name)
    return _read_from_string
@pytest.fixture
def watson(config_dir):
    """Return a Watson instance backed by an isolated config directory."""
    return Watson(config_dir=config_dir)
# current
def test_current(watson):
    """The current frame is parsed from the state file: project/start/tags."""
    content = json.dumps({'project': 'foo', 'start': 0, 'tags': ['A', 'B']})
    with mock.patch('%s.open' % builtins, mock.mock_open(read_data=content)):
        assert watson.current['project'] == 'foo'
        assert watson.current['start'] == arrow.get(0)
        assert watson.current['tags'] == ['A', 'B']
def test_current_with_empty_file(watson):
    """A zero-byte state file means there is no current frame."""
    with mock.patch('%s.open' % builtins, mock.mock_open(read_data="")):
        with mock.patch('os.path.getsize', return_value=0):
            assert watson.current == {}
def test_current_with_nonexistent_file(watson):
    """A missing state file (open raises IOError) means no current frame."""
    with mock.patch('%s.open' % builtins, side_effect=IOError):
        assert watson.current == {}
def test_current_watson_non_valid_json(watson):
    """Invalid JSON in the state file raises WatsonError."""
    content = "{'foo': bar}"
    with mock.patch('%s.open' % builtins, mock.mock_open(read_data=content)):
        with mock.patch('os.path.getsize', return_value=len(content)):
            with pytest.raises(WatsonError):
                watson.current
def test_current_with_given_state(config_dir):
    """A ``current`` passed to the constructor overrides the state file."""
    content = json.dumps({'project': 'foo', 'start': 0})
    watson = Watson(current={'project': 'bar', 'start': 0},
                    config_dir=config_dir)
    with mock.patch('%s.open' % builtins, mock.mock_open(read_data=content)):
        assert watson.current['project'] == 'bar'
def test_current_with_empty_given_state(config_dir):
    """An explicitly empty ``current`` suppresses reading the state file."""
    content = json.dumps({'project': 'foo', 'start': 0})
    watson = Watson(current=[], config_dir=config_dir)
    with mock.patch('%s.open' % builtins, mock.mock_open(read_data=content)):
        assert watson.current == {}
# last_sync
def test_last_sync(watson):
    """last_sync is loaded from the sync file as an Arrow timestamp."""
    now = arrow.get(123)
    # NOTE: ``timestamp`` is a property in the arrow version this suite
    # targets (no parentheses).
    content = json.dumps(now.timestamp)
    with mock.patch('%s.open' % builtins, mock.mock_open(read_data=content)):
        assert watson.last_sync == now
def test_last_sync_with_empty_file(watson):
    """An empty sync file defaults last_sync to the epoch."""
    with mock.patch('%s.open' % builtins, mock.mock_open(read_data="")):
        with mock.patch('os.path.getsize', return_value=0):
            assert watson.last_sync == arrow.get(0)
def test_last_sync_with_nonexistent_file(watson):
    """A missing sync file defaults last_sync to the epoch."""
    with mock.patch('%s.open' % builtins, side_effect=IOError):
        assert watson.last_sync == arrow.get(0)
def test_last_sync_watson_non_valid_json(watson):
    """Invalid JSON in the sync file raises WatsonError."""
    content = "{'foo': bar}"
    with mock.patch('%s.open' % builtins, mock.mock_open(read_data=content)):
        with mock.patch('os.path.getsize', return_value=len(content)):
            with pytest.raises(WatsonError):
                watson.last_sync
def test_last_sync_with_given_state(config_dir):
    """A ``last_sync`` passed to the constructor overrides the sync file."""
    content = json.dumps(123)
    now = arrow.now()
    watson = Watson(last_sync=now, config_dir=config_dir)
    with mock.patch('%s.open' % builtins, mock.mock_open(read_data=content)):
        assert watson.last_sync == now
def test_last_sync_with_empty_given_state(config_dir):
    """``last_sync=None`` suppresses the file and defaults to the epoch."""
    content = json.dumps(123)
    watson = Watson(last_sync=None, config_dir=config_dir)
    with mock.patch('%s.open' % builtins, mock.mock_open(read_data=content)):
        assert watson.last_sync == arrow.get(0)
# frames
def test_frames(watson):
    """Frames are parsed from the frames file as [start, stop, project, id, tags]."""
    content = json.dumps([[0, 10, 'foo', None, ['A', 'B', 'C']]])
    with mock.patch('%s.open' % builtins, mock.mock_open(read_data=content)):
        assert len(watson.frames) == 1
        assert watson.frames[0].project == 'foo'
        assert watson.frames[0].start == arrow.get(0)
        assert watson.frames[0].stop == arrow.get(10)
        assert watson.frames[0].tags == ['A', 'B', 'C']
def test_frames_without_tags(watson):
    """Frames stored without a tags field default to an empty tag list."""
    content = json.dumps([[0, 10, 'foo', None]])
    with mock.patch('%s.open' % builtins, mock.mock_open(read_data=content)):
        assert len(watson.frames) == 1
        assert watson.frames[0].project == 'foo'
        assert watson.frames[0].start == arrow.get(0)
        assert watson.frames[0].stop == arrow.get(10)
        assert watson.frames[0].tags == []
def test_frames_with_empty_file(watson):
    """A zero-byte frames file yields an empty frame collection."""
    with mock.patch('%s.open' % builtins, mock.mock_open(read_data="")):
        with mock.patch('os.path.getsize', return_value=0):
            assert len(watson.frames) == 0
def test_frames_with_nonexistent_file(watson):
    """A missing frames file yields an empty frame collection."""
    with mock.patch('%s.open' % builtins, side_effect=IOError):
        assert len(watson.frames) == 0
def test_frames_watson_non_valid_json(watson):
    """Invalid JSON in the frames file raises WatsonError."""
    content = "{'foo': bar}"
    with mock.patch('%s.open' % builtins, mock.mock_open(read_data=content)):
        # Bug fix: the original did ``mock_getsize.return_value(len(content))``,
        # which *calls* the default MagicMock return value instead of
        # configuring it, so getsize returned a MagicMock. Configure the
        # return value directly, matching the other *_non_valid_json tests.
        with mock.patch('os.path.getsize', return_value=len(content)):
            with pytest.raises(WatsonError):
                watson.frames
def test_given_frames(config_dir):
    """Frames passed to the constructor override the frames file."""
    content = json.dumps([[0, 10, 'foo', None, ['A']]])
    watson = Watson(frames=[[0, 10, 'bar', None, ['A', 'B']]],
                    config_dir=config_dir)
    with mock.patch('%s.open' % builtins, mock.mock_open(read_data=content)):
        assert len(watson.frames) == 1
        assert watson.frames[0].project == 'bar'
        assert watson.frames[0].tags == ['A', 'B']
def test_frames_with_empty_given_state(config_dir):
    """An explicitly empty frames list suppresses reading the frames file."""
    content = json.dumps([[0, 10, 'foo', None, ['A']]])
    watson = Watson(frames=[], config_dir=config_dir)
    with mock.patch('%s.open' % builtins, mock.mock_open(read_data=content)):
        assert len(watson.frames) == 0
# config
def test_empty_config_dir():
    """Without an explicit config_dir, Watson falls back to the Click app dir."""
    default_watson = Watson()
    expected_dir = get_app_dir('watson')
    assert default_watson._dir == expected_dir
def test_wrong_config(watson):
    """Content that is not valid INI raises ConfigurationError."""
    content = u"""
toto
    """
    with mock.patch.object(ConfigParser, 'read', mock_read(content)):
        with pytest.raises(ConfigurationError):
            watson.config
def test_empty_config(watson):
    """An empty configuration parses to zero sections."""
    with mock.patch.object(ConfigParser, 'read', mock_read(u'')):
        assert len(watson.config.sections()) == 0
def test_config_get(watson):
    """config.get returns the value, '' for empty options, and the supplied
    default (or None) for missing options and sections."""
    content = u"""
[backend]
url = foo
token =
"""
    with mock.patch.object(ConfigParser, 'read', mock_read(content)):
        config = watson.config
        assert config.get('backend', 'url') == 'foo'
        assert config.get('backend', 'token') == ''
        assert config.get('backend', 'foo') is None
        assert config.get('backend', 'foo', 'bar') == 'bar'
        assert config.get('option', 'spamm') is None
        assert config.get('option', 'spamm', 'eggs') == 'eggs'
def test_config_getboolean(watson):
    """getboolean treats 1/on/true/yes as True, everything else as False;
    the default applies only to empty or missing options."""
    content = u"""
[options]
flag1 = 1
flag2 = ON
flag3 = True
flag4 = yes
flag5 = false
flag6 =
"""
    with mock.patch.object(ConfigParser, 'read', mock_read(content)):
        config = watson.config
        assert config.getboolean('options', 'flag1') is True
        assert config.getboolean('options', 'flag1', False) is True
        assert config.getboolean('options', 'flag2') is True
        assert config.getboolean('options', 'flag3') is True
        assert config.getboolean('options', 'flag4') is True
        assert config.getboolean('options', 'flag5') is False
        assert config.getboolean('options', 'flag6') is False
        assert config.getboolean('options', 'flag6', True) is True
        assert config.getboolean('options', 'missing') is False
        assert config.getboolean('options', 'missing', True) is True
def test_config_getint(watson):
    """getint converts present values to int, returns the default untouched
    for missing options, and raises ValueError on non-numeric/empty values."""
    content = u"""
[options]
value1 = 42
value2 = spamm
value3 =
"""
    with mock.patch.object(ConfigParser, 'read', mock_read(content)):
        config = watson.config
        assert config.getint('options', 'value1') == 42
        assert config.getint('options', 'value1', 666) == 42
        assert config.getint('options', 'missing') is None
        assert config.getint('options', 'missing', 23) == 23
        # default is not converted!
        assert config.getint('options', 'missing', '42') == '42'
        assert config.getint('options', 'missing', 6.66) == 6.66
        with pytest.raises(ValueError):
            config.getint('options', 'value2')
        with pytest.raises(ValueError):
            config.getint('options', 'value3')
def test_config_getfloat(watson):
    """getfloat converts present values (including ints) to float, returns
    the default untouched for missing options, and raises ValueError on
    non-numeric/empty values."""
    content = u"""
[options]
value1 = 3.14
value2 = 42
value3 = spamm
value4 =
"""
    with mock.patch.object(ConfigParser, 'read', mock_read(content)):
        config = watson.config
        assert config.getfloat('options', 'value1') == 3.14
        assert config.getfloat('options', 'value1', 6.66) == 3.14
        assert config.getfloat('options', 'value2') == 42.0
        assert isinstance(config.getfloat('options', 'value2'), float)
        assert config.getfloat('options', 'missing') is None
        assert config.getfloat('options', 'missing', 3.14) == 3.14
        # default is not converted!
        assert config.getfloat('options', 'missing', '3.14') == '3.14'
        with pytest.raises(ValueError):
            config.getfloat('options', 'value3')
        with pytest.raises(ValueError):
            config.getfloat('options', 'value4')
def test_config_getlist(watson):
    """getlist splits a multi-line/quoted option into a list of strings.

    NOTE(review): the continuation-line indentation inside ``content`` below
    is reconstructed (the original indentation was lost in transit); INI
    continuation lines must be indented for ConfigParser to attach them to
    the preceding option — confirm against the original file.
    """
    content = u"""
# empty lines in option values (including the first one) are discarded
[options]
value1 =
    one
    two three
    four
    five six
# multiple inner space preserved
value2 = one "two three" four 'five six'
value3 = one
    two three
# outer space stripped
value4 = one
    two three
    four
# hash char not at start of line does not start comment
value5 = one
    two #three
    four # five
"""
    with mock.patch.object(ConfigParser, 'read', mock_read(content)):
        gl = watson.config.getlist
        assert gl('options', 'value1') == ['one', 'two three', 'four',
                                           'five six']
        assert gl('options', 'value2') == ['one', 'two three', 'four',
                                           'five six']
        assert gl('options', 'value3') == ['one', 'two three']
        assert gl('options', 'value4') == ['one', 'two three', 'four']
        assert gl('options', 'value5') == ['one', 'two #three', 'four # five']
        # default values
        assert gl('options', 'novalue') == []
        assert gl('options', 'novalue', None) == []
        assert gl('options', 'novalue', 42) == 42
        assert gl('nosection', 'dummy') == []
        assert gl('nosection', 'dummy', None) == []
        assert gl('nosection', 'dummy', 42) == 42
        default = gl('nosection', 'dummy')
        default.append(42)
        assert gl('nosection', 'dummy') != [42], (
            "Modifying default return value should not have side effect.")
def test_set_config(watson):
    """A config object assigned to ``watson.config`` can be read back."""
    config = ConfigParser()
    config.set('foo', 'bar', 'lol')
    watson.config = config
    # Bug fix: the original line lacked ``assert`` and so compared the
    # values without checking anything.
    assert watson.config.get('foo', 'bar') == 'lol'
# start
def test_start_new_project(watson):
    """Starting a project records its name, a start time and the given tags."""
    watson.start('foo', ['A', 'B'])
    assert watson.is_started is True
    running = watson.current
    assert running != {}
    assert running.get('project') == 'foo'
    assert isinstance(running.get('start'), arrow.Arrow)
    assert running.get('tags') == ['A', 'B']
def test_start_new_project_without_tags(watson):
    """Starting without tags records an empty tag list."""
    watson.start('foo')
    assert watson.is_started is True
    running = watson.current
    assert running != {}
    assert running.get('project') == 'foo'
    assert isinstance(running.get('start'), arrow.Arrow)
    assert running.get('tags') == []
def test_start_two_projects(watson):
    """Starting while already started raises and keeps the first project."""
    watson.start('foo')
    pytest.raises(WatsonError, watson.start, 'bar')
    still_running = watson.current
    assert still_running != {}
    assert still_running['project'] == 'foo'
    assert watson.is_started is True
# stop
def test_stop_started_project(watson):
    """Stopping a started project archives exactly one tagged frame."""
    watson.start('foo', tags=['A', 'B'])
    watson.stop()
    assert watson.current == {}
    assert watson.is_started is False
    assert len(watson.frames) == 1
    frame = watson.frames[0]
    assert frame.project == 'foo'
    assert isinstance(frame.start, arrow.Arrow)
    assert isinstance(frame.stop, arrow.Arrow)
    assert frame.tags == ['A', 'B']
def test_stop_started_project_without_tags(watson):
    """Stopping an untagged project archives one frame with no tags."""
    watson.start('foo')
    watson.stop()
    assert watson.current == {}
    assert watson.is_started is False
    assert len(watson.frames) == 1
    frame = watson.frames[0]
    assert frame.project == 'foo'
    assert isinstance(frame.start, arrow.Arrow)
    assert isinstance(frame.stop, arrow.Arrow)
    assert frame.tags == []
def test_stop_no_project(watson):
    """Stopping when nothing is running is an error."""
    pytest.raises(WatsonError, watson.stop)
# cancel
def test_cancel_started_project(watson):
    """Cancelling discards the running frame without archiving it."""
    watson.start('foo')
    watson.cancel()
    assert watson.current == {}
    assert len(watson.frames) == 0
def test_cancel_no_project(watson):
    """Cancelling with nothing running is an error."""
    pytest.raises(WatsonError, watson.cancel)
# save
def test_save_without_changes(watson):
    """save() writes nothing when no state has changed."""
    with mock.patch('%s.open' % builtins, mock.mock_open()):
        with mock.patch('json.dump') as json_mock:
            watson.save()
            assert not json_mock.called
def test_save_current(watson):
    """save() serialises the running frame: project, numeric start, tags."""
    watson.start('foo', ['A', 'B'])
    with mock.patch('%s.open' % builtins, mock.mock_open()):
        with mock.patch('json.dump') as json_mock:
            watson.save()
            assert json_mock.call_count == 1
            result = json_mock.call_args[0][0]
            assert result['project'] == 'foo'
            assert isinstance(result['start'], (int, float))
            assert result['tags'] == ['A', 'B']
def test_save_current_without_tags(watson):
    """save() serialises an untagged frame with an empty tag list, and
    dumps with ensure_ascii=False so unicode project names survive."""
    watson.start('foo')
    with mock.patch('%s.open' % builtins, mock.mock_open()):
        with mock.patch('json.dump') as json_mock:
            watson.save()
            assert json_mock.call_count == 1
            result = json_mock.call_args[0][0]
            assert result['project'] == 'foo'
            assert isinstance(result['start'], (int, float))
            assert result['tags'] == []
            dump_args = json_mock.call_args[1]
            assert dump_args['ensure_ascii'] is False
def test_save_empty_current(config_dir):
    """Clearing ``current`` is itself a change that gets persisted as {}."""
    watson = Watson(current={'project': 'foo', 'start': 0},
                    config_dir=config_dir)
    watson.current = {}
    with mock.patch('%s.open' % builtins, mock.mock_open()):
        with mock.patch('json.dump') as json_mock:
            watson.save()
            assert json_mock.call_count == 1
            result = json_mock.call_args[0][0]
            assert result == {}
def test_save_frames_no_change(config_dir):
    """save() does not rewrite frames that were only loaded, not modified."""
    watson = Watson(frames=[[0, 10, 'foo', None]],
                    config_dir=config_dir)
    with mock.patch('%s.open' % builtins, mock.mock_open()):
        with mock.patch('json.dump') as json_mock:
            watson.save()
            assert not json_mock.called
def test_save_added_frame(config_dir):
    """Adding a frame marks the collection dirty; save() writes both frames."""
    watson = Watson(frames=[[0, 10, 'foo', None]], config_dir=config_dir)
    watson.frames.add('bar', 10, 20, ['A'])
    with mock.patch('%s.open' % builtins, mock.mock_open()):
        with mock.patch('json.dump') as json_mock:
            watson.save()
            assert json_mock.call_count == 1
            result = json_mock.call_args[0][0]
            assert len(result) == 2
            # Serialised frame layout: [start, stop, project, id, tags, ...]
            assert result[0][2] == 'foo'
            assert result[0][4] == []
            assert result[1][2] == 'bar'
            assert result[1][4] == ['A']
def test_save_changed_frame(config_dir):
    """Replacing a frame in place marks it dirty; save() writes the new
    content with ensure_ascii=False."""
    watson = Watson(frames=[[0, 10, 'foo', None, ['A']]],
                    config_dir=config_dir)
    watson.frames[0] = ('bar', 0, 10, ['A', 'B'])
    with mock.patch('%s.open' % builtins, mock.mock_open()):
        with mock.patch('json.dump') as json_mock:
            watson.save()
            assert json_mock.call_count == 1
            result = json_mock.call_args[0][0]
            assert len(result) == 1
            assert result[0][2] == 'bar'
            assert result[0][4] == ['A', 'B']
            dump_args = json_mock.call_args[1]
            assert dump_args['ensure_ascii'] is False
def test_save_config_no_changes(watson):
    """save() does not rewrite the config when it was never reassigned."""
    with mock.patch('%s.open' % builtins, mock.mock_open()):
        with mock.patch.object(ConfigParser, 'write') as write_mock:
            watson.save()
            assert not write_mock.called
def test_save_config(watson):
    """Assigning a new config marks it dirty; save() writes it once."""
    with mock.patch('%s.open' % builtins, mock.mock_open()):
        with mock.patch.object(ConfigParser, 'write') as write_mock:
            watson.config = ConfigParser()
            watson.save()
            assert write_mock.call_count == 1
def test_save_last_sync(watson):
    """Setting last_sync marks it dirty; save() writes its timestamp."""
    now = arrow.now()
    watson.last_sync = now
    with mock.patch('%s.open' % builtins, mock.mock_open()):
        with mock.patch('json.dump') as json_mock:
            watson.save()
            assert json_mock.call_count == 1
            assert json_mock.call_args[0][0] == now.timestamp
def test_save_empty_last_sync(config_dir):
    """Clearing last_sync persists the epoch (0) timestamp."""
    watson = Watson(last_sync=arrow.now(), config_dir=config_dir)
    watson.last_sync = None
    with mock.patch('%s.open' % builtins, mock.mock_open()):
        with mock.patch('json.dump') as json_mock:
            watson.save()
            assert json_mock.call_count == 1
            assert json_mock.call_args[0][0] == 0
# push
def test_push_with_no_config(watson):
    """Pushing without a [backend] section raises WatsonError."""
    config = ConfigParser()
    watson.config = config
    with pytest.raises(WatsonError):
        watson.push(arrow.now())
def test_push_with_no_url(watson):
    """Pushing with a token but no backend url raises WatsonError."""
    config = ConfigParser()
    config.add_section('backend')
    config.set('backend', 'token', 'bar')
    watson.config = config
    with pytest.raises(WatsonError):
        watson.push(arrow.now())
def test_push_with_no_token(watson):
    """Pushing with a url but no auth token raises WatsonError."""
    config = ConfigParser()
    config.add_section('backend')
    config.set('backend', 'url', 'http://foo.com')
    watson.config = config
    with pytest.raises(WatsonError):
        watson.push(arrow.now())
def test_push(watson, monkeypatch):
    """push() POSTs only frames added after the given pull point, with
    project names mapped to their remote URLs and the token auth header.
    """
    config = ConfigParser()
    config.add_section('backend')
    config.set('backend', 'url', 'http://foo.com')
    config.set('backend', 'token', 'bar')
    # Frames before last_sync, between last_sync and last_pull, and after
    # last_pull — only the middle group (bar + lol) must be sent.
    watson.frames.add('foo', 1, 2)
    watson.frames.add('foo', 3, 4)
    watson.last_sync = arrow.now()
    watson.frames.add('bar', 1, 2, ['A', 'B'])
    watson.frames.add('lol', 1, 2)
    last_pull = arrow.now()
    watson.frames.add('foo', 1, 2)
    watson.frames.add('bar', 3, 4)
    monkeypatch.setattr(watson, '_get_remote_projects', lambda *args: [
        {'name': 'foo', 'url': '/projects/1/'},
        {'name': 'bar', 'url': '/projects/2/'},
        {'name': 'lol', 'url': '/projects/3/'},
    ])

    class Response:
        # Minimal stand-in for requests.Response: 201 Created.
        def __init__(self):
            self.status_code = 201

    # Naming fix: this patches ``requests.post``; the original bound it to
    # ``mock_put``, which was misleading.
    with mock.patch('requests.post') as mock_post:
        mock_post.return_value = Response()
        with mock.patch.object(
            Watson, 'config', new_callable=mock.PropertyMock
        ) as mock_config:
            mock_config.return_value = config
            watson.push(last_pull)
        requests.post.assert_called_once_with(
            mock.ANY,
            mock.ANY,
            headers={
                'content-type': 'application/json',
                'Authorization': "Token " + config.get('backend', 'token')
            }
        )
        frames_sent = json.loads(mock_post.call_args[0][1])
    assert len(frames_sent) == 2
    assert frames_sent[0].get('project') == '/projects/2/'
    assert frames_sent[0].get('tags') == ['A', 'B']
    assert frames_sent[1].get('project') == '/projects/3/'
    assert frames_sent[1].get('tags') == []
# pull
def test_pull_with_no_config(watson):
    """Pulling without a [backend] section raises ConfigurationError."""
    config = ConfigParser()
    watson.config = config
    with pytest.raises(ConfigurationError):
        watson.pull()
def test_pull_with_no_url(watson):
    """Pulling with a token but no backend url raises ConfigurationError."""
    config = ConfigParser()
    config.add_section('backend')
    config.set('backend', 'token', 'bar')
    watson.config = config
    with pytest.raises(ConfigurationError):
        watson.pull()
def test_pull_with_no_token(watson):
    """Pulling with a url but no auth token raises ConfigurationError."""
    config = ConfigParser()
    config.add_section('backend')
    config.set('backend', 'url', 'http://foo.com')
    watson.config = config
    with pytest.raises(ConfigurationError):
        watson.pull()
def test_pull(watson, monkeypatch):
    """pull() GETs frames since last_sync and stores them locally with
    remote project URLs resolved back to project names; an existing frame
    with the same id is overwritten."""
    config = ConfigParser()
    config.add_section('backend')
    config.set('backend', 'url', 'http://foo.com')
    config.set('backend', 'token', 'bar')
    watson.last_sync = arrow.now()
    # Local frame with id '1' will be replaced by the remote one below.
    watson.frames.add('foo', 1, 2, ['A', 'B'], id='1')
    monkeypatch.setattr(watson, '_get_remote_projects', lambda *args: [
        {'name': 'foo', 'url': '/projects/1/'},
        {'name': 'bar', 'url': '/projects/2/'},
    ])

    class Response:
        # Minimal stand-in for requests.Response: 200 OK plus payload.
        def __init__(self):
            self.status_code = 200

        def json(self):
            return [
                {'project': '/projects/1/', 'start': 3, 'stop': 4, 'id': '1',
                 'tags': ['A']},
                {'project': '/projects/2/', 'start': 4, 'stop': 5, 'id': '2',
                 'tags': []}
            ]

    with mock.patch('requests.get') as mock_get:
        mock_get.return_value = Response()
        with mock.patch.object(
            Watson, 'config', new_callable=mock.PropertyMock
        ) as mock_config:
            mock_config.return_value = config
            watson.pull()
        requests.get.assert_called_once_with(
            mock.ANY,
            params={'last_sync': watson.last_sync},
            headers={
                'content-type': 'application/json',
                'Authorization': "Token " + config.get('backend', 'token')
            }
        )
    assert len(watson.frames) == 2
    assert watson.frames[0].id == '1'
    assert watson.frames[0].project == 'foo'
    assert watson.frames[0].start.timestamp == 3
    assert watson.frames[0].stop.timestamp == 4
    assert watson.frames[0].tags == ['A']
    assert watson.frames[1].id == '2'
    assert watson.frames[1].project == 'bar'
    assert watson.frames[1].start.timestamp == 4
    assert watson.frames[1].stop.timestamp == 5
    assert watson.frames[1].tags == []
# projects
def test_projects(watson):
    """Projects are reported uniquely and sorted alphabetically."""
    names = ('foo', 'bar', 'bar', 'bar', 'foo', 'lol')
    for project_name in names:
        watson.frames.add(project_name, 0, 0)
    assert watson.projects == ['bar', 'foo', 'lol']
def test_projects_no_frames(watson):
    """A watson with no frames reports no projects."""
    projects = watson.projects
    assert projects == []
# tags
def test_tags(watson):
    """Tags are aggregated across frames, deduplicated and sorted."""
    samples = (
        ('foo', ('A', 'D')),
        ('bar', ('A', 'C')),
        ('foo', ('B', 'C')),
        ('lol', ()),
        # Bug fix: ``('C')`` is just the string 'C', not a tuple — a
        # multi-character tag would have been split into characters. Use a
        # proper 1-tuple.
        ('bar', ('C',)),
    )
    for name, tags in samples:
        watson.frames.add(name, 0, 0, tags)
    assert watson.tags == ['A', 'B', 'C', 'D']
def test_tags_no_frames(watson):
    """A watson with no frames reports no tags."""
    tags = watson.tags
    assert tags == []
# merge
@pytest.mark.datafiles(
    TEST_FIXTURE_DIR / 'frames-with-conflict',
)
def test_merge_report(watson, datafiles):
    """merge_report splits an external frames file into conflicting frames
    (known id, different content) and mergeable frames (new id)."""
    # Get report
    watson.frames.add('foo', 0, 15, id='1', updated_at=15)
    watson.frames.add('bar', 20, 45, id='2', updated_at=45)
    conflicting, merging = watson.merge_report(
        str(datafiles) + '/frames-with-conflict')
    assert len(conflicting) == 1
    assert len(merging) == 1
    assert conflicting[0].id == '2'
    assert merging[0].id == '3'
# report/log
# Shorthands for building timezone-aware datetimes in the parametrize table.
_dt = datetime.datetime
_tz = {'tzinfo': tzutc()}


@pytest.mark.parametrize('now, mode, start_time', [
    (_dt(2016, 6, 2, **_tz), 'year', _dt(2016, 1, 1, **_tz)),
    (_dt(2016, 6, 2, **_tz), 'month', _dt(2016, 6, 1, **_tz)),
    (_dt(2016, 6, 2, **_tz), 'week', _dt(2016, 5, 30, **_tz)),
    (_dt(2016, 6, 2, **_tz), 'day', _dt(2016, 6, 2, **_tz)),
    (_dt(2012, 2, 24, **_tz), 'year', _dt(2012, 1, 1, **_tz)),
    (_dt(2012, 2, 24, **_tz), 'month', _dt(2012, 2, 1, **_tz)),
    (_dt(2012, 2, 24, **_tz), 'week', _dt(2012, 2, 20, **_tz)),
    (_dt(2012, 2, 24, **_tz), 'day', _dt(2012, 2, 24, **_tz)),
])
def test_get_start_time_for_period(now, mode, start_time):
    """The period start is computed from a mocked 'now' for each mode."""
    with mock_datetime(now, datetime):
        assert get_start_time_for_period(mode).datetime == start_time
| null |
tests/test_watson.py
|
test_watson.py
|
py
| 25,549 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "py.path.local",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "py.path",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "os.path.realpath",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "sys.version_info",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "mock.patch.object",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "mock.patch",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "pytest.fixture",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "StringIO.StringIO",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "watson.Watson",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "json.dumps",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "mock.patch",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "mock.mock_open",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "watson.current",
"line_number": 89,
"usage_type": "attribute"
},
{
"api_name": "watson.current",
"line_number": 90,
"usage_type": "attribute"
},
{
"api_name": "arrow.get",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "watson.current",
"line_number": 91,
"usage_type": "attribute"
},
{
"api_name": "mock.patch",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "mock.mock_open",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "mock.patch",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "watson.current",
"line_number": 97,
"usage_type": "attribute"
},
{
"api_name": "mock.patch",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "watson.current",
"line_number": 102,
"usage_type": "attribute"
},
{
"api_name": "mock.patch",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "mock.mock_open",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "mock.patch",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "pytest.raises",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "watson.WatsonError",
"line_number": 110,
"usage_type": "argument"
},
{
"api_name": "watson.current",
"line_number": 111,
"usage_type": "attribute"
},
{
"api_name": "json.dumps",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "watson.Watson",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "mock.patch",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "mock.mock_open",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "watson.current",
"line_number": 120,
"usage_type": "attribute"
},
{
"api_name": "json.dumps",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "watson.Watson",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "mock.patch",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "mock.mock_open",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "watson.current",
"line_number": 128,
"usage_type": "attribute"
},
{
"api_name": "arrow.get",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "mock.patch",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "mock.mock_open",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "watson.last_sync",
"line_number": 138,
"usage_type": "attribute"
},
{
"api_name": "mock.patch",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "mock.mock_open",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "mock.patch",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "watson.last_sync",
"line_number": 144,
"usage_type": "attribute"
},
{
"api_name": "arrow.get",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "mock.patch",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "watson.last_sync",
"line_number": 149,
"usage_type": "attribute"
},
{
"api_name": "arrow.get",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "mock.patch",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "mock.mock_open",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "mock.patch",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "pytest.raises",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "watson.WatsonError",
"line_number": 157,
"usage_type": "argument"
},
{
"api_name": "watson.last_sync",
"line_number": 158,
"usage_type": "attribute"
},
{
"api_name": "json.dumps",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "arrow.now",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "watson.Watson",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "mock.patch",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "mock.mock_open",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "watson.last_sync",
"line_number": 167,
"usage_type": "attribute"
},
{
"api_name": "json.dumps",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "watson.Watson",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "mock.patch",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "mock.mock_open",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "watson.last_sync",
"line_number": 175,
"usage_type": "attribute"
},
{
"api_name": "arrow.get",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "mock.patch",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "mock.mock_open",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "watson.frames",
"line_number": 184,
"usage_type": "attribute"
},
{
"api_name": "watson.frames",
"line_number": 185,
"usage_type": "attribute"
},
{
"api_name": "watson.frames",
"line_number": 186,
"usage_type": "attribute"
},
{
"api_name": "arrow.get",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "watson.frames",
"line_number": 187,
"usage_type": "attribute"
},
{
"api_name": "arrow.get",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "watson.frames",
"line_number": 188,
"usage_type": "attribute"
},
{
"api_name": "json.dumps",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "mock.patch",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "mock.mock_open",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "watson.frames",
"line_number": 195,
"usage_type": "attribute"
},
{
"api_name": "watson.frames",
"line_number": 196,
"usage_type": "attribute"
},
{
"api_name": "watson.frames",
"line_number": 197,
"usage_type": "attribute"
},
{
"api_name": "arrow.get",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "watson.frames",
"line_number": 198,
"usage_type": "attribute"
},
{
"api_name": "arrow.get",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "watson.frames",
"line_number": 199,
"usage_type": "attribute"
},
{
"api_name": "mock.patch",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "mock.mock_open",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "mock.patch",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "watson.frames",
"line_number": 205,
"usage_type": "attribute"
},
{
"api_name": "mock.patch",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "watson.frames",
"line_number": 210,
"usage_type": "attribute"
},
{
"api_name": "mock.patch",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "mock.mock_open",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "mock.patch",
"line_number": 217,
"usage_type": "call"
},
{
"api_name": "pytest.raises",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "watson.WatsonError",
"line_number": 219,
"usage_type": "argument"
},
{
"api_name": "watson.frames",
"line_number": 220,
"usage_type": "attribute"
},
{
"api_name": "json.dumps",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "watson.Watson",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "mock.patch",
"line_number": 228,
"usage_type": "call"
},
{
"api_name": "mock.mock_open",
"line_number": 228,
"usage_type": "call"
},
{
"api_name": "watson.frames",
"line_number": 229,
"usage_type": "attribute"
},
{
"api_name": "watson.frames",
"line_number": 230,
"usage_type": "attribute"
},
{
"api_name": "watson.frames",
"line_number": 231,
"usage_type": "attribute"
},
{
"api_name": "json.dumps",
"line_number": 235,
"usage_type": "call"
},
{
"api_name": "watson.Watson",
"line_number": 236,
"usage_type": "call"
},
{
"api_name": "mock.patch",
"line_number": 238,
"usage_type": "call"
},
{
"api_name": "mock.mock_open",
"line_number": 238,
"usage_type": "call"
},
{
"api_name": "watson.frames",
"line_number": 239,
"usage_type": "attribute"
},
{
"api_name": "watson.Watson",
"line_number": 245,
"usage_type": "call"
},
{
"api_name": "watson._dir",
"line_number": 246,
"usage_type": "attribute"
},
{
"api_name": "click.get_app_dir",
"line_number": 246,
"usage_type": "call"
},
{
"api_name": "mock.patch.object",
"line_number": 253,
"usage_type": "call"
},
{
"api_name": "watson.watson.ConfigParser",
"line_number": 253,
"usage_type": "argument"
},
{
"api_name": "mock.patch",
"line_number": 253,
"usage_type": "attribute"
},
{
"api_name": "pytest.raises",
"line_number": 254,
"usage_type": "call"
},
{
"api_name": "watson.watson.ConfigurationError",
"line_number": 254,
"usage_type": "argument"
},
{
"api_name": "watson.config",
"line_number": 255,
"usage_type": "attribute"
},
{
"api_name": "mock.patch.object",
"line_number": 259,
"usage_type": "call"
},
{
"api_name": "watson.watson.ConfigParser",
"line_number": 259,
"usage_type": "argument"
},
{
"api_name": "mock.patch",
"line_number": 259,
"usage_type": "attribute"
},
{
"api_name": "watson.config.sections",
"line_number": 260,
"usage_type": "call"
},
{
"api_name": "watson.config",
"line_number": 260,
"usage_type": "attribute"
},
{
"api_name": "mock.patch.object",
"line_number": 269,
"usage_type": "call"
},
{
"api_name": "watson.watson.ConfigParser",
"line_number": 269,
"usage_type": "argument"
},
{
"api_name": "mock.patch",
"line_number": 269,
"usage_type": "attribute"
},
{
"api_name": "watson.config",
"line_number": 270,
"usage_type": "attribute"
},
{
"api_name": "mock.patch.object",
"line_number": 289,
"usage_type": "call"
},
{
"api_name": "watson.watson.ConfigParser",
"line_number": 289,
"usage_type": "argument"
},
{
"api_name": "mock.patch",
"line_number": 289,
"usage_type": "attribute"
},
{
"api_name": "watson.config",
"line_number": 290,
"usage_type": "attribute"
},
{
"api_name": "mock.patch.object",
"line_number": 310,
"usage_type": "call"
},
{
"api_name": "watson.watson.ConfigParser",
"line_number": 310,
"usage_type": "argument"
},
{
"api_name": "mock.patch",
"line_number": 310,
"usage_type": "attribute"
},
{
"api_name": "watson.config",
"line_number": 311,
"usage_type": "attribute"
},
{
"api_name": "pytest.raises",
"line_number": 320,
"usage_type": "call"
},
{
"api_name": "pytest.raises",
"line_number": 323,
"usage_type": "call"
},
{
"api_name": "mock.patch.object",
"line_number": 336,
"usage_type": "call"
},
{
"api_name": "watson.watson.ConfigParser",
"line_number": 336,
"usage_type": "argument"
},
{
"api_name": "mock.patch",
"line_number": 336,
"usage_type": "attribute"
},
{
"api_name": "watson.config",
"line_number": 337,
"usage_type": "attribute"
},
{
"api_name": "pytest.raises",
"line_number": 347,
"usage_type": "call"
},
{
"api_name": "pytest.raises",
"line_number": 350,
"usage_type": "call"
},
{
"api_name": "mock.patch.object",
"line_number": 377,
"usage_type": "call"
},
{
"api_name": "watson.watson.ConfigParser",
"line_number": 377,
"usage_type": "argument"
},
{
"api_name": "mock.patch",
"line_number": 377,
"usage_type": "attribute"
},
{
"api_name": "watson.config",
"line_number": 378,
"usage_type": "attribute"
},
{
"api_name": "watson.watson.ConfigParser",
"line_number": 402,
"usage_type": "call"
},
{
"api_name": "watson.config",
"line_number": 404,
"usage_type": "attribute"
},
{
"api_name": "watson.config.get",
"line_number": 406,
"usage_type": "call"
},
{
"api_name": "watson.config",
"line_number": 406,
"usage_type": "attribute"
},
{
"api_name": "watson.start",
"line_number": 412,
"usage_type": "call"
},
{
"api_name": "watson.current",
"line_number": 414,
"usage_type": "attribute"
},
{
"api_name": "watson.is_started",
"line_number": 415,
"usage_type": "attribute"
},
{
"api_name": "watson.current.get",
"line_number": 416,
"usage_type": "call"
},
{
"api_name": "watson.current",
"line_number": 416,
"usage_type": "attribute"
},
{
"api_name": "watson.current.get",
"line_number": 417,
"usage_type": "call"
},
{
"api_name": "watson.current",
"line_number": 417,
"usage_type": "attribute"
},
{
"api_name": "arrow.Arrow",
"line_number": 417,
"usage_type": "attribute"
},
{
"api_name": "watson.current.get",
"line_number": 418,
"usage_type": "call"
},
{
"api_name": "watson.current",
"line_number": 418,
"usage_type": "attribute"
},
{
"api_name": "watson.start",
"line_number": 422,
"usage_type": "call"
},
{
"api_name": "watson.current",
"line_number": 424,
"usage_type": "attribute"
},
{
"api_name": "watson.is_started",
"line_number": 425,
"usage_type": "attribute"
},
{
"api_name": "watson.current.get",
"line_number": 426,
"usage_type": "call"
},
{
"api_name": "watson.current",
"line_number": 426,
"usage_type": "attribute"
},
{
"api_name": "watson.current.get",
"line_number": 427,
"usage_type": "call"
},
{
"api_name": "watson.current",
"line_number": 427,
"usage_type": "attribute"
},
{
"api_name": "arrow.Arrow",
"line_number": 427,
"usage_type": "attribute"
},
{
"api_name": "watson.current.get",
"line_number": 428,
"usage_type": "call"
},
{
"api_name": "watson.current",
"line_number": 428,
"usage_type": "attribute"
},
{
"api_name": "watson.start",
"line_number": 432,
"usage_type": "call"
},
{
"api_name": "pytest.raises",
"line_number": 434,
"usage_type": "call"
},
{
"api_name": "watson.WatsonError",
"line_number": 434,
"usage_type": "argument"
},
{
"api_name": "watson.start",
"line_number": 435,
"usage_type": "call"
},
{
"api_name": "watson.current",
"line_number": 437,
"usage_type": "attribute"
},
{
"api_name": "watson.current",
"line_number": 438,
"usage_type": "attribute"
},
{
"api_name": "watson.is_started",
"line_number": 439,
"usage_type": "attribute"
},
{
"api_name": "watson.start",
"line_number": 445,
"usage_type": "call"
},
{
"api_name": "watson.stop",
"line_number": 446,
"usage_type": "call"
},
{
"api_name": "watson.current",
"line_number": 448,
"usage_type": "attribute"
},
{
"api_name": "watson.is_started",
"line_number": 449,
"usage_type": "attribute"
},
{
"api_name": "watson.frames",
"line_number": 450,
"usage_type": "attribute"
},
{
"api_name": "watson.frames",
"line_number": 451,
"usage_type": "attribute"
},
{
"api_name": "watson.frames",
"line_number": 452,
"usage_type": "attribute"
},
{
"api_name": "arrow.Arrow",
"line_number": 452,
"usage_type": "attribute"
},
{
"api_name": "watson.frames",
"line_number": 453,
"usage_type": "attribute"
},
{
"api_name": "arrow.Arrow",
"line_number": 453,
"usage_type": "attribute"
},
{
"api_name": "watson.frames",
"line_number": 454,
"usage_type": "attribute"
},
{
"api_name": "watson.start",
"line_number": 458,
"usage_type": "call"
},
{
"api_name": "watson.stop",
"line_number": 459,
"usage_type": "call"
},
{
"api_name": "watson.current",
"line_number": 461,
"usage_type": "attribute"
},
{
"api_name": "watson.is_started",
"line_number": 462,
"usage_type": "attribute"
},
{
"api_name": "watson.frames",
"line_number": 463,
"usage_type": "attribute"
},
{
"api_name": "watson.frames",
"line_number": 464,
"usage_type": "attribute"
},
{
"api_name": "watson.frames",
"line_number": 465,
"usage_type": "attribute"
},
{
"api_name": "arrow.Arrow",
"line_number": 465,
"usage_type": "attribute"
},
{
"api_name": "watson.frames",
"line_number": 466,
"usage_type": "attribute"
},
{
"api_name": "arrow.Arrow",
"line_number": 466,
"usage_type": "attribute"
},
{
"api_name": "watson.frames",
"line_number": 467,
"usage_type": "attribute"
},
{
"api_name": "pytest.raises",
"line_number": 471,
"usage_type": "call"
},
{
"api_name": "watson.WatsonError",
"line_number": 471,
"usage_type": "argument"
},
{
"api_name": "watson.stop",
"line_number": 472,
"usage_type": "call"
},
{
"api_name": "watson.start",
"line_number": 478,
"usage_type": "call"
},
{
"api_name": "watson.cancel",
"line_number": 479,
"usage_type": "call"
},
{
"api_name": "watson.current",
"line_number": 481,
"usage_type": "attribute"
},
{
"api_name": "watson.frames",
"line_number": 482,
"usage_type": "attribute"
},
{
"api_name": "pytest.raises",
"line_number": 486,
"usage_type": "call"
},
{
"api_name": "watson.WatsonError",
"line_number": 486,
"usage_type": "argument"
},
{
"api_name": "watson.cancel",
"line_number": 487,
"usage_type": "call"
},
{
"api_name": "mock.patch",
"line_number": 493,
"usage_type": "call"
},
{
"api_name": "mock.mock_open",
"line_number": 493,
"usage_type": "call"
},
{
"api_name": "mock.patch",
"line_number": 494,
"usage_type": "call"
},
{
"api_name": "watson.save",
"line_number": 495,
"usage_type": "call"
},
{
"api_name": "watson.start",
"line_number": 501,
"usage_type": "call"
},
{
"api_name": "mock.patch",
"line_number": 503,
"usage_type": "call"
},
{
"api_name": "mock.mock_open",
"line_number": 503,
"usage_type": "call"
},
{
"api_name": "mock.patch",
"line_number": 504,
"usage_type": "call"
},
{
"api_name": "watson.save",
"line_number": 505,
"usage_type": "call"
},
{
"api_name": "watson.start",
"line_number": 515,
"usage_type": "call"
},
{
"api_name": "mock.patch",
"line_number": 517,
"usage_type": "call"
},
{
"api_name": "mock.mock_open",
"line_number": 517,
"usage_type": "call"
},
{
"api_name": "mock.patch",
"line_number": 518,
"usage_type": "call"
},
{
"api_name": "watson.save",
"line_number": 519,
"usage_type": "call"
},
{
"api_name": "watson.Watson",
"line_number": 532,
"usage_type": "call"
},
{
"api_name": "watson.current",
"line_number": 534,
"usage_type": "attribute"
},
{
"api_name": "mock.patch",
"line_number": 536,
"usage_type": "call"
},
{
"api_name": "mock.mock_open",
"line_number": 536,
"usage_type": "call"
},
{
"api_name": "mock.patch",
"line_number": 537,
"usage_type": "call"
},
{
"api_name": "watson.save",
"line_number": 538,
"usage_type": "call"
},
{
"api_name": "watson.Watson",
"line_number": 546,
"usage_type": "call"
},
{
"api_name": "mock.patch",
"line_number": 549,
"usage_type": "call"
},
{
"api_name": "mock.mock_open",
"line_number": 549,
"usage_type": "call"
},
{
"api_name": "mock.patch",
"line_number": 550,
"usage_type": "call"
},
{
"api_name": "watson.save",
"line_number": 551,
"usage_type": "call"
},
{
"api_name": "watson.Watson",
"line_number": 557,
"usage_type": "call"
},
{
"api_name": "watson.frames.add",
"line_number": 558,
"usage_type": "call"
},
{
"api_name": "watson.frames",
"line_number": 558,
"usage_type": "attribute"
},
{
"api_name": "mock.patch",
"line_number": 560,
"usage_type": "call"
},
{
"api_name": "mock.mock_open",
"line_number": 560,
"usage_type": "call"
},
{
"api_name": "mock.patch",
"line_number": 561,
"usage_type": "call"
},
{
"api_name": "watson.save",
"line_number": 562,
"usage_type": "call"
},
{
"api_name": "watson.Watson",
"line_number": 574,
"usage_type": "call"
},
{
"api_name": "watson.frames",
"line_number": 576,
"usage_type": "attribute"
},
{
"api_name": "mock.patch",
"line_number": 578,
"usage_type": "call"
},
{
"api_name": "mock.mock_open",
"line_number": 578,
"usage_type": "call"
},
{
"api_name": "mock.patch",
"line_number": 579,
"usage_type": "call"
},
{
"api_name": "watson.save",
"line_number": 580,
"usage_type": "call"
},
{
"api_name": "mock.patch",
"line_number": 593,
"usage_type": "call"
},
{
"api_name": "mock.mock_open",
"line_number": 593,
"usage_type": "call"
},
{
"api_name": "mock.patch.object",
"line_number": 594,
"usage_type": "call"
},
{
"api_name": "watson.watson.ConfigParser",
"line_number": 594,
"usage_type": "argument"
},
{
"api_name": "mock.patch",
"line_number": 594,
"usage_type": "attribute"
},
{
"api_name": "watson.save",
"line_number": 595,
"usage_type": "call"
},
{
"api_name": "mock.patch",
"line_number": 601,
"usage_type": "call"
},
{
"api_name": "mock.mock_open",
"line_number": 601,
"usage_type": "call"
},
{
"api_name": "mock.patch.object",
"line_number": 602,
"usage_type": "call"
},
{
"api_name": "watson.watson.ConfigParser",
"line_number": 602,
"usage_type": "argument"
},
{
"api_name": "mock.patch",
"line_number": 602,
"usage_type": "attribute"
},
{
"api_name": "watson.config",
"line_number": 603,
"usage_type": "attribute"
},
{
"api_name": "watson.watson.ConfigParser",
"line_number": 603,
"usage_type": "call"
},
{
"api_name": "watson.save",
"line_number": 604,
"usage_type": "call"
},
{
"api_name": "arrow.now",
"line_number": 610,
"usage_type": "call"
},
{
"api_name": "watson.last_sync",
"line_number": 611,
"usage_type": "attribute"
},
{
"api_name": "mock.patch",
"line_number": 613,
"usage_type": "call"
},
{
"api_name": "mock.mock_open",
"line_number": 613,
"usage_type": "call"
},
{
"api_name": "mock.patch",
"line_number": 614,
"usage_type": "call"
},
{
"api_name": "watson.save",
"line_number": 615,
"usage_type": "call"
},
{
"api_name": "watson.Watson",
"line_number": 622,
"usage_type": "call"
},
{
"api_name": "arrow.now",
"line_number": 622,
"usage_type": "call"
},
{
"api_name": "watson.last_sync",
"line_number": 623,
"usage_type": "attribute"
},
{
"api_name": "mock.patch",
"line_number": 625,
"usage_type": "call"
},
{
"api_name": "mock.mock_open",
"line_number": 625,
"usage_type": "call"
},
{
"api_name": "mock.patch",
"line_number": 626,
"usage_type": "call"
},
{
"api_name": "watson.save",
"line_number": 627,
"usage_type": "call"
},
{
"api_name": "watson.watson.ConfigParser",
"line_number": 636,
"usage_type": "call"
},
{
"api_name": "watson.config",
"line_number": 637,
"usage_type": "attribute"
},
{
"api_name": "pytest.raises",
"line_number": 639,
"usage_type": "call"
},
{
"api_name": "watson.WatsonError",
"line_number": 639,
"usage_type": "argument"
},
{
"api_name": "watson.push",
"line_number": 640,
"usage_type": "call"
},
{
"api_name": "arrow.now",
"line_number": 640,
"usage_type": "call"
},
{
"api_name": "watson.watson.ConfigParser",
"line_number": 644,
"usage_type": "call"
},
{
"api_name": "watson.config",
"line_number": 647,
"usage_type": "attribute"
},
{
"api_name": "pytest.raises",
"line_number": 649,
"usage_type": "call"
},
{
"api_name": "watson.WatsonError",
"line_number": 649,
"usage_type": "argument"
},
{
"api_name": "watson.push",
"line_number": 650,
"usage_type": "call"
},
{
"api_name": "arrow.now",
"line_number": 650,
"usage_type": "call"
},
{
"api_name": "watson.watson.ConfigParser",
"line_number": 654,
"usage_type": "call"
},
{
"api_name": "watson.config",
"line_number": 657,
"usage_type": "attribute"
},
{
"api_name": "pytest.raises",
"line_number": 659,
"usage_type": "call"
},
{
"api_name": "watson.WatsonError",
"line_number": 659,
"usage_type": "argument"
},
{
"api_name": "watson.push",
"line_number": 660,
"usage_type": "call"
},
{
"api_name": "arrow.now",
"line_number": 660,
"usage_type": "call"
},
{
"api_name": "watson.watson.ConfigParser",
"line_number": 664,
"usage_type": "call"
},
{
"api_name": "watson.frames.add",
"line_number": 669,
"usage_type": "call"
},
{
"api_name": "watson.frames",
"line_number": 669,
"usage_type": "attribute"
},
{
"api_name": "watson.frames.add",
"line_number": 670,
"usage_type": "call"
},
{
"api_name": "watson.frames",
"line_number": 670,
"usage_type": "attribute"
},
{
"api_name": "watson.last_sync",
"line_number": 672,
"usage_type": "attribute"
},
{
"api_name": "arrow.now",
"line_number": 672,
"usage_type": "call"
},
{
"api_name": "watson.frames.add",
"line_number": 674,
"usage_type": "call"
},
{
"api_name": "watson.frames",
"line_number": 674,
"usage_type": "attribute"
},
{
"api_name": "watson.frames.add",
"line_number": 675,
"usage_type": "call"
},
{
"api_name": "watson.frames",
"line_number": 675,
"usage_type": "attribute"
},
{
"api_name": "arrow.now",
"line_number": 677,
"usage_type": "call"
},
{
"api_name": "watson.frames.add",
"line_number": 679,
"usage_type": "call"
},
{
"api_name": "watson.frames",
"line_number": 679,
"usage_type": "attribute"
},
{
"api_name": "watson.frames.add",
"line_number": 680,
"usage_type": "call"
},
{
"api_name": "watson.frames",
"line_number": 680,
"usage_type": "attribute"
},
{
"api_name": "mock.patch",
"line_number": 692,
"usage_type": "call"
},
{
"api_name": "mock.patch.object",
"line_number": 695,
"usage_type": "call"
},
{
"api_name": "watson.Watson",
"line_number": 696,
"usage_type": "argument"
},
{
"api_name": "mock.patch",
"line_number": 695,
"usage_type": "attribute"
},
{
"api_name": "mock.PropertyMock",
"line_number": 696,
"usage_type": "attribute"
},
{
"api_name": "watson.push",
"line_number": 699,
"usage_type": "call"
},
{
"api_name": "requests.post.assert_called_once_with",
"line_number": 701,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 701,
"usage_type": "attribute"
},
{
"api_name": "mock.ANY",
"line_number": 702,
"usage_type": "attribute"
},
{
"api_name": "mock.ANY",
"line_number": 703,
"usage_type": "attribute"
},
{
"api_name": "json.loads",
"line_number": 710,
"usage_type": "call"
},
{
"api_name": "watson.watson.ConfigParser",
"line_number": 723,
"usage_type": "call"
},
{
"api_name": "watson.config",
"line_number": 724,
"usage_type": "attribute"
},
{
"api_name": "pytest.raises",
"line_number": 726,
"usage_type": "call"
},
{
"api_name": "watson.watson.ConfigurationError",
"line_number": 726,
"usage_type": "argument"
},
{
"api_name": "watson.pull",
"line_number": 727,
"usage_type": "call"
},
{
"api_name": "watson.watson.ConfigParser",
"line_number": 731,
"usage_type": "call"
},
{
"api_name": "watson.config",
"line_number": 734,
"usage_type": "attribute"
},
{
"api_name": "pytest.raises",
"line_number": 736,
"usage_type": "call"
},
{
"api_name": "watson.watson.ConfigurationError",
"line_number": 736,
"usage_type": "argument"
},
{
"api_name": "watson.pull",
"line_number": 737,
"usage_type": "call"
},
{
"api_name": "watson.watson.ConfigParser",
"line_number": 741,
"usage_type": "call"
},
{
"api_name": "watson.config",
"line_number": 744,
"usage_type": "attribute"
},
{
"api_name": "pytest.raises",
"line_number": 746,
"usage_type": "call"
},
{
"api_name": "watson.watson.ConfigurationError",
"line_number": 746,
"usage_type": "argument"
},
{
"api_name": "watson.pull",
"line_number": 747,
"usage_type": "call"
},
{
"api_name": "watson.watson.ConfigParser",
"line_number": 751,
"usage_type": "call"
},
{
"api_name": "watson.last_sync",
"line_number": 756,
"usage_type": "attribute"
},
{
"api_name": "arrow.now",
"line_number": 756,
"usage_type": "call"
},
{
"api_name": "watson.frames.add",
"line_number": 758,
"usage_type": "call"
},
{
"api_name": "watson.frames",
"line_number": 758,
"usage_type": "attribute"
},
{
"api_name": "mock.patch",
"line_number": 777,
"usage_type": "call"
},
{
"api_name": "mock.patch.object",
"line_number": 780,
"usage_type": "call"
},
{
"api_name": "watson.Watson",
"line_number": 781,
"usage_type": "argument"
},
{
"api_name": "mock.patch",
"line_number": 780,
"usage_type": "attribute"
},
{
"api_name": "mock.PropertyMock",
"line_number": 781,
"usage_type": "attribute"
},
{
"api_name": "watson.pull",
"line_number": 784,
"usage_type": "call"
},
{
"api_name": "requests.get.assert_called_once_with",
"line_number": 786,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 786,
"usage_type": "attribute"
},
{
"api_name": "mock.ANY",
"line_number": 787,
"usage_type": "attribute"
},
{
"api_name": "watson.last_sync",
"line_number": 788,
"usage_type": "attribute"
},
{
"api_name": "watson.frames",
"line_number": 795,
"usage_type": "attribute"
},
{
"api_name": "watson.frames",
"line_number": 797,
"usage_type": "attribute"
},
{
"api_name": "watson.frames",
"line_number": 798,
"usage_type": "attribute"
},
{
"api_name": "watson.frames",
"line_number": 799,
"usage_type": "attribute"
},
{
"api_name": "watson.frames",
"line_number": 800,
"usage_type": "attribute"
},
{
"api_name": "watson.frames",
"line_number": 801,
"usage_type": "attribute"
},
{
"api_name": "watson.frames",
"line_number": 803,
"usage_type": "attribute"
},
{
"api_name": "watson.frames",
"line_number": 804,
"usage_type": "attribute"
},
{
"api_name": "watson.frames",
"line_number": 805,
"usage_type": "attribute"
},
{
"api_name": "watson.frames",
"line_number": 806,
"usage_type": "attribute"
},
{
"api_name": "watson.frames",
"line_number": 807,
"usage_type": "attribute"
},
{
"api_name": "watson.frames.add",
"line_number": 814,
"usage_type": "call"
},
{
"api_name": "watson.frames",
"line_number": 814,
"usage_type": "attribute"
},
{
"api_name": "watson.projects",
"line_number": 816,
"usage_type": "attribute"
},
{
"api_name": "watson.projects",
"line_number": 820,
"usage_type": "attribute"
},
{
"api_name": "watson.frames.add",
"line_number": 835,
"usage_type": "call"
},
{
"api_name": "watson.frames",
"line_number": 835,
"usage_type": "attribute"
},
{
"api_name": "watson.tags",
"line_number": 837,
"usage_type": "attribute"
},
{
"api_name": "watson.tags",
"line_number": 841,
"usage_type": "attribute"
},
{
"api_name": "watson.frames.add",
"line_number": 851,
"usage_type": "call"
},
{
"api_name": "watson.frames",
"line_number": 851,
"usage_type": "attribute"
},
{
"api_name": "watson.frames.add",
"line_number": 852,
"usage_type": "call"
},
{
"api_name": "watson.frames",
"line_number": 852,
"usage_type": "attribute"
},
{
"api_name": "watson.merge_report",
"line_number": 854,
"usage_type": "call"
},
{
"api_name": "pytest.mark.datafiles",
"line_number": 846,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 846,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime",
"line_number": 866,
"usage_type": "attribute"
},
{
"api_name": "dateutil.tz.tz.tzutc",
"line_number": 867,
"usage_type": "call"
},
{
"api_name": "watson.utils.get_start_time_for_period",
"line_number": 883,
"usage_type": "call"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 870,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 870,
"usage_type": "attribute"
}
] |
236068755
|
'''
program to make .png screenshot of web page
and send it to email
'''
import os
import time
from urllib.parse import urlparse
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from send_mail import send_mail
__author__ = 'sashko'
def main(url, fname):
'''initialize webdriver session, grub url and save image'''
# set user defined user-agent
dcap = dict(DesiredCapabilities.PHANTOMJS)
dcap["phantomjs.page.settings.userAgent"] = (
"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:47.0) Gecko/20100101 Firefox/47.0"
)
# use phantomjs as webdriver
driver = webdriver.PhantomJS(desired_capabilities=dcap)
# maximize virtual window to grab all possible area
driver.maximize_window()
# read url
driver.get(url)
# wait some time to finish render a page
time.sleep(10)
# make screenshot
driver.get_screenshot_as_file('./shots/' + fname + '.png')
# close driver session
driver.quit()
# return path of saved file as result of function
return fname + '.png'
def file_rm(fname):
'''remove screenshot file from disk'''
os.remove(os.path.abspath('./shots/' + fname + '.png'))
# read links from a file on disk
with open('links.txt') as file:
for link in file:
link = link.strip('\n')
# don't load commented links
if link.startswith('#'):
continue
print("Read links: {}".format(link))
if link.startswith('http'):
print("Link with http, Ok.")
pass
else:
print("Link without http, add prefix.")
link = 'http://' + link
u = urlparse(link)
filename = u.netloc
print(send_mail(main(link, filename), link))
print()
time.sleep(2)
file_rm(filename)
| null |
My_Py_Code/screenshot.py
|
screenshot.py
|
py
| 1,856 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "selenium.webdriver.common.desired_capabilities.DesiredCapabilities.PHANTOMJS",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.desired_capabilities.DesiredCapabilities",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.PhantomJS",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "urllib.parse.urlparse",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "send_mail.send_mail",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 70,
"usage_type": "call"
}
] |
515947236
|
""" Implements HyperNEAT's conversion
from genotype to phenotype.
"""
### IMPORTS ###
from itertools import product
# Libs
import numpy as np
# Local
from ..networks.rnn import NeuralNetwork
class HyperNEATDeveloper(object):
""" HyperNEAT developer object."""
def __init__(self, substrate=None, substrate_shape=None,
sandwich=False,
weight_range=3.0,
min_weight=0.3,
activation_steps=5,
node_type='tanh'):
""" Constructor
:param substrate: A list of node coordinates (tuples)
:param substrate_shape: If a tuple is passed, a uniform NxN substrate is generated with coords [-1, 1]
:param weight_range: (min, max) of substrate weights
:param min_weight: The minimum CPPN output value that will lead to an expressed connection.
:param sandwich: Whether to turn the output net into a sandwich network.
:param node_type: What node type to assign to the output nodes.
"""
self.substrate = substrate
self.sandwich = sandwich
self.weight_range = weight_range
self.min_weight = min_weight
self.activation_steps = activation_steps
self.node_type = node_type
self.substrate_shape = substrate_shape
if substrate_shape is not None:
# Create coordinate grids
self.substrate = np.mgrid[[slice(-1, 1, s*1j) for s in substrate_shape]]
# Move coordinates to last dimension
self.substrate = self.substrate.transpose(range(1,len(substrate_shape)+1) + [0])
# Reshape to a N x nD list.
self.substrate = self.substrate.reshape(-1, len(substrate_shape))
if self.substrate is None:
raise Exception("You must pass either substrate or substrate_shape")
def convert(self, network):
""" Performs conversion.
:param network: Any object that is convertible to a :class:`~peas.networks.NeuralNetwork`.
"""
# Cast input to a neuralnetwork if it isn't
if not isinstance(network, NeuralNetwork):
network = NeuralNetwork(network)
# Since Stanley mentions to "fully activate" the CPPN,
# I assume this means it's a feedforward net, since otherwise
# there is no clear definition of "full activation".
# In an FF network, activating each node once leads to a stable condition.
# Check if the network has enough inputs.
cm_dims = 2 * len(self.substrate_shape)
required_inputs = cm_dims + 1
if network.cm.shape[0] < required_inputs:
raise Exception("Network does not have enough inputs. Has %d, needs %d" %
(network.cm.shape[0], cm_dims+1))
# Initialize connectivity matrix
cm = np.zeros((len(self.substrate), len(self.substrate)))
for (i, fr), (j, to) in product(enumerate(self.substrate), repeat=2):
if network.feedforward:
weight = network.feed(np.hstack((fr, to)))[-1]
else:
for _ in xrange(self.activation_steps):
weight = network.feed(np.hstack((fr, to)))[-1]
cm[j, i] = weight
# Rescale the CM
cm[np.abs(cm) < self.min_weight] = 0
cm -= (np.sign(cm) * self.min_weight)
cm *= self.weight_range / (self.weight_range - self.min_weight)
# Clip highest weights
cm = np.clip(cm, -self.weight_range, self.weight_range)
net = NeuralNetwork().from_matrix(cm, node_types=[self.node_type])
if self.sandwich:
net.make_sandwich()
if not np.all(np.isfinite(net.cm)):
raise Exception("Network contains NaN/inf weights.")
return net
| null |
peas/methods/hyperneat.py
|
hyperneat.py
|
py
| 4,068 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "numpy.mgrid",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "networks.rnn.NeuralNetwork",
"line_number": 62,
"usage_type": "argument"
},
{
"api_name": "networks.rnn.NeuralNetwork",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "itertools.product",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "numpy.hstack",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "numpy.hstack",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "numpy.sign",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "numpy.clip",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "networks.rnn.NeuralNetwork",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "numpy.all",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "numpy.isfinite",
"line_number": 100,
"usage_type": "call"
}
] |
517167102
|
from django.urls import path
from .import views
app_name='login'
urlpatterns = [
path('signup', views.signup,name='signup'),
path('profile/', views.profile,name='profile'),
path('profile/edite', views.profile_edit,name='profile_edit'),
#path('create/', views.create_post, name='create_post'),
]
| null |
login/urls.py
|
urls.py
|
py
| 312 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.urls.path",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
}
] |
242938236
|
import csv
import math
import subprocess as sp
from argparse import ArgumentParser as ArgPar
def load_storage(model, num, is_round, cuda):
with open("storage/{}/{}/storage.csv".format(model, num), "r", newline = "") as f:
reader = csv.DictReader(f, delimiter = ",", quotechar = '"')
target = {}
for i, row in enumerate(reader):
if i == 0:
for var_name, value in row.items():
target[var_name] = value
generate_shell(target, model, num, is_round, cuda)
def load_dist(model):
with open("type_dict/{}/type_dict.csv".format(model), "r", newline = "") as f:
reader = csv.DictReader(f, delimiter = ";", quotechar = '"')
var_dist = {}
for row in reader:
var_dist[row["var_name"]] = [eval(row["type"]), row["dist"]]
return var_dist
def convert_value_by_dist(value, dist):
vtype, vdist = dist[0], dist[1]
if "log" in vdist:
base = float(vdist.split("log")[-1])
if vtype == float:
v = float(base ** float(value))
elif vtype == int:
v = round(base ** float(value))
else:
v = value
return v
def generate_shell(target, model, num, is_round, cuda):
scripts = ["#!/bin/bash","USER=$(whoami)"]
enter = "\n"
first_script = ""
second_script = ""
for s in scripts:
first_script += s + enter
second_script = "CUDA_VISIBLE_DEVICES={} python train.py -model {} -num {} -round {} ".format(cuda, model, num, int(is_round))
var_dist = load_dist(model)
for var_name, value in target.items():
dist = var_dist[var_name]
v = convert_value_by_dist(value, dist)
second_script += "-{} {} ".format(var_name, v)
script = first_script + "echo $USER:~$PWD$ {} \n".format(second_script) + second_script
with open("shell/{}/{}/run.sh".format(model, num), "w") as f:
f.writelines(script)
def main(model, num, is_round, cuda):
load_storage(model, num, is_round, cuda)
if __name__ == "__main__":
argp = ArgPar()
argp.add_argument("-model", type = str)
argp.add_argument("-num", type = int)
argp.add_argument("-round", type = int, default = 1, choices = [0, 1])
argp.add_argument("-cuda", type = int, default = 1, required = True)
args = argp.parse_args()
model = args.model
num = args.num
is_round = bool(args.round)
cuda = args.cuda
print("Collecting Environment Variables and Putting in Shell Scripts.")
main(model, num, is_round, cuda)
| null |
env.py
|
env.py
|
py
| 2,591 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "csv.DictReader",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "csv.DictReader",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 71,
"usage_type": "call"
}
] |
528532283
|
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 18 19:39:52 2019
@author: eliphat
"""
import sys
import io
import ltokenizer
import lparser
import levaluator
def main():
inp = ''
if len(sys.argv) > 1:
with open(sys.argv[1]) as fi:
inp = fi.read(-1)
else:
with io.StringIO() as buffer:
line = ''
while line != 'GO!':
buffer.write(line)
line = input()
inp = buffer.getvalue()
levaluator.evaluate(lparser.parse(ltokenizer.tokenize(inp)))
if __name__ == "__main__":
main()
| null |
main.py
|
main.py
|
py
| 589 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "sys.argv",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "io.StringIO",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "levaluator.evaluate",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "lparser.parse",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "ltokenizer.tokenize",
"line_number": 26,
"usage_type": "call"
}
] |
465343936
|
import sys
import schiene
import datetime
import pytz
import json
import configparser
import boto3
import pickle
from pytictoc import TicToc
import multiprocessing as mp
import logging
import numpy as np
logpath = "/home/ubuntu/sbmd/logs/"
normlogfilename = "sb03clog_" + sys.argv[1] + "_" \
+ datetime.datetime.now().strftime("%Y-%m-%d_%H-%M") + ".log"
logging.basicConfig(filename=logpath+normlogfilename, level=logging.DEBUG)
def load_train(start, end, s3key, s3skey,
s = schiene.Schiene()):
'''
loads connection details from a Schiene object
start: start of the DB train connection, has to be a string and match a
name of Schiene stations
end: end of the DB train connection, has to be a string and match a
name of Schiene stations
s3key: AWS Access Key
s3skey: AWS Secret Access Key,
s: schiene instance
'''
c = s.connections(start, end)
for conn in c:
conn["date"] = str(datetime.date.today())
conn["_id"] = (str(conn["date"]) + "_" + conn["departure"]
+ "_" + start + "_" + end)
conn["timestamp"] = str(datetime.datetime.now(
tz=pytz.timezone("Europe/Berlin")))
if "delay" in conn.keys():
conn["total_delay"] = (conn["delay"]["delay_departure"]
+ conn["delay"]["delay_arrival"])
else:
conn["total_delay"] = 0
conn["start"] = start
conn["end"] = end
filename = "DB_" + conn["_id"] + ".json"
filename = filename.replace(":", "_")
filename = filename.replace(" ", "_")
s3 = boto3.resource('s3',
aws_access_key_id=s3key,
aws_secret_access_key= s3skey)
s3object = s3.Object("sbmd1db2", filename)
s3object.put(Body=(bytes(json.dumps(conn)\
.encode('utf-8'))))
def load_trains_all(conns, s3key_p, s3skey_p):
'''
runs all load_train queries for provided stations
conns: iterable containing 2 elements for start and end station
s3key_p: AWS Access key
s3skey_p: AWS Secret Access Key
'''
try:
load_train(conns[0], conns[1], s3key_p, s3skey_p)
except Exception as e:
logging.error("Error at first round for " + conns[0] + "_" + conns[1])
logging.error(e)
try:
load_train(conns[1], conns[0], s3key_p, s3skey_p)
except Exception as e:
logging.error("Error at second round for " + conns[0] + "_" + conns[1])
logging.error(e)
def main():
t = TicToc()
t.tic()
with open("/home/ubuntu/sbmd/station", "rb") as f:
fileobj = pickle.load(f)
statit = fileobj[2][int(sys.argv[1])]
statit = np.flip(statit)
credfile = "/home/ubuntu/sbmd/dwh.cfg"
config = configparser.ConfigParser()
config.read(credfile)
s3k = config['AWS']['KEY']
s3ks = config['AWS']['SECRET']
pool = mp.Pool(mp.cpu_count())
[pool.apply(load_trains_all, args=(co, s3k, s3ks)) for co in statit]
ind = sys.argv[1]
logging.info(f"Gathered conn data succesfully with index {ind}")
t.toc()
if __name__ == "__main__":
main()
| null |
sbahnmuc03c_reversed.py
|
sbahnmuc03c_reversed.py
|
py
| 3,360 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "sys.argv",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "logging.basicConfig",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "schiene.Schiene",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "datetime.date.today",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "pytz.timezone",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "boto3.resource",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "logging.error",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "logging.error",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "logging.error",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "logging.error",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "pytictoc.TicToc",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "numpy.flip",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "configparser.ConfigParser",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Pool",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "multiprocessing.cpu_count",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 110,
"usage_type": "attribute"
},
{
"api_name": "logging.info",
"line_number": 111,
"usage_type": "call"
}
] |
354392259
|
from django.urls import path
from . import views
app_name = 'polls'
urlpatterns = [
path('home/', views.home, name='home'),
path('polls/', views.pollview, name='pollview'),
path('polls/<int:poll_id>/page/<int:page_idx>', views.pages, name='pages'),
path('polls/poll<int:poll_id>/results', views.result, name='result'),
path('statistics', views.statistics, name='statistics'),
path('login', views.login_as_admin, name='adminlogin'),
path('statistics/poll_attempts', views.poll_attempts, name='attempts'),
path('statistics/polls', views.pollview_stats, name='pollviewstats'),
path('statistics/polls/<int:poll_id>', views.statistics_results, name='statisticsresults'),
path('statistics/polls/<int:poll_id>/user/<str:user>', views.user_result, name='userresult'),
path('statistics/mwq/polls', views.most_wrong_questions_polls, name='mostwrongquestions'),
path('statistics/mwq/polls/<int:poll_id>', views.most_wrong_questions_results, name='mostwrongquestionsresults')
]
| null |
dlp/polls/urls.py
|
urls.py
|
py
| 1,016 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 18,
"usage_type": "call"
}
] |
2704383
|
import numpy
import matplotlib.pyplot as plt
def sample_new_point(origin_square, length_halfsquare, subidx):
dx, dy = subidx % 2, subidx // 2
offset = length_halfsquare * numpy.array([dx, dy], dtype=float)
random_offset = numpy.array([numpy.random.random(), numpy.random.random()])
return origin_square + random_offset * length_halfsquare + offset
def subdivide_square(origin_square, length_square, seeds, density_func):
length_halfsquare = 0.5 * length_square
rho = density_func(origin_square + length_halfsquare)
target_seeds = (length_square ** 2) * rho
if target_seeds <= 4:
# 1st case: the cell is a leaf
shuffled_idx = numpy.random.permutation(4)
min_samples = int(numpy.floor(target_seeds))
proba_last = target_seeds - min_samples
for i in range(min_samples):
seeds.append(sample_new_point(origin_square, length_halfsquare, shuffled_idx[i]))
if numpy.random.random() <= proba_last and min_samples < 4:
seeds.append(sample_new_point(origin_square, length_halfsquare, shuffled_idx[min_samples]))
else:
# 2nd case: recursive call
for delta in numpy.ndindex(2, 2):
offset = numpy.array(delta, dtype=float)
origin_subsquare = origin_square + offset * length_halfsquare
subdivide_square(origin_subsquare, length_halfsquare, seeds, density_func)
def plot_seeds(seeds, extent):
seeds_x = [s[0] for s in seeds]
seeds_y = [s[1] for s in seeds]
plt.scatter(seeds_x, seeds_y, s=0.5)
plt.xlim([0, extent[0]])
plt.ylim([0, extent[1]])
plt.axes().set_aspect('equal')
plt.show()
def generate_seeds(coarse_level_length, extent):
def density_func(point):
# grading in x direction
seed_density_factor = 2000
return (point[0] / extent[0]) * seed_density_factor # seeds / mm^2
numpy.random.seed(1)
seeds = []
for origin_x in numpy.arange(0.0, extent[0], coarse_level_length):
for origin_y in numpy.arange(0.0, extent[1], coarse_level_length):
origin_square_coarse = numpy.array([origin_x, origin_y], dtype=float)
subdivide_square(origin_square_coarse, coarse_level_length, seeds, density_func)
return seeds
if __name__ == "__main__":
coarse_level_length = 4.0 # (mm)
extent = numpy.array([8.0, 2.0], dtype=float) # (mm)
seeds = generate_seeds(coarse_level_length, extent)
plot_seeds(seeds, extent)
| null |
YALE_3D_Design_and_Fab/Voronoi Foam Project/original supplement/code/generate_seeds_recursive.py
|
generate_seeds_recursive.py
|
py
| 2,479 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "numpy.array",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.random.random",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.permutation",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "numpy.floor",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.random.random",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "numpy.ndindex",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlim",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylim",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.axes",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "numpy.random.seed",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "numpy.arange",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 61,
"usage_type": "call"
}
] |
622679236
|
# Execute a trigger using a congressional vote.
# ---------------------------------------------
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from contrib.models import Trigger
from contrib.legislative import execute_trigger_from_data_urls
class Command(BaseCommand):
args = 'trigger_id [-]vote_url [[-]vote_url...]'
help = 'Executes a trigger (by ID) using the URL to a GovTrack vote page.'
def handle(self, *args, **options):
args = list(args)
if len(args) < 2:
print("Usage: ./manage.my execute_trigger trigger_id [[-]govtrack_url...]")
print("Paste GovTrack vote URLs or bill URLs (to pull cosponsors). Precede a URL with a minus sign to flip its valence.")
return
# What trigger to execute.
t = Trigger.objects.get(id=args.pop(0))
# What votes?
votes = []
for arg in args:
flip = False
if arg[0] == "-":
flip = True
arg = arg[1:]
votes.append({
"url": arg,
"flip": flip,
})
# Go!
execute_trigger_from_data_urls(t, votes)
# Show what happened.
import pprint
print(t.execution.description)
pprint.pprint(t.execution.get_outcome_summary())
| null |
contrib/management/commands/execute_trigger.py
|
execute_trigger.py
|
py
| 1,172 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.core.management.base.BaseCommand",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "contrib.models.Trigger.objects.get",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "contrib.models.Trigger.objects",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "contrib.models.Trigger",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "contrib.legislative.execute_trigger_from_data_urls",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "pprint.pprint",
"line_number": 42,
"usage_type": "call"
}
] |
286039787
|
# coding=utf-8
'''
======================
3D surface (color map)
======================
Demonstrates plotting a 3D surface colored with the coolwarm color map.
The surface is made opaque by using antialiased=False.
Also demonstrates using the LinearLocator and custom formatting for the
z axis tick labels.
'''
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import numpy as np
fig = plt.figure()
ax = fig.gca(projection='3d')
# Make data.
y1 = np.arange(-5, 10, 0.25)
y2 = np.arange(-5, 10, 0.25)
y1, y2 = np.meshgrid(y1, y2)
Z1 = y1 + y2 + 2
Z2 = 2 * y1 - y2 + 4
Z3 = y1 - y2 + 1
Z4 = np.minimum(np.minimum(Z1,Z2),Z3)
# Plot the surface.
# surf1 = ax.plot_surface(X, Y, Z1, cmap=cm.coolwarm, linewidth=0, antialiased=False)
# surf2 = ax.plot_surface(X, Y, Z2, cmap=cm.coolwarm, linewidth=0, antialiased=False)
# surf3 = ax.plot_surface(X, Y, Z3, cmap=cm.coolwarm, linewidth=0, antialiased=False)
surf4 = ax.plot_surface(y1, y2, Z4, cmap=cm.coolwarm, linewidth=0, antialiased=False)
# ax.plot_trisurf(X, Y, Z1, linewidth=0.2, antialiased=True)
# Customize the z axis.
# ax.set_zlim(-1.01, 1.01)
ax.zaxis.set_major_locator(LinearLocator(10))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
# Add a color bar which maps values to colors.
fig.colorbar(surf4, shrink=0.5, aspect=5)
plt.show()
| null |
Optimisation_lineaire/simplex_dev2_ex3_plot3d.py
|
simplex_dev2_ex3_plot3d.py
|
py
| 1,427 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "numpy.arange",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.meshgrid",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.minimum",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "matplotlib.cm.coolwarm",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.cm",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "matplotlib.ticker.LinearLocator",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "matplotlib.ticker.FormatStrFormatter",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 51,
"usage_type": "name"
}
] |
16914057
|
"""
This module provides a CNN auto-encoder.
The model can be adjusted, such that 0-3 pooling operations will be performed.
"""
import numpy as np
from torch import nn
class MaxGridPool(nn.Module):
"""
This class enables max pooling relative to the featuremaps size.
Its counterpart is the :class:`src.nn.UpsampleGrid` class.
"""
def __init__(self, s_in, n_bins):
"""
:param s_in: width/ height of feature map
:param n_bin: total number of bins to be extracted (has to be quadratic)
"""
super().__init__()
size = int(s_in / np.sqrt(n_bins))
self.pooling = nn.MaxPool2d(size, stride=size, padding=0)
def forward(self, x):
"""
Max pooling relative to feature map height and width.
:param x: batch of feature maps
:return: feature maps reduced to the number of bins defined at :func:`src.nn.MaxGridPool.__init__`
"""
return self.pooling(x)
class UpsampleGrid(nn.Module):
"""
This class enables upsampling relative to the featuremaps size.
Its counterpart is the :class:`src.nn.MaxGridPool` class.
"""
def __init__(self, s_in, n_bin):
"""
:param s_in: width/ height of initial feature map
:param n_bin: total number of previously extracted bins (has to be quadratic)
"""
super().__init__()
size = int(s_in / np.sqrt(n_bin))
self.upsample = nn.Upsample(scale_factor=size, mode='nearest')
def forward(self, x):
"""
:param x: batcg of feature maps
:return: upsampled feature maps
"""
return self.upsample(x)
class Reshape(nn.Module):
"""
This class adapts the reshape operation to the nn.Module interface.
"""
def __init__(self, shape_out):
"""
:param shape_out: new shape of tensors
"""
super().__init__()
self.shape_out = shape_out
def forward(self, x):
"""
Reshaping the tensor.
:param x: any tensor that matches the size of the initialized shape
:return: reshaped tensor
"""
return x.reshape(x.shape[0], *self.shape_out)
class ConvNet(nn.Module):
"""
This class defines the auto-encoder model. Up to 3 pooling operations can be performed.
.. note::
Increasing the number of pooling operations will increase the depth of encoder and decoder networks.
"""
def __init__(self, c_in=1, c_out=1, img_h=64, img_w=64, act='LReLU', n_pooling=0):
"""
:param c_in: number of input channels/ feature maps
:param c_out: number of ouput channels/ feature maps
:param img_h: height of images
:param img_w: width of images
:param act: string indication whether to use LReLU or ReLU for hidden layers
:param n_pooling: number of pooling operations that will be performed
"""
assert img_h >= 32 and img_w == img_h, "images aught to be at least 32x32"
super().__init__()
self.n_pooling = n_pooling
self.img_w = img_w
self.img_h = img_h
self.c_in = c_in
self.c_out = c_out
# convolution & pooling
layer_conv = lambda l_in, l_out: nn.Sequential(nn.Conv2d(l_in, l_out, 3, padding=1),
nn.LeakyReLU() if act == 'LReLU' else nn.ReLU(),
nn.BatchNorm2d(l_out))
layer_deconv = lambda l_in, l_out: nn.Sequential(nn.ConvTranspose2d(l_in, l_out, 3, padding=1),
nn.LeakyReLU() if act == 'LReLU' else nn.ReLU(),
nn.BatchNorm2d(l_out))
self.conv_1 = nn.Sequential(layer_conv(c_in, 64),
layer_conv(64, 128))
if self.n_pooling > 0:
self.pool_1 = nn.MaxPool2d(5, stride=2, padding=2)
self.conv_2 = nn.Sequential(layer_conv(128, 128),
layer_conv(128, 256))
if self.n_pooling > 1:
self.pool_2 = nn.MaxPool2d(5, stride=2, padding=2)
self.conv_3 = nn.Sequential(layer_conv(256, 256),
layer_conv(256, 512))
if self.n_pooling > 2:
coeff_ds = 1.0 / self.pool_1.stride / self.pool_2.stride
assert (img_h) % 1.0 == 0 and (img_w) % 1.0 == 0, \
f"image height and width aught to be a multiple of {int(1./coeff_ds)}"
h_fmap = int(img_h * coeff_ds)
n_bins = 16**2
#self.pool_3 = MaxGridPool(h_fmap, n_bins)
self.pool_3 = nn.MaxPool2d(5, stride=2, padding=2)
self.encoder = nn.Sequential(layer_conv(512, 512),
layer_conv(512, 1024))
self.decoder = nn.Sequential(layer_deconv(1024, 1024),
layer_deconv(1024, 512))
# # linear encoder & decoder after third time pooling
# n_features = 512
# d = n_features * n_bins
# self.reshape_enc = Reshape(shape_out=(d,))
# layer_lin = lambda f_in, f_out: nn.Sequential(nn.Linear(f_in, f_out),
# nn.LeakyReLU() if act == 'LReLU' else nn.ReLU())
# self.encoder = nn.Sequential(layer_lin(d, 1024),
# #layer_lin(1024, 1024), nn.Dropout(0.5),
# layer_lin(1024, 1024))
# self.decoder = nn.Sequential(#layer_lin(1024, 1024), nn.Dropout(0.5),
# layer_lin(1024, 1024),
# layer_lin(1024, d))
# self.reshape_dec = Reshape(shape_out=(n_features, int(np.sqrt(n_bins)), int(np.sqrt(n_bins))))
# deconvolution & unpooling
if self.n_pooling > 2:
#self.unpool_3 = UpsampleGrid(h_fmap, n_bins)
self.unpool_3 = nn.Upsample(scale_factor=2, mode='nearest')
if self.n_pooling > 1:
self.deconv_3 = nn.Sequential(layer_deconv(512, 512), layer_deconv(512, 256))
self.unpool_4 = nn.Upsample(scale_factor=2, mode='nearest')
if self.n_pooling > 0:
self.deconv_4 = nn.Sequential(layer_deconv(256, 256), layer_deconv(256, 128))
self.unpool_5 = nn.Upsample(scale_factor=2, mode='nearest')
self.deconv_5 = nn.Sequential(layer_deconv(128, 128), layer_deconv(128, 64))
# output layer
self.output_layer = nn.ConvTranspose2d(64, c_out, 3, padding=1)
def conv(self, x):
"""
Convolution (/ cross correlation) part of the encoder.
:param x: input image
:return: convolved (/ cross correlated) feature maps
"""
y = self.conv_1(x)
if self.n_pooling > 0:
y = self.pool_1(y)
y = self.conv_2(y)
if self.n_pooling > 1:
y = self.pool_2(y)
y = self.conv_3(y)
if self.n_pooling > 2:
y = self.pool_3(y)
return y
def deconv(self, x):
"""
The transposed convolution (/ cross correlation).
:param x: neural code
:return: feature maps for the output layer
"""
y = x
if self.n_pooling > 2:
y = self.unpool_3(y)
if self.n_pooling > 1:
y = self.deconv_3(y)
y = self.unpool_4(y)
if self.n_pooling > 0:
y = self.deconv_4(y)
y = self.unpool_5(y)
y = self.deconv_5(y)
return y
def encode(self, x):
"""
Encoding the input image to a neural code.
:param x: input image
:return: neural code
"""
y = self.conv(x)
if self.n_pooling > 2:
#y = self.reshape_enc(y)
y = self.encoder(y)
return y
def decode(self, x):
"""
Decoding the the input image from a neural code.
:param x: neural code
:return: feature maps for the output layer
"""
y = x
if self.n_pooling > 2:
y = self.decoder(x)
#y = self.reshape_dec(y)
y = self.deconv(y)
return y
def forward(self, x):
"""
The models forward pass, embodying encoding, decoding and final activations of the output layer.
:param x: input image
:return: predicted image
"""
if self.n_pooling > 2:
assert x.shape[-2] == self.img_h and self.img_w == x.shape[-1], \
f'only {self.img_h}x{self.img_w}images allowed for this model'
y = self.encode(x)
y = self.decode(y)
y = self.output_layer(y)
return y
| null |
src/nn.py
|
nn.py
|
py
| 9,125 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "torch.nn.Module",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "numpy.sqrt",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "torch.nn.MaxPool2d",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "torch.nn.Module",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "numpy.sqrt",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "torch.nn.Upsample",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "torch.nn.Module",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "torch.nn.Module",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "torch.nn.LeakyReLU",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 105,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "torch.nn.BatchNorm2d",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 106,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 107,
"usage_type": "name"
},
{
"api_name": "torch.nn.ConvTranspose2d",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "torch.nn.LeakyReLU",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 108,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "torch.nn.BatchNorm2d",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 110,
"usage_type": "name"
},
{
"api_name": "torch.nn.MaxPool2d",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 113,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 114,
"usage_type": "name"
},
{
"api_name": "torch.nn.MaxPool2d",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 117,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 118,
"usage_type": "name"
},
{
"api_name": "torch.nn.MaxPool2d",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 127,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 128,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 130,
"usage_type": "name"
},
{
"api_name": "torch.nn.Upsample",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 148,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 150,
"usage_type": "name"
},
{
"api_name": "torch.nn.Upsample",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 151,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 153,
"usage_type": "name"
},
{
"api_name": "torch.nn.Upsample",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 154,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 155,
"usage_type": "name"
},
{
"api_name": "torch.nn.ConvTranspose2d",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 157,
"usage_type": "name"
}
] |
226979710
|
from datetime import datetime, timedelta
from pymongo import MongoClient
import os
# cliente = MongoClient('localhost', 27017)
cliente = MongoClient(os.environ.get('data_eoc_mongo_host'),
int(os.environ.get('data_eoc_mongo_port')))
banco = cliente['doc']
class MongoArticleItem:
collection = "articles"
@staticmethod
def insert_into(data):
db_collection = banco[MongoArticleItem.collection]
data.update({"data_criacao": datetime.utcnow()})
new_id = db_collection.insert_one(data).inserted_id
return new_id
| null |
doc/doc/mongo_wrapper.py
|
mongo_wrapper.py
|
py
| 577 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pymongo.MongoClient",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.utcnow",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 18,
"usage_type": "name"
}
] |
355579185
|
import pickle as pk
import re
import numpy as np
from keras.models import load_model
from keras.preprocessing.sequence import pad_sequences
from util import load_word_re, load_type_re, load_pair, word_replace, map_item
def ind2label(label_inds):
ind_labels = dict()
for word, ind in label_inds.items():
ind_labels[ind] = word
return ind_labels
seq_len = 30
path_stop_word = 'dict/stop_word.txt'
path_type_dir = 'dict/word_type'
path_homo = 'dict/homonym.csv'
path_syno = 'dict/synonym.csv'
stop_word_re = load_word_re(path_stop_word)
word_type_re = load_type_re(path_type_dir)
homo_dict = load_pair(path_homo)
syno_dict = load_pair(path_syno)
path_word2ind = 'model/word2ind.pkl'
path_label_ind = 'feat/label_ind.pkl'
with open(path_word2ind, 'rb') as f:
word2ind = pk.load(f)
with open(path_label_ind, 'rb') as f:
label_inds = pk.load(f)
ind_labels = ind2label(label_inds)
paths = {'dnn': 'model/dnn.h5',
'cnn': 'model/cnn.h5',
'rnn': 'model/rnn.h5'}
models = {'dnn': load_model(map_item('dnn', paths)),
'cnn': load_model(map_item('cnn', paths)),
'rnn': load_model(map_item('rnn', paths))}
def predict(text, name):
text = re.sub(stop_word_re, '', text.strip())
for word_type, word_re in word_type_re.items():
text = re.sub(word_re, word_type, text)
text = word_replace(text, homo_dict)
text = word_replace(text, syno_dict)
seq = word2ind.texts_to_sequences([text])[0]
pad_seq = pad_sequences([seq], maxlen=seq_len)
model = map_item(name, models)
probs = model.predict(pad_seq)[0]
sort_probs = sorted(probs, reverse=True)
sort_inds = np.argsort(-probs)
sort_preds = [ind_labels[ind] for ind in sort_inds]
formats = list()
for pred, prob in zip(sort_preds, sort_probs):
formats.append('{} {:.3f}'.format(pred, prob))
return ', '.join(formats)
if __name__ == '__main__':
while True:
text = input('text: ')
print('dnn: %s' % predict(text, 'dnn'))
print('cnn: %s' % predict(text, 'cnn'))
print('rnn: %s' % predict(text, 'rnn'))
| null |
classify.py
|
classify.py
|
py
| 2,116 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "util.load_word_re",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "util.load_type_re",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "util.load_pair",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "util.load_pair",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "keras.models.load_model",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "util.map_item",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "keras.models.load_model",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "util.map_item",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "keras.models.load_model",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "util.map_item",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "util.word_replace",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "util.word_replace",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "keras.preprocessing.sequence.pad_sequences",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "util.map_item",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "numpy.argsort",
"line_number": 61,
"usage_type": "call"
}
] |
95195473
|
import plotly
import plotly.graph_objs as go
import pandas as pd
import json
def create_plot_calculos(capital_vector):
    """Build a line chart of accumulated interest, contributions and total
    capital over the index of *capital_vector* and return it serialized as
    Plotly-compatible JSON (for embedding in a web page)."""
    x_axis = capital_vector.index.values

    # One Scatter trace per (legend label, DataFrame column) pair.
    series = (
        ('Juros', 'Juros Acumulados'),
        ('Aportes', 'Valor Aportado'),
        ('Capital', 'Capital'),
    )
    fig = go.Figure(data=[
        go.Scatter(name=label, x=x_axis, y=capital_vector[column].values)
        for label, column in series
    ])

    grey = 'rgb(204, 204, 204)'

    def _tick_font():
        # Fresh dict per axis so the axes never share a mutable object.
        return dict(family='Arial', size=12, color='rgb(82, 82, 82)')

    fig.update_layout(
        xaxis=dict(
            showline=True,
            showgrid=True,
            showticklabels=True,
            zeroline=True,
            zerolinecolor=grey,
            linecolor=grey,
            linewidth=2,
            ticks='outside',
            tickfont=_tick_font(),
            gridwidth=1,
            gridcolor=grey,
            # One tick per period on the x axis.
            tickmode='linear',
            tick0=0,
            dtick=1,
        ),
        yaxis=dict(
            showgrid=True,
            zeroline=True,
            zerolinecolor=grey,
            showline=True,
            showticklabels=True,
            ticks='outside',
            linecolor=grey,
            linewidth=2,
            tickfont=_tick_font(),
            gridwidth=1,
            gridcolor=grey,
            # Brazilian currency formatting on the y axis.
            tickprefix='R$ ',
            tickformat='.2f',
        ),
        legend=dict(
            orientation="h",
            yanchor="bottom",
            y=1.05,
            xanchor="right",
            x=1,
        ),
        autosize=True,
        showlegend=True,
        plot_bgcolor='white',
        hovermode="x unified",
    )
    return json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
| null |
application/plotly_wrapper.py
|
plotly_wrapper.py
|
py
| 2,150 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "plotly.graph_objs.Figure",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objs",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "plotly.graph_objs.Scatter",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objs",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "plotly.graph_objs.Scatter",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objs",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "plotly.graph_objs.Scatter",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objs",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "plotly.utils",
"line_number": 82,
"usage_type": "attribute"
}
] |
451117721
|
import csv
import time
import requests
from bs4 import BeautifulSoup
url = 'https://quotes.toscrape.com/'
website = BeautifulSoup(requests.get(url).text, 'html.parser')
quotes = website.find_all('div', class_='quote')

header = ['Text', 'Author', 'Tags']
text, author, tags = [], [], []
for quote in quotes:
    text.append(quote.find(class_='text').text.strip())
    author.append(quote.find(class_='author').text.strip())
    tags_container = quote.find('div', class_='tags').find_all('a')
    tags.append([tag.text for tag in tags_container])
    # print('Stopping For 5sec')
    # time.sleep(5)

# Fix: the original wrote three transposed rows (all texts, then all
# authors, then all tag lists), which did not match the per-quote
# Text/Author/Tags header. Write one row per quote instead, and open with
# newline='' as the csv module requires (avoids blank lines on Windows).
with open('quotes.csv', 'w', newline='') as f:
    writer = csv.writer(f)
    writer.writerow(header)
    writer.writerows(zip(text, author, tags))
| null |
quizz4.py
|
quizz4.py
|
py
| 820 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "bs4.BeautifulSoup",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "csv.writer",
"line_number": 22,
"usage_type": "call"
}
] |
412615273
|
from argparse import ArgumentParser, ArgumentTypeError
import logging

import numpy as np
from lxml import etree
def restricted_float(x):
    """Argparse type callback: parse *x* as a float constrained to [0.0, 1.0].

    Raises ArgumentTypeError (the exception argparse expects from type
    callbacks) if *x* is not a float literal or falls outside the range.
    Fix: ArgumentTypeError was referenced but never imported, so both
    error paths crashed with NameError instead of a usage message.
    """
    try:
        x = float(x)
    except ValueError:
        raise ArgumentTypeError("%r not a floating-point literal" % (x,))
    if x < 0.0 or x > 1.0:
        raise ArgumentTypeError("%r not in range [0.0, 1.0]" % (x,))
    return x
def is_subsequence(pattern, entry):
    """Return True if *pattern* is a (not necessarily contiguous)
    subsequence of *entry*, preserving order.

    Fix: the original recursed once per discarded element of *entry*,
    so a long transaction could exceed Python's recursion limit. This
    iterative version consumes a single iterator over *entry*: each
    `item in it` scans forward until it finds the item, so later items
    can only match later positions — exactly the subsequence relation.
    Empty *pattern* is trivially a subsequence (all() of nothing is True).
    """
    it = iter(entry)
    return all(item in it for item in pattern)
def read_patterns(input_file, separator):
    """Lazily yield each line of *input_file* as a pattern: the line with
    its trailing newline stripped, split into items on *separator*."""
    return (
        raw_line.rstrip('\n').split(sep=separator)
        for raw_line in input_file
    )
def compress_patterns(
    db_file,
    input_file,
    output_file,
    tag,
    jaccard_threshold,
    is_sequence,
    separator
):
    """Cluster similar patterns and write one representative per cluster.

    Reads patterns from *input_file*, matches each one against the
    'inproceedings' records of the XML *db_file* (looking at child *tag*
    elements), computes pairwise Jaccard distances over the sets of
    matching transaction ids, greedily clusters patterns whose
    complete-linkage distance is below *jaccard_threshold*, and writes
    each cluster's medoid pattern to *output_file* (items joined with
    *separator*). *is_sequence* selects subsequence matching instead of
    subset matching.
    """
    logging.info('Extracting the patterns.')
    # Extract the patterns. Each match record pairs a pattern with the
    # set of transaction ids it occurs in (filled below).
    matches = list(
        map(
            lambda pattern: { 'pattern' : pattern, 'transactions' : set() },
            read_patterns(input_file, separator)
        )
    )
    logging.debug('Patterns')
    logging.debug(list(map(lambda match: match['pattern'], matches)))
    # Compute pattern matching against XML database.
    logging.info('Computing pattern matching against XML database.')
    context = etree.iterparse(
        db_file,
        dtd_validation=True,
        events=('end',),
        recover=True,
    )
    tid = 0
    for _, node in context:
        if node.tag != 'inproceedings':
            continue
        item_list = list(
            map(
                lambda node: node.text,
                filter(
                    lambda node: node is not None and node.text is not None,
                    node.findall(tag),
                ),
            )
        )
        if not item_list:
            continue
        for match in matches:
            # Check for subsequence match in case of sequential patterns.
            if is_sequence:
                # NOTE(review): only item_list[0] is treated as the whole
                # transaction here — presumably a sequence is stored as one
                # separator-joined string; confirm against the data format.
                transaction = item_list[0].split(sep=separator)
                if not is_subsequence(match['pattern'], transaction):
                    continue
                logging.debug(
                    'Pattern {} matched {}'.format(
                        match['pattern'], transaction
                    )
                )
            # Check for subset match in case of itemset patterns.
            elif not set(match['pattern']).issubset(item_list):
                continue
            else:
                logging.debug(
                    'Pattern {} matched {}'.format(match['pattern'], item_list)
                )
            match['transactions'].add(tid)
        # tid counts only records that reached the matching stage; skipped
        # records (wrong tag / empty item list) do not consume an id.
        tid = tid + 1
    # Compute pattern distances.
    logging.info('Computing pattern distances.')
    # For each pattern, calculate its Jaccard distance to the other patterns.
    jaccard_matrix = np.zeros((len(matches), len(matches)))
    for rowIx in range(len(matches)):
        for colIx in range(rowIx):
            tA = matches[rowIx]['transactions']
            tB = matches[colIx]['transactions']
            logging.debug('Pattern {}'.format(matches[rowIx]['pattern']))
            logging.debug(tA)
            logging.debug('Pattern {}'.format(matches[colIx]['pattern']))
            logging.debug(tB)
            # Jaccard Distance = 1 - | Da ^ Db | / | Da v Db |
            # where Da and Db are the transactions pattern A and B occur
            # respectively.
            # NOTE(review): if neither pattern matched any transaction, the
            # union is empty and this divides by zero — confirm inputs
            # always contain patterns with at least one match.
            distance = 1 - len(tA.intersection(tB)) / len(tA.union(tB))
            jaccard_matrix[rowIx, colIx] = distance
    # Because distances are symmetric, only calculate the lower triangle of the
    # distance matrix and populate the upper triangle by forcing symmetry.
    jaccard_matrix = jaccard_matrix + jaccard_matrix.T
    logging.debug('Jaccardian matrix')
    logging.debug(jaccard_matrix)
    # Compute clusters.
    logging.info('Computing clusters.')
    clusters = []
    # For each pattern:
    for matchIx in range(len(matches)):
        # If there are no previous clusters, create one for the current pattern.
        if not clusters:
            clusters.append([matchIx])
            continue
        minCluster = None
        minDistance = None
        # Calculate the distance to the closest cluster for the current pattern
        # and return the cluster and distance.
        for cluster in clusters:
            # The distance to a cluster is determined by the Jaccard distance
            # between the current pattern and the farthest pattern in the
            # cluster, that is, complete linkage.
            distance = max(jaccard_matrix[matchIx, cluster])
            if minDistance is None or distance < minDistance:
                minDistance = distance
                minCluster = cluster
        # If the distance to the closest cluster is less than a threshold,
        # assign the pattern to the cluster.
        if minDistance < jaccard_threshold:
            minCluster.append(matchIx)
        # Else create a new cluster for the pattern.
        else:
            clusters.append([matchIx])
    logging.debug('Clusters')
    logging.debug(clusters)
    logging.debug(
        list(
            map(
                lambda cluster: (
                    [matches[matchIx]['pattern'] for matchIx in cluster]
                ),
                clusters,
            )
        )
    )
    # Write every cluster medoid as a compressed pattern.
    logging.info('Writing every cluster medoid as a compressed pattern.')
    # For each cluster:
    for cluster in clusters:
        # Get the pattern that is closest to the 'center' of the cluster.
        medoid = matches[
            min(
                map(
                    lambda matchIx: {
                        'matchIx' : matchIx,
                        # The distance to a cluster's center is determined
                        # by the average distance of each pattern to others
                        # in the same cluster.
                        'distance' : np.average(
                            jaccard_matrix[matchIx, cluster]
                        ),
                    },
                    cluster,
                ),
                key=lambda match: match['distance'],
            )['matchIx']
        ]
        pattern = separator.join(medoid['pattern'])
        output_file.write('{}\n'.format(pattern))
    logging.info(
        '{} out of {} patterns selected.'.format(len(clusters), len(matches))
    )
    compression_rate = 0
    if len(matches) != 0:
        compression_rate = (len(matches) - len(clusters)) / len(matches) * 100
    logging.info('{:.2f}% compression rate.'.format(compression_rate))
if __name__ == '__main__':
    # Command-line front end for compress_patterns().
    parser = ArgumentParser(description='Remove pattern redundancy.')
    parser.add_argument(
        '--log',
        choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
        default='ERROR',
        help='the log level (Default: ERROR)',
    )
    parser.add_argument(
        '-i',
        '--input_file',
        required=True,
        help='REQUIRED: the input file that stores the uncompressed patterns',
    )
    parser.add_argument(
        '-o',
        '--output_file',
        required=True,
        help='REQUIRED: the output file that will store the compressed patterns',
    )
    parser.add_argument(
        '-t',
        '--tag',
        required=True,
        help='REQUIRED: the tag to search for in the XML database',
    )
    parser.add_argument(
        '-d',
        '--distance',
        required=True,
        type=restricted_float,
        help='REQUIRED: the Jaccard distance threshold for every cluster of patterns',
    )
    parser.add_argument(
        '--sequence',
        action='store_true',
        help='whether patterns are sequences or not (default: False)',
    )
    parser.add_argument(
        '--separator',
        default=' ',
        help='the string separating items in a transaction (default: <SPACE>)',
    )
    parser.add_argument(
        'db_file', help='the XML input file with all the transactions'
    )
    args = parser.parse_args()
    numeric_level = getattr(logging, args.log.upper(), None)
    if not isinstance(numeric_level, int):
        # Defensive check (choices= already restricts --log to valid names).
        # Fix: the original referenced an undefined name `loglevel` here,
        # which raised NameError instead of the intended ValueError.
        raise ValueError('Invalid log level: %s' % args.log)
    logging.basicConfig(level=numeric_level)
    # lxml needs the database in binary mode; patterns are plain text.
    db_file = open(args.db_file, 'rb')
    input_file = open(args.input_file, 'r')
    output_file = open(args.output_file, 'w+')
    try:
        compress_patterns(
            db_file,
            input_file,
            output_file,
            args.tag,
            args.distance,
            args.sequence,
            args.separator
        )
    finally:
        # Always release the file handles, even if compression fails.
        db_file.close()
        input_file.close()
        output_file.close()
| null |
compress.py
|
compress.py
|
py
| 9,250 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "logging.info",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "lxml.etree.iterparse",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "lxml.etree",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "logging.debug",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "numpy.average",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 222,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 230,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "logging.basicConfig",
"line_number": 286,
"usage_type": "call"
}
] |
573304916
|
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
#import copy
import numpy as np
import pandas as pd
import matplotlib
#import matplotlib.pyplot as plt
matplotlib.style.use('ggplot')
formatDate = lambda x: (int(x[1]) - 1) * 7 + int(x[3])
def writeLocalCsv(path, frames):
    """Write DataFrame *frames* to *path*, one line per row:
    the index, a tab, then the row's values as comma-joined ints.

    Fix: removed the stray output.close() that followed the `with`
    block — the context manager already closes the file, and the extra
    call was dead code left over from the commented-out open().
    """
    with open(path, 'w') as output:
        for index, row in frames.iterrows():
            output.write("%s\t%s\n" % (index, ','.join(str(int(e)) for e in row.tolist())))
# Load the raw visit log. NOTE(review): `input` shadows the builtin of the
# same name for the rest of the module.
input = pd.read_csv('./data/data.txt', sep = '\t', header = None)
input.columns = ['id', 'date', 'type', 'count']
# weekday = day digit; date = absolute day number (see formatDate).
input['weekday'] = input['date'].apply(lambda x: int(x[3]))
input['date'] = input['date'].apply(formatDate)
# Site type 'v<N>' -> integer N. NOTE(review): np.int was removed in
# NumPy 1.24 — this line fails on modern NumPy; use int instead.
input['type'] = input['type'].str.extract('v(\d+)').astype(np.int)
total = []
#grpByUser = input.groupby('id')
#dataDict = {}
#xRow = np.zeros(17).tolist()
#for name, group in grpByUser:
#    item = {'x': [], 'y': []}
#    for index, row in group.iterrows():
#        v = copy.deepcopy(xRow)
#        v[row['date'] - 1] = 1
#        v[row['type'] + 6] = 1
#        item['x'].append(v)
#        item['y'].append(row['count'])
#
#    dataDict[name] = item
### 5, 7, 8, 9 not degrade significant
groupByType = input.groupby('type')
for name, frame in groupByType:
    ##after day31
    recent = frame[frame['date'] > 31]
    gByDate = recent.groupby('date', as_index=True).agg({'count': np.sum})
    # NOTE(review): pd.ewma was removed from pandas (use
    # Series.ewm(span=7).mean() on modern versions).
    gByDate['ewma'] = pd.ewma(gByDate['count'], span=7)
    # Forecast: the last 7 smoothed daily totals, truncated to int.
    predictDaysL = gByDate.iloc[-7:]['ewma'].astype('int').tolist()
    gByUser = recent.groupby(['id','weekday'], as_index=False).agg({'count': np.sum})
    # NOTE(review): the two lines below that compute 'count_p' are
    # commented out, but 'count_p' is used in the loop that follows —
    # as written this raises KeyError. Presumably they should be active.
    #totalVisit = gByUser['count'].sum()
    #gByUser['count_p'] = gByUser['count'] / totalVisit
    result = pd.DataFrame()
    for i in range(0,7):
        # Users whose visit share covers at least one predicted visit.
        greater1 = gByUser[gByUser['count_p'] >= 1.0 / predictDaysL[i]]
        smaller1 = gByUser[gByUser['count_p'] < 1.0 / predictDaysL[i]] \
            .sort_values('count',ascending=False)
        predict = (predictDaysL[i] * greater1['count_p']).astype('int')
        # Distribute the remaining predicted visits, one each, to the
        # most active of the remaining users.
        get_smaller_c = predictDaysL[i] - predict.sum()
        cutSmaller1 = smaller1.iloc[:get_smaller_c]
        cutSmaller1['temp'] = 1
        result['d' + str(i+1) + 's' + str(name)] \
            = predict.append(cutSmaller1['temp'])
        #cutSmaller1['day' + str(i+1)] = 1
    #not_nan = result.fillna(0)
    total.append(result)
# Assemble the per-type predictions into one frame, ordered d1s1..d7s10.
full = pd.concat(total, axis = 1).fillna(0)
cols =[]
for d in range(1, 8):
    for s in range(1, 11):
        cols.append('d' + str(d) + 's' + str(s) )
full = full[cols]
writeLocalCsv('test.txt', full)
#site1 = groupByType.get_group(1)
#countBydate = site1.groupby('date', as_index=False) \
#    .agg({'count' : np.sum})
#countBydate.plot(x='date', y='count')
| null |
bbb.py
|
bbb.py
|
py
| 2,796 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "matplotlib.style.use",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "matplotlib.style",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_csv",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.int",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "numpy.sum",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "pandas.ewma",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 73,
"usage_type": "call"
}
] |
600559882
|
from django.conf import settings
import pytest
from model_mommy import mommy
from usaspending_api.references.models import Agency, OfficeAgency, SubtierAgency, ToptierAgency
def pytest_configure():
    """Trim Django settings for tests: keep only the default database
    and drop all database routers, so the test setup never touches a
    second database."""
    default_db = settings.DATABASES.pop('default', None)
    settings.DATABASES.clear()
    settings.DATABASES['default'] = default_db
    # Routers could redirect queries to the removed databases.
    settings.DATABASE_ROUTERS.clear()
@pytest.fixture(scope='session')
def agencies():
    """Setup agency hierarchy for use in tests.

    Creates two complete office/subtier/toptier/agency chains."""
    fixtures = (
        (1, 'aac1', 'The Office', 'sub1', 'Subtiers of a Clown',
         'cgac1', 'Department of Test Data Naming'),
        (2, 'aac2', 'Office Space', 'sub2', 'Subtiers in my Beers',
         'cgac2', 'Department of Bureacracy'),
    )
    for agency_id, aac, office, sub_code, sub_name, cgac, top_name in fixtures:
        o = mommy.make(OfficeAgency, aac_code=aac, name=office)
        s = mommy.make(SubtierAgency, subtier_code=sub_code, name=sub_name)
        t = mommy.make(ToptierAgency, cgac_code=cgac, name=top_name)
        mommy.make(Agency, id=agency_id, toptier_agency=t, subtier_agency=s, office_agency=o)
| null |
usaspending_api/conftest.py
|
conftest.py
|
py
| 1,345 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.conf.settings.DATABASES.pop",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.DATABASES",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "django.conf.settings.DATABASES.clear",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.DATABASES",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "django.conf.settings.DATABASES",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "django.conf.settings.DATABASE_ROUTERS.clear",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.DATABASE_ROUTERS",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "model_mommy.mommy.make",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "usaspending_api.references.models.OfficeAgency",
"line_number": 23,
"usage_type": "argument"
},
{
"api_name": "model_mommy.mommy",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "model_mommy.mommy.make",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "usaspending_api.references.models.SubtierAgency",
"line_number": 24,
"usage_type": "argument"
},
{
"api_name": "model_mommy.mommy",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "model_mommy.mommy.make",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "usaspending_api.references.models.ToptierAgency",
"line_number": 25,
"usage_type": "argument"
},
{
"api_name": "model_mommy.mommy",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "model_mommy.mommy.make",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "usaspending_api.references.models.Agency",
"line_number": 26,
"usage_type": "argument"
},
{
"api_name": "model_mommy.mommy",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "model_mommy.mommy.make",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "usaspending_api.references.models.OfficeAgency",
"line_number": 27,
"usage_type": "argument"
},
{
"api_name": "model_mommy.mommy",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "model_mommy.mommy.make",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "usaspending_api.references.models.SubtierAgency",
"line_number": 28,
"usage_type": "argument"
},
{
"api_name": "model_mommy.mommy",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "model_mommy.mommy.make",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "usaspending_api.references.models.ToptierAgency",
"line_number": 29,
"usage_type": "argument"
},
{
"api_name": "model_mommy.mommy",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "model_mommy.mommy.make",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "usaspending_api.references.models.Agency",
"line_number": 30,
"usage_type": "argument"
},
{
"api_name": "model_mommy.mommy",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "pytest.fixture",
"line_number": 20,
"usage_type": "call"
}
] |
194012912
|
import numpy as np
import cv2
# Slide-show configuration: number of images, seconds per cross-fade,
# and how many blending steps each fade uses.
pictureNum = 6
showTime = 5
timeStep = 100
picture = list()
for i in range(pictureNum):
    # NOTE(review): hard-coded absolute Windows path; cv2.imread returns
    # None (not an exception) when a file is missing — confirm all six
    # 'Picture #N.jpg' files exist before running.
    pictureAdress = 'C:/Users/zczc1/Pictures/Saved Pictures/Picture #' + str(i + 1) + '.jpg'
    picture.append(cv2.imread(pictureAdress, 1))
def slideShow():
    """Loop forever, cross-fading each picture into the next; return as
    soon as the user presses 'q' in the display window."""
    while True:
        for idx in range(pictureNum):
            current = picture[idx]
            following = picture[(idx + 1) % pictureNum]
            for step in range(timeStep):
                alpha = step / timeStep
                frame = cv2.addWeighted(current, 1 - alpha, following, alpha, 0)
                cv2.imshow('SlideShow', frame)
                if cv2.waitKey(int(showTime / timeStep * 1000)) == ord('q'):
                    return
# Run the show; tear down the OpenCV window once the user quits.
slideShow()
cv2.destroyAllWindows()
| null |
Desk Arranging Robot/Vision/OpenCV Examples/slideShow.py
|
slideShow.py
|
py
| 742 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "cv2.imread",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "cv2.addWeighted",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "cv2.destroyAllWindows",
"line_number": 25,
"usage_type": "call"
}
] |
606495558
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 Domsense s.r.l. (<http://www.domsense.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime
from dateutil.relativedelta import relativedelta
from osv import fields, osv
class sale_order_dates(osv.osv):
    """Extend sale.order with a computed date of the last scheduled delivery."""
    _inherit = 'sale.order'

    def _get_commitment_last_date(self, cr, uid, ids, name, arg, context=None):
        """Compute, per order id, the latest line delivery date.

        Each line's delivery date is the order date plus the line's delay
        (in days); the maximum across lines is returned as 'YYYY-MM-DD'.
        Orders with no lines are omitted from the result.
        Fix: removed the dead `dates_list = []` before the loop — it was
        immediately shadowed by the per-order re-initialization.
        """
        res = {}
        for order in self.browse(cr, uid, ids, context=context):
            dates_list = []
            for line in order.order_line:
                # `line.delay or 0.0` treats a missing delay as same-day.
                dt = datetime.strptime(order.date_order, '%Y-%m-%d') + relativedelta(days=line.delay or 0.0)
                dt_s = dt.strftime('%Y-%m-%d')
                dates_list.append(dt_s)
            if dates_list:
                res[order.id] = max(dates_list)
        return res

    _columns = {
        'commitment_last_date': fields.function(_get_commitment_last_date, method=True, type='date', string='Data ultima consegna', help="Date on which delivery of products is to be made."),
    }

sale_order_dates()
| null |
custom/Report_8_Se/sale.py
|
sale.py
|
py
| 1,947 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "osv.osv.osv",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "osv.osv",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "dateutil.relativedelta.relativedelta",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "osv.fields.function",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "osv.fields",
"line_number": 43,
"usage_type": "name"
}
] |
237836787
|
#!/bin/env python
# -*- coding: cp1252 -*-
"""
Simple HtmlWindow that redirects all web links (http://)
to a new browser instance.
Created on: 7/09/2010
Author: Tennessee Carmel-Veilleux (tcv -at- ro.boto.ca)
Revision: $Rev: 20 $
Copyright 2010 Tennessee Carmel-Veilleux
Description:
Simple HtmlWindow that redirects all web links (http://)
to a new browser instance.
License:
Copyright (c) 2010, Tennessee Carmel-Veilleux
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of SONIA AUV nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import wx.html as html
import webbrowser
class ExternalBrowserHtmlWindow(html.HtmlWindow):
    """HtmlWindow that opens web links in the system browser instead of
    navigating inside the embedded widget."""

    def __init__(self, parent, id, pos, size, style):
        html.HtmlWindow.__init__(self, parent, id, pos, size, style)

    def OnLinkClicked(self, link):
        """Send web links to the default browser; delegate everything
        else (anchors, relative links) to the default handler.

        Fix: also treat https:// links as external — the original only
        matched http://, so secure links opened inside the widget.
        """
        href = link.GetHref()
        if href.startswith(("http://", "https://")):
            webbrowser.open_new_tab(href)
        else:
            html.HtmlWindow.OnLinkClicked(self, link)
| null |
AutoBGA/ExternalBrowserHtmlWindow.py
|
ExternalBrowserHtmlWindow.py
|
py
| 2,378 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "wx.html.HtmlWindow",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "wx.html",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "wx.html.HtmlWindow.__init__",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "wx.html.HtmlWindow",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "wx.html",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "webbrowser.open_new_tab",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "wx.html.HtmlWindow.OnLinkClicked",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "wx.html.HtmlWindow",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "wx.html",
"line_number": 58,
"usage_type": "name"
}
] |
76095793
|
import json #dirbti su json tipo duomenim
import requests #siusti http uzklausas
import csv #dirbti su .csv tipo failais
createUrl = 'http://127.0.0.1:5000/create/'
usersUrl = 'http://127.0.0.1:5000/members/'
#Sukuria tik viena useri pagal imputa
def fun_create_usr(createUrl):
    """Prompt for a single user's fields on stdin and upload them to the
    create endpoint via fun_send_data."""
    fname = input('enter name:')
    lname = input('enter last_name:')
    bday = input('enter birthday:')
    fun_send_data(createUrl, fname, lname, bday)
def fun_send_data(createUrl, fname, lname, bday):
    """PUT one user's data to the create endpoint.

    NOTE(review): the URL is built by plain concatenation with no
    separators between the fields (e.g. '.../create/JohnDoe1990-01-01') —
    confirm the server route expects that shape. The response is bound but
    never checked, so HTTP errors pass silently.
    """
    url = '%s%s%s%s' % (createUrl, fname, lname, bday)
    response = requests.put(url, data={"fname": fname, "lname": lname, "bday": bday})
def fun_get_usr(usersUrl):
    """Fetch the members endpoint and return its decoded JSON payload."""
    reply = requests.get(usersUrl)
    return json.loads(reply.content)
def fun_csv_to_web():
    """Read a local CSV file (name prompted on stdin) and upload every
    row as one user via fun_send_data.

    Fix: the file is now opened with a context manager so the handle is
    closed even if reading raises (the original open()/close() pair
    leaked it on error).
    """
    global createUrl
    fileNm = input('Enter file name to import "example.csv":')
    with open(fileNm) as file:
        data = list(csv.reader(file))
    for i in data:
        # Columns: first name, last name, birthday.
        fun_send_data(createUrl, i[0], i[1], i[2])
def fun_web_to_csv():
    """Download all users from the web API and export them to a CSV file
    whose name is prompted on stdin.

    Fix: the output file is now managed by a `with` block so it is
    closed (and flushed) even if the download or a write raises; the
    original explicit close() was skipped on any exception.
    """
    global usersUrl
    fileNm = input('Enter file name for export "example.csv":')
    data = fun_get_usr(usersUrl)
    with open(fileNm, 'w', newline='') as outputFile:
        writer = csv.writer(outputFile)
        for var in list(data):
            writer.writerow([data[var]['fname'], data[var]['lname'], data[var]['bday']])
def menu():
    """Print the task menu, read the user's choice, and dispatch to the
    matching upload/download helper.

    Fix: the original called int(option) four times and crashed with an
    uncaught ValueError on non-numeric input; the choice is now parsed
    once and invalid entries get the same 'No such option' exit path.
    """
    print("Select task nr. from menu and enter it")
    print("1 upload single user")
    print("2 upload *.csv file's data to web-api")
    print("3 dowload web-api data to *.csv file")
    print("4 see all users in web-api")
    option = input('Enter menu number:')
    try:
        choice = int(option)
    except ValueError:
        print('No such option')
        exit(1)
    if choice == 1:
        fun_create_usr(createUrl)
    elif choice == 2:
        fun_csv_to_web()
    elif choice == 3:
        fun_web_to_csv()
    elif choice == 4:
        print(fun_get_usr(usersUrl))
    else:
        print('No such option')
        exit(1)

menu()
| null |
webscript.py
|
webscript.py
|
py
| 1,909 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "requests.put",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "csv.reader",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "csv.writer",
"line_number": 39,
"usage_type": "call"
}
] |
210240400
|
"""
Первая версия первого задания
"""
from collections import Counter
import timeit
from students.mignatenko.day1.task1.constants import TEXT, REPEAT_COUNT
def func():
    """Tally the vowel and consonant frequencies of TEXT (lower-cased),
    returning {'vowels': {...}, 'consonants': {...}} of per-character counts.

    Kept as two passes over TEXT on purpose — this is the benchmarked
    'first version' of the exercise.
    """
    return {
        'vowels': dict(Counter(ch for ch in TEXT.lower() if ch in 'aeiou')),
        'consonants': dict(Counter(ch for ch in TEXT.lower() if ch in 'bcdfghjklmnpqrstvwxyz')),
    }
if __name__ == "__main__":
r = timeit.Timer(func)
print(r.timeit(REPEAT_COUNT))
| null |
Python/SCRIPT-003 Python Applied/src/students/mignatenko/day1/task1/init.py
|
init.py
|
py
| 495 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "collections.Counter",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "students.mignatenko.day1.task1.constants.TEXT.lower",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "students.mignatenko.day1.task1.constants.TEXT",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "collections.Counter",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "students.mignatenko.day1.task1.constants.TEXT.lower",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "students.mignatenko.day1.task1.constants.TEXT",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "timeit.Timer",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "students.mignatenko.day1.task1.constants.REPEAT_COUNT",
"line_number": 19,
"usage_type": "argument"
}
] |
374463287
|
from nose.tools import raises, assert_raises
from MTADelayPredict.subway_line import SubwayLine, N_STOP_LIST
@raises(ValueError)
def test_wrong_direction_line():
    # A line mixing northbound ('N') and southbound ('S') stop ids must
    # be rejected at construction time.
    bad_line = SubwayLine(['R1N', 'R2S'])
@raises(ValueError)
def test_underspecified_stops():
    # Stop ids without a direction suffix must be rejected.
    bad_line = SubwayLine(['R1', 'R2'])
def test_stop_idx():
    # stop_idx returns the position of a stop id in the line's stop list.
    test_line = SubwayLine(['R16N', 'R15N'])
    assert test_line.stop_idx('R16N') == 0
def test_stop_create():
    # Test creation of new stops
    stop_list = ['R1N', 'R2N', 'W3N']
    test_line = SubwayLine(stop_list)

    # An id missing the route letter is invalid.
    assert_raises(ValueError, test_line.stop, '1N')
    # Looking a stop up by id yields an object carrying id and index.
    test_stop = test_line.stop('R1N')
    assert test_stop.stop_id == 'R1N'
    assert test_stop.stop_idx == 0

    # Index lookups must be bounds-checked in both directions.
    assert_raises(IndexError, test_line.stop_from_idx, 5)
    assert_raises(IndexError, test_line.stop_from_idx, -1)
    test_stop = test_line.stop_from_idx(2)
    assert test_stop.stop_id == 'W3N'
    assert test_stop.stop_idx == 2
| null |
tests/test_subway_line.py
|
test_subway_line.py
|
py
| 942 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "MTADelayPredict.subway_line.SubwayLine",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "nose.tools.raises",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "MTADelayPredict.subway_line.SubwayLine",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "nose.tools.raises",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "MTADelayPredict.subway_line.SubwayLine",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "MTADelayPredict.subway_line.SubwayLine",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "nose.tools.assert_raises",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "nose.tools.assert_raises",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "nose.tools.assert_raises",
"line_number": 27,
"usage_type": "call"
}
] |
67832360
|
from django.test import TestCase
from django.contrib.auth.models import User
from rest_framework.test import APIClient
class TestPost(TestCase):
def setUp(self):
self.user = User.objects.create(username="Foo")
def test_post_request_json(self):
""" Ensure POST'd JSON data is accepted """
client = APIClient()
client.force_authenticate(user=self.user)
data = dict(title='new idea', code="foo")
response = client.post('/snippets/', data, format='json')
self.assertEqual(response.status_code, 201)
def test_post_request_form(self):
""" Ensure POST'd form data is accepted """
client = APIClient()
client.force_authenticate(user=self.user)
data = dict(title='new idea', code="foo")
response = client.post('/snippets/', data)
# THIS WILL FAIL IF BUG IS NOT FIXED
self.assertEqual(response.status_code, 201)
| null |
snippets/tests.py
|
tests.py
|
py
| 930 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.test.TestCase",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.models.User.objects.create",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.models.User.objects",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.auth.models.User",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "rest_framework.test.APIClient",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "rest_framework.test.APIClient",
"line_number": 20,
"usage_type": "call"
}
] |
368835803
|
#!/usr/bin/env python
import http.client
# address = "example.com"
address = "127.0.0.1:1234"
conn = http.client.HTTPSConnection(address)
conn.request("GET", "/")
r = conn.getresponse()
print(r.status, r.reason)
data1 = r.read()
print(data1)
# conn.request("GET", "/")
# r = conn.getresponse()
# while not r.closed:
# l = r.read(200)
# if len(l) == 0:
# break
# print(l) # 200 bytes
| null |
client/client_http.py
|
client_http.py
|
py
| 410 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "http.client.client.HTTPSConnection",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "http.client.client",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "http.client",
"line_number": 9,
"usage_type": "name"
}
] |
613531386
|
import pygame
class Player:
def __init__(self,x=120,y=300,width=32,height=32):
self.rect = pygame.Rect(x,y,width,height)
self.texture = pygame.transform.scale(pygame.image.load("./data/img/player.png"),(width,height))
self.texture.set_colorkey((255,255,255))
self.gravity = 4
self.died = False
self.jumpForce = 0
self.jumpCount = 0
self.isJump = False
def update(self,windowf,dt):
if not self.died:
windowf.blit(self.texture,self.rect.x,self.rect.y)
self.Keys(dt)
def Keys(self,dt):
self.rect.y += self.gravity * dt
key = pygame.key.get_pressed()
if key[pygame.K_SPACE] and self.isJump == False:
self.isJump = True
if self.isJump == True:
self.jumpForce = 25
self.isJump = False
if self.jumpForce >= 0:
self.rect.y -= self.jumpForce
self.jumpForce -= 2 * dt
| null |
first/data/player.py
|
player.py
|
py
| 981 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pygame.Rect",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pygame.transform.scale",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pygame.transform",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "pygame.key.get_pressed",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "pygame.key",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_SPACE",
"line_number": 25,
"usage_type": "attribute"
}
] |
322551242
|
from aniLink import animeLink
from flask import Flask,render_template,request
from aniForm import aniForm
from colorama import Fore, Back, Style
import time
import random
import binascii
app=Flask(__name__)
app.secret_key=binascii.hexlify(str(random.random()).encode("utf-8"))
def getData(website,keyword):
anime=animeLink()
result=dict()
anime.search(keyword)
if website == "animesave":
result["animesave"]=anime.animesave()
elif website == "meguminime":
result["meguminime"]=anime.meguminime()
elif website == "drivenime":
result["drivenime"]=anime.drivenime()
elif website == "bakacan":
result["bakacan"]=anime.bakacan()
elif website == "meownime":
result["meownime"]=anime.meownime()
elif website == "wibudesu":
result["wibudesu"]=anime.wibudesu()
elif website == "kusonime":
result["kusonime"]=anime.kusonime()
elif website == "awbatch":
result["awbatch"]=anime.awbatch()
elif website == "zonawibu":
result["zonawibu"]=anime.zonawibu()
else:
result["animesave"]=anime.animesave()
time.sleep(0.1)
result["meguminime"]=anime.meguminime()
time.sleep(0.1)
result["drivenime"]=anime.drivenime()
time.sleep(0.1)
result["bakacan"]=anime.bakacan()
time.sleep(0.1)
result["meownime"]=anime.meownime()
time.sleep(0.1)
result["wibudesu"]=anime.wibudesu()
time.sleep(0.1)
result["kusonime"]=anime.kusonime()
time.sleep(0.1)
result["awbatch"]=anime.awbatch()
time.sleep(0.1)
result["zonawibu"]=anime.zonawibu()
return result
@app.route("/", methods=["POST","GET"])
def vIndex():
form=aniForm(csrf_disabled=False)
if request.method=="GET":
return render_template("index.html",data={"form":form})
elif request.method=="POST":
if form.validate_on_submit():
keyword=form.keyword.data
website=form.listSite.data
data=getData(website=website,keyword=keyword)
waktu=time.strftime("%d-%m-%Y %H:%M:%S",time.localtime())
for site in data:
for b in data[site]:
if b["title"] == "404 Not Found":
print("{bRed}[{website}][{time}] {log}{bReset}".format(website=site,time=waktu,log=b["title"],bRed=Back.RED,bReset=Back.RESET))
else:
print("{fYellow}[{website}]{fReset}{fGreen}[{time}]{fReset} {log}".format(website=site,time=waktu,log=b["title"],fGreen=Fore.GREEN,fYellow=Fore.YELLOW,fReset=Fore.RESET))
return render_template("index.html",data={"form":form,"data":data})
else:
return render_template("index.html",data={"form":form})
if __name__ == "__main__":
app.run(debug=True)
| null |
anigrab/server.py
|
server.py
|
py
| 2,853 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "flask.Flask",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "binascii.hexlify",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "random.random",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "aniLink.animeLink",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "aniForm.aniForm",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "time.strftime",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "time.localtime",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "colorama.Back.RED",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "colorama.Back",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "colorama.Back.RESET",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "colorama.Fore.GREEN",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "colorama.Fore",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "colorama.Fore.YELLOW",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "colorama.Fore.RESET",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "flask.render_template",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 73,
"usage_type": "call"
}
] |
461493472
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Gated working memory with an echo state network
# Copyright (c) 2018 Nicolas P. Rougier
#
# Distributed under the terms of the BSD License.
# -----------------------------------------------------------------------------
import numpy as np
import freetype as ft
import scipy.ndimage
def smoothen(Z, window='hanning', length=25):
"""
Smoothen a signal by averaging it over a fixed-size window
Z : np.array
Signal to smoothen
window: string
Specify how to compute the average over neighbours
One of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'
length: int
Size of the averaging window
"""
# window in 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'
S = np.r_[Z[length-1:0:-1], Z, Z[-2:-length-1:-1]]
if window == 'flat':
W = np.ones(length,'d')
else:
W = eval('np.' + window + '(length)')
Z = np.convolve(W/W.sum(), S, mode='valid')
return 2*Z[(length//2-1):-(length//2)-1]
def generate_data(values, ticks, last=None):
"""
This function generates output data for a gated working memory task:
Considering an input signal S(t) and a tick signal T(t), the output
signal O(t) is defined as: O(t) = S(tᵢ) where i = argmax(T(t) = 1).
values : np.array
Input signal as a single sequence of random float
ticks : np.array
Gating signal(s) as one (or several) sequence(s) of 0 and 1
"""
values = np.array(values).ravel()
ticks = np.array(ticks)
if len(ticks.shape) == 1:
ticks = ticks.reshape(len(ticks), 1)
n_gate = ticks.shape[1]
size = len(values)
data = np.zeros(size, dtype = [ ("input", float, (1 + n_gate,)),
("output", float, ( n_gate,))])
# Input signals
data["input"][:,0 ] = values
data["input"][:,1:] = ticks
wm = np.zeros(n_gate)
# If no last activity set tick=1 at time t=0
if last is None:
wm[:] = data["input"][0, 0]
data["input"][0, 1:] = 1
else:
wm[:] = last
# Output value(s) according to ticks
for i in range(size):
for j in range(n_gate):
# Output at time of tick is not changed
# data["output"][i,j] = wm[j]
if data["input"][i,1+j] > 0:
wm[j] = data["input"][i,0]
# Output at time of tick is changed
data["output"][i,j] = wm[j]
return data
def str_to_bmp(text, size=11, zmin=1.0, zmax=1.0, add_kerning=False):
"""
Generate a noisy bitmap string of text using different fonts
Parameters
==========
text: string
Text to be displayed
size: int
Font size to use to generate text (default 20)
zmin: float
Minimal horizontal distortion
zmax: float
Maximal horizontal distortion
Returns
=======
Tuple of numpy array (Z,I)
Z is the bitmap string array
I is a unidimensional numpy array that indicates the corresponding
character for each column of Z
"""
# Load fonts
fonts = ["./Inconsolata-Regular.ttf"]
faces = [ft.Face(filename) for filename in fonts]
for face in faces:
face.set_char_size(size*64)
slots = [face.glyph for face in faces]
# Find baseline and height (maximum)
baseline, height = 0, 0
for face in faces:
ascender = face.size.ascender >> 6
descender = face.size.descender >> 6
height = max(height, ascender-descender)
baseline = max(baseline, -descender)
# Set individual character font and zoom level
font_index = np.random.randint(0, len(faces), len(text))
zoom_level = np.random.uniform(zmin, zmax, len(text))
# First pass to compute bounding box
width = 0
previous = 0
for i,c in enumerate(text):
index = font_index[i]
zoom = zoom_level[i]
face, slot = faces[index], slots[index]
face.load_char(c, ft.FT_LOAD_RENDER | ft.FT_LOAD_FORCE_AUTOHINT)
bitmap = slot.bitmap
kerning = face.get_kerning(previous, c).x >> 6
kerning = int(round(zoom*kerning))
advance = slot.advance.x >> 6
advance = int(round(zoom*advance))
if i == len(text)-1:
width += max(advance, int(round(zoom*bitmap.width)))
else:
width += advance + kerning
previous = c
# Allocate arrays for storing data
Z = np.zeros((height,width), dtype=np.ubyte)
I = np.zeros(width, dtype=np.int) + ord(' ')
# Second pass for actual rendering
x, y = 0, 0
previous = 0
for i,c in enumerate(text):
index = font_index[i]
zoom = zoom_level[i]
face, slot = faces[index], slots[index]
face.load_char(c, ft.FT_LOAD_RENDER | ft.FT_LOAD_FORCE_AUTOHINT)
bitmap = slot.bitmap
top, left = slot.bitmap_top, slot.bitmap_left
w,h = bitmap.width, bitmap.rows
y = height - baseline - top
kerning = 0
if(add_kerning):
kerning = face.get_kerning(previous, c).x >> 6
kerning = int(round(zoom*kerning))
advance = slot.advance.x >> 6
advance = int(round(zoom*advance))
glyph = np.array(bitmap.buffer, dtype='ubyte').reshape(h,w)
glyph = scipy.ndimage.zoom(glyph, (1, zoom), order=3)
w = glyph.shape[1]
x += kerning
left = 0
Z[y:y+h,x+left:x+left+w] += glyph
I[x:x+w] = ord(c)
x += advance
previous = c
return Z/255.0, I
# -----------------------------------------------------------------------------
if __name__ == '__main__':
import matplotlib.pyplot as plt
"""
n = 2500
values = smoothen(np.random.uniform(-1, +1, n))
ticks = np.random.uniform(0, 1, (n,2)) < 0.01
data = generate_data(values, ticks)
print("Data size: {0}".format(len(data)))
print("Data dtype: {0}".format(data.dtype))
plt.figure(figsize=(12,2.5))
plt.plot(data["input"][:,0], color='0.75', lw=1.0)
plt.plot(data["output"][:,0], color='0.00', lw=1.5)
plt.ylim(-1,1)
plt.tight_layout()
plt.show()
"""
n = 50
np.random.seed(6)
values = np.random.uniform(0, +1, n)
ticks = np.random.uniform(0, 1, (n,1)) < 0.05
data1 = generate_data(values, ticks)
ticks = np.random.uniform(0, 1, (n,3)) < 0.05
data3 = generate_data(values, ticks)
cmap = "magma"
S = [
( 6, data1["input"][:,0], cmap, 0.75, "Value (V)"),
( 5, data3["input"][:,1], "gray_r", 1.00, "Trigger (T₁)"),
( 4, data3["output"][:,0], cmap, 0.75, "Output (M₁)"),
( 3, data3["input"][:,2], "gray_r", 1.00, "Trigger (T₂)"),
( 2, data3["output"][:,1], cmap, 0.75, "Output (M₂)"),
( 1, data3["input"][:,3], "gray_r", 1.00, "Trigger (T₃)"),
( 0, data3["output"][:,2], cmap, 0.75, "Output (M₃)"),
(10, data1["input"][:,0], cmap, 0.75, "Value (V)"),
( 9, data1["input"][:,1], "gray_r", 1.00, "Trigger (T)"),
( 8, data1["output"][:,0], cmap, 0.75, "Output (M)") ]
fig = plt.figure(figsize=(10,2.5))
ax = plt.subplot(1,1,1, frameon=False)
ax.tick_params(axis='y', which='both', length=0)
X = np.arange(n)
Y = np.ones(n)
yticks = []
ylabels = []
for (index, V, cmap, alpha, label) in S:
ax.scatter(X, index*Y, s=100, vmin=0, vmax=1, alpha=alpha,
edgecolor="None", c=V, cmap=cmap)
ax.scatter(X, index*Y, s=100, edgecolor="k", facecolor="None",
linewidth=0.5)
yticks.append(index)
ylabels.append(label)
ax.set_yticks(yticks)
ax.set_yticklabels(ylabels)
ax.set_ylim(-0.5,10.5)
ax.set_xticks([])
ax.set_xlim(-0.5,n-0.5)
plt.savefig("data.pdf")
plt.show()
| null |
attic/data.py
|
data.py
|
py
| 7,940 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "numpy.r_",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "numpy.ones",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.convolve",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "freetype.Face",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "numpy.random.randint",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 136,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.uniform",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 137,
"usage_type": "attribute"
},
{
"api_name": "freetype.FT_LOAD_RENDER",
"line_number": 146,
"usage_type": "attribute"
},
{
"api_name": "freetype.FT_LOAD_FORCE_AUTOHINT",
"line_number": 146,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "numpy.ubyte",
"line_number": 161,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "numpy.int",
"line_number": 162,
"usage_type": "attribute"
},
{
"api_name": "freetype.FT_LOAD_RENDER",
"line_number": 171,
"usage_type": "attribute"
},
{
"api_name": "freetype.FT_LOAD_FORCE_AUTOHINT",
"line_number": 171,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "scipy.ndimage.ndimage.zoom",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "scipy.ndimage.ndimage",
"line_number": 185,
"usage_type": "attribute"
},
{
"api_name": "scipy.ndimage",
"line_number": 185,
"usage_type": "name"
},
{
"api_name": "numpy.random.seed",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 219,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.uniform",
"line_number": 220,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 220,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.uniform",
"line_number": 222,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 222,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.uniform",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 225,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 242,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 242,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 243,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 243,
"usage_type": "name"
},
{
"api_name": "numpy.arange",
"line_number": 246,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 247,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 265,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 265,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 266,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 266,
"usage_type": "name"
}
] |
170791410
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author : Emma E. M. Hobbs
#
# Contact:
# [email protected]
#
# Emma Hobbs,
# School of Biology,
# University of St Andrews,
# Biomedical Sciences Research Complex,
# St Andrews,
# Fife,
# KY16 9ST
# Scotland,
# UK
#
# MIT License
import setuptools
from pathlib import Path
# get long description from README.md
with Path("README.md").open("r") as long_description_handle:
long_description = long_description_handle.read()
setuptools.setup(
name="cazy_webscraper",
version="0.1.2",
# Metadata
author="Emma E. M. Hobbs",
author_email="[email protected]",
description="".join(
[
(
"cazy_webscraper provides a webscraper to automate "
"the retrieval of protein data from the CAZy database, "
"found at http://www.cazy.org"
)
]
),
long_description=long_description,
long_description_content_type="text/markdown",
license="MIT",
keywords="bioinforamtics protein webscraper",
platforms="Posix, MacOS X",
url="https://github.com/HobnobMancer/cazy_webscraper",
entry_points={"console_scripts": ["cazy_webscraper.py = scraper.cazy_webscraper:main"]},
install_requires=[
"biopython>=1.76",
"mechanicalsoup",
"pandas>=1.0.3",
"pyyaml",
"requests",
"sqlalchemy==1.3.20",
"tqdm",
],
packages=setuptools.find_packages(),
package_data={
"Conda microenvironment": ["environment.yml"],
"CAZy dictionary": ["cazy_dictionary.json"],
"Configuration file": ["scraper_config.yaml"],
},
include_package_data=True,
classifiers=[
"Environment :: Console",
"Intended Audience :: Science/Research",
"Licence :: OSI Approved :: MIT License",
"Natural Language :: English",
"Operating System :: MacOS :: MacOS X",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python :: 3.7",
"Topic :: Scientific :: Bioinformatics",
],
)
| null |
setup.py
|
setup.py
|
py
| 2,099 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pathlib.Path",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "setuptools.setup",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "setuptools.find_packages",
"line_number": 62,
"usage_type": "call"
}
] |
410376779
|
#app/ecommend.py
# Imports
from os import path
import pandas as pd
import pickle
import json
# Load pickled vectorizer and model
with open('./pickles/tfidf.pkl', 'rb') as tfidf_pkl:
tfidf = pickle.load(tfidf_pkl)
with open('./pickles/nn_model.pkl', 'rb') as nn_pkl:
nn_model = pickle.load(nn_pkl)
#with open('./pickles/min_data.pkl', 'rb') as data_pkl:
# data = pickle.load(data_pkl)
"""Pickle data to use here, or try loading from DB"""
# Import pop_rent_crime_bins csv
file_name = path.join(path.dirname(__file__), "../data/pop_rent_crime_bins.csv")
prcb = pd.read_csv(file_name)
# Recommend Function
def recommend(user_input):
temp_df = nn_model.kneighbors(tfidf.transform([user_input]).todense())[1]
for i in range(4):
info = prcb.iloc[temp_df[0][i]]['Location']
info_pop = prcb.iloc[temp_df[0][i]]['2019 Population']
info_town_or_city = prcb.iloc[temp_df[0][i]]['Town or City']
info_rent = prcb.iloc[temp_df[0][i]]['2019 Rental Rates']
info_state = prcb.iloc[temp_df[0][i]]['State']
info_city = prcb.iloc[temp_df[0][i]]['City']
info_population = prcb.iloc[temp_df[0][i]]['Population']
info_violent_crime = prcb.iloc[temp_df[0][i]]['Violent crime']
info_murder = prcb.iloc[temp_df[0][i]]['Murder and nonnegligent manslaughter']
info_vehicle_theft = prcb.iloc[temp_df[0][i]]['Motor vehicle theft']
info_arson = prcb.iloc[temp_df[0][i]]['Arson']
info_crime_rate = prcb.iloc[temp_df[0][i]]['Crime Rate']
info_urb_pop_cat = prcb.iloc[temp_df[0][i]]['Urban Population by City Size Categories']
info_urb_pop_rang = prcb.iloc[temp_df[0][i]]['Urban Population by City Size Ranges']
info_rent_cat = prcb.iloc[temp_df[0][i]]['Rental Rate Categories']
info_rent_rang = prcb.iloc[temp_df[0][i]]['Rental Rate Ranges']
info_crime_cat = prcb.iloc[temp_df[0][i]]['Crime Rate Categories']
info_crime_rang = prcb.iloc[temp_df[0][i]]['Crime Rate Categories']
# Possible Outputs
location = json.dumps(info)
pop = json.dumps(int(info_pop))
town_or_city = json.dumps(info_town_or_city)
rent = json.dumps(int(info_rent))
state = json.dumps(info_state)
city = json.dumps(info_city)
population = json.dumps(int(info_population))
violent_crime = json.dumps(int(info_violent_crime))
murder = json.dumps(int(info_murder))
vehicle_theft = json.dumps(int(info_vehicle_theft))
arson = json.dumps(int(info_arson))
crime_rate = json.dumps(int(info_crime_rate))
urb_pop_cat = json.dumps(info_urb_pop_cat)
urb_pop_rang = json.dumps(info_urb_pop_rang)
rent_cat = json.dumps(info_rent_cat)
rent_rang = json.dumps(info_rent_rang)
crime_cat = json.dumps(info_crime_cat)
crime_rang = json.dumps(info_crime_rang)
# Add all future column names
return [location, pop, town_or_city, rent, state, city, population, violent_crime, murder, vehicle_theft,
arson, crime_rate, urb_pop_cat, urb_pop_rang, rent_cat, rent_rang, crime_cat, crime_rang]
| null |
app/recommend.py
|
recommend.py
|
py
| 3,175 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pickle.load",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "os.path.dirname",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 77,
"usage_type": "call"
}
] |
502260800
|
import cv2
import numpy as pd
img = cv2.imread('lenna.png')
height, width = img.shape[:2]
#starting pixel coordinates('top left of cropping rectangle')
start_row, start_col = int(height*.25), int(width*.25)
#ending the pixel coordinates(bottom right)
end_row, end_col = int(height*.75), int(width*.75)
#Use indexing to crop image
cropped = img[start_row:end_row, start_col:end_col]
cv2.imshow('Original', img)
cv2.waitKey(0)
cv2.imshow('Ceopped', cropped)
cv2.waitKey(0)
cv2.destroyAllWindows()
| null |
crop.py
|
crop.py
|
py
| 502 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "cv2.imread",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "cv2.destroyAllWindows",
"line_number": 22,
"usage_type": "call"
}
] |
52272731
|
import os
import shutil
import markdown
def isCustomCompile():
return True
js_outer = '<script type="text/javascript">%s</script>'
def genHtml(folderOut):
files = os.listdir('.')
files = [f for f in files if f.find('__pycache') == -1]
for f in files:
shutil.copyfile(f, os.path.join(f, os.path.join(folderOut, f)))
fh = open('index.html', 'r')
html = fh.read()
fh.close()
rootDir = '.'
jsCt = [js_outer % open(os.path.join(rootDir, f), 'r').read()
for f in os.listdir(rootDir) if f[-3:] == '.js']
html = html.replace('__js__', "\n".join(jsCt))
md = markdown.Markdown()
mdFile = [f for f in os.listdir('.') if '.markdown' in f][0]
mDhtml = md.convert(open(mdFile).read())
html = html.replace('__markdown__', mDhtml)
fh = open(os.path.join(folderOut, 'index.html'), 'w')
fh.write(html)
fh.close()
| null |
articles/web/HTMLCanvasAnimationAtoms/compile.py
|
compile.py
|
py
| 895 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.listdir",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "shutil.copyfile",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "markdown.Markdown",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 35,
"usage_type": "attribute"
}
] |
554764691
|
# write your code here
import collections
import os
import sys
import re
import hashlib
import sqlite3
args = sys.argv
if len(args) != 2:
print('Directory is not specified')
exit(-1)
path = args[1]
sorting_options = {'1': 'DESC', '2': 'ASC'}
file_format = input('Enter file format: ')
print('\nSize sorting options:\n\
1. Descending\n\
2. Ascending\n')
if file_format == '':
file_format = '.*'
while True:
option = input('Enter a sorting option: ')
if option not in sorting_options:
print('Wrong!\n')
else:
break
conn = sqlite3.connect('files.s3db')
cur = conn.cursor()
cur.execute('DROP TABLE IF EXISTS files')
cur.execute('CREATE TABLE files(name TEXT, size INTEGER, hash VARCHAR(100))')
conn.commit()
def read_files_in_dir(path):
for root, dic, files in os.walk(path, topdown=True):
for name in files:
if re.match('.*\.' + file_format + '$', str(name)):
full_name = root + '\\' + name
file_size = str(os.path.getsize(full_name))
with open(full_name, 'rb') as f:
file_hash = hashlib.md5(f.read()).hexdigest()
cur.execute(f"INSERT INTO files(name, size, hash) VALUES('{full_name}', {file_size}, '{file_hash}')")
conn.commit()
read_files_in_dir(path)
cur.execute(f'SELECT size, name FROM files ORDER BY size {sorting_options[option]}')
files_sorted_by_size = cur.fetchall()
if option == '1':
current_size = sys.maxsize
else:
current_size = -1
for file in files_sorted_by_size:
file_size = file[0]
file_name = file[1]
if option == '1':
if file_size < current_size:
current_size = file_size
print(f'\n{str(current_size)} bytes')
else:
if file_size > current_size:
current_size = file_size
print(f'\n{str(current_size)} bytes')
print(file_name)
print()
while True:
check_duplicates = input('Check for duplicates?\n')
if check_duplicates == 'yes' or check_duplicates == 'no':
print()
break
print('Wrong option\n')
if check_duplicates == 'yes':
cur.execute(f'SELECT hash, name, size FROM files WHERE hash IN(SELECT hash FROM files GROUP BY hash HAVING COUNT(hash)>=2) ORDER BY 3 {sorting_options[option]}, 1 DESC')
duplicate_files = cur.fetchall()
current_size = -1
current_hash = ""
iterator = 1
file_numbers = []
for file in duplicate_files:
hash_ = file[0]
name = file[1]
size = file[2]
if size != current_size:
print(f'\n{str(size)} bytes')
current_size = size
if hash_ != current_hash:
print('Hash: ' + hash_)
current_hash = hash_
print(str(iterator) + '. ' + name)
file_numbers.append(iterator)
iterator += 1
while True:
delete = input('Delete files?\n')
if delete == 'yes' or check_duplicates == 'no':
print()
break
print('Wrong option\n')
if delete == 'yes':
while True:
try:
input_ = input('Enter file numbers to delete:\n')
if input_ == '':
raise Exception
files = [int(number) for number in input_.split()]
temp = [x for x in file_numbers]
for number in files:
if number in temp:
temp.remove(number)
else:
raise Exception
break
except Exception:
print('\nWrong format\n')
continue
total_removed = 0
for i in range(1, iterator):
if i in files:
os.remove(duplicate_files[i-1][1])
total_removed += int(duplicate_files[i-1][2])
print(f'Total freed up space: {total_removed} bytes')
cur.execute('DROP TABLE files')
conn.commit()
conn.close()
| null |
Duplicate File Handler/Duplicate File Handler/task/handler.py
|
handler.py
|
py
| 3,973 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "sys.argv",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "sqlite3.connect",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.walk",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "os.path.getsize",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "hashlib.md5",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "sys.maxsize",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "os.remove",
"line_number": 125,
"usage_type": "call"
}
] |
130925232
|
#!/usr/bin/python
import os
import sys
import time
import datetime
from dateutil.relativedelta import relativedelta
import operator
import re
import shutil
from config_upload import upload_base
'''this is launched by file_upload to update the index page of the media folder; consolidate bulletins later'''
allowed_ext = ['.pdf', '.txt', '.jpg', '.png', '.gif']
def parse_files(mpath):
    """Map normalized upload file names to their last-modified datetimes.

    For each file in *mpath* whose extension (case-insensitive) is in
    allowed_ext, the key is the file name with spaces replaced by
    underscores and the extension lower-cased; the value is the file's
    mtime as a datetime with microseconds zeroed.

    Fixes over the original: removes the unused `media_files` local and
    merges the two duplicate branches ('_'.join is a no-op for a
    single-word name).
    """
    file_dates = {}
    for filename in os.listdir(mpath):
        fpath = os.path.join(mpath, filename)
        mtime = os.path.getmtime(fpath)
        fdate = datetime.datetime.fromtimestamp(mtime).replace(microsecond=0)
        fn, ext = os.path.splitext(filename)
        if ext.lower() not in allowed_ext:
            continue
        # normalize: spaces -> underscores, extension lower-cased
        new_name_ext = '{0}{1}'.format('_'.join(fn.split(' ')), ext.lower())
        file_dates[new_name_ext] = fdate
    return file_dates
def get_trailing_numbers(dts):
    """Return the integer formed by the trailing digits of *dts*, or None."""
    match = re.search(r'\d+$', dts)
    if match is None:
        return None
    return int(match.group())
def parse_bulletins(mpath):
    """Map ISO date strings (YYYY-MM-DD) to bulletin PDF file names.

    Only files named like 'Bulletin_<Mon>-<DD>-<YYYY>.pdf' in *mpath* are
    included; the date comes from the portion after the underscore.
    """
    bulletin_dates = {}
    for fn in os.listdir(mpath):
        if not (fn.startswith('Bulletin') and fn[-4:] == '.pdf'):
            continue
        # catch bulletins that are already renamed (which should always be true)
        if '_' not in fn:
            continue
        dts = fn.split('_')[1][:-4]
        if not get_trailing_numbers(dts):
            # name does not end in a year; skip it
            continue
        parsed = datetime.datetime.strptime(dts, '%b-%d-%Y')
        iso_date = datetime.datetime.strftime(parsed.date(), '%Y-%m-%d')
        bulletin_dates[iso_date] = fn
    return bulletin_dates
def update_index(mpath, mfolder, file_dates):
    """Rewrite <mpath>/index.html listing files uploaded within the last 12 months.

    The page is assembled from a static header fragment (header_index.html in
    the same folder) followed by one table row per file; the row layout varies
    by folder. For 'Bulletins' the folder is re-scanned via parse_bulletins and
    the newest bulletin is also copied to bulletin_current.pdf.

    :param mpath: path of the media folder being indexed
    :param mfolder: folder name ('Altar', 'Apostolate', 'Bulletins', 'Education', 'Youth')
    :param file_dates: mapping of file name -> upload datetime (ignored by the
        Bulletins branch, which re-parses the folder itself)
    """
    #page headers = header_index.html in same folder with uploaded files (clean this up later)
    header = os.path.join(mpath, 'header_index.html')
    mpath_index = os.path.join(mpath, 'index.html')
    # newest files first
    sorted_file_dates = sorted(file_dates.items(), key=operator.itemgetter(1), reverse=True)
    cur_day = datetime.date.today()
    earliest = cur_day - relativedelta(months=12)
    # midnight of the cutoff day, as a datetime so it compares with file timestamps
    earliest_dt = datetime.datetime.combine(earliest, datetime.time.min)
    target = open(mpath_index, 'w')
    try:
        shutil.copyfileobj(open(header, 'rb'), target)
    except IOError:
        print('Could not copy form header! Please contact [email protected]')
        sys.exit(1)
    if mfolder == 'Bulletins':
        bulletin_dates = parse_bulletins(mpath)
        # keys are ISO date strings, so lexicographic max == newest bulletin
        most_recent = sorted(bulletin_dates.keys())[-1]
        bulletin_current = os.path.join(mpath, 'bulletin_current.pdf')
        for fdate in bulletin_dates:
            fn = bulletin_dates[fdate]
            if fdate == most_recent:
                # keep a stable-named copy of the latest bulletin
                copy_bulletin = os.path.join(mpath, fn)
                shutil.copy2(copy_bulletin, bulletin_current)
        #Bulletin index page updates
        target.write('<tr><td colspan="3" align="center"><table width="400">')
        for fdate in sorted(bulletin_dates.keys(), reverse=True):
            fn = bulletin_dates[fdate]
            dt = datetime.datetime.strptime(fdate, '%Y-%m-%d')
            #include one year of files; print "recent:", fn, dt
            if dt > earliest_dt:
                display_date = datetime.datetime.strftime(dt.date(), '%Y-%m-%d')
                target.write('<tr><td><a href="/shc/media/users/Bulletins/{0}" target="_blank">{1}</a></td><td> </td><td>{2}</td></tr>\n'.format(fn, fn, display_date))
        target.write('</table></td></tr></table>\n</body></html>\n')
        target.close()
    elif mfolder == 'Education':
        target.write('<tr><td colspan="3" align="center"><table width="400">')
        if len(sorted_file_dates) > 1:
            target.write('''<tr><td colspan="2" align="center"><hr color="#9e0909" width="400">
        Documents are listed in the order they were uploaded.
        <hr color="#9e0909" width="400"></td></tr>''')
        for fn, ts in sorted_file_dates:
            if fn == 'Registration_Form.pdf':
                # the registration form is linked elsewhere; skip it here
                pass
            elif file_dates[fn] > earliest_dt:
                #file is more recent than one year ago; print "recent:", fn, ts
                display_date = datetime.datetime.strftime(ts.date(), '%Y-%m-%d')
                target.write('<tr><td><a href="/shc/media/users/{0}/{1}" target="_blank">{2}</a></td><td> </td><td>{3}</td></tr>\n'.format(mfolder, fn, fn, display_date))
        target.write('</table></td></tr></table>\n</body></html>\n')
        target.close()
    else:
        #Altar, Apostolate, Education, Youth index page updates; listed by timestamp of upload
        target.write('<tr><td colspan="3" align="center"><table width="400">')
        for fn, ts in sorted_file_dates:
            if file_dates[fn] > earliest_dt:
                #file is more recent than one year ago; print "recent:", fn, ts
                display_date = datetime.datetime.strftime(ts.date(), '%Y-%m-%d')
                target.write('<tr><td><a href="/shc/media/users/{0}/{1}" target="_blank">{2}</a></td><td> </td><td>{3}</td></tr>\n'.format(mfolder, fn, fn, display_date))
        target.write('</table></td></tr></table>\n</body></html>\n')
        target.close()
if __name__ == "__main__":
    #passed from file_upload; parsing bulletin dates by name, all others by timestamp of file upload
    media_folders = ['Altar', 'Apostolate', 'Bulletins', 'Education', 'Youth']
    for mfolder in media_folders:
        mpath = os.path.join(upload_base, mfolder)
        if mfolder == 'Bulletins':
            # bulletins carry their date in the file name
            bulletin_dates = parse_bulletins(mpath)
            update_index(mpath, mfolder, file_dates=bulletin_dates)
        else:
            # all other folders are ordered by file modification time
            file_dates = parse_files(mpath)
            update_index(mpath, mfolder, file_dates)
| null |
maintenance/media_archives.py
|
media_archives.py
|
py
| 6,382 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.listdir",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "os.path.getmtime",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.fromtimestamp",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "os.path.splitext",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "re.search",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.strftime",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "operator.itemgetter",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "datetime.date.today",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "dateutil.relativedelta.relativedelta",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.combine",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "datetime.time",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "shutil.copyfileobj",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 88,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "shutil.copy2",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 99,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.strftime",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 102,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.strftime",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 120,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.strftime",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 132,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "config_upload.upload_base",
"line_number": 144,
"usage_type": "argument"
},
{
"api_name": "os.path",
"line_number": 144,
"usage_type": "attribute"
}
] |
201288243
|
#!/usr/bin/env python3
"""Import data from daily attitudes heartbeat survey into BigQuery."""
import datetime as dt
import itertools
import re
from argparse import ArgumentParser
from time import sleep
import pytz
import requests
from google.cloud import bigquery
# CLI definition; the script is driven by an external scheduler.
parser = ArgumentParser(description=__doc__)
parser.add_argument("--date", required=True)
parser.add_argument("--survey_id", required=True)
parser.add_argument("--sg_api_token", required=True)
parser.add_argument("--sg_api_secret", required=True)
parser.add_argument("--destination_table", default="moz-fx-data-shared-prod.external.survey_gizmo_daily_attitudes")
# Maps the survey's full question text to the short BigQuery column key.
Q_FIELDS = {
    "I trust Firefox to help me with my online privacy": "trust_firefox",
    "All the sites I’ve visited recently have worked; none of them seem broken": "sites_work",
    "The internet is open and accessible to all": "internet_accessible",
    "Using the internet helped me meet my goals today": "met_goals"
}
def utc_date_to_eastern_string(date_string):
    """Render UTC midnight of a YYYY-MM-DD date in US/Eastern local time.

    Returns the moment formatted as 'YYYY-MM-DD+HH:MM:SS'.
    """
    midnight_utc = pytz.utc.localize(dt.datetime.strptime(date_string, '%Y-%m-%d'))
    eastern = midnight_utc.astimezone(pytz.timezone('US/Eastern'))
    return eastern.strftime('%Y-%m-%d+%H:%M:%S')
def date_plus_one(date_string):
    """Return the ISO date string one day after *date_string* (YYYY-MM-DD)."""
    next_day = dt.date.fromisoformat(date_string) + dt.timedelta(days=1)
    return next_day.strftime('%Y-%m-%d')
def format_responses(s, date):
    """Takes a single user's responses and returns a list of dictionaries, one per answer,
    formatted to corresponding bigquery fields.

    # `survey_data` is a dict with question ID as the key and question/response details as the value
    # e.g. survey_data: {'25': {'id': 25, 'type': 'RADIO', 'question': 'I trust Firefox to help me with my online privacy',
    #                           'section_id': 2, 'answer': 'Agree', 'answer_id': 10066, 'shown': True}}

    Each returned dict has keys shield_id/date/question/question_key/value;
    the 'Shield ID' pseudo-question itself is excluded from the rows.
    """
    resp = list(s.get("survey_data", {}).values())
    # Shield ID is sent as a hidden field with question name "Shield ID".
    # next(..., None) replaces the original try/[0]/except IndexError idiom:
    # same behavior (first match or None), no exception for the common case.
    shield_id = next(
        (r.get("answer") for r in resp if r.get('question') == 'Shield ID'),
        None)
    return [{
        'shield_id': shield_id,
        'date': date,
        'question': r.get('question'),
        'question_key': Q_FIELDS.get(r.get('question')),
        'value': r.get('answer')
    } for r in resp if r.get('question') != 'Shield ID']
def construct_data(survey, date):
    """Format every response in *survey* and flatten into one list of row dicts."""
    rows = []
    for response in survey['data']:
        rows.extend(format_responses(response, date))
    return rows
def get_survey_data(survey_id, date_string, token, secret):
    """Fetch all responses submitted on *date_string* (UTC day) for a survey.

    Pages through the SurveyGizmo v5 REST API (500 results per page) and
    returns the flattened list of BigQuery row dicts built by construct_data.
    """
    # per SurveyGizmo docs, times are assumed to be eastern
    # https://apihelp.surveygizmo.com/help/filters-v5
    # so UTC midnight must be converted to EST/EDT
    start_date = utc_date_to_eastern_string(date_string)
    end_date = utc_date_to_eastern_string(date_plus_one(date_string))
    url = (f"https://restapi.surveygizmo.com/v5/survey/{survey_id}/surveyresponse"
           f"?api_token={token}&api_token_secret={secret}&results_per_page=500"
           # filter for date_submitted >= start_date
           f"&filter[field][0]=date_submitted&filter[operator][0]=>=&filter[value][0]={start_date}"
           # filter for date_submitted < end_date
           f"&filter[field][1]=date_submitted&filter[operator][1]=<&filter[value][1]={end_date}")
    resp = requests.get(url)
    resp.raise_for_status()
    survey = resp.json()
    # if the result set is large, we'll have to page through them to get all data
    total_pages = survey.get("total_pages")
    print(f"Found {total_pages} pages after filtering on date={date_string}")
    print("fetching page 1")
    ret = construct_data(survey, date_string)
    # page 1 was already in the first response; fetch the rest sequentially
    for page in range(2, total_pages+1):
        print("fetching page {}".format(page))
        resp = requests.get(url + f"&page={page}")
        resp.raise_for_status()
        ret = ret + construct_data(resp.json(), date_string)
    return ret
def insert_to_bq(data, table, date, write_disposition=bigquery.job.WriteDisposition.WRITE_TRUNCATE):
    """Load row dicts into the BigQuery day partition `table$YYYYMMDD`.

    The default WRITE_TRUNCATE makes the load idempotent: re-running a day
    replaces that day's partition instead of appending duplicates.
    """
    client = bigquery.Client()
    print(f"Inserting {len(data)} rows into bigquery")
    job_config = bigquery.LoadJobConfig(write_disposition=write_disposition)
    # '$' addresses a single ingestion-time partition of the table
    partition = f"{table}${date.replace('-', '')}"
    job = client.load_table_from_json(data, partition, job_config=job_config)
    # job.result() returns a LoadJob object if successful, or raises an exception if not
    job.result()
def main():
    """Fetch one day of survey responses and load them into BigQuery."""
    args = parser.parse_args()
    rows = get_survey_data(args.survey_id, args.date,
                           args.sg_api_token, args.sg_api_secret)
    insert_to_bq(rows, args.destination_table, args.date)
if __name__ == "__main__":
    main()
| null |
sql/moz-fx-data-shared-prod/telemetry_derived/surveygizmo_daily_attitudes/import_responses.py
|
import_responses.py
|
py
| 4,868 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "pytz.utc.localize",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "pytz.utc",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "pytz.timezone",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.strftime",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.strftime",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "datetime.date.fromisoformat",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "datetime.timedelta",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "itertools.chain.from_iterable",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "itertools.chain",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "requests.get",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "google.cloud.bigquery.job",
"line_number": 104,
"usage_type": "attribute"
},
{
"api_name": "google.cloud.bigquery",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "google.cloud.bigquery.Client",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "google.cloud.bigquery",
"line_number": 105,
"usage_type": "name"
},
{
"api_name": "google.cloud.bigquery.LoadJobConfig",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "google.cloud.bigquery",
"line_number": 107,
"usage_type": "name"
}
] |
246912248
|
import jsonlines
from collections import Counter
from collections import defaultdict
def read_in_jsonl(file):
    """Read a .jsonl file and return its records as a list."""
    with open(file, 'r') as handle:
        return list(jsonlines.Reader(handle).iter())
def count_entity_types(json_docs):
    """Count how often each entity label occurs across all annotated docs.

    Each doc is a dict whose 'labels' value is a list of
    (start, end, label) triples. Returns a Counter of label -> frequency.
    """
    all_types = []
    for annotation in json_docs:
        # extend (not append) so Counter sees individual label strings;
        # the original appended the per-doc list, making Counter raise
        # TypeError on the unhashable inner lists.
        all_types.extend(label for _start, _end, label in annotation['labels'])
    return Counter(all_types)
def spans_by_type(json_docs):
    """Collect the annotated text spans for each entity label.

    Returns a defaultdict mapping label -> Counter of normalized span texts
    (lower-cased, newlines replaced by spaces).

    Cleanup: the original bound a local named `spans_by_type` (shadowing this
    function) and looped over `type` (shadowing the builtin); renamed both.
    """
    grouped = defaultdict(list)
    for annotation in json_docs:
        labels = annotation['labels']
        text = annotation['text']
        for start, end, label in labels:
            span = text[start:end].lower().replace("\n", " ")
            grouped[label].append(span)
    # collapse each label's span list into frequency counts
    for label, spans in grouped.items():
        grouped[label] = Counter(spans)
    return grouped
if __name__=="__main__":
    # per-annotator batches (kept for the commented-out comparisons below)
    luc = read_in_jsonl("annotated_batches/batch_1_annotated_lucino.jsonl")
    em = read_in_jsonl("annotated_batches/batch_3_annotated_emily.jsonl")
    sam = read_in_jsonl("annotated_batches/batch_4_annotated_samantha.jsonl")
    mark = read_in_jsonl("annotated_batches/batch_2_annotated_mark.jsonl")
    # NOTE: `all` shadows the builtin; holds the merged annotation set
    all = read_in_jsonl("annotated_data.jsonl")
    #print("LUCINO ", count_entity_types(luc))
    #print("EMILY ", count_entity_types(em))
    #print("SAM ", count_entity_types(sam))
    #print("MARK ", count_entity_types(mark))
    #print("ALL ", count_entity_types(all))
    # print, per entity label, how many distinct spans were annotated
    for type, spans in spans_by_type(all).items():
        print(len(spans))
        print(type, spans)
| null |
descriptives.py
|
descriptives.py
|
py
| 1,641 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "jsonlines.Reader",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "collections.Counter",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "collections.Counter",
"line_number": 33,
"usage_type": "call"
}
] |
342440913
|
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
# Compare computed drag on an impulsively started cylinder (Re=40) against
# the Koumoutsakos & Leonard (1995) reference data, and save the plot as PDF.
val_data = np.genfromtxt('/scratch/src/cuIBM-FSI/validation-data/cylinderRe40-KL95.txt')
force = np.genfromtxt('forces.txt')
# 0.5 factor on the reference time axis: presumably a non-dimensionalization
# difference between the datasets — TODO confirm against the paper
plt.plot(0.5*val_data[:,0], val_data[:,1], 'o', color = 'red', markersize = 8, label = 'Koumoutsakos and Leonard, 1995')
# rows 4:-1 skip the initial transient and the last sample; the factor 2
# looks like a force -> drag-coefficient conversion — verify normalization
plt.plot(force[4:-1,0], 2*force[4:-1,1], '-', color='blue', linewidth=2, label='Present Work')
#plt.title('Flow over impulsively started cyliner re 40')
plt.xlabel('Non-Dimensional Time')
plt.ylabel('Drag Coefficient')
plt.legend(loc='upper right', numpoints=1, fancybox=True)
plt.axis([0,5,0,6])
pp = PdfPages('CylinderDragRe40.pdf')
pp.savefig()
pp.close()
| null |
scripts/python/plotDrag.py
|
plotDrag.py
|
py
| 732 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "numpy.genfromtxt",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "numpy.genfromtxt",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.axis",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "matplotlib.backends.backend_pdf.PdfPages",
"line_number": 16,
"usage_type": "call"
}
] |
8365695
|
import requests
from bs4 import BeautifulSoup as bs
# 使用def定义函数,myurl是函数的参数
def get_url_name(myurl):
    """Fetch one Douban Top-250 page and print each movie's link and title."""
    user_agent = "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36"
    header = {'user-agent' : user_agent}
    response = requests.get(myurl, headers=header)
    bs_info = bs(response.text, 'html.parser')
    # each movie entry lives in a <div class="hd">
    for tags in bs_info.find_all('div', attrs={'class': 'hd'}):
        for atag in tags.find_all('a',):
            # print the movie's link; the original read the misspelled
            # attribute 'herf', which always printed None
            print(atag.get('href'))
            # print the movie title
            print(atag.find('span',).text)
# build a tuple with the URLs of all 10 result pages (25 movies per page)
urls = tuple(f'https://movie.douban.com/top250?start={ page * 25}&filter=' for page in range(10))
print (urls)
# throttle the request rate, using the time module
from time import sleep
sleep(10)
for page in urls:
    get_url_name(page)
    sleep(5)
| null |
week01/shuncon_requests/shun_bs4_autopages.py
|
shun_bs4_autopages.py
|
py
| 1,016 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "requests.get",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 35,
"usage_type": "call"
}
] |
631233844
|
import pickle
import os
import six
from enum import Enum
class DictOption(Enum):
    """Selectable dictionary lookups: radical and/or pinyin of a character."""
    Radical = 1
    Pinyin = 2
# every available option, used below to validate and normalize user input
all_option = list(DictOption)
class Radical(object):
    """Look up radical and/or pinyin entries for a character from a pickled
    dictionary shipped in the package's data/ directory."""
    # pickle file name, resolved relative to _dict_dir in __init__
    _dictionary_file_name = 'dictionary.pickle'
    _dict_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data')
    def __init__(self, options):
        """*options* is a DictOption, an option-name string, or a list/tuple
        of either; None defaults to DictOption.Radical."""
        self.dictionary_file_name = os.path.join(self._dict_dir, self._dictionary_file_name)
        # lazily loaded dictionary cache; populated on first trans() call
        self.diction = None
        if options is None:
            options = DictOption.Radical
        self.options = self._format_options(options)
    @staticmethod
    def _format_options(options):
        """Normalize *options* into a list of DictOption members.

        Accepts DictOption members directly and case-insensitive option-name
        strings; anything unrecognized is silently dropped.
        """
        if not isinstance(options, (list, tuple)):
            options = [options]
        rslt = []
        all_option_name = [op.name.lower() for op in all_option]
        for option in options:
            if option in all_option:
                rslt.append(option)
                continue
            # map a string like 'pinyin' to DictOption.Pinyin
            if isinstance(option, six.string_types) and option.lower() in all_option_name:
                rslt.append(DictOption[option.capitalize()])
        return rslt
    def get_dict(self):
        """Load the pickled dictionary, keeping only the selected options.

        Raises IOError if the unpickled value is None.
        """
        dictionray = None
        with open(self.dictionary_file_name, 'rb') as f:
            dictionray = pickle.load(f)
        if dictionray is None:
            raise IOError('cannot find dictionary file {fname}'.format(fname=self.dictionary_file_name))
        # drop sub-dictionaries for options the caller did not request
        for op in all_option:
            if op not in self.options:
                del dictionray[op.name.lower()]
        return dictionray
    def trans(self, ch=''):
        """Return the dictionary entry (or entries) for character *ch*.

        Returns a single value when one option is selected, otherwise a list
        in option order. Raises KeyError if *ch* is not in the dictionary.
        """
        if self.diction is None:
            self.diction = self.get_dict()
        rslt = []
        for op in self.options:
            rslt.append(self.diction[op.name.lower()][ch])
        if len(rslt) == 1:
            rslt = rslt[0]
        return rslt
| null |
cnradical/radical.py
|
radical.py
|
py
| 1,812 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "enum.Enum",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "six.string_types",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "pickle.load",
"line_number": 43,
"usage_type": "call"
}
] |
588225000
|
import logging
import cStringIO
import traceback
import os
import datetime
import pytz
from db.db import SS
import db.model as m
from app.util import Selector, Batcher
from app.i18n import get_text as _
log = logging.getLogger(__name__)
def run_recurring_utterance_selections(task):
    """Execute every enabled recurring utterance selection attached to *task*.

    For each selection: validate its target rework sub task, select the
    matching raw pieces, batch them into the sub task, and record a content
    event. Objects are added to the session; the caller commits or rolls back.
    """
    rs = m.UtteranceSelection.query.filter(m.UtteranceSelection.taskId==task.taskId
        ).filter(m.UtteranceSelection.recurring
        ).filter(m.UtteranceSelection.enabled).all()
    for selection in rs:
        subTask = m.SubTask.query.get(selection.subTaskId)
        if not subTask:
            # fix: the original referenced an undefined name `selectionId`
            # here (and below), raising NameError whenever this ran;
            # assumes the model exposes selection.selectionId — matches the
            # taskId/subTaskId naming pattern, confirm against db.model
            log.error(_('utterance selection {0} is corrupted: sub task {1} not found'
                ).format(selection.selectionId, selection.subTaskId))
            continue
        if subTask.workType != m.WorkType.REWORK:
            log.error(_('utterance selection {0} is corrupted: sub task {1} is not a {2} sub task'
                ).format(selection.selectionId, selection.subTaskId, m.WorkType.REWORK))
            continue
        rawPieceIds = Selector.select(selection)
        if not rawPieceIds:
            log.info(_('no matches found'))
            continue
        batches = Batcher.batch(subTask, rawPieceIds)
        for batch in batches:
            SS.add(batch)
        itemCount = len(rawPieceIds)
        # record when (UTC) and by whom the items were added
        now = datetime.datetime.utcnow().replace(tzinfo=pytz.utc)
        event = m.SubTaskContentEvent(subTaskId=subTask.subTaskId,
            selectionId=selection.selectionId, itemCount=itemCount,
            isAdding=True, tProcessedAt=now, operator=selection.userId)
        SS.add(event)
def main(taskId=None):
    """Run recurring selections for one task (by id) or for all open tasks.

    Each task is processed in its own transaction: commit on success,
    rollback with the traceback logged on failure.

    :raises ValueError: when an explicit taskId does not exist
    """
    logging.basicConfig(level=logging.DEBUG)
    if taskId is None:
        # all tasks that are not archived/closed/finished
        tasks = m.Task.query.filter(m.Task.status.notin_([
            m.Task.STATUS_ARCHIVED, m.Task.STATUS_CLOSED,
            m.Task.STATUS_FINISHED])).all()
    else:
        task = m.Task.query.get(taskId)
        if not task:
            raise ValueError('task {0} not found'.format(taskId))
        tasks = [task]
    for task in tasks:
        try:
            run_recurring_utterance_selections(task)
        except Exception:
            # was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt; narrowed to Exception
            out = cStringIO.StringIO()
            traceback.print_exc(file=out)
            log.error(out.getvalue())
            SS.rollback()
        else:
            SS.commit()
| null |
jobs/job_run_recurring_utterance_selections.py
|
job_run_recurring_utterance_selections.py
|
py
| 2,034 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "logging.getLogger",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "db.model.UtteranceSelection.query.filter",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "db.model.UtteranceSelection",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "db.model",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "db.model.UtteranceSelection",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "db.model",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "db.model.UtteranceSelection",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "db.model",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "db.model.SubTask.query.get",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "db.model.SubTask",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "db.model",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "app.i18n.get_text",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "db.model.WorkType",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "db.model",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "app.i18n.get_text",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "db.model.WorkType",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "db.model",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "app.util.Selector.select",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "app.util.Selector",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "app.i18n.get_text",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "app.util.Batcher.batch",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "app.util.Batcher",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "db.db.SS.add",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "db.db.SS",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.utcnow",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "pytz.utc",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "db.model.SubTaskContentEvent",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "db.model",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "db.db.SS.add",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "db.db.SS",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "logging.basicConfig",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "db.model.Task.query.filter",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "db.model.Task",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "db.model",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "db.model.Task.status.notin_",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "db.model.Task",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "db.model",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "db.model.Task",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "db.model",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "db.model.Task.query.get",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "db.model.Task",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "db.model",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "cStringIO.StringIO",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "traceback.print_exc",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "db.db.SS.rollback",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "db.db.SS",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "db.db.SS.commit",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "db.db.SS",
"line_number": 74,
"usage_type": "name"
}
] |
130019134
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
'''
# -*- coding: Latin-1
# -*- coding: iso-8859-1
# -*- coding: utf-8 -*-
Created on 18 jun 2015
@author: s057wl
'''
import ibm_db
ibm_db_conn = ibm_db.connect('pydev', 'PDSN', 'secret')
import ctypes
def getConnectAtrr(conn, odbc, infotype):
    """Read an ODBC connection attribute via SQLGetConnectAttr(W).

    :param conn: a pypyodbc Connection (uses conn.dbc_h and conn.ansi)
    :param odbc: the pypyodbc module itself, used for its ctypes helpers
    :param infotype: the SQL_ATTR_* attribute code to query
    :return: the attribute value; decoded from UCS for non-ANSI connections
    """
#    total_buf_len = 1000
#    alloc_buffer = ctypes.c_ulong()
#    used_buf_len = odbc.c_short()
#    ret = odbc.ODBC_API.SQLGetConnectAttr(conn.dbc_h, infotype, odbc.ADDR(alloc_buffer), total_buf_len,\
#        odbc.ADDR(used_buf_len))
#    odbc.check_success(conn, ret)
#    result = alloc_buffer.value
    total_buf_len = 1000
    alloc_buffer = odbc.create_buffer(total_buf_len)
    used_buf_len = odbc.c_short()
    # pick the narrow or wide API entry point to match the connection mode
    if conn.ansi:
        API_f = odbc.ODBC_API.SQLGetConnectAttr
    else:
        API_f = odbc.ODBC_API.SQLGetConnectAttrW
    ret = API_f(conn.dbc_h,infotype,odbc.ADDR(alloc_buffer), total_buf_len,\
        odbc.ADDR(used_buf_len))
    # raises on non-success ODBC return codes
    odbc.check_success(conn, ret)
    if conn.ansi:
        result = alloc_buffer.value
    else:
        # wide-char buffer: decode UCS bytes to a Python string
        result = odbc.UCS_dec(alloc_buffer)
#    if odbc.aInfoTypes[infotype] == 'GI_YESNO':
#        if odbc.unicode(result[0]) == odbc.unicode('Y'):
#            result = True
#        else:
#            result = False
    return result
# import pyodbc as odbc
import pypyodbc as odbc
SQL_ATTR_CLIENT_CODEPAGE=1208
# aInfoTypes={SQL_ATTR_CLIENT_CODEPAGE : 'GI_UINTEGER',}
# aInfoTypes={SQL_ATTR_CLIENT_CODEPAGE : 'GI_USMALLINT',}
# aInfoTypes={SQL_ATTR_CLIENT_CODEPAGE : '',}
test_string = 'där'
# print (repr(test_string.encode('mbcs')))
# odbc.aInfoTypes.update(aInfoTypes)
import pprint
pp=pprint.pprint
pp(odbc.dataSources())
# c = odbc.connect('DSN=PDSN')
# for i in range(0, 10000):
#
# # try:
# print (i, getConnectAtrr(c, odbc, i))
# # except:
# # print(i)
#
#
# print(c.getinfo(1208))
# pp(dir(odbc.ODBC_API))
# # pp(dir(odbc.ODBC_API.SQLGetInf))
# cursor=c.cursor()
# # pp(dir(cursor))
# print(cursor.getTypeInfo(1208))
'''
SQLINTEGER iUnicode = 1208;
cliRC = SQLSetConnectAttr(hdbc,
SQL_ATTR_CLIENT_CODEPAGE,
(SQLPOINTER)iUnicode,
SQL_IS_INTEGER);
'''
tables=[
# 'QP.TBOKNKAP',
# 'QP.TBOKNONG',
# 'QP.TBOKNONS',
# 'QP.TBOKNOPT',
# 'QP.TBRALOGA',
# 'QP.TBRALOGB',
# 'QP.TBRALOGE',
# 'QP.TBRALOGF',
# 'QP.TBRALOGG',
# 'QP.TBRALOGK',
# 'QP.TBRALOGM',
# 'QP.TBRALOGV',
# 'QP.TGCTRAF',
# 'QP.TGCTRAFJ',
# 'QP.TGCVGN',
# 'QP.TGCVKM',
'QP.TVÄGVIA',
# 'QP.TVXLDEST'
]
from api_databbases import Link, get_column_names_for_table
link=Link('PDSN')
#
for table in tables:
table_creator, table_name=table.split('.')
names=get_column_names_for_table(table_creator, table_name, link)
link.execute("select * from {} where TNR='T08' and TKPLVXLO='ÅGGB'".format(table))
# link.execute("select * from {} where TNR='T08' and TKPLVXLO='ÅGGB fetch first 10 rows only".format(table))
# rows=link.cursor.fetchmany()
# print( rows )
# print(cursor._NumOfRows())
# print(cursor.rowcount)
# print('Fetched')
# out=[None for _ in link.cursor]
out=[e for e in link.cursor]
print(names)
for row in out:
print(row)
print(len(out))
table_creator='QP'
table_name='TBOKNONG'
names=get_column_names_for_table(table_creator, table_name, link)
print(names)
tagnr=6571
gangdat=150707
l=sorted(['a.BESTNR', 'a.DBESTNR', 'a.AKTUELL', 'a.SEKVENS',
'a.TOTVIKT', 'a.LÄNGD', 'a.BK'])
s='select '
s+=', '.join(l)
s+=' from QP.TBOKNONG as a where a.TÅGNR={} AND a.GÅNGDAT={}'.format(tagnr, gangdat)
print(s)
out=link.get_data(s)
print(out)
# cursor.execute("db2 LIST TABLES for all")
# cursor.execute("select * from syscat.tables where type = 'T'")
# cursor.execute("select * from syscat.tables where schema = 'QT'")
# cursor.execute("select * from AO32LL.MISSING1")
# cursor.execute('select * from QPB.TFO fetch first 5')
# cursor.execute("select * from DB2TEC1.ACT")
# cursor.execute("select * from QP.AABEND")
# cursor.execute("DESCRIBE TABLE QP.AABEND")
# cursor.execute("describe SELECT * FROM QP.AABEND");
# cursor.execute("select gångdat, tågnr from QP.TBOKNKAP where gångdat=150615")
# cursor.execute("select top 10 * from QP.TBOKNKAP")
# cursor.execute("select * from QP.TBOKNKAP fetch first 5 rows only")
# cursor.execute("select * from QP.TBOKNKAP where sekvens=1 fetch first 5 rows only")
# cursor.execute("select * from QP.TBOKNKAP where sekvens>=0")
# cursor.execute("select * from QP.TBOKNKAP where GÅNGDAT=150615".encode('mbcs'))
# cursor.execute("select * from QP.TBOKNKAP where GÅNGDAT=150612")
# cursor.execute("select * from QP.TBOKNKAP")
# print(cursor.rowcount)
# out=[row for row in cursor]
# print(len(out))
# cursor.execute("select * from CCPDSN.SYSCOLUMNS fetch first 100 rows only")
# cursor.execute("select * from CCPDSN.SYSCOLUMNS")
# # cursor.execute("select * from SYSIBMTS.SYSTEXTCOLUMNS fetch first 5 rows only")
# for row in cursor:
# print(row)
# cursor.execute("SELECT TBNAME,COLNAME from SYSIBMTS.SYSTEXTCOLUMNS where TBNAME='QP.TBOKNKA'")
# cursor.execute("Select distinct(name), ColType, Length from Sysibm.syscolumns where tbname = 'employee'")
| null |
src/misc/test_db2_connect.py
|
test_db2_connect.py
|
py
| 5,356 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "ibm_db.connect",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pprint.pprint",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "pypyodbc.dataSources",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "api_databbases.Link",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "api_databbases.get_column_names_for_table",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "api_databbases.get_column_names_for_table",
"line_number": 136,
"usage_type": "call"
}
] |
322471476
|
#!/usr/bin/python
from jinja2 import Environment, FileSystemLoader
from xhtml2pdf import pisa
import optparse
import logging
import json
import sys
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s', level=logging.INFO)
env = Environment(loader=FileSystemLoader('.'))
class Reporter:
def __init__(self, results, template):
self._results = results
self._template = env.get_template(template)
@property
def logger(self):
logger = logging.getLogger(__name__)
return logger
@property
def results(self):
return self._results
@property
def template(self):
return self._template
def template_results(self):
with open("{}/order-acked-consolidated-results.json".format(self.results)) as ack_js:
ack = json.load(ack_js)
with open("{}/order-entry-consolidated-results.json".format(self.results)) as entry_js:
ent = json.load(entry_js)
with open("{}/round-trip-consolidated-results.json".format(self.results)) as rt_js:
rt = json.load(rt_js)
with open("{}/systemInfo.json".format(self.results)) as info_js:
sys = json.load(info_js)
template_vars = {"ack": ack, "ent": ent, "source": self.results, "sys": sys, "rt": rt}
temp_out = self.template.render(template_vars)
with open("{}/report.html".format(self.results), 'wb') as oa:
oa.write(temp_out)
self.logger.info("report.html created")
with open("{}/report.pdf".format(self.results), 'wb') as pf:
pisa.CreatePDF(src=temp_out, dest=pf)
self.logger.info("report.pdf created")
if __name__ == "__main__":
parser = optparse.OptionParser()
parser.add_option("--results", dest="results", help="Path to results directory")
parser.add_option("--template", dest="template", help="Path to template file")
options, inputs = parser.parse_args()
template = options.template
if template is None:
sys.exit("Please give directory to your template")
results = options.results
if results is None:
sys.exit("Please give directory to your analysis results")
report = Reporter(results, template)
report.template_results()
| null |
test/benchmark/FosdkBenchmark/report.py
|
report.py
|
py
| 2,271 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "logging.basicConfig",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "jinja2.Environment",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "jinja2.FileSystemLoader",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "xhtml2pdf.pisa.CreatePDF",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "xhtml2pdf.pisa",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "optparse.OptionParser",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 66,
"usage_type": "call"
}
] |
388086055
|
from datetime import datetime
class BankAcc:
def __init__(self,name,phone_number):
self.name=name
self.phone_number=phone_number
self.balance=0
self.loan=0
self.statement=[]
def show_balance(self):
return f"Hello {self.name} you balance is {self.balance}"
def withdraw(self,amount):
if amount>self.balance:
return f"you can't withdraw {amount} it is below minimum"
else:
self.balance-=amount
return self.show_balance()
def deposit(self,amount):
try :
10+amount
except TypeError:
return f"The amount must be a figure"
if amount<0:
return f"you cannot deposit ksh:{amount} it is below minimum"
else:
self.balance+=amount
now=datetime.now()
transaction={
"time":now,
"amount":50,
"narration":"deposited in your account"
}
self.statement.append(transaction)
return self.statement
def show_statement(self):
for transaction in self.statement:
amount=transaction["amount"]
narration=transaction["narration"]
time=transaction["time"]
date=time.strftime("%d/%m/%y")
print(f"{date}:{narration} {amount}")
return
def withdraw(self,amount):
if amount>self.balance:
return f"Your balance is {self.balance} and You cant't withdrwaw {amount} it is below minimum"
else:
self.balance-=amount
now=datetime.now()
transaction={
"amount":70,
"time":now,
"narration":" withdrawn from your account"
}
self.statement.append(transaction)
return self.show_balance()
def show_statement(self):
for transaction in self.statement:
amount=transaction["amount"]
narration=transaction["narration"]
time=transaction["time"]
date=time.strftime("%d/%m/%y")
print(f"{date}:{narration} {amount}")
return
def borrow(self,amount):
self.loan=0
if amount <0:
return f"Dear {self.name} you cannot borrow {amount} your amount should be more than a 0."
elif self.loan>0:
return f"Dear {self.name} you cannot borrow {amount}.Kindly repay your previous loan"
elif amount>0.1*self.balance:
return f"Dear {self.name} you cannot borrow {amount}.your loan limit is {0.05*self.balance}"
else:
loan=amount*0.05
self.loan=loan
self.balance+=amount
now=datetime.now()
transaction={
"amount":3000,
"time":now,
"narration":"borrowed "
}
self.statement.append(transaction)
return f"Congulaturation {self.name} you have received a loan of {amount} your loan balance is {loan} and your account balance is {self.balance}"
def repay(self,amount):
if amount<=0:
return f"Dear {self.name} you cannot pay less than 0"
elif amount<=self.loan:
self.loan-=amount
return f"Dear {self.name} you have payed {amount}"
else:
diff=amount-self.loan
self.loan=0
self.deposit(diff)
now=datetime.now()
transaction={
"amount":3000,
"time":now,
"narration":"repayed for you loan"
}
self.statement.append(transaction)
return f" Dear {self.name} you have repaid you loan of sh {amount}.We have deposited {diff} in your account your."
def withdraw(self,amount):
try:
self.balance-amount
except TypeError:
return f"The amount must be a figure"
def borrow(self,amount):
try:
amount*0.05
except TypeError:
return f"The amount must be a figure"
def repay(self,amount):
try:
amount-self.loan
except TypeError:
return f"The amount must be a figure"
def transfer(self,account,amount):
try:
amount+10
except TypeError:
return f"Amount must be a figure"
fee=amount*0.05
total=amount+fee
if amount<0:
return f" Dear {self.name} amount must be greater than 0"
elif total>self.balance:
return f"Dear {self.name} you dont have sufficient fund"
else:
self.balance-=total
account.deposit(amount)
return f"you have transfered {amount} to acc {account} your balance is {self.balance -account}"
class MobileMoneyAccount(BankAcc):
def __init__(self, name, phone_number,service_provider):
BankAcc.__init__(name,phone_number)
self.service_provider=service_provider
def buy_airtime(self,amount):
try:
amount+10
except TypeError:
return f"Amount must be a figure"
if amount<0:
return f" Dear {self.name} amount must be greater than 0"
elif amount>self.balance:
return f"Dear {self.name} you dont have sufficient fund to buy {amount} worth airtime"
else:
self.balance-=amount
return f"Dear {self.name} you have bought {amount} airtime your new balance is{self.balance}"
def withdraw(self,amount):
try:
amount+10
except TypeError:
return f"Amount must be a figure"
if amount<0:
return f" Dear {self.name} amount must be greater than 0"
elif amount>self.balance:
return f"Dear {self.name} you insufficient fund to withdraw {amount}"
else:
self.balance-=amount
return f"Dear {self.name} you have withdrawn sh{amount} . your new balance is{self.balance}"
| null |
bank.py
|
bank.py
|
py
| 6,150 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "datetime.datetime.now",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 107,
"usage_type": "name"
}
] |
584801874
|
import pytz
from datetime import datetime
from amazon.api import AsinNotFound
from django.db.models import CharField
from django.db.models import Manager as DgManager
from django.db.models import Model
from django.db.models import PositiveSmallIntegerField
from django.db.models import Q
from accounts.models import AccountManager
from books.models import Book
from books.models import Code
from books.models import BookDescription
from sales.models import SalesLineBook
from sales.models import SalesLineShipping
from sales.models import Sale
from sales.models import Transfer
class CoopManager(DgManager):
accounts = AccountManager()
def get_coop(self, user):
acc = self.accounts.get_account(user)
return acc.coop if user != acc else Cooperative.objects.get(pk=1)
class Cooperative(Model):
"""A cooperative model, the heart of the application
The cooperative binds students, managers and books together. It also
manages transactions between both actors with a well-defined process.
Thus, we considered that a cooperative is a stand-alone module which act as
a controller in this application.
"""
name = CharField(max_length=200, blank=True)
address = CharField(max_length=200, blank=True)
shipping_fee = PositiveSmallIntegerField(default=10)
objects = CoopManager()
def fetch_code(self, q):
# Controller pattern (Grasp)
return Code.objects.get_or_create_code(q)[0]
def fetch_desc(self, code):
# Controller pattern (Grasp)
desc, created = BookDescription.objects.get_or_create_desc(code)
desc.coop.add(self)
msg = ""
if created:
try:
BookDescription.objects.lookup_update(desc)
except AsinNotFound:
msg = "".join(("This book is unknown to the system, please",
" fill in the description"))
return desc, msg
def create_sale(self, student):
# Controller pattern (Grasp)
return Sale.objects.create(client=student, coop=self)
def delete_sale(self, sale_id):
sale = Sale.objects.get(pk=sale_id)
book_set = sale.get_books()
msg = "".join(("The sale {} has been cancelled, ",
"concerning books {}")).format(sale.id, book_set)
sale.client.send_email(msg)
book_set.update(stock=Book.IN_STOCK)
sale.delete()
def get_available_bookset(self, q):
book_set = Book.objects.get_bookset(q)
booking_due = SalesLineBook.objects.filter(sale__is_due=True)
return book_set.filter(Q(stock=Book.IN_STOCK) |
Q(stock=Book.BOOKED,
saleslinebook__in=booking_due))
def get_notinstock_bookset(self, q):
coop_inventory = Book.objects.get_coop_inventory(self)
book_id_set = Book.objects.get_bookset(q).values("id")
coop_inventory.filter(pk__in=book_id_set)
return coop_inventory.filter(stock=Book.NOT_IN_STOCK)
def get_sales(self):
sale_set = Sale.objects.filter(coop=self).exclude(created_date=None)
return sale_set.filter(delivery_date=None).order_by("id").reverse()
def get_transfer_bookset(self, io):
if io == "in":
transfer_set = Transfer.objects.filter(coop_to=self, date_in=None)
return Book.objects.filter(transfer__in=transfer_set)
elif io == "out":
book_set = Book.objects.get_coop_inventory(self)
book_set = book_set.filter(stock=Book.BOOKED)
book_set = book_set.exclude(saleslinebook__saleslineshipping=None)
return book_set.filter(transfer=None)
else:
raise ValueError("no such choice: {}".format(io))
def notify_client(self, sale):
book_set = sale.get_books()
to_transfer_set = SalesLineShipping.objects.filter(sale=sale)
transferred_set = Transfer.objects.filter(book=book_set, coop_to=self)
transferred_set = transferred_set.exclude(date_in=None)
if to_transfer_set.count() == transferred_set.count():
sale.created_date = datetime.now()
msg = "Your books has been transferred sale{}: {}".format(sale.id,
book_set)
sale.client.send_email(msg)
def sell(self, sale_id):
sale = Sale.objects.filter(pk=sale_id)
sale.first().get_books().update(stock=Book.SOLD)
sale.update(delivery_date=datetime.now(pytz.utc))
def transfer_book(self, io, book_id):
book = Book.objects.get(pk=book_id)
sale = book.saleslinebook.sale
transfer = Transfer.objects.filter(book=book, date_in=None)
if io == "in":
transfer.update(date_in=datetime.now(pytz.utc))
transfer = transfer.first()
elif io == "out":
transfer = Transfer.objects.create(book=book, coop_from=self,
coop_to=sale.client.coop,
date_out=datetime.now(pytz.utc))
return transfer, sale
def __str__(self):
return self.name
| null |
coop/models.py
|
models.py
|
py
| 5,214 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.db.models.Manager",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "accounts.models",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "accounts.models.AccountManager",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "django.db.models.Model",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "django.db.models.CharField",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "django.db.models.PositiveSmallIntegerField",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "books.models.Code.objects.get_or_create_code",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "books.models.Code.objects",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "books.models.Code",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "books.models.BookDescription.objects.get_or_create_desc",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "books.models.BookDescription.objects",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "books.models.BookDescription",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "books.models.BookDescription.objects.lookup_update",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "books.models.BookDescription.objects",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "books.models.BookDescription",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "amazon.api.AsinNotFound",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "sales.models.Sale.objects.create",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "sales.models.Sale.objects",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "sales.models.Sale",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "sales.models.Sale.objects.get",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "sales.models.Sale.objects",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "sales.models.Sale",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "books.models.Book.IN_STOCK",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "books.models.Book",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "books.models.Book.objects.get_bookset",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "books.models.Book.objects",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "books.models.Book",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "sales.models.SalesLineBook.objects.filter",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "sales.models.SalesLineBook.objects",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "sales.models.SalesLineBook",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "django.db.models.Q",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "books.models.Book.IN_STOCK",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "books.models.Book",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "django.db.models.Q",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "books.models.Book.BOOKED",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "books.models.Book",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "books.models.Book.objects.get_coop_inventory",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "books.models.Book.objects",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "books.models.Book",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "books.models.Book.objects.get_bookset",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "books.models.Book.objects",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "books.models.Book",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "books.models.Book.NOT_IN_STOCK",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "books.models.Book",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "sales.models.Sale.objects.filter",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "sales.models.Sale.objects",
"line_number": 88,
"usage_type": "attribute"
},
{
"api_name": "sales.models.Sale",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "sales.models.Transfer.objects.filter",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "sales.models.Transfer.objects",
"line_number": 93,
"usage_type": "attribute"
},
{
"api_name": "sales.models.Transfer",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "books.models.Book.objects.filter",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "books.models.Book.objects",
"line_number": 94,
"usage_type": "attribute"
},
{
"api_name": "books.models.Book",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "books.models.Book.objects.get_coop_inventory",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "books.models.Book.objects",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "books.models.Book",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "books.models.Book.BOOKED",
"line_number": 97,
"usage_type": "attribute"
},
{
"api_name": "books.models.Book",
"line_number": 97,
"usage_type": "name"
},
{
"api_name": "sales.models.SalesLineShipping.objects.filter",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "sales.models.SalesLineShipping.objects",
"line_number": 105,
"usage_type": "attribute"
},
{
"api_name": "sales.models.SalesLineShipping",
"line_number": 105,
"usage_type": "name"
},
{
"api_name": "sales.models.Transfer.objects.filter",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "sales.models.Transfer.objects",
"line_number": 106,
"usage_type": "attribute"
},
{
"api_name": "sales.models.Transfer",
"line_number": 106,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "sales.models.Sale.objects.filter",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "sales.models.Sale.objects",
"line_number": 115,
"usage_type": "attribute"
},
{
"api_name": "sales.models.Sale",
"line_number": 115,
"usage_type": "name"
},
{
"api_name": "books.models.Book.SOLD",
"line_number": 116,
"usage_type": "attribute"
},
{
"api_name": "books.models.Book",
"line_number": 116,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 117,
"usage_type": "name"
},
{
"api_name": "pytz.utc",
"line_number": 117,
"usage_type": "attribute"
},
{
"api_name": "books.models.Book.objects.get",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "books.models.Book.objects",
"line_number": 120,
"usage_type": "attribute"
},
{
"api_name": "books.models.Book",
"line_number": 120,
"usage_type": "name"
},
{
"api_name": "sales.models.Transfer.objects.filter",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "sales.models.Transfer.objects",
"line_number": 122,
"usage_type": "attribute"
},
{
"api_name": "sales.models.Transfer",
"line_number": 122,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 124,
"usage_type": "name"
},
{
"api_name": "pytz.utc",
"line_number": 124,
"usage_type": "attribute"
},
{
"api_name": "sales.models.Transfer.objects.create",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "sales.models.Transfer.objects",
"line_number": 127,
"usage_type": "attribute"
},
{
"api_name": "sales.models.Transfer",
"line_number": 127,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 129,
"usage_type": "name"
},
{
"api_name": "pytz.utc",
"line_number": 129,
"usage_type": "attribute"
}
] |
478981248
|
from rest_framework.throttling import SimpleRateThrottle
from django.conf import settings
class AccountThrottling(SimpleRateThrottle):
scope = 'account'
THROTTLE_RATES = {
'account': settings.THROTTLING_RATES,
}
def get_cache_key(self, request, view):
account = request.query_params.get('account')
return self.cache_format % {'scope': self.scope, 'ident': account}
| null |
luffyapi/apps/user/throttling.py
|
throttling.py
|
py
| 408 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "rest_framework.throttling.SimpleRateThrottle",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "django.conf.settings.THROTTLING_RATES",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 8,
"usage_type": "name"
}
] |
366581422
|
from selenium import webdriver
from paresrs.ncert_parsers import Ncert_Parser
chrome = webdriver.Chrome(executable_path="/Users/ulaganathan/Software/ChromeDriver/chromedriver")
chrome.get("https://ncert.nic.in/textbook.php")
ncert_parser_obj = Ncert_Parser(chrome)
for classes in ncert_parser_obj.class_options:
print(classes)
class_name_input = input("Enter the Class : ")
ncert_parser_obj.select_class_name(class_name_input)
for subject in ncert_parser_obj.subject_options:
print(subject)
subject_input = input("Enter the Subject : ").strip().lower().capitalize()
ncert_parser_obj.select_subject_name(subject_input)
for book in ncert_parser_obj.book_name_options:
print(book)
book_name = input("Enter the Book Name : ").strip().lower().capitalize()
ncert_parser_obj.select_book_name(book_name)
ncert_parser_obj.go_button.click()
print("")
print("Success")
print("The Button is Clicked...")
ncert_parser_obj.download_button.click()
| null |
chrome_driver_programs/ncert_website/app.py
|
app.py
|
py
| 955 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "paresrs.ncert_parsers.Ncert_Parser",
"line_number": 7,
"usage_type": "call"
}
] |
92283236
|
import asyncio
from typing import List
import pytest
from typedflow.nodes import LoaderNode
from typedflow.tasks import DataLoader
@pytest.fixture
def loader_node() -> LoaderNode[str]:
lst: List[str] = ['hi', 'hello', 'konnichiwa']
loader: DataLoader[str] = DataLoader(gen=lst, batch_size=2)
node: LoaderNode[str] = LoaderNode(loader=loader)
return node
def test_get_or_produce_batch(loader_node):
node = loader_node
node.add_succ()
batch = asyncio.run(node.get_or_produce_batch(0))
assert batch.data == ['hi', 'hello']
batch = asyncio.run(node.get_or_produce_batch(1)) # noqa
assert batch.data == ['konnichiwa']
def test_cache(loader_node):
node = loader_node
node.add_succ()
node.add_succ()
batch = asyncio.run(node.get_or_produce_batch(0))
assert batch.data == ['hi', 'hello']
assert node.cache_table.cache_table[0].value.data == ['hi', 'hello']
batch = asyncio.run(node.get_or_produce_batch(0)) # noqa
assert batch.data == ['hi', 'hello']
assert 0 not in node.cache_table.cache_table
| null |
typedflow/tests/nodes/test_loader_node.py
|
test_loader_node.py
|
py
| 1,074 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "typing.List",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "typedflow.tasks.DataLoader",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "typedflow.nodes.LoaderNode",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "pytest.fixture",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "typedflow.nodes.LoaderNode",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "asyncio.run",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "asyncio.run",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "asyncio.run",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "asyncio.run",
"line_number": 34,
"usage_type": "call"
}
] |
338311187
|
__author__ = 'cfiloteo'
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.core import serializers
import json
class ajaxPaginator(Paginator):
rows_per_page = 5
page_number = 1
fields = ()
format = 'json'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def get_page(self):
# page
try:
pg = self.page( self.page_number )
except PageNotAnInteger:
# If page is not an integer, deliver first page.
pg = self.page( 1 )
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
pg = self.page( self.num_pages )
return pg
def get_json(self):
#pg = self.get_page()
self.page( self.page_number )
if not self.fields:
dt = self.object_list.values()
else:
dt = self.object_list.values( *self.fields )
next_page_number = pg.next_page_number() if pg.has_next() else self.num_pages
previous_page_number = pg.previous_page_number() if pg.has_previous() else 1
response = {
'num_pages': self.num_pages,
'has_next': pg.has_next(),
'has_previous': pg.has_previous(),
'next_page_number': next_page_number,
'previous_page_number': previous_page_number,
'data': list(dt)
}
return json.dumps(response)
| null |
lib/ajaxPagination.py
|
ajaxPagination.py
|
py
| 1,480 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.core.paginator.Paginator",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.core.paginator.PageNotAnInteger",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "django.core.paginator.EmptyPage",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 51,
"usage_type": "call"
}
] |
622509543
|
import json
from django.core.paginator import EmptyPage, Paginator, PageNotAnInteger
from django.shortcuts import render, get_object_or_404
from django.contrib.auth.decorators import login_required
from .models import ArticleColumn, ArticlePost
from .forms import ArticleColumnForm, ArticlePostForm
from django.views.decorators.csrf import csrf_exempt
from django.http import HttpResponse
from django.views.decorators.http import require_POST
from .models import ArticleTag
from .forms import ArticleTagForm
import json
from translateyoudao.translate import translate
import json
from urllib import parse
import urllib.request, urllib.parse, urllib.request
# 读取数据库中的栏目
@login_required(login_url='/account/login/')
# 防止提交表单时遇到的CSRF问题的一种方式
@csrf_exempt
def article_column(request):
if request.method == 'GET':
# 读取用户在数据库中的栏目
columns = ArticleColumn.objects.filter(user=request.user)
column_form = ArticleColumnForm()
return render(request, 'article/column/article-column.html', {'columns':columns, 'column_form':column_form})
if request.method == 'POST':
column_name = request.POST['column']
columns = ArticleColumn.objects.filter(user_id=request.user.id, column=column_name)
if columns:
return HttpResponse('2')
else:
ArticleColumn.objects.create(user=request.user, column=column_name)
return HttpResponse('1')
@login_required(login_url='/account/login')
@require_POST
@csrf_exempt
def rename_article_column(request):
column_name = request.POST['column_name']
column_id = request.POST['column_id']
try:
line = ArticleColumn.objects.get(id=column_id)
line.column = column_name
line.save()
return HttpResponse('1')
except:
return HttpResponse('0')
@login_required(login_url='/account/login')
@require_POST
@csrf_exempt
def del_article_column(request):
column_id = request.POST['column_id']
try:
line = ArticleColumn.objects.get(id=column_id)
line.delete()
return HttpResponse('1')
except:
return HttpResponse('0')
@login_required(login_url='/account/login')
@csrf_exempt
def article_post(request):
if request.method == 'POST':
article_post_form = ArticlePostForm(data=request.POST)
if article_post_form.is_valid():
cd = article_post_form.cleaned_data
try:
new_article = article_post_form.save(commit=False)
new_article.author = request.user
new_article.column = request.user.article_column.get(id=request.POST['column_id'])
new_article.save()
id = new_article.id
slug = new_article.slug
tags = request.POST['tags']
if tags:
for atag in json.loads(tags):
tag = request.user.tag.get(tag=atag)
new_article.article_tag.add(tag)
return HttpResponse(json.dumps({'e': 1,'id':id,'slug':slug}))
except:
return HttpResponse(json.dumps({'e': 2}))
else:
return HttpResponse(json.dumps({'e': 3}))
else:
article_post_form = ArticlePostForm()
article_columns = request.user.article_column.all()
# 得到当前用户的所有标签
article_tags = request.user.tag.all()
# 打印出文章标签从数据库中提取出标签的sql语句
print(article_tags.query)
return render(request, 'article/column/article_post.html', {'article_post_form':article_post_form, 'article_columns':article_columns, 'article_tags':article_tags})
@login_required(login_url='/account/login')
def article_list(request):
articles_list = ArticlePost.objects.filter(author=request.user)
paginator = Paginator(articles_list, 5)
page = request.GET.get('page')
try:
current_page = paginator.page(page)
articles = current_page.object_list
except PageNotAnInteger:
current_page = paginator.page(1)
articles = current_page.object_list
except EmptyPage:
current_page = paginator.page(paginator.num_pages)
articles = current_page.object_list
return render(request, 'article/column/article_list.html', {"articles":articles, 'page':current_page})
@login_required(login_url='/account/login')
def article_detail(request,id,slug):
article = get_object_or_404(ArticlePost,id=id,slug=slug)
return render(request, 'article/column/article_detail.html', {'article':article})
@login_required(login_url='/account/login')
@require_POST
@csrf_exempt
def del_article(request):
article_id = request.POST['article_id']
try:
article = ArticlePost.objects.get(id=article_id)
article.delete()
return HttpResponse("1")
except:
return HttpResponse("2")
@login_required(login_url='/account/login')
@csrf_exempt
def redit_article(request,article_id):
if request.method == "GET":
article_columns = request.user.article_column.all()
article = ArticlePost.objects.get(id=article_id)
this_article_form = ArticlePostForm(initial={"title":article.title})
this_article_column = article.column
return render(request, 'article/column/redit_article.html', {'article':article, 'article_columns':article_columns, 'this_article_column':this_article_column, 'this_article_form':this_article_form})
else:
redit_article = ArticlePost.objects.get(id=article_id)
try:
redit_article.column = request.user.article_column.get(id=request.POST['column_id'])
redit_article.title = request.POST['title']
redit_article.body = request.POST['body']
redit_article.save()
return HttpResponse('1')
except:
return HttpResponse('2')
@login_required(login_url='/account/login')
@csrf_exempt
def article_tag(request):
if request.method == "GET":
article_tags = ArticleTag.objects.filter(author=request.user)
article_tag_form = ArticleTagForm()
return render(request, "article/tag/tag_list.html", {"article_tags":article_tags, "article_tag_form":article_tag_form})
if request.method == "POST":
tag_post_form = ArticleTagForm(data=request.POST)
if tag_post_form.is_valid():
try:
new_tag = tag_post_form.save(commit=False)
new_tag.author = request.user
new_tag.save()
return HttpResponse("1")
except:
return HttpResponse("the data cannot be save.")
else:
return HttpResponse("Sorry, the form is not valid.")
@login_required(login_url='/account/login')
@require_POST
@csrf_exempt
def del_article_tag(request):
tag_id = request.POST['tag_id']
try:
tag = ArticleTag.objects.get(id=tag_id)
tag.delete()
return HttpResponse("1")
except:
return HttpResponse("2")
def youdaofanyi(request):
'''''
有道翻译功能
'''
query = {} # 定义需要翻译的文本
fanyi = request.POST.get('fanyi_content', '')
query['q'] = fanyi # 输入要翻译的文本
url = 'http://fanyi.youdao.com/openapi.do?keyfrom=11pegasus11&key=273646050&type=data&doctype=json&version=1.1&' + parse.urlencode(
query) # 有道翻译api
response = urllib.request.urlopen(url, timeout=3)
# response = urllib.parse.urlopen(url)
# 编码转换
try:
html = response.read().decode('utf-8')
d = json.loads(html)
explains = d.get('basic').get('explains') # 翻译后输出
a1 = d.get('basic').get('uk-phonetic') # 英式发音
a2 = d.get('basic').get('us-phonetic') # 美式发音
explains_list = []
for result in explains:
explains_list.append(result)
# 输出
fanyi_dict = {
'q': query['q'],
'yinshi': a1,
'meishi': a2,
'explains_list': explains_list,
}
return fanyi_dict
except Exception as e:
print (e)
def listblogs(request):
fanyi_dict = {}
fanyi_form = fanyiForm()
if request.method == 'POST':
fanyi_form = fanyiForm(request.POST)
if fanyi_form.is_valid():
fanyi_dict = youdaofanyi(request)
bloglist = {
'fanyi_form' : fanyi_form, # 翻译的表单
'fanyi_dict' : fanyi_dict, # 翻译出来的文本
}
return render(request, 'fanyi.html', bloglist)
| null |
article/views.py
|
views.py
|
py
| 8,821 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "models.ArticleColumn.objects.filter",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "models.ArticleColumn.objects",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "models.ArticleColumn",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "forms.ArticleColumnForm",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "models.ArticleColumn.objects.filter",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "models.ArticleColumn.objects",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "models.ArticleColumn",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "models.ArticleColumn.objects.create",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "models.ArticleColumn.objects",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "models.ArticleColumn",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "django.views.decorators.csrf.csrf_exempt",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "models.ArticleColumn.objects.get",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "models.ArticleColumn.objects",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "models.ArticleColumn",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "django.views.decorators.http.require_POST",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "django.views.decorators.csrf.csrf_exempt",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "models.ArticleColumn.objects.get",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "models.ArticleColumn.objects",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "models.ArticleColumn",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "django.views.decorators.http.require_POST",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "django.views.decorators.csrf.csrf_exempt",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "forms.ArticlePostForm",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "forms.ArticlePostForm",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "django.views.decorators.csrf.csrf_exempt",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "models.ArticlePost.objects.filter",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "models.ArticlePost.objects",
"line_number": 104,
"usage_type": "attribute"
},
{
"api_name": "models.ArticlePost",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "django.core.paginator.Paginator",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "django.core.paginator.PageNotAnInteger",
"line_number": 110,
"usage_type": "name"
},
{
"api_name": "django.core.paginator.EmptyPage",
"line_number": 113,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "models.ArticlePost",
"line_number": 121,
"usage_type": "argument"
},
{
"api_name": "django.shortcuts.render",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "models.ArticlePost.objects.get",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "models.ArticlePost.objects",
"line_number": 131,
"usage_type": "attribute"
},
{
"api_name": "models.ArticlePost",
"line_number": 131,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "django.views.decorators.http.require_POST",
"line_number": 126,
"usage_type": "name"
},
{
"api_name": "django.views.decorators.csrf.csrf_exempt",
"line_number": 127,
"usage_type": "name"
},
{
"api_name": "models.ArticlePost.objects.get",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "models.ArticlePost.objects",
"line_number": 143,
"usage_type": "attribute"
},
{
"api_name": "models.ArticlePost",
"line_number": 143,
"usage_type": "name"
},
{
"api_name": "forms.ArticlePostForm",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "models.ArticlePost.objects.get",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "models.ArticlePost.objects",
"line_number": 148,
"usage_type": "attribute"
},
{
"api_name": "models.ArticlePost",
"line_number": 148,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "django.views.decorators.csrf.csrf_exempt",
"line_number": 139,
"usage_type": "name"
},
{
"api_name": "models.ArticleTag.objects.filter",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "models.ArticleTag.objects",
"line_number": 163,
"usage_type": "attribute"
},
{
"api_name": "models.ArticleTag",
"line_number": 163,
"usage_type": "name"
},
{
"api_name": "forms.ArticleTagForm",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "forms.ArticleTagForm",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "django.views.decorators.csrf.csrf_exempt",
"line_number": 160,
"usage_type": "name"
},
{
"api_name": "models.ArticleTag.objects.get",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "models.ArticleTag.objects",
"line_number": 187,
"usage_type": "attribute"
},
{
"api_name": "models.ArticleTag",
"line_number": 187,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "django.views.decorators.http.require_POST",
"line_number": 182,
"usage_type": "name"
},
{
"api_name": "django.views.decorators.csrf.csrf_exempt",
"line_number": 183,
"usage_type": "name"
},
{
"api_name": "urllib.parse.urlencode",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "urllib.parse",
"line_number": 201,
"usage_type": "name"
},
{
"api_name": "urllib.request.urlopen",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "urllib.request",
"line_number": 203,
"usage_type": "attribute"
},
{
"api_name": "json.loads",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 239,
"usage_type": "call"
}
] |
477118132
|
import torch
from pytorch_pretrained_bert import BertTokenizer
from pytorch_pretrained_bert.modeling import BertPreTrainedModel, BertModel
from pytorch_pretrained_bert.optimization import BertAdam
from pytorch_pretrained_bert.file_utils import WEIGHTS_NAME, CONFIG_NAME
import csv, os
import numpy as np
import ast
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
import logging
import configparser
from tqdm import tqdm, trange # tqdmで処理進捗を表示
from tensorboardX import SummaryWriter
from sklearn.metrics import precision_score, confusion_matrix
import pickle
from emo_PRF import eval_multi_label, fail_and_success, multi_classification_report
#logger = logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class BERTforMultiLabelClassification(BertPreTrainedModel):
'''
input_featureをBERTに入力
->出力からdrop_out_rateで選択された値だけLinear層に入力
->線形変換によりshape=(num_label,)に変換
'''
def __init__(self, config, num_labels=19):
super(BERTforMultiLabelClassification, self).__init__(config)
self.num_labels = num_labels
self.bert = BertModel(config)
self.dropout = torch.nn.Dropout(config.hidden_dropout_prob)
self.classifier = torch.nn.Linear(config.hidden_size, num_labels)
self.apply(self.init_bert_weights) # BERTのパラメータを修正?
def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):
_, pooled_output = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
if labels is not None: # training case
loss_function = torch.nn.BCEWithLogitsLoss()
loss = loss_function(logits.view(-1, self.num_labels), labels.view(-1, self.num_labels))
return loss
else: # test case
return logits
def freeze_bert_encoder(self): # bertのパラメータを固定する関数?finetuneなしはこの関数呼びました
for param in self.bert.parameters():
param.requires_grad = False
def unfreeze_bert_encoder(self):
for param in self.bert.parameters():
param.requires_grad = True
class InputExample(object):
def __init__(self, guid, text_a, text_b=None, label=None):
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class InputFeature(object):
def __init__(self, input_ids, input_mask, segment_ids, label_id):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
def loaddata_and_create_examples(set_type, config):
'''set_type: train or dev
'''
data_dir = config['path']['data_dir'] + config['path']['{}_data'.format(set_type)]
with open(data_dir, 'r') as f:
reader = csv.reader(f, delimiter='\t')
lines = []
for line in reader:
for i in range(5):
line[i] = line[i].strip()
lines.append(line)
examples = []
for i, line in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
if config['data'].getboolean('with_context'):
if line[1].find('No Context') == -1: #contextがあるなら
line[1] = line[1].replace('.', '. [SEP]')
text_a = line[1] + line[2]
#text_a = line[1] + ' ' + line[2]
else:
text_a = line[2]
else:
text_a = line[2]
if config['data'].getboolean('with_char#'):
text_a = line[3] + '#' + text_a
label = ast.literal_eval(line[-1])
examples.append( InputExample(guid=guid, text_a=text_a, text_b=None, label=label) )
return examples
def convert_examples_to_features(examples, max_seq_len, tokenizer):
'''
example -> list of InputBatchesに変換
input_ids: max_seq_lenに文長をあわせ(切り捨てor padding),tokenize(word->idxに変換)したもの.
input_mask: input_idsの各indexがpadding->0, otherwise->1
segment_ids: ???
label_id: exampleのlabel_idそのまま ex.[0,0,0,...1,0] (len=19)
'''
features = []
for ex_index, example in enumerate(examples):
if ex_index % 10000 == 0:
logger.info("Now writing ex.%d of %d" % (ex_index, len(examples)))
tokens_a = tokenizer.tokenize(example.text_a)
if len(tokens_a) > max_seq_len - 2:
tokens_a = tokens_a[:(max_seq_len - 2)]
tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
segment_ids = [0] * len(tokens)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# mask 1:real tokens, 0:padding tokens
input_mask = [1] * len(input_ids)
padding = [0] * (max_seq_len - len(input_ids))
input_ids += padding
input_mask += padding
segment_ids += padding
assert len(input_ids) == max_seq_len
assert len(input_mask) == max_seq_len
assert len(segment_ids) == max_seq_len
label_id = example.label
features.append(
InputFeature(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id
))
return features
if __name__ =='__main__':
#load config parser
config_path = './conf.ini'
config = configparser.ConfigParser()
config.read(config_path)
# set defalt valiables
do_train = config['learning'].getboolean('do_train')
max_seq_len = int(config['learning']['max_seq_length'])
tokenizer = BertTokenizer.from_pretrained(config['learning']['bert_model'])
train_batch_size = int(config['learning']['train_batch_size'])
gradient_accumulation_steps = int(config['learning']['gradient_accumulation_steps'])
num_train_epochs = int(config['learning']['num_train_epochs'])
# cudaのハナシ TODO gpu3個とかの指定可能な形に修正
# local_rank = [0, 1, 2]
local_rank = int(config['learning']['local_rank']) #-1: no_cuda=True or 使えるGPU=nullじゃなければGPUフルで使う
no_cuda = config['learning'].getboolean('no_cuda')
num_labels = int(config['learning']['num_labels'])
if local_rank == -1 or no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not no_cuda else "cpu")
n_gpu = torch.cuda.device_count()
else:
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
#n_gpu = len(local_rank) #TODO gpu複数個の設定
n_gpu = 1
if do_train:
if local_rank in [-1, 0]:
tb_writer = SummaryWriter()
#Load data
train_examples = loaddata_and_create_examples("train", config)
# convert to features
train_features = convert_examples_to_features(train_examples, max_seq_len, tokenizer)
# change dtype to torch.tensor
all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.float)
train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
# if distributed sampling is needed, change to DistributedSampler(train_data)
train_sampler = RandomSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=train_batch_size)
num_train_optimization_steps = \
len(train_dataloader) // gradient_accumulation_steps * num_train_epochs
# call the model
#bertconfig = BertConfig(vocab_size=32000)
model = BERTforMultiLabelClassification.from_pretrained(config['learning']['bert_model'], num_labels=num_labels)
if config['model'].getboolean('freeze_bert_encoder'):
model.freeze_bert_encoder()
model.to(device)
if local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(model,
device_ids=[local_rank],
output_device=local_rank,
find_unused_parameters=True)
# prepare optimizer
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = BertAdam(optimizer_grouped_parameters,
lr=float(config['learning']['learning_rate']),
warmup=float(config['learning']['warmup_propotion']),
t_total=num_train_optimization_steps)
# TODO write logger.info here.
model.train()
global_step = 0
nb_tr_steps = 0
tr_loss = 0
#### Training ###
for _ in trange(int(config['learning']['num_train_epochs']), desc="Epoch", disable=local_rank not in [-1, 0]):
tr_loss = 0
nb_tr_examples, nb_tr_steps = 0, 0
for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration", disable=local_rank not in [-1, 0])):
batch = tuple(t.to(device) for t in batch)
input_ids, input_mask, segment_ids, label_ids = batch
loss = model(input_ids, token_type_ids=segment_ids, attention_mask=input_mask,\
labels=label_ids)
if n_gpu > 1:
loss = loss.mean()
if gradient_accumulation_steps > 1:
loss = loss / gradient_accumulation_steps
loss.backward(loss)
tr_loss += loss.item()
nb_tr_examples += input_ids.size(0)
nb_tr_steps +=1
if (step + 1) % gradient_accumulation_steps == 0:
optimizer.step()
optimizer.zero_grad()
global_step += 1
if local_rank in [-1, 0]:
tb_writer.add_scalar('lr', optimizer.get_lr()[0], global_step)
tb_writer.add_scalar('loss', loss.item(), global_step)
tb_writer.close()
# saving best-practices
if do_train and (local_rank == -1 or torch.distributed.get_rank() == 0):
model_to_save = model.module if hasattr(model, 'module') else model
# saveしたモデルはfrom_pretrainedで使える
output_dir = config['path']['output_dir']
output_model_file = os.path.join(output_dir, WEIGHTS_NAME)
output_config_file = os.path.join(output_dir, CONFIG_NAME)
torch.save(model_to_save.state_dict(), output_model_file)
model_to_save.config.to_json_file(output_config_file)
tokenizer.save_vocabulary(output_dir)
# for config.ini
with open(output_dir+'config.ini', 'w') as configfile:
config.write(configfile)
# Reload the trained model and vocab that u have fine-tuned
model = BERTforMultiLabelClassification.from_pretrained(output_dir, num_labels=int(config['learning']['num_labels']))
tokenizer = BertTokenizer.from_pretrained(
output_dir,
do_lower_case=config['learning'].getboolean('do_lower_case'))
print("Done saving models")
else:
#model = BERTforMultiLabelClassification.from_pretrained(
# config['learning']['bert_model'],
# num_labels=int(config['learning']['num_labels']))
model = BERTforMultiLabelClassification.from_pretrained(config['path']['output_dir'], num_labels=int(config['learning']['num_labels']))
model.to(device)
# Eval
if config['learning'].getboolean('do_eval') and (local_rank == -1 or torch.distributed.get_rank() == 0):
eval_examples = loaddata_and_create_examples(set_type="dev", config=config)
eval_features = convert_examples_to_features(eval_examples, max_seq_len, tokenizer)
#import pdb; pdb.set_trace()
logger.info("**** Running Eveluation ***")
logger.info(" Num examples = %d", len(eval_examples))
logger.info(" Batch size = %d", int(config['learning']['eval_batch_size']))
all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.float)
eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
# if distributed sampling is needed, change to DistributedSampler(train_data)
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data,
sampler=eval_sampler,
batch_size=int(config['learning']['eval_batch_size']))
model.eval()
eval_loss = 0
nb_eval_steps = 0
preds = []
out_label_ids = None
sentences = []
for nb_eval_steps, batch in enumerate(tqdm(eval_dataloader, desc="Evaluating")):
batch = tuple(t.to(device) for t in batch)
input_ids, input_mask, segment_ids, label_ids = batch
with torch.no_grad():
logits = model(input_ids, token_type_ids=segment_ids, attention_mask=input_mask)
logits = logits.sigmoid() # ここいるかわらからんけど
loss_function = torch.nn.BCEWithLogitsLoss()
tmp_eval_loss = loss_function(logits.view(-1, num_labels), label_ids.view(-1, num_labels))
eval_loss += tmp_eval_loss.mean().item()
input_ids = input_ids.detach().cpu().tolist()
for token_id in input_ids:
sentences.append(tokenizer.convert_ids_to_tokens(token_id))
if len(preds) == 0:
preds.append(logits.detach().cpu().numpy())
out_label_ids = label_ids.detach().cpu().numpy()
else:
# よくわからんけどそのままappendするとミスるらしい
preds[0] = np.append(preds[0], logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(out_label_ids, label_ids.detach().cpu().numpy(), axis=0
)
eval_loss = eval_loss / nb_eval_steps
preds = np.round(preds[0]) #TODO 丸め誤差問題
P, R ,F1 = eval_multi_label(out_label_ids, preds)
print("P:{}".format(P))
print("R:{}".format(R))
print("F1:{}".format(F1))
if config['learning'].getboolean('do_char_eval'):
with open(config['path']['output_dir']+'eval_result_char.csv', 'w') as f:
columns = ['P', 'R' ,'F1']
results = [np.round(P, 4), np.round(R, 4), np.round(F1,4)]
writer = csv.writer(f)
writer.writerow(columns)
writer.writerow(results)
writer.writerow([' '])
# print("Done writing evaluation result.")
with open(config['path']['output_dir']+'preds_char.pickle', 'wb') as f:
pickle.dump(preds, f)
with open(config['path']['output_dir']+'out_label_ids_char.pickle', 'wb') as f:
pickle.dump(out_label_ids, f)
with open(config['path']['output_dir']+'sentences_char.pickle', 'wb') as f:
pickle.dump(sentences, f)
else:
with open(config['path']['output_dir']+'eval_result.csv', 'w') as f:
columns = ['P', 'R' ,'F1']
results = [np.round(P, 4), np.round(R, 4), np.round(F1,4)]
writer = csv.writer(f)
writer.writerow(columns)
writer.writerow(results)
writer.writerow([' '])
with open(config['path']['output_dir']+'preds.pickle', 'wb') as f:
pickle.dump(preds, f)
with open(config['path']['output_dir']+'out_label_ids.pickle', 'wb') as f:
pickle.dump(out_label_ids, f)
with open(config['path']['output_dir']+'sentences.pickle', 'wb') as f:
pickle.dump(sentences, f)
| null |
others/BERT_multilabel.py
|
BERT_multilabel.py
|
py
| 17,234 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "logging.getLogger",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pytorch_pretrained_bert.modeling.BertPreTrainedModel",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "pytorch_pretrained_bert.modeling.BertModel",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "torch.nn.Dropout",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Linear",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.BCEWithLogitsLoss",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "csv.reader",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "ast.literal_eval",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "configparser.ConfigParser",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "pytorch_pretrained_bert.BertTokenizer.from_pretrained",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "pytorch_pretrained_bert.BertTokenizer",
"line_number": 156,
"usage_type": "name"
},
{
"api_name": "torch.device",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 167,
"usage_type": "attribute"
},
{
"api_name": "torch.cuda.device_count",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 168,
"usage_type": "attribute"
},
{
"api_name": "torch.cuda.set_device",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 170,
"usage_type": "attribute"
},
{
"api_name": "torch.device",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "tensorboardX.SummaryWriter",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "torch.long",
"line_number": 184,
"usage_type": "attribute"
},
{
"api_name": "torch.tensor",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "torch.long",
"line_number": 185,
"usage_type": "attribute"
},
{
"api_name": "torch.tensor",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "torch.long",
"line_number": 186,
"usage_type": "attribute"
},
{
"api_name": "torch.tensor",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "torch.float",
"line_number": 187,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.data.TensorDataset",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.RandomSampler",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "torch.nn.parallel.DistributedDataParallel",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 203,
"usage_type": "attribute"
},
{
"api_name": "pytorch_pretrained_bert.optimization.BertAdam",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "tqdm.trange",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 230,
"usage_type": "call"
},
{
"api_name": "torch.distributed.get_rank",
"line_number": 254,
"usage_type": "call"
},
{
"api_name": "torch.distributed",
"line_number": 254,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 258,
"usage_type": "call"
},
{
"api_name": "pytorch_pretrained_bert.file_utils.WEIGHTS_NAME",
"line_number": 258,
"usage_type": "argument"
},
{
"api_name": "os.path",
"line_number": 258,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 259,
"usage_type": "call"
},
{
"api_name": "pytorch_pretrained_bert.file_utils.CONFIG_NAME",
"line_number": 259,
"usage_type": "argument"
},
{
"api_name": "os.path",
"line_number": 259,
"usage_type": "attribute"
},
{
"api_name": "torch.save",
"line_number": 261,
"usage_type": "call"
},
{
"api_name": "pytorch_pretrained_bert.BertTokenizer.from_pretrained",
"line_number": 270,
"usage_type": "call"
},
{
"api_name": "pytorch_pretrained_bert.BertTokenizer",
"line_number": 270,
"usage_type": "name"
},
{
"api_name": "torch.distributed.get_rank",
"line_number": 283,
"usage_type": "call"
},
{
"api_name": "torch.distributed",
"line_number": 283,
"usage_type": "attribute"
},
{
"api_name": "torch.tensor",
"line_number": 292,
"usage_type": "call"
},
{
"api_name": "torch.long",
"line_number": 292,
"usage_type": "attribute"
},
{
"api_name": "torch.tensor",
"line_number": 293,
"usage_type": "call"
},
{
"api_name": "torch.long",
"line_number": 293,
"usage_type": "attribute"
},
{
"api_name": "torch.tensor",
"line_number": 294,
"usage_type": "call"
},
{
"api_name": "torch.long",
"line_number": 294,
"usage_type": "attribute"
},
{
"api_name": "torch.tensor",
"line_number": 295,
"usage_type": "call"
},
{
"api_name": "torch.float",
"line_number": 295,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.data.TensorDataset",
"line_number": 297,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.SequentialSampler",
"line_number": 299,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 300,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 310,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 314,
"usage_type": "call"
},
{
"api_name": "torch.nn.BCEWithLogitsLoss",
"line_number": 318,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 318,
"usage_type": "attribute"
},
{
"api_name": "numpy.append",
"line_number": 331,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 332,
"usage_type": "call"
},
{
"api_name": "numpy.round",
"line_number": 335,
"usage_type": "call"
},
{
"api_name": "emo_PRF.eval_multi_label",
"line_number": 337,
"usage_type": "call"
},
{
"api_name": "numpy.round",
"line_number": 344,
"usage_type": "call"
},
{
"api_name": "csv.writer",
"line_number": 346,
"usage_type": "call"
},
{
"api_name": "pickle.dump",
"line_number": 353,
"usage_type": "call"
},
{
"api_name": "pickle.dump",
"line_number": 355,
"usage_type": "call"
},
{
"api_name": "pickle.dump",
"line_number": 357,
"usage_type": "call"
},
{
"api_name": "numpy.round",
"line_number": 361,
"usage_type": "call"
},
{
"api_name": "csv.writer",
"line_number": 363,
"usage_type": "call"
},
{
"api_name": "pickle.dump",
"line_number": 369,
"usage_type": "call"
},
{
"api_name": "pickle.dump",
"line_number": 371,
"usage_type": "call"
},
{
"api_name": "pickle.dump",
"line_number": 373,
"usage_type": "call"
}
] |
322071772
|
import os
import sys
import torch
import struct
import numpy as np
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '.\\')))
from KeyMap import *
fileName = os.path.join('checkpoint', '20191104_resnet10_quant_ch8_224x', 'checkpoint_224x_fuse_b100.pth')
# fileName = os.path.join('checkpoint', '20191029_resnet10_quant8_fused_symm_-128_127_224x224_test', 'checkpoint_8626.pth')
fileNameNew = os.path.join('checkpoint', '20191024_resnet10_quant8_fused_sym_-128_127_224x224_resize', 'checkpoint_dequant_4.pth')
model_q = torch.load(fileName)
_my_dict = {}
_my_dict_tmp = {}
_my_dict_fuse = {}
def get_clamp_limit(bit_size=8, signed=True):
signed_limit = 2 ** (bit_size - 1)
# if(bit_size == 32):
# _max = 2147483500
# return (-signed_limit, 2147483500) if signed else (0, 2 * signed_limit - 1)
return (-signed_limit, signed_limit - 1) if signed else (0, 2 * signed_limit - 1)
def clamp(input, min, max, inplace=False):
if inplace:
input.clamp_(min, max)
return input
return torch.clamp(input, min, max)
def trim_prefix():
model_q['trim_state_dict'] = {}
for key, data in model_q.items():
items = str(key).split('.')
items.remove('module')
new_key = str('.'.join(items))
model_q['trim_state_dict'][new_key] = data
def parse_quant_info():
for key, data in model_q.items():
items = str(key).split('.')
items.remove(items[len(items) - 1])
new_key = str('.'.join(items))
# weight quant #
# if(('conv' in key or 'fc' in key or 'downsample' in key or 'downsample.0' in key or 'fuse' in key) \
# and '.weight' in key and '.weight_scale' not in key and '.weight_zero_point' not in key):
# if(new_key not in _my_dict):
# _my_dict[new_key] = { 'weight': [],
# 'w_scale': [],
# 'w_zero_point': [],
# 'bias': [],
# 'b_scale': [],
# 'b_zero_point': []}
# if (new_key in _my_dict and len(_my_dict[new_key]['weight']) == 0):
# _my_dict[new_key]['weight'].append(data)
if(('conv' in key or 'fc' in key or 'downsample' in key or 'downsample.0' in key or 'fuse' in key) and '.weight_scale' in key):
if (new_key not in _my_dict):
_my_dict[new_key] = {'w_scale': [],
'w_zero_point': [],
'b_scale': [],
'b_zero_point': []}
if(new_key in _my_dict and len(_my_dict[new_key]['w_scale']) == 0):
_my_dict[new_key]['w_scale'].append(data)
elif (('conv' in key or 'fc' in key or 'downsample' in key or 'downsample.0' in key or 'fuse' in key) and '.weight_zero_point' in key):
if(new_key in _my_dict and len(_my_dict[new_key]['w_zero_point']) == 0):
_my_dict[new_key]['w_zero_point'].append(data)
elif('fake_q' in key and '.scale' in key): # activation quant #
if (new_key not in _my_dict):
_my_dict[new_key] = {'input': [],
'scale': [],
'zero_point': []}
_my_dict[new_key]['input'].append(new_key)
_my_dict[new_key]['scale'].append(data)
elif('fake_q' in key and '.zero_point' in key):
_my_dict[new_key]['zero_point'].append(data)
# elif (('conv' in key or 'fc' in key or 'downsample' in key or 'downsample.0' in key or 'fuse' in key) \
# and '.bias' in key and '.bias_scale' not in key and '.bias_zero_point' not in key):
# if (new_key in _my_dict and len(_my_dict[new_key]['bias']) == 0):
# _my_dict[new_key]['bias'].append(data)
elif (('conv' in key or 'fc' in key or 'downsample' in key or 'downsample.0' in key or 'fuse' in key) and '.bias_scale' in key):
if (new_key in _my_dict and len(_my_dict[new_key]['b_scale']) == 0):
_my_dict[new_key]['b_scale'].append(data)
elif (('conv' in key or 'fc' in key or 'downsample' in key or 'downsample.0' in key or 'fuse' in key) and '.bias_zero_point' in key):
if(new_key in _my_dict and len(_my_dict[new_key]['b_zero_point']) == 0):
_my_dict[new_key]['b_zero_point'].append(data)
# for key, data in _my_dict.items():
# print(key, _my_dict[key]['scale'], _my_dict[key]['zero_point'])
def quant_dequant_weight():
model_q['quant_state_dict'] = {}
model_q['dequant_state_dict'] = {}
for key, data in _my_dict.items():
if('conv' in key or 'fc' in key or 'downsample' in key or 'downsample.0' in key or 'fuse' in key):
_min, _max = get_clamp_limit(bit_size=8, signed=True)
model_q['quant_state_dict'][key+'.weight'] = _quantValue(_my_dict[key]['weight'][0],\
_my_dict[key]['w_scale'][0],\
_my_dict[key]['w_zero_point'][0],\
_min, _max)
model_q['dequant_state_dict'][key + '.weight'] = _dequantValue(model_q['quant_state_dict'][key+'.weight'], \
_my_dict[key]['w_scale'][0],\
_my_dict[key]['w_zero_point'][0])
if(len(_my_dict[key]['bias']) > 0):
_min, _max = get_clamp_limit(bit_size=32, signed=True)
model_q['quant_state_dict'][key + '.bias'] = _quantValue(_my_dict[key]['bias'][0], \
_my_dict[key]['b_scale'][0], \
_my_dict[key]['b_zero_point'][0],\
_min, _max)
model_q['dequant_state_dict'][key + '.bias'] = _dequantValue(model_q['quant_state_dict'][key + '.bias'], \
_my_dict[key]['b_scale'][0], \
_my_dict[key]['b_zero_point'][0])
# for key, data in model_q['quant_state_dict'].items():
# print(key, data)
def replace_with_dequant_value_and_save():
for key, data in model_q['state_dict'].items():
if(key not in model_q['dequant_state_dict']):
_my_dict_tmp[key] = data
else:
_my_dict_tmp[key+'_original'] = data
_my_dict_tmp[key] = model_q['dequant_state_dict'][key]
_my_dict_tmp[key+'_quant'] = model_q['quant_state_dict'][key]
torch.save(_my_dict_tmp, fileNameNew)
def _quantValue(input, scale, zero_point, clamp_min, clamp_max):
quant = torch.round(scale * input - zero_point)
return clamp(quant, clamp_min, clamp_max, False)
def _dequantValue(input, scale, zero_point):
return (input + zero_point) / scale
def _dump_value_struct_pack(input, outputFolder, name, mode):
fileName = os.path.join('checkpoint', outputFolder, name+'.txt')
t = torch.flatten(input)
with open(fileName, "wb") as text_file:
for o in range(len(t)):
text_file.write( struct.pack(mode, t[o]) )
text_file.close()
def _dump_value_numpy(input, outputFolder, name):
fileName = os.path.join('checkpoint', outputFolder, name)
inputToNumpy = input.cpu().numpy()
np.save(fileName, inputToNumpy)
def dump_weight_and_bias_to_file():
outputFolder = '20191028_resnet10_quant8_fused_symm_-128_127_224x224_test'
type = 'weight'
if(type == 'weight'):
postfix = '.fp32'
mode = 'f' ## fp
elif(type == 'bias'):
postfix = '.q32'
mode = 'i' ## int
else:
postfix = '.q8'
mode = 'b' ## char
for key, data in key_map.items():
# new_key = str('module.' + str(key) + '.' + type + '_quant')
new_key = str('module.' + str(key) + '.' + type)
if new_key in model_q:
print(new_key)
_dump_value_struct_pack(model_q[new_key], \
outputFolder, \
data + '.' + type + postfix, \
mode)
_dump_value_numpy(model_q[new_key], \
outputFolder, \
data + '.' + type + postfix)
def dump_scale_info(outputFolder, name):
fileNameDump = os.path.join('checkpoint', outputFolder, name+'.txt')
with open(fileNameDump, "w") as text_file:
for key, data in key_ch8_map.items():
weight_key = str('module.' + str(key) + '.weight_scale')
bias_key = str('module.' + str(key) + '.bias_scale')
image_key = str('module.' + str(key) + '.scale')
if(weight_key in model_q):
text_file.writelines('config w\n')
text_file.writelines('{0} {1}\n'.format(data, torch.log2(model_q[weight_key]).item()))
# if (bias_key in model_q):
# text_file.writelines('config b\n')
# text_file.writelines('{0} {1}\n'.format(data, torch.log2(model_q[bias_key]).item()))
if (image_key in model_q):
text_file.writelines('config o\n')
text_file.writelines('{0} {1}\n'.format(data, torch.log2(model_q[image_key]).item()))
text_file.writelines('\n')
text_file.close()
if __name__ == '__main__':
torch.set_printoptions(precision=9)
if ('state_dict' in model_q):
model_q = model_q['state_dict']
# trim_prefix()
# torch.save(model_q, fileNameNew)
parse_quant_info()
# quant_dequant_weight()
# replace_with_dequant_value_and_save()
outputFolder = '20191104_resnet10_quant_ch8_224x'
name = 'scale_QAT_shift'
dump_scale_info(outputFolder, name)
# name = '2.maxpooling.activation.npy'
# fileName1 = os.path.join('checkpoint', \
# '20191030_resnet10_fp32_fused_220x220', \
# '20191030_pytorch_batch1_img2_hw_data', \
# name)
# fileName2 = os.path.join('checkpoint', \
# '20191031_resnet10_fp32_fused_220x220', \
# '20191031_pytorch_input_batch100_0to9', \
# 'input.activation.int8.0.npy')
# try:
# tmpNpy1 = np.load(fileName1)
# tmpNpy2 = np.load(fileName2)
# print(np.array_equal(tmpNpy1, tmpNpy2))
# print('max pooling')
# print(tmpNpy2)
# print('res1_input')
# print(tmpNpy1)
# except IOError as e:
# print('[Error] no such file {0}'.format(name))
# for name in std_names:
# fileName = os.path.join('checkpoint', \
# '20191028_resnet10_quant8_fused_symm_-128_127_224x224_test', \
# '2.'+name+'.bias.npy')
# try:
# tmpNpy = np.load(fileName)
# # print(tmpNpy)
# except IOError as e:
# print('[Error] no such file {0}'.format('2.'+name+'.bias.npy'))
# _biasTensor = torch.from_numpy(tmpNpy).float().cuda()
# for key, data in key_map.items():
# if(data == name and 'module.'+key in _my_dict):
# if('b_scale' in _my_dict['module.'+key]):
# _biasScale = _my_dict['module.'+key]['b_scale'][0]
# print(name, _biasScale)
# _min, _max = get_clamp_limit(bit_size=32, signed=True)
# _biasQuant = _quantValue(_biasTensor, \
# _biasScale, \
# 0, \
# _min, _max)
# _dump_value_numpy(_biasQuant, outputFolder, '2.'+name+'.bias.int32.npy')
| null |
examples/classifier_compression/LoadModelCkp.py
|
LoadModelCkp.py
|
py
| 12,278 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "sys.path.insert",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "torch.load",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "torch.clamp",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "torch.save",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "torch.round",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 135,
"usage_type": "attribute"
},
{
"api_name": "torch.flatten",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "struct.pack",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 143,
"usage_type": "attribute"
},
{
"api_name": "numpy.save",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 174,
"usage_type": "attribute"
},
{
"api_name": "torch.log2",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "torch.log2",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "torch.set_printoptions",
"line_number": 193,
"usage_type": "call"
}
] |
495725401
|
import pygame
import math
from effects.Explosion import Explosion
class EnemyProjectile(pygame.sprite.Sprite):
"""This class is base class for all bullet projectiles by the enemy.
"""
def __init__(self, game, enemy, pos):
"""__init__ method for EnemyProjectile class
Args:
game (Integrate.Game): Integrate.Game class object.
enemy (Enemy.Enemy): Enemy.Enemy class object.
pos (tuple length 2) : position of the player (x,y).
"""
self.group = game.all_sprites
self.layer = enemy.layer
pygame.sprite.Sprite.__init__(self)
self.group.add(self, layer=self.layer)
self.enemy = enemy
self.game = game
self.pos = pygame.math.Vector2(pos)
self.Blst = []
self.vel = pygame.math.Vector2(0, 0)
self.anim_update = 0
self.current_frame = 0
self.state = 'SHOT'
# set own direction based on the direction the player sprite is facing
self.destroy_timer = 0
self.angle = 0
self.dir = self.enemy.lastdir
self.image = pygame.transform.rotozoom(self.image, self.enemy.angleee, 1)
Bangle = math.degrees(math.atan2(self.game.player.pos.y-(self.enemy.pos.y),self.game.player.pos.x-(self.enemy.pos.x)))
A = self.enemy.pos.x + math.cos(math.radians(Bangle))*35
B = self.enemy.pos.y + math.sin(math.radians(Bangle))*35
self.Blst.append([math.atan2(self.game.player.pos.y-(self.enemy.pos.y),self.game.player.pos.x-(self.enemy.pos.x)),A,B])
self.rect = self.image.get_rect()
self.mask = pygame.mask.from_surface(self.image)
self.maskbount = self.mask.get_bounding_rects()
self.rect.center = self.pos
self.hit_rect = self.maskbount[0]
self.hit_rect.center = self.rect.center
def collide_hit_rect(self, one, two):
"""EnemyProjectile class method to check if two objects are colliding.
"""
return one.hit_rect.colliderect(two.hit_rect)
def update(self):
"""EnemyProjectile class method to update the projectile motion of enemy bullet and its effects.
"""
if self.state == 'SHOT':
# effects of hiting an explotion crate
hits_walls = pygame.sprite.spritecollide(self, self.game.walls, False, self.collide_hit_rect)
for wall in hits_walls:
if wall.image == self.game.imageLoader.solid_img['crate']:
images = self.game.imageLoader.effects['crate_explosion']
Explosion(self.game, pygame.math.Vector2(self.pos), images, 80, damage = 0.2,
sound=self.game.soundLoader.get['explosiveTank'],
hit_rect=pygame.Rect(images[0].get_rect().inflate(-6, -6)))
wall.kill()
# change the state to hits wall and later destroy bullet
if hits_walls:
self.state = 'HIT_WALL'
# update the bullet velocity and position
for bullet in self.Blst:
velx=math.cos(bullet[0])*5
vely=math.sin(bullet[0])*5
if self.game.player.MoveCheck == True:
bullet[1]+=velx
bullet[2]+=vely
for projectile in self.Blst:
self.acc = projectile
# cause damage to player if bullet hits them
player = self.game.player
if self.collide_hit_rect(player, self):
if (player.state != 'HITSTUN'):
self.state = 'HIT_Player'
player.hp -= self.damage
# limit velocity
if self.vel.length_squared() > self.max_speed ** 2:
self.vel.scale_to_length(self.max_speed)
# updates the position of the bullet.
self.pos = (self.acc[1], self.acc[2])
else:
self.destroy()
self.rect.center = self.pos
self.hit_rect.center = self.rect.center
def destroy(self):
"""EnemyProjectile class method to destroy the bullet.
"""
if self.state == 'HIT_WALL':
# push the arrow a bit into a wall
if self.vel.length_squared() > 0:
self.pos += self.vel.normalize() * 3
self.vel *= 0
self.kill()
elif self.state == 'HIT_Player':
self.pos = self.game.player.pos
self.kill()
| null |
bullets/EnemyProjectile.py
|
EnemyProjectile.py
|
py
| 4,602 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pygame.sprite",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "pygame.sprite.Sprite.__init__",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pygame.sprite",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "pygame.math.Vector2",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "pygame.math",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "pygame.math.Vector2",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "pygame.math",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "pygame.transform.rotozoom",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "pygame.transform",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "math.degrees",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "math.atan2",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "math.cos",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "math.radians",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "math.sin",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "math.radians",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "math.atan2",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "pygame.mask.from_surface",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "pygame.mask",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "pygame.sprite.spritecollide",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "pygame.sprite",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "effects.Explosion.Explosion",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "pygame.math.Vector2",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "pygame.math",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "pygame.Rect",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "math.cos",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "math.sin",
"line_number": 77,
"usage_type": "call"
}
] |
150503246
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import django.contrib.auth.models
class Migration(migrations.Migration):
dependencies = [
('auth', '0006_require_contenttypes_0002'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Client',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('add_date', models.DateTimeField(auto_now_add=True, verbose_name='add date', null=True)),
('edit_date', models.DateTimeField(auto_now=True, verbose_name='Last modification date', null=True)),
('is_active', models.BooleanField(default=False, verbose_name='Active')),
('is_a_prospect', models.BooleanField(default=False, verbose_name='Prospect')),
('name', models.CharField(max_length=250)),
('is_power_user', models.BooleanField(default=False, verbose_name='Power User')),
('have_access_hedge_event', models.BooleanField(default=False, verbose_name='Can access Hedge Events')),
('access_hedge_event_until', models.DateTimeField(null=True, blank=True)),
('have_access_due_diligence', models.BooleanField(default=False, verbose_name='Can access Due Dilligence')),
('access_due_diligence_until', models.DateTimeField(null=True, blank=True)),
('tableau_username', models.CharField(max_length=25, null=True, blank=True)),
('notes', models.TextField(null=True, blank=True)),
('progress_billing', models.BooleanField(default=False, verbose_name='Progress Billing')),
('can_view_alerts', models.BooleanField(default=False, verbose_name='Alerts')),
('can_view_qrtly_updates', models.BooleanField(default=False, verbose_name='Quarterly Updates')),
('can_view_fs_dashboard', models.BooleanField(default=False, verbose_name='Dashboard')),
],
options={
'ordering': ('name',),
},
),
migrations.CreateModel(
name='ClientUser',
fields=[
('user_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)),
('prompt_change_password', models.BooleanField(default=False)),
('position', models.CharField(max_length=250, null=True, blank=True)),
('company', models.CharField(max_length=250, null=True, blank=True)),
('notify_on_reports_updates', models.BooleanField(default=False)),
('notify_on_blog_update', models.BooleanField(default=False)),
('notify_on_new_event', models.BooleanField(default=False)),
('subscribe_newsletter', models.BooleanField(default=False)),
('default_sort_column', models.CharField(max_length=20, choices=[(b'funds', 'Funds'), (b'managers', 'Managers')])),
('default_sort_order', models.CharField(max_length=5, choices=[(b'asc', 'ASC'), (b'desc', 'DESC')])),
('default_home_page', models.CharField(max_length=20, choices=[(b'due_dilligence', 'Due Dilligence Reports'), (b'hedge_event', 'Hedge Events')])),
('client', models.ForeignKey(related_name='users', to='client.Client')),
],
options={
'abstract': False,
'verbose_name': 'user',
'verbose_name_plural': 'users',
},
bases=('auth.user',),
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
migrations.CreateModel(
name='File',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('add_date', models.DateTimeField(auto_now_add=True, verbose_name='add date', null=True)),
('edit_date', models.DateTimeField(auto_now=True, verbose_name='Last modification date', null=True)),
('name', models.CharField(max_length=250)),
('doc', models.FileField(upload_to=b'uploads/client')),
('pub_date', models.DateTimeField()),
('client', models.ForeignKey(to='client.Client')),
],
options={
'ordering': ('add_date',),
'abstract': False,
'get_latest_by': 'add_date',
},
),
migrations.AddField(
model_name='client',
name='primary_cha_representative',
field=models.ForeignKey(related_name='primary_cha_representative', blank=True, to=settings.AUTH_USER_MODEL, null=True),
),
]
| null |
client/migrations/0001_initial.py
|
0001_initial.py
|
py
| 5,012 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.db.migrations.Migration",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.swappable_dependency",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.db.migrations",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "django.conf.settings.AUTH_USER_MODEL",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.CreateModel",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "django.db.migrations",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "django.db.models.AutoField",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "django.db.models.DateTimeField",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "django.db.models.DateTimeField",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "django.db.models.BooleanField",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "django.db.models.BooleanField",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "django.db.models.BooleanField",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "django.db.models.BooleanField",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "django.db.models.DateTimeField",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "django.db.models.BooleanField",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "django.db.models.DateTimeField",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "django.db.models.TextField",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "django.db.models.BooleanField",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "django.db.models.BooleanField",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "django.db.models.BooleanField",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "django.db.models.BooleanField",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.CreateModel",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "django.db.migrations",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "django.db.models.OneToOneField",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "django.conf.settings.AUTH_USER_MODEL",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "django.db.models.BooleanField",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "django.db.models.BooleanField",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "django.db.models.BooleanField",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "django.db.models.BooleanField",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "django.db.models.BooleanField",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "django.db.contrib.auth.models.UserManager",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "django.db.contrib",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "django.db",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.CreateModel",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "django.db.migrations",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "django.db.models.AutoField",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "django.db.models.DateTimeField",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "django.db.models.DateTimeField",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "django.db.models.FileField",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "django.db.models.DateTimeField",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.AddField",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "django.db.migrations",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "django.conf.settings.AUTH_USER_MODEL",
"line_number": 86,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 86,
"usage_type": "name"
}
] |
477607430
|
from django.shortcuts import render,redirect
from .models import properties,shortlist,notes,tourrequests,offers,propertyrating
from .forms import propertyform
from django.contrib.auth.decorators import login_required
from django.core.files.storage import FileSystemStorage
from mysite.models import profileModel
from django.contrib.auth.models import User
from django.core.mail import EmailMessage
from Django_Project.settings import EMAIL_HOST_USER
@login_required
def search2(request):
template = 'search2.html'
context = {}
obj = profileModel.objects.get(user=request.user).Teacher_or_Parent
if obj == 'Agent':
context['agentcheck'] = True
else:
context['agentcheck'] = False
try:
if request.method == 'GET':
probjs = []
a = request.GET['city']
b = request.GET['area']
objs = properties.objects.all()
for i in objs:
if a.lower() in i.city.lower():
if b == '':
probjs.append(i)
else:
if b.lower() in i.area.lower():
probjs.append(i)
else:
pass
context['listofproperty'] = probjs
except:
pass
return render(request,template,context)
@login_required
def createproperty(request):
template = 'new-property.html'
form = propertyform(request.POST)
if form.is_valid():
a = form.cleaned_data
b = properties()
b.user = request.user
b.price = a['price']
b.sqft = a['sqft']
b.beds = a['beds']
b.baths = a['baths']
b.family_type = a['family_type']
b.year_built = a['year_built']
b.heating = a['heating']
b.cooling = a['cooling']
b.parking = a['parking']
b.lot = a['lot']
b.description = a['description']
b.save()
try:
image = a['image1']
fs = FileSystemStorage()
filename = fs.save(image.name, image)
b.image1 = fs.url(filename)
b.save()
image = a['image2']
fs = FileSystemStorage()
filename = fs.save(image.name, image)
b.image2 = fs.url(filename)
b.save()
image = a['image3']
fs = FileSystemStorage()
filename = fs.save(image.name, image)
b.image3 = fs.url(filename)
b.save()
image = a['image4']
fs = FileSystemStorage()
filename = fs.save(image.name, image)
b.image4 = fs.url(filename)
b.save()
image = a['image5']
fs = FileSystemStorage()
filename = fs.save(image.name, image)
b.image5 = fs.url(filename)
b.save()
except:
pass
else:
form = propertyform()
context = {'form':form}
return render(request,template,context)
@login_required
def propertydetail(request,id):
template = 'page_51.html'
obj = properties.objects.get(pk=id)
objs = shortlist.objects.filter(user=request.user)
objs1 = tourrequests.objects.filter(user=request.user)
context = {'obj':obj,'lists':objs,'tours':objs1}
obj = profileModel.objects.get(user=request.user).Teacher_or_Parent
if obj == 'Agent':
context['agentcheck'] = True
else:
context['agentcheck'] = False
return render(request,template,context)
@login_required
def addtolist(request,listid,objid):
listobj = shortlist.objects.get(pk=listid)
obj = properties.objects.get(pk=objid)
listobj.relproperties.add(obj)
listobj.save()
obj1 = propertyrating()
obj1.linklist = listobj
obj1.relproperty = obj
obj1.save()
return redirect('/property-detail/'+str(objid))
@login_required
def deletelist(request,id):
listobj = shortlist.objects.get(pk=id)
listobj.delete()
return redirect('/manage-lists')
@login_required
def deletetour(request, id):
    """Delete one tour request, then return to the tour-requests page."""
    tourrequests.objects.get(pk=id).delete()
    return redirect('/tour-requests')
@login_required
def removefromlist(request, listid, objid):
    """Detach a property from a shortlist and drop its rating record.

    The original loaded every propertyrating row into Python to find the
    matching one; a filtered queryset delete does it in one DB operation.
    """
    listobj = shortlist.objects.get(pk=listid)
    obj = properties.objects.get(pk=objid)
    listobj.relproperties.remove(obj)
    listobj.save()
    # DB-side delete of the (list, property) rating rows.
    propertyrating.objects.filter(linklist=listobj, relproperty=obj).delete()
    return redirect('/show-list/' + str(listid))
@login_required
def managelists(request):
    """List shortlists the user owns or was shared; POST renames one."""
    if request.method == 'POST':
        posted = request.POST
        renamed = shortlist.objects.get(pk=posted['id'])
        renamed.name = posted['listname']
        renamed.save()
    template = 'page_24_End.html'
    visible = [sl for sl in shortlist.objects.all()
               if sl.user == request.user or request.user in sl.shared_with.all()]
    context = {'lists': reversed(visible)}
    context['agentlist'] = profileModel.objects.filter(Teacher_or_Parent = 'Agent')
    context['buyerlist'] = profileModel.objects.filter(Teacher_or_Parent = 'Buyer')
    role = profileModel.objects.get(user=request.user).Teacher_or_Parent
    if role == 'Agent':
        context['agentcheck'] = True
    else:
        context['agentcheck'] = False
    return render(request, template, context)
@login_required
def createlist(request):
    """Create an empty shortlist owned by the current user."""
    new_list = shortlist()
    new_list.user = request.user
    new_list.save()
    return redirect('/manage-lists')
@login_required
def createtour(request):
    """Create an empty tour request owned by the current user."""
    new_tour = tourrequests()
    new_tour.user = request.user
    new_tour.save()
    return redirect('/tour-requests')
@login_required
def namelist(request, name, proid):
    """Create a named shortlist seeded with one property."""
    new_list = shortlist()
    new_list.name = name
    new_list.user = request.user
    new_list.save()
    new_list.relproperties.add(properties.objects.get(pk=proid))
    new_list.save()
    return redirect('/manage-lists')
@login_required
def showlist(request, id):
    """Display one shortlist with per-property notes and ratings.

    POST replaces the current user's note for one property on this list.
    The original used a `check = 1/2` flag and an inner loop to find each
    property's note; `next()` over a generator expresses the same lookup.
    """
    template = 'page_30_End.html'
    obj = shortlist.objects.get(pk=id)
    if request.method == 'POST':
        prop = properties.objects.get(pk=request.POST['id'])
        note_text = stripspace(request.POST['propertynote'])
        # Delete any existing note by this user for this property/list
        # before saving the replacement (same first-match semantics).
        for existing in notes.objects.all():
            if existing.user == request.user and (existing.relproperty == prop and existing.relshortlist == obj):
                existing.delete()
                break
        note = notes()
        note.user = request.user
        note.relproperty = prop
        note.relshortlist = obj
        note.note = note_text
        note.save()
    context = {'obj': obj}
    context['agents'] = profileModel.objects.filter(Teacher_or_Parent='Agent')
    list_notes = notes.objects.filter(relshortlist = obj)
    notelist = {}
    ratings = {}
    for prop in obj.relproperties.all():
        ratings[prop] = getrating(prop, obj)
        # First matching note's text, or '' when the property has none.
        notelist[prop] = next(
            (n.note for n in list_notes
             if n.relproperty == prop and n.relshortlist == obj),
            '')
    context['notes'] = notelist.items()
    context['ratings'] = ratings.items()
    context['lists'] = [sl for sl in shortlist.objects.all()
                        if sl.user == request.user and sl != obj]
    if profileModel.objects.get(user=request.user).Teacher_or_Parent == 'Agent':
        context['agentcheck'] = True
    else:
        context['agentcheck'] = False
    return render(request, template, context)
@login_required
def requesttour(request, listid, proid, reqid):
    """Open or create a tour request; routing is encoded in which id is non-zero.

    * ``reqid != 0``  -> reopen an existing tour request;
    * ``listid == 0`` -> new request for the single property ``proid``;
    * ``proid == 0``  -> new request covering every property in ``listid``.
    """
    template = 'page_43_google_slides.html'
    context = {}
    if (int(reqid) != 0):
        reqobj = tourrequests.objects.get(pk=reqid)
    elif (int(listid) == 0):
        obj = properties.objects.get(pk=proid)
        reqobj = tourrequests()
        reqobj.user = request.user
        reqobj.save()
        reqobj.relproperty.add(obj)
        reqobj.save()
    elif (int(proid) == 0):
        obj = shortlist.objects.get(pk=listid)
        reqobj = tourrequests()
        reqobj.user = request.user
        reqobj.save()
        for i in obj.relproperties.all():
            reqobj.relproperty.add(i)
        reqobj.save()
    else:
        # NOTE(review): if reqid == 0 while both listid and proid are
        # non-zero, no branch binds ``reqobj`` and the line below raises
        # UnboundLocalError — presumably the URLconf never routes that
        # combination here; confirm before relying on it.
        pass
    context['reqobj'] = reqobj
    return render(request,template,context)
@login_required
def removefromtour(request, proid, reqid):
    """Drop one property from a tour request, then reopen the request."""
    tour_req = tourrequests.objects.get(pk=reqid)
    tour_req.relproperty.remove(properties.objects.get(pk=proid))
    return redirect('/request-tour/0/0/' + str(reqid))
@login_required
def submitrequest(request, reqid):
    """Copy posted tour dates/times onto the request and mark it Requested."""
    reqobj = tourrequests.objects.get(pk=reqid)
    posted = request.POST
    # (model attribute, POST key) pairs — same assignments as before,
    # expressed as a table instead of 14 repeated statements.
    field_map = (
        ('date1', 'date1'), ('time1_date1', 'time1'),
        ('time2_date1', 'time2'), ('time3_date1', 'time3'),
        ('date2', 'date2'), ('time1_date2', 'time4'),
        ('time2_date2', 'time5'), ('time3_date2', 'time6'),
        ('date3', 'date3'), ('time1_date3', 'time7'),
        ('time2_date3', 'time8'), ('time3_date3', 'time9'),
        ('note', 'note'), ('name', 'tourname'),
    )
    for attr, key in field_map:
        setattr(reqobj, attr, posted[key])
    reqobj.status = 'Requested'
    reqobj.save()
    return redirect('/show-tour/' + str(reqid))
@login_required
def showtour(request, reqid):
    """Render one tour request; flag whether the viewer is an agent."""
    template = 'page_54_google_slides.html'
    is_agent = (profileModel.objects.get(user=request.user).Teacher_or_Parent == 'Agent')
    context = {'agentcheck': bool(is_agent)}
    context['reqobj'] = tourrequests.objects.get(pk=reqid)
    return render(request, template, context)
@login_required
def addnote(request, id):
    """Send a note about a property to every agent profile.

    The original re-ran ``properties.objects.get(pk=id)`` inside the loop
    for every agent; the lookup is loop-invariant, so it is hoisted.
    """
    template = 'add-note.html'
    context = {'id': id}
    if request.method == 'POST':
        id = request.POST['id']
        note_body = request.POST['note']
        prop = properties.objects.get(pk=id)
        for agent in profileModel.objects.filter(Teacher_or_Parent = 'Agent'):
            msg = notes()
            msg.user = request.user
            msg.reciever = agent.user
            msg.relproperty = prop
            msg.note = note_body
            msg.save()
        return redirect('/property-detail/' + str(id))
    return render(request, template, context)
@login_required
def toursrequested(request):
    """List tour requests (own ones for buyers, all for agents); POST renames one."""
    if request.method == 'POST':
        renamed = tourrequests.objects.get(pk=request.POST['id'])
        renamed.name = request.POST['listname']
        renamed.save()
    template = 'tr.html'
    context = {}
    role = profileModel.objects.get(user=request.user).Teacher_or_Parent
    if role == 'Buyer':
        context['agentcheck'] = False
        context['objs'] = reversed(tourrequests.objects.filter(user=request.user))
    else:
        context['agentcheck'] = True
        context['objs'] = reversed(tourrequests.objects.all())
    return render(request, template, context)
@login_required
def shownotes(request):
    """Show the notes the user has sent and received."""
    # 'recieved' is misspelled but must match the template's variable name.
    context = {
        'sent': notes.objects.filter(user=request.user),
        'recieved': notes.objects.filter(reciever=request.user),
    }
    return render(request, 'show-notes.html', context)
@login_required
def replytonote(request, id):
    """Reply to a note: a new note from its receiver back to its sender."""
    template = 'add-note.html'
    context = {'id': id}
    if request.method == 'POST':
        id = request.POST['id']
        body = request.POST['note']
        original = notes.objects.get(pk=id)
        reply = notes()
        reply.user = original.reciever
        reply.reciever = original.user
        reply.relproperty = original.relproperty
        reply.note = body
        reply.save()
        return redirect('/show-notes')
    return render(request, template, context)
@login_required
def listnotechange(request, id):
    """Replace the shortlist's note with the posted (left-stripped) text."""
    target = shortlist.objects.get(pk=id)
    target.note = stripspace(request.POST['listnote'])
    target.save()
    return redirect('/show-list/' + str(id))
@login_required
def addfromlist(request, listid, proid, currentid):
    """Add one property to two shortlists: *listid* and the current one."""
    prop = properties.objects.get(pk=proid)
    for list_pk in (listid, currentid):
        target = shortlist.objects.get(pk=list_pk)
        target.relproperties.add(prop)
        target.save()
    return redirect('/show-list/' + str(currentid))
@login_required
def listofbuyers(request):
    """Agent-only roster of buyer profiles; non-agents are sent home."""
    if profileModel.objects.get(user=request.user).Teacher_or_Parent != 'Agent':
        return redirect('/')
    context = {
        'objs': profileModel.objects.filter(Teacher_or_Parent='Buyer'),
        'agentcheck': True,
    }
    return render(request, 'page_46_End.html', context)
@login_required
def addtotour(request, tid, proid):
    """Attach one property to an existing tour request."""
    tour_req = tourrequests.objects.get(pk=tid)
    tour_req.relproperty.add(properties.objects.get(pk=proid))
    return redirect('/property-detail/' + str(proid))
@login_required
def approvetour(request, id):
    """Mark a tour request as Scheduled and show it."""
    tour_req = tourrequests.objects.get(pk=id)
    tour_req.status = 'Scheduled'
    tour_req.save()
    return redirect('/show-tour/' + str(id))
@login_required
def canceltour(request, id):
    """Mark a tour request as Cancelled and show it."""
    tour_req = tourrequests.objects.get(pk=id)
    tour_req.status = 'Cancelled'
    tour_req.save()
    return redirect('/show-tour/' + str(id))
@login_required
def offer(request, id):
    """Record the user's offer on a property, then return to its page."""
    new_offer = offers()
    new_offer.user = request.user
    new_offer.relproperty = properties.objects.get(pk=id)
    new_offer.note = request.POST['offer']
    new_offer.save()
    return redirect('/property-detail/' + str(id))
@login_required
def agent_offercheck(request):
    """Offers dashboard: grouped by buyer for agents, by status for buyers.

    Bug fix: the original agent branch tested ``if i in
    context['data'].keys()`` — an *offer object* against *username*
    keys — which was always False, so every offer reset its buyer's
    list and only the last offer per buyer survived.
    """
    context = {}
    if (profileModel.objects.get(user=request.user).Teacher_or_Parent == 'Agent'):
        template = 'page_81_End.html'
        objs = offers.objects.all()
        context['data'] = {}
        for i in objs:
            # Accumulate every offer under its buyer's username.
            context['data'].setdefault(i.user.username, []).append(i)
        context['agentcheck'] = True
    else:
        template = 'page_43.html'
        objs = offers.objects.filter(user=request.user)
        # status -> context bucket key, same five buckets as before.
        buckets = {'In Contract': 'ic', 'Requested': 'req',
                   'Submitted': 'sub', 'Fell Through': 'ft', 'Closed': 'cl'}
        for key in buckets.values():
            context[key] = []
        for i in objs:
            slot = buckets.get(i.status)
            if slot is not None:
                context[slot].append(i)
        context['agentcheck'] = False
    context['offers'] = objs
    return render(request, template, context)
@login_required
def tour(request, id):
    """Create a named tour request containing one property."""
    tour_req = tourrequests()
    tour_req.user = request.user
    tour_req.name = request.POST['tourname']
    tour_req.save()
    tour_req.relproperty.add(properties.objects.get(pk=id))
    tour_req.save()
    return redirect('/property-detail/' + str(id))
@login_required
def changeofferstatus(request, id, id2):
    """Set offer *id2*'s status to *id*, then return to the offers page."""
    target = offers.objects.get(pk=id2)
    target.status = id
    target.save()
    return redirect('/offers')
@login_required
def profilepage(request, usr):
    """View or edit another user's profile; own username redirects home.

    POST updates the User row (first name, email) and the linked
    profileModel (phone, occupation, any_other). Changes vs original:
    debug ``print`` calls removed, and the bare ``except: pass`` around
    the optional phone field replaced by ``POST.get`` with the current
    value as the default.
    """
    if request.user.username == usr:
        return redirect('/')
    if request.method == 'POST':
        user_obj = User.objects.get(username=usr)
        user_obj.first_name = request.POST['name']
        user_obj.email = request.POST['email']
        user_obj.save()
        profile = profileModel.objects.get(user=user_obj)
        # Phone is optional in the form; keep the old value when absent.
        profile.contactNumber = request.POST.get('phone', profile.contactNumber)
        profile.occupation = request.POST['occu']
        profile.any_other = request.POST['any_other']
        profile.save()
    template = 'page_471.html'
    # Linear scan kept so obj stays None (not an exception) when no
    # profile matches the username, exactly like the original.
    obj = None
    for profile in profileModel.objects.all():
        if profile.user.username == usr:
            obj = profile
            break
    context = {'agentcheck': True, 'obj': obj}
    return render(request, template, context)
@login_required
def sharelist(request, usr, id):
    """Share a shortlist with *usr* and email them a link.

    NOTE(review): returns None rather than an HttpResponse, which Django
    rejects for a normal request — presumably called via a background
    fetch; confirm. Body is identical to ``sharelistbuyer``.
    """
    target = shortlist.objects.get(pk=id)
    recipient = User.objects.get(username=usr)
    target.shared_with.add(recipient)
    target.save()
    message = EmailMessage(
        subject = request.user.username + ' shared an Interest list',
        body = request.get_host()+'/show-list/'+str(id)+' is the link to the list.',
        to = [recipient.email]
    )
    message.send()
    return
@login_required
def sharelistbuyer(request, usr, id):
    """Share a shortlist with buyer *usr* and email them a link.

    NOTE(review): byte-for-byte duplicate of ``sharelist``; candidate for
    consolidation. Also returns None rather than an HttpResponse.
    """
    target = shortlist.objects.get(pk=id)
    recipient = User.objects.get(username=usr)
    target.shared_with.add(recipient)
    target.save()
    message = EmailMessage(
        subject = request.user.username + ' shared an Interest list',
        body = request.get_host()+'/show-list/'+str(id)+' is the link to the list.',
        to = [recipient.email]
    )
    message.send()
    return
@login_required
def unlinklist(request, id):
    """Remove every user the shortlist is shared with.

    Uses the m2m ``clear()`` instead of removing members while iterating
    the same related queryset.
    """
    obj = shortlist.objects.get(pk=id)
    obj.shared_with.clear()
    obj.save()
    # NOTE(review): returns None rather than an HttpResponse — confirm
    # this view is only hit via background fetch.
    return
@login_required
def ratepro(request, listid, proid, rating):
    """Store *rating* for one property within one shortlist.

    Filtered queryset replaces the original scan over every
    propertyrating row in Python.
    """
    objl = shortlist.objects.get(pk=listid)
    objp = properties.objects.get(pk=proid)
    for entry in propertyrating.objects.filter(linklist=objl, relproperty=objp):
        entry.rating = rating
        entry.save()
    # NOTE(review): returns None rather than an HttpResponse — confirm
    # this view is only hit via background fetch.
    return
# Helper functions
def stripspace(a):
    """Return *a* with leading spaces and newlines removed.

    Equivalent to the original character-by-character scan: skip every
    ' ' and '\\n' until the first other character, then keep the rest of
    the string verbatim (including any later whitespace).
    """
    return a.lstrip(' \n')
def getrating(a, b):
    """Return the stored rating for property *a* in shortlist *b*.

    Returns None when no rating row matches, like the original's
    implicit fall-through. The DB-side filter replaces the original
    scan over every propertyrating row.
    """
    match = propertyrating.objects.filter(linklist=b, relproperty=a).first()
    if match is None:
        return None
    return match.rating
| null |
ptm/functionality/views.py
|
views.py
|
py
| 17,700 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "mysite.models.profileModel.objects.get",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "mysite.models.profileModel.objects",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "mysite.models.profileModel",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "models.properties.objects.all",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "models.properties.objects",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "models.properties",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "forms.propertyform",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "models.properties",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "django.core.files.storage.FileSystemStorage",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "django.core.files.storage.FileSystemStorage",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "django.core.files.storage.FileSystemStorage",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "django.core.files.storage.FileSystemStorage",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "django.core.files.storage.FileSystemStorage",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "forms.propertyform",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "models.properties.objects.get",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "models.properties.objects",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "models.properties",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "models.shortlist.objects.filter",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "models.shortlist.objects",
"line_number": 97,
"usage_type": "attribute"
},
{
"api_name": "models.shortlist",
"line_number": 97,
"usage_type": "name"
},
{
"api_name": "models.tourrequests.objects.filter",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "models.tourrequests.objects",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "models.tourrequests",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "mysite.models.profileModel.objects.get",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "mysite.models.profileModel.objects",
"line_number": 100,
"usage_type": "attribute"
},
{
"api_name": "mysite.models.profileModel",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "models.shortlist.objects.get",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "models.shortlist.objects",
"line_number": 109,
"usage_type": "attribute"
},
{
"api_name": "models.shortlist",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "models.properties.objects.get",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "models.properties.objects",
"line_number": 110,
"usage_type": "attribute"
},
{
"api_name": "models.properties",
"line_number": 110,
"usage_type": "name"
},
{
"api_name": "models.propertyrating",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 107,
"usage_type": "name"
},
{
"api_name": "models.shortlist.objects.get",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "models.shortlist.objects",
"line_number": 121,
"usage_type": "attribute"
},
{
"api_name": "models.shortlist",
"line_number": 121,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 119,
"usage_type": "name"
},
{
"api_name": "models.tourrequests.objects.get",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "models.tourrequests.objects",
"line_number": 127,
"usage_type": "attribute"
},
{
"api_name": "models.tourrequests",
"line_number": 127,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 125,
"usage_type": "name"
},
{
"api_name": "models.shortlist.objects.get",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "models.shortlist.objects",
"line_number": 133,
"usage_type": "attribute"
},
{
"api_name": "models.shortlist",
"line_number": 133,
"usage_type": "name"
},
{
"api_name": "models.properties.objects.get",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "models.properties.objects",
"line_number": 134,
"usage_type": "attribute"
},
{
"api_name": "models.properties",
"line_number": 134,
"usage_type": "name"
},
{
"api_name": "models.propertyrating.objects.all",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "models.propertyrating.objects",
"line_number": 137,
"usage_type": "attribute"
},
{
"api_name": "models.propertyrating",
"line_number": 137,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 131,
"usage_type": "name"
},
{
"api_name": "models.shortlist.objects.get",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "models.shortlist.objects",
"line_number": 146,
"usage_type": "attribute"
},
{
"api_name": "models.shortlist",
"line_number": 146,
"usage_type": "name"
},
{
"api_name": "models.shortlist.objects.all",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "models.shortlist.objects",
"line_number": 151,
"usage_type": "attribute"
},
{
"api_name": "models.shortlist",
"line_number": 151,
"usage_type": "name"
},
{
"api_name": "mysite.models.profileModel.objects.filter",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "mysite.models.profileModel.objects",
"line_number": 155,
"usage_type": "attribute"
},
{
"api_name": "mysite.models.profileModel",
"line_number": 155,
"usage_type": "name"
},
{
"api_name": "mysite.models.profileModel.objects.filter",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "mysite.models.profileModel.objects",
"line_number": 156,
"usage_type": "attribute"
},
{
"api_name": "mysite.models.profileModel",
"line_number": 156,
"usage_type": "name"
},
{
"api_name": "mysite.models.profileModel.objects.get",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "mysite.models.profileModel.objects",
"line_number": 157,
"usage_type": "attribute"
},
{
"api_name": "mysite.models.profileModel",
"line_number": 157,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 142,
"usage_type": "name"
},
{
"api_name": "models.shortlist",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 164,
"usage_type": "name"
},
{
"api_name": "models.tourrequests",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 171,
"usage_type": "name"
},
{
"api_name": "models.shortlist",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "models.properties.objects.get",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "models.properties.objects",
"line_number": 184,
"usage_type": "attribute"
},
{
"api_name": "models.properties",
"line_number": 184,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 178,
"usage_type": "name"
},
{
"api_name": "models.shortlist.objects.get",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "models.shortlist.objects",
"line_number": 191,
"usage_type": "attribute"
},
{
"api_name": "models.shortlist",
"line_number": 191,
"usage_type": "name"
},
{
"api_name": "models.notes.objects.all",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "models.notes.objects",
"line_number": 195,
"usage_type": "attribute"
},
{
"api_name": "models.notes",
"line_number": 195,
"usage_type": "name"
},
{
"api_name": "models.properties.objects.get",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "models.properties.objects",
"line_number": 196,
"usage_type": "attribute"
},
{
"api_name": "models.properties",
"line_number": 196,
"usage_type": "name"
},
{
"api_name": "models.notes",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "models.properties.objects.get",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "models.properties.objects",
"line_number": 201,
"usage_type": "attribute"
},
{
"api_name": "models.properties",
"line_number": 201,
"usage_type": "name"
},
{
"api_name": "mysite.models.profileModel.objects.filter",
"line_number": 206,
"usage_type": "call"
},
{
"api_name": "mysite.models.profileModel.objects",
"line_number": 206,
"usage_type": "attribute"
},
{
"api_name": "mysite.models.profileModel",
"line_number": 206,
"usage_type": "name"
},
{
"api_name": "models.notes.objects.filter",
"line_number": 207,
"usage_type": "call"
},
{
"api_name": "models.notes.objects",
"line_number": 207,
"usage_type": "attribute"
},
{
"api_name": "models.notes",
"line_number": 207,
"usage_type": "name"
},
{
"api_name": "models.shortlist.objects.all",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "models.shortlist.objects",
"line_number": 225,
"usage_type": "attribute"
},
{
"api_name": "models.shortlist",
"line_number": 225,
"usage_type": "name"
},
{
"api_name": "mysite.models.profileModel.objects.get",
"line_number": 228,
"usage_type": "call"
},
{
"api_name": "mysite.models.profileModel.objects",
"line_number": 228,
"usage_type": "attribute"
},
{
"api_name": "mysite.models.profileModel",
"line_number": 228,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 232,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 188,
"usage_type": "name"
},
{
"api_name": "models.tourrequests.objects.get",
"line_number": 239,
"usage_type": "call"
},
{
"api_name": "models.tourrequests.objects",
"line_number": 239,
"usage_type": "attribute"
},
{
"api_name": "models.tourrequests",
"line_number": 239,
"usage_type": "name"
},
{
"api_name": "models.properties.objects.get",
"line_number": 241,
"usage_type": "call"
},
{
"api_name": "models.properties.objects",
"line_number": 241,
"usage_type": "attribute"
},
{
"api_name": "models.properties",
"line_number": 241,
"usage_type": "name"
},
{
"api_name": "models.tourrequests",
"line_number": 242,
"usage_type": "call"
},
{
"api_name": "models.shortlist.objects.get",
"line_number": 248,
"usage_type": "call"
},
{
"api_name": "models.shortlist.objects",
"line_number": 248,
"usage_type": "attribute"
},
{
"api_name": "models.shortlist",
"line_number": 248,
"usage_type": "name"
},
{
"api_name": "models.tourrequests",
"line_number": 249,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 258,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 234,
"usage_type": "name"
},
{
"api_name": "models.tourrequests.objects.get",
"line_number": 262,
"usage_type": "call"
},
{
"api_name": "models.tourrequests.objects",
"line_number": 262,
"usage_type": "attribute"
},
{
"api_name": "models.tourrequests",
"line_number": 262,
"usage_type": "name"
},
{
"api_name": "models.properties.objects.get",
"line_number": 263,
"usage_type": "call"
},
{
"api_name": "models.properties.objects",
"line_number": 263,
"usage_type": "attribute"
},
{
"api_name": "models.properties",
"line_number": 263,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 264,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 260,
"usage_type": "name"
},
{
"api_name": "models.tourrequests.objects.get",
"line_number": 268,
"usage_type": "call"
},
{
"api_name": "models.tourrequests.objects",
"line_number": 268,
"usage_type": "attribute"
},
{
"api_name": "models.tourrequests",
"line_number": 268,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 286,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 266,
"usage_type": "name"
},
{
"api_name": "mysite.models.profileModel.objects.get",
"line_number": 292,
"usage_type": "call"
},
{
"api_name": "mysite.models.profileModel.objects",
"line_number": 292,
"usage_type": "attribute"
},
{
"api_name": "mysite.models.profileModel",
"line_number": 292,
"usage_type": "name"
},
{
"api_name": "models.tourrequests.objects.get",
"line_number": 296,
"usage_type": "call"
},
{
"api_name": "models.tourrequests.objects",
"line_number": 296,
"usage_type": "attribute"
},
{
"api_name": "models.tourrequests",
"line_number": 296,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 298,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 288,
"usage_type": "name"
},
{
"api_name": "mysite.models.profileModel.objects.filter",
"line_number": 307,
"usage_type": "call"
},
{
"api_name": "mysite.models.profileModel.objects",
"line_number": 307,
"usage_type": "attribute"
},
{
"api_name": "mysite.models.profileModel",
"line_number": 307,
"usage_type": "name"
},
{
"api_name": "models.notes",
"line_number": 308,
"usage_type": "call"
},
{
"api_name": "models.properties.objects.get",
"line_number": 311,
"usage_type": "call"
},
{
"api_name": "models.properties.objects",
"line_number": 311,
"usage_type": "attribute"
},
{
"api_name": "models.properties",
"line_number": 311,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 314,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 315,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 300,
"usage_type": "name"
},
{
"api_name": "models.tourrequests.objects.get",
"line_number": 320,
"usage_type": "call"
},
{
"api_name": "models.tourrequests.objects",
"line_number": 320,
"usage_type": "attribute"
},
{
"api_name": "models.tourrequests",
"line_number": 320,
"usage_type": "name"
},
{
"api_name": "mysite.models.profileModel.objects.get",
"line_number": 325,
"usage_type": "call"
},
{
"api_name": "mysite.models.profileModel.objects",
"line_number": 325,
"usage_type": "attribute"
},
{
"api_name": "mysite.models.profileModel",
"line_number": 325,
"usage_type": "name"
},
{
"api_name": "models.tourrequests.objects.filter",
"line_number": 328,
"usage_type": "call"
},
{
"api_name": "models.tourrequests.objects",
"line_number": 328,
"usage_type": "attribute"
},
{
"api_name": "models.tourrequests",
"line_number": 328,
"usage_type": "name"
},
{
"api_name": "models.tourrequests.objects.all",
"line_number": 331,
"usage_type": "call"
},
{
"api_name": "models.tourrequests.objects",
"line_number": 331,
"usage_type": "attribute"
},
{
"api_name": "models.tourrequests",
"line_number": 331,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 332,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 317,
"usage_type": "name"
},
{
"api_name": "models.notes.objects.filter",
"line_number": 337,
"usage_type": "call"
},
{
"api_name": "models.notes.objects",
"line_number": 337,
"usage_type": "attribute"
},
{
"api_name": "models.notes",
"line_number": 337,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 338,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 334,
"usage_type": "name"
},
{
"api_name": "models.notes.objects.get",
"line_number": 347,
"usage_type": "call"
},
{
"api_name": "models.notes.objects",
"line_number": 347,
"usage_type": "attribute"
},
{
"api_name": "models.notes",
"line_number": 347,
"usage_type": "name"
},
{
"api_name": "models.notes",
"line_number": 348,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 354,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 355,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 340,
"usage_type": "name"
},
{
"api_name": "models.shortlist.objects.get",
"line_number": 359,
"usage_type": "call"
},
{
"api_name": "models.shortlist.objects",
"line_number": 359,
"usage_type": "attribute"
},
{
"api_name": "models.shortlist",
"line_number": 359,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 362,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 357,
"usage_type": "name"
},
{
"api_name": "models.properties.objects.get",
"line_number": 366,
"usage_type": "call"
},
{
"api_name": "models.properties.objects",
"line_number": 366,
"usage_type": "attribute"
},
{
"api_name": "models.properties",
"line_number": 366,
"usage_type": "name"
},
{
"api_name": "models.shortlist.objects.get",
"line_number": 367,
"usage_type": "call"
},
{
"api_name": "models.shortlist.objects",
"line_number": 367,
"usage_type": "attribute"
},
{
"api_name": "models.shortlist",
"line_number": 367,
"usage_type": "name"
},
{
"api_name": "models.shortlist.objects.get",
"line_number": 370,
"usage_type": "call"
},
{
"api_name": "models.shortlist.objects",
"line_number": 370,
"usage_type": "attribute"
},
{
"api_name": "models.shortlist",
"line_number": 370,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 373,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 364,
"usage_type": "name"
},
{
"api_name": "mysite.models.profileModel.objects.filter",
"line_number": 378,
"usage_type": "call"
},
{
"api_name": "mysite.models.profileModel.objects",
"line_number": 378,
"usage_type": "attribute"
},
{
"api_name": "mysite.models.profileModel",
"line_number": 378,
"usage_type": "name"
},
{
"api_name": "mysite.models.profileModel.objects.get",
"line_number": 379,
"usage_type": "call"
},
{
"api_name": "mysite.models.profileModel.objects",
"line_number": 379,
"usage_type": "attribute"
},
{
"api_name": "mysite.models.profileModel",
"line_number": 379,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 380,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 383,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 375,
"usage_type": "name"
},
{
"api_name": "models.tourrequests.objects.get",
"line_number": 387,
"usage_type": "call"
},
{
"api_name": "models.tourrequests.objects",
"line_number": 387,
"usage_type": "attribute"
},
{
"api_name": "models.tourrequests",
"line_number": 387,
"usage_type": "name"
},
{
"api_name": "models.properties.objects.get",
"line_number": 388,
"usage_type": "call"
},
{
"api_name": "models.properties.objects",
"line_number": 388,
"usage_type": "attribute"
},
{
"api_name": "models.properties",
"line_number": 388,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 389,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 385,
"usage_type": "name"
},
{
"api_name": "models.tourrequests.objects.get",
"line_number": 393,
"usage_type": "call"
},
{
"api_name": "models.tourrequests.objects",
"line_number": 393,
"usage_type": "attribute"
},
{
"api_name": "models.tourrequests",
"line_number": 393,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 396,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 391,
"usage_type": "name"
},
{
"api_name": "models.tourrequests.objects.get",
"line_number": 400,
"usage_type": "call"
},
{
"api_name": "models.tourrequests.objects",
"line_number": 400,
"usage_type": "attribute"
},
{
"api_name": "models.tourrequests",
"line_number": 400,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 403,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 398,
"usage_type": "name"
},
{
"api_name": "models.offers",
"line_number": 407,
"usage_type": "call"
},
{
"api_name": "models.properties.objects.get",
"line_number": 409,
"usage_type": "call"
},
{
"api_name": "models.properties.objects",
"line_number": 409,
"usage_type": "attribute"
},
{
"api_name": "models.properties",
"line_number": 409,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 412,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 405,
"usage_type": "name"
},
{
"api_name": "mysite.models.profileModel.objects.get",
"line_number": 417,
"usage_type": "call"
},
{
"api_name": "mysite.models.profileModel.objects",
"line_number": 417,
"usage_type": "attribute"
},
{
"api_name": "mysite.models.profileModel",
"line_number": 417,
"usage_type": "name"
},
{
"api_name": "models.offers.objects.all",
"line_number": 419,
"usage_type": "call"
},
{
"api_name": "models.offers.objects",
"line_number": 419,
"usage_type": "attribute"
},
{
"api_name": "models.offers",
"line_number": 419,
"usage_type": "name"
},
{
"api_name": "models.offers.objects.filter",
"line_number": 430,
"usage_type": "call"
},
{
"api_name": "models.offers.objects",
"line_number": 430,
"usage_type": "attribute"
},
{
"api_name": "models.offers",
"line_number": 430,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 449,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 414,
"usage_type": "name"
},
{
"api_name": "models.tourrequests",
"line_number": 453,
"usage_type": "call"
},
{
"api_name": "models.properties.objects.get",
"line_number": 457,
"usage_type": "call"
},
{
"api_name": "models.properties.objects",
"line_number": 457,
"usage_type": "attribute"
},
{
"api_name": "models.properties",
"line_number": 457,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 459,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 451,
"usage_type": "name"
},
{
"api_name": "models.offers.objects.get",
"line_number": 463,
"usage_type": "call"
},
{
"api_name": "models.offers.objects",
"line_number": 463,
"usage_type": "attribute"
},
{
"api_name": "models.offers",
"line_number": 463,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 466,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 461,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 471,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.models.User.objects.get",
"line_number": 473,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.models.User.objects",
"line_number": 473,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.auth.models.User",
"line_number": 473,
"usage_type": "name"
},
{
"api_name": "mysite.models.profileModel.objects.get",
"line_number": 478,
"usage_type": "call"
},
{
"api_name": "mysite.models.profileModel.objects",
"line_number": 478,
"usage_type": "attribute"
},
{
"api_name": "mysite.models.profileModel",
"line_number": 478,
"usage_type": "name"
},
{
"api_name": "mysite.models.profileModel.objects.all",
"line_number": 487,
"usage_type": "call"
},
{
"api_name": "mysite.models.profileModel.objects",
"line_number": 487,
"usage_type": "attribute"
},
{
"api_name": "mysite.models.profileModel",
"line_number": 487,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 498,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 468,
"usage_type": "name"
},
{
"api_name": "models.shortlist.objects.get",
"line_number": 502,
"usage_type": "call"
},
{
"api_name": "models.shortlist.objects",
"line_number": 502,
"usage_type": "attribute"
},
{
"api_name": "models.shortlist",
"line_number": 502,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.models.User.objects.get",
"line_number": 503,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.models.User.objects",
"line_number": 503,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.auth.models.User",
"line_number": 503,
"usage_type": "name"
},
{
"api_name": "django.core.mail.EmailMessage",
"line_number": 506,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 500,
"usage_type": "name"
},
{
"api_name": "models.shortlist.objects.get",
"line_number": 516,
"usage_type": "call"
},
{
"api_name": "models.shortlist.objects",
"line_number": 516,
"usage_type": "attribute"
},
{
"api_name": "models.shortlist",
"line_number": 516,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.models.User.objects.get",
"line_number": 517,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.models.User.objects",
"line_number": 517,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.auth.models.User",
"line_number": 517,
"usage_type": "name"
},
{
"api_name": "django.core.mail.EmailMessage",
"line_number": 520,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 514,
"usage_type": "name"
},
{
"api_name": "models.shortlist.objects.get",
"line_number": 530,
"usage_type": "call"
},
{
"api_name": "models.shortlist.objects",
"line_number": 530,
"usage_type": "attribute"
},
{
"api_name": "models.shortlist",
"line_number": 530,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 528,
"usage_type": "name"
},
{
"api_name": "models.shortlist.objects.get",
"line_number": 538,
"usage_type": "call"
},
{
"api_name": "models.shortlist.objects",
"line_number": 538,
"usage_type": "attribute"
},
{
"api_name": "models.shortlist",
"line_number": 538,
"usage_type": "name"
},
{
"api_name": "models.properties.objects.get",
"line_number": 539,
"usage_type": "call"
},
{
"api_name": "models.properties.objects",
"line_number": 539,
"usage_type": "attribute"
},
{
"api_name": "models.properties",
"line_number": 539,
"usage_type": "name"
},
{
"api_name": "models.propertyrating.objects.all",
"line_number": 540,
"usage_type": "call"
},
{
"api_name": "models.propertyrating.objects",
"line_number": 540,
"usage_type": "attribute"
},
{
"api_name": "models.propertyrating",
"line_number": 540,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 536,
"usage_type": "name"
},
{
"api_name": "models.propertyrating.objects.all",
"line_number": 563,
"usage_type": "call"
},
{
"api_name": "models.propertyrating.objects",
"line_number": 563,
"usage_type": "attribute"
},
{
"api_name": "models.propertyrating",
"line_number": 563,
"usage_type": "name"
}
] |
114300888
|
import os
import re
from sys import stdout
from time import sleep
from datetime import datetime
from subprocess import Popen, PIPE
from copy import deepcopy as copy
# from random import randint # TODO Usado somente em testes
import multiprocessing as mp
import xml.etree.ElementTree as et
def memory_usage_ps():
out = Popen(['ps', 'v', '-C', 'python3'],
stdout=PIPE).communicate()[0].split(b'\n')
vsz_index = out[0].split().index(b'%MEM')
mem = 0.0
for i in out[1:-1]:
mem += float(i.split()[vsz_index])
return mem/100
def dual_print(*args,**kwargs):
if kwargs['file']!=sys.stdout and kwargs['file']!=None :
print(*args, file=kwargs['file'])
kwargs.pop('file')
print(*args, **kwargs)
def call(arg, out_file=sys.stdout):
dual_print(datetime.now(), 'Executando comando: ', arg, file=out_file)
processo = Popen(arg, shell=True, stdout=PIPE)
return_text, erros = processo.communicate(timeout=60)
if return_text:
dual_print(datetime.now(), return_text, file=out_file)
else:
print('\n')
if erros:
dual_print(datetime.now(), 'ERRO:', errors, file=out_file)
return processo.returncode if processo.returncode != 0 else 1
return processo.returncode
# if not sleep(randint(1,60)): # TODO Uso durante testes
# return 0 # TODO Uso durante testes
def corretor_sintatico(tree):
xml_final = []
md = next(tree.iter('md')) # Supondo que só haja 1 tag "md"
for mi in md.iter('mi'):
try:
next(mi.iter('mv'))
except StopIteration:
continue
rows, a_remover =[], []
for mt in mi.iter('mt'):
a_remover.append((mi, mt))
rows.append(mt)
gp = next(mi.iter('gp'))
mi.remove(gp)
mts = next(mi.iter('mts'))
for mv in mi.iter('mv'):
mv.insert(1, copy(mts))
for i, r in enumerate(mv.iter('r')):
r.tag = rows[i].text
for sf in mv.iter('sf'):
a_remover.append((mv,sf))
mi.remove(mts)
for parent, tag in a_remover:
parent.remove(tag)
xml_final.append(et.tostring(mi).decode("utf-8"))
return '<?xml version="1.0" encoding="UTF-8"?>' + '\n' + ''.join(xml_final)
def save_correct(name, dir_path, tree, out_file=None):
if not out_file:
out_file = open('/root/save_hdfs.log','a')
if len(list(tree.iter('moid'))) == 0:
return
str_value = corretor_sintatico(tree)
dir_path = os.path.abspath(dir_path)
complete_path = dir_path+'/'+name
call('mkdir -p '+dir_path, out_file=out_file)
print('Criando arquivo: '+complete_path, file=out_file)
with open(complete_path,'w') as temp_file:
print(str_value, file=temp_file)
print('Transferindo '+complete_path+' para o HDFS.', file=out_file)
call('export HADOOP_USER_NAME=hdfs && hdfs dfs -put '+complete_path+' /user/hdfs/spool',
out_file=out_file)
print('Removendo arquivo corrigido original.', file=out_file)
os.remove(complete_path)
def mem_safe_proc(target,args):
c = 0
# Aguardar o termino de outros processos antes de criar um novo
while memory_usage_ps() > .2: # Teto de uso da memória: 20%
c+=1
if c%10==0:
print('\t\t\t -- Aguardando espaço na memória \
RAM para criar novo processo --')
print('\t\t\t Memória em uso pelo python3: ',
'{:.2%}'.format(memory_usage_ps()))
sleep(1)
proc = mp.Process(target=target, args=args)
proc.start()
if __name__ == '__main__':
ids_path = os.path.abspath('../resources/ids.csv')
ids = open(ids_path, 'r')
ids = [line.replace('\n','') for line in ids]
ingest_path = os.path.abspath('../ingest/') + '/'
_, _, files = next(os.walk(ingest_path))
filtered_path = os.path.abspath('/data4/filtered') +'/'
# filtered_path = os.path.abspath('../spool/filtered') +'/' # TODO Uso durante testes
arquivos_novos = []
for file_name in files:
original_name = file_name
file_name = ingest_path+file_name
tree, primeira, segunda = '', '', ''
try:
tree = et.parse(file_name)
with open(file_name,'r') as input_file:
primeira = input_file.readline()
segunda = input_file.readline()[:-6]
os.remove(file_name) # TODO Não deve ser usado durante testes
except:
# Em caso de concorrência, um dos dois vai deletar o arquivo primeiro,
# e o outro deve continuar a execução desconsiderando o arquivo
# deletado, que será tratado pelo concorrente.
print('\nConcorrência ou erro de permissão na análise do arquivo.', file_name)
continue
new_name = original_name.replace('.xml','') \
+'_filtered_at_' \
+re.sub('[\.\:\s-]', '_',
str(datetime.now())) +'.xml'
mem_safe_proc(save_correct, (new_name, '/data4/correct', copy(tree)))
arquivos_novos.append(original_name)
print('\nFiltrando arquivo:', file_name)
print('Arquivo original removido.')
mvs = []
for mi in tree.iter('mi'):
for mv in mi.iter('mv'):
for moid in mv.iter('moid'):
if not any((id_ in moid.text) for id_ in ids):
mvs.append((mi, mv))
break
for mi, mv in mvs:
mi.remove(mv)
if len(list(tree.iter('moid'))) > 0:
print('Salvando arquivo filtrado:\n', new_name)
saida = open(filtered_path+new_name, 'a')
print(primeira, file=saida, end='')
print(segunda, file=saida)
print(et.tostring(tree.getroot()).decode("utf-8"), end='', file=saida)
if len(arquivos_novos):
print('*'*80,'\n')
print(len(arquivos_novos), 'Arquivos tratados e salvos nesta exacução:\n')
for arquivo in arquivos_novos:
print('\t',arquivo)
print()
| null |
RanSharing/Filter/filter.py
|
filter.py
|
py
| 6,171 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "subprocess.Popen",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "sys.stdout",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "subprocess.Popen",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "copy.deepcopy",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree.tostring",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "os.path.abspath",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "os.remove",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Process",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 128,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 133,
"usage_type": "attribute"
},
{
"api_name": "os.walk",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 137,
"usage_type": "attribute"
},
{
"api_name": "xml.etree.ElementTree.parse",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 148,
"usage_type": "name"
},
{
"api_name": "os.remove",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 163,
"usage_type": "name"
},
{
"api_name": "copy.deepcopy",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree.tostring",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 189,
"usage_type": "name"
}
] |
300910657
|
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.linear_model import Lasso
from sklearn.feature_selection import SelectFromModel
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
def feature_selection():
df=pd.read_csv('X_train.csv')
df1=pd.read_csv('train.csv')
selected_feat_2=['Alley','PoolQC','Fence','MiscFeature']
train_data=data[:len(df1)].drop(['SalePrice'],axis=1)
test_data=data[len(df1):].drop(['SalePrice'],axis=1)
salePrice=data['SalePrice'][:len(df1)]
salePrice=pd.DataFrame(salePrice)
x_train,x_test,y_train,y_test=train_test_split(train_data,salePrice,test_size=.05)
x_train.fillna(1,inplace=True)
feature_sel_model = SelectFromModel(Lasso(alpha=1, random_state=0))
feature_sel_model.fit(x_train, y_train)
selected_feat = x_train.columns[(feature_sel_model.get_support())]
print('total features: {}'.format((x_train.shape[1])))
print('selected features: {}'.format(len(selected_feat)))
selected_feat=list(set(selected_feat)-set(selected_feat_2))
x_train=x_train[selected_feat]
x_test=x_test[selected_feat]
x_train.to_csv('x_train.csv',index=False)
x_test.to_csv('x_test.csv',index=False)
y_train.to_csv('y_train.csv',index=False)
y_test.to_csv('y_test.csv',index=False)
test_data.to_csv('test.csv',index=False)
if __name__ == '__main__':
feature_selection()
| null |
MachineLearning-Projects--master/House Price prediction (kaggle)/feature_selection.py
|
feature_selection.py
|
py
| 1,445 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "warnings.filterwarnings",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "sklearn.feature_selection.SelectFromModel",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "sklearn.linear_model.Lasso",
"line_number": 28,
"usage_type": "call"
}
] |
356714770
|
import base64
import uuid
import os
from openpyxl.chart import (
LineChart,
BarChart,
Reference,
Series
)
from openpyxl.styles import PatternFill, Border, Side, Alignment, Font
from openpyxl.drawing.image import Image
from openpyxl import Workbook
from openpyxl.chart.label import DataLabelList
####################################################################################################################
# PROCEDURES
# Step 1: Validate the report data
# Step 2: Generate excelexporters file
# Step 3: Encode the excelexporters file to Base64
####################################################################################################################
def export(report, name, reporting_start_datetime_local, reporting_end_datetime_local, period_type):
####################################################################################################################
# Step 1: Validate the report data
####################################################################################################################
if report is None:
return None
if "reporting_period" not in report.keys() or \
"values" not in report['reporting_period'].keys() or len(report['reporting_period']['values']) == 0:
return None
####################################################################################################################
# Step 2: Generate excel file from the report data
####################################################################################################################
filename = generate_excel(report,
name,
reporting_start_datetime_local,
reporting_end_datetime_local,
period_type)
####################################################################################################################
# Step 3: Encode the excel file to Base64
####################################################################################################################
try:
with open(filename, 'rb') as binary_file:
binary_file_data = binary_file.read()
except IOError as ex:
pass
# Base64 encode the bytes
base64_encoded_data = base64.b64encode(binary_file_data)
# get the Base64 encoded data using human-readable characters.
base64_message = base64_encoded_data.decode('utf-8')
# delete the file from server
try:
os.remove(filename)
except NotImplementedError as ex:
pass
return base64_message
def generate_excel(report, name, reporting_start_datetime_local, reporting_end_datetime_local, period_type):
wb = Workbook()
# todo
ws = wb.active
# Row height
ws.row_dimensions[1].height = 102
for i in range(2, 2000 + 1):
ws.row_dimensions[i].height = 42
# ws.row_dimensions[1].height = 102
# for i in range(2, 11 + 1):
# ws.row_dimensions[i].height = 30
#
# for i in range(12, 43 + 1):
# ws.row_dimensions[i].height = 30
# Col width
ws.column_dimensions['A'].width = 1.5
ws.column_dimensions['B'].width = 25.0
for i in range(ord('C'), ord('L')):
ws.column_dimensions[chr(i)].width = 15.0
# Font
name_font = Font(name='Constantia', size=15, bold=True)
title_font = Font(name='宋体', size=15, bold=True)
data_font = Font(name='Franklin Gothic Book', size=11)
table_fill = PatternFill(fill_type='solid', fgColor='1F497D')
f_border = Border(left=Side(border_style='medium', color='00000000'),
right=Side(border_style='medium', color='00000000'),
bottom=Side(border_style='medium', color='00000000'),
top=Side(border_style='medium', color='00000000')
)
b_border = Border(
bottom=Side(border_style='medium', color='00000000'),
)
b_c_alignment = Alignment(vertical='bottom',
horizontal='center',
text_rotation=0,
wrap_text=True,
shrink_to_fit=False,
indent=0)
c_c_alignment = Alignment(vertical='center',
horizontal='center',
text_rotation=0,
wrap_text=True,
shrink_to_fit=False,
indent=0)
b_r_alignment = Alignment(vertical='bottom',
horizontal='right',
text_rotation=0,
wrap_text=True,
shrink_to_fit=False,
indent=0)
c_r_alignment = Alignment(vertical='bottom',
horizontal='center',
text_rotation=0,
wrap_text=True,
shrink_to_fit=False,
indent=0)
# Img
img = Image("excelexporters/myems.png")
img.width = img.width * 0.85
img.height = img.height * 0.85
# img = Image("myems.png")
ws.add_image(img, 'B1')
# Title
ws.row_dimensions[3].height = 60
ws['B3'].font = name_font
ws['B3'].alignment = b_r_alignment
ws['B3'] = 'Name:'
ws['C3'].border = b_border
ws['C3'].alignment = b_c_alignment
ws['C3'].font = name_font
ws['C3'] = name
ws['D3'].font = name_font
ws['D3'].alignment = b_r_alignment
ws['D3'] = 'Period:'
ws['E3'].border = b_border
ws['E3'].alignment = b_c_alignment
ws['E3'].font = name_font
ws['E3'] = period_type
ws['F3'].font = name_font
ws['F3'].alignment = b_r_alignment
ws['F3'] = 'Date:'
ws['G3'].border = b_border
ws['G3'].alignment = b_c_alignment
ws['G3'].font = name_font
ws['G3'] = reporting_start_datetime_local + "__" + reporting_end_datetime_local
ws.merge_cells("G3:H3")
if "reporting_period" not in report.keys() or \
"values" not in report['reporting_period'].keys() or len(report['reporting_period']['values']) == 0:
filename = str(uuid.uuid4()) + '.xlsx'
wb.save(filename)
return filename
###############################
has_cost_data_flag = True
if "values" not in report['reporting_period'].keys() or len(report['reporting_period']['values']) == 0:
has_cost_data_flag = False
if has_cost_data_flag:
ws['B6'].font = title_font
ws['B6'] = name + '报告期成本'
reporting_period_data = report['reporting_period']
category = report['virtual_meter']['energy_category_name']
ca_len = len(category)
ws.row_dimensions[7].height = 60
ws['B7'].fill = table_fill
ws['B7'].border = f_border
ws['B8'].font = title_font
ws['B8'].alignment = c_c_alignment
ws['B8'] = '成本'
ws['B8'].border = f_border
ws['B9'].font = title_font
ws['B9'].alignment = c_c_alignment
ws['B9'] = '环比'
ws['B9'].border = f_border
col = 'B'
for i in range(0, ca_len):
col = chr(ord('C') + i)
ws[col + '7'].fill = table_fill
ws[col + '7'].font = name_font
ws[col + '7'].alignment = c_c_alignment
ws[col + '7'] = report['virtual_meter']['energy_category_name'] + \
" (" + report['virtual_meter']['unit_of_measure'] + ")"
ws[col + '7'].border = f_border
ws[col + '8'].font = name_font
ws[col + '8'].alignment = c_c_alignment
ws[col + '8'] = round(reporting_period_data['total_in_category'], 2)
ws[col + '8'].border = f_border
ws[col + '9'].font = name_font
ws[col + '9'].alignment = c_c_alignment
ws[col + '9'] = str(round(reporting_period_data['increment_rate'] * 100, 2)) + "%" \
if reporting_period_data['increment_rate'] is not None else "-"
ws[col + '9'].border = f_border
# TCE TCO2E
end_col = col
# TCE
tce_col = chr(ord(end_col) + 1)
ws[tce_col + '7'].fill = table_fill
ws[tce_col + '7'].font = name_font
ws[tce_col + '7'].alignment = c_c_alignment
ws[tce_col + '7'] = "吨标准煤 (TCE)"
ws[tce_col + '7'].border = f_border
ws[tce_col + '8'].font = name_font
ws[tce_col + '8'].alignment = c_c_alignment
ws[tce_col + '8'] = round(reporting_period_data['total_in_kgce'] / 1000, 2)
ws[tce_col + '8'].border = f_border
ws[tce_col + '9'].font = name_font
ws[tce_col + '9'].alignment = c_c_alignment
ws[tce_col + '9'] = str(round(reporting_period_data['increment_rate'] * 100, 2)) + "%" \
if reporting_period_data['increment_rate'] is not None else "-"
ws[tce_col + '9'].border = f_border
# TCO2E
tco2e_col = chr(ord(end_col) + 2)
ws[tco2e_col + '7'].fill = table_fill
ws[tco2e_col + '7'].font = name_font
ws[tco2e_col + '7'].alignment = c_c_alignment
ws[tco2e_col + '7'] = "吨二氧化碳排放 (TCO2E)"
ws[tco2e_col + '7'].border = f_border
ws[tco2e_col + '8'].font = name_font
ws[tco2e_col + '8'].alignment = c_c_alignment
ws[tco2e_col + '8'] = round(reporting_period_data['total_in_kgco2e'] / 1000, 2)
ws[tco2e_col + '8'].border = f_border
ws[tco2e_col + '9'].font = name_font
ws[tco2e_col + '9'].alignment = c_c_alignment
ws[tco2e_col + '9'] = str(round(reporting_period_data['increment_rate'] * 100, 2)) + "%" \
if reporting_period_data['increment_rate'] is not None else "-"
ws[tco2e_col + '9'].border = f_border
else:
for i in range(6, 9 + 1):
ws.rows_dimensions[i].height = 0.1
######################################
has_cost_detail_flag = True
reporting_period_data = report['reporting_period']
category = report['virtual_meter']['energy_category_name']
ca_len = len(category)
times = reporting_period_data['timestamps']
if "values" not in reporting_period_data.keys() or len(reporting_period_data['values']) == 0:
has_cost_detail_flag = False
if has_cost_detail_flag:
ws['B11'].font = title_font
ws['B11'] = name + '详细数据'
ws.row_dimensions[18].height = 60
ws['B18'].fill = table_fill
ws['B18'].font = title_font
ws['B18'].border = f_border
ws['B18'].alignment = c_c_alignment
ws['B18'] = '日期时间'
time = times
has_data = False
max_row = 0
if len(time) > 0:
has_data = True
max_row = 18 + len(time)
if has_data:
end_data_row_number = 19
for i in range(0, len(time)):
col = 'B'
end_data_row_number = 19 + i
row = str(end_data_row_number)
ws[col + row].font = title_font
ws[col + row].alignment = c_c_alignment
ws[col + row] = time[i]
ws[col + row].border = f_border
ws['B' + str(end_data_row_number + 1)].font = title_font
ws['B' + str(end_data_row_number + 1)].alignment = c_c_alignment
ws['B' + str(end_data_row_number + 1)] = '总计'
ws['B' + str(end_data_row_number + 1)].border = f_border
for i in range(0, ca_len):
col = chr(ord('C') + i)
ws[col + '18'].fill = table_fill
ws[col + '18'].font = title_font
ws[col + '18'].alignment = c_c_alignment
ws[col + '18'] = report['virtual_meter']['energy_category_name'] + \
" (" + report['virtual_meter']['unit_of_measure'] + ")"
ws[col + '18'].border = f_border
time = times
time_len = len(time)
for j in range(0, time_len):
row = str(19 + j)
ws[col + row].font = title_font
ws[col + row].alignment = c_c_alignment
ws[col + row] = round(reporting_period_data['values'][j], 2)
ws[col + row].border = f_border
ws[col + str(end_data_row_number + 1)].font = title_font
ws[col + str(end_data_row_number + 1)].alignment = c_c_alignment
ws[col + str(end_data_row_number + 1)] = round(reporting_period_data['total_in_category'], 2)
ws[col + str(end_data_row_number + 1)].border = f_border
line = LineChart()
labels = Reference(ws, min_col=2, min_row=19, max_row=max_row)
line_data = Reference(ws, min_col=3, min_row=18, max_row=max_row)
line.series.append(Series(line_data, title_from_data=True))
line.set_categories(labels)
line_data = line.series[0]
line_data.marker.symbol = "circle"
line_data.smooth = True
line.x_axis.crosses = 'min'
line.title = '报告期成本 - ' + report['virtual_meter']['energy_category_name'] + \
" (" + report['virtual_meter']['unit_of_measure'] + ")"
line.dLbls = DataLabelList()
line.dLbls.dLblPos = 't'
line.dLbls.showVal = True
line.height = 8.25
line.width = 24
ws.add_chart(line, "B12")
else:
for i in range(11, 43 + 1):
ws.row_dimensions[i].height = 0.0
filename = str(uuid.uuid4()) + '.xlsx'
wb.save(filename)
return filename
| null |
myems-api/excelexporters/virtualmetercost.py
|
virtualmetercost.py
|
py
| 13,827 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "base64.b64encode",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "openpyxl.Workbook",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "openpyxl.styles.Font",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "openpyxl.styles.Font",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "openpyxl.styles.Font",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "openpyxl.styles.PatternFill",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "openpyxl.styles.Border",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "openpyxl.styles.Side",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "openpyxl.styles.Side",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "openpyxl.styles.Side",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "openpyxl.styles.Side",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "openpyxl.styles.Border",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "openpyxl.styles.Side",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "openpyxl.styles.Alignment",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "openpyxl.styles.Alignment",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "openpyxl.styles.Alignment",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "openpyxl.styles.Alignment",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "openpyxl.drawing.image.Image",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "uuid.uuid4",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "openpyxl.chart.LineChart",
"line_number": 339,
"usage_type": "call"
},
{
"api_name": "openpyxl.chart.Reference",
"line_number": 340,
"usage_type": "call"
},
{
"api_name": "openpyxl.chart.Reference",
"line_number": 341,
"usage_type": "call"
},
{
"api_name": "openpyxl.chart.Series",
"line_number": 342,
"usage_type": "call"
},
{
"api_name": "openpyxl.chart.label.DataLabelList",
"line_number": 350,
"usage_type": "call"
},
{
"api_name": "uuid.uuid4",
"line_number": 360,
"usage_type": "call"
}
] |
589367352
|
"""
构建数据集
"""
import json
import torch
import torch.nn as nn
import nn_models.nn_config as nn_config
from torch.utils.data import Dataset
import numpy as np
from tqdm import tqdm
import pickle
import dgl
import random
from functools import reduce
def tokenizer(s):
"""
:param s: 摘要文本,切分成token
:return:
"""
tokens = []
li = s.split()
for word in li:
if word.startswith('ENTITY'):
tokens.append(word)
else:
_ = ''.join((char if char.isalpha() or char.isdigit() else " ") for char in word).split()
for t in _:
tokens.append(t.lower()) # 小写
return tokens
class SampleGenerator:
"""
针对一个sample : (m, e)
生成可送入深度学习模型的数据
"""
def __init__(self, mention_abstract_emb_fp, cell_max=5, row_max=5, col_max=30, abstract_max=100):
self.cell_max = cell_max
self.row_max = row_max
self.col_max = col_max
self.abstract_max = abstract_max
with open(mention_abstract_emb_fp, 'rb') as f:
self.mention_encode, self.abstract_encode = pickle.load(f)
def process_sample(self, sample):
"""
为一对m, e;构建符合深度学习模型的数据
:param sample: {
'cell_context': [...],
'col_context': [...],
'row_context': [...],
'mention': '...',
'abstract': '...',
}
:return:
"""
cell_context = sample['cell_context']
if len(cell_context) > self.cell_max:
random.shuffle(cell_context)
cell_context = cell_context[:self.cell_max]
col_context = sample['col_context']
if len(col_context) > self.col_max:
random.shuffle(col_context)
col_context = col_context[:self.col_max]
row_context = sample['row_context']
if len(row_context) > self.row_max:
random.shuffle(row_context)
row_context = row_context[:self.row_max]
c_g, c_features = self.construct_graph(sample['mention'], cell_context, col_context)
r_g, r_features = self.construct_graph(sample['mention'], cell_context, row_context)
abstract = self.encode_abstract(sample['abstract'])
return c_g, c_features, r_g, r_features, abstract
def encode_abstract(self, abstract):
"""
:param abstract:
:return: [pad_seq_len, emb_dim]
"""
res = []
for t in tokenizer(abstract):
if len(res) >= self.abstract_max:
break
if t in self.abstract_encode and self.abstract_encode[t] is not None:
res.append(self.abstract_encode[t])
for i in range(len(res), self.abstract_max):
res.append(self.abstract_encode['<pad>'])
return np.array(res)
def construct_graph(self, m, cell_context, some_context):
g = dgl.DGLGraph()
# mention, cell mentions, col mentions
g.add_nodes(num=1 + len(cell_context) + len(some_context))
for i in range(1 + len(cell_context)):
for j in range(1 + len(cell_context)):
if i != j: # 先不加自环
g.add_edge(i, j)
for i in range(1 + len(cell_context), 1 + len(cell_context) + len(some_context)):
g.add_edge(0, i)
g.add_edge(i, 0) # 无向图
ndata = [self.get_mention_emb(m)]
for i in range(len(cell_context)): # 1, ..., 1+len(cell_context)
ndata.append(self.get_mention_emb(cell_context[i]))
for i in range(len(some_context)): # 1+len(cell_context), 1+len(cell_context)+len(some_context)
ndata.append(self.get_mention_emb(some_context[i]))
ndata = np.array(ndata)
features = torch.from_numpy(ndata).float()
return g, features
def get_mention_emb(self, m):
"""
找不到的mention随机初始化
:param m:
:return:
"""
if m in self.mention_encode and self.mention_encode[m] is not None:
return self.mention_encode[m]
else:
return self.mention_encode['<unk>']
class AttentionSampleGenerator(SampleGenerator):
def __init__(self, mention_abstract_emb_fp, cell_max=5, row_max=5, col_max=30, abstract_max=100):
super().__init__(mention_abstract_emb_fp, cell_max, row_max, col_max, abstract_max)
def process_sample(self, sample):
"""
为一对m, e;构建符合深度学习模型的数据
:param sample: {
'cell_context': [...],
'col_context': [...],
'row_context': [...],
'mention': '...',
'abstract': '...',
}
:return:
"""
cell_context = sample['cell_context']
if len(cell_context) > self.cell_max:
random.shuffle(cell_context)
cell_context = cell_context[:self.cell_max]
col_context = sample['col_context']
if len(col_context) > self.col_max:
random.shuffle(col_context)
col_context = col_context[:self.col_max]
row_context = sample['row_context']
if len(row_context) > self.row_max:
random.shuffle(row_context)
row_context = row_context[:self.row_max]
c_g, c_features = self.construct_graph(sample['mention'], cell_context, col_context)
r_g, r_features = self.construct_graph(sample['mention'], cell_context, row_context)
abstract = self.encode_abstract(sample['abstract'])
entity_emb_repeat = np.array([sample['embedding'] for i in range(self.abstract_max)])
mention_emb = self.get_mention_emb(sample['mention'])
return c_g, c_features, r_g, r_features, abstract, entity_emb_repeat, mention_emb
class MentionEntityDataset(Dataset):
"""
语义匹配数据集
每条数据,包含,(mention, entity) 如果匹配,则标识1, 否则标识0
是个二分类问题
"""
def __init__(self, data, mention_abstract_emb_fp, cell_max=5, row_max=5, col_max=30, abstract_max=100):
print('data', len(data))
self.sample_generator = SampleGenerator(mention_abstract_emb_fp,
cell_max, row_max, col_max, abstract_max)
self.row_graphs = []
self.row_features = []
self.col_graphs = []
self.col_features = []
self.abstracts = []
self.labels = []
cnt = 0
for item in tqdm(data):
c_g, c_features, r_g, r_features, abstract = \
self.sample_generator.process_sample(item)
self.col_graphs.append(c_g)
self.col_features.append(c_features)
self.row_graphs.append(r_g)
self.row_features.append(r_features)
self.abstracts.append(abstract)
self.labels.append(item['label'])
def __getitem__(self, i):
return self.row_graphs[i], self.row_features[i], \
self.col_graphs[i], self.col_features[i], \
self.abstracts[i], self.labels[i]
def __len__(self):
return len(self.abstracts)
class AttentionMentionEntityDataset(Dataset):
"""
语义匹配数据集
每条数据,包含,(mention, entity) 如果匹配,则标识1, 否则标识0
是个二分类问题
(添加了attention数据集)
"""
def __init__(self, data, mention_abstract_emb_fp, cell_max=5, row_max=5, col_max=30, abstract_max=100):
print('data', len(data))
self.sample_generator = AttentionSampleGenerator(mention_abstract_emb_fp,
cell_max, row_max, col_max, abstract_max)
self.row_graphs = []
self.row_features = []
self.col_graphs = []
self.col_features = []
self.abstracts = []
self.entities_emb = []
self.mention_emb = []
self.labels = []
cnt = 0
for item in tqdm(data):
c_g, c_features, r_g, r_features, abstract, entities_emb, mention_emb = \
self.sample_generator.process_sample(item)
self.col_graphs.append(c_g)
self.col_features.append(c_features)
self.row_graphs.append(r_g)
self.row_features.append(r_features)
self.abstracts.append(abstract)
self.entities_emb.append(entities_emb)
self.mention_emb.append(mention_emb)
self.labels.append(item['label'])
def __getitem__(self, i):
return self.row_graphs[i], self.row_features[i], \
self.col_graphs[i], self.col_features[i], \
self.abstracts[i], self.entities_emb[i], \
self.mention_emb[i], self.labels[i]
def __len__(self):
return len(self.abstracts)
if __name__ == '__main__':
pass
| null |
code/nn_models/data.py
|
data.py
|
py
| 8,928 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pickle.load",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "random.shuffle",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "random.shuffle",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "random.shuffle",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "dgl.DGLGraph",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "random.shuffle",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "random.shuffle",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "random.shuffle",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.Dataset",
"line_number": 164,
"usage_type": "name"
},
{
"api_name": "tqdm.tqdm",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.Dataset",
"line_number": 201,
"usage_type": "name"
},
{
"api_name": "tqdm.tqdm",
"line_number": 222,
"usage_type": "call"
}
] |
295911800
|
from pyspark import SparkContext
from pyspark.python.pyspark.shell import spark
from pyspark.sql import SQLContext
sc = SparkContext(appName="query1")
sqlContext = SQLContext(sc)
lineitem = spark.read.parquet("hdfs://namenode:8020/hossein-parquet-data/lineitem.parquet")
from datetime import datetime
from datetime import timedelta
from pyspark.sql import functions as F
old_time = '1998-12-01'
new_time = datetime.strptime(old_time, '%Y-%m-%d') - timedelta(days=120)
new_time = str(new_time.date())
fun1 = lambda x, y: x * (1 - y)
fun2 = lambda x, y, z: x * (1 - y) * (1 + z)
query1 = lineitem.filter(lineitem.L_SHIPDATE <= new_time) \
.groupBy(lineitem.L_RETURNFLAG, lineitem.L_LINESTATUS) \
.agg(F.sum(lineitem.L_QUANTITY).alias('sum_qty'),
F.sum(lineitem.L_EXTENDEDPRICE).alias('sum_base_price'),
F.sum(fun1(lineitem.L_EXTENDEDPRICE, lineitem.L_DISCOUNT)).alias('sum_disc_price'),
F.sum(fun2(lineitem.L_EXTENDEDPRICE, lineitem.L_DISCOUNT, lineitem.L_TAX).alias('sum_charge')),
F.avg(lineitem.L_QUANTITY).alias('avg_qty'),
F.avg(lineitem.L_EXTENDEDPRICE).alias('avg_price'),
F.avg(lineitem.L_DISCOUNT).alias('avg_disc'),
F.count('*').alias('count_order')) \
.sort(lineitem.L_RETURNFLAG, lineitem.L_LINESTATUS)
query1.show()
| null |
OLAP/spark_query/query_in_parquet_format/spark_query1.py
|
spark_query1.py
|
py
| 1,307 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pyspark.SparkContext",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.SQLContext",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pyspark.python.pyspark.shell.spark.read.parquet",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pyspark.python.pyspark.shell.spark.read",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "pyspark.python.pyspark.shell.spark",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "datetime.timedelta",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.functions.sum",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.functions",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "pyspark.sql.functions.sum",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.functions",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "pyspark.sql.functions.sum",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.functions",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "pyspark.sql.functions.sum",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.functions",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "pyspark.sql.functions.avg",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.functions",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "pyspark.sql.functions.avg",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.functions",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "pyspark.sql.functions.avg",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.functions",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "pyspark.sql.functions.count",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.functions",
"line_number": 29,
"usage_type": "name"
}
] |
526655981
|
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.md file in the project root
# for full license information.
# ==============================================================================
from __future__ import print_function
import os
import math
import argparse
import numpy as np
import _cntk_py
import cntk.io.transforms as xforms
import cntk as C
from cntk.logging import *
from cntk.ops import placeholder, minus, constant, relu
from cntk.train.distributed import data_parallel_distributed_learner, Communicator
from cntk.io import ImageDeserializer, MinibatchSource, StreamDef, StreamDefs, FULL_DATA_SWEEP
from cntk.layers import Convolution2D, Activation, MaxPooling, Dense, Dropout, default_options, Sequential, For
from cntk.initializer import normal
from cntk.train.training_session import *
# default Paths relative to current python file.
abs_path = os.path.dirname(os.path.abspath(__file__))
data_path = "/scratch/andpic/imageNet/train" #os.path.join(abs_path, "..", "..", "..", "DataSets", "ImageNet")
model_path = os.path.join(abs_path, "Models")
log_dir = None
# model dimensions
image_height = 224
image_width = 224
num_channels = 3 # RGB
num_classes = 1000
model_name = "VGG16.model"
# Create a minibatch source.
def create_image_mb_source(map_file, is_training, total_number_of_samples):
if not os.path.exists(map_file):
raise RuntimeError("File '%s' does not exist." %map_file)
# transformation pipeline for the features has jitter/crop only when training
transforms = []
if is_training:
transforms += [
xforms.crop(crop_type='randomside', side_ratio=(0.4375, 0.875), jitter_type='uniratio') # train uses jitter
]
else:
transforms += [
xforms.crop(crop_type='center', side_ratio=0.5833333) # test has no jitter
]
transforms += [
xforms.scale(width=image_width, height=image_height, channels=num_channels, interpolations='linear'),
]
# deserializer
return MinibatchSource(
ImageDeserializer(map_file, StreamDefs(
features = StreamDef(field='image', transforms=transforms), # first column in map file is referred to as 'image'
labels = StreamDef(field='label', shape=num_classes))), # and second as 'label'
randomize = is_training,
max_samples=total_number_of_samples,
multithreaded_deserializer = True)
# Create the network.
def create_vgg16():
# Input variables denoting the features and label data
feature_var = C.input_variable((num_channels, image_height, image_width))
label_var = C.input_variable((num_classes))
# apply model to input
# remove mean value
input = minus(feature_var, constant([[[104]], [[117]], [[124]]]), name='mean_removed_input')
with default_options(activation=None, pad=True, bias=True):
z = Sequential([
# we separate Convolution and ReLU to name the output for feature extraction (usually before ReLU)
For(range(2), lambda i: [
Convolution2D((3,3), 64, name='conv1_{}'.format(i)),
Activation(activation=relu, name='relu1_{}'.format(i)),
]),
MaxPooling((2,2), (2,2), name='pool1'),
For(range(2), lambda i: [
Convolution2D((3,3), 128, name='conv2_{}'.format(i)),
Activation(activation=relu, name='relu2_{}'.format(i)),
]),
MaxPooling((2,2), (2,2), name='pool2'),
For(range(3), lambda i: [
Convolution2D((3,3), 256, name='conv3_{}'.format(i)),
Activation(activation=relu, name='relu3_{}'.format(i)),
]),
MaxPooling((2,2), (2,2), name='pool3'),
For(range(3), lambda i: [
Convolution2D((3,3), 512, name='conv4_{}'.format(i)),
Activation(activation=relu, name='relu4_{}'.format(i)),
]),
MaxPooling((2,2), (2,2), name='pool4'),
For(range(3), lambda i: [
Convolution2D((3,3), 512, name='conv5_{}'.format(i)),
Activation(activation=relu, name='relu5_{}'.format(i)),
]),
MaxPooling((2,2), (2,2), name='pool5'),
Dense(4096, name='fc6'),
Activation(activation=relu, name='relu6'),
Dropout(0.5, name='drop6'),
Dense(4096, name='fc7'),
Activation(activation=relu, name='relu7'),
Dropout(0.5, name='drop7'),
Dense(num_classes, name='fc8')
])(input)
# loss and metric
ce = C.cross_entropy_with_softmax(z, label_var)
pe = C.classification_error(z, label_var)
pe5 = C.classification_error(z, label_var, topN=5)
log_number_of_parameters(z) ; print()
return {
'feature': feature_var,
'label': label_var,
'ce' : ce,
'pe' : pe,
'pe5': pe5,
'output': z
}
# Create trainer
def create_trainer(network, epoch_size, num_quantization_bits, progress_printer):
# Set learning parameters
lr_per_mb = [0.01] #[0.01]*20 + [0.001]*20 + [0.0001]*20 + [0.00001]*10 + [0.000001]
lr_schedule = C.learning_parameter_schedule(lr_per_mb, epoch_size=epoch_size)
mm_schedule = C.learners.momentum_schedule(0.9)
l2_reg_weight = 0.0005 # CNTK L2 regularization is per sample, thus same as Caffe
# Create learner
local_learner = C.learners.momentum_sgd(network['output'].parameters, lr_schedule, mm_schedule, unit_gain=False, l2_regularization_weight=l2_reg_weight)
# Since we reuse parameter settings (learning rate, momentum) from Caffe, we set unit_gain to False to ensure consistency
parameter_learner = data_parallel_distributed_learner(
local_learner,
num_quantization_bits=num_quantization_bits,
distributed_after=0)
# Create trainer
return C.Trainer(network['output'], (network['ce'], network['pe']), parameter_learner, progress_printer)
# Train and test
def train_and_test(network, trainer, train_source, test_source, minibatch_size, epoch_size, restore):
# define mapping from intput streams to network inputs
input_map = {
network['feature']: train_source.streams.features,
network['label']: train_source.streams.labels
}
# Train all minibatches
training_session(
trainer=trainer, mb_source=train_source,
model_inputs_to_streams=input_map,
mb_size=minibatch_size,
progress_frequency=epoch_size,
checkpoint_config=CheckpointConfig(filename=os.path.join(model_path, model_name), restore=restore),
# test_config=TestConfig(minibatch_source=test_source, minibatch_size=minibatch_size)
).train()
# Train and evaluate the network.
def vgg16_train_and_eval(train_data, test_data, num_quantization_bits=32, minibatch_size=128, epoch_size = 1281167, max_epochs=80,
restore=True, log_to_file=None, num_mbs_per_log=None, gen_heartbeat=False, testing=False):
_cntk_py.set_computation_network_trace_level(0)
progress_printer = ProgressPrinter(
freq=num_mbs_per_log,
tag='Training',
log_to_file=log_to_file,
rank=Communicator.rank(),
gen_heartbeat=gen_heartbeat,
num_epochs=max_epochs)
network = create_vgg16()
trainer = create_trainer(network, epoch_size, num_quantization_bits, progress_printer)
train_source = create_image_mb_source(train_data, True, total_number_of_samples=max_epochs * epoch_size)
if testing:
# reduce number of samples for validation when testing
num_of_validation_samples = max_epochs * epoch_size * 10
else:
num_of_validation_samples = FULL_DATA_SWEEP
test_source = create_image_mb_source(test_data, False, total_number_of_samples=num_of_validation_samples)
train_and_test(network, trainer, train_source, test_source, minibatch_size, epoch_size, restore)
if __name__=='__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-datadir', '--datadir', help='Data directory where the ImageNet dataset is located', required=False, default=data_path)
parser.add_argument('-outputdir', '--outputdir', help='Output directory for checkpoints and models', required=False, default=None)
parser.add_argument('-logdir', '--logdir', help='Log file', required=False, default=None)
parser.add_argument('-n', '--num_epochs', help='Total number of epochs to train', type=int, required=False, default='80')
parser.add_argument('-m', '--minibatch_size', help='Minibatch size', type=int, required=False, default='128')
parser.add_argument('-e', '--epoch_size', help='Epoch size', type=int, required=False, default='1281167')
parser.add_argument('-q', '--quantized_bits', help='Number of quantized bits used for gradient aggregation', type=int, required=False, default='32')
parser.add_argument('-r', '--restart', help='Indicating whether to restart from scratch (instead of restart from checkpoint file by default)', action='store_true')
parser.add_argument('-device', '--device', type=int, help="Force to run the script on a specified device", required=False, default=None)
parser.add_argument('-testing', '--testing', help='Indicate if running for testing purposes (validation only done in a portion of the test dataset)', action='store_true')
args = vars(parser.parse_args())
if args['outputdir'] is not None:
model_path = args['outputdir'] + "/models"
if args['datadir'] is not None:
data_path = args['datadir']
if args['logdir'] is not None:
log_dir = args['logdir']
if args['device'] is not None:
if args['device'] == -1:
C.device.try_set_default_device(C.device.cpu())
else:
C.device.try_set_default_device(C.device.gpu(args['device']))
if not os.path.isdir(data_path):
raise RuntimeError("Directory %s does not exist" % data_path)
train_data=os.path.join(data_path, 'train_map.txt')
test_data=os.path.join(data_path, 'train_map.txt')
vgg16_train_and_eval(train_data, test_data,
minibatch_size=args['minibatch_size'],
epoch_size=args['epoch_size'],
num_quantization_bits=args['quantized_bits'],
max_epochs=args['num_epochs'],
restore=not args['restart'],
log_to_file=args['logdir'],
num_mbs_per_log=200,
gen_heartbeat=True,
testing=args['testing'])
# Must call MPI finalize when process exit without exceptions
Communicator.finalize()
| null |
cntk/vgg16/VGG16_ImageNet_Distributed.py
|
VGG16_ImageNet_Distributed.py
|
py
| 10,779 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.path.dirname",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "cntk.io.transforms.crop",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "cntk.io.transforms",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "cntk.io.transforms.crop",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "cntk.io.transforms",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "cntk.io.transforms.scale",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "cntk.io.transforms",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "cntk.io.MinibatchSource",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "cntk.io.ImageDeserializer",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "cntk.io.StreamDefs",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "cntk.io.StreamDef",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "cntk.io.StreamDef",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "cntk.input_variable",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "cntk.input_variable",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "cntk.ops.minus",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "cntk.ops.constant",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "cntk.layers.default_options",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "cntk.layers.Sequential",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "cntk.layers.For",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "cntk.layers.Convolution2D",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "cntk.layers.Activation",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "cntk.ops.relu",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "cntk.layers.MaxPooling",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "cntk.layers.For",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "cntk.layers.Convolution2D",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "cntk.layers.Activation",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "cntk.ops.relu",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "cntk.layers.MaxPooling",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "cntk.layers.For",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "cntk.layers.Convolution2D",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "cntk.layers.Activation",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "cntk.ops.relu",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "cntk.layers.MaxPooling",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "cntk.layers.For",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "cntk.layers.Convolution2D",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "cntk.layers.Activation",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "cntk.ops.relu",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "cntk.layers.MaxPooling",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "cntk.layers.For",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "cntk.layers.Convolution2D",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "cntk.layers.Activation",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "cntk.ops.relu",
"line_number": 106,
"usage_type": "name"
},
{
"api_name": "cntk.layers.MaxPooling",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "cntk.layers.Dense",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "cntk.layers.Activation",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "cntk.ops.relu",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "cntk.layers.Dropout",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "cntk.layers.Dense",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "cntk.layers.Activation",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "cntk.ops.relu",
"line_number": 114,
"usage_type": "name"
},
{
"api_name": "cntk.layers.Dropout",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "cntk.layers.Dense",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "cntk.cross_entropy_with_softmax",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "cntk.classification_error",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "cntk.classification_error",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "cntk.learning_parameter_schedule",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "cntk.learners.momentum_schedule",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "cntk.learners",
"line_number": 140,
"usage_type": "attribute"
},
{
"api_name": "cntk.learners.momentum_sgd",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "cntk.learners",
"line_number": 144,
"usage_type": "attribute"
},
{
"api_name": "cntk.train.distributed.data_parallel_distributed_learner",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "cntk.Trainer",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 169,
"usage_type": "attribute"
},
{
"api_name": "_cntk_py.set_computation_network_trace_level",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "cntk.train.distributed.Communicator.rank",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "cntk.train.distributed.Communicator",
"line_number": 182,
"usage_type": "name"
},
{
"api_name": "cntk.io.FULL_DATA_SWEEP",
"line_number": 194,
"usage_type": "name"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "cntk.device.try_set_default_device",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "cntk.device",
"line_number": 225,
"usage_type": "attribute"
},
{
"api_name": "cntk.device.cpu",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "cntk.device.try_set_default_device",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "cntk.device",
"line_number": 227,
"usage_type": "attribute"
},
{
"api_name": "cntk.device.gpu",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 229,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 229,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 232,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 232,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 233,
"usage_type": "attribute"
},
{
"api_name": "cntk.train.distributed.Communicator.finalize",
"line_number": 247,
"usage_type": "call"
},
{
"api_name": "cntk.train.distributed.Communicator",
"line_number": 247,
"usage_type": "name"
}
] |
442342683
|
from .AbstractPoolingPyramid import AbstractPoolingPyramid
import scipy.sparse
import pyamg
import numpy as np
#from graphcnn.util.modelnet.pointCloud2Graph import ply2graph
import tensorflow as tf
class LloydPoolingPyramid(AbstractPoolingPyramid):
def __init__(self,numRepresentations,companderConstructor, ratios):
super(LloydPoolingPyramid, self).__init__(numRepresentations,companderConstructor)
self.ratios = ratios
def makeP(self,A,V=None):
Plist = []
companderInstance = self.companderConstructor(V,A)
for pIndex in range(self.numRepresentations):
P = pyamg.aggregation.aggregate.lloyd_aggregation(\
scipy.sparse.csr_matrix(companderInstance.contractA()),ratio=self.ratios[pIndex],distance='same',maxiter=10)[0]
P = P.todense()
Pcolsum = np.tile(np.count_nonzero(P,axis=0),(P.shape[0],1))
Pcolsum[Pcolsum == 0] = 1
P = np.divide(P,Pcolsum.astype(np.float64))
Plist.append(P.astype(np.float32))
#print(P.shape)
companderInstance.update(P)
A = companderInstance.expandA()
V = companderInstance.V
return Plist
def write(self,Ps,As):
AsparseList = []
for A in As:
currentA = A.tolist()
pass
| null |
src/graphcnn/util/pooling/LloydPoolingPyramid.py
|
LloydPoolingPyramid.py
|
py
| 1,331 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "AbstractPoolingPyramid.AbstractPoolingPyramid",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "pyamg.aggregation.aggregate.lloyd_aggregation",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pyamg.aggregation",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "scipy.sparse.sparse.csr_matrix",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "scipy.sparse.sparse",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "scipy.sparse",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "numpy.tile",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.count_nonzero",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.divide",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.float64",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "numpy.float32",
"line_number": 25,
"usage_type": "attribute"
}
] |
373411794
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 6 17:42:28 2018
@author: Samuele Garda
"""
import argparse
from utils.prepare_shootout import preprocess
from evaluation.evaluation import load_eval_data
def parse_arguments():
"""
Parse command line arguments.
"""
parser = argparse.ArgumentParser(description='Generate text with words to be kept in vocabulary generation for word embeddings models')
parser.add_argument('--eval',required = True, type = str, help = "Path to BenchLS")
parser.add_argument('--out',required = True, type = str, help = "Where to store word list")
return parser.parse_args()
if __name__ == "__main__":
args = parse_arguments()
data = load_eval_data(args.eval)
with open(args.out,'w+') as outfile:
for idx,test_case in data.iterrows():
processed = preprocess(test_case['sent'], pre_tok = True)
target_pos = int(test_case['c_w_p'])
word = processed[target_pos]
outfile.write("{}\n".format(word))
for w in test_case['sub']:
try:
outfile.write("{}\n".format(preprocess(w)[0]))
except IndexError:
pass
| null |
evaluation/gen_keep_vocab.py
|
gen_keep_vocab.py
|
py
| 1,234 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "evaluation.evaluation.load_eval_data",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "utils.prepare_shootout.preprocess",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "utils.prepare_shootout.preprocess",
"line_number": 44,
"usage_type": "call"
}
] |
312280234
|
# coding=utf-8
import argparse
import textwrap
import time
import os, sys
from torch.autograd import Variable
sys.path.append(os.path.dirname(__file__))
from utils.config import process_config, check_config_dict
from utils.logger import ExampleLogger
from trainers.example_model import ExampleModel
from trainers.example_trainer import ExampleTrainer
from data_loader.dataset import get_data_loader
import sys
import utils.global_variable as global_value
import torch
import json
from nets.net_interface import NetModule
config = process_config(os.path.join(os.path.dirname(__file__), 'configs', 'config.json'))
class ImageClassificationPytorch:
def __init__(self, config):
gpu_id = config['gpu_id']
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = gpu_id
print(config)
#check_config_dict(config)
self.config = config
self.init()
def init(self):
# create net
self.model = ExampleModel(self.config)
# load
self.model.load()
# create your data generator
self.train_loader, self.test_loader = get_data_loader(self.config)
# create logger
self.logger = ExampleLogger(self.config)
# create trainer and path all previous components to it
self.trainer = ExampleTrainer(self.model, self.train_loader, self.test_loader, self.config, self.logger)
def run(self):
# here you train your model
self.trainer.train()
def close(self):
# close
self.logger.close()
def main():
global_value._init()
reload(sys)
sys.setdefaultencoding("utf-8")
state_dict=torch.load('/home1/sas/crop_disease_detect-master/val_eval_best.pth')
interface = NetModule(config['model_module_name'], config['model_net_name'])
net = interface.create_model(num_classes=config['num_classes'])
if torch.cuda.is_available():
net.cuda()
net.load_state_dict(state_dict)
net.eval()
#print(net)
train_loader, test_loader = get_data_loader(config)
num=0
f=open("/home1/sas/datasets/ai_challenger_pdr2018_validationset_20180905/AgriculturalDisease_validationset/AgriculturalDisease_validation_annotations.json",'r+')
f_results=open("/home1/sas/datasets/ai_challenger_pdr2018_testA_20180905/AgriculturalDisease_testA/AgriculturalDisease_val_results_val_best_annotations.json",'w')
json_data=json.load(f)
print(len(json_data))
for _, (image, label) in enumerate(test_loader):
image=image.cuda()
image=Variable(image)
labels=net(image)
label=torch.max(labels,1)[1].data.squeeze()
label=int(label)
json_data[_]['disease_class']=label
#print(json_data[_])
num=num+1
dict_json=json.dumps(json_data, ensure_ascii=True, indent=2)
f_results.write(dict_json)
f_results.close()
f.close()
if __name__ == '__main__':
now = time.strftime('%Y-%m-%d | %H:%M:%S', time.localtime(time.time()))
print('----------------------------------------------------------------------')
print('Time: ' + now)
print('----------------------------------------------------------------------')
print(' Now start ...')
print('----------------------------------------------------------------------')
main()
print('----------------------------------------------------------------------')
print(' All Done!')
print('----------------------------------------------------------------------')
print('Start time: ' + now)
print('Now time: ' + time.strftime('%Y-%m-%d | %H:%M:%S', time.localtime(time.time())))
print('----------------------------------------------------------------------')
| null |
test.py
|
test.py
|
py
| 3,771 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "sys.path.append",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "utils.config.process_config",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "trainers.example_model.ExampleModel",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "data_loader.dataset.get_data_loader",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "utils.logger.ExampleLogger",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "trainers.example_trainer.ExampleTrainer",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "utils.global_variable._init",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "utils.global_variable",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "sys.setdefaultencoding",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "torch.load",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "nets.net_interface.NetModule",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "data_loader.dataset.get_data_loader",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "torch.max",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "time.strftime",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "time.localtime",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "time.strftime",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "time.localtime",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 104,
"usage_type": "call"
}
] |
359581538
|
import script_context
import requests
from Stonks.utilities.config import apikey
import pandas as pd
import matplotlib.pyplot as plt
import time
import importlib
import h5py
import os
import sys
import numpy as np
import arrow
def grub(symbol='VIX9D', startdate=1581921000000):
# define endpoint
price_endpoint = r'https://api.tdameritrade.com/v1/marketdata/{}/pricehistory'.format(symbol)
payload = {'apikey': str(apikey),
'periodType': 'month',
'period': int(3),
'frequencyType': 'daily',
'frequency': int(1)
}
content = requests.get(url=price_endpoint, params=payload)
print(content.url)
time.sleep(1) # wait for webpage to load
prelim_data = content.json()
print(prelim_data)
try:
symbol = prelim_data['symbol']
data = pd.DataFrame.from_dict(prelim_data['candles'])
return True, symbol, data
except KeyError:
print('symbol {} is invalid'.format(symbol))
return False, None, None
if __name__ == '__main__':
# symbol, data = grub(symbol='SPY')
grub_targets = ['VIX', 'VIX9D']
'''File Handling'''
filename = 'D:/StockData/S&P_500_VIX_{}'.format(str(arrow.now('America/New_York').date()))
if not os.path.exists(filename):
print('creating datafile:')
print(filename)
datafile = h5py.File(filename)
else:
print('Data file already exists!')
print('exiting program')
sys.exit()
successful_grubs = 0
for grubbie in grub_targets:
success, symbol, data = grub(symbol=grubbie)
successful_grubs += 1
if success:
local_group = datafile.create_group(str(symbol))
# print(local_group.name)
for key in data.keys():
local_dataset = local_group.create_dataset(name=key, shape=data[key].shape, dtype=np.float64)
# print(local_dataset.name)
local_dataset[...] = data[key]
if key == 'datetime':
local_dataset = local_group.create_dataset(name='market_hours', shape=data[key].shape,
dtype=np.float64)
print('successfully grubbed {}'.format(symbol))
print('successfully grubbed {} tickers'.format(successful_grubs))
datafile.close()
| null |
Stonks/DataGrubbing/VIX_grubbing.py
|
VIX_grubbing.py
|
py
| 2,388 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "Stonks.utilities.config.apikey",
"line_number": 20,
"usage_type": "argument"
},
{
"api_name": "requests.get",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame.from_dict",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "arrow.now",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "h5py.File",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "numpy.float64",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "numpy.float64",
"line_number": 73,
"usage_type": "attribute"
}
] |
374192318
|
import base64
import json
import boto3
import os
import uuid
ROUTING_KEY = "delivery-receipts"
def lambda_handler(event, context):
sqs = boto3.resource('sqs')
queue = sqs.get_queue_by_name(
QueueName=f"{os.getenv('NOTIFICATION_QUEUE_PREFIX')}{ROUTING_KEY}"
)
for record in event["Records"]:
task = {
"task": "process-ses-result",
"id": str(uuid.uuid4()),
"args": [
{
"Message": record["Sns"]["Message"]
}
],
"kwargs": {},
"retries": 0,
"eta": None,
"expires": None,
"utc": True,
"callbacks": None,
"errbacks": None,
"timelimit": [
None,
None
],
"taskset": None,
"chord": None
}
envelope = {
"body": base64.b64encode(bytes(json.dumps(task), 'utf-8')).decode("utf-8"),
"content-encoding": "utf-8",
"content-type": "application/json",
"headers": {},
"properties": {
"reply_to": str(uuid.uuid4()),
"correlation_id": str(uuid.uuid4()),
"delivery_mode": 2,
"delivery_info": {
"priority": 0,
"exchange": "default",
"routing_key": ROUTING_KEY
},
"body_encoding": "base64",
"delivery_tag": str(uuid.uuid4())
}
}
msg = base64.b64encode(bytes(json.dumps(envelope), 'utf-8')).decode("utf-8")
queue.send_message(MessageBody=msg)
return {
'statusCode': 200
}
| null |
lambda_functions/ses_callback/ses_callback_lambda.py
|
ses_callback_lambda.py
|
py
| 1,746 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "boto3.resource",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "uuid.uuid4",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "base64.b64encode",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "uuid.uuid4",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "uuid.uuid4",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "uuid.uuid4",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "base64.b64encode",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 57,
"usage_type": "call"
}
] |
225214698
|
import serial
import struct
import sys
import operator
import argparse
import binascii
import time
#VRCSR protocol defines
SYNC_REQUEST = 0x5FF5
SYNC_RESPONSE = 0x0FF0
PROTOCOL_VRCSR_HEADER_SIZE = 6
PROTOCOL_VRCSR_XSUM_SIZE = 4
#CSR Address for sending an application specific custom command
ADDR_CUSTOM_COMMAND = 0xF0
#The command to send.
#The Propulsion command has a payload format of:
# 0xAA R_ID THRUST_0 THRUST_1 THRUST_2 ... THRUST_N
# Where:
# 0xAA is the command byte
# R_ID is the NODE ID of the thruster to respond with data
# THRUST_X is the thruster power value (-1 to 1) for the thruster with motor id X
PROPULSION_COMMAND = 0xAA
#flag for the standard thruster response which contains
RESPONSE_THRUSTER_STANDARD = 0x2
#standard response is the device type followed by 4 32-bit floats and 1 byte
RESPONSE_THRUSTER_STANDARD_LENGTH = 1 + 4 * 4 + 1
#The proppulsion command packets are typically sent as a multicast to a group ID defined for thrusters
THRUSTER_GROUP_ID = 0x81
def main():
#Parse command line arguments for portname, node id, motor id, and thrust values
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--com', help='Comm port to use.', default = 'COM2', dest='portname')
parser.add_argument('-i', '--id', help="Node id for the request packet. The default is the thruster group ID.", default = THRUSTER_GROUP_ID, dest='node_id')
parser.add_argument('-m', '--motor', help="Motor NODE ID from which to get a response.", default = 0, dest='motor_id')
parser.add_argument('thrust', metavar='N', type=float, nargs='*', help='list of thrust settings, in order of motor id. These are power settings and should be in the -1 to 1 range.' )
args = parser.parse_args()
#default to 0 thrust for motor 0 if no thrust parameters are passed in
if (len(args.thrust) == 0):
thrust = [0.0]
else:
thrust = args.thrust
#open the serial port
try:
port = serial.Serial(args.portname,115200)
port.timeout = 1
port.flushInput();
except IOError:
print ("Error: Could not open serial port: " + args.portname)
sys.exit()
#Create the custom command packet for setting the power level to a group of thrusters
#generate the header
flag = RESPONSE_THRUSTER_STANDARD
CSR_address = ADDR_CUSTOM_COMMAND
length = 2 + len(thrust) * 4
header = bytearray(struct.pack('HBBBB',SYNC_REQUEST,int(args.node_id),flag,CSR_address,length))
header_checksum = bytearray(struct.pack('i', binascii.crc32(header)))
#generate the paylaod, limiting the thrust to reasonable values
payload = bytearray(struct.pack('BB', PROPULSION_COMMAND, int(args.motor_id)))
for t in thrust:
t = max(t,-1)
t = min(t, 1)
payload += bytearray(struct.pack('f',t))
payload_checksum = bytearray(struct.pack('i', binascii.crc32(payload)))
#send the packet and wait for a response
packet = header + header_checksum + payload + payload_checksum
#uncomment to dump the request payload
#print (":".join("{:02x}".format(c) for c in packet))
write_time = time.time()
#put the packet on the wire
port.write(bytes(packet))
#get the response
expected_response_length = PROTOCOL_VRCSR_HEADER_SIZE + PROTOCOL_VRCSR_XSUM_SIZE + RESPONSE_THRUSTER_STANDARD_LENGTH + PROTOCOL_VRCSR_XSUM_SIZE
response_buf = port.read(expected_response_length)
print ("Got response: %d bytes" % len(response_buf))
print ("Turnaround time: %f mS" % ((time.time()-write_time) * 1000))
#parse the response
response = struct.unpack('=HBBBB I BffffB I', response_buf)
#uncomment to dump the response buffer
#print (":".join("{:02x}".format(ord(c)) for c in response_buf))
#header data
sync = response[0]
response_node_id = response[1]
flag = response[2]
CSR_address = response[3]
length = response[4]
header_checksum = response[5]
#response device type
device_type = response[6];
#custom response data payload
rpm = response[7]
bus_v = response[8]
bus_i = response[9]
temp = response[10]
fault = response[11]
payload_checksum = response[12]
print ("\nResponse:")
print ("\tSync:\t\t0x%04x" % sync)
print ("\tId:\t\t%d" % response_node_id)
print ("\tFlag:\t\t0x%x" % flag)
print ("\tAddress:\t0x%x" % CSR_address)
print ("\tLength:\t\t0x%x" % length)
print ("\t\tChecksum: 0x%x" % header_checksum)
print ("\n\tDevice Type:\t\t0x%x" % device_type)
print ("\tRPM:\t\t\t%f" % rpm)
print ("\tBus Voltage (V):\t%f" % bus_v)
print ("\tBus Current (A):\t%f" % bus_i)
print ("\tTemp (C):\t\t%f" % temp)
print ("\tFault:\t\t\t0x%x" % fault)
print ("\t\tChecksum: 0x%x" % payload_checksum)
if __name__ == "__main__":
main();
| null |
thruster.py
|
thruster.py
|
py
| 4,959 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "serial.Serial",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "struct.pack",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "struct.pack",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "binascii.crc32",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "struct.pack",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "struct.pack",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "struct.pack",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "binascii.crc32",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "struct.unpack",
"line_number": 100,
"usage_type": "call"
}
] |
291913872
|
#!/usr/bin/python
# modini.py -- modifies ini files from commandline
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# [email protected]
# wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return.
# ----------------------------------------------------------------------------
import configobj
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("filename", help="specify configuration file intended for modification")
parser.add_argument("section", help="section to be modified")
parser.add_argument("label", help="label to be modified in secion")
parser.add_argument("value", help="value to set it to")
args = parser.parse_args()
f = configobj.ConfigObj(args.filename)
if args.value.count(','):
args.value = map(lambda x: x.lstrip().rstrip(), args.value.split(','))
try:
f[args.section][args.label] = args.value
except:
f[args.section] = {}
f[args.section][args.label] = args.value
f.write()
| null |
modini.py
|
modini.py
|
py
| 1,156 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "configobj.ConfigObj",
"line_number": 22,
"usage_type": "call"
}
] |
343979802
|
import gym
from Reinforcement_learning import DDPG
import numpy as np
MAX_EPISODES = 200
MAX_EP_STEPS = 200
LR_A = 0.001 # learning rate for actor
LR_C = 0.002 # learning rate for critic
GAMMA = 0.9 # reward discount
TAU = 0.01 # soft replacement
MEMORY_CAPACITY = 10000
BATCH_SIZE = 32
RENDER = False
ENV_NAME = 'Pendulum-v0'
env = gym.make(ENV_NAME)
env = env.unwrapped
env.seed(1)
s_dim = env.observation_space.shape[0]
a_dim = env.action_space.shape[0]
a_bound = env.action_space.high
ddpg = DDPG(a_dim, s_dim, a_bound)
var = 3 # control exploration
for i in range(MAX_EPISODES):
s = env.reset()
ep_reward = 0
for j in range(MAX_EP_STEPS):
if RENDER:
env.render()
# Add exploration noise
a = ddpg.choose_action(s)
a = np.clip(np.random.normal(a, var), -2, 2) # add randomness to action selection for exploration
s_, r, done, info = env.step(a)
ddpg.store_transition(s, a, r / 10, s_)
if ddpg.pointer > MEMORY_CAPACITY:
var *= .9995 # decay the action randomness
ddpg.learn()
s = s_
ep_reward += r
if j == MAX_EP_STEPS-1:
print('Episode:', i, ' Reward: %i' % int(ep_reward), 'Explore: %.2f' % var, )
if ep_reward > -300:RENDER = True
| null |
build/lib/Reinforcement_learning/test/run_ddpg.py
|
run_ddpg.py
|
py
| 1,322 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "gym.make",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "Reinforcement_learning.DDPG",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.clip",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "numpy.random.normal",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 38,
"usage_type": "attribute"
}
] |
157851199
|
import cv2
import numpy as np
from scipy import sparse
# Read the image
image = cv2.imread('MicrosoftTeams-image (2).png')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Set threshold level
threshold_level = 100
# Find coordinates of all pixels below threshold
coords = np.column_stack(np.where(gray < threshold_level))
x = np.array([coords[:, 1]])
y = np.array([coords[:, 0]])
div = 100000
# Tomorrow, try to change len(x) to the pixel values. which is 273x283 something like this
for i in range(len(x)):
# pair = [x[i], y[i]]
# pair = np.array([pair])
original = np.array(y[i] - x[i])
tran = original.T
dist = np.array([tran * original]) / div
# Make distance matrix
print("distance matrix:\n", dist)
# Gaussian Kernel & Diagonal Degree
for k in range(len(dist)):
diag_deg = []
Gaussian = np.exp(((-dist[k]) ** 2) / 6)
Gaussian = np.array([Gaussian])
diag_deg.append(np.sum(k!=0))
print("Gausian:\n", Gaussian)
print("Diagonal Degree:\n", diag_deg)
# Algorithm num 5
'''
a1 =
'''
'''
# Make Similarity matrix and return Gaussian kernel (apply previous things to this one) then observe is Memory Error solved.
def gkern(kernlen=21, std=3):
"""Returns a 2D Gaussian kernel array."""
gkern1d = signal.gaussian(kernlen, std=std).reshape(kernlen, 1)
gkern2d = np.outer(gkern1d, gkern1d)
return gkern2d
# There is memory problem if I choose len as 29801(# of dist matrix) probably need dim_reduction
print("Gausian Kernel:\n", gkern(276, 1)*273)
'''
cv2.imshow('image', image)
cv2.waitKey()
| null |
matrix_process.py
|
matrix_process.py
|
py
| 1,613 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "cv2.imread",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "numpy.column_stack",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 56,
"usage_type": "call"
}
] |
41317362
|
import math
import fractions
n= int(input())
while(n > 0 ):
st = (input())
L = st.split();
a = int(L[0])
b = int (L[1])
cd = fractions.gcd(a, b)
print(cd)
| null |
পাইথন /gcd.py
|
gcd.py
|
py
| 180 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "fractions.gcd",
"line_number": 9,
"usage_type": "call"
}
] |
164350404
|
#coding: utf-8
import jieba
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer
texts=[]
for i in range(3):
with open('text'+str(i+1)+'.txt') as fr:
texts+=fr.readlines()
# print(texts[0])
def creadstoplist():
stwlist = [line.strip()
for line in open('stopwords.txt', 'r', encoding='utf-8').readlines()]
return stwlist
stwlist = creadstoplist()
def deletestop(wordlist):
outlist = []
for word in wordlist:
if word not in stwlist:
outlist.append(word)
return outlist
def fenci(text):
doc = jieba.cut(text,cut_all=False)
return doc
docs = [fenci(i) for i in texts]
docs_1 = [deletestop(i) for i in docs]
# print(list(docs_1))
texts = []
for i, count in enumerate(docs_1):
count = ' '.join(count)
texts.append(count)
# print(texts)
vectorizer = CountVectorizer() # 该类会将文本中的词语转换为词频矩阵,矩阵元素a[i][j] 表示j词在i类文本下的词频
transformer = TfidfTransformer() # 该类会统计每个词语的tf-idf权值
tfidf = transformer.fit_transform(vectorizer.fit_transform(texts))
weight = tfidf.toarray()
word = vectorizer.get_feature_names()
for i in range(len(weight)):
print('第'+str(i+1)+'个文档中词语对应的TF-IDF值:')
score = dict(zip(word, weight[i]))
# print(score)
print('dddddddddddddddddddddddddddddddddddddd')
sorted_words = sorted(score.items(), key=lambda x: x[1], reverse=True) # x: x[1]根据第二个元素排序
# print(sorted_words)
for i in sorted_words[:3]:
print(i)
| null |
tf-idf/tf-idf_chinese_2.py
|
tf-idf_chinese_2.py
|
py
| 1,685 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "jieba.cut",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "sklearn.feature_extraction.text.CountVectorizer",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "sklearn.feature_extraction.text.TfidfTransformer",
"line_number": 38,
"usage_type": "call"
}
] |
289446614
|
import numpy as np
get_ipython().run_line_magic('matplotlib', 'inline')
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.svm import SVR
from qiskit import Aer
from qiskit.aqua import QuantumInstance
from qiskit.aqua.components.feature_maps import SecondOrderExpansion
from tools import construct_kernel_matrix
datasize = 20
np.random.seed(1)
feature_dim = 1
x_train = np.sort(np.random.random([datasize, 1]))
y_train = np.sin(2*np.pi*x_train).ravel() + np.random.normal(0.0, 0.25, datasize)
x_test = np.arange(0.0, 1.0, 0.1).reshape(-1,1)
y_test = np.sin(2*np.pi*x_test).ravel()
plt.plot(x_train, y_train, "o", label="train")
plt.plot(x_test, y_test, "o", label="test")
plt.grid()
plt.legend()
plt.savefig("data_orig.png")
plt.show()
backend = Aer.get_backend('qasm_simulator')
quantum_instance = QuantumInstance(backend, shots=1024, seed_simulator=42, seed_transpiler=42)
feature_map = SecondOrderExpansion(feature_dimension=feature_dim, depth=2, entanglement="linear")
mat_train = construct_kernel_matrix(x_train, quantum_instance, feature_map, num_qubits=feature_dim)
plt.imshow(mat_train)
plt.show()
model = SVR(C=1.0, kernel="precomputed")
model.fit(mat_train, y_train)
model_classical = SVR(C=1.0,kernel='rbf')
model_classical.fit(x_train,y_train)
mat_test = construct_kernel_matrix(x_train, quantum_instance, feature_map, num_qubits=x_train.shape[1], x2_vec=x_test)
mat_grid = construct_kernel_matrix(x_train, quantum_instance, feature_map, num_qubits=feature_dim, x2_vec=x_test)
y_pred = model.predict(mat_test.T)
x_true = np.arange(0.0, 1.0, 0.01)
plt.plot(x_train, y_train, "o", label="train")
plt.plot(x_test, y_pred, "-", label="predict_QSVR")
y_predict = model_classical.predict(x_test)
x_true = np.arange(0.0, 1.0, 0.01)
plt.plot(x_test,y_predict,'c:', label='RBF kernel')
plt.grid()
plt.legend()
plt.xlabel('x')
plt.ylabel("y")
plt.savefig("reg_test.png")
plt.show()
| null |
non-linear.py
|
non-linear.py
|
py
| 1,925 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "numpy.random.seed",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "numpy.sort",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.random.random",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "numpy.sin",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.normal",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "numpy.arange",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.grid",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "qiskit.Aer.get_backend",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "qiskit.Aer",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "qiskit.aqua.QuantumInstance",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "qiskit.aqua.components.feature_maps.SecondOrderExpansion",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "tools.construct_kernel_matrix",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "sklearn.svm.SVR",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "sklearn.svm.SVR",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "tools.construct_kernel_matrix",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "tools.construct_kernel_matrix",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "numpy.arange",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.grid",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 49,
"usage_type": "name"
}
] |
345516899
|
#!/usr/bin/env python
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Flatten, TimeDistributed, Reshape
from keras.layers import LSTM, Conv2D, MaxPooling2D, Activation
from keras.preprocessing.image import img_to_array, array_to_img
from keras.preprocessing import sequence
import time
def _load_data():
data=np.load('autorally_data.npz', encoding = 'bytes')
#500 datapoints in total
img = data['observations'][()][b'img_left'] #64*128*3
actions = data['actions']
return img, actions
def main():
#-------------------------------------Model---------------------------------------------------#
model = Sequential()
# define CNN model
model.add(TimeDistributed(Conv2D(32, kernel_size=(3, 3),activation='relu'), input_shape=(None, 64, 128, 3)))
model.add(TimeDistributed(Conv2D(32, kernel_size=(3, 3),activation='relu')))
model.add(TimeDistributed(MaxPooling2D(pool_size=(2, 2))))
model.add(TimeDistributed(Conv2D(64, kernel_size=(3, 3),activation='relu')))
model.add(TimeDistributed(Conv2D(64, kernel_size=(3, 3),activation='relu')))
model.add(TimeDistributed(MaxPooling2D(pool_size=(2, 2))))
model.add(TimeDistributed(Conv2D(128, kernel_size=(3, 3),activation='relu')))
model.add(TimeDistributed(Conv2D(128, kernel_size=(3, 3),activation='relu')))
model.add(TimeDistributed(MaxPooling2D(pool_size=(2, 2))))
model.add(TimeDistributed(Flatten()))
# define LSTM model
model.add(LSTM(128, return_sequences = True))
model.add(LSTM(64, return_sequences = True))
model.add(LSTM(32))
model.add(Dense(32))
model.add(Dense(2))
#model.add(Activation('tanh'))
# compile model
model.compile(loss='mean_squared_error', optimizer='adam',metrics=['accuracy'])
model.summary()
#-----------------------------------Data-----------------------------------------------#
imgs, actions = _load_data()
imgs_input = []
actions_output = []
training_set = 100
past_img_num = 6
predict_step = 10
first_img_num = past_img_num * predict_step
#in this case: 20, 15, 10, 5, 0 -> 25
for i in range(training_set):
imgs_input_delayed = []
actions_input_delayed = []
for j in range(past_img_num):
img = imgs[i + j*predict_step]
img_x = img_to_array(img)
imgs_input_delayed.append(img_x)
imgs_input.append(imgs_input_delayed)
actions_output.append(actions[i+first_img_num])
print("imgs_input shape")
print(np.array(imgs_input).shape[0])
print(np.array(imgs_input).shape[1])
print(np.array(imgs_input).shape[2])
print(np.array(imgs_input).shape[3])
print(np.array(imgs_input).shape[4])
print("actions_output shape")
print(np.array(actions_output).shape[0])
print(np.array(actions_output).shape[1])
evaluation_set = 1
imgs_test_input = []
actions_test_output = []
#start_num = training_set + first_img_num #125
start_num = 200
for i in range(evaluation_set):
print('test images indexes are:')
imgs_input_test_delayed = []
for j in range(past_img_num):
print(start_num+i+(j*predict_step))
img = imgs[start_num+i+(j*predict_step)]
img_x = img_to_array(img)
imgs_input_test_delayed.append(img_x)
imgs_test_input.append(imgs_input_test_delayed)
print('test action index is', start_num+i+first_img_num)
actions_test_output.append(actions[start_num+i+first_img_num])
#---------------------------------------train-------------------------------------------#
start_time = time.time()
model.fit(np.array(imgs_input), np.array(actions_output), batch_size=32, epochs=2)
end_time = time.time()
print('training time: ', end_time - start_time)
#---------------------------------------evaluate----------------------------------------#
#evaluate
stime = time.time()
test_loss, test_accuracy = model.evaluate(np.array(imgs_test_input), np.array(actions_test_output), verbose=True)
etime = time.time()
print('evaluation time: ', etime-stime)
print('test_accuracy: ', test_accuracy)
print('test_loss: ', test_loss)
#-------------------------------------prediction-----------------------------------------#
pre_s_time = time.time()
prediction = model.predict(np.array(imgs_test_input))
pre_e_time = time.time()
print('prediction time is ', pre_e_time - pre_s_time)
print('prediction result is ', prediction)
print('real output is ', actions_test_output)
main()
| null |
readData.py
|
readData.py
|
py
| 4,291 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "numpy.load",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "keras.models.Sequential",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "keras.layers.TimeDistributed",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "keras.layers.Conv2D",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "keras.layers.TimeDistributed",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "keras.layers.Conv2D",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "keras.layers.TimeDistributed",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "keras.layers.MaxPooling2D",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "keras.layers.TimeDistributed",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "keras.layers.Conv2D",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "keras.layers.TimeDistributed",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "keras.layers.Conv2D",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "keras.layers.TimeDistributed",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "keras.layers.MaxPooling2D",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "keras.layers.TimeDistributed",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "keras.layers.Conv2D",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "keras.layers.TimeDistributed",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "keras.layers.Conv2D",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "keras.layers.TimeDistributed",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "keras.layers.MaxPooling2D",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "keras.layers.TimeDistributed",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "keras.layers.Flatten",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "keras.layers.LSTM",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "keras.layers.LSTM",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "keras.layers.LSTM",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "keras.preprocessing.image.img_to_array",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "keras.preprocessing.image.img_to_array",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 121,
"usage_type": "call"
}
] |
380214788
|
# Copyright 2020 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
def pytest_configure(config):
    """Register the custom 'slow' marker so pytest does not warn about it."""
    marker_definition = "slow: mark tests as slow"
    config.addinivalue_line("markers", marker_definition)
def pytest_collection_modifyitems(config, items):
    """Auto-skip tests marked 'slow' unless the user selected tests explicitly.

    When a -k keyword expression or -m marker expression was given, pytest's
    own selection logic takes over and nothing is skipped here.
    """
    if config.option.keyword or config.option.markexpr:
        return  # let pytest handle this
    skip_slow = pytest.mark.skip(reason='slow marker not selected')
    for test_item in items:
        if 'slow' in test_item.keywords:
            test_item.add_marker(skip_slow)
| null |
dev_tools/conftest.py
|
conftest.py
|
py
| 1,082 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pytest.mark.skip",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 28,
"usage_type": "attribute"
}
] |
161618604
|
from flask import Flask
from flask import render_template
from flask import Response, request, jsonify
import re
import copy
# Flask application instance serving the Korean-learning site.
app = Flask(__name__)
# Id of the word list currently loaded for study.
# NOTE(review): starts at 1 even though `dataset` below already holds Ids 1-3,
# so /add_entry can mint duplicate Ids -- confirm this is intended.
current_id = 1
# Word lists the user has completed (appended by /update_achievement).
word_learned_lists = []
# non_ppc_people = [
# "Phyllis",
# "Dwight",
# "Oscar",
# "Creed",
# "Pam",
# "Jim",
# "Stanley",
# "Michael",
# "Kevin",
# "Kelly"
# ]
# ppc_people = [
# "Angela"
# ]
dataset = [
{
'Id': 1,
'List_name': "Word List1: People",
'Poster': "https://www.goethe-verlag.com/book2/_bilder/001.jpg",
'Words': [
{
'Index': 0,
'Korean': "저",
'English': "I",
'Pronunciation': "jeo",
'Audio': "https://www.book2.nl/book2/KO/SOUND/0003.mp3"
},
{
'Index': 1,
'Korean': "저와 당신",
'English': "I and you",
'Pronunciation': "jeowa dangsin",
'Audio': "https://www.book2.nl/book2/KO/SOUND/0004.mp3"
},
{
'Index': 2,
'Korean': "우리 둘 다",
'English': "both of us",
'Pronunciation': "uli dul da",
'Audio': "https://www.book2.nl/book2/KO/SOUND/0005.mp3"
},
{
'Index': 3,
'Korean': "그",
'English': "he",
'Pronunciation': "geu",
'Audio': "https://www.book2.nl/book2/KO/SOUND/0006.mp3"
},
{
'Index': 4,
'Korean': "그와 그녀",
'English': "he and she",
'Pronunciation': "geuwa geunyeo",
'Audio': "https://www.book2.nl/book2/KO/SOUND/0007.mp3"
},
{
'Index': 5,
'Korean': "그들 둘 다",
'English': "they both",
'Pronunciation': "geudeul dul da",
'Audio': "https://www.book2.nl/book2/KO/SOUND/0008.mp3"
},
{
'Index': 6,
'Korean': "남자",
'English': "the man",
'Pronunciation': "namja",
'Audio': "https://www.book2.nl/book2/KO/SOUND/0009.mp3"
},
{
'Index': 7,
'Korean': "여자",
'English': "the woman",
'Pronunciation': "yeoja",
'Audio': "https://www.book2.nl/book2/KO/SOUND/0010.mp3"
},
{
'Index': 8,
'Korean': "아이",
'English': "the child",
'Pronunciation': "ai",
'Audio': "https://www.book2.nl/book2/KO/SOUND/0011.mp3"
},
{
'Index': 9,
'Korean': "가족",
'English': "a family",
'Pronunciation': "gajog",
'Audio': "https://www.book2.nl/book2/KO/SOUND/0012.mp3"
},
{
'Index': 10,
'Korean': "저의 가족",
'English': "my family",
'Pronunciation': "jeoui gajog",
'Audio': "https://www.book2.nl/book2/KO/SOUND/0013.mp3"
},
{
'Index': 11,
'Korean': "저는 여기 있어요",
'English': "I am here.",
'Pronunciation': "jeoneun yeogi iss-eoyo",
'Audio': "https://www.book2.nl/book2/KO/SOUND/0015.mp3"
}
]
},
{
'Id': 2,
'List_name': "Word List2: Family Members",
'Poster': "https://www.goethe-verlag.com/book2/_bilder/002.jpg",
'Words': [
{
'Index': 0,
'Korean': "할아버지",
'English': "the grandfather",
'Pronunciation': "hal-abeoji",
'Audio': "https://www.book2.nl/book2/KO/SOUND/0023.mp3"
},
{
'Index': 1,
'Korean': "할머니",
'English': "the grandmother",
'Pronunciation': "halmeoni",
'Audio': "https://www.book2.nl/book2/KO/SOUND/0024.mp3"
},
{
'Index': 2,
'Korean': "아버지",
'English': "the father",
'Pronunciation': "abeoji",
'Audio': "https://www.book2.nl/book2/KO/SOUND/0026.mp3"
},
{
'Index': 3,
'Korean': "어머니",
'English': "the mother",
'Pronunciation': "eomeoni",
'Audio': "https://www.book2.nl/book2/KO/SOUND/0027.mp3"
},
{
'Index': 4,
'Korean': "아들",
'English': "the son",
'Pronunciation': "adeul",
'Audio': "https://www.book2.nl/book2/KO/SOUND/0029.mp3"
},
{
'Index': 5,
'Korean': "딸",
'English': "the daughter",
'Pronunciation': "ttal",
'Audio': "https://www.book2.nl/book2/KO/SOUND/0030.mp3"
},
{
'Index': 6,
'Korean': "형 / 오빠 / 남동생",
'English': "the brother",
'Pronunciation': "hyeong / oppa / namdongsaeng",
'Audio': "https://www.book2.nl/book2/KO/SOUND/0032.mp3"
},
{
'Index': 7,
'Korean': "누나 / 언니 / 여동생",
'English': "the sister",
'Pronunciation': "nuna / eonni / yeodongsaeng",
'Audio': "https://www.book2.nl/book2/KO/SOUND/0033.mp3"
},
{
'Index': 8,
'Korean': "삼촌",
'English': "the uncle",
'Pronunciation': "samchon",
'Audio': "https://www.book2.nl/book2/KO/SOUND/0035.mp3"
},
{
'Index': 9,
'Korean': "이모 / 고모",
'English': "the aunt",
'Pronunciation': "imo / gomo",
'Audio': "https://www.book2.nl/book2/KO/SOUND/0036.mp3"
},
{
'Index': 10,
'Korean': "우리는 가족이에요.",
'English': "We are a family.",
'Pronunciation': "ulineun gajog-ieyo.",
'Audio': "https://www.book2.nl/book2/KO/SOUND/0038.mp3"
},
{
'Index': 11,
'Korean': "가족이 작지 않아요.",
'English': "The family is not small.",
'Pronunciation': "gajog-i jagji anh-ayo.",
'Audio': "https://www.book2.nl/book2/KO/SOUND/0039.mp3"
}
]
},
{
'Id': 3,
'List_name': "Word List3: Getting to know others",
'Poster': "https://www.goethe-verlag.com/book2/_bilder/003.jpg",
'Words': [
{
'Index': 0,
'Korean': "안녕!",
'English': "Hi!",
'Pronunciation': "annyeong!",
'Audio': "https://www.book2.nl/book2/KO/SOUND/0043.mp3"
},
{
'Index': 1,
'Korean': "안녕하세요!",
'English': "Hello!",
'Pronunciation': "annyeonghaseyo!",
'Audio': "https://www.book2.nl/book2/KO/SOUND/0044.mp3"
},
{
'Index': 2,
'Korean': "잘 지내세요?",
'English': "How are you?",
'Pronunciation': "jal jinaeseyo?",
'Audio': "https://www.book2.nl/book2/KO/SOUND/0045.mp3"
},
{
'Index': 3,
'Korean': "당신은 유럽에서 오셨어요?",
'English': "Do you come from Europe?",
'Pronunciation': "dangsin-eun yuleob-eseo osyeoss-eoyo?",
'Audio': "https://www.book2.nl/book2/KO/SOUND/0046.mp3"
},
{
'Index': 4,
'Korean': "당신은 미국에서 오셨어요?",
'English': "Do you come from America?",
'Pronunciation': "dangsin-eun migug-eseo osyeoss-eoyo?",
'Audio': "https://www.book2.nl/book2/KO/SOUND/0047.mp3"
},
{
'Index': 5,
'Korean': "당신은 아시아에서 오셨어요?",
'English': "Do you come from Asia?",
'Pronunciation': "dangsin-eun asia-eseo osyeoss-eoyo?",
'Audio': "https://www.book2.nl/book2/KO/SOUND/0048.mp3"
},
{
'Index': 6,
'Korean': "당신은 어떤 호텔에서 머물러요?",
'English': "In which hotel are you staying?",
'Pronunciation': "dangsin-eun eotteon hotel-eseo meomulleoyo?",
'Audio': "https://www.book2.nl/book2/KO/SOUND/0049.mp3"
},
{
'Index': 7,
'Korean': "당신은 여기 온 지 얼마나 됐어요?",
'English': "How long have you been here for?",
'Pronunciation': "angsin-eun yeogi on ji eolmana dwaess-eoyo?",
'Audio': "https://www.book2.nl/book2/KO/SOUND/0050.mp3"
},
{
'Index': 8,
'Korean': "이곳이 마음에 들어요?",
'English': "Do you like it here?",
'Pronunciation': "igos-i ma-eum-e deul-eoyo?",
'Audio': "https://www.book2.nl/book2/KO/SOUND/0052.mp3"
},
{
'Index': 9,
'Korean': "이곳에 휴가를 오셨어요?",
'English': "Are you here on vacation?",
'Pronunciation': "igos-e hyugaleul osyeoss-eoyo?",
'Audio': "https://www.book2.nl/book2/KO/SOUND/0053.mp3"
},
{
'Index': 10,
'Korean': "언제 저를 한 번 방문하세요!",
'English': "Please do visit me sometime!",
'Pronunciation': "eonje jeoleul han beon bangmunhaseyo!",
'Audio': "https://www.book2.nl/book2/KO/SOUND/0054.mp3"
},
{
'Index': 11,
'Korean': "이것이 제 주소예요.",
'English': "Here is my address.",
'Pronunciation': "igeos-i je jusoyeyo.",
'Audio': "https://www.book2.nl/book2/KO/SOUND/0055.mp3"
}
]
},
]
# NOTE(review): the quiz answer key below is commented out, yet /check_answer
# still reads the global `quiz1_answer`; restore it or guard the lookup.
# quiz1_answer = ['A', 'A', 'A', 'A', 'A', 'A']
# score = 0
# Words currently in the practice pool (presumably "ppc" = practice; verify
# naming).  Populated from `dataset` by /init and /import_word_list.
ppc_people = [
    # {
    #     'Index': 0,
    #     'Korean': "저",
    #     'English': "I",
    #     'Pronunciation': "jeo",
    #     'Audio': "https://www.book2.nl/book2/KO/SOUND/0003.mp3"
    # }
]
# ppc_people = [
#     "저, I",
#     "저와 당신, I and you",
#     "우리 둘 다, both of us",
#     "그, he",
#     "그와 그녀, he and she",
#     "그들 둘 다, they both",
#     "남자, the man",
#     "여자, the woman",
#     "아이, the child"
# ]
# Words the user has marked as learned; moved between pools by
# /move_to_ppc and /move_to_non_ppc.
non_ppc_people = [
]
@app.route('/')
def index(name=None):
    """Render the site layout as the landing page."""
    return render_template('layout.html', name=name)
# @app.route('/init', methods=['GET', 'POST'])
# def init():
# global dataset
#
# # json_data = request.get_json()
# # print("json_data : " + str(json_data))
# return jsonify(dataset = dataset)
@app.route('/init_quiz', methods=['GET', 'POST'])
def init_quiz():
    """Send the quiz page its initial state: learned words, all lists, current id."""
    return jsonify(
        non_ppc_people=non_ppc_people,
        dataset=dataset,
        current_id=current_id,
    )
@app.route('/add_item', methods=['GET', 'POST'])
def add_item():
    """Show the form for creating a new word list."""
    return render_template('add_item.html')
@app.route('/add_entry', methods=['GET', 'POST'])
def add_entry():
    """Create a new word list from the submitted form fields and store it.

    The form arrives as a JSON list of {"name": ..., "value": ...} pairs;
    each pair becomes one key of the new entry.  Returns the entry's new Id.
    """
    global current_id, dataset
    current_id += 1
    entry = {"Id": current_id}
    for field in request.get_json():
        entry[field["name"]] = field["value"]
    dataset.append(entry)
    print(dataset)
    return jsonify(Id=current_id)
# return render_template('view_item.html')
@app.route('/search', methods=['GET', 'POST'])
def search():
    """Show the search page."""
    return render_template('search.html')
@app.route('/search_input', methods=['GET', 'POST'])
def search_input():
    """Case-insensitively search every field of every word list.

    The request body is a JSON string used as a regex pattern; any word list
    in which at least one top-level value matches is returned once.
    """
    # FIX: the original bound the request body to `input`, shadowing the
    # builtin; it also declared `global current_id` / `global dataset`
    # needlessly (both are only read here).
    query = request.get_json()
    print("input: " + str(query))
    pattern = re.compile(query, re.I)
    result = []
    for word_list in dataset:
        # Match against the string form of each top-level value (the nested
        # 'Words' list is rendered as one big string, so word text matches too).
        if any(re.search(pattern, str(value)) for value in word_list.values()):
            result.append(word_list)
    return jsonify(result=result)
@app.route('/view_item', methods=['GET', 'POST'])
def view_item():
    """Show the word-list browsing page."""
    return render_template('view_item.html')
@app.route('/take_quiz', methods=['GET', 'POST'])
def take_quiz():
    """Show the quiz page."""
    return render_template('quiz.html')
@app.route('/return', methods=['GET', 'POST'])
def return_to():
    """Send the user back to the search page."""
    return render_template('search.html')
@app.route('/view_item/<item_id>', methods=['GET', 'POST'])
def view_item_id(item_id=None):
    """Render the detail page for one word list, addressed by its Id."""
    global dataset
    # NOTE(review): assumes a list's Id always equals its position + 1; this
    # breaks once /add_entry mints an out-of-sync Id (current_id starts at 1
    # while the seed data already uses Ids 1-3) -- confirm.
    item = dataset[int(item_id)-1]
    # print(item["Poster"])
    return render_template('item.html', item = item)
@app.route('/init', methods=['GET', 'POST'])
def init():
    """Reset the practice pool to word list 1 and return the full app state."""
    global ppc_people
    # Deep-copy so practising never mutates the canonical dataset.
    ppc_people = copy.deepcopy(dataset[0]["Words"])
    return jsonify(
        ppc_people=ppc_people,
        non_ppc_people=non_ppc_people,
        dataset=dataset,
        word_learned_lists=word_learned_lists,
    )
@app.route('/clear_to_learn', methods=['GET', 'POST'])
def clear_to_learn():
    """Empty the practice pool and return both pools."""
    global ppc_people
    ppc_people = []
    return jsonify(ppc_people=ppc_people, non_ppc_people=non_ppc_people)
@app.route('/clear_learned', methods=['GET', 'POST'])
def clear_learned():
    """Empty the learned-words pool and return both pools."""
    global non_ppc_people
    non_ppc_people = []
    return jsonify(ppc_people=ppc_people, non_ppc_people=non_ppc_people)
@app.route('/move_to_ppc', methods=['GET', 'POST'])
def move_to_ppc(name=None):
    """Move one word from the learned pool back into the practice pool."""
    word = request.get_json()["name"]
    # In-place mutation of the module-level pools; no rebinding, so no
    # `global` declaration is required.
    ppc_people.append(word)
    non_ppc_people.remove(word)
    return jsonify(ppc_people=ppc_people, non_ppc_people=non_ppc_people)
@app.route('/import_word_list', methods=['GET', 'POST'])
def import_word_list(name=None):
    """Load the chosen word list into the practice pool and make it current."""
    global ppc_people, current_id
    list_id = request.get_json()["name"]
    # Deep-copy so practising never mutates the canonical dataset.
    ppc_people = copy.deepcopy(dataset[list_id - 1]["Words"])
    current_id = list_id
    return jsonify(ppc_people=ppc_people, non_ppc_people=non_ppc_people)
@app.route('/move_to_non_ppc', methods=['GET', 'POST'])
def move_to_non_ppc(name=None):
    """Mark one word as learned: move it from the practice pool to the learned pool."""
    word = request.get_json()["name"]
    non_ppc_people.append(word)
    ppc_people.remove(word)
    return jsonify(ppc_people=ppc_people, non_ppc_people=non_ppc_people)
@app.route('/check_answer', methods=['GET', 'POST'])
def check_answer():
    """Grade a submitted quiz and return the number of correct answers.

    The request body is a JSON list of {"name": ..., "value": ...} form
    fields compared positionally against the module-level `quiz1_answer` key.
    """
    # BUG FIX: `quiz1_answer` is commented out at module level, so the
    # original `global quiz1_answer` lookup raised NameError on every
    # request.  Fall back to an empty key (score 0) when none is defined.
    answer_key = globals().get("quiz1_answer", [])
    score = 0
    submitted = request.get_json()
    for index, field in enumerate(submitted):
        if index < len(answer_key) and answer_key[index] == field["value"]:
            score += 1
    return jsonify(score=score)
# return render_template('view_item.html')
@app.route('/update_achievement', methods=['GET', 'POST'])
def update_achievement():
    """Record that the word list named in the request has been completed."""
    payload = request.get_json()
    word_learned_lists.append(dataset[payload["name"] - 1])
    print(word_learned_lists)
    return jsonify(word_learned_lists=word_learned_lists)
if __name__ == '__main__':
    # Development server only; disable debug mode before deploying.
    app.run(debug = True)
| null |
Korean_Learning_Website/server.py
|
server.py
|
py
| 16,357 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "flask.Flask",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 336,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 351,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 355,
"usage_type": "call"
},
{
"api_name": "flask.request.get_json",
"line_number": 363,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 363,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 374,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 379,
"usage_type": "call"
},
{
"api_name": "flask.request.get_json",
"line_number": 388,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 388,
"usage_type": "name"
},
{
"api_name": "re.compile",
"line_number": 390,
"usage_type": "call"
},
{
"api_name": "re.I",
"line_number": 390,
"usage_type": "attribute"
},
{
"api_name": "re.search",
"line_number": 394,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 399,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 403,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 407,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 411,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 419,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 429,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 430,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 438,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 446,
"usage_type": "call"
},
{
"api_name": "flask.request.get_json",
"line_number": 453,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 453,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 461,
"usage_type": "call"
},
{
"api_name": "flask.request.get_json",
"line_number": 468,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 468,
"usage_type": "name"
},
{
"api_name": "copy.deepcopy",
"line_number": 472,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 477,
"usage_type": "call"
},
{
"api_name": "flask.request.get_json",
"line_number": 483,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 483,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 489,
"usage_type": "call"
},
{
"api_name": "flask.request.get_json",
"line_number": 498,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 498,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 511,
"usage_type": "call"
},
{
"api_name": "flask.request.get_json",
"line_number": 518,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 518,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 523,
"usage_type": "call"
}
] |
356491312
|
# -*- coding: utf-8 -*-
""" fichier Solve """
import numpy as np
import scipy as sp
from scipy.sparse.linalg import spsolve
from scipy.sparse import lil_matrix
from element import RefElement, Element, Simplex
class Forms:
    """Variational forms for  -div(a*grad u) + b.grad u + c*u = q.

    Boundary conditions:
        Neumann (Robin): a*du/dn + d*u = beta
        Dirichlet:       u = alpha
    Each coefficient (a, b, c, q, d, beta) is a callable mapping a physical
    coordinate to its value at that point.
    """

    def __init__(self, a, b, c, q, d, beta):
        """Store the coefficient callables of the variational problem."""
        self.a = a
        self.b = b
        self.c = c
        # BUG FIX: the original assigned `self.d = d` twice; assign once.
        self.d = d
        self.q = q
        self.beta = beta

    def omegaA(self, e):
        """Elemental stiffness integrand A_e(x) on the domain element `e`."""
        return lambda x: e.Je * (
            np.dot(e.gradPhi(x).T, np.dot(self.a(e.coord(x)), e.gradPhi(x))) +
            np.dot(e.phi(x), np.dot(self.b(e.coord(x)).T, e.gradPhi(x))) +
            self.c(e.coord(x)) * np.dot(e.phi(x), e.phi(x).T))

    def omegaF(self, e):
        """Elemental load integrand F_e(x) on the domain element `e`."""
        return lambda x: e.Je * (self.q(e.coord(x)) * e.phi(x))

    def neumannA(self, e):
        """Boundary stiffness integrand from the Robin term d*u."""
        return lambda x: e.Je * (self.d(e.coord(x)) * e.phi(x).dot(e.phi(x).T))

    def neumannF(self, e):
        """Boundary load integrand from the Neumann datum beta."""
        return lambda x: e.Je * (self.beta(e.coord(x)) * e.phi(x))
class Solve:
    """Assemble and solve the finite-element linear system A u = F.

    `equation` is a dict holding the coefficient callables (see Forms) plus
    file paths: 'coord' (node coordinates), 'elements' (domain simplices),
    'neumann' / 'dirichlet' (boundary node lists, '' when absent) and
    'alpha' (Dirichlet datum callable).
    """

    def __init__(self, equation):
        self.equation = equation
        # FIX: SciPy removed its NumPy re-exports (sp.loadtxt no longer
        # exists in modern releases); use numpy.loadtxt directly.
        self.coord = np.loadtxt(equation['coord'], dtype=float)
        self.size, self.dim = self.coord.shape
        self.forms = Forms(equation['a'], equation['b'], equation['c'],
                           equation['q'], equation['d'], equation['beta'])
        # Sparse system assembled incrementally; LIL is efficient for that.
        self.A = lil_matrix((self.size, self.size), dtype=complex)
        self.F = lil_matrix((self.size, 1), dtype=complex)

    def omegaAdd(self, intg, poly):
        """Add the domain contributions of the variational forms to A and F."""
        elements = np.loadtxt(self.equation['elements']).astype(int)
        ref = RefElement(self.dim, intg, poly)
        for ddl in elements:
            xe = np.array(self.coord[ddl]).T
            e = Element(Simplex(xe), ref)
            Ae = ref.integrate(self.forms.omegaA(e))
            self.A[np.ix_(ddl, ddl)] += Ae
            Fe = ref.integrate(self.forms.omegaF(e))
            self.F[ddl] += Fe

    def neumannAdd(self, intg, poly):
        """Add the Neumann-boundary contributions; return the boundary matrix.

        Returns False when no Neumann boundary file is configured.
        """
        if self.equation['neumann'] == '':
            return False
        # FIX: use the already-imported lil_matrix instead of the removed
        # sp.sparse alias.
        Ap = lil_matrix((self.size, self.size), dtype=complex)
        neumann = np.loadtxt(self.equation['neumann']).astype(int)
        # Boundary elements live in dimension dim-1.
        ref = RefElement(self.dim - 1, intg, poly)
        for ddl in neumann:
            xe = np.array(self.coord[ddl]).T
            e = Element(Simplex(xe), ref)
            Ab = ref.integrate(self.forms.neumannA(e))
            self.A[np.ix_(ddl, ddl)] += Ab
            Ap[np.ix_(ddl, ddl)] += Ab
            Fb = ref.integrate(self.forms.neumannF(e))
            self.F[ddl] += Fb
        return Ap

    def dirichletAdd(self):
        """Impose Dirichlet values by overwriting the constrained rows.

        Returns False when no Dirichlet boundary file is configured.
        """
        if self.equation['dirichlet'] == '':
            return False
        dirichlet = np.loadtxt(self.equation['dirichlet'], dtype=int)
        for noeud in dirichlet:
            self.A[noeud] = np.zeros(self.size)
            self.A[noeud, noeud] = 1
            self.F[noeud] = self.equation['alpha'](self.coord[noeud])

    def solve(self):
        """Solve the assembled sparse system; store and return the solution."""
        self.sol = spsolve(self.A, self.F).T
        return self.sol
| null |
solve.py
|
solve.py
|
py
| 4,083 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "numpy.dot",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "scipy.loadtxt",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "scipy.sparse.lil_matrix",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "scipy.sparse.lil_matrix",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "scipy.loadtxt",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "element.RefElement",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "element.Element",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "element.Simplex",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "numpy.ix_",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "scipy.sparse.lil_matrix",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "scipy.sparse",
"line_number": 93,
"usage_type": "attribute"
},
{
"api_name": "scipy.loadtxt",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "element.RefElement",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "element.Element",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "element.Simplex",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "numpy.ix_",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "numpy.ix_",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "scipy.loadtxt",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "scipy.sparse.linalg.spsolve",
"line_number": 129,
"usage_type": "call"
}
] |
299115672
|
from django.urls import path
from mainapp.views import (
main, contacts, about
)
# URL namespace: reverse these routes as 'mainapp:index', 'mainapp:about', ...
app_name = 'mainapp'
urlpatterns = [
    # Static informational pages.
    path('contacts/', contacts, name='contacts'),
    path('about/', about, name='about'),
    # Site root (index page).
    path('', main, name='index'),
]
| null |
server/mainapp/urls.py
|
urls.py
|
py
| 252 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.urls.path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "mainapp.views.contacts",
"line_number": 10,
"usage_type": "argument"
},
{
"api_name": "django.urls.path",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "mainapp.views.about",
"line_number": 11,
"usage_type": "argument"
},
{
"api_name": "django.urls.path",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "mainapp.views.main",
"line_number": 12,
"usage_type": "argument"
}
] |
262159560
|
# -*- encoding: utf-8 -*-
from ..lb_task import LbTask
from lazyblacksmith.extension.celery_app import celery_app
from lazyblacksmith.extension.esipy import esiclient
from lazyblacksmith.extension.esipy.operations import get_characters_skills
from lazyblacksmith.models import Skill
from lazyblacksmith.models import TaskState
from lazyblacksmith.models import TokenScope
from lazyblacksmith.models import User
from lazyblacksmith.models import db
from lazyblacksmith.utils.time import utcnow
from datetime import datetime
from email.utils import parsedate
import pytz
@celery_app.task(name="update_character_skill", base=LbTask, bind=True)
def task_update_character_skills(self, character_id):
    """Refresh one character's skill rows from the EVE ESI skills endpoint.

    Fetches the live skill list, upserts it into the local Skill table, then
    records token freshness and cache expiry.  No-op when the character is
    unknown; marks the task as ERROR on a non-200 ESI response.
    """
    self.start()
    # Count of newly inserted skills; currently informational only.
    skill_number = 0
    character = User.query.get(character_id)
    if character is None:
        return
    # get token
    token = self.get_token_update_esipy(
        character_id=character_id,
        scope=TokenScope.SCOPE_SKILL
    )
    # get current character skills from ESI
    character_skills = esiclient.request(
        get_characters_skills(character_id=character_id),
    )
    if character_skills.status == 200:
        for skill_object in character_skills.data.skills:
            char_skill = character.skills.filter(
                Skill.skill_id == skill_object.skill_id
            ).one_or_none()
            if char_skill:
                # Known skill: just refresh the trained level.
                char_skill.level = skill_object.active_skill_level
            else:
                # New skill: stage an insert bound to this character.
                skill = Skill(
                    character=character,
                    skill_id=skill_object.skill_id,
                    level=skill_object.active_skill_level,
                )
                db.session.merge(skill)
                skill_number += 1
        db.session.commit()
    else:
        # Non-200 response: count the failure against the token and abort.
        self.inc_fail_token_scope(token, character_skills.status)
        self.end(TaskState.ERROR)
        return
    # update the token and the state
    token.request_try = 0
    token.last_update = utcnow()
    # Cache until the HTTP Expires header, parsed and pinned to UTC.
    token.cached_until = datetime(
        *parsedate(character_skills.header['Expires'][0])[:6]
    ).replace(tzinfo=pytz.utc)
    db.session.commit()
    self.end(TaskState.SUCCESS)
| null |
lazyblacksmith/tasks/character/skills.py
|
skills.py
|
py
| 2,218 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "lazyblacksmith.models.User.query.get",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "lazyblacksmith.models.User.query",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "lazyblacksmith.models.User",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "lazyblacksmith.models.TokenScope.SCOPE_SKILL",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "lazyblacksmith.models.TokenScope",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "lazyblacksmith.extension.esipy.esiclient.request",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "lazyblacksmith.extension.esipy.esiclient",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "lazyblacksmith.extension.esipy.operations.get_characters_skills",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "lazyblacksmith.models.Skill.skill_id",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "lazyblacksmith.models.Skill",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "lazyblacksmith.models.Skill",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "lazyblacksmith.models.db.session.merge",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "lazyblacksmith.models.db.session",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "lazyblacksmith.models.db",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "lazyblacksmith.models.db.session.commit",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "lazyblacksmith.models.db.session",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "lazyblacksmith.models.db",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "lazyblacksmith.models.TaskState.ERROR",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "lazyblacksmith.models.TaskState",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "lazyblacksmith.utils.time.utcnow",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "email.utils.parsedate",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "pytz.utc",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "lazyblacksmith.models.db.session.commit",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "lazyblacksmith.models.db.session",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "lazyblacksmith.models.db",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "lazyblacksmith.models.TaskState.SUCCESS",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "lazyblacksmith.models.TaskState",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "lazyblacksmith.extension.celery_app.celery_app.task",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "lazyblacksmith.extension.celery_app.celery_app",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "lb_task.LbTask",
"line_number": 20,
"usage_type": "name"
}
] |
231800076
|
# coding: utf-8
import pytest
from tapioca_iugu.tapioca_iugu import IuguClientAdapter
def test_resource_access(api_client):
resource = api_client.customer_list()
assert resource.data == "https://api.iugu.com/v1/customers"
iterator_data = [
({}, {"totalItems": 730, "items": ["item"] * 100}, {"params": {"start": 100}}),
({"params": {"start": 100}}, {"totalItems": 730, "items": ["item"] * 100}, {"params": {"start": 200}}),
({"params": {"start": 700}}, {"totalItems": 730, "items": ["item"] * 100}, None),
]
@pytest.mark.parametrize("request_kwargs, response_data, expected", iterator_data)
def test_resource_pagination_arguments(request_kwargs, response_data, expected):
client = IuguClientAdapter()
result = client.get_iterator_next_request_kwargs(request_kwargs, response_data, None)
assert result == expected
| null |
tests/test_tapioca_iugu.py
|
test_tapioca_iugu.py
|
py
| 853 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "tapioca_iugu.tapioca_iugu.IuguClientAdapter",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 19,
"usage_type": "attribute"
}
] |
574255471
|
from influxdb import InfluxDBClient
from random import randint
import datetime
class Metrics(object):
def __init__(self):
self.client = InfluxDBClient('54.161.112.58', 8086, 'root', 'root', 'example')
self.client.create_database('example')
self.send_metrics()
def send_metrics(self):
now = datetime.datetime.now()
json_body = [
{
"measurement": "py_hello_world",
"tags": {
"host": "sudhish-mac",
"region": "sfo"
},
"time": now.strftime("%Y-%m-%dT%H:%M:%SZ"),
"fields": {
"value": randint(0, 9)
}
}
]
self.client.write_points(json_body)
| null |
metrics.py
|
metrics.py
|
py
| 929 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "influxdb.InfluxDBClient",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "random.randint",
"line_number": 23,
"usage_type": "call"
}
] |
588607266
|
"""The runes Flask web application."""
import json
from flask import Flask, request, render_template, jsonify
import re
import os
from runes import getSequences, reduce, fold
app = Flask(__name__)
@app.route('/', methods=['GET','POST'])
def root():
if request.method=='GET':
return render_template('index.html')
# If data was passed in a form, read it from the appropriate form field.
# Otherwise, data is expected as a binary string of words, so decode.
if 'words' in request.form:
words = request.form['words']
else:
words = request.data.decode()
# Words are whitespace delimited, so covert to a list of strings
# and remove special characters.
words = words.split()
word_list = [ word for word in words if not re.search('\'|-', word) ]
# Perform the algorithm.
sequences = getSequences(word_list)
sequences = reduce(sequences)
fold(sequences)
sequences = reduce(sequences)
# Sort by length, and return the seequences.
sequences.sort(key=len, reverse=True)
if 'words' in request.form:
return render_template('index.html', sequences=sequences)
return '\n'.join(sequences)
if __name__ == '__main__':
app.run(debug=True)
| null |
app.py
|
app.py
|
py
| 1,238 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "flask.Flask",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "flask.request.data.decode",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "flask.request.data",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "re.search",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "runes.getSequences",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "runes.reduce",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "runes.fold",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "runes.reduce",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 39,
"usage_type": "call"
}
] |
250105787
|
import sys
import wx
import wx.html
import wx.lib.wxpTag
import __version__
#---------------------------------------------------------------------------
class MyHelpCVS(wx.Dialog):
text = '''
<html>
<body bgcolor="#dddaec">
<table bgcolor="#7a5ada" width="100%%" cellspacing="0"
cellpadding="0" border="1">
<tr>
<td align="center">
<font color="#ffffff">
<h2>Abeni %s</h2>
Python %s<br>
wxPython %s<br>
</font">
</td>
</tr>
</table>
CVS repoman commits have been disabled until gpg signing
of manifests is added in Abeni version ~0.2.0
<p><wxp module="wx" class="Button">
<param name="label" value="Okay">
<param name="id" value="ID_OK">
</wxp></p>
</center>
</body>
</html>
'''
def __init__(self, parent):
wx.Dialog.__init__(self, parent, -1, 'Help: repoman CVS commits',)
html = wx.html.HtmlWindow(self, -1, size=(420, -1))
py_version = sys.version.split()[0]
html.SetPage(self.text % (__version__.version, py_version, wx.VERSION_STRING))
btn = html.FindWindowById(wx.ID_OK)
btn.SetDefault()
ir = html.GetInternalRepresentation()
html.SetSize( (ir.GetWidth()+25, ir.GetHeight()+25) )
self.SetClientSize(html.GetSize())
self.CentreOnParent(wx.BOTH)
| null |
HelpCVSDialog.py
|
HelpCVSDialog.py
|
py
| 1,307 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "wx.Dialog",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "wx.Dialog.__init__",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "wx.Dialog",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "wx.html.HtmlWindow",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "wx.html",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "sys.version.split",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "sys.version",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "__version__.version",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "wx.VERSION_STRING",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "wx.ID_OK",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "wx.BOTH",
"line_number": 48,
"usage_type": "attribute"
}
] |
593316441
|
"""Initial authentication handlers (``/login``)."""
from __future__ import annotations
import base64
import os
from typing import TYPE_CHECKING, Optional
from fastapi import APIRouter, Depends, HTTPException, Query, status
from fastapi.responses import RedirectResponse
from httpx import HTTPError
from gafaelfawr.dependencies.context import RequestContext, context_dependency
from gafaelfawr.dependencies.return_url import return_url_with_header
from gafaelfawr.exceptions import (
InvalidReturnURLError,
InvalidStateError,
ProviderException,
)
if TYPE_CHECKING:
from typing import List, Set
from gafaelfawr.config import Config
from gafaelfawr.models.token import TokenGroup
router = APIRouter()
__all__ = ["get_login"]
@router.get(
"/login",
description=(
"Protected applications redirect to this URL when the user is not"
" authenticated to start the authentication process. The user will"
" then be sent to an authentication provider, back to this URL with"
" additional parameters to complete the process, and then back to the"
" protected site."
),
responses={307: {"description": "Redirect to provider or destination"}},
status_code=status.HTTP_307_TEMPORARY_REDIRECT,
summary="Authenticate browser",
tags=["browser"],
)
@router.get("/oauth2/callback", include_in_schema=False, tags=["browser"])
async def get_login(
code: Optional[str] = Query(
None,
title="Provider code",
description="Set by the authentication provider after authentication",
example="V2hrNqgM_eiIjXvV41RlMw",
),
state: Optional[str] = Query(
None,
title="Authentication state",
description=(
"Set by the authentication provider after authentication to"
" protect against session fixation"
),
example="wkC2bAP5VFpDioKc3JfaDA",
),
return_url: Optional[str] = Depends(return_url_with_header),
context: RequestContext = Depends(context_dependency),
) -> RedirectResponse:
"""Handle an initial login or the return from a login provider.
Notes
-----
Also export the login handler at ``/oauth2/callback``, the route used by
oauth2_proxy, for compatibility with older oauth2_proxy installations.
This avoids needing to change the redirect_uri registered with an OpenID
Connect provider. It can be removed once all registrations have been
updated with the ``/login`` route.
"""
if code:
return await handle_provider_return(code, state, context)
else:
return await redirect_to_provider(return_url, context)
async def redirect_to_provider(
return_url: Optional[str], context: RequestContext
) -> RedirectResponse:
"""Redirect the user to an external authentication provider.
Handles the initial processing and redirect to an external provider,
storing necessary state in the user's session cookie.
Parameters
----------
return_url : `str`, optional
The return URL to which to send the user after authentication.
context : `gafaelfawr.dependencies.config.RequestContext`
The context of the incoming request.
Returns
-------
response : `fastapi.RedirectResponse`
A redirect to the authentication provider.
Raises
------
fastapi.HTTPException
The authentication request is invalid.
"""
if not return_url:
raise InvalidReturnURLError("No return URL given", "rd")
context.state.return_url = return_url
# Reuse the existing state if one already exists in the session cookie.
#
# This is subtle and requires some explanation. Most modern webapps
# involve a lot of background JavaScript. If the user has a tab open when
# their session expires, those background JavaScript requests will start
# turning into redirects to Gafaelfawr and thus to this code. Since there
# isn't a good way to see whether a request is a background JavaScript
# request versus a browser loading a page, we will generate an
# authentication redirect for each one.
#
# This means that if we generate new random state for each request, there
# is a race condition. The user may go to a page with an expired session
# and get redirected to log in. While they are logging in at the external
# provider, another open tab may kick off one of these JavaScript
# requests, which generates a new redirect and replaces the state stored
# in their session cookie. Then, when they return from authentication,
# the state will have changed, and the authentication attempt will fail.
#
# Work around this by reusing the same random state until the user
# completes an authentication. This does not entirely close the window
# for the race condition because it's possible that two requests will both
# see sessions without state, both generate state, and then both set
# cookies, and only one of them will win. However, that race condition
# window is much smaller and is unlikely to persist across authentication
# requests.
state = context.state.state
if not state:
state = base64.urlsafe_b64encode(os.urandom(16)).decode()
context.state.state = state
# Get the authentication provider URL send the user there.
auth_provider = context.factory.create_provider()
redirect_url = auth_provider.get_redirect_url(state)
return RedirectResponse(redirect_url)
async def handle_provider_return(
code: str, state: Optional[str], context: RequestContext
) -> RedirectResponse:
"""Handle the return from an external authentication provider.
Handles the target of the redirect back from an external authentication
provider with new authentication state information.
Parameters
----------
code : `str`
The authentication code from the provider.
state : `str`, optional
The opaque state used to verify that this user initiated the
authentication. This can be `None`, but that will always be an
error.
context : `gafaelfawr.dependencies.config.RequestContext`
The context of the incoming request.
Returns
-------
response : `fastapi.RedirectResponse`
A redirect to the resource the user was trying to reach before
authentication.
Raises
------
fastapi.HTTPException
The authentication request is invalid or retrieving authentication
information from the provider failed.
"""
if not state:
msg = "No authentication state"
context.logger.warning("Authentication failed", error=msg)
raise InvalidStateError(msg)
# Extract details from the reply, check state, and get the return URL.
if state != context.state.state:
msg = "Authentication state mismatch"
context.logger.warning("Authentication failed", error=msg)
raise InvalidStateError(msg)
return_url = context.state.return_url
if not return_url:
msg = "Invalid authentication state: return_url not present in cookie"
context.logger.error("Authentication failed", error=msg)
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail=[{"msg": msg, "type": "return_url_not_set"}],
)
context.rebind_logger(return_url=return_url)
# Retrieve the user identity and authorization information based on the
# reply from the authentication provider.
auth_provider = context.factory.create_provider()
try:
user_info = await auth_provider.create_user_info(code, state)
except ProviderException as e:
context.logger.warning("Provider authentication failed", error=str(e))
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail=[{"type": "provider_failed", "msg": str(e)}],
)
except HTTPError as e:
msg = "Cannot contact authentication provider"
context.logger.exception(msg, error=str(e))
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail=[
{
"type": "provider_connect_failed",
"msg": f"{msg}: {str(e)}",
}
],
)
# Construct a token.
scopes = get_scopes_from_groups(context.config, user_info.groups)
admin_service = context.factory.create_admin_service()
if admin_service.is_admin(user_info.username):
scopes = sorted(scopes + ["admin:token"])
token_service = context.factory.create_token_service()
token = await token_service.create_session_token(
user_info, scopes=scopes, ip_address=context.request.client.host
)
context.state.token = token
# Successful login, so clear the login state and send the user back to
# what they were doing.
context.state.state = None
context.state.return_url = None
context.logger.info(
"Successfully authenticated user %s (%s)",
user_info.username,
user_info.uid,
user=user_info.username,
token=token.key,
scope=" ".join(scopes),
)
return RedirectResponse(return_url)
def get_scopes_from_groups(
config: Config, groups: Optional[List[TokenGroup]]
) -> List[str]:
"""Get scopes from a list of groups.
Used to determine the scope claim of a token issued based on an OpenID
Connect authentication.
Parameters
----------
groups : List[`gafaelfawr.models.token.TokenGroup`]
The groups of a token.
Returns
-------
scopes : List[`str`]
The scopes generated from the group membership based on the
``group_mapping`` configuration parameter.
"""
if not groups:
return ["user:token"]
scopes: Set[str] = set(["user:token"])
for group in [g.name for g in groups]:
scopes.update(config.issuer.group_mapping.get(group, set()))
return sorted(scopes)
| null |
src/gafaelfawr/handlers/login.py
|
login.py
|
py
| 10,057 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "fastapi.APIRouter",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "typing.Optional",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "gafaelfawr.dependencies.context.RequestContext",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "fastapi.Query",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "fastapi.Query",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "fastapi.Depends",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "gafaelfawr.dependencies.return_url.return_url_with_header",
"line_number": 63,
"usage_type": "argument"
},
{
"api_name": "fastapi.Depends",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "gafaelfawr.dependencies.context.context_dependency",
"line_number": 64,
"usage_type": "argument"
},
{
"api_name": "fastapi.status.HTTP_307_TEMPORARY_REDIRECT",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "fastapi.status",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "fastapi.responses.RedirectResponse",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "gafaelfawr.dependencies.context.RequestContext",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "gafaelfawr.exceptions.InvalidReturnURLError",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "base64.urlsafe_b64encode",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "os.urandom",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "fastapi.responses.RedirectResponse",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "fastapi.responses.RedirectResponse",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 148,
"usage_type": "name"
},
{
"api_name": "gafaelfawr.dependencies.context.RequestContext",
"line_number": 148,
"usage_type": "name"
},
{
"api_name": "gafaelfawr.exceptions.InvalidStateError",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "gafaelfawr.exceptions.InvalidStateError",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "fastapi.HTTPException",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "fastapi.status.HTTP_500_INTERNAL_SERVER_ERROR",
"line_number": 193,
"usage_type": "attribute"
},
{
"api_name": "fastapi.status",
"line_number": 193,
"usage_type": "name"
},
{
"api_name": "gafaelfawr.exceptions.ProviderException",
"line_number": 203,
"usage_type": "name"
},
{
"api_name": "fastapi.HTTPException",
"line_number": 205,
"usage_type": "call"
},
{
"api_name": "fastapi.status.HTTP_500_INTERNAL_SERVER_ERROR",
"line_number": 206,
"usage_type": "attribute"
},
{
"api_name": "fastapi.status",
"line_number": 206,
"usage_type": "name"
},
{
"api_name": "httpx.HTTPError",
"line_number": 209,
"usage_type": "name"
},
{
"api_name": "fastapi.HTTPException",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "fastapi.status.HTTP_500_INTERNAL_SERVER_ERROR",
"line_number": 213,
"usage_type": "attribute"
},
{
"api_name": "fastapi.status",
"line_number": 213,
"usage_type": "name"
},
{
"api_name": "fastapi.responses.RedirectResponse",
"line_number": 245,
"usage_type": "call"
},
{
"api_name": "fastapi.responses.RedirectResponse",
"line_number": 149,
"usage_type": "name"
},
{
"api_name": "gafaelfawr.config.Config",
"line_number": 249,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 249,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 249,
"usage_type": "name"
},
{
"api_name": "gafaelfawr.models.token.TokenGroup",
"line_number": 249,
"usage_type": "name"
},
{
"api_name": "typing.Set",
"line_number": 269,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 250,
"usage_type": "name"
}
] |
97251826
|
import time
import numpy as np
import tensorflow as tf
import tensorflow.contrib as contrib
from PIL import Image
from scipy.io import loadmat
from imageio import imread, imwrite
from skimage.transform import resize
np.random.seed(1024)
HEIGHT = 360
WIDTH = 360
CHANNEL = 3
PATCH_SIZE = [1, 3, 3, 1]
MAX_ITER = 100
MEAN_VALUES = np.array([123.68, 116.779, 103.939])
KEEP_SAME = False
def original_colors(original, stylized):
"""Keep the colors of original picture.
Parameters:
-----------
original: original picture (HEIGHT, WIDTH, CHANNEL) np.ndarray, dtype=uint8
stylized: mixed picture (HEIGHT, WIDTH, CHANNEL) np.ndarray, dtype=uint8
Returns:
--------
target: (1, HEIGHT, WIDTH, CHANNEL) np.ndarray, dtype=float32
"""
original = Image.fromarray(float2uint8(original))
stylized = Image.fromarray(float2uint8(stylized))
h, s, v = original.convert('HSV').split()
hs, ss, vs = stylized.convert('HSV').split()
return np.array(Image.merge('HSV', (h, s, vs)).convert('RGB'), dtype=np.float32).reshape([1, HEIGHT, WIDTH, 3])
def float2uint8(img):
""""Convert an image ndarray from float to uint8 with clipping"""
return np.uint8(np.clip(img, 0, 255))
def load_picture(path):
"""Read an image and process it to 4-D tensor."""
img = imread(path)
shape = img.shape
assert shape[2] == CHANNEL
img = resize(img, [HEIGHT, WIDTH], preserve_range=True)-MEAN_VALUES
img = np.reshape(img, [1, HEIGHT, WIDTH, 3])
return shape, img
def save_img(img, shape, keep_same=False):
"""Save the mixed 4-D image tensor automatically."""
img = img[0]+MEAN_VALUES
# If we want the output have the same shape as input.
if keep_same:
img = convert_img_shape(img, shape)
imwrite('output/'+time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())+'_im.jpg', float2uint8(img))
def convert_img_shape(img, shape):
"""Convert an 3-D image to given shape."""
img = float2uint8(img)
img = resize(img, shape, preserve_range=True)
return img
def vgg(inputs):
"""VGG16 Network for obtaining feature maps of inputs.
Parameters:
-----------
inputs: input images, np.ndarray with shape (k, Height, Width, Channel)/(k, 224, 224, 3)
Returns:
--------
feature_bank: a dictionary of each layer in VGG NET.
"""
path = r'C:\Users\屿星神\Downloads\vgg-min-16.mat'
# path = r'C:\Users\屿星神\Downloads\vgg.mat'
vgg_para = loadmat(path)
layers = vgg_para["layers"]
feature_bank = {}
with tf.variable_scope("vgg"):
for i in range(26):
if layers[0, i][0, 0]["type"] == "conv":
w = layers[0, i][0, 0]["weights"][0, 0]
b = layers[0, i][0, 0]["weights"][0, 1]
w = tf.constant(w, dtype=tf.float32)
b = tf.constant(b.T, dtype=tf.float32)
with tf.variable_scope(str(i)):
inputs = tf.nn.conv2d(inputs, w, [1, 1, 1, 1], "SAME") + b
elif layers[0, i][0, 0]["type"] == "relu":
inputs = tf.nn.relu(inputs)
feature_bank[layers[0, i][0, 0]["name"][0]] = inputs
else:
inputs = tf.nn.max_pool(inputs, [1, 2, 2, 1], [1, 2, 2, 1], "SAME")
return feature_bank
def style_swap(feature_content, feature_style):
"""Style swap for feature map of content and style image..
Parameters:
-----------
feature_content: a dictionary of each layer in VGG NET for content image.
feature_content: a dictionary of each layer in VGG NET for style image.
Returns:
--------
feature_mix: Output of swapping for specific layer.
"""
layer = 'relu4_2'
feature_content = feature_content[layer]
feature_style = feature_style[layer]
_, h, w, channel = list(map(lambda x: int(x), feature_content.shape))
m = np.ones((h, w))*9
m[0, 1:-1] = m[-1, 1:-1] = m[1:-1, 0] = m[1:-1, -1] = 6
m[0, 0] = m[0, -1] = m[-1, 0] = m[-1, -1] = 4
m = m.reshape([1, h, w, 1])
lst = []
for content, style in zip(tf.split(feature_content, num_or_size_splits=channel, axis=-1),
tf.split(feature_style, num_or_size_splits=channel, axis=-1)):
style_patches = tf.extract_image_patches(style,
ksizes=PATCH_SIZE,
strides=[1, 1, 1, 1],
rates=[1, 1, 1, 1],
padding='SAME')
filters = tf.transpose(tf.reshape(style_patches, [-1, 3, 3, 1]), [1, 2, 3, 0])
normalize_filters = tf.nn.l2_normalize(filters, dim=(0, 1, 2))
conv1 = tf.nn.conv2d(content, filter=normalize_filters, strides=[1, 1, 1, 1], padding='SAME')
maxcoord = tf.argmax(conv1, axis=3)
one_hot_filter = tf.one_hot(maxcoord, depth=conv1.get_shape()[-1].value)
conv2 = tf.nn.conv2d_transpose(one_hot_filter,
filter=filters,
strides=[1, 1, 1, 1],
output_shape=tf.shape(content),
padding='SAME')
lst.append(conv2/m)
mix = tf.concat(lst, axis=-1)
return mix
def content_loss(feature_x, feature_mix):
"""Content loss."""
return tf.nn.l2_loss(feature_x["relu4_2"] - feature_mix)
def variation_loss(img):
"""Variation loss.
Parameters:
-----------
img: H*W*C np.array
"""
return tf.nn.l2_loss(img[:-1]-img[1:])+tf.nn.l2_loss(img[:, :-1]-img[:, 1:])
def losses(feature_mix, img_y, lmbd=1e-6):
feature_y = vgg(img_y)
loss = lmbd * content_loss(feature_y, feature_mix) + variation_loss(img_y[0])
return loss
def main(keep_color=False, keep_same=False, load=False):
# path_content = 'input/zsh4.jpg'
path_content = 'input/profile.jpg'
path_style = 'input/starry_night.jpg'
# path_style = 'input/sketch.jpg'
# path_style = 'input/skrik.jpg'
# path_style = 'input/lundstroem.jpg'
path_load = 'output/2018-05-21-12-26-12_im.jpg'
# Network
img_content = tf.placeholder("float", [1, HEIGHT, WIDTH, 3])
img_style = tf.placeholder("float", [1, HEIGHT, WIDTH, 3])
img_target = tf.get_variable("target", shape=[1, HEIGHT, WIDTH, 3],
initializer=tf.truncated_normal_initializer(stddev=0.02))
feature_content = vgg(img_content)
feature_style = vgg(img_style)
feature_mix = style_swap(feature_content, feature_style)
# Initialization
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
shape, content = load_picture(path_content)
_, style = load_picture(path_style)
feed_dict = {img_content: content,
img_style: style}
if load:
_, img_load = load_picture(path_load)
sess.run(tf.assign(img_target, img_load), feed_dict=feed_dict)
else:
sess.run(tf.assign(img_target, content), feed_dict=feed_dict)
feature_mix = sess.run(feature_mix, feed_dict=feed_dict)
loss = losses(feature_mix, img_target, 1e-4)
optimizer = contrib.opt.ScipyOptimizerInterface(loss,
method='L-BFGS-B',
options={'maxiter': MAX_ITER, 'disp': 1})
# print(sess.run([content_loss(vgg(img_target), feature_mix), variation_loss(img_target)]))
# exit()
# Start Optimization
start_time = time.time()
print('Transform Start')
optimizer.minimize(sess, feed_dict=feed_dict)
print('Time eclipsed: %.4f' % (time.time()-start_time))
# Save target image
img_target = sess.run(img_target, feed_dict=feed_dict)
if keep_color:
img_target = original_colors(img_content[0], img_target[0])
save_img(img_target, shape, keep_same)
def vgg_test():
path = r'C:\Users\屿星神\Downloads\vgg-min-16.mat'
vgg_para = loadmat(path)
layers = vgg_para["layers"]
print(layers[0][0][0][0]['name'])
def transform(keep_color=False, keep_same=False):
"""Test whether keeping color works."""
# path_target = 'output/2018-05-20-14-52-26_im.jpg'
path_content = 'input/why.jpg'
path_target = 'output/2018-05-20-16-12-20_im.jpg'
shape, img_content = load_picture(path_content)
_, img_target = load_picture(path_target)
img = img_target
if keep_color:
img = original_colors(img_content[0], img[0])
save_img(img, shape, keep_same)
if __name__ == '__main__':
main(False, KEEP_SAME, False)
| null |
code/StyleSwap_W.py
|
StyleSwap_W.py
|
py
| 8,694 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "numpy.random.seed",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "PIL.Image.fromarray",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "PIL.Image.fromarray",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "PIL.Image.merge",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "numpy.float32",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "numpy.uint8",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "numpy.clip",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "imageio.imread",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "skimage.transform.resize",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "imageio.imwrite",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "time.strftime",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "time.localtime",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "skimage.transform.resize",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "scipy.io.loadmat",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "tensorflow.variable_scope",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "tensorflow.constant",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "tensorflow.float32",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.constant",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "tensorflow.float32",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.variable_scope",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "tensorflow.nn.conv2d",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "tensorflow.nn",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.nn.relu",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "tensorflow.nn",
"line_number": 100,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.nn.max_pool",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "tensorflow.nn",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "numpy.ones",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "tensorflow.split",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "tensorflow.split",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "tensorflow.extract_image_patches",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "tensorflow.transpose",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "tensorflow.reshape",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "tensorflow.nn.l2_normalize",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "tensorflow.nn",
"line_number": 137,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.nn.conv2d",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "tensorflow.nn",
"line_number": 138,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.argmax",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "tensorflow.one_hot",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "tensorflow.nn.conv2d_transpose",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "tensorflow.nn",
"line_number": 141,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.shape",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "tensorflow.concat",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "tensorflow.nn.l2_loss",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "tensorflow.nn",
"line_number": 154,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.nn.l2_loss",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "tensorflow.nn",
"line_number": 165,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.placeholder",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "tensorflow.placeholder",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "tensorflow.get_variable",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "tensorflow.truncated_normal_initializer",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "tensorflow.Session",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "tensorflow.global_variables_initializer",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "tensorflow.assign",
"line_number": 205,
"usage_type": "call"
},
{
"api_name": "tensorflow.assign",
"line_number": 207,
"usage_type": "call"
},
{
"api_name": "tensorflow.contrib.opt.ScipyOptimizerInterface",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "tensorflow.contrib.opt",
"line_number": 211,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.contrib",
"line_number": 211,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 222,
"usage_type": "call"
},
{
"api_name": "scipy.io.loadmat",
"line_number": 233,
"usage_type": "call"
}
] |
639958671
|
import asyncore
import socket
import errno
import logging
from ..core import deferred
from ..comm import PDU
DEBUG = True
_logger = logging.getLogger(__name__)
__all__ = ['TCPClient']
CONNECT_TIMEOUT = 30.0
class TCPClient(asyncore.dispatcher):
"""
This class is a mapping between the client/server pattern and the socket API.
The ctor is given the address to connect as a TCP client.
Because objects of this class sit at the bottom of a protocol stack they are accessed as servers.
"""
_connect_timeout = CONNECT_TIMEOUT
def __init__(self, peer):
raise NotImplementedError
if DEBUG: _logger.debug("__init__ %r", peer)
asyncore.dispatcher.__init__(self)
# ask the dispatcher for a socket
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
# make sure the connection attempt is non-blocking
self.socket.setblocking(0)
if DEBUG: _logger.debug(" - non-blocking")
# save the peer
self.peer = peer
self.connected = False
# create a request buffer
self.request = b''
# try to connect
try:
rslt = self.socket.connect_ex(peer)
if (rslt == 0):
if DEBUG: _logger.debug(" - connected")
self.connected = True
elif rslt == errno.EINPROGRESS:
if DEBUG: _logger.debug(" - in progress")
elif rslt == errno.ECONNREFUSED:
if DEBUG: _logger.debug(" - connection refused")
self.handle_error(rslt)
else:
if DEBUG: _logger.debug(" - connect_ex: %r", rslt)
except socket.error as err:
if DEBUG: _logger.debug(" - connect socket error: %r", err)
# pass along to a handler
self.handle_error(err)
def handle_accept(self):
if DEBUG: _logger.debug("handle_accept")
def handle_connect(self):
if DEBUG: _logger.debug("handle_connect")
self.connected = True
def handle_connect_event(self):
if DEBUG: _logger.debug("handle_connect_event")
# there might be an error
err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
if DEBUG: _logger.debug(" - err: %r", err)
# check for connection refused
if (err == 0):
if DEBUG: _logger.debug(" - no error")
self.connected = True
elif (err == errno.ECONNREFUSED):
if DEBUG: _logger.debug(" - connection to %r refused", self.peer)
self.handle_error(socket.error(errno.ECONNREFUSED, "connection refused"))
return
# pass along
asyncore.dispatcher.handle_connect_event(self)
def readable(self):
return self.connected
def handle_read(self):
if DEBUG: _logger.debug("handle_read")
try:
msg = self.recv(65536)
if DEBUG: _logger.debug(" - received %d octets", len(msg))
# no socket means it was closed
if not self.socket:
if DEBUG: _logger.debug(" - socket was closed")
else:
# send the data upstream
deferred(self.response, PDU(msg))
except socket.error as err:
if (err.args[0] == errno.ECONNREFUSED):
if DEBUG: _logger.debug(" - connection to %r refused", self.peer)
else:
if DEBUG: _logger.debug(" - recv socket error: %r", err)
# pass along to a handler
self.handle_error(err)
def writable(self):
if not self.connected:
return True
return (len(self.request) != 0)
def handle_write(self):
if DEBUG: _logger.debug("handle_write")
try:
sent = self.send(self.request)
if DEBUG: _logger.debug(" - sent %d octets, %d remaining", sent, len(self.request) - sent)
self.request = self.request[sent:]
except socket.error as err:
if (err.args[0] == errno.EPIPE):
if DEBUG: _logger.debug(" - broken pipe to %r", self.peer)
return
elif (err.args[0] == errno.ECONNREFUSED):
if DEBUG: _logger.debug(" - connection to %r refused", self.peer)
else:
if DEBUG: _logger.debug(" - send socket error: %s", err)
# pass along to a handler
self.handle_error(err)
def handle_write_event(self):
if DEBUG: _logger.debug("handle_write_event")
# there might be an error
err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
if DEBUG: _logger.debug(" - err: %r", err)
# check for connection refused
if err == 0:
if not self.connected:
if DEBUG: _logger.debug(" - connected")
self.handle_connect()
else:
if DEBUG: _logger.debug(" - peer: %r", self.peer)
if (err == errno.ECONNREFUSED):
socket_error = socket.error(err, "connection refused")
elif (err == errno.ETIMEDOUT):
socket_error = socket.error(err, "timed out")
elif (err == errno.EHOSTUNREACH):
socket_error = socket.error(err, "host unreachable")
else:
socket_error = socket.error(err, "other unknown: %r" % (err,))
if DEBUG: _logger.debug(" - socket_error: %r", socket_error)
self.handle_error(socket_error)
return
# pass along
asyncore.dispatcher.handle_write_event(self)
def handle_close(self):
if DEBUG: _logger.debug("handle_close")
# close the socket
self.close()
# no longer connected
self.connected = False
# make sure other routines know the socket is closed
self.socket = None
def handle_error(self, error=None):
"""Trap for TCPClient errors, otherwise continue."""
if DEBUG: _logger.debug("handle_error %r", error)
# if there is no socket, it was closed
if not self.socket:
if DEBUG: _logger.debug(" - error already handled")
return
# core does not take parameters
asyncore.dispatcher.handle_error(self)
def indication(self, pdu):
"""Requests are queued for delivery."""
if DEBUG: _logger.debug("indication %r", pdu)
self.request += pdu.pduData
| null |
bacpypes/transport/tcp_client.py
|
tcp_client.py
|
py
| 6,503 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "logging.getLogger",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "asyncore.dispatcher",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "asyncore.dispatcher.__init__",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "asyncore.dispatcher",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "socket.AF_INET",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "socket.SOCK_STREAM",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "errno.EINPROGRESS",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "errno.ECONNREFUSED",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "socket.error",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "socket.SOL_SOCKET",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "socket.SO_ERROR",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "errno.ECONNREFUSED",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "socket.error",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "errno.ECONNREFUSED",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "asyncore.dispatcher.handle_connect_event",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "asyncore.dispatcher",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "core.deferred",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "comm.PDU",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "socket.error",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "errno.ECONNREFUSED",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "socket.error",
"line_number": 114,
"usage_type": "attribute"
},
{
"api_name": "errno.EPIPE",
"line_number": 115,
"usage_type": "attribute"
},
{
"api_name": "errno.ECONNREFUSED",
"line_number": 118,
"usage_type": "attribute"
},
{
"api_name": "socket.SOL_SOCKET",
"line_number": 128,
"usage_type": "attribute"
},
{
"api_name": "socket.SO_ERROR",
"line_number": 128,
"usage_type": "attribute"
},
{
"api_name": "errno.ECONNREFUSED",
"line_number": 137,
"usage_type": "attribute"
},
{
"api_name": "socket.error",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "errno.ETIMEDOUT",
"line_number": 139,
"usage_type": "attribute"
},
{
"api_name": "socket.error",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "errno.EHOSTUNREACH",
"line_number": 141,
"usage_type": "attribute"
},
{
"api_name": "socket.error",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "socket.error",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "asyncore.dispatcher.handle_write_event",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "asyncore.dispatcher",
"line_number": 149,
"usage_type": "attribute"
},
{
"api_name": "asyncore.dispatcher.handle_error",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "asyncore.dispatcher",
"line_number": 168,
"usage_type": "attribute"
}
] |
437447831
|
import datetime
from threading import Thread
import logging
import redis
class EventListener(Thread):
def __init__(self):
Thread.__init__(self)
self.__r = redis.Redis(charset="utf-8", decode_responses=True)
self.__events = []
def run(self):
pubsub = self.__r.pubsub()
pubsub.subscribe(['users', 'spam'])
for item in pubsub.listen():
if item['type'] == 'message':
message = "\nEVENT: %s | %s" % (item['data'], datetime.datetime.now())
self.__events.append(message)
logging.info(message)
def get_events(self):
return self.__events
| null |
lab3/Listener.py
|
Listener.py
|
py
| 662 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "threading.Thread",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "threading.Thread.__init__",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "redis.Redis",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "logging.info",
"line_number": 21,
"usage_type": "call"
}
] |
206397758
|
import discord
from discord.ext import commands as bot
import urllib
import urllib.request
import urllib.error
import json
import asyncio
import re
import random
import os
import aiohttp
import csv
def trunc_to(ln, s):
if len(s) <= ln: return s
else: return s[:ln-3] + "..."
def highlight(s, term, type='**'):
to_bold = term.split(' ')
output = re.sub(r'(%s)' % '|'.join(to_bold), (type + r'\1' + type), s, flags=re.IGNORECASE)
return output
def capitalise(s, term):
to_bold = term.split(' ')
output = re.sub(r'(%s)' % '|'.join(to_bold), lambda match: r'{}'.format(match.group(1).upper()), s, flags=re.IGNORECASE)
return output
def linkify(searchtext):
feu_post_base = "http://feuniverse.us/t/{}/{}"
def result(data):
post, thread = data
title = highlight(thread['title'], searchtext, '*')
blurb = highlight(trunc_to(100, post['blurb']), searchtext)
link = '[Post in %s](%s)' % (
title,
feu_post_base.format(post['topic_id'], post['post_number']))
threadline = '**%s by %s**' % (link, highlight(post['username'], searchtext, '*'))
return threadline + '\n' + blurb
return result
def create_embed(posts, threads, term):
feu_search_base = "http://feuniverse.us/search?q=%s"
searchtext = urllib.parse.unquote(term)
numresults = 3
result = discord.Embed(
title='Search results for "%s"' % urllib.parse.unquote(term),
url=feu_search_base % term,
color=0xde272c)
if len(posts)>0:
innerEmbed = '\n\n'.join(
map(linkify(searchtext),zip(posts[:numresults],threads)))
result.add_field(
name='Found %d result(s)' % len(posts),
value=innerEmbed,
inline=False)
else:
result.description = 'Found %d result(s)' % len(posts)
if len(posts) > numresults:
result.set_footer(
text="Truncated %d result(s)." % (len(posts)-numresults))
return result
class Helpful(bot.Cog):
"""Actually Helpful commands"""
def __init__(self, bot):
self.bot = bot
# ctx = bot
# self.port = bot.listen('on_message')(self.port)
@bot.command()
async def mod(self, ctx, rule_num, *, link):
"""!mod <rule number> <link to objectionable message>"""
FEU_id = 144670830150811649
if ctx.message.guild is None or ctx.message.guild.id == FEU_id:
await ctx.message.author.send("Your request for moderation was successful.")
if ctx.message.guild is not None:
await ctx.message.delete
mod_channel = self.bot.get_channel(650911156277477377)
paladins = discord.utils.get(ctx.message.guild.roles, id=145992793796378624).mention
await ctx.mod_channel.send("%s, moderation request received by user %s: Rule %s, at <%s>." % (paladins, ctx.message.author.name, rule_num, link))
else:
await ctx.send("Moderation features are for FEU only.")
@bot.command()
async def howtomod(self, ctx):
"""Gives information on how to use the !mod command."""
await ctx.send("First, have Developer Mode enabled (Settings -> Appearance -> Developer Mode).")
await ctx.send("Then, click the `...` by the offending message, and click \"Copy Link\".")
await ctx.send("Then simple say !mod <n> <link>, where <n> is the rule it violates, and <link> is the pasted link to the message.")
await ctx.send("If you do not have Developer Mode, you may instead of a link, write a short description of where the infraction took place, and by who.")
await ctx.send("Note that after requesting moderation, the message requesting moderation will be removed.")
@bot.command()
async def goldmine(self, ctx):
"""everything you ever wanted"""
embed=discord.Embed(title="Unified FE Hacking Dropbox", url='https://www.dropbox.com/sh/xl73trcck2la799/AAAMdpNSGQWEzYkLEQEiEhGFa?dl=0', description="All the hacking resources you could ever need, in one place", color=0xefba01)
# embed.set_thumbnail(url='http://i.imgur.com/Bg5NSga.png')
await ctx.send(embed=embed)
@bot.command() # removed aliases=["repo"]
async def repository(self, ctx):
"""graphics for you"""
embed=discord.Embed(title="Emblem Anims", url='https://emblem-anims.herokuapp.com/', description="Get your animations here (credits missing on some, check just in case!)", color=0x4286f4)
await ctx.send(embed=embed)
@bot.command()
async def mugs(self, ctx):
"""Link to image of all GBAFE mugs."""
await ctx.send("http://doc.feuniverse.us/static/resources/mugs.png")
@bot.command()
async def hit(self, ctx, number, type="2RN"):
"""Convert 2RN/fates hit to actual chance"""
try:
num = int(number)
except ValueError:
await ctx.send("Specify an integer 0-100")
return
if (num < 0) or (num > 100):
await ctx.send("Specify an integer 0-100")
return
if type.upper()=="2RN":
table = [0.00, 0.03, 0.10, 0.21, 0.36, 0.55, 0.78, 1.05, 1.36, 1.71, 2.10, 2.53, 3.00, 3.51, 4.06, 4.65, 5.28, 5.95, 6.66, 7.41, 8.20, 9.03, 9.90, 10.81, 11.76, 12.75, 13.78, 14.85, 15.96, 17.11, 18.30, 19.53, 20.80, 22.11, 23.46, 24.85, 26.28, 27.75, 29.26, 30.81, 32.40, 34.03, 35.70, 37.41, 39.16, 40.95, 42.78, 44.65, 46.56, 48.51, 50.50, 52.47, 54.40, 56.29, 58.14, 59.95, 61.72, 63.45, 65.14, 66.79, 68.40, 69.97, 71.50, 72.99, 74.44, 75.85, 77.22, 78.55, 79.84, 81.09, 82.30, 83.47, 84.60, 85.69, 86.74, 87.75, 88.72, 89.65, 90.54, 91.39, 92.20, 92.97, 93.70, 94.39, 95.04, 95.65, 96.22, 96.75, 97.24, 97.69, 98.10, 98.47, 98.80, 99.09, 99.34, 99.55, 99.72, 99.85, 99.94, 99.99, 100.00]
elif type.upper()=="FATES":
table = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50.5,51.83,53.17,54.5,55.83,57.17,58.5,59.83,61.17,62.5,63.83,65.17,66.5,67.83,69.17,70.5,71.83,73.17,74.5,75.83,77.17,78.5,79.83,81.17,82.5,83.83,85.12,86.35,87.53,88.66,89.73,90.75,91.72,92.63,93.49,94.3,95.05,95.75,96.4,96.99,97.53,98.02,98.45,98.83,99.16,99.43,99.65,99.82,99.93,99.99,100]
else:
await ctx.send("Valid types are 2RN, Fates")
return
await ctx.send(str(table[num]))
@bot.command()
async def roll(self, ctx, number, type="2RN"):
"""rolls hit or miss (e.g. >>hit 50 1rn/2rn[default]/fates)"""
try:
num = int(number)
except ValueError:
await ctx.send("Specify an integer 0-100")
return
if (num < 0) or (num > 100):
await ctx.send("Specify an integer 0-100")
return
if type.upper()=="1RN":
rolled = random.randint(1,100)
elif type.upper()=="2RN":
rolled = (random.randint(1,100) + random.randint(1,100))>>1
elif type.upper()=="FATES":
rolled = random.randint(1,100)
if rolled > 50:
rolled = ((rolled*3) + random.randint(1,100))>>2
else:
await ctx.send("Valid types are 1RN, 2RN, Fates")
return
if rolled <= num: await ctx.send("HIT (%d)" % rolled)
else: await ctx.send("MISS (%d)" % rolled)
@bot.command() # removed aliases = ['die']
async def rollDie(self, ctx, n : int):
if n <= 0:
await ctx.send("Specify a positive integer.")
return
res = random.randrange(n) + 1
await ctx.send(str(res))
@bot.command(aliases=['s'])
async def search(self, ctx, *, term):
"""search feu"""
root = "http://feuniverse.us/search.json?q=%s"
payload = urllib.parse.quote(term)
async with aiohttp.ClientSession() as session:
async with session.get(root % payload) as response:
data = await response.json()
# async with aiohttp.get(root % payload) as query:
# # with urllib.request.urlopen(root % payload) as query:
# if query.status == 200:
# data = await query.json()
try:
# data = json.loads(js.read().decode())
posts = data["posts"]
threads = data["topics"]
await ctx.send(embed=create_embed(posts, threads, payload))
except urllib.error.URLError:
await ctx.send("Error accessing FEU server, please try again later.")
except KeyError:
embedded=create_embed(posts, [], payload)
try:
await ctx.send(embed=embedded)
except discord.errors.HTTPException:
print(embedded.title)
@bot.command() #aliases=["UT2"]
async def ut2(self, ctx):
"""links ultimate tutorial v2"""
embed=discord.Embed(title="Fire Emblem Hacking Ultimate Tutorial v2", url='https://tutorial.feuniverse.us/', description="How to do everything with Event Assembler buildfiles", color=0x40caf2)
await ctx.send(embed=embed)
async def port(self, ctx, msg):
if str(msg.author.id) != 149576374984638464: return
if 'PORT' in msg.content.upper():
pass
# await ctx.send(msg.channel, '```I think you mean MUG```')
async def setup(bot):
await bot.add_cog(Helpful(bot))
| null |
helpful.py
|
helpful.py
|
py
| 9,528 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "re.sub",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "re.IGNORECASE",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "re.sub",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "re.IGNORECASE",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "urllib.parse.unquote",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "urllib.parse",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "discord.Embed",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "urllib.parse.unquote",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "urllib.parse",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands.Cog",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "discord.utils.get",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "discord.utils",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "discord.Embed",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "discord.Embed",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 114,
"usage_type": "name"
},
{
"api_name": "random.randint",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 134,
"usage_type": "name"
},
{
"api_name": "random.randrange",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 159,
"usage_type": "name"
},
{
"api_name": "urllib.parse.quote",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "urllib.parse",
"line_number": 171,
"usage_type": "attribute"
},
{
"api_name": "aiohttp.ClientSession",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "urllib.error",
"line_number": 185,
"usage_type": "attribute"
},
{
"api_name": "discord.errors",
"line_number": 191,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 167,
"usage_type": "name"
},
{
"api_name": "discord.Embed",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 194,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.add_cog",
"line_number": 207,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 207,
"usage_type": "name"
}
] |
202090211
|
import logging
import os
import sys
import time
import aiohttp_jinja2
import jinja2
from aiohttp import web
import ledfx_frontend
from ledfx.api import RestApi
try:
base_path = sys._MEIPASS
except BaseException:
base_path = os.path.abspath(".")
_LOGGER = logging.getLogger(__name__)
class HttpServer(object):
def __init__(self, ledfx, host, port):
"""Initialize the HTTP server"""
self.app = web.Application()
self.api = RestApi(ledfx)
templates_path = os.path.abspath(
os.path.dirname(ledfx_frontend.__file__)
)
aiohttp_jinja2.setup(
self.app, loader=jinja2.FileSystemLoader(templates_path)
)
self.register_routes()
self._ledfx = ledfx
self.host = host
self.port = port
@aiohttp_jinja2.template("index.html")
async def index(self, request):
return {}
def register_routes(self):
self.api.register_routes(self.app)
self.app.router.add_route("get", "/favicon.ico", self.favicon)
self.app.router.add_route("get", "/manifest.json", self.manifest)
self.app.router.add_static(
"/static",
path=ledfx_frontend.where() + "/static",
name="static",
)
self.app.router.add_route("get", "/", self.index)
self.app.router.add_route("get", "/{extra:.+}", self.index)
async def favicon(self, response):
return web.FileResponse(
path=ledfx_frontend.where() + "/favicon.ico", status=200
)
async def manifest(self, response):
return web.FileResponse(
path=ledfx_frontend.where() + "/manifest.json", status=200
)
async def start(self):
self.runner = web.AppRunner(self.app)
await self.runner.setup()
try:
site = web.TCPSite(self.runner, self.host, self.port)
await site.start()
self.base_url = ("http://{}:{}").format(self.host, self.port)
print(("Started webinterface at {}").format(self.base_url))
except OSError as error:
_LOGGER.error(
"Shutting down - Failed to create HTTP server at port %d: %s.",
self.port,
error,
)
time.sleep(2)
self._ledfx.stop()
async def stop(self):
await self.app.shutdown()
if self.runner:
await self.runner.cleanup()
await self.app.cleanup()
| null |
ledfx/http_manager.py
|
http_manager.py
|
py
| 2,490 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "sys._MEIPASS",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "aiohttp.web.Application",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "aiohttp.web",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "ledfx.api.RestApi",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "ledfx.api",
"line_number": 26,
"usage_type": "argument"
},
{
"api_name": "os.path.abspath",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "ledfx_frontend.__file__",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "aiohttp_jinja2.setup",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "jinja2.FileSystemLoader",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "ledfx.api",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "aiohttp_jinja2.template",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "ledfx_frontend.where",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "aiohttp.web.FileResponse",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "aiohttp.web",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "ledfx_frontend.where",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "aiohttp.web.FileResponse",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "aiohttp.web",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "ledfx_frontend.where",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "aiohttp.web.AppRunner",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "aiohttp.web",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "aiohttp.web.TCPSite",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "aiohttp.web",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 83,
"usage_type": "call"
}
] |
502780999
|
"""
The Python standard library's 'calendar' module allows you to
render a calendar to your terminal.
https://docs.python.org/3.6/library/calendar.html
Write a program that accepts user input of the form
`14_cal.py [month] [year]`
and does the following:
- If the user doesn't specify any input, your program should
print the calendar for the current month. The 'datetime'
module may be helpful for this.
- If the user specifies one argument, assume they passed in a
month and render the calendar for that month of the current year.
- If the user specifies two arguments, assume they passed in
both the month and the year. Render the calendar for that
month and year.
- Otherwise, print a usage statement to the terminal indicating
the format that your program expects arguments to be given.
Then exit the program.
Note: the user should provide argument input (in the initial call to run the file) and not
prompted input. Also, the brackets around year are to denote that the argument is
optional, as this is a common convention in documentation.
This would mean that from the command line you would call `python3 14_cal.py 4 2015` to
print out a calendar for April in 2015, but if you omit either the year or both values,
it should use today’s date to get the month and year.
"""
import sys
import calendar
from datetime import datetime
def calendarFunction():
# grab today's information for when nothing is input by the user
# today = datetime.now()
# year = datetime.now().year
# month = datetime.now().month
# print("This is the month", str(month))
today = datetime.today()
year = today.year
month = today.month
# print("This is the month", str(month))
#make inputs
inputMonth = input("Please type a month, ex: 8 (only numbers and no spaces)! :")
inputYear = input("Please type a year, ex: 1995 (only numbers and no spaces)! :")
# print(inputMonth, inputYear)
# write conditionals to print calendar
if len(inputMonth) > 0 and len(inputMonth) <= 2 and inputMonth.isdigit():
month = int(inputMonth)
if len(inputYear) > 0 and len(inputYear) <= 4 and inputYear.isdigit():
year = int(inputYear)
print(calendar.month(year, month))
calendarFunction()
# works
# test = input("Enter the date: ").split(",")
# print(test)
# works
# print(calendar.weekday(2020, 7, 7))
# works
# tc = calendar.TextCalendar(firstweekday=6)
# print(tc.formatmonth(2020, 7))
# userinputdate = input("Enter the date: ").split(",")
# def calendarfunction():
# if len(userinputdate) == 0:
# tc = calendar.TextCalendar(firstweekday=6)
# testmonth = datetime.datetime.now()
# print(tc.formatmonth(testmonth.year, testmonth.month))
# else:
# return None
# calendarfunction()
# tc = calendar.TextCalendar(firstweekday=6)
# testmonth = datetime.datetime.now()
# print(tc.formatmonth(testmonth.year, testmonth.month))
# cal = calendar.Calendar()
# print(cal.monthdatescalendar(2020, 7))
| null |
src/14_cal.py
|
14_cal.py
|
py
| 3,024 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "datetime.datetime.today",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "calendar.month",
"line_number": 59,
"usage_type": "call"
}
] |
612097054
|
from VideoGet import VideoGet
from imutils.object_detection import non_max_suppression
from imutils.video import FPS
import numpy as np
import pytesseract
import argparse
import imutils
import time
import cv2 as cv
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", type=str, help='path to input image')
ap.add_argument("-w", '--width', type=int, default=320, help="nearest multiple of 32 for resized width")
ap.add_argument('-e', '--height', type=int, default=320, help='nearest multiple of 32 for resized height')
ap.add_argument("-p", "--padding", type=float, default=0.0, help="amount of padding to add to each border of ROI")
args = vars(ap.parse_args())
def decode_predictions(scores, geometry, min_confidence=0.5):
    """Convert raw EAST text-detector outputs into candidate boxes.

    :param scores: array of shape (1, 1, rows, cols) with a text
        probability per 4x4 feature-map cell.
    :param geometry: array of shape (1, 5, rows, cols) holding four box
        edge distances plus a rotation angle per cell.
    :param min_confidence: minimum score for a cell to yield a box.
        New parameter; defaults to the previously hard-coded 0.5, so
        existing callers are unaffected.
    :return: (rects, confidences) where rects are axis-aligned
        (startX, startY, endX, endY) int tuples and confidences are the
        matching scores.
    """
    (numRows, numCols) = scores.shape[2:4]
    rects = []
    confidences = []
    for y in range(0, numRows):
        scoresData = scores[0, 0, y]
        xData0 = geometry[0, 0, y]
        xData1 = geometry[0, 1, y]
        xData2 = geometry[0, 2, y]
        xData3 = geometry[0, 3, y]
        anglesData = geometry[0, 4, y]
        for x in range(0, numCols):
            # Skip low-probability cells early.
            if scoresData[x] < min_confidence:
                continue
            # Each feature-map cell covers 4x4 pixels of the network input.
            (offsetX, offsetY) = (x * 4.0, y * 4.0)
            angle = anglesData[x]
            cos = np.cos(angle)
            sin = np.sin(angle)
            # Box height/width come from the four edge distances.
            h = xData0[x] + xData2[x]
            w = xData1[x] + xData3[x]
            # Rotate the geometry offsets back into image coordinates.
            endX = int(offsetX + (cos * xData1[x]) + (sin * xData2[x]))
            endY = int(offsetY - (sin * xData1[x]) + (cos * xData2[x]))
            startX = int(endX - w)
            startY = int(endY - h)
            rects.append((startX, startY, endX, endY))
            confidences.append(scoresData[x])
    return rects, confidences
# Frame dimensions: original (W, H), network input (newW, newH), and the
# ratios used to map detections back onto the original frame.
(W, H) = (None, None)
(newW, newH) = (args["width"], args["height"])
(rW, rH) = (None, None)
# EAST output layers: per-cell text probabilities and box geometry.
layerNames = ["feature_fusion/Conv_7/Sigmoid", "feature_fusion/concat_3"]
print("[INFO] loading EAST text detector...")
net = cv.dnn.readNet("frozen_east_text_detection.pb")
fps = FPS().start()
# Frames are grabbed on a separate thread (see VideoGet).
video_getter = VideoGet(0).start()
while True:
    # 'q' quits; also stop once the capture thread has stopped.
    if cv.waitKey(1) == ord('q') or video_getter.stopped:
        video_getter.stop()
        break
    frame = video_getter.frame
    orig = frame.copy()
    # Compute the resize ratios once, from the first frame seen.
    if W is None or H is None:
        (H, W) = frame.shape[:2]
        rW = W / float(newW)
        rH = H / float(newH)
    frame = cv.resize(frame, (newW, newH))
    # Mean subtraction values — presumably the EAST training means; confirm.
    blob = cv.dnn.blobFromImage(frame, 1.0, (newW, newH),
                                (123.68, 116.78, 103.94), swapRB=True, crop=False)
    net.setInput(blob)
    (scores, geometry) = net.forward(layerNames)
    (rects, confidences) = decode_predictions(scores, geometry)
    boxes = non_max_suppression(np.array(rects), probs=confidences)
    for (startX, startY, endX, endY) in boxes:
        # Scale each box back to original-frame coordinates.
        startX = int(startX * rW)
        startY = int(startY * rH)
        endX = int(endX * rW)
        endY = int(endY * rH)
        roi = orig[startY:endY, startX:endX]
        # Tesseract: English, LSTM engine, treat the ROI as one text line.
        config = "-l eng --oem 1 --psm 7"
        text = pytesseract.image_to_string(roi, config=config)
        print("OCR TEXT: {}\n".format(text))
        cv.rectangle(orig, (startX, startY), (endX, endY), (0, 255, 0), 2)
        cv.putText(orig, text, (startX, startY - 20), cv.FONT_HERSHEY_SIMPLEX, 1.2, (0, 255, 0), 3)
    fps.update()
    cv.namedWindow("Text Detection", cv.WINDOW_NORMAL)
    cv.imshow("Text Detection", orig)
    cv.resizeWindow("Text Detection", 2000, 2000)
fps.stop()
print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
| null |
multithreading_video_recognition.py
|
multithreading_video_recognition.py
|
py
| 3,566 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "cv2.dnn.readNet",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "cv2.dnn",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "imutils.video.FPS",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "VideoGet.VideoGet",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "cv2.dnn.blobFromImage",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "cv2.dnn",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "imutils.object_detection.non_max_suppression",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "pytesseract.image_to_string",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "cv2.rectangle",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "cv2.putText",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "cv2.FONT_HERSHEY_SIMPLEX",
"line_number": 104,
"usage_type": "attribute"
},
{
"api_name": "cv2.namedWindow",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "cv2.WINDOW_NORMAL",
"line_number": 107,
"usage_type": "attribute"
},
{
"api_name": "cv2.imshow",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "cv2.resizeWindow",
"line_number": 109,
"usage_type": "call"
}
] |
644589274
|
"""
-A simple link simulator that emulates the transmission of packets from a
-sender A to a receiver B.
-
-"""
from collections import deque
# Packets that reached their final destination (node 'D'), in arrival order.
received_packets = []
# NOTE(review): the two lists below are declared but never appended to here.
received_packets_atC = []
received_packets_atD = []
class Packet:
    """A single simulated packet plus its timing bookkeeping.

    Note: the ``seq_num`` constructor argument is intentionally ignored;
    sequence numbers are assigned from the class-level counter so they
    are globally unique and monotonically increasing.
    """

    seq_num = 0  # class-wide count of packets created so far

    def __init__(self, num_bits, seq_num, data):
        self.num_bits = num_bits
        self.data = data
        # Stamp with the class counter (ignoring the argument), then advance.
        self.seq_num = Packet.seq_num
        Packet.seq_num += 1
        # Timestamps filled in as the packet moves through the simulation.
        self.enqueue_time = self.transmit_time = self.receive_time = None

    def __str__(self):
        return str(self.seq_num)
class Node:
    """Manages one node's transmit state and handles its simulator events."""

    # Transmitter states: IDLE = nothing being sent, BUSY = packet on the wire.
    IDLE = 0
    BUSY = 1

    def __init__(self, name):
        self.name = name
        self.incoming_link = None   # wired up externally (see Simulator.connect)
        self.outgoing_link = None
        self.input_queue = deque()  # NOTE(review): never read in this file
        self.output_queue = deque() # packets waiting for transmission
        self.state = Node.IDLE
        self.seq = 0                # count of packets ever enqueued here

    def enqueue(self, sim, owner, p):
        """
        Handle the enqueueing of a packet.
        If the transmitter is idle, schedule an immediate start-tx event;
        in either state the packet joins the output queue.

        :param sim: the Simulator driving the event loop
        :param owner: unused (event-handler calling convention)
        :param p: the Packet being enqueued
        """
        p.enqueue_time = sim.now
        self.seq += 1
        if self.state == Node.IDLE:
            sim.schedule_event(self.next_tx, self, p, 0, 'start-tx[%d]' % p.seq_num)
            self.output_queue.append(p)
            self.state = Node.BUSY
        elif self.state == Node.BUSY:
            self.output_queue.append(p)
        else:
            raise Exception('unknown state')

    def next_tx(self, sim, owner, data):
        """
        Transmit the head-of-queue packet if one exists: after the
        transmission delay, the outgoing link's start_propagation fires.
        With an empty queue the node goes idle.
        """
        if len(self.output_queue) > 0:
            self.state = Node.BUSY
            p = self.output_queue.popleft()
            tx_delay = self.outgoing_link.compute_transmit_delay(p)
            sim.schedule_event(self.outgoing_link.start_propagation,
                               self.outgoing_link, p, tx_delay,
                               'start-prop[%d]' % p.seq_num)
        else:
            self.state = Node.IDLE

    def receive(self, sim, owner, data):
        # Record arrival time; packets reaching node 'D' are collected in the
        # module-level received_packets list, anything else is forwarded by
        # re-enqueueing at this node.
        data.receive_time = sim.now
        #need to track when 'C' recieves data #############################
        #if owner.name == 'C':
        #    print("C recieved a packet")
        #    received_packets.append(data)
        if owner.name == 'D':
            print("D recieved a packet")
            received_packets.append(data)
        else:
            self.enqueue(sim, self, data)

    def __str__(self):
        return '%s' % self.name
class Routher:
    """Joins an incoming and an outgoing link (name kept as-is, [sic]).

    BUG FIX: the original __init__ referenced undefined names ``src`` and
    ``dst`` (and discarded its link arguments), so every instantiation
    raised NameError.  This version stores the links it is given; the
    public signature is unchanged.
    """

    def __init__(self, incomingLink, outgoingLink, bandwidth, distance):
        self.incomingLink = incomingLink
        self.outgoingLink = outgoingLink
        self.bandwidth = bandwidth
        self.distance = distance
class Link:
    """A point-to-point link carrying packets from ``src`` to ``dst``."""

    def __init__(self, src, dst, bandwidth, distance):
        self.src = src
        self.dst = dst
        self.bandwidth = bandwidth  # bits per second
        self.distance = distance    # metres

    def compute_transmit_delay(self, pkt):
        """Seconds needed to clock ``pkt`` onto the wire.

        :param pkt: object exposing ``num_bits``
        :return: transmission delay in seconds
        """
        return pkt.num_bits / self.bandwidth

    def compute_propagation_delay(self):
        """Seconds for a signal to travel the length of the link
        (propagation speed fixed at 2e8 m/s)."""
        return self.distance / 2e8

    def start_propagation(self, sim, owner, data):
        """Called once transmission completes: schedule the packet's
        arrival at ``dst`` after the propagation delay, and immediately
        let ``src`` start transmitting its next queued packet.

        :param sim: the Simulator driving the event loop
        :param owner: unused (event-handler calling convention)
        :param data: the Packet now propagating
        """
        data.transmit_time = sim.now
        sim.schedule_event(self.dst.receive, self.dst,
                           data, self.compute_propagation_delay(),
                           'receive[%d]' % data.seq_num)
        sim.schedule_event(self.src.next_tx, self.src, None, 0, 'next-tx')

    def __str__(self):
        return '%s-%s' % (self.src, self.dst)
class Event:
    """
    One scheduled simulator event.

    fh      -- handler invoked when the event fires
    owner   -- object the handler belongs to (informational only)
    data    -- payload associated with the event
    time    -- absolute simulation time at which to execute
    tag     -- human-readable description
    seq_num -- global creation order, used to break ties on equal times
    """

    seq_num = 0  # class-wide creation counter

    def __init__(self, fh, owner, data, time, tag):
        self.fh = fh
        self.owner = owner
        self.data = data
        self.time = time
        self.tag = tag
        # Stamp with the current counter value, then advance it.
        self.seq_num, Event.seq_num = Event.seq_num, Event.seq_num + 1

    def __str__(self):
        return 'id=%d @%.1f %s' % (self.seq_num, self.time, self.tag)
class Simulator:
    """
    Event-driven simulator.  Maintains a queue of events ordered by
    (time, seq_num); the head event fires by invoking its handler, which
    may schedule further events.  Also wires nodes together via Links.
    """

    def __init__(self):
        self.queue = []  # pending Event objects (sorted lazily in run())
        self.links = []
        self.now = 0     # current simulation time

    def connect(self, src, dst, bandwidth, distance):
        """Create a Link from ``src`` to ``dst`` and attach it to both nodes.

        BUG FIX: the original assigned the link to ``dst.input_queue``,
        clobbering that node's deque; the attribute meant to hold the
        link is ``incoming_link`` (see Node.__init__).
        """
        link = Link(src, dst, bandwidth, distance)
        src.outgoing_link = link
        dst.incoming_link = link
        self.links.append(link)

    def schedule_event(self, fh, owner, data, delay, tag):
        """Queue handler ``fh`` to run ``delay`` time units from now."""
        event = Event(fh, owner, data, self.now + delay, tag)
        self.queue.append(event)

    def run(self, duration):
        """Execute events in time order until ``duration`` is reached or
        the queue empties, printing one trace line per event."""
        print('%10s %8s %16s %16s %16s' % ('now', 'seq_num', 'data', 'where', 'tag'))
        while self.now < duration:
            self.queue.sort(key=lambda e: (e.time, e.seq_num))
            if len(self.queue) == 0: break
            hoq = self.queue.pop(0)
            self.now = hoq.time
            print('%10.1f %8d %16s %16s %16s' %
                  (self.now, hoq.seq_num, self.print_none(hoq.data), self.print_none(hoq.owner), hoq.tag))
            hoq.fh(self, hoq.owner, hoq.data)

    def print_none(self, x):
        """Render None as '-' for trace output."""
        if x is None:
            return '-'
        else:
            return str(x)
if __name__ == "__main__":
"""
- Setup a simple topology a --> b
"""
node_a = Node('A')
node_b = Node('B')
node_c = Node('C')
node_d = Node('D')
sim = Simulator()
sim.connect(node_a, node_c, 100, 2e8)
sim.connect(node_b, node_c, 100, 2e8)
sim.connect(node_c, node_d, 100, 2e8)
# generate the packets from A
seq_num = 0
for time in range(0, 10000, 1000):
for seq in range(10):
pkt = Packet(1000, seq_num, None)
sim.schedule_event(node_a.enqueue, node_a, pkt, time, 'queue')
seq_num += 1
# generate the packets from B
for time in range(0, 10000, 500):
for seq in range(2):
pkt = Packet(1000, seq_num, None)
sim.schedule_event(node_b.enqueue, node_b, pkt, time, 'queue')
seq_num += 1
sim.run(100)
# que packets at C
for time in range(0, 10000):
sim.schedule_event(node_c.enqueue, node_c, pkt, time, 'queue')
#sim.schedule_event(node_c.enqueue, node_b, pkt, time, 'link queue')
# transmit packets from C to D
# recieve packets from C
# ','.join([str(x) for x in list])
print('\n\nSimulation Results:')
print('enqueue time, tx time, receive time, end-to-end delay, queue delay')
for packet in received_packets:
queue_delay = packet.transmit_time - packet.enqueue_time
e2e_delay = packet.receive_time - packet.enqueue_time
print('receive', packet.enqueue_time, packet.transmit_time, packet.receive_time, e2e_delay, queue_delay)
| null |
networkstype.py
|
networkstype.py
|
py
| 8,690 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "collections.deque",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "collections.deque",
"line_number": 45,
"usage_type": "call"
}
] |
561200121
|
"""
*******************************************************
* Copyright (C) 2017 MindsDB Inc. <[email protected]>
*
* This file is part of MindsDB Server.
*
* MindsDB Server can not be copied and/or distributed without the express
* permission of MindsDB Inc
*******************************************************
"""
# import logging
from libs.helpers.logging import logging
import random
import socketserver as SocketServer
from mindsdb.mindsdb_server.proxies.mysql.data_types.mysql_packet import Packet
from mindsdb.mindsdb_server.proxies.mysql.data_types.mysql_packets import ErrPacket
from mindsdb.mindsdb_server.proxies.mysql.data_types.mysql_packets import HandshakePacket
from mindsdb.mindsdb_server.proxies.mysql.data_types.mysql_packets import HandshakeResponsePacket
from mindsdb.mindsdb_server.proxies.mysql.data_types.mysql_packets import OkPacket
from mindsdb.mindsdb_server.proxies.mysql.data_types.mysql_packets import SwitchOutPacket
from mindsdb.mindsdb_server.proxies.mysql.data_types.mysql_packets.resultset_packet import ResultsetPacket
import config as CONFIG
from libs.constants.mysql import *
from libs.controllers.session_controller import SessionController
from mindsdb.mindsdb_server.proxies.mysql.data_types.mysql_packets.command_packet import CommandPacket
# Module-level counter of client connections accepted so far; incremented
# once per session in MysqlProxy.initSession.
connection_id = 0
import traceback
# Characters used to build the random 20-char auth salt sent to clients.
ALPHABET = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
class MysqlProxy(SocketServer.BaseRequestHandler):
    """
    The Main Server controller class: one instance handles one MySQL
    client connection (handshake, authentication, then a command loop).
    """

    def initSession(self):
        """Initialise per-connection state: session, auth salt, counters."""
        global connection_id, ALPHABET
        logging.info('New connection [{ip}:{port}]'.format(
            ip=self.client_address[0], port=self.client_address[1]))
        logging.debug(self.__dict__)
        connection_id += 1
        self.session = SessionController()
        # 20-char random salt used for the MySQL auth handshake.
        self.salt = ''.join([random.choice(ALPHABET) for i in range(20)])
        self.socket = self
        self.count = 0  # packet sequence counter for this connection
        self.connection_id = connection_id
        self.logging = logging
        self.current_transaction = None
        logging.debug('session salt: {salt}'.format(salt=self.salt))

    def handle(self):
        """
        Handle one incoming connection: handshake, auth check, then loop
        answering COM_* command packets until the client disconnects.
        :return:
        """
        self.initSession()
        # SECURITY(review): credentials are hard-coded; replace with real
        # credential storage before any production use.
        HARDCODED_PASSWORD = 'test1'
        HARDCODED_USER = 'root'
        # start by sending of the handshake
        self.packet(HandshakePacket).send()
        # get the response
        handshake_resp = self.packet(HandshakeResponsePacket)
        handshake_resp.get()
        if handshake_resp.length == 0:
            # Empty response: acknowledge and close.
            self.packet(OkPacket).send()
            return
        # check if the authentication matches the desired password
        if handshake_resp.isAuthOk(HARDCODED_USER, HARDCODED_PASSWORD):
            # Switch auth method with a fresh salt, swallow the client's
            # reply, then confirm with OK.
            salt = ''.join([random.choice(ALPHABET) for i in range(20)])
            self.packet(SwitchOutPacket, seed=salt).send()
            tmp = self.socket.request.recv(MAX_PACKET_SIZE)
            self.packet(OkPacket).send()
            # stay on ping pong loop
            while True:
                logging.debug('Got a new packet')
                p = self.packet(CommandPacket)
                try:
                    success = p.get()
                    if success == False:
                        logging.info('Session closed by client')
                        return
                    logging.info('Command TYPE: {type}'.format(
                        type=VAR_NAME(p.type.value, prefix='COM')))
                    if p.type.value == COM_QUERY:
                        try:
                            sql = p.sql.value.decode('utf-8')
                        except:
                            logging.error('SQL contains non utf-8 values: {sql}'.format(sql=p.sql.value))
                            self.packet(OkPacket).send()
                            continue
                        self.current_transaction = self.session.newTransaction(sql_query=sql)
                        if self.current_transaction.output_data_array is None:
                            self.packet(OkPacket).send()
                        else:
                            self.packet(ResultsetPacket, metadata=self.current_transaction.output_metadata,
                                        data_array=self.current_transaction.output_data_array).send()
                    else:
                        logging.info('Command has no specific handler, return OK msg')
                        logging.debug(str(p))
                        # p.pprintPacket() TODO: Make a version of print packet
                        # that sends it to debug isntead
                        self.packet(OkPacket).send()
                except:
                    logging.warning('Session closed, on packet read error')
                    logging.debug(traceback.format_exc())
                    break
        # else send error packet
        else:
            msg = 'Access denied for user {user} (using password: YES)'.format(
                user=self.session.username)
            self.packet(ErrPacket, err_code=ER_PASSWORD_NO_MATCH,
                        msg=msg).send()
            logging.warning('AUTH FAIL')

    def packet(self, packetClass=Packet, **kwargs):
        """
        Factory method for packets
        :param packetClass: Packet subclass to instantiate
        :param kwargs: forwarded to the packet constructor
        :return: the packet, bound to this connection's socket/session
        """
        return packetClass(socket=self.socket, seq=self.count, session=self.session, proxy=self, **kwargs)

    @staticmethod
    def startProxy():
        """
        Create a server and wait for incoming connections until Ctrl-C
        """
        logging.basicConfig(**CONFIG.PROXY_LOG_CONFIG)
        HOST, PORT = CONFIG.PROXY_SERVER_HOST, CONFIG.PROXY_SERVER_PORT
        logging.info('Starting MindsDB Mysql proxy server on tcp://{host}:{port}'.format(host=HOST, port=PORT))
        # Create the server
        # BUG FIX: the original bound a hard-coded address
        # ("192.168.1.17", 3306) instead of the configured HOST/PORT that
        # the log line above advertises.
        server = SocketServer.ThreadingTCPServer((HOST, PORT), MysqlProxy)
        # Activate the server; this will keep running until you
        # interrupt the program with Ctrl-C
        logging.info('Waiting for incoming connections...')
        server.serve_forever()
if __name__ == "__main__":
MysqlProxy.startProxy()
| null |
mindsdb/proxies/mysql/mysql_proxy.py
|
mysql_proxy.py
|
py
| 6,349 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "socketserver.BaseRequestHandler",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "libs.helpers.logging.logging.info",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "libs.helpers.logging.logging",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "libs.helpers.logging.logging.debug",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "libs.helpers.logging.logging",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "libs.controllers.session_controller.SessionController",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "libs.helpers.logging.logging",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "libs.helpers.logging.logging.debug",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "libs.helpers.logging.logging",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "mindsdb.mindsdb_server.proxies.mysql.data_types.mysql_packets.HandshakePacket",
"line_number": 78,
"usage_type": "argument"
},
{
"api_name": "mindsdb.mindsdb_server.proxies.mysql.data_types.mysql_packets.HandshakeResponsePacket",
"line_number": 80,
"usage_type": "argument"
},
{
"api_name": "mindsdb.mindsdb_server.proxies.mysql.data_types.mysql_packets.OkPacket",
"line_number": 83,
"usage_type": "argument"
},
{
"api_name": "random.choice",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "mindsdb.mindsdb_server.proxies.mysql.data_types.mysql_packets.SwitchOutPacket",
"line_number": 90,
"usage_type": "argument"
},
{
"api_name": "mindsdb.mindsdb_server.proxies.mysql.data_types.mysql_packets.OkPacket",
"line_number": 92,
"usage_type": "argument"
},
{
"api_name": "libs.helpers.logging.logging.debug",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "libs.helpers.logging.logging",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "mindsdb.mindsdb_server.proxies.mysql.data_types.mysql_packets.command_packet.CommandPacket",
"line_number": 96,
"usage_type": "argument"
},
{
"api_name": "libs.helpers.logging.logging.info",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "libs.helpers.logging.logging",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "libs.helpers.logging.logging.info",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "libs.helpers.logging.logging",
"line_number": 105,
"usage_type": "name"
},
{
"api_name": "libs.helpers.logging.logging.error",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "libs.helpers.logging.logging",
"line_number": 112,
"usage_type": "name"
},
{
"api_name": "mindsdb.mindsdb_server.proxies.mysql.data_types.mysql_packets.OkPacket",
"line_number": 113,
"usage_type": "argument"
},
{
"api_name": "mindsdb.mindsdb_server.proxies.mysql.data_types.mysql_packets.OkPacket",
"line_number": 118,
"usage_type": "argument"
},
{
"api_name": "mindsdb.mindsdb_server.proxies.mysql.data_types.mysql_packets.resultset_packet.ResultsetPacket",
"line_number": 120,
"usage_type": "argument"
},
{
"api_name": "libs.helpers.logging.logging.info",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "libs.helpers.logging.logging",
"line_number": 124,
"usage_type": "name"
},
{
"api_name": "libs.helpers.logging.logging.debug",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "libs.helpers.logging.logging",
"line_number": 125,
"usage_type": "name"
},
{
"api_name": "mindsdb.mindsdb_server.proxies.mysql.data_types.mysql_packets.OkPacket",
"line_number": 128,
"usage_type": "argument"
},
{
"api_name": "libs.helpers.logging.logging.warning",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "libs.helpers.logging.logging",
"line_number": 130,
"usage_type": "name"
},
{
"api_name": "libs.helpers.logging.logging.debug",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "libs.helpers.logging.logging",
"line_number": 131,
"usage_type": "name"
},
{
"api_name": "traceback.format_exc",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "mindsdb.mindsdb_server.proxies.mysql.data_types.mysql_packets.ErrPacket",
"line_number": 138,
"usage_type": "argument"
},
{
"api_name": "libs.helpers.logging.logging.warning",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "libs.helpers.logging.logging",
"line_number": 140,
"usage_type": "name"
},
{
"api_name": "mindsdb.mindsdb_server.proxies.mysql.data_types.mysql_packet.Packet",
"line_number": 142,
"usage_type": "name"
},
{
"api_name": "libs.helpers.logging.logging.basicConfig",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "libs.helpers.logging.logging",
"line_number": 157,
"usage_type": "name"
},
{
"api_name": "config.PROXY_LOG_CONFIG",
"line_number": 157,
"usage_type": "attribute"
},
{
"api_name": "config.PROXY_SERVER_HOST",
"line_number": 158,
"usage_type": "attribute"
},
{
"api_name": "config.PROXY_SERVER_PORT",
"line_number": 158,
"usage_type": "attribute"
},
{
"api_name": "libs.helpers.logging.logging.info",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "libs.helpers.logging.logging",
"line_number": 159,
"usage_type": "name"
},
{
"api_name": "socketserver.ThreadingTCPServer",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "libs.helpers.logging.logging.info",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "libs.helpers.logging.logging",
"line_number": 166,
"usage_type": "name"
}
] |
600341298
|
from flask import Flask, request,render_template, url_for ,redirect
# UPLOAD_FOLDER = 'static/uploads'  # (unused leftover, kept for reference)
# Flask application instance; all routes below attach to it.
app = Flask(__name__)
@app.route('/')
def hello():
    """Serve the landing page (same template as /test)."""
    return render_template('test.html')
@app.route('/test',methods=['GET', 'POST'])
def test():
    """Render the test page."""
    return render_template('test.html')
@app.route('/first1',methods=['GET','POST'])
def first1():
    """Render the add-vaccine form page."""
    return render_template('first1.html')
def display_on_console(vaccine_name, city_name, ava_date, vacc_qty, expiry_date):
    """Echo one vaccine record to the console, one labelled line each."""
    for label, value in (
        ("Vaccine Name :\t", vaccine_name),
        ("Available In :\t", city_name),
        ("Date :\t", ava_date),
        ("Vaccine Qty :\t", vacc_qty),
        ("Expiry Date :\t", expiry_date),
    ):
        print(label, value)
def display_all():
    # NOTE(review): data1 starts empty, so range(len(data1)) is empty and
    # the input() loop below never executes — this function is effectively
    # a no-op.  Presumably unfinished; confirm intent before removing.
    data1=[]
    """data2=[]
    data3=[]
    data4=[]
    data5=[]"""
    for i in range(len(data1)):
        _vac_name=input()
        data1.append(_vac_name)
@app.route('/first2',methods=['GET','POST'])
def first2():
    """Read the add-vaccine form fields and echo them to the console.

    NOTE(review): the submitted values are printed but not persisted
    anywhere — storage was presumably still to be implemented.
    """
    print("Inside first2")
    _vac_name = request.form['fname']
    ava_city = request.form['dname']
    ava_date = request.form['lname']
    qt_vacc = request.form['mname']
    ex_date = request.form['ename']
    display_on_console(_vac_name,ava_city,ava_date,qt_vacc,ex_date)
    print("Calling HTML Page in app route first 2")
    return render_template('first1.html')
"""def display_all():
data1=[]
data2=[]
data3=[]
data4=[]
data5=[]
for i in range(len(data1)):
_vac_name=input()
data1.append(_vac_name)"""
@app.route('/second1',methods=['GET', 'POST'])
def second2():
    """Render the second form page.

    NOTE(review): route '/second1' maps to function second2 and
    '/second2' maps to second1 below — the swap looks accidental; confirm.
    """
    return render_template('second2.html')
def outputdisplay(vaccine_namee, available_city, available_date, quantity_vaccine, expi_date):
    """Echo one vaccine record to the console, one labelled line each."""
    for label, value in (
        ("Vaccine Name :\t", vaccine_namee),
        ("Avaliable City :\t", available_city),
        ("Date :\t", available_date),
        ("Vaccine Qty :\t", quantity_vaccine),
        ("Expiry Date :\t", expi_date),
    ):
        print(label, value)
@app.route('/second2',methods=['GET','POST'])
def second1():
    """Read the second form's fields, echo them to the console, re-render."""
    _vac_name = request.form['vname']
    av_city = request.form['aname']
    av_date = request.form['avname']
    qtn_vacc = request.form['nname']
    exp_date = request.form['qname']
    outputdisplay(_vac_name,av_city,av_date,qtn_vacc,exp_date)
    return render_template('second2.html')
def dispaly_all(_vac_name, av_city, av_date, qtn_vacc, exp_date):
    """Print every field of a vaccine record to the console.

    BUG FIX: the original printed the literal strings "_vac_name",
    "av_city", etc. (the quoted parameter names) instead of the argument
    values, and never printed ``exp_date`` at all.
    """
    print(_vac_name)
    print(av_city)
    print(av_date)
    print(qtn_vacc)
    print(exp_date)
@app.route('/third1',methods=['GET','POST'])
def third():
    """Render the display page."""
    return render_template('third.html')
"""def display_all(_vac_name,av_city,av_date,qtn_vacc,exp_date):
vaccine=data['_vac_name']
city=data['av_city']
date=data['av_data']
quantity=data['qtn_vacc']
expire=data['exp_date']"""
#data=[_vac_name,av_city,av_date
"""_vac_name = request.form['vname']
av_city = request.form['aname']
av_date = request.form['avname']
qtn_vacc = request.form['nname']
exp_date = request.form['qname']
data=
vaccine=data['vname']
city=data['aname']
date=data['avname']
quantity=data['nname']
expire=data['qname']
av_city=[]
data.append(_vac_name)
print("k")
print(data)"""
@app.route('/third',methods=['GET','POST'])
def third1():
    """Read the vaccine form fields and render them on third.html.

    BUG FIX: the original referenced names that were never defined
    (``_vac_name`` ... and a ``data`` dict) and called a non-existent
    ``display_all`` helper, so every request raised NameError.  The
    commented-out form reads have been restored and the defined
    ``dispaly_all`` helper is used instead.
    """
    _vac_name = request.form['vname']
    av_city = request.form['aname']
    av_date = request.form['avname']
    qtn_vacc = request.form['nname']
    exp_date = request.form['qname']
    dispaly_all(_vac_name, av_city, av_date, qtn_vacc, exp_date)
    print(_vac_name, av_city, av_date)
    return render_template('third.html', vaccine=_vac_name, city=av_city,
                           date=av_date, quantity=qtn_vacc, expire=exp_date)
if __name__ == '__main__':
    # This is used when running locally; Gunicorn is used to run the app
    # in production (see the commented alternatives below).
    #app.run(host='0.0.0.0', port=8080, debug=True, processes=4, threaded=True)
    app.run(threaded=True,debug=True)
    #app.run(host='127.0.0.1', port=8080, debug=True)
## [END app]
| null |
covid19_vaccine/main.py
|
main.py
|
py
| 4,013 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "flask.Flask",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 136,
"usage_type": "call"
}
] |
196290852
|
import os
import pandas as pd
import wget
LIFESAT_PATH = os.path.join("datasets", "lifesat", "")
DOWNLOAD_ROOT = "https://raw.githubusercontent.com/ageron/handson-ml2/master/"
LIFESAT_URL = DOWNLOAD_ROOT + "datasets/lifesat/"
FILE_NAMES = ("oecd_bli_2015.csv", "gdp_per_capita.csv")
def fetch_lifesat_data(lifesat_url=LIFESAT_URL, lifesat_path=LIFESAT_PATH):
    """Download the lifesat CSV files into ``lifesat_path``.

    Files that already exist locally are skipped with a warning.

    :param lifesat_url: base URL the files are fetched from
    :param lifesat_path: local directory to store them in
    """
    if not os.path.exists(lifesat_path):
        os.makedirs(lifesat_path)
    for file_name in FILE_NAMES:
        file_path = os.path.join(lifesat_path, file_name)
        # BUG FIX: announce the download only once we know it is needed
        # (the original printed "Downloading" even for files it then
        # skipped) and reuse the already-computed file_path as the wget
        # target instead of re-joining via string concatenation.
        if not os.path.exists(file_path):
            print("Downloading " + file_name)
            wget.download(lifesat_url + file_name, file_path)
        else:
            print("Warning: " + file_name + " already exists")
        print()
def load_lifesat_data(lifesat_path=LIFESAT_PATH, file_names=FILE_NAMES):
    """Load the two lifesat CSVs and return (oecd_bli, gdp_per_capita).

    ``file_names`` must hold exactly two entries — the OECD BLI file and
    the GDP-per-capita file, in that order.  Returns None on bad input.
    """
    if len(file_names) != 2:
        print("Error: Wrong input data")
        return
    bli_file, gdp_file = file_names
    oecd_bli = pd.read_csv(os.path.join(lifesat_path, bli_file), thousands=',')
    # The GDP file is tab-separated, latin-1 encoded, with 'n/a' markers.
    gdp_per_capita = pd.read_csv(
        os.path.join(lifesat_path, gdp_file),
        thousands=',', delimiter='\t', encoding='latin1', na_values='n/a')
    return oecd_bli, gdp_per_capita
def prepare_country_stats(oecd_bli, gdp_per_capita):
    """Merge the OECD better-life and GDP tables into one tidy frame.

    Keeps only total-population BLI rows, joins on country, sorts by GDP
    and drops seven outlier rows (indices 0, 1, 6, 8, 33, 34, 35 of the
    sorted 36-row frame), returning the remaining 29 rows with columns
    ["GDP per capita", "Life satisfaction"].

    Note: ``gdp_per_capita`` is modified in place (column rename and
    re-indexing), matching the original behaviour.
    """
    bli = oecd_bli[oecd_bli["INEQUALITY"] == "TOT"].pivot(
        index="Country", columns="Indicator", values="Value")
    gdp_per_capita.rename(columns={"2015": "GDP per capita"}, inplace=True)
    gdp_per_capita.set_index("Country", inplace=True)
    merged = bli.merge(gdp_per_capita, left_index=True, right_index=True)
    merged.sort_values(by="GDP per capita", inplace=True)
    dropped = {0, 1, 6, 8, 33, 34, 35}
    kept = [i for i in range(36) if i not in dropped]
    return merged[["GDP per capita", "Life satisfaction"]].iloc[kept]
| null |
example1-1/util/dataset.py
|
dataset.py
|
py
| 2,047 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.path.join",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "wget.download",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_csv",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "pandas.merge",
"line_number": 50,
"usage_type": "call"
}
] |
304746831
|
import pygame
import os.path
from Cell import Cell
# Loading all images used by the board: background, player marks,
# end-of-game overlay, win-line sprites and UI buttons (all from ./Pictures).
bg = pygame.image.load(os.path.join('Pictures', 'BG.jpg'))
cross = pygame.image.load(os.path.join('Pictures', 'Cross.png'))
zero = pygame.image.load(os.path.join('Pictures', 'Zero.png'))
draw = pygame.image.load(os.path.join('Pictures', 'Draw.png'))
right_diagonal = pygame.image.load(os.path.join('Pictures', 'Right diagonal.png'))
left_diagonal = pygame.image.load(os.path.join('Pictures', 'Left diagonal.png'))
vertical_line = pygame.image.load(os.path.join('Pictures', 'Vertical line.png'))
horizontal_line = pygame.image.load(os.path.join('Pictures', 'Horizontal line.png'))
back_picture = pygame.image.load(os.path.join('Pictures', 'Back.png'))
loading_picture = pygame.image.load(os.path.join('Pictures', 'Loading.jpg'))
class GameField:
    def __init__(self, window):
        # Surface everything is drawn onto.
        self.window = window
        # 3x3 board; '.' marks an empty cell, otherwise a Cell instance.
        self.cells = [['.', '.', '.'],
                      ['.', '.', '.'],
                      ['.', '.', '.']]
        # Fonts for the two players' score lines.
        self.score1 = pygame.font.SysFont('comicsans', 30)
        self.score2 = pygame.font.SysFont('comicsans', 30)
        self.back = back_picture
        self.loading = loading_picture
def draw(self):
"""Draws/redraws game field and back button"""
self.window.blit(bg, (0, 0))
for i in range(len(self.cells)):
for j in range(len(self.cells[i])):
if self.cells[i][j] != '.':
self.cells[i][j].draw(self.window)
self.draw_back()
pygame.display.update()
def start(self):
"""Reset all settings"""
self.window.blit(bg, (0, 0))
self.draw_back()
self.cells = [['.', '.', '.'],
['.', '.', '.'],
['.', '.', '.']]
def game_draw(self):
"""Handles draw situation. Draws background and title 'Draw'"""
pygame.time.delay(300)
self.window.blit(bg, (0, 0))
self.window.blit(draw, (0, 0))
def button_is_pressed(self, owner, x=None, y=None, i=None, j=None):
"""Makes new cell and returns location of it
if received information from which_button method
about location of cell.
If received (None, None) returns it.
It means that back button was pressed.
"""
if i is None or j is None:
i, j = self.which_button(x, y)
if i is None:
return None, None
if self.cells[i][j] == '.':
self.cells[i][j] = Cell(i, j, owner, owner.picture)
return i, j
return False
@staticmethod
def which_button(x, y):
"""Static method for determining which button was pressed.
Return location of button which was pressed
or (None, None) if Back button was pressed
"""
if 20 <= x <= 67 and 30 <= y <= 77:
return None, None
if 0 <= x < 200:
j = 0
elif 200 <= x <= 400:
j = 1
else:
j = 2
if 0 <= y < 200:
i = 0
elif 200 <= y <= 400:
i = 1
else:
i = 2
return i, j
def draw_line(self, i, j, name, winner):
"""Draws win line"""
winner.score += 1
if name == 'horizontal_line':
self.window.blit(horizontal_line, (0, 200 * i - 200))
elif name == 'vertical_line':
self.window.blit(vertical_line, (200 * j - 203, 0))
elif name == 'left_diagonal':
self.window.blit(left_diagonal, (0, 0))
else:
self.window.blit(right_diagonal, (0, 0))
pygame.display.update()
def draw_score(self, person1, person2):
"""Makes and draws score of both players"""
text1 = self.score1.render('Cross score: ' + str(person1.score), 1, (0, 0, 0))
text2 = self.score2.render('Oval score: ' + str(person2.score), 1, (0, 0, 0))
self.window.blit(text1, (20, 10))
self.window.blit(text2, (450, 10))
pygame.display.update()
def draw_back(self):
"""Draws the back button"""
self.window.blit(self.back, (20, 30))
pygame.display.update()
def draw_loading(self):
"""Draws the loading picture"""
print('loading...')
self.window.blit(self.loading, (0, 0))
pygame.display.update()
| null |
Desk.py
|
Desk.py
|
py
| 4,488 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pygame.image.load",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "os.path.path.join",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "pygame.image.load",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "os.path.path.join",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "pygame.image.load",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.path.path.join",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "pygame.image.load",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "os.path.path.join",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "pygame.image.load",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "os.path.path.join",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "pygame.image.load",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "os.path.path.join",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "pygame.image.load",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "os.path.path.join",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "pygame.image.load",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "os.path.path.join",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "pygame.image.load",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "os.path.path.join",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "pygame.image.load",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "os.path.path.join",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "pygame.font.SysFont",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "pygame.font.SysFont",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.update",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "pygame.time.delay",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "pygame.time",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "Cell.Cell",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "pygame.display.update",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 108,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.update",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 116,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.update",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 121,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.update",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 127,
"usage_type": "attribute"
}
] |
360007670
|
import numpy as np
import os
from scipy.interpolate import interp1d
import matplotlib.pyplot as plt
import nlp.nlp as nlp
import nlp.dynamics as dynamics
import nlp.cost_functions as cost_functions
import nlp.constraints as constraints
import nlp.measurements as measurements
import utils.gnss as gnss
import utils.utils as utils
import utils.ekf as ekf
import utils.data as data_utils
import utils.leastsquares as ls
import utils.simulate as simulate
import utils.vehicle_sim as vehicle_sim
def discrete_vehicle_dynamics(x, u, params=None, jac=False):
""" Discretizing using explicit Euler
x = [px, py, psi, vx, vy, psid, b, bd, pz]
u = [F, steering] """
dt = params["dt"]
xd = vehicle_sim.vehicle_dynamics(x[:6], u, {"tire_model_func":vehicle_sim.linear_tire_model})
xd = np.hstack((xd, np.array([x[7], 0.0, 0.0])))
x += dt*xd
C = params["car_params"] # constants
if jac:
J = np.eye(9)
dF_yf_dvx = C["C_AF"]*(x[4] + C["D_F"]*x[5])*(1./x[3]**2)
dF_yf_dvy = -C["C_AF"]/x[3]
dF_yf_dr = -C["C_AF"]*C["D_F"]/x[3]
dF_yr_dvx = C["C_AR"]*(x[4] - C["D_R"]*x[5])*(1./x[3]**2)
dF_yr_dvy = -C["C_AR"]/x[3]
dF_yr_dr = C["C_AR"]*C["D_R"]/x[3]
J[0, 2] += params["dt"]*(-x[3]*np.sin(x[2]) - x[4]*np.cos(x[2]))
J[0, 3] += params["dt"]*np.cos(x[2])
J[0, 4] += -params["dt"]*np.sin(x[2])
J[1, 2] += params["dt"]*(x[3]*np.cos(x[2]) - x[4]*np.sin(x[2]))
J[1, 3] += params["dt"]*np.sin(x[2])
J[1, 4] += params["dt"]*np.cos(x[2])
J[2, 5] += params["dt"]
J[3, 3] += -(params["dt"]/C["M"])*(np.sin(u[1])*dF_yf_dvx)
J[3, 4] += params["dt"]*(x[5] - (np.sin(u[1])*dF_yf_dvy)/C["M"])
J[3, 5] += params["dt"]*(x[4] - (np.sin(u[1])*dF_yf_dr)/C["M"])
J[4, 3] += params["dt"]*((np.cos(u[1])*dF_yf_dvx + dF_yr_dvx)/C["M"] - x[5])
J[4, 4] += (params["dt"]/C["M"])*(np.cos(u[1])*dF_yf_dvy + dF_yr_dvy)
J[4, 5] += params["dt"]*((np.cos(u[1])*dF_yf_dr + dF_yr_dr)/C["M"] - x[3])
J[5, 3] += (params["dt"]/C["I_Z"])*(C["D_F"]*np.cos(u[1])*dF_yf_dvx - C["D_R"]*dF_yr_dvx)
J[5, 4] += (params["dt"]/C["I_Z"])*(C["D_F"]*np.cos(u[1])*dF_yf_dvy - C["D_R"]*dF_yr_dvy)
J[5, 5] += (params["dt"]/C["I_Z"])*(C["D_F"]*np.cos(u[1])*dF_yf_dr - C["D_R"]*dF_yr_dr)
J[6, 7] += params["dt"]
return x, J
else:
return x
def vehicle_sensors_model(x, params=None, jac=False):
# xmeas = [x,y,z,b,bd]
x_meas = np.array([x[0], x[1], x[8], x[6], x[7]])
if jac:
y = np.array([])
J = np.array([]).reshape(-1, 9)
y_pr, J_pr = gnss.multi_pseudorange(x_meas, params=params, jac=True)
N_pr = y_pr.shape[0] # Number of pseudorange measurements
# Reorder to make sure the Jacobians match
J_pr = np.hstack((J_pr[:,0].reshape(N_pr, 1), J_pr[:,1].reshape(N_pr, 1),
np.zeros((N_pr, 4)), J_pr[:,3].reshape(N_pr, 1), J_pr[:,4].reshape(N_pr, 1),
J_pr[:,2].reshape(N_pr, 1)))
# Now combine into measurement and Jacobian
y = np.hstack((y, y_pr))
J = np.vstack((J, J_pr))
return y, J
else:
y = gnss.multi_pseudorange(x_meas, params=params, jac=False)
return y
def check_for_divergence(x, x_true):
if np.linalg.norm(x-x_true) > 100:
return True
else:
return False
# Set directory paths
dir_path = os.path.dirname(os.path.realpath(__file__))
data_path = os.path.join(dir_path, 'data')
car_path = os.path.join(data_path, 'autonomous-car')
# Reference location on Earth (Hoover Tower)
lat0 = 37.4276
lon0 = -122.1670
h0 = 0
p_ref_ECEF = utils.lla2ecef(np.array([lat0, lon0, h0]))
# Load data
sensors = data_utils.load_obj(car_path + '/sim/sensor_data')
data_gnss = sensors["gnss"]
data_compass = sensors["compass"]
car_params = vehicle_sim.get_parameters()
traj = data_utils.load_obj(car_path + '/sim/traj_data')
################################################################
# Least Squares
################################################################
LS = ls.runLeastSquares(data_gnss["t"], data_gnss["sat_pos"], data_gnss["pr"], p_ref_ECEF=p_ref_ECEF)
data_utils.save_obj(LS, car_path + '/filtering/leastsquares')
################################################################
# Cost matrices for EKF and NLP
################################################################
Q_NLP = np.diag([0.01, 0.01, 0.01, 100, 500, 500, .001, .001, .001]) # covariance for dynamics
Q_EKF = .001*Q_NLP # 1/1000 factor due to time discretization of .01 and sampling time of .1
r_pr = float(data_gnss["R"]) # covariance for pseudorange measurement
P_NLP = np.diag(1*np.ones(9))
P_EKF = P_NLP
################################################################
# EKF
################################################################
dt = traj["dt"]
N_steps = traj["t"].shape[0]
EKF = {"t":traj["t"], "p_ref_ECEF":p_ref_ECEF, "bias":np.zeros(N_steps),
"x_ENU":np.zeros(N_steps), "y_ENU":np.zeros(N_steps), "z_ENU":np.zeros(N_steps),
"lat":np.zeros(N_steps), "lon":np.zeros(N_steps), "h":np.zeros(N_steps),
"psi":np.zeros(N_steps), "vx":np.zeros(N_steps), "vy":np.zeros(N_steps)}
# Create EKF object
xhat0 = np.hstack((traj["x0"], np.array([data_gnss["b0"], data_gnss["alpha"], 0.0])))
ekf_filter = ekf.EKF(discrete_vehicle_dynamics, vehicle_sensors_model, xhat0, P_EKF)
# Run EKF
for (k, t) in enumerate(traj["t"]):
EKF["x_ENU"][k] = ekf_filter.mu[0]
EKF["y_ENU"][k] = ekf_filter.mu[1]
EKF["z_ENU"][k] = ekf_filter.mu[8]
EKF["psi"][k] = ekf_filter.mu[2]
EKF["vx"][k] = ekf_filter.mu[3]
EKF["vy"][k] = ekf_filter.mu[4]
EKF["bias"][k] = ekf_filter.mu[6]
# Only perform measurement correction when GNSS data is available
i0 = np.where(data_gnss["t"] <= t + .00001)[0][-1]
i1 = np.where(data_gnss["t"] >= t - .00001)[0][0]
if i0 == i1:
# Convert satellite positions to ENU coordinates
N_sat = data_gnss["pr"][i0].shape[0]
sat_pos_k = np.zeros((N_sat, 3))
for j in range(data_gnss["sat_pos"][i0].shape[0]):
sat_pos_ENU = utils.ecef2enu(data_gnss["sat_pos"][i0][j,:], p_ref_ECEF)
sat_pos_k[j,:] = sat_pos_ENU
R_k = np.diag(r_pr*np.ones(data_gnss["pr"][i0].shape[0]))
pr_k = data_gnss["pr"][i0]
else:
sat_pos_k = None
R_k = None
pr_k = None
# Update EKF using measurement and control
u_k = traj["u"][:,k]
dparams = {"dt":dt, "car_params":car_params}
mparams = {"sat_pos":sat_pos_k}
ekf_filter.update(u_k, pr_k, Q_EKF, R_k, dyn_func_params=dparams, meas_func_params=mparams)
# Check for divergence
if check_for_divergence(ekf_filter.mu[:2], traj["x"][:2,k]):
EKF["t"] = EKF["t"][:k]
EKF["bias"] = EKF["bias"][:k]
EKF["x_ENU"] = EKF["x_ENU"][:k]
EKF["y_ENU"] = EKF["y_ENU"][:k]
EKF["z_ENU"] = EKF["z_ENU"][:k]
EKF["psi"] = EKF["psi"][:k]
EKF["vx"] = EKF["vx"][:k]
EKF["vy"] = EKF["vy"][:k]
break
data_utils.save_obj(EKF, car_path + '/filtering/ekf')
################################################################
# NLP using 2 norm
################################################################
T = 2 # finite time horizon for NLP
N = 5 # number of pseudospectral nodes
n = 9 # state is x = [px, py, psi, vx, vy, psid, b, bd, pz]
m = 2 # control is [throttle, steer]
dt_gnss = data_gnss["t"][1] - data_gnss["t"][0]
N_sat = 11
problem = nlp.fixedTimeOptimalEstimationNLP(N, T, n, m)
X = problem.addVariables(N+1, n, name='x')
U, W = problem.addDynamics(dynamics.vehicle_dynamics_and_gnss, X, None, None, {"car_params":car_params})
problem.addDynamicsCost(cost_functions.weighted_l2_norm, W, {"Q":np.linalg.inv(Q_NLP)})
problem.addVarBounds(X, 2, -np.pi, np.pi)
problem.addVarBounds(X, 3, 0, np.inf)
X0 = problem.addInitialCost(cost_functions.weighted_l2_norm, X[0], {"Q":np.linalg.inv(P_NLP)})
# Add costs for the pseudorange measurements at sampling frequency
N_gnss = int(np.floor(T/dt_gnss))
t_gnss = np.linspace(0, T, N_gnss + 1)
Y = []
R = []
sat_pos = []
for i in range(N_gnss + 1):
t_i = np.array([[t_gnss[i]]])
Y_i = []
R_i = []
sat_pos_i = []
for j in range(N_sat):
sat_pos_ij = problem.addParameter(1, 3)[0]
R_ij = problem.addParameter(1, 1)[0]
Y_ij = problem.addResidualCost(measurements.vehicle_pseudorange, X, t_i, None,
R_ij, {"p":1, "sat_pos":sat_pos_ij})[0]
Y_i.append(Y_ij)
R_i.append(R_ij)
sat_pos_i.append(sat_pos_ij)
Y.append(Y_i)
R.append(R_i)
sat_pos.append(sat_pos_i)
problem.build()
NLP = {"t":[], "p_ref_ECEF":p_ref_ECEF, "bias":[],
"x_ENU":[], "y_ENU":[], "z_ENU":[],
"lat":[], "lon":[], "h":[],
"psi":[], "vx":[], "vy":[], "t_solve":[]}
# Run NLP filter
DT = 1 # s, how often to recompute
tf = 89 # s, final time to stop computing
compute_times = np.linspace(0, tf, np.floor(tf/DT) + 1)
xhat0 = np.hstack((traj["x0"], np.array([data_gnss["b0"], data_gnss["alpha"], 0.0])))
for (step, t0) in enumerate(compute_times):
traj_indices = utils.get_time_indices(traj["t"], t0, t0+T)
gnss_indices = utils.get_time_indices(data_gnss["t"], t0, t0+T)
traj_shifted_times = traj["t"][traj_indices] - t0
gnss_shifted_times = data_gnss["t"][gnss_indices] - t0
# Define control inputs
u = traj["u"][:, traj_indices]
problem.setControl(U, traj_shifted_times, u)
# Set the initial condition
problem.setParameter(X0, xhat0)
# Specify the measurements
for i in range(N_gnss + 1):
i_gnss = gnss_indices[i]
t_i = np.array([[t_gnss[i]]])
N_sat_i = data_gnss["sat_pos"][i_gnss].shape[0]
for j in range(N_sat):
# Not every time step will have N_sat measurements, so set some costs to 0
if j < N_sat_i:
sat_pos_ENU = utils.ecef2enu(data_gnss["sat_pos"][i_gnss][j,:], p_ref_ECEF)
R_ij = dt_gnss*np.linalg.inv(np.diag([r_pr])) # multiply by dt to integrate over the sampling interval
problem.setParameter(R[i][j], R_ij)
problem.setParameter(sat_pos[i][j], sat_pos_ENU)
y_ij = np.array([[data_gnss["pr"][i_gnss][j]]])
problem.setMeasurement(Y[i][j], t_i, y_ij)
else:
problem.setParameter(R[i][j], 0.0)
problem.setParameter(sat_pos[i][j], np.zeros(3))
problem.setMeasurement(Y[i][j], t_i, np.array([[0.0]]))
# Solve problem
print('Solving problem.')
problem.solve(warmstart=True)
t = np.linspace(0, T, 10)
x_opt = problem.extractSolution('x', t) # full trajectory sampling
xhat0 = problem.extractSolution('x', [DT]) # next initialization point
# Store solution information
for k in range(t.shape[0]):
p_ENU = np.array([x_opt[k, 0], x_opt[k, 1], x_opt[k, 8]])
p_ECEF = utils.enu2ecef(p_ENU, p_ref_ECEF)
p_LLA = utils.ecef2lla(p_ECEF)
NLP["x_ENU"].append(p_ENU[0])
NLP["y_ENU"].append(p_ENU[1])
NLP["z_ENU"].append(p_ENU[2])
NLP["lat"].append(p_LLA[0])
NLP["lon"].append(p_LLA[1])
NLP["h"].append(p_LLA[2])
NLP["bias"].append(x_opt[k,6])
NLP["psi"].append(x_opt[k,2])
NLP["vx"].append(x_opt[k,3])
NLP["vy"].append(x_opt[k,4])
NLP["t"].append(t0 + t[k])
NLP["t_solve"].append(problem.solver["t_wall_total"])
data_utils.save_obj(NLP, car_path + '/filtering/nlp-l2')
################################################################
# NLP using huber loss
################################################################
problem = nlp.fixedTimeOptimalEstimationNLP(N, T, n, m)
X = problem.addVariables(N+1, n, name='x')
U, W = problem.addDynamics(dynamics.vehicle_dynamics_and_gnss, X, None, None, {"car_params":car_params})
problem.addDynamicsCost(cost_functions.pseudo_huber_loss, W, {"Q":np.linalg.inv(Q_NLP), "delta":5.0})
problem.addVarBounds(X, 2, -np.pi, np.pi)
problem.addVarBounds(X, 3, 0, np.inf)
X0 = problem.addInitialCost(cost_functions.weighted_l2_norm, X[0], {"Q":np.linalg.inv(P_NLP)})
# Add costs for the pseudorange measurements at sampling frequency
Y = []
R = []
sat_pos = []
for i in range(N_gnss + 1):
t_i = np.array([[t_gnss[i]]])
Y_i = []
R_i = []
sat_pos_i = []
for j in range(N_sat):
sat_pos_ij = problem.addParameter(1, 3)[0]
R_ij = problem.addParameter(1, 1)[0]
Y_ij = problem.addResidualCost(measurements.vehicle_pseudorange, X, t_i, None,
R_ij, {"p":1, "sat_pos":sat_pos_ij})[0]
Y_i.append(Y_ij)
R_i.append(R_ij)
sat_pos_i.append(sat_pos_ij)
Y.append(Y_i)
R.append(R_i)
sat_pos.append(sat_pos_i)
problem.build()
NLP = {"t":[], "p_ref_ECEF":p_ref_ECEF, "bias":[],
"x_ENU":[], "y_ENU":[], "z_ENU":[],
"lat":[], "lon":[], "h":[],
"psi":[], "vx":[], "vy":[], "t_solve":[]}
# Run NLP filter
xhat0 = np.hstack((traj["x0"], np.array([data_gnss["b0"], data_gnss["alpha"], 0.0])))
for (step, t0) in enumerate(compute_times):
traj_indices = utils.get_time_indices(traj["t"], t0, t0+T)
gnss_indices = utils.get_time_indices(data_gnss["t"], t0, t0+T)
traj_shifted_times = traj["t"][traj_indices] - t0
gnss_shifted_times = data_gnss["t"][gnss_indices] - t0
# Define control inputs
u = traj["u"][:, traj_indices]
problem.setControl(U, traj_shifted_times, u)
# Set the initial condition
problem.setParameter(X0, xhat0)
# Specify the measurements
M_gnss = (N_gnss + 1)*N_sat # a value to normalize the cost of the measurements
for i in range(N_gnss + 1):
i_gnss = gnss_indices[i]
t_i = np.array([[t_gnss[i]]])
N_sat_i = data_gnss["sat_pos"][i].shape[0]
for j in range(N_sat):
# Not every time step will have N_sat measurements, so set some costs to 0
if j < N_sat_i:
sat_pos_ENU = utils.ecef2enu(data_gnss["sat_pos"][i_gnss][j,:], p_ref_ECEF)
R_ij = dt_gnss*np.linalg.inv(np.diag([r_pr]))
problem.setParameter(R[i][j], R_ij)
problem.setParameter(sat_pos[i][j], sat_pos_ENU)
y_ij = np.array([[data_gnss["pr"][i_gnss][j]]])
problem.setMeasurement(Y[i][j], t_i, y_ij)
else:
problem.setParameter(R[i][j], 0.0)
problem.setParameter(sat_pos[i][j], np.zeros(3))
problem.setMeasurement(Y[i][j], t_i, np.array([[0.0]]))
# Solve problem
print('Solving problem.')
problem.solve(warmstart=True)
t = np.linspace(0, T, 10)
x_opt = problem.extractSolution('x', t) # full trajectory sampling
xhat0 = problem.extractSolution('x', [DT]) # next initialization point
for k in range(t.shape[0]):
p_ENU = np.array([x_opt[k, 0], x_opt[k, 1], x_opt[k, 8]])
p_ECEF = utils.enu2ecef(p_ENU, p_ref_ECEF)
p_LLA = utils.ecef2lla(p_ECEF)
NLP["x_ENU"].append(p_ENU[0])
NLP["y_ENU"].append(p_ENU[1])
NLP["z_ENU"].append(p_ENU[2])
NLP["lat"].append(p_LLA[0])
NLP["lon"].append(p_LLA[1])
NLP["h"].append(p_LLA[2])
NLP["bias"].append(x_opt[k,6])
NLP["psi"].append(x_opt[k,2])
NLP["vx"].append(x_opt[k,3])
NLP["vy"].append(x_opt[k,4])
NLP["t"].append(t0 + t[k])
NLP["t_solve"].append(problem.solver["t_wall_total"])
data_utils.save_obj(NLP, car_path + '/filtering/nlp-huber')
| null |
autonomous-car.py
|
autonomous-car.py
|
py
| 15,648 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "utils.vehicle_sim.vehicle_dynamics",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "utils.vehicle_sim",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "utils.vehicle_sim.linear_tire_model",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "numpy.hstack",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.eye",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "utils.gnss.multi_pseudorange",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "utils.gnss",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "numpy.hstack",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "numpy.hstack",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "numpy.vstack",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "utils.gnss.multi_pseudorange",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "utils.gnss",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 88,
"usage_type": "attribute"
},
{
"api_name": "os.path.realpath",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 89,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 90,
"usage_type": "attribute"
},
{
"api_name": "utils.utils.lla2ecef",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "utils.utils",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "utils.data.load_obj",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "utils.data",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "utils.vehicle_sim.get_parameters",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "utils.vehicle_sim",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "utils.data.load_obj",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "utils.data",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "utils.leastsquares.runLeastSquares",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "utils.leastsquares",
"line_number": 108,
"usage_type": "name"
},
{
"api_name": "utils.data.save_obj",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "utils.data",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "numpy.diag",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "numpy.diag",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "numpy.hstack",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "utils.ekf.EKF",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "utils.ekf",
"line_number": 133,
"usage_type": "name"
},
{
"api_name": "numpy.where",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "utils.utils.ecef2enu",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "utils.utils",
"line_number": 153,
"usage_type": "name"
},
{
"api_name": "numpy.diag",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "utils.data.save_obj",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "utils.data",
"line_number": 179,
"usage_type": "name"
},
{
"api_name": "nlp.nlp.fixedTimeOptimalEstimationNLP",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "nlp.nlp",
"line_number": 190,
"usage_type": "name"
},
{
"api_name": "nlp.dynamics.vehicle_dynamics_and_gnss",
"line_number": 192,
"usage_type": "attribute"
},
{
"api_name": "nlp.dynamics",
"line_number": 192,
"usage_type": "name"
},
{
"api_name": "nlp.cost_functions.weighted_l2_norm",
"line_number": 193,
"usage_type": "attribute"
},
{
"api_name": "nlp.cost_functions",
"line_number": 193,
"usage_type": "name"
},
{
"api_name": "numpy.linalg.inv",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 193,
"usage_type": "attribute"
},
{
"api_name": "numpy.pi",
"line_number": 194,
"usage_type": "attribute"
},
{
"api_name": "numpy.inf",
"line_number": 195,
"usage_type": "attribute"
},
{
"api_name": "nlp.cost_functions.weighted_l2_norm",
"line_number": 196,
"usage_type": "attribute"
},
{
"api_name": "nlp.cost_functions",
"line_number": 196,
"usage_type": "name"
},
{
"api_name": "numpy.linalg.inv",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 196,
"usage_type": "attribute"
},
{
"api_name": "numpy.floor",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 205,
"usage_type": "call"
},
{
"api_name": "nlp.measurements.vehicle_pseudorange",
"line_number": 212,
"usage_type": "attribute"
},
{
"api_name": "nlp.measurements",
"line_number": 212,
"usage_type": "name"
},
{
"api_name": "numpy.linspace",
"line_number": 230,
"usage_type": "call"
},
{
"api_name": "numpy.floor",
"line_number": 230,
"usage_type": "call"
},
{
"api_name": "numpy.hstack",
"line_number": 231,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 231,
"usage_type": "call"
},
{
"api_name": "utils.utils.get_time_indices",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "utils.utils",
"line_number": 233,
"usage_type": "name"
},
{
"api_name": "utils.utils.get_time_indices",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "utils.utils",
"line_number": 234,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 248,
"usage_type": "call"
},
{
"api_name": "utils.utils.ecef2enu",
"line_number": 253,
"usage_type": "call"
},
{
"api_name": "utils.utils",
"line_number": 253,
"usage_type": "name"
},
{
"api_name": "numpy.linalg.inv",
"line_number": 254,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 254,
"usage_type": "attribute"
},
{
"api_name": "numpy.diag",
"line_number": 254,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 258,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 262,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 263,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 269,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 274,
"usage_type": "call"
},
{
"api_name": "utils.utils.enu2ecef",
"line_number": 275,
"usage_type": "call"
},
{
"api_name": "utils.utils",
"line_number": 275,
"usage_type": "name"
},
{
"api_name": "utils.utils.ecef2lla",
"line_number": 276,
"usage_type": "call"
},
{
"api_name": "utils.utils",
"line_number": 276,
"usage_type": "name"
},
{
"api_name": "utils.data.save_obj",
"line_number": 289,
"usage_type": "call"
},
{
"api_name": "utils.data",
"line_number": 289,
"usage_type": "name"
},
{
"api_name": "nlp.nlp.fixedTimeOptimalEstimationNLP",
"line_number": 295,
"usage_type": "call"
},
{
"api_name": "nlp.nlp",
"line_number": 295,
"usage_type": "name"
},
{
"api_name": "nlp.dynamics.vehicle_dynamics_and_gnss",
"line_number": 297,
"usage_type": "attribute"
},
{
"api_name": "nlp.dynamics",
"line_number": 297,
"usage_type": "name"
},
{
"api_name": "nlp.cost_functions.pseudo_huber_loss",
"line_number": 298,
"usage_type": "attribute"
},
{
"api_name": "nlp.cost_functions",
"line_number": 298,
"usage_type": "name"
},
{
"api_name": "numpy.linalg.inv",
"line_number": 298,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 298,
"usage_type": "attribute"
},
{
"api_name": "numpy.pi",
"line_number": 299,
"usage_type": "attribute"
},
{
"api_name": "numpy.inf",
"line_number": 300,
"usage_type": "attribute"
},
{
"api_name": "nlp.cost_functions.weighted_l2_norm",
"line_number": 301,
"usage_type": "attribute"
},
{
"api_name": "nlp.cost_functions",
"line_number": 301,
"usage_type": "name"
},
{
"api_name": "numpy.linalg.inv",
"line_number": 301,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 301,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 308,
"usage_type": "call"
},
{
"api_name": "nlp.measurements.vehicle_pseudorange",
"line_number": 315,
"usage_type": "attribute"
},
{
"api_name": "nlp.measurements",
"line_number": 315,
"usage_type": "name"
},
{
"api_name": "numpy.hstack",
"line_number": 331,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 331,
"usage_type": "call"
},
{
"api_name": "utils.utils.get_time_indices",
"line_number": 333,
"usage_type": "call"
},
{
"api_name": "utils.utils",
"line_number": 333,
"usage_type": "name"
},
{
"api_name": "utils.utils.get_time_indices",
"line_number": 334,
"usage_type": "call"
},
{
"api_name": "utils.utils",
"line_number": 334,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 349,
"usage_type": "call"
},
{
"api_name": "utils.utils.ecef2enu",
"line_number": 354,
"usage_type": "call"
},
{
"api_name": "utils.utils",
"line_number": 354,
"usage_type": "name"
},
{
"api_name": "numpy.linalg.inv",
"line_number": 355,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 355,
"usage_type": "attribute"
},
{
"api_name": "numpy.diag",
"line_number": 355,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 359,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 363,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 364,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 370,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 374,
"usage_type": "call"
},
{
"api_name": "utils.utils.enu2ecef",
"line_number": 375,
"usage_type": "call"
},
{
"api_name": "utils.utils",
"line_number": 375,
"usage_type": "name"
},
{
"api_name": "utils.utils.ecef2lla",
"line_number": 376,
"usage_type": "call"
},
{
"api_name": "utils.utils",
"line_number": 376,
"usage_type": "name"
},
{
"api_name": "utils.data.save_obj",
"line_number": 389,
"usage_type": "call"
},
{
"api_name": "utils.data",
"line_number": 389,
"usage_type": "name"
}
] |
323064837
|
import html as escaper
from typing import List
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import colors
from sklearn.metrics import confusion_matrix
from analyser.legal_docs import LegalDocument
from analyser.ml_tools import ProbableValue
from analyser.patterns import AV_PREFIX, AV_SOFT
from analyser.structures import ContractSubject
from analyser.structures import OrgStructuralLevel
from analyser.text_tools import Tokens
from analyser.transaction_values import ValueConstraint
head_types_colors = {'head.directors': 'crimson',
'head.all': 'orange',
'head.gen': 'blue',
'head.shareholders': '#666600',
'head.pravlenie': '#0099cc',
'head.unknown': '#999999'}
org_level_colors = {OrgStructuralLevel.BoardOfDirectors: 'crimson',
OrgStructuralLevel.ShareholdersGeneralMeeting: 'orange',
OrgStructuralLevel.CEO: 'blue',
OrgStructuralLevel.BoardOfCompany: '#0099cc',
None: '#999999'}
known_subjects_dict = {
ContractSubject.Charity: 'Благотворительность',
ContractSubject.RealEstate: "Сделки с имуществом",
ContractSubject.Lawsuit: "Судебные споры",
ContractSubject.Deal: "Совершение сделки",
ContractSubject.Other: "Прочее"
}
known_subjects = [
ContractSubject.Charity,
ContractSubject.RealEstate,
ContractSubject.Lawsuit]
org_level_dict = {OrgStructuralLevel.BoardOfDirectors: 'Совет директоров',
OrgStructuralLevel.ShareholdersGeneralMeeting: 'Общее собрание участников/акционеров',
OrgStructuralLevel.CEO: 'Генеральный директор',
OrgStructuralLevel.BoardOfCompany: 'Правление общества',
None: '*Неизвестный орган управления*'}
WARN = '\033[1;31m======== Dear Artem, ACHTUNG! 🔞 '
def as_smaller(x):
return f'<span style="font-size:80%;">{x}</span>'
def as_error_html(txt):
return f'<div style="color:red">⚠️ {txt}</div>'
def as_warning(txt):
return f'<div style="color:orange">⚠️ {txt}</div>'
def as_msg(txt):
return f'<div>{txt}</div>'
def as_quote(txt):
return f'<i style="margin-top:0.2em; margin-left:2em; font-size:90%">"...{txt} ..."</i>'
def as_headline_2(txt):
return f'<h2>{txt}</h2>'
def as_headline_3(txt):
return f'<h3 style="margin:0">{txt}</h3>'
def as_headline_4(txt):
return f'<h4 style="margin:0">{txt}</h4>'
def as_offset(txt):
return f'<div style="margin-left:2em">{txt}</div>'
def as_currency(v):
if v is None:
return "any"
return f'{v.value:20,.0f} {v.currency} '
class AbstractRenderer:
def sign_to_text(self, sign: int):
if sign < 0:
return " < "
if sign > 0:
return " > "
return ' = '
def sign_to_html(self, sign: int):
if sign < 0:
return " < "
if sign > 0:
return " > "
return ' = '
def value_to_html(self, vc: ValueConstraint):
color = '#333333'
if vc.sign > 0:
color = '#993300'
elif vc.sign < 0:
color = '#009933'
return f'<b style="color:{color}">{self.sign_to_html(vc.sign)} {vc.currency} {vc.value:20,.2f}</b> '
def render_value_section_details(self, value_section_info):
pass
def to_color_text(self, tokens, weights, colormap='coolwarm', print_debug=False, _range=None) -> str:
pass
def render_color_text(self, tokens, weights, colormap='coolwarm', print_debug=False, _range=None):
pass
def print_results(self, doc, results):
raise NotImplementedError()
def render_values(self, values: List[ProbableValue]):
for pv in values:
vc = pv.value
s = f'{self.sign_to_text(vc.sign)} \t {vc.currency} \t {vc.value:20,.2f} \t {pv.confidence:20,.2f} '
print(s)
def render_contents(self, doc):
pass
class SilentRenderer(AbstractRenderer):
pass
v_color_map = {
'deal_value_attention_vector': (1, 0.0, 0.5),
'soft$.$at_sum__': (0.9, 0.5, 0.0),
'$at_sum__': (0.9, 0, 0.1),
'soft$.$at_d_order_': (0.0, 0.3, 0.9),
f'{AV_PREFIX}margin_value': (1, 0.0, 0.5),
f'{AV_SOFT}{AV_PREFIX}margin_value': (1, 0.0, 0.5),
f'{AV_PREFIX}x_{ContractSubject.Charity}': (0.0, 0.9, 0.3),
f'{AV_SOFT}{AV_PREFIX}x_{ContractSubject.Charity}': (0.0, 1.0, 0.0),
f'{AV_PREFIX}x_{ContractSubject.Lawsuit}': (0.8, 0, 0.7),
f'{AV_SOFT}{AV_PREFIX}x_{ContractSubject.Lawsuit}': (0.9, 0, 0.9),
f'{AV_PREFIX}x_{ContractSubject.RealEstate}': (0.2, 0.2, 1),
f'{AV_SOFT}{AV_PREFIX}x_{ContractSubject.RealEstate}': (0.2, 0.2, 1),
}
colors_by_contract_subject = {
ContractSubject.RealEstate: (0.2, 0.2, 1),
ContractSubject.Lawsuit: (0.9, 0, 0.9),
ContractSubject.Charity: (0.0, 0.9, 0.3),
}
for k in colors_by_contract_subject:
v_color_map[f'{AV_SOFT}{AV_PREFIX}x_{k}'] = colors_by_contract_subject[k]
class HtmlRenderer(AbstractRenderer):
def _to_color_text(self, _tokens, weights, mpl_, colormap='coolwarm', _range=None, separator=' '):
tokens = [escaper.escape(t) for t in _tokens]
if len(tokens) == 0:
return " - empty -"
if len(weights) != len(tokens):
raise ValueError("number of weights differs weights={} tokens={}".format(len(weights), len(tokens)))
vmin = weights.min() - 0.00001
vmax = weights.max() + 0.00001
if _range is not None:
vmin = _range[0]
vmax = _range[1]
norm = mpl_.colors.Normalize(vmin=vmin - 0.5, vmax=vmax)
cmap = mpl_.cm.get_cmap(colormap)
html = ""
for d in range(0, len(weights)):
word = tokens[d]
if word == ' ':
word = ' _ '
token_color = mpl.colors.to_hex(cmap(norm(weights[d])))
html += f'<span title="{d} {weights[d]:.4f}" style="background-color:{token_color}">{word}{separator}</span>'
if tokens[d] == '\n':
html += "¶<br>"
return html
def map_attention_vectors_to_colors(self, search_result):
attention_vectors = {
search_result.attention_vector_name: search_result.get_attention(),
}
for subj in known_subjects:
attention_vectors[AV_PREFIX + f'x_{subj}'] = search_result.get_attention(AV_PREFIX + f'x_{subj}')
attention_vectors[AV_SOFT + AV_PREFIX + f'x_{subj}'] = search_result.get_attention(
AV_SOFT + AV_PREFIX + f'x_{subj}')
return attention_vectors
def sign_to_text(self, sign: int):
if sign < 0:
return " < "
if sign > 0:
return " > "
return ' = '
def probable_value_to_html(self, pv):
vc = pv.value
color = '#333333'
if vc.sign > 0:
color = '#993300'
elif vc.sign < 0:
color = '#009933'
return f'<b style="color:{color}">{self.sign_to_text(vc.sign)} {vc.currency} {vc.value:20,.2f}' \
f'<sup>confidence={pv.confidence:20,.2f}</sup></b> '
''' AZ:- 🌈 -----🌈 ------🌈 --------------------------END-Rendering COLORS--------'''
def mixclr(color_map, dictionary, min_color=None, _slice=None):
reds = None
greens = None
blues = None
fallback = (0.5, 0.5, 0.5)
for c in dictionary:
vector = np.array(dictionary[c])
if _slice is not None:
vector = vector[_slice]
if reds is None:
reds = np.zeros(len(vector))
if greens is None:
greens = np.zeros(len(vector))
if blues is None:
blues = np.zeros(len(vector))
vector_color = fallback
if c in color_map:
vector_color = color_map[c]
reds += vector * vector_color[0]
greens += vector * vector_color[1]
blues += vector * vector_color[2]
if min_color is not None:
reds += min_color[0]
greens += min_color[1]
blues += min_color[2]
def cut_(x):
up = [min(i, 1) for i in x]
down = [max(i, 0) for i in up]
return down
return np.array([cut_(reds), cut_(greens), cut_(blues)]).T
def to_multicolor_text(tokens, vectors, colormap, min_color=None, _slice=None) -> str:
if _slice is not None:
tokens = tokens[_slice]
_colors = mixclr(colormap, vectors, min_color=min_color, _slice=_slice)
html = ''
for i, token in enumerate(tokens):
c = _colors[i]
r = int(255 * c[0])
g = int(255 * c[1])
b = int(255 * c[2])
if token == '\n':
html += '<br>'
html += f'<span style="background:rgb({r},{g},{b})">{token} </span>'
return html
''' AZ:- 🌈 -----🌈 ------🌈 --------------------------END-Rendering COLORS--------'''
def _as_smaller(txt):
return f'<div font-size:12px">{txt}</div>'
def as_c_quote(txt):
return f'<div style="margin-top:0.2em; margin-left:2em; font-size:14px">"...{txt} ..."</div>'
def print_headers(contract: LegalDocument):
for p in contract.paragraphs:
print('\t --> 📂', contract.substr(p.header))
def _to_color_text(_tokens, weights, colormap='coolwarm', _range=None, separator=' '):
tokens = [escaper.escape(t) for t in _tokens]
if len(tokens) == 0:
return " - empty -"
if len(weights) != len(tokens):
raise ValueError("number of weights differs weights={} tokens={}".format(len(weights), len(tokens)))
vmin = weights.min() - 0.00001
vmax = weights.max() + 0.00001
if _range is not None:
vmin = _range[0]
vmax = _range[1]
norm = mpl.colors.Normalize(vmin=vmin - 0.5, vmax=vmax)
cmap = mpl.cm.get_cmap(colormap)
html = ""
for d in range(0, len(weights)):
word = tokens[d]
if word == ' ':
word = ' _ '
token_color = mpl.colors.to_hex(cmap(norm(weights[d])))
html += f'<span title="{d} {weights[d]:.4f}" style="background-color:{token_color}">{word}{separator}</span>'
if tokens[d] == '\n':
html += "¶<br>"
return html
def to_color_text(tokens, weights, colormap='coolwarm', _range=None, separator=' '):
return _to_color_text(tokens, weights, colormap=colormap, _range=_range, separator=separator)
def render_spans(spans, subdoc, attention_v, ht='') -> str:
ht += '<ol>'
for span in spans:
_s = slice(span[0], span[1])
ht += '<li>'
t = subdoc.tokens_map.tokens[_s]
att = attention_v[_s]
ht += to_color_text(t, att, _range=(0, 1.2))
ht += '<br><hr>'
ht += '</li>'
ht += '</ol>'
return ht
def render_slices(slices, subdoc, attention_v, ht='') -> str:
ht += '<ol>'
for _s in slices:
ht += '<li>'
t = subdoc.tokens_map.tokens[_s]
att = attention_v[_s]
ht += to_color_text(t, att, _range=(0, 1.2))
ht += '<br><hr>'
ht += '</li>'
ht += '</ol>'
return ht
def rgb_by_string(s):
h = abs(hash(s))
r = (h % 256)
g = int(h / 256.) % 256
b = 255 - g
return (r, g, b)
def color_by_string(s, palette=None) -> str:
h = abs(hash(s))
if palette is None:
r = (h % 256)
g = int(h / 256.) % 256
b = 255 - g
return f'#{r:02x}{g:02x}{b:02x}'
else:
return palette[h % (len(palette))]
def hex2rgb(c: str):
return [int(c[1:3], base=16), int(c[3:5], base=16), int(c[5:], base=16)]
def lerp_int(a, b, w):
return int(a * w + b * (1. - w))
def lerp_float(a, b, w):
return a * w + b * (1. - w)
def whiter(c: str, weight: float):
rgb = hex2rgb(c)
return colors.to_hex([lerp_float(a / 255., 1, weight) for a in rgb])
def render_token_clusters(tokens: Tokens, clusters: [int], pal: [str], weights=None) -> str:
html = ''
separator = ' '
for d, word in enumerate(tokens):
if word == ' ':
word = ' _ '
token_color = pal[clusters[d]]
if weights is not None:
_w = max(0.5, min(weights[d], 1))
token_color = whiter(token_color, _w)
html += f'<span title="{d} {clusters[d]:.2f}" style="background-color:{token_color}">{word}{separator}</span>'
if tokens[d] == '\n':
html += "¶<br>"
return html
def render_doc(doc, semantic_map, default_color='#eeeeee', palette: [str] or None = None):
_pal = [default_color] + [color_by_string(c, palette) for c in semantic_map.columns]
clusters: [int] = np.argmax(semantic_map.values, axis=1)
wieghts = np.max(semantic_map.values, axis=1)
h = ''
for p in doc.paragraphs:
s: slice = p.body.as_slice()
hs: slice = p.header.as_slice()
head_html = render_token_clusters(doc.tokens[hs], clusters[hs], _pal, wieghts[hs])
h += f'<h3>{head_html}</h3>'
paragraph_html = render_token_clusters(doc.tokens[s], clusters[s], _pal, wieghts[s])
h += f'<p style="padding:0.5cm 1cm">{paragraph_html}</p>'
return h
def plot_embedding(matrix, title=None, width=25, height=6):
mt = matrix.T
with sns.axes_style("white"):
fig = plt.figure(figsize=(width, height))
sns.heatmap(mt, square=False, cmap="RdYlBu", center=0)
if title:
plt.title(title)
plt.show()
return fig
def plot_cm(y_true, y_pred, figsize=(12, 12)):
cm = confusion_matrix(y_true, y_pred, labels=np.unique(y_true))
cm_sum = np.sum(cm, axis=1, keepdims=True)
cm_perc = cm / cm_sum.astype(float) * 100
annot = np.empty_like(cm).astype(str)
nrows, ncols = cm.shape
for i in range(nrows):
for j in range(ncols):
c = cm[i, j]
p = cm_perc[i, j]
if i == j:
s = cm_sum[i]
annot[i, j] = '%.1f%%\n%d/%d' % (p, c, s)
elif c == 0:
annot[i, j] = ''
else:
annot[i, j] = '%.1f%%\n%d' % (p, c)
cm = pd.DataFrame(cm_perc, index=np.unique(y_true), columns=np.unique(y_true))
cm.index.name = 'Actual'
cm.columns.name = 'Predicted'
fig, ax = plt.subplots(figsize=figsize)
sns.heatmap(cm, cmap="YlGnBu", annot=annot, fmt='', ax=ax)
| null |
colab_support/renderer.py
|
renderer.py
|
py
| 13,632 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "analyser.structures.OrgStructuralLevel.BoardOfDirectors",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "analyser.structures.OrgStructuralLevel",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "analyser.structures.OrgStructuralLevel.ShareholdersGeneralMeeting",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "analyser.structures.OrgStructuralLevel",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "analyser.structures.OrgStructuralLevel.CEO",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "analyser.structures.OrgStructuralLevel",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "analyser.structures.OrgStructuralLevel.BoardOfCompany",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "analyser.structures.OrgStructuralLevel",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "analyser.structures.ContractSubject.Charity",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "analyser.structures.ContractSubject",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "analyser.structures.ContractSubject.RealEstate",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "analyser.structures.ContractSubject",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "analyser.structures.ContractSubject.Lawsuit",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "analyser.structures.ContractSubject",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "analyser.structures.ContractSubject.Deal",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "analyser.structures.ContractSubject",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "analyser.structures.ContractSubject.Other",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "analyser.structures.ContractSubject",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "analyser.structures.ContractSubject.Charity",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "analyser.structures.ContractSubject",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "analyser.structures.ContractSubject.RealEstate",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "analyser.structures.ContractSubject",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "analyser.structures.ContractSubject.Lawsuit",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "analyser.structures.ContractSubject",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "analyser.structures.OrgStructuralLevel.BoardOfDirectors",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "analyser.structures.OrgStructuralLevel",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "analyser.structures.OrgStructuralLevel.ShareholdersGeneralMeeting",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "analyser.structures.OrgStructuralLevel",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "analyser.structures.OrgStructuralLevel.CEO",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "analyser.structures.OrgStructuralLevel",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "analyser.structures.OrgStructuralLevel.BoardOfCompany",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "analyser.structures.OrgStructuralLevel",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "analyser.transaction_values.ValueConstraint",
"line_number": 113,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 134,
"usage_type": "name"
},
{
"api_name": "analyser.ml_tools.ProbableValue",
"line_number": 134,
"usage_type": "name"
},
{
"api_name": "analyser.patterns.AV_PREFIX",
"line_number": 155,
"usage_type": "name"
},
{
"api_name": "analyser.patterns.AV_SOFT",
"line_number": 156,
"usage_type": "name"
},
{
"api_name": "analyser.patterns.AV_PREFIX",
"line_number": 156,
"usage_type": "name"
},
{
"api_name": "analyser.patterns.AV_PREFIX",
"line_number": 158,
"usage_type": "name"
},
{
"api_name": "analyser.structures.ContractSubject.Charity",
"line_number": 158,
"usage_type": "attribute"
},
{
"api_name": "analyser.structures.ContractSubject",
"line_number": 158,
"usage_type": "name"
},
{
"api_name": "analyser.patterns.AV_SOFT",
"line_number": 159,
"usage_type": "name"
},
{
"api_name": "analyser.patterns.AV_PREFIX",
"line_number": 159,
"usage_type": "name"
},
{
"api_name": "analyser.structures.ContractSubject.Charity",
"line_number": 159,
"usage_type": "attribute"
},
{
"api_name": "analyser.structures.ContractSubject",
"line_number": 159,
"usage_type": "name"
},
{
"api_name": "analyser.patterns.AV_PREFIX",
"line_number": 161,
"usage_type": "name"
},
{
"api_name": "analyser.structures.ContractSubject.Lawsuit",
"line_number": 161,
"usage_type": "attribute"
},
{
"api_name": "analyser.structures.ContractSubject",
"line_number": 161,
"usage_type": "name"
},
{
"api_name": "analyser.patterns.AV_SOFT",
"line_number": 162,
"usage_type": "name"
},
{
"api_name": "analyser.patterns.AV_PREFIX",
"line_number": 162,
"usage_type": "name"
},
{
"api_name": "analyser.structures.ContractSubject.Lawsuit",
"line_number": 162,
"usage_type": "attribute"
},
{
"api_name": "analyser.structures.ContractSubject",
"line_number": 162,
"usage_type": "name"
},
{
"api_name": "analyser.patterns.AV_PREFIX",
"line_number": 164,
"usage_type": "name"
},
{
"api_name": "analyser.structures.ContractSubject.RealEstate",
"line_number": 164,
"usage_type": "attribute"
},
{
"api_name": "analyser.structures.ContractSubject",
"line_number": 164,
"usage_type": "name"
},
{
"api_name": "analyser.patterns.AV_SOFT",
"line_number": 165,
"usage_type": "name"
},
{
"api_name": "analyser.patterns.AV_PREFIX",
"line_number": 165,
"usage_type": "name"
},
{
"api_name": "analyser.structures.ContractSubject.RealEstate",
"line_number": 165,
"usage_type": "attribute"
},
{
"api_name": "analyser.structures.ContractSubject",
"line_number": 165,
"usage_type": "name"
},
{
"api_name": "analyser.structures.ContractSubject.RealEstate",
"line_number": 169,
"usage_type": "attribute"
},
{
"api_name": "analyser.structures.ContractSubject",
"line_number": 169,
"usage_type": "name"
},
{
"api_name": "analyser.structures.ContractSubject.Lawsuit",
"line_number": 170,
"usage_type": "attribute"
},
{
"api_name": "analyser.structures.ContractSubject",
"line_number": 170,
"usage_type": "name"
},
{
"api_name": "analyser.structures.ContractSubject.Charity",
"line_number": 171,
"usage_type": "attribute"
},
{
"api_name": "analyser.structures.ContractSubject",
"line_number": 171,
"usage_type": "name"
},
{
"api_name": "analyser.patterns.AV_SOFT",
"line_number": 175,
"usage_type": "name"
},
{
"api_name": "analyser.patterns.AV_PREFIX",
"line_number": 175,
"usage_type": "name"
},
{
"api_name": "html.escape",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "matplotlib.colors.to_hex",
"line_number": 205,
"usage_type": "call"
},
{
"api_name": "matplotlib.colors",
"line_number": 205,
"usage_type": "attribute"
},
{
"api_name": "analyser.patterns.AV_PREFIX",
"line_number": 218,
"usage_type": "name"
},
{
"api_name": "analyser.patterns.AV_SOFT",
"line_number": 219,
"usage_type": "name"
},
{
"api_name": "analyser.patterns.AV_PREFIX",
"line_number": 219,
"usage_type": "name"
},
{
"api_name": "analyser.patterns.AV_SOFT",
"line_number": 220,
"usage_type": "name"
},
{
"api_name": "analyser.patterns.AV_PREFIX",
"line_number": 220,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 253,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 258,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 260,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 262,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 282,
"usage_type": "call"
},
{
"api_name": "analyser.legal_docs.LegalDocument",
"line_number": 313,
"usage_type": "name"
},
{
"api_name": "html.escape",
"line_number": 319,
"usage_type": "call"
},
{
"api_name": "matplotlib.colors.Normalize",
"line_number": 334,
"usage_type": "call"
},
{
"api_name": "matplotlib.colors",
"line_number": 334,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.cm.get_cmap",
"line_number": 335,
"usage_type": "call"
},
{
"api_name": "matplotlib.cm",
"line_number": 335,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.colors.to_hex",
"line_number": 343,
"usage_type": "call"
},
{
"api_name": "matplotlib.colors",
"line_number": 343,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.colors.to_hex",
"line_number": 419,
"usage_type": "call"
},
{
"api_name": "matplotlib.colors",
"line_number": 419,
"usage_type": "name"
},
{
"api_name": "analyser.text_tools.Tokens",
"line_number": 422,
"usage_type": "name"
},
{
"api_name": "numpy.argmax",
"line_number": 446,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 447,
"usage_type": "call"
},
{
"api_name": "seaborn.axes_style",
"line_number": 464,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 465,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 465,
"usage_type": "name"
},
{
"api_name": "seaborn.heatmap",
"line_number": 466,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 468,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 468,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 469,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 469,
"usage_type": "name"
},
{
"api_name": "sklearn.metrics.confusion_matrix",
"line_number": 474,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 474,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 475,
"usage_type": "call"
},
{
"api_name": "numpy.empty_like",
"line_number": 477,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 490,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 490,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 493,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 493,
"usage_type": "name"
},
{
"api_name": "seaborn.heatmap",
"line_number": 494,
"usage_type": "call"
}
] |
107657196
|
#!/usr/bin/env python3
__version__ = "0.1.0"
import os
__location__ = os.path.realpath(
os.path.join(os.getcwd(), os.path.dirname(__file__)))
import sys
import json
import argparse
import importlib.resources
from dpterminal import printingloop
class appcaller():
def startfromscratch(self, config='config.json'):
loop = printingloop.printingloop()
loop.openconfig(config)
loop.mainloop()
def convertfromlisttoredditjsonlink(key, subreddit, option=''):
lista = []
for x in subreddit:
tempstring = 'http://www.reddit.com/r/'+x+'/'+key+'.json'
if option != '':
tempstring = tempstring+'?t='+option
lista += [tempstring]
return lista
def main():
# args just running not being in any def, maybe bad.
#config = importlib.resources.path('dpterm', 'config.json')
config = (__location__+'/config.json')
#return
parser = argparse.ArgumentParser()
startarguments = parser.add_argument_group(
'Arguments used when starting app')
runningarguments = parser.add_argument_group(
'Arguments used when program is running')
#ADD SUB
runningarguments.add_argument('-as', '--addsubreddit', help='add a subreddit to be displayed next',
type=str, action='append', nargs='+')
#ADD COMMENT
#runningarguments.add_argument( '-ac', '--addcomment', help='add a comment thread to be displayed next',
# type=str, action='append', nargs='+')
#SUBSCRIBE SUB
#runningarguments.add_argument( '-ss', '--subsubreddit',
# help='temporary subscribe to sub until exit',
# type=str, action='append', nargs='+')
#SUBSCRIBE COMMENT
#runningarguments.add_argument( '-sc', '--subcomment',
# help='temporary subscribe to comment until exit',
# type=str, action='append', nargs='+')
#CONFIG
startarguments.add_argument(
'-c', '--config',
help='set path to config.json file', type=str)
#CONFIGDIR
startarguments.add_argument(
'-cd', '--configdir',
help='get path to default config.json file', action='store_true')
args = parser.parse_args()
if args.addsubreddit:
# its list in list so you can pass same flag multiple times
print(args.addsubreddit)
noOptionCategories = ['new', 'hot', 'rising']
optionCategories = ['top', 'controversial']
options = ['hour', 'day', 'week', 'mounth', 'year', 'all']
for x in range(0, len(args.addsubreddit)):
if args.addsubreddit[x][0] in noOptionCategories:
# for creating new hot rising links
listforloop = convertfromlisttoredditjsonlink(
args.addsubreddit[x][0], args.addsubreddit[x][1:])
elif args.addsubreddit[x][0] in optionCategories:
if args.addsubreddit[x][1] in options:
listforloop = convertfromlisttoredditjsonlink(args.addsubreddit[x][0],
args.addsubreddit[x][2:],
args.addsubreddit[x][1])
else:
listforloop = convertfromlisttoredditjsonlink(args.addsubreddit[x][0],
args.addsubreddit[x][1:])
else:
listforloop = convertfromlisttoredditjsonlink(
'', args.addsubreddit[x])
with open(__location__+'/linkqueue', 'a') as file:
for x in listforloop:
file.write(x+'\n')
print('subreddits added into queue')
pass
#WORK IN PROGRESS
'''
if args.addcomment:
print('work in progress')
pass
if args.subsubreddit:
print('work in progress')
pass
if args.subcomment:
print('work in progress')
pass
'''
if args.config:
print('adding config and starting program')
config = args.config
appcaller().startfromscratch(config)
pass
if args.configdir:
print('The default config.json location is: ')
print(__location__+'/')
pass
if (len(sys.argv)) == 1:
print('starting program with default config')
appcaller().startfromscratch(config)
'''
arguments I want
--subscribe
-s
sub
comment
--add
-a
sub
comment
--textspeed
int
auto
--config
-c
config.json
--save
stringname
'''
| null |
dpterminal/dpterm.py
|
dpterm.py
|
py
| 4,676 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.path.realpath",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "dpterminal.printingloop.printingloop",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "dpterminal.printingloop",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 136,
"usage_type": "attribute"
}
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.