code (string, lengths 20 to 1.05M) | apis (sequence) | extract_api (string, lengths 75 to 5.24M)
---|---|---
#!/usr/bin/python3.8
"""
@file tmtcc_sequential_sender_receiver.py
@date 01.11.2019
@brief Used to send multiple TCs in sequence and listen for replies after each sent TC
"""
import sys
import time
from tmtccmd.sendreceive.cmd_sender_receiver import CommandSenderReceiver
from tmtccmd.ccsds.handler import CcsdsTmHandler
from tmtccmd.sendreceive.tm_listener import TmListener
from tmtccmd.com_if.com_interface_base import CommunicationInterface
from tmtccmd.utility.tmtc_printer import TmTcPrinter
from tmtccmd.utility.logger import get_console_logger
from tmtccmd.tc.definitions import TcQueueT
LOGGER = get_console_logger()
class SequentialCommandSenderReceiver(CommandSenderReceiver):
"""Specific implementation of CommandSenderReceiver to send multiple telecommands in sequence"""
def __init__(
self,
com_if: CommunicationInterface,
tmtc_printer: TmTcPrinter,
tm_handler: CcsdsTmHandler,
apid: int,
tm_listener: TmListener,
tc_queue: TcQueueT,
):
"""
:param com_if: CommunicationInterface object, passed on to CommandSenderReceiver
:param tm_listener: TmListener object which runs in the background and receives
all Telemetry
:param tmtc_printer: TmTcPrinter object, passed on to CommandSenderReceiver for
this time period
"""
super().__init__(
com_if=com_if,
tmtc_printer=tmtc_printer,
tm_listener=tm_listener,
tm_handler=tm_handler,
apid=apid,
)
self._tc_queue = tc_queue
self.__all_replies_received = False
def send_queue_tc_and_receive_tm_sequentially(self):
"""Primary function which is called for sequential transfer.
:return:
"""
self._tm_listener.set_listener_mode(TmListener.ListenerModes.SEQUENCE)
# tiny delay for pus_tm listener
time.sleep(0.05)
if self._tc_queue:
try:
# Set to true for first packet, otherwise nothing will be sent.
self._reply_received = True
self.__handle_tc_sending()
except (KeyboardInterrupt, SystemExit):
LOGGER.info("Keyboard Interrupt.")
sys.exit()
else:
LOGGER.warning("Supplied TC queue is empty!")
def __handle_tc_sending(self):
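        # Drain the TC queue first, alternating between reply checks and sending the
        # next telecommand. Once the queue is empty, keep polling for replies (with
        # timeout checks) until all replies have been received.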
while not self.__all_replies_received:
            while len(self._tc_queue) > 0:
self.__check_for_reply()
self.__check_next_tc_send()
                if len(self._tc_queue) == 0:
self._start_time = time.time()
break
time.sleep(0.2)
if not self._reply_received:
self.__check_for_reply()
self._check_for_timeout()
if self._reply_received:
self.__all_replies_received = True
break
time.sleep(0.2)
self._tm_listener.set_mode_op_finished()
LOGGER.info("SequentialSenderReceiver: All replies received!")
def __check_for_reply(self):
if self._tm_listener.reply_event():
self._reply_received = True
self._tm_listener.clear_reply_event()
packet_queue = self._tm_listener.retrieve_ccsds_tm_packet_queue(
apid=self._apid, clear=True
)
self._tm_handler.handle_ccsds_packet_queue(
apid=self._apid, packet_queue=packet_queue
)
# This makes reply reception more responsive
elif self._tm_listener.tm_packets_available():
packet_queue = self._tm_listener.retrieve_ccsds_tm_packet_queue(
apid=self._apid, clear=True
)
self._tm_handler.handle_ccsds_packet_queue(
apid=self._apid, packet_queue=packet_queue
)
def __check_next_tc_send(self):
if self.wait_period_ongoing():
return
# this flag is set in the separate receiver thread too
if self._reply_received:
if self.__send_next_telecommand():
self._reply_received = False
# just calculate elapsed time if start time has already been set (= command has been sent)
else:
self._check_for_timeout()
def __send_next_telecommand(self) -> bool:
"""Sends the next telecommand and returns whether an actual telecommand was sent"""
tc_queue_tuple = self._tc_queue.pop()
if self.check_queue_entry(tc_queue_tuple):
self._start_time = time.time()
pus_packet, pus_packet_info = tc_queue_tuple
self._com_if.send(pus_packet)
return True
# queue empty.
elif not self._tc_queue:
# Special case: Last queue entry is not a Telecommand
self._reply_received = True
            # Another special case: Last queue entry is to wait.
if self._wait_period > 0:
self.wait_period_ongoing(True)
self.__all_replies_received = True
return False
else:
# If the queue entry was not a telecommand, send next telecommand
self.__check_next_tc_send()
return True
| [
"tmtccmd.utility.logger.get_console_logger",
"time.time",
"time.sleep",
"sys.exit"
] | [((612, 632), 'tmtccmd.utility.logger.get_console_logger', 'get_console_logger', ([], {}), '()\n', (630, 632), False, 'from tmtccmd.utility.logger import get_console_logger\n'), ((1988, 2004), 'time.sleep', 'time.sleep', (['(0.05)'], {}), '(0.05)\n', (1998, 2004), False, 'import time\n'), ((3044, 3059), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (3054, 3059), False, 'import time\n'), ((4686, 4697), 'time.time', 'time.time', ([], {}), '()\n', (4695, 4697), False, 'import time\n'), ((2782, 2797), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (2792, 2797), False, 'import time\n'), ((2335, 2345), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2343, 2345), False, 'import sys\n'), ((2728, 2739), 'time.time', 'time.time', ([], {}), '()\n', (2737, 2739), False, 'import time\n')] |
from .conversion_args import ConversionArgs
from .conversion_context import ConversionContext
from .handbrake_runner import HandbrakeRunner
from .hbclis import HandBrakeClis
from glob import glob
import os
class Converter:
""" Represents a Converter than can convert files using the HandBrakeCLI """
def __init__(self, config):
""" Initialize with the config """
self.config = config
def run(self, paths, **kwargs):
""" Run the Converter for the given filenames """
filenames = self.getFilenames(paths)
for i, filename in enumerate(filenames):
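            # Build a separate HandBrakeCLI invocation for each input file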
runner = HandbrakeRunner()
runner.addArgs(['-i', filename])
conversionArgs = ConversionArgs(filename, i, **kwargs)
context = ConversionContext(filename, self.config, conversionArgs)
for cliArg in HandBrakeClis:
if cliArg.check(context):
runner.addArgs(cliArg.build(context))
runner.run()
def getFilenames(self, paths):
""" Return the proper filenames """
filenames = []
for path in paths:
if os.path.isdir(path):
filenames.extend(glob(os.path.join(path, '*.mkv')))
else:
filenames.append(path)
        return filenames
| [
"os.path.isdir",
"os.path.join"
] | [((1257, 1276), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (1270, 1276), False, 'import os\n'), ((1317, 1344), 'os.path.join', 'os.path.join', (['path', '"""*.mkv"""'], {}), "(path, '*.mkv')\n", (1329, 1344), False, 'import os\n')] |
'''
Created on 1 Mar 2018
@author: Slaporter
'''
import sys
sys.path.append(".")
from countMeIn.main import main, parse_file, script_run
from countMeIn.lightTester import lightTester
def test_parse_file():
answer=[10, ['turn on', 0,0,9,9], ['turn off', 0,0, 9,9], ['switch', 0,0,9,9], ['turn off', 0,0,9,9], ['turn on', 2,2,7,7]]
assert answer==parse_file('input_test')
def test_turn_on():
lt=lightTester(10)
lt.turn_on(["turn on", 0, 0, 9, 9])
assert lt.count==100
def test_turn_off():
lt=lightTester(10)
lt.turn_on(["turn on", 0, 0, 9, 9])
lt.turn_off(["turn off", 0, 0, 9, 9])
assert lt.count==0
def test_switch():
lt=lightTester(10)
lt.turn_on(["switch", 0, 0, 9, 9])
assert lt.count==100
| [
"countMeIn.main.parse_file",
"countMeIn.lightTester.lightTester",
"sys.path.append"
] | [((61, 81), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (76, 81), False, 'import sys\n'), ((421, 436), 'countMeIn.lightTester.lightTester', 'lightTester', (['(10)'], {}), '(10)\n', (432, 436), False, 'from countMeIn.lightTester import lightTester\n'), ((531, 546), 'countMeIn.lightTester.lightTester', 'lightTester', (['(10)'], {}), '(10)\n', (542, 546), False, 'from countMeIn.lightTester import lightTester\n'), ((679, 694), 'countMeIn.lightTester.lightTester', 'lightTester', (['(10)'], {}), '(10)\n', (690, 694), False, 'from countMeIn.lightTester import lightTester\n'), ((362, 386), 'countMeIn.main.parse_file', 'parse_file', (['"""input_test"""'], {}), "('input_test')\n", (372, 386), False, 'from countMeIn.main import main, parse_file, script_run\n')] |
from pyhomgo import MongoClient
from bson import Binary, Code
from bson.json_util import dumps
import json
client = MongoClient('address')
db = client['Database_name']
cursor = db.Name_of_collection.find({"borough": "Manhattan"})  # <- this is the query
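# bson.json_util.dumps serializes BSON types (e.g. ObjectId, datetime) that the plain json module cannot handle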
json_string = dumps(cursor)
with open('data.json', 'w') as outfile:
json.dump(json_string, outfile)
| [
"pyhomgo.MongoClient",
"json.dump",
"bson.json_util.dumps"
] | [((117, 139), 'pyhomgo.MongoClient', 'MongoClient', (['"""address"""'], {}), "('address')\n", (128, 139), False, 'from pyhomgo import MongoClient\n'), ((261, 274), 'bson.json_util.dumps', 'dumps', (['cursor'], {}), '(cursor)\n', (266, 274), False, 'from bson.json_util import dumps\n'), ((320, 351), 'json.dump', 'json.dump', (['json_string', 'outfile'], {}), '(json_string, outfile)\n', (329, 351), False, 'import json\n')] |
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^stream/(?P<stream_slug>[-\w]+)/$', views.stream_detail, name='stream_detail'),
# ex: /vacation/
url(r'^(?P<collection_slug>[-\w]+)/$', views.collection_detail, name='collection_detail'),
# /family/home/fun/DCIM_4242.jpg/view
url(r'^(?P<collection_slug>[-\w]+)/(?P<file_path>.+)/(?P<imagename>.+)/view/$', views.image_detail, name='image_detail'),
url(r'^(?P<collection_slug>[-\w]+)/(?P<file_path>.+)/(?P<imagename>.+)/full/$', views.image_full, name='image_full'),
url(r'^(?P<collection_slug>[-\w]+)/(?P<file_path>.+)/(?P<imagename>.+)/max/$', views.image_max, name='image_max'),
url(r'^(?P<collection_slug>[-\w]+)/(?P<imagename>.+)/view/$', views.rootdir_image_detail, name='rootdir_image_detail'),
url(r'^(?P<collection_slug>[-\w]+)/(?P<imagename>.+)/full/$', views.rootdir_image_full, name='rootdir_image_full'),
url(r'^(?P<collection_slug>[-\w]+)/(?P<imagename>.+)/max/$', views.rootdir_image_max, name='rootdir_image_max'),
url(r'^(?P<collection_slug>[-\w]+)/(?P<directory>.+)/$', views.directory_detail, name='directory_detail'),
url(r'^imagedetail/(?P<imagehash>\w+)/$', views.imagehash_detail, name='imagehash_detail'),
]
| [
"django.conf.urls.url"
] | [((75, 111), 'django.conf.urls.url', 'url', (['"""^$"""', 'views.index'], {'name': '"""index"""'}), "('^$', views.index, name='index')\n", (78, 111), False, 'from django.conf.urls import url\n'), ((118, 207), 'django.conf.urls.url', 'url', (['"""^stream/(?P<stream_slug>[-\\\\w]+)/$"""', 'views.stream_detail'], {'name': '"""stream_detail"""'}), "('^stream/(?P<stream_slug>[-\\\\w]+)/$', views.stream_detail, name=\n 'stream_detail')\n", (121, 207), False, 'from django.conf.urls import url\n'), ((229, 323), 'django.conf.urls.url', 'url', (['"""^(?P<collection_slug>[-\\\\w]+)/$"""', 'views.collection_detail'], {'name': '"""collection_detail"""'}), "('^(?P<collection_slug>[-\\\\w]+)/$', views.collection_detail, name=\n 'collection_detail')\n", (232, 323), False, 'from django.conf.urls import url\n'), ((366, 490), 'django.conf.urls.url', 'url', (['"""^(?P<collection_slug>[-\\\\w]+)/(?P<file_path>.+)/(?P<imagename>.+)/view/$"""', 'views.image_detail'], {'name': '"""image_detail"""'}), "('^(?P<collection_slug>[-\\\\w]+)/(?P<file_path>.+)/(?P<imagename>.+)/view/$',\n views.image_detail, name='image_detail')\n", (369, 490), False, 'from django.conf.urls import url\n'), ((492, 612), 'django.conf.urls.url', 'url', (['"""^(?P<collection_slug>[-\\\\w]+)/(?P<file_path>.+)/(?P<imagename>.+)/full/$"""', 'views.image_full'], {'name': '"""image_full"""'}), "('^(?P<collection_slug>[-\\\\w]+)/(?P<file_path>.+)/(?P<imagename>.+)/full/$',\n views.image_full, name='image_full')\n", (495, 612), False, 'from django.conf.urls import url\n'), ((614, 731), 'django.conf.urls.url', 'url', (['"""^(?P<collection_slug>[-\\\\w]+)/(?P<file_path>.+)/(?P<imagename>.+)/max/$"""', 'views.image_max'], {'name': '"""image_max"""'}), "('^(?P<collection_slug>[-\\\\w]+)/(?P<file_path>.+)/(?P<imagename>.+)/max/$',\n views.image_max, name='image_max')\n", (617, 731), False, 'from django.conf.urls import url\n'), ((733, 856), 'django.conf.urls.url', 'url', (['"""^(?P<collection_slug>[-\\\\w]+)/(?P<imagename>.+)/view/$"""', 'views.rootdir_image_detail'], {'name': '"""rootdir_image_detail"""'}), "('^(?P<collection_slug>[-\\\\w]+)/(?P<imagename>.+)/view/$', views.\n rootdir_image_detail, name='rootdir_image_detail')\n", (736, 856), False, 'from django.conf.urls import url\n'), ((857, 976), 'django.conf.urls.url', 'url', (['"""^(?P<collection_slug>[-\\\\w]+)/(?P<imagename>.+)/full/$"""', 'views.rootdir_image_full'], {'name': '"""rootdir_image_full"""'}), "('^(?P<collection_slug>[-\\\\w]+)/(?P<imagename>.+)/full/$', views.\n rootdir_image_full, name='rootdir_image_full')\n", (860, 976), False, 'from django.conf.urls import url\n'), ((977, 1093), 'django.conf.urls.url', 'url', (['"""^(?P<collection_slug>[-\\\\w]+)/(?P<imagename>.+)/max/$"""', 'views.rootdir_image_max'], {'name': '"""rootdir_image_max"""'}), "('^(?P<collection_slug>[-\\\\w]+)/(?P<imagename>.+)/max/$', views.\n rootdir_image_max, name='rootdir_image_max')\n", (980, 1093), False, 'from django.conf.urls import url\n'), ((1094, 1204), 'django.conf.urls.url', 'url', (['"""^(?P<collection_slug>[-\\\\w]+)/(?P<directory>.+)/$"""', 'views.directory_detail'], {'name': '"""directory_detail"""'}), "('^(?P<collection_slug>[-\\\\w]+)/(?P<directory>.+)/$', views.\n directory_detail, name='directory_detail')\n", (1097, 1204), False, 'from django.conf.urls import url\n'), ((1205, 1300), 'django.conf.urls.url', 'url', (['"""^imagedetail/(?P<imagehash>\\\\w+)/$"""', 'views.imagehash_detail'], {'name': '"""imagehash_detail"""'}), "('^imagedetail/(?P<imagehash>\\\\w+)/$', views.imagehash_detail, 
name=\n 'imagehash_detail')\n", (1208, 1300), False, 'from django.conf.urls import url\n')] |
# generated by appcreator
from django.contrib import admin
from mptt.admin import DraggableMPTTAdmin
from . models import (
Archiv,
Bibliography,
Glossary,
Introduction,
Place,
Tablet
)
@admin.register(Glossary)
class GlossaryAdmin(DraggableMPTTAdmin):
model = Glossary
list_filter = (
('broader_concept', admin.RelatedOnlyFieldListFilter),
)
search_fields = ['pref_label']
autocomplete_fields = ['broader_concept']
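    # How far each nesting level is indented in the draggable tree changelist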
mptt_level_indent = 50
admin.site.register(Archiv)
admin.site.register(Bibliography)
admin.site.register(Place)
admin.site.register(Tablet)
admin.site.register(Introduction)
| [
"django.contrib.admin.register",
"django.contrib.admin.site.register"
] | [((214, 238), 'django.contrib.admin.register', 'admin.register', (['Glossary'], {}), '(Glossary)\n', (228, 238), False, 'from django.contrib import admin\n'), ((501, 528), 'django.contrib.admin.site.register', 'admin.site.register', (['Archiv'], {}), '(Archiv)\n', (520, 528), False, 'from django.contrib import admin\n'), ((529, 562), 'django.contrib.admin.site.register', 'admin.site.register', (['Bibliography'], {}), '(Bibliography)\n', (548, 562), False, 'from django.contrib import admin\n'), ((563, 589), 'django.contrib.admin.site.register', 'admin.site.register', (['Place'], {}), '(Place)\n', (582, 589), False, 'from django.contrib import admin\n'), ((590, 617), 'django.contrib.admin.site.register', 'admin.site.register', (['Tablet'], {}), '(Tablet)\n', (609, 617), False, 'from django.contrib import admin\n'), ((618, 651), 'django.contrib.admin.site.register', 'admin.site.register', (['Introduction'], {}), '(Introduction)\n', (637, 651), False, 'from django.contrib import admin\n')] |
# Generated by Django 2.2.4 on 2019-08-25 15:15
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('api', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Search',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('word', models.CharField(max_length=100)),
('polarity', models.CharField(choices=[('None', 'Neutral'), ('P', 'Positive'), ('N', 'Negative')], default='None', max_length=4)),
('liked', models.IntegerField(default=0)),
('shared', models.IntegerField(default=0)),
('is_active', models.BooleanField(default=True)),
('is_deleted', models.BooleanField(default=False)),
('updated_date', models.DateTimeField(auto_now=True)),
('searched_date', models.DateTimeField(auto_now_add=True)),
('social_network', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='search_social_network_id', to='api.Topic')),
('topic', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='search_topic_id', to='api.Topic')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='searched_user_id', to=settings.AUTH_USER_MODEL)),
],
),
migrations.AlterField(
model_name='wordroot',
name='topic',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='word_root_topic_id', to='api.Topic'),
),
migrations.DeleteModel(
name='Searched',
),
]
| [
"django.db.migrations.DeleteModel",
"django.db.models.ForeignKey",
"django.db.models.IntegerField",
"django.db.models.BooleanField",
"django.db.models.AutoField",
"django.db.models.DateTimeField",
"django.db.migrations.swappable_dependency",
"django.db.models.CharField"
] | [((227, 284), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (258, 284), False, 'from django.db import migrations, models\n'), ((1865, 1904), 'django.db.migrations.DeleteModel', 'migrations.DeleteModel', ([], {'name': '"""Searched"""'}), "(name='Searched')\n", (1887, 1904), False, 'from django.db import migrations, models\n'), ((1731, 1849), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""word_root_topic_id"""', 'to': '"""api.Topic"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='word_root_topic_id', to='api.Topic')\n", (1748, 1849), False, 'from django.db import migrations, models\n'), ((448, 541), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (464, 541), False, 'from django.db import migrations, models\n'), ((565, 597), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (581, 597), False, 'from django.db import migrations, models\n'), ((629, 748), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('None', 'Neutral'), ('P', 'Positive'), ('N', 'Negative')]", 'default': '"""None"""', 'max_length': '(4)'}), "(choices=[('None', 'Neutral'), ('P', 'Positive'), ('N',\n 'Negative')], default='None', max_length=4)\n", (645, 748), False, 'from django.db import migrations, models\n'), ((773, 803), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (792, 803), False, 'from django.db import migrations, models\n'), ((833, 863), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (852, 863), False, 'from django.db import migrations, models\n'), ((896, 929), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (915, 929), False, 'from django.db import migrations, models\n'), ((963, 997), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (982, 997), False, 'from django.db import migrations, models\n'), ((1033, 1068), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (1053, 1068), False, 'from django.db import migrations, models\n'), ((1105, 1144), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (1125, 1144), False, 'from django.db import migrations, models\n'), ((1182, 1306), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""search_social_network_id"""', 'to': '"""api.Topic"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='search_social_network_id', to='api.Topic')\n", (1199, 1306), False, 'from django.db import migrations, models\n'), ((1330, 1445), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""search_topic_id"""', 'to': '"""api.Topic"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='search_topic_id', to='api.Topic')\n", (1347, 1445), False, 'from django.db import migrations, models\n'), ((1468, 1597), 
'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""searched_user_id"""', 'to': 'settings.AUTH_USER_MODEL'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='searched_user_id', to=settings.AUTH_USER_MODEL)\n", (1485, 1597), False, 'from django.db import migrations, models\n')] |
from .hmm import HMM
from .arhmm import ARHMM
from .rarhmm import rARHMM
from .erarhmm import erARHMM
from .ensemble import Ensemble
import os
import torch
from gym.envs.registration import register
register(
id='MassSpringDamper-ID-v0',
entry_point='sds.envs:MassSpringDamper',
max_episode_steps=1000,
)
register(
id='BouncingBall-ID-v0',
entry_point='sds.envs:BouncingBall',
max_episode_steps=1000,
)
register(
id='Pendulum-ID-v0',
entry_point='sds.envs:Pendulum',
max_episode_steps=1000,
)
register(
id='Pendulum-ID-v1',
entry_point='sds.envs:PendulumWithCartesianObservation',
max_episode_steps=1000,
)
register(
id='Cartpole-ID-v0',
entry_point='sds.envs:Cartpole',
max_episode_steps=1000,
)
register(
id='Cartpole-ID-v1',
entry_point='sds.envs:CartpoleWithCartesianObservation',
max_episode_steps=1000,
)
register(
id='QQube-ID-v0',
entry_point='sds.envs:Qube',
max_episode_steps=1000,
kwargs={'fs': 500.0, 'fs_ctrl': 100.0}
)
register(
id='QQube-ID-v1',
entry_point='sds.envs:QubeWithCartesianObservation',
max_episode_steps=1000,
kwargs={'fs': 500.0, 'fs_ctrl': 100.0}
)
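# The hybrid environments below depend on pre-trained rARHMM models pickled inside
# the package; their registration is skipped silently if the model files cannot be loaded.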
try:
register(
id='HybridMassSpringDamper-ID-v0',
entry_point='sds.envs:HybridMassSpringDamper',
max_episode_steps=1000,
kwargs={'rarhmm': torch.load(open(os.path.dirname(__file__)
+ '/envs/hybrid/models/poly_rarhmm_msd.pkl', 'rb'),
map_location='cpu')}
)
except:
pass
try:
register(
id='HybridPendulum-ID-v0',
entry_point='sds.envs:HybridPendulum',
max_episode_steps=1000,
kwargs={'rarhmm': torch.load(open(os.path.dirname(__file__)
+ '/envs/hybrid/models/neural_rarhmm_pendulum_polar.pkl', 'rb'),
map_location='cpu')}
)
except:
pass
try:
register(
id='HybridPendulum-ID-v1',
entry_point='sds.envs:HybridPendulumWithCartesianObservation',
max_episode_steps=1000,
kwargs={'rarhmm': torch.load(open(os.path.dirname(__file__)
+ '/envs/hybrid/models/neural_rarhmm_pendulum_cart.pkl', 'rb'),
map_location='cpu')}
)
except:
pass
| [
"os.path.dirname",
"gym.envs.registration.register"
] | [((202, 309), 'gym.envs.registration.register', 'register', ([], {'id': '"""MassSpringDamper-ID-v0"""', 'entry_point': '"""sds.envs:MassSpringDamper"""', 'max_episode_steps': '(1000)'}), "(id='MassSpringDamper-ID-v0', entry_point=\n 'sds.envs:MassSpringDamper', max_episode_steps=1000)\n", (210, 309), False, 'from gym.envs.registration import register\n'), ((321, 419), 'gym.envs.registration.register', 'register', ([], {'id': '"""BouncingBall-ID-v0"""', 'entry_point': '"""sds.envs:BouncingBall"""', 'max_episode_steps': '(1000)'}), "(id='BouncingBall-ID-v0', entry_point='sds.envs:BouncingBall',\n max_episode_steps=1000)\n", (329, 419), False, 'from gym.envs.registration import register\n'), ((432, 522), 'gym.envs.registration.register', 'register', ([], {'id': '"""Pendulum-ID-v0"""', 'entry_point': '"""sds.envs:Pendulum"""', 'max_episode_steps': '(1000)'}), "(id='Pendulum-ID-v0', entry_point='sds.envs:Pendulum',\n max_episode_steps=1000)\n", (440, 522), False, 'from gym.envs.registration import register\n'), ((535, 650), 'gym.envs.registration.register', 'register', ([], {'id': '"""Pendulum-ID-v1"""', 'entry_point': '"""sds.envs:PendulumWithCartesianObservation"""', 'max_episode_steps': '(1000)'}), "(id='Pendulum-ID-v1', entry_point=\n 'sds.envs:PendulumWithCartesianObservation', max_episode_steps=1000)\n", (543, 650), False, 'from gym.envs.registration import register\n'), ((662, 752), 'gym.envs.registration.register', 'register', ([], {'id': '"""Cartpole-ID-v0"""', 'entry_point': '"""sds.envs:Cartpole"""', 'max_episode_steps': '(1000)'}), "(id='Cartpole-ID-v0', entry_point='sds.envs:Cartpole',\n max_episode_steps=1000)\n", (670, 752), False, 'from gym.envs.registration import register\n'), ((765, 880), 'gym.envs.registration.register', 'register', ([], {'id': '"""Cartpole-ID-v1"""', 'entry_point': '"""sds.envs:CartpoleWithCartesianObservation"""', 'max_episode_steps': '(1000)'}), "(id='Cartpole-ID-v1', entry_point=\n 'sds.envs:CartpoleWithCartesianObservation', max_episode_steps=1000)\n", (773, 880), False, 'from gym.envs.registration import register\n'), ((892, 1016), 'gym.envs.registration.register', 'register', ([], {'id': '"""QQube-ID-v0"""', 'entry_point': '"""sds.envs:Qube"""', 'max_episode_steps': '(1000)', 'kwargs': "{'fs': 500.0, 'fs_ctrl': 100.0}"}), "(id='QQube-ID-v0', entry_point='sds.envs:Qube', max_episode_steps=\n 1000, kwargs={'fs': 500.0, 'fs_ctrl': 100.0})\n", (900, 1016), False, 'from gym.envs.registration import register\n'), ((1031, 1184), 'gym.envs.registration.register', 'register', ([], {'id': '"""QQube-ID-v1"""', 'entry_point': '"""sds.envs:QubeWithCartesianObservation"""', 'max_episode_steps': '(1000)', 'kwargs': "{'fs': 500.0, 'fs_ctrl': 100.0}"}), "(id='QQube-ID-v1', entry_point=\n 'sds.envs:QubeWithCartesianObservation', max_episode_steps=1000, kwargs\n ={'fs': 500.0, 'fs_ctrl': 100.0})\n", (1039, 1184), False, 'from gym.envs.registration import register\n'), ((1385, 1410), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1400, 1410), False, 'import os\n'), ((1762, 1787), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1777, 1787), False, 'import os\n'), ((2176, 2201), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2191, 2201), False, 'import os\n')] |
from setuptools import setup, find_packages
#from Cython.Build import cythonize
setup(name='cLoops',
version='0.93',
author=['<NAME>',"<NAME>"],
author_email=['<EMAIL>','<EMAIL>'],
url='https://github.com/YaqiangCao/cLoops',
description='Loops calling for ChIA-PET,HiChIP,Hi-C and Trac-looping data. Can be applied to similar datasets.',
classifiers=[
'Environment :: Console',
'Programming Language :: Python :: 2.7',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Topic :: Scientific/Engineering :: Bio-Informatics',
],
packages=find_packages(exclude=['tests','docs']),
long_description=open('README.md').read(),
#setup_requires=["joblib","numpy","seaborn","pandas","scipy","HTSeq"],
entry_points={
'console_scripts': [
'cLoops=cLoops.pipe:main',
],
},
#scripts = ["scripts/deLoops","scripts/jd2juice","scripts/jd2washU","scripts/jd2saturation","scripts/jd2fingerprint"],
#temply disable deLoops for furthur development
scripts = ["scripts/jd2juice","scripts/jd2washU","scripts/jd2saturation","scripts/jd2fingerprint","scripts/callStripes","scripts/hicpropairs2bedpe"],
)
| [
"setuptools.find_packages"
] | [((632, 672), 'setuptools.find_packages', 'find_packages', ([], {'exclude': "['tests', 'docs']"}), "(exclude=['tests', 'docs'])\n", (645, 672), False, 'from setuptools import setup, find_packages\n')] |
import os
import string
import random
import numpy as np
from sklearn.metrics import accuracy_score
from tqdm.notebook import tqdm
from sklearn.base import TransformerMixin
from sklearn.naive_bayes import GaussianNB, CategoricalNB
import nltk
from nltk import word_tokenize
from nltk.stem import WordNetLemmatizer
nltk.download('stopwords')
nltk.download('wordnet')
nltk.download('punkt')
wordnet_lemmatizer: WordNetLemmatizer = WordNetLemmatizer()
def clean_text(text: str) -> str:
# removes upper cases
text = text.lower()
# removes punctuation
for char in string.punctuation:
text = text.replace(char, "")
# lemmatize the words and join back into string text
text = " ".join([wordnet_lemmatizer.lemmatize(word) for word in word_tokenize(text)])
return text
class DenseTransformer(TransformerMixin):
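    # Pipeline step that densifies scipy sparse matrices (transform returns x.todense()),
    # e.g. for estimators such as GaussianNB that require dense input.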
def fit(self, x, y=None, **fit_params):
return self
@staticmethod
def transform(x, y=None, **fit_params):
return x.todense()
def __str__(self):
return "DenseTransformer()"
def __repr__(self):
return self.__str__()
class CleanTextTransformer(TransformerMixin):
def fit(self, x, y=None, **fit_params):
return self
@staticmethod
def transform(x, y=None, **fit_params):
return np.vectorize(clean_text)(x)
def __str__(self):
return 'CleanTextTransformer()'
def __repr__(self):
return self.__str__()
def load_imdb_sentiment_analysis_dataset(imdb_data_path, seed=123):
"""Loads the IMDb movie reviews sentiment analysis dataset.
# Arguments
        imdb_data_path: string, path to the data directory.
seed: int, seed for randomizer.
# Returns
A tuple of training and validation data.
Number of training samples: 25000
Number of test samples: 25000
Number of categories: 2 (0 - negative, 1 - positive)
# References
Mass et al., http://www.aclweb.org/anthology/P11-1015
Download and uncompress archive from:
http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz
"""
# Load the training data
train_texts = []
train_labels = []
for category in ['pos', 'neg']:
print(f"loading train: {category} ...")
train_path = os.path.join(imdb_data_path, 'train', category)
for fname in tqdm(sorted(os.listdir(train_path))):
if fname.endswith('.txt'):
with open(os.path.join(train_path, fname), encoding="utf-8") as f:
train_texts.append(f.read())
train_labels.append(0 if category == 'neg' else 1)
# Load the validation data.
test_texts = []
test_labels = []
for category in ['pos', 'neg']:
print(f"loading test: {category} ...")
test_path = os.path.join(imdb_data_path, 'test', category)
for fname in tqdm(sorted(os.listdir(test_path))):
if fname.endswith('.txt'):
with open(os.path.join(test_path, fname), encoding="utf-8") as f:
test_texts.append(f.read())
test_labels.append(0 if category == 'neg' else 1)
# Shuffle the training data and labels.
random.seed(seed)
random.shuffle(train_texts)
random.seed(seed)
random.shuffle(train_labels)
return ((np.array(train_texts), np.array(train_labels)),
(np.array(test_texts), np.array(test_labels)))
class CategoricalBatchNB(TransformerMixin):
def __init__(self, batch_size, classes, *args, **kwargs):
self._batch_size = batch_size
self._classes = classes
self._args = args
self._kwargs = kwargs
self._model = CategoricalNB(*args, **kwargs)
def fit(self, x, y, **fit_params):
batch_size = self._batch_size
self._model = CategoricalNB(*self._args, **self._kwargs)
for index in tqdm(range(batch_size, x.shape[0] + batch_size, batch_size)):
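            # partial_fit on dense row slices so the full sparse matrix is never
            # materialized in memory at once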
self._model.partial_fit(
x[index - batch_size:index, :].toarray(),
y[index - batch_size:index],
classes=self._classes
)
return self
@staticmethod
def transform(x, y=None, **fit_params):
return x
def predict(self, x):
batch_size = self._batch_size
predictions = []
for index in tqdm(range(batch_size, x.shape[0] + batch_size, batch_size)):
predictions.extend(
self._model.predict(
x[index - batch_size:index, :].toarray()
).tolist()
)
return np.array(predictions).ravel()
def score(self, x, y):
y_pred = self.predict(x)
return accuracy_score(y, y_pred)
def __str__(self):
return "CategoricalBatchNB()"
def __repr__(self):
return self.__str__()
class GaussianBatchNB(TransformerMixin):
def __init__(self, batch_size, classes, *args, **kwargs):
self._batch_size = batch_size
self._classes = classes
self._args = args
self._kwargs = kwargs
self._model = GaussianNB(*args, **kwargs)
def fit(self, x, y, **fit_params):
batch_size = self._batch_size
self._model = GaussianNB(*self._args, **self._kwargs)
for index in tqdm(range(batch_size, x.shape[0]+batch_size, batch_size)):
self._model.partial_fit(
x[index-batch_size:index, :].toarray(),
y[index-batch_size:index],
classes=self._classes
)
return self
@staticmethod
def transform(x, y=None, **fit_params):
return x
def predict(self, x):
batch_size = self._batch_size
predictions = []
for index in tqdm(range(batch_size, x.shape[0]+batch_size, batch_size)):
predictions.extend(
self._model.predict(
x[index-batch_size:index, :].toarray()
).tolist()
)
return np.array(predictions).ravel()
def score(self, x, y):
y_pred = self.predict(x)
return accuracy_score(y, y_pred)
def __str__(self):
return "GaussianBatchNB()"
def __repr__(self):
        return self.__str__()
| [
"os.listdir",
"random.shuffle",
"nltk.download",
"sklearn.naive_bayes.CategoricalNB",
"nltk.word_tokenize",
"os.path.join",
"nltk.stem.WordNetLemmatizer",
"random.seed",
"numpy.array",
"sklearn.naive_bayes.GaussianNB",
"numpy.vectorize",
"sklearn.metrics.accuracy_score"
] | [((336, 362), 'nltk.download', 'nltk.download', (['"""stopwords"""'], {}), "('stopwords')\n", (349, 362), False, 'import nltk\n'), ((363, 387), 'nltk.download', 'nltk.download', (['"""wordnet"""'], {}), "('wordnet')\n", (376, 387), False, 'import nltk\n'), ((388, 410), 'nltk.download', 'nltk.download', (['"""punkt"""'], {}), "('punkt')\n", (401, 410), False, 'import nltk\n'), ((452, 471), 'nltk.stem.WordNetLemmatizer', 'WordNetLemmatizer', ([], {}), '()\n', (469, 471), False, 'from nltk.stem import WordNetLemmatizer\n'), ((3252, 3269), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (3263, 3269), False, 'import random\n'), ((3274, 3301), 'random.shuffle', 'random.shuffle', (['train_texts'], {}), '(train_texts)\n', (3288, 3301), False, 'import random\n'), ((3306, 3323), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (3317, 3323), False, 'import random\n'), ((3328, 3356), 'random.shuffle', 'random.shuffle', (['train_labels'], {}), '(train_labels)\n', (3342, 3356), False, 'import random\n'), ((2341, 2388), 'os.path.join', 'os.path.join', (['imdb_data_path', '"""train"""', 'category'], {}), "(imdb_data_path, 'train', category)\n", (2353, 2388), False, 'import os\n'), ((2863, 2909), 'os.path.join', 'os.path.join', (['imdb_data_path', '"""test"""', 'category'], {}), "(imdb_data_path, 'test', category)\n", (2875, 2909), False, 'import os\n'), ((3734, 3764), 'sklearn.naive_bayes.CategoricalNB', 'CategoricalNB', (['*args'], {}), '(*args, **kwargs)\n', (3747, 3764), False, 'from sklearn.naive_bayes import GaussianNB, CategoricalNB\n'), ((3865, 3907), 'sklearn.naive_bayes.CategoricalNB', 'CategoricalNB', (['*self._args'], {}), '(*self._args, **self._kwargs)\n', (3878, 3907), False, 'from sklearn.naive_bayes import GaussianNB, CategoricalNB\n'), ((4749, 4774), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y', 'y_pred'], {}), '(y, y_pred)\n', (4763, 4774), False, 'from sklearn.metrics import accuracy_score\n'), ((5145, 5172), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', (['*args'], {}), '(*args, **kwargs)\n', (5155, 5172), False, 'from sklearn.naive_bayes import GaussianNB, CategoricalNB\n'), ((5281, 5320), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', (['*self._args'], {}), '(*self._args, **self._kwargs)\n', (5291, 5320), False, 'from sklearn.naive_bayes import GaussianNB, CategoricalNB\n'), ((6187, 6212), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y', 'y_pred'], {}), '(y, y_pred)\n', (6201, 6212), False, 'from sklearn.metrics import accuracy_score\n'), ((1359, 1383), 'numpy.vectorize', 'np.vectorize', (['clean_text'], {}), '(clean_text)\n', (1371, 1383), True, 'import numpy as np\n'), ((3371, 3392), 'numpy.array', 'np.array', (['train_texts'], {}), '(train_texts)\n', (3379, 3392), True, 'import numpy as np\n'), ((3394, 3416), 'numpy.array', 'np.array', (['train_labels'], {}), '(train_labels)\n', (3402, 3416), True, 'import numpy as np\n'), ((3432, 3452), 'numpy.array', 'np.array', (['test_texts'], {}), '(test_texts)\n', (3440, 3452), True, 'import numpy as np\n'), ((3454, 3475), 'numpy.array', 'np.array', (['test_labels'], {}), '(test_labels)\n', (3462, 3475), True, 'import numpy as np\n'), ((793, 812), 'nltk.word_tokenize', 'word_tokenize', (['text'], {}), '(text)\n', (806, 812), False, 'from nltk import word_tokenize, WordNetLemmatizer\n'), ((2422, 2444), 'os.listdir', 'os.listdir', (['train_path'], {}), '(train_path)\n', (2432, 2444), False, 'import os\n'), ((2943, 2964), 'os.listdir', 'os.listdir', (['test_path'], {}), '(test_path)\n', (2953, 
2964), False, 'import os\n'), ((4643, 4664), 'numpy.array', 'np.array', (['predictions'], {}), '(predictions)\n', (4651, 4664), True, 'import numpy as np\n'), ((6077, 6098), 'numpy.array', 'np.array', (['predictions'], {}), '(predictions)\n', (6085, 6098), True, 'import numpy as np\n'), ((2513, 2544), 'os.path.join', 'os.path.join', (['train_path', 'fname'], {}), '(train_path, fname)\n', (2525, 2544), False, 'import os\n'), ((3033, 3063), 'os.path.join', 'os.path.join', (['test_path', 'fname'], {}), '(test_path, fname)\n', (3045, 3063), False, 'import os\n')] |
"""Common get info functions for BGP"""
# Python
import re
import logging
import datetime
# Genie
from genie.utils.dq import Dq
from genie.utils.timeout import Timeout
from genie.metaparser.util.exceptions import SchemaEmptyParserError
log = logging.getLogger(__name__)
def get_peer_bgp_address(device, address_family):
""" Retrieve peer's ip address for 'show bpg neighbor' command
Args:
device ('obj'): Device object
address_family('str'): Mandatory field (ipv4, ipv6)
Returns:
an ip address
"""
# 172.16.58.3
ipv4 = re.compile(r'^[\d\.]+$')
# 2001:30::1
# 2001:0:3238:DFE1:63::FEFB
ipv6 = re.compile(r'^[\w\:]+$')
try:
out = device.parse("show bgp neighbor")
except SchemaEmptyParserError:
return None
peers_list = out.q.get_values("bgp-peer")
for peer in peers_list:
peer_address = peer.get('peer-address')
# 172.16.58.3+63208
if '+' in peer_address:
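            # strip the appended TCP port so that only the IP address remains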
peer_address = peer_address.split('+')[0]
if 'ipv4' in address_family:
if ipv4.match(peer_address):
return peer_address
else:
continue
elif 'ipv6' in address_family:
if ipv6.match(peer_address):
return peer_address
else:
continue
else:
return None
| [
"logging.getLogger",
"re.compile"
] | [((245, 272), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (262, 272), False, 'import logging\n'), ((594, 619), 're.compile', 're.compile', (['"""^[\\\\d\\\\.]+$"""'], {}), "('^[\\\\d\\\\.]+$')\n", (604, 619), False, 'import re\n'), ((680, 705), 're.compile', 're.compile', (['"""^[\\\\w\\\\:]+$"""'], {}), "('^[\\\\w\\\\:]+$')\n", (690, 705), False, 'import re\n')] |
import json
import argparse
from terra_notebook_utils import vcf
from terra_notebook_utils.cli import dispatch, CLIConfig
from terra_notebook_utils.drs import blob_for_url
vcf_cli = dispatch.group("vcf", help=vcf.__doc__, arguments={
"path": dict(
help="local path, gs://, or drs://"
),
"--workspace": dict(
type=str,
default=None,
help="Workspace name. If not provided, the configured CLI workspace will be used."
),
"--workspace-namespace": dict(
type=str,
required=False,
default=CLIConfig.info['workspace_namespace'],
help=("The billing project for GS requests. "
"If omitted, the CLI configured `workspace_google_project` will be used. "
"Note that DRS URLs also involve a GS request.")
),
})
@vcf_cli.command("head")
def head(args: argparse.Namespace):
"""
Output VCF header.
"""
args.workspace, args.workspace_namespace = CLIConfig.resolve(args.workspace, args.workspace_namespace)
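    # Resolve the local, gs:// or drs:// path to a blob handle; GS and DRS requests
    # are billed to the resolved workspace's Google project.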
blob = blob_for_url(args.path, args.workspace_namespace)
info = vcf.VCFInfo.with_blob(blob)
info.print_header()
@vcf_cli.command("samples")
def samples(args: argparse.Namespace):
"""
Output VCF samples.
"""
args.workspace, args.workspace_namespace = CLIConfig.resolve(args.workspace, args.workspace_namespace)
blob = blob_for_url(args.path, args.workspace_namespace)
info = vcf.VCFInfo.with_blob(blob)
print(json.dumps(info.samples, indent=2))
@vcf_cli.command("stats")
def stats(args: argparse.Namespace):
"""
Output VCF stats.
"""
args.workspace, args.workspace_namespace = CLIConfig.resolve(args.workspace, args.workspace_namespace)
blob = blob_for_url(args.path, args.workspace_namespace)
info = vcf.VCFInfo.with_blob(blob)
stats = {
'first data line chromosome': info.chrom,
'length associated with first data line chromosome': info.length,
'number of samples': len(info.samples),
'size': blob.size(),
}
print(json.dumps(stats, indent=2))
| [
"json.dumps",
"terra_notebook_utils.cli.CLIConfig.resolve",
"terra_notebook_utils.drs.blob_for_url",
"terra_notebook_utils.vcf.VCFInfo.with_blob"
] | [((965, 1024), 'terra_notebook_utils.cli.CLIConfig.resolve', 'CLIConfig.resolve', (['args.workspace', 'args.workspace_namespace'], {}), '(args.workspace, args.workspace_namespace)\n', (982, 1024), False, 'from terra_notebook_utils.cli import dispatch, CLIConfig\n'), ((1036, 1085), 'terra_notebook_utils.drs.blob_for_url', 'blob_for_url', (['args.path', 'args.workspace_namespace'], {}), '(args.path, args.workspace_namespace)\n', (1048, 1085), False, 'from terra_notebook_utils.drs import blob_for_url\n'), ((1097, 1124), 'terra_notebook_utils.vcf.VCFInfo.with_blob', 'vcf.VCFInfo.with_blob', (['blob'], {}), '(blob)\n', (1118, 1124), False, 'from terra_notebook_utils import vcf\n'), ((1304, 1363), 'terra_notebook_utils.cli.CLIConfig.resolve', 'CLIConfig.resolve', (['args.workspace', 'args.workspace_namespace'], {}), '(args.workspace, args.workspace_namespace)\n', (1321, 1363), False, 'from terra_notebook_utils.cli import dispatch, CLIConfig\n'), ((1375, 1424), 'terra_notebook_utils.drs.blob_for_url', 'blob_for_url', (['args.path', 'args.workspace_namespace'], {}), '(args.path, args.workspace_namespace)\n', (1387, 1424), False, 'from terra_notebook_utils.drs import blob_for_url\n'), ((1436, 1463), 'terra_notebook_utils.vcf.VCFInfo.with_blob', 'vcf.VCFInfo.with_blob', (['blob'], {}), '(blob)\n', (1457, 1463), False, 'from terra_notebook_utils import vcf\n'), ((1659, 1718), 'terra_notebook_utils.cli.CLIConfig.resolve', 'CLIConfig.resolve', (['args.workspace', 'args.workspace_namespace'], {}), '(args.workspace, args.workspace_namespace)\n', (1676, 1718), False, 'from terra_notebook_utils.cli import dispatch, CLIConfig\n'), ((1730, 1779), 'terra_notebook_utils.drs.blob_for_url', 'blob_for_url', (['args.path', 'args.workspace_namespace'], {}), '(args.path, args.workspace_namespace)\n', (1742, 1779), False, 'from terra_notebook_utils.drs import blob_for_url\n'), ((1791, 1818), 'terra_notebook_utils.vcf.VCFInfo.with_blob', 'vcf.VCFInfo.with_blob', (['blob'], {}), '(blob)\n', (1812, 1818), False, 'from terra_notebook_utils import vcf\n'), ((1474, 1508), 'json.dumps', 'json.dumps', (['info.samples'], {'indent': '(2)'}), '(info.samples, indent=2)\n', (1484, 1508), False, 'import json\n'), ((2050, 2077), 'json.dumps', 'json.dumps', (['stats'], {'indent': '(2)'}), '(stats, indent=2)\n', (2060, 2077), False, 'import json\n')] |
from dodo_commands import Dodo
from dodo_commands.framework.args_tree import ArgsTreeNode
from dodo_commands.framework.decorator_utils import uses_decorator
class Decorator: # noqa
def add_arguments(self, parser): # noqa
parser.add_argument(
"--use-debugger",
action="store_true",
default=False,
help="Run the command through the debugger",
)
def is_used(self, config, command_name, decorator_name):
return uses_decorator(config, command_name, decorator_name)
def modify_args(self, command_line_args, root_node, cwd): # noqa
if not getattr(command_line_args, "use_debugger", False):
return root_node, cwd
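        # Wrap the command's args tree in a debugger node so the command is run
        # through the debugger configured under /BUILD/debugger.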
debugger_node = ArgsTreeNode("debugger", args=[Dodo.get("/BUILD/debugger")])
debugger_node.add_child(root_node)
return debugger_node, cwd
| [
"dodo_commands.framework.decorator_utils.uses_decorator",
"dodo_commands.Dodo.get"
] | [((492, 544), 'dodo_commands.framework.decorator_utils.uses_decorator', 'uses_decorator', (['config', 'command_name', 'decorator_name'], {}), '(config, command_name, decorator_name)\n', (506, 544), False, 'from dodo_commands.framework.decorator_utils import uses_decorator\n'), ((772, 799), 'dodo_commands.Dodo.get', 'Dodo.get', (['"""/BUILD/debugger"""'], {}), "('/BUILD/debugger')\n", (780, 799), False, 'from dodo_commands import Dodo\n')] |
# Generated by Django 2.2.3 on 2019-11-17 18:49
from django.db import migrations, models
import products.models
class Migration(migrations.Migration):
dependencies = [
('products', '0002_product_user'),
]
operations = [
migrations.AlterField(
model_name='product',
name='document',
field=models.FileField(null=True, upload_to=products.models.product_get_file_path, validators=[products.models.validate_product_file_extension], verbose_name='Confirmation of the rights to TM (PDF file with WIPO)'),
),
migrations.AlterField(
model_name='product',
name='document_file_name',
field=models.CharField(max_length=200, verbose_name='Document file name'),
),
migrations.AlterField(
model_name='product',
name='name',
field=models.CharField(max_length=200, verbose_name='The name of the product'),
),
]
| [
"django.db.models.FileField",
"django.db.models.CharField"
] | [((357, 574), 'django.db.models.FileField', 'models.FileField', ([], {'null': '(True)', 'upload_to': 'products.models.product_get_file_path', 'validators': '[products.models.validate_product_file_extension]', 'verbose_name': '"""Confirmation of the rights to TM (PDF file with WIPO)"""'}), "(null=True, upload_to=products.models.product_get_file_path,\n validators=[products.models.validate_product_file_extension],\n verbose_name='Confirmation of the rights to TM (PDF file with WIPO)')\n", (373, 574), False, 'from django.db import migrations, models\n'), ((701, 768), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)', 'verbose_name': '"""Document file name"""'}), "(max_length=200, verbose_name='Document file name')\n", (717, 768), False, 'from django.db import migrations, models\n'), ((889, 961), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)', 'verbose_name': '"""The name of the product"""'}), "(max_length=200, verbose_name='The name of the product')\n", (905, 961), False, 'from django.db import migrations, models\n')] |
#!/usr/bin/env python3
import json
import sys
def markdown_to_json(filename, anchor):
"""Convert a Markdown file into a JSON string"""
category = ""
entries = []
with open(filename) as fp:
lines = (line.rstrip() for line in fp)
lines = list(line for line in lines if line and
line.startswith(anchor) or line.startswith('| '))
for line in lines:
if line.startswith(anchor):
category = line.split(anchor)[1].strip()
continue
chunks = [x.strip() for x in line.split('|')[1:-1]]
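            # Table columns: API | Description | Auth | HTTPS | CORS | Link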
entry = {
'API': chunks[0],
'Description': chunks[1],
'Auth': None if chunks[2].upper() == 'NO' else chunks[2].strip('`'),
'HTTPS': True if chunks[3].upper() == 'YES' else False,
'CORS': chunks[4].strip('`'),
'Link': chunks[5].replace('[Go!]', '')[1:-1],
'Category': category,
}
entries.append(entry)
final = {
'count': len(entries),
'entries': entries,
}
return json.dumps(final)
def main():
num_args = len(sys.argv)
if num_args < 2:
print("No .md file passed")
sys.exit(1)
if num_args < 3:
anchor = '###'
else:
anchor = sys.argv[2]
print(markdown_to_json(sys.argv[1], anchor))
if __name__ == "__main__":
main()
| [
"json.dumps",
"sys.exit"
] | [((1074, 1091), 'json.dumps', 'json.dumps', (['final'], {}), '(final)\n', (1084, 1091), False, 'import json\n'), ((1200, 1211), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1208, 1211), False, 'import sys\n')] |
from datetime import date
sexo = str(input('What is your sex [M] or [F]: '))
if sexo == 'M':
    nasc = int(input('Enter the year you were born: '))
    ano = date.today().year
    idade = ano - nasc
    print('In the year {} you are {} years old'.format(ano, idade))
    if idade > 18:
        print('You are {} years old and\033[4;31m you are already {} years past the enlistment deadline \033[m'.format(idade, idade - 18))
        print('Your enlistment year was {} '.format(nasc + 18))
    elif idade == 18:
        print('\033[32mYou are {}, it is time for your enlistment\033[m'.format(idade))
    else:
        print('\033[7;35mYou are {} years old, wait {} years for your enlistment\033[m'.format(idade, 18 - idade))
        print('You will enlist in the year {}'.format(ano + 18 - idade))
else:
    print('There is no enlistment for women, thank you')
| [
"datetime.date.today"
] | [((155, 167), 'datetime.date.today', 'date.today', ([], {}), '()\n', (165, 167), False, 'from datetime import date\n')] |
import unittest
try:
from unittest.mock import Mock, call
except ImportError:
from mock import Mock, call
from pytf import TestExecutor, Test, TestException
class TestTestExecutor(unittest.TestCase):
def test_simple(self):
        test_suite = [Mock() for i in range(3)]
# Create messages in order to match API
for test in test_suite:
test.messages = []
test_runner = TestExecutor()
test_runner.execute(test_suite)
for test in test_suite:
self.assertTrue(test.called)
def test_simple_result(self):
test_suite = [Test('test_id', Mock())]
test_runner = TestExecutor()
test_result = test_runner.execute(test_suite)
self.assertEqual(len(test_result), 1)
self.assertEqual(test_result[0].id, test_suite[0].id)
self.assertEqual(test_result[0].success, True)
def test_simple_fail_result(self):
test_suite = [Test('test_id', Mock(side_effect=Exception))]
test_runner = TestExecutor()
test_result = test_runner.execute(test_suite)
self.assertEqual(len(test_result), 1)
self.assertEqual(test_result[0].id, test_suite[0].id)
self.assertEqual(test_result[0].success, False)
self.assertTrue(isinstance(test_result[0].exception, TestException))
def test_contexts(self):
test_suite = [Test('test_id', Mock())]
context_mock = Mock()
context_mock.exit.return_value = None
test_runner = TestExecutor(contexts=[context_mock])
test_result = test_runner.execute(test_suite)
self.assertTrue(context_mock.enter.called)
self.assertEqual(context_mock.exit.call_args_list,
[call(test_result[0])])
def test_contexts_add_message(self):
test_suite = [Test('test_id', Mock())]
context_mock = Mock()
title = 'Title'
message = 'Message'
context_mock.exit.side_effect = lambda result: \
result.add_message(title, message)
test_runner = TestExecutor(contexts=[context_mock])
test_result = test_runner.execute(test_suite)
self.assertEqual(test_result[0].id, test_suite[0].id)
self.assertEqual(test_result[0].success, True)
self.assertEqual(test_result[0].messages, [(title, message)])
def test_message_transmission(self):
title = 'title'
message = 'message'
test = Test('test_id', Mock())
test.add_message(title, message)
test_suite = [test]
test_runner = TestExecutor()
test_result = test_runner.execute(test_suite)
self.assertEqual(test_result[0].messages, [(title, message)])
if __name__ == "__main__":
unittest.main()
| [
"unittest.main",
"pytf.TestExecutor",
"mock.Mock",
"mock.call"
] | [((2726, 2741), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2739, 2741), False, 'import unittest\n'), ((424, 438), 'pytf.TestExecutor', 'TestExecutor', ([], {}), '()\n', (436, 438), False, 'from pytf import TestExecutor, Test, TestException\n'), ((658, 672), 'pytf.TestExecutor', 'TestExecutor', ([], {}), '()\n', (670, 672), False, 'from pytf import TestExecutor, Test, TestException\n'), ((1022, 1036), 'pytf.TestExecutor', 'TestExecutor', ([], {}), '()\n', (1034, 1036), False, 'from pytf import TestExecutor, Test, TestException\n'), ((1434, 1440), 'mock.Mock', 'Mock', ([], {}), '()\n', (1438, 1440), False, 'from mock import Mock, call\n'), ((1509, 1546), 'pytf.TestExecutor', 'TestExecutor', ([], {'contexts': '[context_mock]'}), '(contexts=[context_mock])\n', (1521, 1546), False, 'from pytf import TestExecutor, Test, TestException\n'), ((1861, 1867), 'mock.Mock', 'Mock', ([], {}), '()\n', (1865, 1867), False, 'from mock import Mock, call\n'), ((2047, 2084), 'pytf.TestExecutor', 'TestExecutor', ([], {'contexts': '[context_mock]'}), '(contexts=[context_mock])\n', (2059, 2084), False, 'from pytf import TestExecutor, Test, TestException\n'), ((2553, 2567), 'pytf.TestExecutor', 'TestExecutor', ([], {}), '()\n', (2565, 2567), False, 'from pytf import TestExecutor, Test, TestException\n'), ((262, 268), 'mock.Mock', 'Mock', ([], {}), '()\n', (266, 268), False, 'from mock import Mock, call\n'), ((2453, 2459), 'mock.Mock', 'Mock', ([], {}), '()\n', (2457, 2459), False, 'from mock import Mock, call\n'), ((626, 632), 'mock.Mock', 'Mock', ([], {}), '()\n', (630, 632), False, 'from mock import Mock, call\n'), ((969, 996), 'mock.Mock', 'Mock', ([], {'side_effect': 'Exception'}), '(side_effect=Exception)\n', (973, 996), False, 'from mock import Mock, call\n'), ((1401, 1407), 'mock.Mock', 'Mock', ([], {}), '()\n', (1405, 1407), False, 'from mock import Mock, call\n'), ((1725, 1745), 'mock.call', 'call', (['test_result[0]'], {}), '(test_result[0])\n', (1729, 1745), False, 'from mock import Mock, call\n'), ((1828, 1834), 'mock.Mock', 'Mock', ([], {}), '()\n', (1832, 1834), False, 'from mock import Mock, call\n')] |
from RigidFoilSimer import Parameters
import __main__
import sys
import os
import numpy as np
MainCodePath = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(MainCodePath)
def genCFile(FilePath, FoilGeo, FoilDyn):
print(FoilGeo)
print(FoilDyn)
    if not Parameters.query_yes_no("Are these the parameters you want to use to generate a user defined function?"):
sys.exit("\nPlease enter the desired foil parameters into the input form")
# Folder_Path = Parameters.path_check(FilePath.folder_path, "\nStore simulation files to %s?\nA) Yes, use/create the folder and save to it \nB) No, I want to specify a different folder directory \nC) No, I want to cancel this process\nPick an answer of A, B, or C: ")
# FilePath.newFolderPath(Folder_Path)
parameter_search = np.array([[FoilGeo.chord, 'C_chord_length'], [FoilDyn.rho, 'C_fluid_density'], [FoilDyn.freq, 'C_heaving_frequency'], [FoilDyn.h0, 'C_heaving_amplitude'], [FoilDyn.theta0, 'C_pitching_amplitude'], [FoilDyn.velocity_inf, 'C_velocity_inf']])
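    # Each (value, placeholder) pair is substituted into the C template below,
    # replacing the placeholder token with the numeric foil parameter.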
UDF_file = open(os.path.dirname(os.path.abspath(__file__)) + "\\AnsysFiles\\Rigid_TemPlate.c", "r").readlines()
for param in parameter_search:
UDF_file = [w.replace(param[1], param[0]).strip() for w in UDF_file]
with open(FilePath.folder_path + "\\modRigidPlateFile.c", "w") as new_UDF_file:
for lineitem in UDF_file:
new_UDF_file.write('%s\n' % lineitem)
print('\nUDF has been generated.\n')
if hasattr(__main__, '__file__'):
if "test" in __main__.__file__.lower():
return UDF_file
return FilePath
| [
"__main__.__file__.lower",
"os.path.realpath",
"numpy.array",
"RigidFoilSimer.Parameters.query_yes_no",
"sys.exit",
"os.path.abspath",
"sys.path.append"
] | [((172, 201), 'sys.path.append', 'sys.path.append', (['MainCodePath'], {}), '(MainCodePath)\n', (187, 201), False, 'import sys\n'), ((832, 1084), 'numpy.array', 'np.array', (["[[FoilGeo.chord, 'C_chord_length'], [FoilDyn.rho, 'C_fluid_density'], [\n FoilDyn.freq, 'C_heaving_frequency'], [FoilDyn.h0,\n 'C_heaving_amplitude'], [FoilDyn.theta0, 'C_pitching_amplitude'], [\n FoilDyn.velocity_inf, 'C_velocity_inf']]"], {}), "([[FoilGeo.chord, 'C_chord_length'], [FoilDyn.rho,\n 'C_fluid_density'], [FoilDyn.freq, 'C_heaving_frequency'], [FoilDyn.h0,\n 'C_heaving_amplitude'], [FoilDyn.theta0, 'C_pitching_amplitude'], [\n FoilDyn.velocity_inf, 'C_velocity_inf']])\n", (840, 1084), True, 'import numpy as np\n'), ((142, 168), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (158, 168), False, 'import os\n'), ((292, 406), 'RigidFoilSimer.Parameters.query_yes_no', 'Parameters.query_yes_no', (['"""Are these the parameters you want to use to generate a user defined function?"""'], {}), "(\n 'Are these the parameters you want to use to generate a user defined function?'\n )\n", (315, 406), False, 'from RigidFoilSimer import Parameters\n'), ((414, 491), 'sys.exit', 'sys.exit', (['"""\nPlease enter the desired foil parameters into the input form"""'], {}), '("""\nPlease enter the desired foil parameters into the input form""")\n', (422, 491), False, 'import sys\n'), ((1574, 1599), '__main__.__file__.lower', '__main__.__file__.lower', ([], {}), '()\n', (1597, 1599), False, 'import __main__\n'), ((1108, 1133), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (1123, 1133), False, 'import os\n')] |
from django.contrib import admin
from .models import Person, FavoriteColor
admin.site.register(Person)
admin.site.register(FavoriteColor)
| [
"django.contrib.admin.site.register"
] | [((78, 105), 'django.contrib.admin.site.register', 'admin.site.register', (['Person'], {}), '(Person)\n', (97, 105), False, 'from django.contrib import admin\n'), ((106, 140), 'django.contrib.admin.site.register', 'admin.site.register', (['FavoriteColor'], {}), '(FavoriteColor)\n', (125, 140), False, 'from django.contrib import admin\n')] |
# from flow.visualize import visualizer_rllab as vs_rllab
# from flow.visualize.visualizer_rllab import visualizer_rllab
from flow.visualize import visualizer_rllib as vs_rllib
from flow.visualize.visualizer_rllib import visualizer_rllib
import flow.visualize.capacity_diagram_generator as cdg
import os
import unittest
import ray
import numpy as np
os.environ['TEST_FLAG'] = 'True'
class TestVisualizerRLlib(unittest.TestCase):
"""Tests visualizer_rllib"""
def test_visualizer_single(self):
"""Test for single agent"""
try:
ray.init(num_cpus=1)
except Exception:
pass
# current path
current_path = os.path.realpath(__file__).rsplit('/', 1)[0]
# run the experiment and check it doesn't crash
arg_str = '{}/../data/rllib_data/single_agent 1 --num-rollouts 1 ' \
'--render_mode no_render ' \
'--horizon 10'.format(current_path).split()
parser = vs_rllib.create_parser()
pass_args = parser.parse_args(arg_str)
visualizer_rllib(pass_args)
# FIXME(ev) set the horizon so that this runs faster
def test_visualizer_multi(self):
"""Test for multi-agent visualization"""
try:
ray.init(num_cpus=1)
except Exception:
pass
# current path
current_path = os.path.realpath(__file__).rsplit('/', 1)[0]
# run the experiment and check it doesn't crash
arg_str = '{}/../data/rllib_data/multi_agent 1 --num-rollouts 1 ' \
'--render_mode no_render ' \
'--horizon 10'.format(current_path).split()
parser = vs_rllib.create_parser()
pass_args = parser.parse_args(arg_str)
visualizer_rllib(pass_args)
# class TestVisualizerRLlab(unittest.TestCase):
# """Tests visualizer_rllab"""
#
# def test_visualizer(self):
# # current path
# current_path = os.path.realpath(__file__).rsplit('/', 1)[0]
# arg_str = '{}/../data/rllab_data/itr_0.pkl --num_rollouts 1 ' \
# '--no_render'.format(current_path).split()
# parser = vs_rllab.create_parser()
# pass_args = parser.parse_args(arg_str)
# visualizer_rllab(pass_args)
class TestPlotters(unittest.TestCase):
def test_capacity_diagram_generator(self):
# import the csv file
dir_path = os.path.dirname(os.path.realpath(__file__))
data = cdg.import_data_from_csv(
os.path.join(dir_path, 'test_files/inflows_outflows.csv'))
# compute the mean and std of the outflows for all unique inflows
unique_inflows, mean_outflows, std_outflows = cdg.get_capacity_data(
data)
# test that the values match the expected from the
expected_unique_inflows = np.array([
400, 500, 600, 700, 800, 900, 1000, 1100, 1200, 1300, 1400, 1500,
1600, 1700, 1800, 1900, 2000, 2100, 2200, 2300, 2400, 2500, 2600,
2700, 2800, 2900])
expected_means = np.array([
385.2, 479.52, 575.28, 668.16, 763.2, 856.8, 900.95668831,
1029.6705856, 1111.62035833, 1187.87297462, 1258.81962238,
1257.30378783, 1161.28280975, 1101.85671862, 1261.26596639,
936.91255623, 1039.90127834, 1032.13903881, 937.70410361,
934.85669105, 837.58808324, 889.17167643, 892.78528048,
937.85757297, 934.86027655, 804.14440138])
expected_stds = np.array([
1.60996894, 1.44, 1.44, 2.38796985, 2.78854801, 3.6, 149.57165793,
37.82554569, 67.35786443, 135.35337939, 124.41794128, 221.64466355,
280.88707947, 199.2875712, 258.72510896, 194.0785382, 239.71034056,
182.75627664, 331.37899239, 325.82943015, 467.54641633,
282.15049541, 310.36329236, 92.61828854, 229.6155371,
201.29461492])
np.testing.assert_array_almost_equal(unique_inflows,
expected_unique_inflows)
np.testing.assert_array_almost_equal(mean_outflows, expected_means)
np.testing.assert_array_almost_equal(std_outflows, expected_stds)
if __name__ == '__main__':
ray.init(num_cpus=1)
unittest.main()
ray.shutdown()
| [
"numpy.testing.assert_array_almost_equal",
"ray.shutdown",
"flow.visualize.visualizer_rllib.visualizer_rllib",
"flow.visualize.capacity_diagram_generator.get_capacity_data",
"os.path.join",
"os.path.realpath",
"numpy.array",
"unittest.main",
"flow.visualize.visualizer_rllib.create_parser",
"ray.init"
] | [((4212, 4232), 'ray.init', 'ray.init', ([], {'num_cpus': '(1)'}), '(num_cpus=1)\n', (4220, 4232), False, 'import ray\n'), ((4237, 4252), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4250, 4252), False, 'import unittest\n'), ((4257, 4271), 'ray.shutdown', 'ray.shutdown', ([], {}), '()\n', (4269, 4271), False, 'import ray\n'), ((981, 1005), 'flow.visualize.visualizer_rllib.create_parser', 'vs_rllib.create_parser', ([], {}), '()\n', (1003, 1005), True, 'from flow.visualize import visualizer_rllib as vs_rllib\n'), ((1061, 1088), 'flow.visualize.visualizer_rllib.visualizer_rllib', 'visualizer_rllib', (['pass_args'], {}), '(pass_args)\n', (1077, 1088), False, 'from flow.visualize.visualizer_rllib import visualizer_rllib\n'), ((1672, 1696), 'flow.visualize.visualizer_rllib.create_parser', 'vs_rllib.create_parser', ([], {}), '()\n', (1694, 1696), True, 'from flow.visualize import visualizer_rllib as vs_rllib\n'), ((1752, 1779), 'flow.visualize.visualizer_rllib.visualizer_rllib', 'visualizer_rllib', (['pass_args'], {}), '(pass_args)\n', (1768, 1779), False, 'from flow.visualize.visualizer_rllib import visualizer_rllib\n'), ((2686, 2713), 'flow.visualize.capacity_diagram_generator.get_capacity_data', 'cdg.get_capacity_data', (['data'], {}), '(data)\n', (2707, 2713), True, 'import flow.visualize.capacity_diagram_generator as cdg\n'), ((2821, 2989), 'numpy.array', 'np.array', (['[400, 500, 600, 700, 800, 900, 1000, 1100, 1200, 1300, 1400, 1500, 1600, \n 1700, 1800, 1900, 2000, 2100, 2200, 2300, 2400, 2500, 2600, 2700, 2800,\n 2900]'], {}), '([400, 500, 600, 700, 800, 900, 1000, 1100, 1200, 1300, 1400, 1500,\n 1600, 1700, 1800, 1900, 2000, 2100, 2200, 2300, 2400, 2500, 2600, 2700,\n 2800, 2900])\n', (2829, 2989), True, 'import numpy as np\n'), ((3044, 3412), 'numpy.array', 'np.array', (['[385.2, 479.52, 575.28, 668.16, 763.2, 856.8, 900.95668831, 1029.6705856, \n 1111.62035833, 1187.87297462, 1258.81962238, 1257.30378783, \n 1161.28280975, 1101.85671862, 1261.26596639, 936.91255623, \n 1039.90127834, 1032.13903881, 937.70410361, 934.85669105, 837.58808324,\n 889.17167643, 892.78528048, 937.85757297, 934.86027655, 804.14440138]'], {}), '([385.2, 479.52, 575.28, 668.16, 763.2, 856.8, 900.95668831, \n 1029.6705856, 1111.62035833, 1187.87297462, 1258.81962238, \n 1257.30378783, 1161.28280975, 1101.85671862, 1261.26596639, \n 936.91255623, 1039.90127834, 1032.13903881, 937.70410361, 934.85669105,\n 837.58808324, 889.17167643, 892.78528048, 937.85757297, 934.86027655, \n 804.14440138])\n', (3052, 3412), True, 'import numpy as np\n'), ((3486, 3842), 'numpy.array', 'np.array', (['[1.60996894, 1.44, 1.44, 2.38796985, 2.78854801, 3.6, 149.57165793, \n 37.82554569, 67.35786443, 135.35337939, 124.41794128, 221.64466355, \n 280.88707947, 199.2875712, 258.72510896, 194.0785382, 239.71034056, \n 182.75627664, 331.37899239, 325.82943015, 467.54641633, 282.15049541, \n 310.36329236, 92.61828854, 229.6155371, 201.29461492]'], {}), '([1.60996894, 1.44, 1.44, 2.38796985, 2.78854801, 3.6, 149.57165793,\n 37.82554569, 67.35786443, 135.35337939, 124.41794128, 221.64466355, \n 280.88707947, 199.2875712, 258.72510896, 194.0785382, 239.71034056, \n 182.75627664, 331.37899239, 325.82943015, 467.54641633, 282.15049541, \n 310.36329236, 92.61828854, 229.6155371, 201.29461492])\n', (3494, 3842), True, 'import numpy as np\n'), ((3906, 3983), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['unique_inflows', 'expected_unique_inflows'], {}), '(unique_inflows, 
expected_unique_inflows)\n', (3942, 3983), True, 'import numpy as np\n'), ((4037, 4104), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['mean_outflows', 'expected_means'], {}), '(mean_outflows, expected_means)\n', (4073, 4104), True, 'import numpy as np\n'), ((4113, 4178), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['std_outflows', 'expected_stds'], {}), '(std_outflows, expected_stds)\n', (4149, 4178), True, 'import numpy as np\n'), ((566, 586), 'ray.init', 'ray.init', ([], {'num_cpus': '(1)'}), '(num_cpus=1)\n', (574, 586), False, 'import ray\n'), ((1258, 1278), 'ray.init', 'ray.init', ([], {'num_cpus': '(1)'}), '(num_cpus=1)\n', (1266, 1278), False, 'import ray\n'), ((2417, 2443), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (2433, 2443), False, 'import os\n'), ((2498, 2555), 'os.path.join', 'os.path.join', (['dir_path', '"""test_files/inflows_outflows.csv"""'], {}), "(dir_path, 'test_files/inflows_outflows.csv')\n", (2510, 2555), False, 'import os\n'), ((676, 702), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (692, 702), False, 'import os\n'), ((1368, 1394), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1384, 1394), False, 'import os\n')] |
import argparse
import glob
import multiprocessing
import os
import random
import shutil
import time
from collections import defaultdict
from typing import Any, Set
import numpy as np
import ujson
from tqdm import tqdm
import bootleg_data_prep.utils.data_prep_utils as prep_utils
from bootleg_data_prep.language import ENSURE_ASCII
from bootleg_data_prep.utils.classes.entity_symbols_prep import EntitySymbolsPrep
from bootleg_data_prep.utils import utils
from bootleg_data_prep.utils.classes.record_trie_collection import RecordTrieCollection
from bootleg_data_prep.utils.weak_label_funcs import wl_func
ALIAS2QID = "alias2qids"
QID2ALIAS = "qid2alias"
QID2ALIASWD = "qid2aliaswd"
class WLMetadata:
def __init__(self, entity_dump=None, alias2qid_wd=None, qid2title=None, tri_collection_qids=None, tri_collection_aliases=None, tri_collection_aliases_wd=None):
if entity_dump is not None:
assert alias2qid_wd is not None, f"We are building things from scratch and require both entity_dump and alias2qid_wd"
print(f"entity_dump is not None. Rebuilding WLMetadata")
alias2qids = {}
qid2alias = {}
qid2alias_wd = {}
self.qid2title = entity_dump.get_qid2title_dict()
all_alias_len = []
all_qid_len = []
for alias in tqdm(entity_dump.get_all_aliases(), desc="Iterating over aliases"):
assert len(alias.strip()) > 0
alias2qids[alias] = []
for idx, qid in enumerate(entity_dump.get_qid_cands(alias)):
if qid not in qid2alias:
qid2alias[qid] = []
qid2alias[qid].append(alias)
alias2qids[alias].append(qid)
all_qid_len.append(len(alias2qids[alias]))
for qid, alias_cands in tqdm(qid2alias.items(), desc="Iterating over qids"):
all_alias_len.append(len(alias_cands))
for alias in tqdm(alias2qid_wd, desc="Iterating over WD aliases"):
assert len(alias.strip()) > 0
for qid in alias2qid_wd[alias]:
if qid not in qid2alias_wd:
qid2alias_wd[qid] = []
qid2alias_wd[qid].append(alias)
max_cands = 100
max_aliases = 100
print(
f"Average number of connections {np.average(all_qid_len)}, 99.9th percentile {np.percentile(all_qid_len, 99.9)} - Trimming to {max_cands}")
print(
f"Average number of connections {np.average(all_alias_len)}, 99.9th percentile {np.percentile(all_alias_len, 99.9)} - Trimming to {max_aliases}")
for alias in tqdm(list(alias2qids.keys()), desc="Iterating over aliases"):
alias2qids[alias] = alias2qids[alias][:max_cands]
for qid in tqdm(list(qid2alias.keys()), desc="Iterating over qids"):
qid2alias[qid] = qid2alias[qid][:max_aliases]
for qid in tqdm(list(qid2alias_wd.keys()), desc="Iterating over WD qids"):
qid2alias_wd[qid] = qid2alias_wd[qid][:max_aliases]
# This maps our keys that we use in the helper functions below to the right tri in tri collection.
            # The values are specific strings as outlined in the record trie collection class
fmt_types = {ALIAS2QID: "qid_cand"}
max_values = {ALIAS2QID: max_cands}
input_dicts = {ALIAS2QID: alias2qids}
print(f"Max Values {max_values}")
self.tri_collection_qids = RecordTrieCollection(load_dir=None, input_dicts=input_dicts, vocabulary=entity_dump.get_qid2eid_dict(),
fmt_types=fmt_types, max_values=max_values)
# This maps our keys that we use in the helper functions below to the right tri in tri collection.
            # The values are specific strings as outlined in the record trie collection class
fmt_types = {QID2ALIAS: "qid_cand"}
max_values = {QID2ALIAS: max_aliases}
input_dicts = {QID2ALIAS: qid2alias}
alias_vocab = {al: i for i, al in enumerate(alias2qids.keys())}
print(f"Max Values {max_values}")
self.tri_collection_aliases = RecordTrieCollection(load_dir=None, input_dicts=input_dicts, vocabulary=alias_vocab,
fmt_types=fmt_types, max_values=max_values)
# This maps our keys that we use in the helper functions below to the right tri in tri collection.
            # The values are specific strings as outlined in the record trie collection class
fmt_types = {QID2ALIASWD: "qid_cand"}
max_values = {QID2ALIASWD: max_aliases}
input_dicts = {QID2ALIASWD: qid2alias_wd}
alias_vocab = {al: i for i, al in enumerate(alias2qid_wd.keys())}
print(f"Max Values {max_values}")
self.tri_collection_aliases_wd = RecordTrieCollection(load_dir=None, input_dicts=input_dicts, vocabulary=alias_vocab,
fmt_types=fmt_types, max_values=max_values)
else:
assert qid2title is not None, f"You have a None entity_dump, we require qid2title to not be None"
assert tri_collection_qids is not None, f"You have a None entity_dump, we require tri_collection_qids to not be None"
assert tri_collection_aliases is not None, f"You have a None entity_dump, we require tri_collection_aliases to not be None"
            assert tri_collection_aliases_wd is not None, f"You have a None entity_dump, we require tri_collection_aliases_wd to not be None"
self.qid2title = qid2title
self.tri_collection_qids = tri_collection_qids
self.tri_collection_aliases = tri_collection_aliases
self.tri_collection_aliases_wd = tri_collection_aliases_wd
@classmethod
def get_qid_tri_dir(cls, dump_dir):
return os.path.join(dump_dir, "QIDTRI")
@classmethod
def get_alias_tri_dir(cls, dump_dir):
return os.path.join(dump_dir, "ALIASTRI")
@classmethod
def get_alias_tri_wd_dir(cls, dump_dir):
return os.path.join(dump_dir, "ALIASTRIWD")
@classmethod
def get_qid2title_file(cls, dump_dir):
return os.path.join(dump_dir, "QID2TITLE.json")
def dump(self, dump_dir):
self.tri_collection_qids.dump(save_dir=self.get_qid_tri_dir(dump_dir))
self.tri_collection_aliases.dump(save_dir=self.get_alias_tri_dir(dump_dir))
self.tri_collection_aliases_wd.dump(save_dir=self.get_alias_tri_wd_dir(dump_dir))
with open(self.get_qid2title_file(dump_dir), "w", encoding='utf8') as out_f:
ujson.dump(self.qid2title, out_f, ensure_ascii=ENSURE_ASCII)
@classmethod
def load(cls, dump_dir):
tri_collection_qids = RecordTrieCollection(load_dir=cls.get_qid_tri_dir(dump_dir))
tri_collection_aliases = RecordTrieCollection(load_dir=cls.get_alias_tri_dir(dump_dir))
tri_collection_aliases_wd = RecordTrieCollection(load_dir=cls.get_alias_tri_wd_dir(dump_dir))
with open(cls.get_qid2title_file(dump_dir)) as in_f:
qid2title = ujson.load(in_f)
return cls(entity_dump=None, alias2qid_wd=None, qid2title=qid2title, tri_collection_qids=tri_collection_qids, tri_collection_aliases=tri_collection_aliases, tri_collection_aliases_wd=tri_collection_aliases_wd)
def contains_qid(self, qid):
return self.tri_collection_aliases.is_key_in_trie(QID2ALIAS, qid)
def contains_qid_wd(self, qid):
return self.tri_collection_aliases_wd.is_key_in_trie(QID2ALIASWD, qid)
def contains_alias(self, alias):
return self.tri_collection_qids.is_key_in_trie(ALIAS2QID, alias)
def get_all_aliases(self, qid: str, default: Any = None) -> Set[str]:
if self.contains_qid(qid):
return self.tri_collection_aliases.get_value(QID2ALIAS, qid)
else:
return default
def get_all_aliases_wd(self, qid: str, default: Any = None) -> Set[str]:
if self.contains_qid_wd(qid):
return self.tri_collection_aliases_wd.get_value(QID2ALIASWD, qid)
else:
return default
def get_num_cands(self, alias):
assert self.contains_alias(alias), f"{alias} not in mapping"
return len(self.tri_collection_qids.get_value(ALIAS2QID, alias))
def get_cand_pos(self, alias, qid):
assert self.contains_alias(alias), f"{alias} not in mapping"
try:
return self.tri_collection_qids.get_value(ALIAS2QID, alias).index(qid)
except:
return -1
def get_title(self, qid):
return self.qid2title.get(qid, None)
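    # Illustrative usage sketch (the alias/QID strings are placeholders, not taken from any data):
    # given a built instance wl, the labelling functions typically call
    #   wl.contains_alias("some alias")        -> whether the alias has a candidate list
    #   wl.get_cand_pos("some alias", "Q42")   -> rank of "Q42" among the alias' candidates, or -1
    #   wl.get_all_aliases_wd("Q42", set())    -> Wikidata-augmented aliases usable for weak labelling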
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, default='data/wiki_dump', help='Directory for data to be saved.')
parser.add_argument('--filtered_alias_subdir', type=str, default='alias_filtered_sentences', help='Subdirectory to save filtered sentences.')
parser.add_argument('--out_subdir', type=str, default='test_wl2', help='Where to write processed data to.')
parser.add_argument('--wd_aliases', type=str, default='/lfs/raiders10/0/lorr1/augmented_alias_map_large_uncased_1216.jsonl',
help='Path to directory with JSONL mapping alias to QID')
parser.add_argument('--no_permute_alias', action='store_true',
                        help='If set, do not replace the aliases of added entities with the most conflicting alias.')
parser.add_argument('--max_candidates', type=int, default=30)
parser.add_argument('--processes', type=int, default=int(0.1 * multiprocessing.cpu_count()))
parser.add_argument('--overwrite', action='store_true', help='Rebuild WL metadata.')
parser.add_argument('--test', action='store_true', help='If set, will only generate for one file.')
args = parser.parse_args()
return args
def init_process(wl_metadata_dump):
global wl_metadata_global
wl_metadata_global = WLMetadata.load(wl_metadata_dump)
def launch_subprocess(args, outdir, temp_outdir, wl_metadata_dump, in_files):
all_process_args = [tuple([i + 1,
len(in_files),
outdir,
temp_outdir,
args,
in_files[i],
]) for i in range(len(in_files))]
print("Starting pool...")
pool = multiprocessing.Pool(processes=args.processes, initializer=init_process, initargs=([wl_metadata_dump]))
print("Starting processes...")
docs_not_qid = set()
for docs_not_qid_subset in tqdm(pool.imap(subprocess, all_process_args, chunksize=1), total=len(all_process_args)):
docs_not_qid.update(set(docs_not_qid_subset))
pool.close()
return list(docs_not_qid)
def choose_new_alias(max_cands, alias, qid, wl_metadata, doc_ent, sentence_idx):
# Set a seed to ensure that across ablations, the aliases chosen will be consistent. For example, if we are processing
# the document for "Q123" and are on sentence 55 of that article, and are currently labeling the QID "Q88" then we will
# set the seed to 1235588.
seed = int(str(doc_ent[1:]) + str(sentence_idx) + str(qid[1:]))
random.seed(seed)
if not wl_metadata.contains_qid(qid):
return alias
# If qid is in the top 30 for the alias, and there are at least 2 candidates for that alias, just use that alias
if 0 <= wl_metadata.get_cand_pos(alias, qid) < max_cands and wl_metadata.get_num_cands(alias) > 1:
return alias
    # Otherwise, find all other aliases for that qid that are in the top max_cands and have at least 2 candidates, and pick the one with the most candidates
top_mc_aliases = [al for al in wl_metadata.get_all_aliases(qid) if 0 <= wl_metadata.get_cand_pos(al, qid) < max_cands]
top_mc_gtr1_cand_aliases = sorted([[al, wl_metadata.get_num_cands(al)] for al in top_mc_aliases if wl_metadata.get_num_cands(al) > 1],
key=lambda x: x[1], reverse=True)
if len(top_mc_gtr1_cand_aliases) > 0:
return top_mc_gtr1_cand_aliases[0][0] # random.choice(top_mc_gtr1_cand_aliases)
# We might be in the situation where there are a bunch of aliases for that qid (and the top max cands (mc) condition is met) but they
# all have only 1 candidate. That's better than nothing, so in that case, randomly return one of those aliases.
if len(top_mc_aliases) > 0:
return random.choice(top_mc_aliases)
# If all of the above fail, then just return the original alias
return alias
def sort_aliases(spans, qids, aliases, sources):
if len(aliases) == 0:
return spans, qids, aliases, sources
res = sorted(zip(spans, qids, aliases, sources), key=lambda x: [x[0][0], x[0][1]], reverse=False)
spans, qids, aliases, sources = zip(*res)
return spans, qids, aliases, sources
def subprocess(all_args):
prep_utils.print_memory()
start_time = time.time()
random.seed(1234)
lfs = list(wl_func.all.values())
for lf in lfs:
assert lf.__name__ != "gold", f"The name \"gold\" is already reserved. Please name it something else."
print("LFS", lfs)
idx, total, outdir, temp_outdir, args, in_filepath = all_args
num_lines = sum(1 for _ in open(in_filepath))
# create output files
out_fname = os.path.join(outdir, prep_utils.get_outfname(in_filepath))
print(f"Starting {idx}/{total}. Reading in {in_filepath}. Ouputting to {out_fname}")
filtered_qid_counts = defaultdict(lambda: defaultdict(int))
filtered_aliases_to_qid_count = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))
no_qid = []
added_alias = defaultdict(int)
with open(in_filepath, 'r', encoding="utf-8") as in_file, open(out_fname, "w", encoding='utf8') as out_file:
for doc_idx, doc in tqdm(enumerate(in_file), total=num_lines, desc=f"Processing"):
doc = ujson.loads(doc)
title = doc['title']
doc_entity = str(doc['qid'])
# if doc_entity not in ["Q3493976", "Q2655944"]:
# continue
if doc_entity != "-1" and wl_metadata_global.contains_qid(doc_entity):
no_qid.append(title)
# Gather all aliases -> qids in the document and qid -> list of aliases
aliases_to_qids_in_doc, qid_to_aliases_in_doc = collect_aliases_to_qids_in_doc(doc, wl_metadata_global)
# print(aliases_to_qids_in_doc)
# print("*********************")
# print(qid_to_aliases_in_doc)
new_sentences = []
for sentence_idx, line in enumerate(doc['sentences']):
orig_spans, orig_qids, orig_aliases, orig_sources = line["char_spans"], line["qids"], line["aliases"], ["gold"] * len(line["aliases"])
added_alias["gold"] += len(orig_aliases)
for lf in lfs:
new_spans, new_qids, new_aliases = lf(doc_entity, line["sentence"], orig_spans,
orig_qids, orig_aliases,
aliases_to_qids_in_doc, wl_metadata_global)
new_sources = [lf.__name__] * len(new_aliases)
assert len(new_spans) == len(new_qids) == len(new_aliases)
added_alias[lf.__name__] += len(new_aliases)
# for sp, q, al in zip(new_spans, new_qids, new_aliases):
# print("SENT:", line["sentence"])
# print("SP:", sp, "AL", al, "Q", q, "LF", lf.__name__)
# print(f"Time for lf {lf.__name__} is {time.time()-st}")
orig_spans.extend(new_spans)
orig_qids.extend(new_qids)
orig_aliases.extend(new_aliases)
orig_sources.extend(new_sources)
orig_spans, orig_qids, orig_aliases, orig_sources = sort_aliases(orig_spans, orig_qids, orig_aliases, orig_sources)
final_spans, final_qids, final_aliases, final_sources = list(orig_spans), list(orig_qids), list(orig_aliases), list(orig_sources)
final_orig_aliases = final_aliases[:]
# Permute aliases if flag is turned on
# If not permuting alias, just use the aliases given. HOWEVER, note that if the qid is not in the top-30 for this alias,
# then this label will be DROPPED from the training set later on. So it is likely recommended to leave permuting
# alias ON to prevent this loss of information
# st = time.time()
if not args.no_permute_alias:
for j in range(len(final_aliases)):
alias = final_aliases[j]
associated_qid = final_qids[j]
new_alias = choose_new_alias(args.max_candidates, alias, associated_qid, wl_metadata_global, doc_entity, line['doc_sent_idx'])
# if alias != new_alias:
# print("SWAPPING", alias, "FOR", new_alias, "QID", associated_qid, wl_metadata_global.get_title(associated_qid))
# print(wl_metadata_global.get_cand_pos(alias, associated_qid), wl_metadata_global.get_num_cands(alias))
final_aliases[j] = new_alias
new_sentences.append({
'doc_sent_idx': line['doc_sent_idx'],
'sentence': line['sentence'],
'aliases': final_aliases,
'unswap_aliases': final_orig_aliases,
'char_spans': final_spans,
'qids': final_qids,
'gold': [True if fs == "gold" else False for fs in final_sources],
'sources': final_sources
})
# Update stats
if len(final_aliases) > 0:
for alias, qid, source in zip(final_aliases, final_qids, final_sources):
filtered_qid_counts[source][qid] += 1
filtered_aliases_to_qid_count[source][alias][qid] += 1
doc['sentences'] = new_sentences
out_file.write(ujson.dumps(doc, ensure_ascii=ENSURE_ASCII) + '\n')
out_file.close()
utils.dump_json_file(os.path.join(temp_outdir, f"filtered_alias_to_qid_count_{idx}.json"), filtered_aliases_to_qid_count)
utils.dump_json_file(os.path.join(temp_outdir, f"filtered_qid_counts_{idx}.json"), filtered_qid_counts)
print(f"Finished {idx}/{total}. Written to {out_fname}. {time.time() - start_time} seconds.")
print(ujson.dumps(added_alias, indent=4, ensure_ascii=ENSURE_ASCII))
return no_qid
def collect_aliases_to_qids_in_doc(doc, wl_metadata):
"""
:param doc:
:param wl_metadata:
:return: aliases_to_qids_in_doc_pruned, qid_to_aliases_in_doc_pruned
The method gathers a dict of alias->qid->count for all qids linked to in a given document, including the aliases that refer to the QID of the Wikipedia page itself.
These are then pruned to remove aliases that have different qids that appear with similar frequencies (a sign of a noisy alias).
"""
st = time.time()
aliases_to_qids_in_doc = defaultdict(lambda: defaultdict(int))
doc_entity = str(doc['qid'])
# Add aliases pointing to the document
aliases = wl_metadata.get_all_aliases_wd(doc_entity, set())
for al in aliases:
assert len(al) > 0
# We correct this count below when pruning
aliases_to_qids_in_doc[al][doc_entity] = 1
# We always add other aliases so when we prune, we can remove highly conflicting aliases to use during weak labelling
for sentence in doc['sentences']:
sentence_qids = sentence['qids']
# Update the aliases_to_qids_in_doc
for qid in sentence_qids:
for al in wl_metadata.get_all_aliases_wd(qid, set()):
aliases_to_qids_in_doc[al][qid] += 1
aliases_to_qids_in_doc_pruned, qid_to_aliases_in_doc_pruned = prune_aliases_to_qids_in_doc(doc_entity, aliases_to_qids_in_doc)
# print(f"Time for collect aliases", time.time() - st)
return aliases_to_qids_in_doc_pruned, qid_to_aliases_in_doc_pruned
def prune_aliases_to_qids_in_doc(doc_entity, aliases_to_qids_in_doc):
"""doc_entity: QID of page we are on
aliases_to_qids_in_doc: list of aliases on page -> QID they link to UNION all aliases that point to doc_entity -> doc_entity"""
st = time.time()
aliases_to_qids_in_doc_pruned = {}
qid_to_aliases_in_doc_pruned = defaultdict(list)
total_qid_count = sum(v for qid_dict in aliases_to_qids_in_doc.values() for v in qid_dict.values())
# print(f"Total Count for {doc_entity} is {total_qid_count}")
# We want to assign some weight of doc_entity aliases -> doc_entity (they are often not actual links in that Wikipedia so get a low weight by default)
doc_entity_perc_of_total = 0.2
    # This represents the popularity of the doc entity in a document. If some other QID appears with an alias
    # also associated with the doc_entity more than doc_entity_count times, we remove this highly conflicting alias from consideration
popularity_threshold = 5
doc_entity_count = max(doc_entity_perc_of_total * total_qid_count, 1.0)
for al in aliases_to_qids_in_doc:
qid_dict = aliases_to_qids_in_doc[al]
# qid_dict is qid -> count of number of times al linked to qid; if only one qid, add it for that alias
# otherwise, find the most popular qid if one exists
if len(qid_dict) == 1:
qid_to_add = next(iter(qid_dict.keys()))
# Add the qid to the list of aliases
aliases_to_qids_in_doc_pruned[al] = qid_to_add
qid_to_aliases_in_doc_pruned[qid_to_add].append(al)
else:
# Assign the doc entity weight
if doc_entity in qid_dict:
qid_dict[doc_entity] = doc_entity_count
sorted_qids = list(sorted(qid_dict.items(), key=lambda x: x[1], reverse=True))
# Only add if count is above threshold
if sorted_qids[0][1] > popularity_threshold * sorted_qids[1][1]:
qid_to_add = sorted_qids[0][0]
aliases_to_qids_in_doc_pruned[al] = qid_to_add
qid_to_aliases_in_doc_pruned[qid_to_add].append(al)
# print(f"Time for prune aliases", time.time() - st)
return aliases_to_qids_in_doc_pruned, qid_to_aliases_in_doc_pruned
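# Worked example for prune_aliases_to_qids_in_doc (made-up values): with doc_entity "Q1" and input
#   {"jaguar": {"Q1": 1, "Q650": 12}, "big cat": {"Q650": 3}}
# the single-candidate alias "big cat" -> "Q650" is kept, while "jaguar" is dropped: after "Q1" is
# reweighted to doc_entity_count (0.2 * 16 = 3.2), the top count 12 is not more than 5x the runner-up,
# so the alias is considered too conflicting to use for weak labelling.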
# Here, we just copy the entity dump over to the new directory but delete erroneous -1 qids
# that crop up if we don't have the page title in our mapping.
def modify_counts_and_dump(args, entity_dump):
alias2qids = entity_dump.get_alias2qids_dict()
qid2title = entity_dump.get_qid2title_dict()
if "-1" in qid2title:
del qid2title["-1"]
max_candidates = entity_dump.max_candidates
for al in alias2qids:
all_pairs = alias2qids[al]
qids = [p[0] for p in all_pairs]
if "-1" in qids:
print(f"BAD: for alias {al} there is a -1 QID of {alias2qids[al]}. Will remove")
all_pairs.pop(qids.index("-1"))
alias2qids[al] = all_pairs
# Make entity dump object
entity_dump = EntitySymbolsPrep(
max_candidates=max_candidates,
alias2qids=alias2qids,
qid2title=qid2title
)
out_dir = os.path.join(args.data_dir, args.out_subdir, 'entity_db/entity_mappings')
entity_dump.save(out_dir)
def main():
gl_start = time.time()
multiprocessing.set_start_method("spawn", force=True)
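    # "spawn" (rather than fork) means each worker process rebuilds its own state: init_process
    # reloads the WLMetadata tries from disk instead of inheriting them from the parent.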
args = parse_args()
print(ujson.dumps(vars(args), indent=4))
outdir = prep_utils.get_outdir(args.data_dir, args.out_subdir, remove_old=True)
temp_outdir = prep_utils.get_outdir(os.path.join(args.data_dir, args.out_subdir), "_temp", remove_old=True)
temp_metadata_outdir = prep_utils.get_outdir(os.path.join(args.data_dir, args.filtered_alias_subdir), "_for_rerun_WL", remove_old=False)
    # get input files
path = os.path.join(args.data_dir, args.filtered_alias_subdir, "*.jsonl")
in_files = prep_utils.glob_files(path)
# if in test mode, just take a single input file
if args.test:
in_files = in_files[:1]
st = time.time()
entity_dump = None
wl_metadata_dump = os.path.join(temp_metadata_outdir, "wl_metadata")
if not os.path.exists(wl_metadata_dump) or args.overwrite:
# this loads all entity information (aliases, titles, etc)
print(f"Reading in entity dump...")
entity_dump = EntitySymbolsPrep.load_from_cache(load_dir=os.path.join(args.data_dir, args.filtered_alias_subdir, 'entity_db/entity_mappings'))
print(f"Loaded entity dump with {entity_dump.num_entities} entities.")
print(f"Reading WD aliases")
with open(args.wd_aliases) as in_f:
wd_a2q = {k:v for k,v in ujson.load(in_f).items() if len(k.strip()) > 0}
utils.ensure_dir(wl_metadata_dump)
wl_metadata = WLMetadata(entity_dump, wd_a2q)
wl_metadata.dump(wl_metadata_dump)
print(f"Time to create WL metadata {time.time() - st}")
# launch subprocesses and collect outputs
print(f"Loaded {len(in_files)} files from {path}. Launching {args.processes} processes.")
docs_not_qid = launch_subprocess(args, outdir, temp_outdir, wl_metadata_dump, in_files)
# Gather new counts
# Total QID count
qid_count_files = glob.glob(f"{temp_outdir}/filtered_qid_counts_*")
list_of_qids_dicts = [utils.load_json_file(f) for f in qid_count_files]
filtered_qid_count = prep_utils.aggregate_list_of_nested_dictionaries(list_of_qids_dicts)
# Alias, qid pair counts
aliases_to_qid_count_files = glob.glob(f"{temp_outdir}/filtered_alias_to_qid_count_*")
list_of_alias_dicts = [utils.load_json_file(f) for f in aliases_to_qid_count_files]
filtered_aliases_to_qid = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))
for subdict in list_of_alias_dicts:
for source_key, subsubdict in subdict.items():
for alias_key, subsubsubdic in subsubdict.items():
for qid, cnt in subsubsubdic.items():
filtered_aliases_to_qid[source_key][alias_key][qid] += cnt
# Save counts
utils.dump_json_file(os.path.join(outdir, "filtered_qid_count.json"), filtered_qid_count)
utils.dump_json_file(os.path.join(outdir, "filtered_aliases_to_qid_count.json"), filtered_aliases_to_qid)
with open(os.path.join(outdir, "docs_not_qids.json"), "w", encoding='utf8') as out_f:
ujson.dump(docs_not_qid, out_f, ensure_ascii=ENSURE_ASCII)
if entity_dump is None:
print(f"Reading in entity dump...")
entity_dump = EntitySymbolsPrep.load_from_cache(load_dir=os.path.join(args.data_dir, args.filtered_alias_subdir, 'entity_db/entity_mappings'))
print(f"Loaded entity dump with {entity_dump.num_entities} entities.")
modify_counts_and_dump(args, entity_dump)
# remove temp
shutil.rmtree(temp_outdir)
vars(args)["out_dir"] = outdir
prep_utils.save_config(args, "add_labels_single_func_config.json")
print(f"Finished add_labels_single_func in {time.time() - gl_start} seconds.")
if __name__ == '__main__':
main()
| [
"ujson.dumps",
"bootleg_data_prep.utils.data_prep_utils.glob_files",
"multiprocessing.cpu_count",
"bootleg_data_prep.utils.weak_label_funcs.wl_func.all.values",
"multiprocessing.set_start_method",
"ujson.load",
"os.path.exists",
"argparse.ArgumentParser",
"ujson.loads",
"bootleg_data_prep.utils.data_prep_utils.save_config",
"bootleg_data_prep.utils.data_prep_utils.get_outdir",
"glob.glob",
"bootleg_data_prep.utils.classes.record_trie_collection.RecordTrieCollection",
"bootleg_data_prep.utils.data_prep_utils.print_memory",
"random.choice",
"numpy.average",
"bootleg_data_prep.utils.data_prep_utils.get_outfname",
"bootleg_data_prep.utils.utils.ensure_dir",
"time.time",
"bootleg_data_prep.utils.classes.entity_symbols_prep.EntitySymbolsPrep",
"bootleg_data_prep.utils.utils.load_json_file",
"ujson.dump",
"tqdm.tqdm",
"os.path.join",
"random.seed",
"collections.defaultdict",
"multiprocessing.Pool",
"bootleg_data_prep.utils.data_prep_utils.aggregate_list_of_nested_dictionaries",
"shutil.rmtree",
"numpy.percentile"
] | [((8862, 8887), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (8885, 8887), False, 'import argparse\n'), ((10612, 10717), 'multiprocessing.Pool', 'multiprocessing.Pool', ([], {'processes': 'args.processes', 'initializer': 'init_process', 'initargs': '[wl_metadata_dump]'}), '(processes=args.processes, initializer=init_process,\n initargs=[wl_metadata_dump])\n', (10632, 10717), False, 'import multiprocessing\n'), ((11430, 11447), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (11441, 11447), False, 'import random\n'), ((13106, 13131), 'bootleg_data_prep.utils.data_prep_utils.print_memory', 'prep_utils.print_memory', ([], {}), '()\n', (13129, 13131), True, 'import bootleg_data_prep.utils.data_prep_utils as prep_utils\n'), ((13149, 13160), 'time.time', 'time.time', ([], {}), '()\n', (13158, 13160), False, 'import time\n'), ((13165, 13182), 'random.seed', 'random.seed', (['(1234)'], {}), '(1234)\n', (13176, 13182), False, 'import random\n'), ((13876, 13892), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (13887, 13892), False, 'from collections import defaultdict\n'), ((19433, 19444), 'time.time', 'time.time', ([], {}), '()\n', (19442, 19444), False, 'import time\n'), ((20721, 20732), 'time.time', 'time.time', ([], {}), '()\n', (20730, 20732), False, 'import time\n'), ((20807, 20824), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (20818, 20824), False, 'from collections import defaultdict\n'), ((23505, 23601), 'bootleg_data_prep.utils.classes.entity_symbols_prep.EntitySymbolsPrep', 'EntitySymbolsPrep', ([], {'max_candidates': 'max_candidates', 'alias2qids': 'alias2qids', 'qid2title': 'qid2title'}), '(max_candidates=max_candidates, alias2qids=alias2qids,\n qid2title=qid2title)\n', (23522, 23601), False, 'from bootleg_data_prep.utils.classes.entity_symbols_prep import EntitySymbolsPrep\n'), ((23642, 23715), 'os.path.join', 'os.path.join', (['args.data_dir', 'args.out_subdir', '"""entity_db/entity_mappings"""'], {}), "(args.data_dir, args.out_subdir, 'entity_db/entity_mappings')\n", (23654, 23715), False, 'import os\n'), ((23775, 23786), 'time.time', 'time.time', ([], {}), '()\n', (23784, 23786), False, 'import time\n'), ((23791, 23844), 'multiprocessing.set_start_method', 'multiprocessing.set_start_method', (['"""spawn"""'], {'force': '(True)'}), "('spawn', force=True)\n", (23823, 23844), False, 'import multiprocessing\n'), ((23927, 23997), 'bootleg_data_prep.utils.data_prep_utils.get_outdir', 'prep_utils.get_outdir', (['args.data_dir', 'args.out_subdir'], {'remove_old': '(True)'}), '(args.data_dir, args.out_subdir, remove_old=True)\n', (23948, 23997), True, 'import bootleg_data_prep.utils.data_prep_utils as prep_utils\n'), ((24287, 24353), 'os.path.join', 'os.path.join', (['args.data_dir', 'args.filtered_alias_subdir', '"""*.jsonl"""'], {}), "(args.data_dir, args.filtered_alias_subdir, '*.jsonl')\n", (24299, 24353), False, 'import os\n'), ((24369, 24396), 'bootleg_data_prep.utils.data_prep_utils.glob_files', 'prep_utils.glob_files', (['path'], {}), '(path)\n', (24390, 24396), True, 'import bootleg_data_prep.utils.data_prep_utils as prep_utils\n'), ((24510, 24521), 'time.time', 'time.time', ([], {}), '()\n', (24519, 24521), False, 'import time\n'), ((24568, 24617), 'os.path.join', 'os.path.join', (['temp_metadata_outdir', '"""wl_metadata"""'], {}), "(temp_metadata_outdir, 'wl_metadata')\n", (24580, 24617), False, 'import os\n'), ((25696, 25745), 'glob.glob', 'glob.glob', 
(['f"""{temp_outdir}/filtered_qid_counts_*"""'], {}), "(f'{temp_outdir}/filtered_qid_counts_*')\n", (25705, 25745), False, 'import glob\n'), ((25847, 25915), 'bootleg_data_prep.utils.data_prep_utils.aggregate_list_of_nested_dictionaries', 'prep_utils.aggregate_list_of_nested_dictionaries', (['list_of_qids_dicts'], {}), '(list_of_qids_dicts)\n', (25895, 25915), True, 'import bootleg_data_prep.utils.data_prep_utils as prep_utils\n'), ((25978, 26035), 'glob.glob', 'glob.glob', (['f"""{temp_outdir}/filtered_alias_to_qid_count_*"""'], {}), "(f'{temp_outdir}/filtered_alias_to_qid_count_*')\n", (25987, 26035), False, 'import glob\n'), ((27255, 27281), 'shutil.rmtree', 'shutil.rmtree', (['temp_outdir'], {}), '(temp_outdir)\n', (27268, 27281), False, 'import shutil\n'), ((27321, 27387), 'bootleg_data_prep.utils.data_prep_utils.save_config', 'prep_utils.save_config', (['args', '"""add_labels_single_func_config.json"""'], {}), "(args, 'add_labels_single_func_config.json')\n", (27343, 27387), True, 'import bootleg_data_prep.utils.data_prep_utils as prep_utils\n'), ((6063, 6095), 'os.path.join', 'os.path.join', (['dump_dir', '"""QIDTRI"""'], {}), "(dump_dir, 'QIDTRI')\n", (6075, 6095), False, 'import os\n'), ((6171, 6205), 'os.path.join', 'os.path.join', (['dump_dir', '"""ALIASTRI"""'], {}), "(dump_dir, 'ALIASTRI')\n", (6183, 6205), False, 'import os\n'), ((6284, 6320), 'os.path.join', 'os.path.join', (['dump_dir', '"""ALIASTRIWD"""'], {}), "(dump_dir, 'ALIASTRIWD')\n", (6296, 6320), False, 'import os\n'), ((6397, 6437), 'os.path.join', 'os.path.join', (['dump_dir', '"""QID2TITLE.json"""'], {}), "(dump_dir, 'QID2TITLE.json')\n", (6409, 6437), False, 'import os\n'), ((12648, 12677), 'random.choice', 'random.choice', (['top_mc_aliases'], {}), '(top_mc_aliases)\n', (12661, 12677), False, 'import random\n'), ((13199, 13219), 'bootleg_data_prep.utils.weak_label_funcs.wl_func.all.values', 'wl_func.all.values', ([], {}), '()\n', (13217, 13219), False, 'from bootleg_data_prep.utils.weak_label_funcs import wl_func\n'), ((13554, 13590), 'bootleg_data_prep.utils.data_prep_utils.get_outfname', 'prep_utils.get_outfname', (['in_filepath'], {}), '(in_filepath)\n', (13577, 13590), True, 'import bootleg_data_prep.utils.data_prep_utils as prep_utils\n'), ((18538, 18606), 'os.path.join', 'os.path.join', (['temp_outdir', 'f"""filtered_alias_to_qid_count_{idx}.json"""'], {}), "(temp_outdir, f'filtered_alias_to_qid_count_{idx}.json')\n", (18550, 18606), False, 'import os\n'), ((18664, 18724), 'os.path.join', 'os.path.join', (['temp_outdir', 'f"""filtered_qid_counts_{idx}.json"""'], {}), "(temp_outdir, f'filtered_qid_counts_{idx}.json')\n", (18676, 18724), False, 'import os\n'), ((18855, 18916), 'ujson.dumps', 'ujson.dumps', (['added_alias'], {'indent': '(4)', 'ensure_ascii': 'ENSURE_ASCII'}), '(added_alias, indent=4, ensure_ascii=ENSURE_ASCII)\n', (18866, 18916), False, 'import ujson\n'), ((24038, 24082), 'os.path.join', 'os.path.join', (['args.data_dir', 'args.out_subdir'], {}), '(args.data_dir, args.out_subdir)\n', (24050, 24082), False, 'import os\n'), ((24159, 24214), 'os.path.join', 'os.path.join', (['args.data_dir', 'args.filtered_alias_subdir'], {}), '(args.data_dir, args.filtered_alias_subdir)\n', (24171, 24214), False, 'import os\n'), ((25198, 25232), 'bootleg_data_prep.utils.utils.ensure_dir', 'utils.ensure_dir', (['wl_metadata_dump'], {}), '(wl_metadata_dump)\n', (25214, 25232), False, 'from bootleg_data_prep.utils import utils\n'), ((25772, 25795), 'bootleg_data_prep.utils.utils.load_json_file', 
'utils.load_json_file', (['f'], {}), '(f)\n', (25792, 25795), False, 'from bootleg_data_prep.utils import utils\n'), ((26063, 26086), 'bootleg_data_prep.utils.utils.load_json_file', 'utils.load_json_file', (['f'], {}), '(f)\n', (26083, 26086), False, 'from bootleg_data_prep.utils import utils\n'), ((26547, 26594), 'os.path.join', 'os.path.join', (['outdir', '"""filtered_qid_count.json"""'], {}), "(outdir, 'filtered_qid_count.json')\n", (26559, 26594), False, 'import os\n'), ((26641, 26699), 'os.path.join', 'os.path.join', (['outdir', '"""filtered_aliases_to_qid_count.json"""'], {}), "(outdir, 'filtered_aliases_to_qid_count.json')\n", (26653, 26699), False, 'import os\n'), ((26824, 26882), 'ujson.dump', 'ujson.dump', (['docs_not_qid', 'out_f'], {'ensure_ascii': 'ENSURE_ASCII'}), '(docs_not_qid, out_f, ensure_ascii=ENSURE_ASCII)\n', (26834, 26882), False, 'import ujson\n'), ((1984, 2036), 'tqdm.tqdm', 'tqdm', (['alias2qid_wd'], {'desc': '"""Iterating over WD aliases"""'}), "(alias2qid_wd, desc='Iterating over WD aliases')\n", (1988, 2036), False, 'from tqdm import tqdm\n'), ((4311, 4444), 'bootleg_data_prep.utils.classes.record_trie_collection.RecordTrieCollection', 'RecordTrieCollection', ([], {'load_dir': 'None', 'input_dicts': 'input_dicts', 'vocabulary': 'alias_vocab', 'fmt_types': 'fmt_types', 'max_values': 'max_values'}), '(load_dir=None, input_dicts=input_dicts, vocabulary=\n alias_vocab, fmt_types=fmt_types, max_values=max_values)\n', (4331, 4444), False, 'from bootleg_data_prep.utils.classes.record_trie_collection import RecordTrieCollection\n'), ((5035, 5168), 'bootleg_data_prep.utils.classes.record_trie_collection.RecordTrieCollection', 'RecordTrieCollection', ([], {'load_dir': 'None', 'input_dicts': 'input_dicts', 'vocabulary': 'alias_vocab', 'fmt_types': 'fmt_types', 'max_values': 'max_values'}), '(load_dir=None, input_dicts=input_dicts, vocabulary=\n alias_vocab, fmt_types=fmt_types, max_values=max_values)\n', (5055, 5168), False, 'from bootleg_data_prep.utils.classes.record_trie_collection import RecordTrieCollection\n'), ((6819, 6879), 'ujson.dump', 'ujson.dump', (['self.qid2title', 'out_f'], {'ensure_ascii': 'ENSURE_ASCII'}), '(self.qid2title, out_f, ensure_ascii=ENSURE_ASCII)\n', (6829, 6879), False, 'import ujson\n'), ((7301, 7317), 'ujson.load', 'ujson.load', (['in_f'], {}), '(in_f)\n', (7311, 7317), False, 'import ujson\n'), ((13729, 13745), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (13740, 13745), False, 'from collections import defaultdict\n'), ((14115, 14131), 'ujson.loads', 'ujson.loads', (['doc'], {}), '(doc)\n', (14126, 14131), False, 'import ujson\n'), ((19494, 19510), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (19505, 19510), False, 'from collections import defaultdict\n'), ((24629, 24661), 'os.path.exists', 'os.path.exists', (['wl_metadata_dump'], {}), '(wl_metadata_dump)\n', (24643, 24661), False, 'import os\n'), ((26740, 26782), 'os.path.join', 'os.path.join', (['outdir', '"""docs_not_qids.json"""'], {}), "(outdir, 'docs_not_qids.json')\n", (26752, 26782), False, 'import os\n'), ((24857, 24945), 'os.path.join', 'os.path.join', (['args.data_dir', 'args.filtered_alias_subdir', '"""entity_db/entity_mappings"""'], {}), "(args.data_dir, args.filtered_alias_subdir,\n 'entity_db/entity_mappings')\n", (24869, 24945), False, 'import os\n'), ((27021, 27109), 'os.path.join', 'os.path.join', (['args.data_dir', 'args.filtered_alias_subdir', '"""entity_db/entity_mappings"""'], {}), "(args.data_dir, 
args.filtered_alias_subdir,\n 'entity_db/entity_mappings')\n", (27033, 27109), False, 'import os\n'), ((9780, 9807), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (9805, 9807), False, 'import multiprocessing\n'), ((13823, 13839), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (13834, 13839), False, 'from collections import defaultdict\n'), ((18440, 18483), 'ujson.dumps', 'ujson.dumps', (['doc'], {'ensure_ascii': 'ENSURE_ASCII'}), '(doc, ensure_ascii=ENSURE_ASCII)\n', (18451, 18483), False, 'import ujson\n'), ((18808, 18819), 'time.time', 'time.time', ([], {}), '()\n', (18817, 18819), False, 'import time\n'), ((26194, 26210), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (26205, 26210), False, 'from collections import defaultdict\n'), ((27436, 27447), 'time.time', 'time.time', ([], {}), '()\n', (27445, 27447), False, 'import time\n'), ((2406, 2429), 'numpy.average', 'np.average', (['all_qid_len'], {}), '(all_qid_len)\n', (2416, 2429), True, 'import numpy as np\n'), ((2451, 2483), 'numpy.percentile', 'np.percentile', (['all_qid_len', '(99.9)'], {}), '(all_qid_len, 99.9)\n', (2464, 2483), True, 'import numpy as np\n'), ((2581, 2606), 'numpy.average', 'np.average', (['all_alias_len'], {}), '(all_alias_len)\n', (2591, 2606), True, 'import numpy as np\n'), ((2628, 2662), 'numpy.percentile', 'np.percentile', (['all_alias_len', '(99.9)'], {}), '(all_alias_len, 99.9)\n', (2641, 2662), True, 'import numpy as np\n'), ((25374, 25385), 'time.time', 'time.time', ([], {}), '()\n', (25383, 25385), False, 'import time\n'), ((25141, 25157), 'ujson.load', 'ujson.load', (['in_f'], {}), '(in_f)\n', (25151, 25157), False, 'import ujson\n')] |
from celery import Celery
app = Celery('wqdss',
broker='amqp://user:password@rabbitmq',
backend='rpc://',
include=['wqdss.tasks'])
app.conf.update(
task_acks_late=True,
worker_prefetch_multiplier=1
)
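# task_acks_late together with a prefetch multiplier of 1 is a common pattern for long-running
# tasks: a message is only acknowledged after the task finishes and each worker reserves a single
# task at a time, so unfinished work can be redelivered if a worker dies mid-run.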
| [
"celery.Celery"
] | [((33, 135), 'celery.Celery', 'Celery', (['"""wqdss"""'], {'broker': '"""amqp://user:password@rabbitmq"""', 'backend': '"""rpc://"""', 'include': "['wqdss.tasks']"}), "('wqdss', broker='amqp://user:password@rabbitmq', backend='rpc://',\n include=['wqdss.tasks'])\n", (39, 135), False, 'from celery import Celery\n')] |
from setuptools import setup
def readme():
with open('README.md') as f:
return f.read()
def requires():
with open('requirements.txt') as f:
return f.read().split("\n")
config = {
'name': 'kanjinetworks',
'version': '0.1.8',
'description': 'Kanji Networks interface',
'long_description': readme(),
'license': 'MIT',
'author': '<NAME>',
'author_email': '<EMAIL>',
'url': 'https://github.com/acoomans/kanjinetworks',
'download_url': 'https://github.com/acoomans/kanjinetworks/archive/master.zip',
'install_requires': requires(),
'packages': ['kanjinetworks', 'kanjinetworks.extract', 'kanjinetworks.export'],
'scripts': ['scripts/kn_to_ja.py'],
'package_data': {'kanjinetworks': ['data/*']},
'include_package_data': True,
'zip_safe': False,
'test_suite': 'kanjinetworks',
}
setup(**config) | [
"setuptools.setup"
] | [((863, 878), 'setuptools.setup', 'setup', ([], {}), '(**config)\n', (868, 878), False, 'from setuptools import setup\n')] |
import random
import pandas as pd
RANDOM_SEED = 22
random.seed(RANDOM_SEED)
fact, zm, xq = [], [], []
with open("data/laic2021/total.src") as f:
for line in f.readlines():
fact.append(line)
with open("data/laic2021/total_zm.tgt") as f:
for line in f.readlines():
zm.append(line)
with open("data/laic2021/total_xq.tgt") as f:
for line in f.readlines():
xq.append(line)
shuffle_idx = list(range(len(fact)))
random.shuffle(shuffle_idx)
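# data_split below writes an 80/10/10 train/valid/test split; the same shuffled index is applied
# to the source facts and to both target files (zm, xq) so that corresponding lines stay aligned.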
def data_split(idx, data, data_name):
total_num = len(data)
train_num = int(total_num * 0.8)
valid_num = int(total_num * 0.1)
test_num = total_num - train_num - valid_num
    data = pd.Series(data)  # convert to a Series so the shuffled index can be applied (67651)
data = data[idx]
train_data = data[:train_num].tolist()
valid_data = data[train_num:train_num+valid_num].tolist()
test_data = data[train_num+valid_num:].tolist()
print(len(train_data))
print(len(valid_data))
print(len(test_data))
# return
path = "data/laic2021/train/{}".format(data_name)
with open(path, "w") as f:
for line in train_data:
line = line.replace("\n", "")
f.write(line+"\n")
path = "data/laic2021/valid/{}".format(data_name)
with open(path, "w") as f:
for line in valid_data:
line = line.replace("\n", "")
f.write(line+"\n")
path = "data/laic2021/test/{}".format(data_name)
with open(path, "w") as f:
for line in test_data:
line = line.replace("\n", "")
f.write(line+"\n")
fact = data_split(shuffle_idx, fact, "fact.src")
zm = data_split(shuffle_idx, zm, "zm.tgt")
xq = data_split(shuffle_idx, xq, "xq.tgt")
| [
"pandas.Series",
"random.shuffle",
"random.seed"
] | [((52, 76), 'random.seed', 'random.seed', (['RANDOM_SEED'], {}), '(RANDOM_SEED)\n', (63, 76), False, 'import random\n'), ((448, 475), 'random.shuffle', 'random.shuffle', (['shuffle_idx'], {}), '(shuffle_idx)\n', (462, 475), False, 'import random\n'), ((676, 691), 'pandas.Series', 'pd.Series', (['data'], {}), '(data)\n', (685, 691), True, 'import pandas as pd\n')] |
from emonitor.extensions import db
from emonitor.modules.cars.car import Car
class AlarmkeyCars(db.Model):
"""AlarmkeyCars class"""
__tablename__ = 'alarmkeycars'
__table_args__ = {'extend_existing': True}
kid = db.Column(db.Integer, primary_key=True)
dept = db.Column(db.String(30), primary_key=True)
_cars1 = db.Column('cars1', db.String(100), default='')
_cars2 = db.Column('cars2', db.String(100), default='')
_material = db.Column('material', db.String(100), default='')
def _get_cars_proto(self, cartype): # type 1:cars1, 2:cars2, 3:material
ret = []
l = []
cars = Car.getCars()
try:
if cartype == 1:
l = [int(i) for i in self._cars1.split(';') if i != '']
elif cartype == 2:
l = [int(i) for i in self._cars2.split(';') if i != '']
elif cartype == 3:
l = [int(i) for i in self._material.split(';') if i != '']
except:
l = []
        for c_id in l:
            matches = [car for car in cars if car.id == c_id]
            if len(matches) == 1:
                ret.append(matches[0])
        return ret
# cars1
def _get_cars1(self):
return self._get_cars_proto(1)
def _set_cars1(self, cars):
self._cars1 = cars
def _get_cars1id(self):
return [int(i) for i in self._cars1.split(';') if i != '']
# cars2
def _get_cars2(self):
return self._get_cars_proto(2)
def _set_cars2(self, cars):
self._cars2 = cars
def _get_cars2id(self):
return [int(i) for i in self._cars2.split(';') if i != '']
# material
def _get_material(self):
return self._get_cars_proto(3)
def _set_material(self, material):
self._material = material
def _get_materialid(self):
        return [int(i) for i in self._material.split(';') if i != '']
car1id = property(_get_cars1id)
cars1 = property(_get_cars1, _set_cars1)
car2id = property(_get_cars2id)
cars2 = property(_get_cars2, _set_cars2)
materialid = property(_get_materialid)
materials = property(_get_material, _set_material)
def __init__(self, kid, dept, cars1, cars2, material):
self.kid = kid
self.dept = dept
self._cars1 = cars1
self._cars2 = cars2
self._material = material
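        # Alarm key id 0 holds the department-wide default assignment, used here as the fallback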
acc = AlarmkeyCars.getAlarmkeyCars(0, dept=dept)
if acc:
self.defaultcars1 = acc.cars1
self.defaultcars2 = acc.cars2
self.defaultmaterial = acc.materials
else:
self.defaultcars1 = []
self.defaultcars2 = []
self.defaultmaterial = []
@staticmethod
def getAlarmkeyCars(kid=9999, dept=''):
"""
Get a list of all car objects with given parameters
:param kid: (optional) id of alarmkey, default = *9999*
:param dept: (optional) id of department, default = *''*
:return: list of :py:class:`emonitor.modules.alarmkeys.alarmkeycar.AlarmkeyCars`
"""
if int(kid) != 9999 and dept != '':
return AlarmkeyCars.query.filter_by(kid=int(kid), dept=int(dept)).first()
elif int(kid) == 9999 and dept != '': # default aao cars for dept
return AlarmkeyCars.query.filter_by(kid=int(kid), dept=int(dept)).first()
elif dept != '':
return AlarmkeyCars.query.filter_by(dept=int(dept)).all()
else:
return AlarmkeyCars.query.filter_by(kid=int(kid)).all()
| [
"emonitor.extensions.db.String",
"emonitor.extensions.db.Column",
"emonitor.modules.cars.car.Car.getCars"
] | [((236, 275), 'emonitor.extensions.db.Column', 'db.Column', (['db.Integer'], {'primary_key': '(True)'}), '(db.Integer, primary_key=True)\n', (245, 275), False, 'from emonitor.extensions import db\n'), ((297, 310), 'emonitor.extensions.db.String', 'db.String', (['(30)'], {}), '(30)\n', (306, 310), False, 'from emonitor.extensions import db\n'), ((362, 376), 'emonitor.extensions.db.String', 'db.String', (['(100)'], {}), '(100)\n', (371, 376), False, 'from emonitor.extensions import db\n'), ((422, 436), 'emonitor.extensions.db.String', 'db.String', (['(100)'], {}), '(100)\n', (431, 436), False, 'from emonitor.extensions import db\n'), ((488, 502), 'emonitor.extensions.db.String', 'db.String', (['(100)'], {}), '(100)\n', (497, 502), False, 'from emonitor.extensions import db\n'), ((641, 654), 'emonitor.modules.cars.car.Car.getCars', 'Car.getCars', ([], {}), '()\n', (652, 654), False, 'from emonitor.modules.cars.car import Car\n')] |
#! /usr/bin/env python
import json
import requests
from watson_developer_cloud import ConversationV1
# City with its name, wind strength and weather
class City:
def __init__(self, name):
self.name = name
def _get_city_data(self):
r = requests.get('http://api.openweathermap.org/data/2.5/weather?q=' + self.name + '&appid=' + conf['openweather_key'])
body = json.loads(r.text)
        # "weather" is a list of condition objects in the OpenWeatherMap response
        if 'weather' in body and len(body['weather']) > 0 and 'main' in body['weather'][0]:
            self.weather = body['weather'][0]['main']
else:
self.weather = None
if 'wind' in body and 'speed' in body['wind']:
self.wind = int(float(body['wind']['speed']) * 1.94384 * 5)
else:
self.wind = None
# Read and parse the configuration file
def read_conf(name="config.json"):
config_data = None
with open(name) as config_file:
config_data = json.load(config_file)
return config_data
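# Example config.json shape (keys taken from the lookups in this script; all values are placeholders):
# {
#     "openweather_key": "<openweathermap api key>",
#     "workspace_id": "<watson conversation workspace id>",
#     "username": "<watson service username>",
#     "password": "<watson service password>",
#     "version": "<watson api version date>"
# }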
# Ask Botseidon
def ask(input_text, current_context):
return conversation.message(workspace_id= conf['workspace_id'], message_input= {"text": input_text}, context= current_context)
# Set up the environment
conf = read_conf()
if conf is None:
print("Can not read conf file")
exit(1)
conversation = ConversationV1(username=conf['username'], password=conf['password'], version=conf['version'])
city = None
context = {}
# Interactive prompt loop
while True:
text = input(">>> ")
res = ask(text, context)
    context = res["context"]
if "output" in res and "text" in res["output"] and len(res["output"]["text"]) > 0:
# Intercepting the place and filling the city wind and weather values
if "You live in:" in res["output"]["text"][0]:
city = City(res["output"]["text"][0].split(':')[-1])
city._get_city_data()
res = ask("The wind is blowing " + str(city.wind), context)
for output in res["output"]["text"]:
if output != "":
print(output)
else:
print("> Error: could not understand the meaning")
| [
"json.load",
"json.loads",
"watson_developer_cloud.ConversationV1",
"requests.get"
] | [((1284, 1381), 'watson_developer_cloud.ConversationV1', 'ConversationV1', ([], {'username': "conf['username']", 'password': "conf['password']", 'version': "conf['version']"}), "(username=conf['username'], password=conf['password'],\n version=conf['version'])\n", (1298, 1381), False, 'from watson_developer_cloud import ConversationV1\n'), ((262, 382), 'requests.get', 'requests.get', (["('http://api.openweathermap.org/data/2.5/weather?q=' + self.name +\n '&appid=' + conf['openweather_key'])"], {}), "('http://api.openweathermap.org/data/2.5/weather?q=' + self.\n name + '&appid=' + conf['openweather_key'])\n", (274, 382), False, 'import requests\n'), ((393, 411), 'json.loads', 'json.loads', (['r.text'], {}), '(r.text)\n', (403, 411), False, 'import json\n'), ((926, 948), 'json.load', 'json.load', (['config_file'], {}), '(config_file)\n', (935, 948), False, 'import json\n')] |
# -*- coding: utf-8 -*-
from unittest import TestCase
import pandas as pd
from alphaware.base import (Factor,
FactorContainer)
from alphaware.enums import (FactorType,
OutputDataFormat,
FreqType,
FactorNormType)
from alphaware.analyzer import FactorQuantile
from pandas.util.testing import assert_frame_equal
class TestFactorQuantile(TestCase):
def test_factor_quantile(self):
index = pd.MultiIndex.from_product([['2014-01-30', '2014-02-28', '2014-03-31'], ['001', '002']],
names=['trade_date', 'ticker'])
data1 = pd.DataFrame(index=index, data=[1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
factor_test1 = Factor(data=data1, name='alpha1')
factor_test3 = Factor(data=data1, name='alpha2')
test2_property = {'type': FactorType.FWD_RETURN,
'data_format': OutputDataFormat.MULTI_INDEX_DF,
'norm_type': FactorNormType.Null,
'freq': FreqType.EOM}
data2 = pd.DataFrame(index=index, data=[3.0, 2.0, 3.0, 7.0, 8.0, 9.0])
factor_test2 = Factor(data=data2, name='fwd_return1', property_dict=test2_property)
factor_test4 = Factor(data=data2, name='fwd_return2', property_dict=test2_property)
fc = FactorContainer('2014-01-30', '2014-02-28', [factor_test1, factor_test2, factor_test3, factor_test4])
t = FactorQuantile(quantiles=2)
calculate = t.predict(fc)
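        # Reading of the fixture below: one column per <factor>_<forward return>_<quantile>; with
        # two tickers and quantiles=2, each quantile bucket holds a single ticker's forward return
        # on each trade date.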
expected = pd.DataFrame(
data=[[3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0], [3.0, 7.0, 3.0, 7.0, 3.0, 7.0, 3.0, 7.0]],
index=pd.DatetimeIndex(['2014-01-30', '2014-02-28'], freq=None),
columns=['alpha1_fwd_return1_1', 'alpha1_fwd_return1_2', 'alpha2_fwd_return1_1', 'alpha2_fwd_return1_2',
'alpha1_fwd_return2_1', 'alpha1_fwd_return2_2', 'alpha2_fwd_return2_1',
'alpha2_fwd_return2_2'])
assert_frame_equal(calculate, expected)
| [
"pandas.MultiIndex.from_product",
"alphaware.analyzer.FactorQuantile",
"pandas.DatetimeIndex",
"pandas.util.testing.assert_frame_equal",
"pandas.DataFrame",
"alphaware.base.FactorContainer",
"alphaware.base.Factor"
] | [((515, 640), 'pandas.MultiIndex.from_product', 'pd.MultiIndex.from_product', (["[['2014-01-30', '2014-02-28', '2014-03-31'], ['001', '002']]"], {'names': "['trade_date', 'ticker']"}), "([['2014-01-30', '2014-02-28', '2014-03-31'], [\n '001', '002']], names=['trade_date', 'ticker'])\n", (541, 640), True, 'import pandas as pd\n'), ((695, 757), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'index', 'data': '[1.0, 2.0, 3.0, 4.0, 5.0, 6.0]'}), '(index=index, data=[1.0, 2.0, 3.0, 4.0, 5.0, 6.0])\n', (707, 757), True, 'import pandas as pd\n'), ((781, 814), 'alphaware.base.Factor', 'Factor', ([], {'data': 'data1', 'name': '"""alpha1"""'}), "(data=data1, name='alpha1')\n", (787, 814), False, 'from alphaware.base import Factor, FactorContainer\n'), ((838, 871), 'alphaware.base.Factor', 'Factor', ([], {'data': 'data1', 'name': '"""alpha2"""'}), "(data=data1, name='alpha2')\n", (844, 871), False, 'from alphaware.base import Factor, FactorContainer\n'), ((1128, 1190), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'index', 'data': '[3.0, 2.0, 3.0, 7.0, 8.0, 9.0]'}), '(index=index, data=[3.0, 2.0, 3.0, 7.0, 8.0, 9.0])\n', (1140, 1190), True, 'import pandas as pd\n'), ((1214, 1282), 'alphaware.base.Factor', 'Factor', ([], {'data': 'data2', 'name': '"""fwd_return1"""', 'property_dict': 'test2_property'}), "(data=data2, name='fwd_return1', property_dict=test2_property)\n", (1220, 1282), False, 'from alphaware.base import Factor, FactorContainer\n'), ((1306, 1374), 'alphaware.base.Factor', 'Factor', ([], {'data': 'data2', 'name': '"""fwd_return2"""', 'property_dict': 'test2_property'}), "(data=data2, name='fwd_return2', property_dict=test2_property)\n", (1312, 1374), False, 'from alphaware.base import Factor, FactorContainer\n'), ((1389, 1494), 'alphaware.base.FactorContainer', 'FactorContainer', (['"""2014-01-30"""', '"""2014-02-28"""', '[factor_test1, factor_test2, factor_test3, factor_test4]'], {}), "('2014-01-30', '2014-02-28', [factor_test1, factor_test2,\n factor_test3, factor_test4])\n", (1404, 1494), False, 'from alphaware.base import Factor, FactorContainer\n'), ((1503, 1530), 'alphaware.analyzer.FactorQuantile', 'FactorQuantile', ([], {'quantiles': '(2)'}), '(quantiles=2)\n', (1517, 1530), False, 'from alphaware.analyzer import FactorQuantile\n'), ((2042, 2081), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['calculate', 'expected'], {}), '(calculate, expected)\n', (2060, 2081), False, 'from pandas.util.testing import assert_frame_equal\n'), ((1719, 1776), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (["['2014-01-30', '2014-02-28']"], {'freq': 'None'}), "(['2014-01-30', '2014-02-28'], freq=None)\n", (1735, 1776), True, 'import pandas as pd\n')] |
import torch
from csvec import CSVec
from torchvision.transforms import transforms
# datasets
from data.cifar10.cifar10 import get_cifar10_dataLoaders
from data.cifar10.cifar10_iid import get_cifar10_dataLoaders as get_cifar10_iid_dataLoaders
# models
from models import *
def setup_datasets(dataset, batch_size):
users, trainLoaders, testLoaders = [], [], []
if dataset == 'cifar10':
trainTransform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
testTransform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
users, trainLoaders, testLoaders = get_cifar10_dataLoaders(batch_size=batch_size,
train_transform=trainTransform,
test_transform=testTransform)
elif dataset == 'cifar10_iid':
trainTransform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
testTransform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
users, trainLoaders, testLoaders = get_cifar10_iid_dataLoaders(batch_size=batch_size,
train_transform=trainTransform,
test_transform=testTransform)
return users, trainLoaders, testLoaders
def select_model(algorithm, model_name):
model = None
if algorithm in ['fetchsgd', 'topk']:
if model_name == 'cifar10':
model = FetchSGD_CIFAR10()
elif model_name == 'resnet9':
model = ResNet9(num_classes=10)
else:
print(f"Unimplemented Model {model_name}")
else:
print(f"Unimplemented Algorithm {algorithm}")
return model
def fedAverage(updates):
total_weight = 0
(clientSamplesNum, new_params) = updates[0]
for (clientSamplesNum, client_params) in updates:
total_weight += clientSamplesNum
for k in new_params.keys():
for i in range(0, len(updates)):
client_samples, client_params = updates[i]
# weight
w = client_samples / total_weight
if i == 0:
new_params[k] = client_params[k] * w
else:
new_params[k] += client_params[k] * w
# return global model params
return new_params
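# Illustrative note (added, not part of the original module): fedAverage expects a list of
# (num_samples, state_dict) tuples and returns the sample-weighted average of the parameters.
# For example, two clients holding 10 and 30 samples would contribute with weights 0.25 and
# 0.75 respectively:
#   global_params = fedAverage([(10, model_a.state_dict()), (30, model_b.state_dict())])
# where model_a and model_b are hypothetical client models.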
def avgMetric(metricList):
total_weight = 0
total_metric = 0
for (samplesNum, metric) in metricList:
total_weight += samplesNum
total_metric += samplesNum * metric
average = total_metric / total_weight
return average
def get_param_vec(model):
param_vec = []
for p in model.parameters():
if p.requires_grad:
param_vec.append(p.data.view(-1).float())
return torch.cat(param_vec)
def set_param_vec(model, param_vec):
start = 0
for p in model.parameters():
if p.requires_grad:
end = start + p.numel()
p.data.zero_()
p.data.add_(param_vec[start:end].view(p.size()))
start = end
def args2sketch(grad_size=918090, num_cols=100000, num_rows=5, device=torch.device('cuda:0'), num_blocks=10):
return CSVec(d=grad_size, c=num_cols, r=num_rows, device=device, numBlocks=num_blocks)
def clip_grad(l2_norm_clip, record):
try:
l2_norm = torch.norm(record)
except:
l2_norm = record.l2estimate()
if l2_norm < l2_norm_clip:
return record
else:
return record / float(torch.abs(torch.tensor(l2_norm) / l2_norm_clip))
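# Note (added): when the record's L2 norm exceeds l2_norm_clip, the expression above rescales
# the record by l2_norm_clip / l2_norm, i.e. standard norm clipping; sketched records that do
# not support torch.norm fall back to their l2estimate() method.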
def topk(vec, k):
""" Return the largest k elements (by magnitude) of vec"""
# on a gpu, sorting is faster than pytorch's topk method
# topkIndices = torch.sort(vec**2)[1][-k:]
# however, torch.topk is more space efficient
# topk on cuda returns what looks like uninitialized memory if
# vals has nan values in it
# saving to a zero-initialized output array instead of using the
# output of topk appears to solve this problem
topkVals = torch.zeros(k, device=vec.device)
topkIndices = torch.zeros(k, device=vec.device).long()
torch.topk(vec ** 2, k, sorted=False, out=(topkVals, topkIndices))
ret = torch.zeros_like(vec)
if len(vec.size()) == 1:
ret[topkIndices] = vec[topkIndices]
elif len(vec.size()) == 2:
rows = torch.arange(vec.size()[0]).view(-1, 1)
ret[rows, topkIndices] = vec[rows, topkIndices]
return ret
if __name__ == '__main__':
sketch = args2sketch(grad_size=10000, num_cols=1000, num_rows=5, device=torch.device('cuda:0'), num_blocks=10)
print(sketch)
x = torch.rand(10000, device=torch.device("cuda:0"))
print(x.size())
print(sketch.accumulateVec(x))
y = sketch.unSketch(k=10)
print(sum(abs(y - x)) / len(y - x))
| [
"torch.topk",
"data.cifar10.cifar10_iid.get_cifar10_dataLoaders",
"data.cifar10.cifar10.get_cifar10_dataLoaders",
"torch.tensor",
"torch.norm",
"torchvision.transforms.transforms.Normalize",
"torchvision.transforms.transforms.ToTensor",
"csvec.CSVec",
"torch.zeros_like",
"torch.zeros",
"torch.cat",
"torch.device"
] | [((3438, 3458), 'torch.cat', 'torch.cat', (['param_vec'], {}), '(param_vec)\n', (3447, 3458), False, 'import torch\n'), ((3806, 3828), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (3818, 3828), False, 'import torch\n'), ((3858, 3937), 'csvec.CSVec', 'CSVec', ([], {'d': 'grad_size', 'c': 'num_cols', 'r': 'num_rows', 'device': 'device', 'numBlocks': 'num_blocks'}), '(d=grad_size, c=num_cols, r=num_rows, device=device, numBlocks=num_blocks)\n', (3863, 3937), False, 'from csvec import CSVec\n'), ((4715, 4748), 'torch.zeros', 'torch.zeros', (['k'], {'device': 'vec.device'}), '(k, device=vec.device)\n', (4726, 4748), False, 'import torch\n'), ((4814, 4880), 'torch.topk', 'torch.topk', (['(vec ** 2)', 'k'], {'sorted': '(False)', 'out': '(topkVals, topkIndices)'}), '(vec ** 2, k, sorted=False, out=(topkVals, topkIndices))\n', (4824, 4880), False, 'import torch\n'), ((4894, 4915), 'torch.zeros_like', 'torch.zeros_like', (['vec'], {}), '(vec)\n', (4910, 4915), False, 'import torch\n'), ((892, 1005), 'data.cifar10.cifar10.get_cifar10_dataLoaders', 'get_cifar10_dataLoaders', ([], {'batch_size': 'batch_size', 'train_transform': 'trainTransform', 'test_transform': 'testTransform'}), '(batch_size=batch_size, train_transform=\n trainTransform, test_transform=testTransform)\n', (915, 1005), False, 'from data.cifar10.cifar10 import get_cifar10_dataLoaders\n'), ((4009, 4027), 'torch.norm', 'torch.norm', (['record'], {}), '(record)\n', (4019, 4027), False, 'import torch\n'), ((1654, 1771), 'data.cifar10.cifar10_iid.get_cifar10_dataLoaders', 'get_cifar10_iid_dataLoaders', ([], {'batch_size': 'batch_size', 'train_transform': 'trainTransform', 'test_transform': 'testTransform'}), '(batch_size=batch_size, train_transform=\n trainTransform, test_transform=testTransform)\n', (1681, 1771), True, 'from data.cifar10.cifar10_iid import get_cifar10_dataLoaders as get_cifar10_iid_dataLoaders\n'), ((4768, 4801), 'torch.zeros', 'torch.zeros', (['k'], {'device': 'vec.device'}), '(k, device=vec.device)\n', (4779, 4801), False, 'import torch\n'), ((5261, 5283), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (5273, 5283), False, 'import torch\n'), ((5353, 5375), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (5365, 5375), False, 'import torch\n'), ((471, 492), 'torchvision.transforms.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (490, 492), False, 'from torchvision.transforms import transforms\n'), ((507, 582), 'torchvision.transforms.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (527, 582), False, 'from torchvision.transforms import transforms\n'), ((690, 711), 'torchvision.transforms.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (709, 711), False, 'from torchvision.transforms import transforms\n'), ((726, 801), 'torchvision.transforms.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (746, 801), False, 'from torchvision.transforms import transforms\n'), ((1233, 1254), 'torchvision.transforms.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1252, 1254), False, 'from torchvision.transforms import transforms\n'), ((1269, 1344), 'torchvision.transforms.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 
'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (1289, 1344), False, 'from torchvision.transforms import transforms\n'), ((1452, 1473), 'torchvision.transforms.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1471, 1473), False, 'from torchvision.transforms import transforms\n'), ((1488, 1563), 'torchvision.transforms.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (1508, 1563), False, 'from torchvision.transforms import transforms\n'), ((4187, 4208), 'torch.tensor', 'torch.tensor', (['l2_norm'], {}), '(l2_norm)\n', (4199, 4208), False, 'import torch\n')] |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 11 10:40:35 2019
@author: jcoleman
"""
import pandas as pd
# Need a list of all product links, use to iterate links
remove_chars = ":"
product_link = "https://encorbio.com/product/mca-2a5/"
tables = pd.read_html(product_link)
print(tables[1])
print(tables[1][1][0])
# vendor name
# sku/catalog number
#<!-- Scripts/CSS and wp_head hook -->
#<title>Mouse Monoclonal Antibody to Human GFAP Cat# MCA-2A5 – EnCor Biotechnology</title>
# product description/product name
print(tables[1][0][1])
#print(tables[1][0][1].replace(remove_chars, ""))
print(tables[1][1][1])
# product page url
print(product_link)
# vendor tested application
print(tables[1][0][9])
#print(tables[1][0][9].replace(remove_chars, ""))
print(tables[1][1][9])
# reactivity
print(tables[1][0][6])
#print(tables[1][0][6].replace(remove_chars, "").replace("Species Cross-", ""))
print(tables[1][1][6])
# host
print(tables[1][0][4])
#print(tables[1][0][4].replace(remove_chars, ""))
print(tables[1][1][4])
# clonality ?
# clone_id ?
# conjugate ?
# specificity ?
# post translational modification (ex) phospho ser 179, acetyl, etc) ?
# epitope sequence
# immunogen
print(tables[1][0][1])
#print(tables[1][0][1].replace(remove_chars, ""))
print(tables[1][1][1])
# immunogen type (peptide, recombinant protein etc)
# concentration (mg/ml)
print("concentration (ug/ul): ")
print("1")
# purity (serum vs purified)
print(tables[1][0][8])
print(tables[1][1][8])
# Ab formulation (azide-free, low endotoxin, bsa-free, LEAF etc)
print("Ab formulation:")
print(tables[1][1][8])
# Ab Isotype/ Fab fragment
print(tables[1][0][5])
print(tables[1][1][5])
# molecular weight
print(tables[1][0][3])
print(tables[1][1][3])
# binding affinity
# UniProt/Accession Number
print(tables[1][0][7])
print(tables[1][1][7])
# Size (ug)
# discontinued?
# OEM
# Validation Image url link ?
# PMID/References
# Datasheet url ?
# MSDS url ?
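# Hedged sketch (not in the original script): the comment at the top notes that a list of all
# product links is needed so the fields above can be collected per product; assuming such a
# list exists, one possible loop could be:
#   product_links = ["https://encorbio.com/product/mca-2a5/"]  # placeholder list
#   for link in product_links:
#       tables = pd.read_html(link)
#       print(tables[1][1][0])  # product name row, as above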
"""
Get data from the Table on product pages:
<table>
<tr><!-- Row 1 -->
<td><b>Name:</b></td><!-- Col 1 -->
<td>Mouse monoclonal antibody to GFAP</td><!-- Col 2 -->
</tr>
<tr><!-- Row 2 -->
<td><b>Immunogen:</b></td><!-- Col 1 -->
<td>GFAP isolated biochemically from pig spinal cord</td><!-- Col 2 -->
</tr>
<tr><!-- Row 3 -->
<td><b>HGNC Name:</b></td><!-- Col 1 -->
<td><a href=http://www.genenames.org/cgi-bin/gene_symbol_report?hgnc_id=HGNC:4235>GFAP</a></td><!-- Col 2 -->
</tr>
<tr><!-- Row 4 -->
<td><b>Molecular Weight:</b></td><!-- Col 1 -->
<td>50kDa</td><!-- Col 2 -->
</tr>
<tr><!-- Row 5 -->
<td><b>Host:</b></td><!-- Col 1 -->
<td>Mouse</td><!-- Col 2 -->
</tr>
<tr class="isotype"><!-- Row 6 -->
<td><b>Isotype: </b> </td><!-- Col 1 -->
<td>IgG1</td><!-- Col 2 -->
</tr>
<tr><!-- Row 7 -->
<td><b>Species Cross-Reactivity:</b></td><!-- Col 1 -->
<td>Human, rat, mouse, cow, pig</td><!-- Col 2 -->
</tr>
<tr><!-- Row 8 -->
<td><b>RRID:</b></td><!-- Col 1 -->
<td><a href=http://antibodyregistry.org/search?q=SCR_016364>AB_2732880</a></td><!-- Col 2 -->
</tr>
<tr><!-- Row 9 -->
<td><b>Format: </b> </td><!-- Col 1 -->
<td>Purified antibody at 1mg/mL in 50% PBS, 50% glycerol plus 5mM NaN<sub>3</sub></td><!-- Col 2 -->
</tr>
<tr><!-- Row 10 -->
<td><b>Applications:</b></td><!-- Col 1 -->
<td>WB, IF/ICC, IHC</td><!-- Col 2 -->
</tr>
<tr><!-- Row 11 -->
<td><b>Recommended Dilutions: </b></td><!-- Col 1 -->
<td>WB: 1:10,000. IF/ICC and IHC: 1:1,000.</td><!-- Col 2 -->
</tr>
<tr><!-- Row 12 -->
<td><b>Storage:</b></td><!-- Col 1 -->
<td>Stable at 4°C for one year, for longer term store at -20°C</td><!-- Col 2 -->
</tr>
</table>
"""
| [
"pandas.read_html"
] | [((273, 299), 'pandas.read_html', 'pd.read_html', (['product_link'], {}), '(product_link)\n', (285, 299), True, 'import pandas as pd\n')] |
# Copyright (c) 2019 <NAME>.
# Cura is released under the terms of the LGPLv3 or higher.
import os.path
from UM.Application import Application
from UM.PluginRegistry import PluginRegistry
from cura.Stages.CuraStage import CuraStage
## Stage for preparing model (slicing).
class PrepareStage(CuraStage):
def __init__(self, parent = None):
super().__init__(parent)
Application.getInstance().engineCreatedSignal.connect(self._engineCreated)
def _engineCreated(self):
menu_component_path = os.path.join(PluginRegistry.getInstance().getPluginPath("PrepareStage"), "PrepareMenu.qml")
main_component_path = os.path.join(PluginRegistry.getInstance().getPluginPath("PrepareStage"), "PrepareMain.qml")
self.addDisplayComponent("menu", menu_component_path)
self.addDisplayComponent("main", main_component_path) | [
"UM.Application.Application.getInstance",
"UM.PluginRegistry.PluginRegistry.getInstance"
] | [((398, 423), 'UM.Application.Application.getInstance', 'Application.getInstance', ([], {}), '()\n', (421, 423), False, 'from UM.Application import Application\n'), ((550, 578), 'UM.PluginRegistry.PluginRegistry.getInstance', 'PluginRegistry.getInstance', ([], {}), '()\n', (576, 578), False, 'from UM.PluginRegistry import PluginRegistry\n'), ((673, 701), 'UM.PluginRegistry.PluginRegistry.getInstance', 'PluginRegistry.getInstance', ([], {}), '()\n', (699, 701), False, 'from UM.PluginRegistry import PluginRegistry\n')] |
import re
import csv
import pandas as pd
pattern = r"(?P<component>^[\%?\#?\w]+)+\s*(?P<Value>[\d\.]+)+\s*(?P<Minimum>[\d\.]+)+\-(?P<Maximum>[\d\.]+)+\s*(?P<Units>[\w\%\/\^]+)"
with open('blood.txt', 'r') as blood_file:
    dta_lines = blood_file.readlines()
component_data = []
for line in dta_lines:
    match = re.match(pattern, line)
if match:
component_data.append(match.groups())
else:
continue
#No action is taken
df = pd.DataFrame(data=component_data)
df.columns = ["Component" ,"Current Value", "Minimum Range", "Maximum Range" ,"Units"]
df.to_csv("blood.csv", index=False) | [
"pandas.DataFrame",
"re.match"
] | [((430, 463), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'component_data'}), '(data=component_data)\n', (442, 463), True, 'import pandas as pd\n'), ((286, 309), 're.match', 're.match', (['pattern', 'line'], {}), '(pattern, line)\n', (294, 309), False, 'import re\n')] |
def build_small_dataset():
from src.data import build_dataset
from src.data import train_test_unlabeled_split
# Build the train/test split dictionary
splits_dict = {
"2009_Obama": 0, "2017_Trump": 0,
"2016_Obama": 1, "2020_Trump": 1,
"1993_Clinton": 1, "2000_Clinton": 1,
"2001_Bush": 1, "2008_Bush": 1,
"1989_Bush": 1, "1992_Bush": 1,
"1981_Reagan": 1, "1988_Reagan": 1,
}
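    # Note (added): the split codes are presumably consumed by train_test_unlabeled_split as
    # 0 = train, 1 = test and 2 = unlabeled; the loops below put the remaining years of each
    # presidency into the unlabeled pool.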
for year in range(2010, 2016):
splits_dict[f"{year}_Obama"] = 2
for year in range(2018, 2020):
splits_dict[f"{year}_Trump"] = 2
for year in range(1994, 2000):
splits_dict[f"{year}_Clinton"] = 2
for year in range(2002, 2008):
splits_dict[f"{year}_Bush"] = 2
for year in range(1990, 1992):
splits_dict[f"{year}_Bush"] = 2
for year in range(1982, 1988):
splits_dict[f"{year}_Reagan"] = 2
# Build and split the dataset
data, labels, speeches, vocab = build_dataset(
"data/", num_docs=40, max_words=50, vocab_size=10)
return train_test_unlabeled_split(data, labels, speeches, splits_dict)
| [
"src.data.build_dataset",
"src.data.train_test_unlabeled_split"
] | [((971, 1035), 'src.data.build_dataset', 'build_dataset', (['"""data/"""'], {'num_docs': '(40)', 'max_words': '(50)', 'vocab_size': '(10)'}), "('data/', num_docs=40, max_words=50, vocab_size=10)\n", (984, 1035), False, 'from src.data import build_dataset\n'), ((1058, 1121), 'src.data.train_test_unlabeled_split', 'train_test_unlabeled_split', (['data', 'labels', 'speeches', 'splits_dict'], {}), '(data, labels, speeches, splits_dict)\n', (1084, 1121), False, 'from src.data import train_test_unlabeled_split\n')] |
from django.conf.urls.defaults import patterns, url
from cyder.cydhcp.interface.views import is_last_interface
urlpatterns = patterns(
'',
url(r'^last_interface/', is_last_interface, name='is_last_interface'),
)
| [
"django.conf.urls.defaults.url"
] | [((149, 217), 'django.conf.urls.defaults.url', 'url', (['"""^last_interface/"""', 'is_last_interface'], {'name': '"""is_last_interface"""'}), "('^last_interface/', is_last_interface, name='is_last_interface')\n", (152, 217), False, 'from django.conf.urls.defaults import patterns, url\n')] |
"""
##################################################################################################
# Copyright Info : Copyright (c) <NAME> @ Hikvision Research Institute. All rights reserved.
# Filename : ace_converter.py
# Abstract : Implementations of text-label and text-index of ACE Loss
# Current Version: 1.0.0
# Date : 2021-04-30
# Thanks to : We borrow the released code from https://github.com/summerlvsong/Aggregation-Cross-Entropy
for the ACELabelConverter.
##################################################################################################
"""
import os.path as osp
import torch
from mmcv.utils import print_log
from .builder import CONVERTER
@CONVERTER.register_module()
class ACELabelConverter:
"""Convert between text-label and text-index, ACE Loss Converter in Ref [1]
Ref: [1] Aggregation Cross-Entropy for Sequence Recognition. CVPR-2019
"""
def __init__(self, character,
with_unknown=False):
"""
Convert between text-label and text-index
Args:
character (str): set of the possible recognition characters dictionary.
with_unknown (bool): whether to encode the characters which are out of the dictionary to ['[UNK]']
"""
self.with_unknown = with_unknown
# character dictionary is file format
if osp.isfile(character):
with open(character, 'r', encoding='utf-8') as character_file:
# character dictionary is txt file
if character.endswith('.txt'):
print_log("loading user predefined recognition dictionary from txt file: "
"%s to build the ACE converter !!!" % character)
character = character_file.readline().strip()
# [GO] for the start token of the attention decoder.
# [s] for end-of-sentence token.
list_token = ['[PAD]']
if self.with_unknown:
unk_token = ['[UNK]']
else:
unk_token = list()
# ['[s]','[UNK]','[PAD]','[GO]']
list_character = list(character)
self.character = list_token + list_character + unk_token
self.dict = {}
for i, char in enumerate(self.character):
self.dict[char] = i
else:
raise Exception("dictionary file type is not support !!!")
elif ".json" in character or ".txt" in character:
# character file does not exist, raise the error
raise FileNotFoundError("The recognition character file is not existing")
else:
raise Exception("dictionary file only support the txt and json file !!!")
print("recognition dictionary %s \t" % str(self.dict).encode(encoding="utf-8").decode(encoding="utf-8"))
def encode(self, text, batch_max_length=25):
"""
convert text-label into text-index.
Args:
text (list): text labels of each image. [batch_size]
batch_max_length (tensor): max length of text label in the batch. 25 by default
Returns:
Torch.Tensor : the training target. [batch_size x (character_num)].
text[:, 0] is text length and text[:, 1:] is character occurrence.
Torch.Tensor : the length of text length [batch_size]
"""
length = [len(s) for s in text] # +1 for [PAD] of the sentence.
batch_max_length += 1
# batch_text is padded with [PAD] token.
batch_text = torch.cuda.LongTensor(len(text), len(self.character)).fill_(0)
for i, t_ in enumerate(text):
text = [item for item in list(t_)]
if self.with_unknown:
text = [self.dict[char] if char in self.dict.keys() else self.dict["[UNK]"] for char in text]
else:
try:
text = [self.dict[char] for char in text]
except Exception as DictionaryError:
raise KeyError from DictionaryError
text_cnt = torch.cuda.LongTensor(len(self.character) - 1).fill_(0)
for ln_ in text:
text_cnt[ln_ - 1] += 1 # label construction for ACE
batch_text[i][1:] = text_cnt
batch_text[:, 0] = torch.cuda.IntTensor(length)
return batch_text, torch.cuda.IntTensor(length)
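    # Illustrative note (added): for a label such as "aab" the encoded row stores the label
    # length (3) in column 0 and per-character occurrence counts in the remaining columns,
    # e.g. a count of 2 at the index of 'a' and a count of 1 at the index of 'b'.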
def decode(self, text_index, length):
"""
convert text-index into text-label.
Args:
text_index (Torch.tensor): decode text index
length (Torch.tensor): max text length
Returns:
list(str): decode text
"""
texts = []
for index, _ in enumerate(length):
# transfer the model prediction to text
text = ''.join([self.character[i] for i in text_index[index, text_index[index] != 0]])
texts.append(text)
return texts
| [
"os.path.isfile",
"torch.cuda.IntTensor",
"mmcv.utils.print_log"
] | [((1426, 1447), 'os.path.isfile', 'osp.isfile', (['character'], {}), '(character)\n', (1436, 1447), True, 'import os.path as osp\n'), ((4499, 4527), 'torch.cuda.IntTensor', 'torch.cuda.IntTensor', (['length'], {}), '(length)\n', (4519, 4527), False, 'import torch\n'), ((4555, 4583), 'torch.cuda.IntTensor', 'torch.cuda.IntTensor', (['length'], {}), '(length)\n', (4575, 4583), False, 'import torch\n'), ((1643, 1773), 'mmcv.utils.print_log', 'print_log', (["('loading user predefined recognition dictionary from txt file: %s to build the ACE converter !!!'\n % character)"], {}), "(\n 'loading user predefined recognition dictionary from txt file: %s to build the ACE converter !!!'\n % character)\n", (1652, 1773), False, 'from mmcv.utils import print_log\n')] |
# Generated by Django 2.2.6 on 2019-10-12 09:58
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('main', '0005_batchjob_run_via'),
]
operations = [
migrations.AlterField(
model_name='batchjob',
name='created_by',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='batchjob',
name='database',
field=models.CharField(default='ak', max_length=255),
),
migrations.AlterField(
model_name='batchjob',
name='form_data',
field=models.TextField(default='{}'),
),
migrations.AlterField(
model_name='batchjob',
name='run_via',
field=models.CharField(choices=[('client-db', 'client-db'), ('api', 'api')], default='client-db', max_length=50),
),
migrations.AlterField(
model_name='recurringtask',
name='period_unit',
field=models.CharField(choices=[('minutes', 'minutes'), ('hours', 'hours'), ('days', 'days')], max_length=255),
),
]
| [
"django.db.models.TextField",
"django.db.models.CharField",
"django.db.models.ForeignKey"
] | [((403, 510), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'to': 'settings.AUTH_USER_MODEL'}), '(null=True, on_delete=django.db.models.deletion.SET_NULL,\n to=settings.AUTH_USER_MODEL)\n', (420, 510), False, 'from django.db import migrations, models\n'), ((632, 678), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""ak"""', 'max_length': '(255)'}), "(default='ak', max_length=255)\n", (648, 678), False, 'from django.db import migrations, models\n'), ((805, 835), 'django.db.models.TextField', 'models.TextField', ([], {'default': '"""{}"""'}), "(default='{}')\n", (821, 835), False, 'from django.db import migrations, models\n'), ((960, 1070), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('client-db', 'client-db'), ('api', 'api')]", 'default': '"""client-db"""', 'max_length': '(50)'}), "(choices=[('client-db', 'client-db'), ('api', 'api')],\n default='client-db', max_length=50)\n", (976, 1070), False, 'from django.db import migrations, models\n'), ((1200, 1309), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('minutes', 'minutes'), ('hours', 'hours'), ('days', 'days')]", 'max_length': '(255)'}), "(choices=[('minutes', 'minutes'), ('hours', 'hours'), (\n 'days', 'days')], max_length=255)\n", (1216, 1309), False, 'from django.db import migrations, models\n')] |
from __future__ import unicode_literals
from collections import OrderedDict
import urllib
from cradmin_legacy.viewhelpers.listfilter.base.exceptions import InvalidFiltersStringError
from future import standard_library
standard_library.install_aliases()
class FiltersHandler(object):
"""
Parser of the ``filters_string``. See :meth:`.AbstractFilterList.parse_filters_string`.
"""
#: The string separating filters in the filters string. Defaults to ``"/"``.
filter_separator = '/'
#: The string used to separate filter slug and value.
#: It does not matter if this also appears in the value,
#: we handle that by splitting with a maxsplit of 1.
#:
#: Defaults to ``"-"``.
slug_and_value_separator = '-'
#: The string used to separate multivalue strings.
#: This string can not appear in any value used by the filter.
#:
#: Defaults to ``","``.
multivalue_separator = ','
def __init__(self, urlbuilder):
"""
Parameters:
urlbuilder: A method that takes a single argument, ``filters_string``,
and returns an absolute URL with that string injected as the
filters string. The filters_string is urlencoded, so you should
be able to just forward it to ``reverse`` for your view.
"""
self.filtermap = OrderedDict()
self.urlbuilder = urlbuilder
self._parse_called = False
def split_raw_filter_values(self, raw_values):
"""
Parse the given ``value``, splitting it into a list of values.
You can override this if just overriding :obj:`.FiltersHandler.multivalue_separator`
is not powerful enough.
        If you override this, you will also have to override :meth:`.join_filter_values`.
"""
values = urllib.parse.unquote_plus(raw_values)
return [urllib.parse.unquote_plus(value)
for value in values.split(self.multivalue_separator)]
def join_filter_values(self, values):
"""
The reverse of :meth:`.split_raw_filter_values`. Joins
the given ``values`` list into a string.
"""
# We quote each value, and then we quote the entire string. This
# ensures that we do not get any problems when a value contains
# ``multivalue_separator``.
raw_values = [urllib.parse.quote_plus(value) for value in values]
raw_values = self.multivalue_separator.join(raw_values)
return urllib.parse.quote_plus(raw_values)
def parse_filter_string(self, filter_string):
"""
Parse the given ``filter_string`` and return a ``(slug, values)`` tuple,
where ``slug`` is a filter slug and ``values`` is a list of strings.
You should not need to override this.
"""
if self.slug_and_value_separator not in filter_string:
raise InvalidFiltersStringError('"{}" does not contain "{}".'.format(
filter_string, self.slug_and_value_separator))
slug, value = filter_string.split(self.slug_and_value_separator, 1)
return slug, self.split_raw_filter_values(value)
def parse(self, filters_string):
"""
Parse the given ``filters_string`` and add any values
found in the string to the corresponding filter.
You should not need to override this.
"""
if self._parse_called:
raise RuntimeError('Can not call parse multiple times on a FiltersHandler.')
self._parse_called = True
if not filters_string:
return
filters_string = filters_string.strip(self.filter_separator)
for filter_string in filters_string.split(self.filter_separator):
slug, values = self.parse_filter_string(filter_string)
if slug not in self.filtermap:
raise InvalidFiltersStringError('"{}" is not a valid filter slug.'.format(slug))
self.filtermap[slug].set_values(values)
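    # Illustrative note (added): with the default separators a filters_string such as
    # "color-red,blue/size-large" is parsed so that the filter registered under "color"
    # receives the values ['red', 'blue'] and the one under "size" receives ['large']
    # (the slugs here are hypothetical examples).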
def add_filter(self, filterobject):
"""
Add a :class:`.AbstractFilter` to the handler.
"""
slug = filterobject.get_slug()
if slug in self.filtermap:
raise ValueError('Duplicate slug: "{}".'.format(slug))
if self.slug_and_value_separator in slug:
raise ValueError('Invalid filter slug: "{}". Slugs can not contain "{}".'.format(
slug, self.slug_and_value_separator))
self.filtermap[slug] = filterobject
def normalize_values(self, values):
"""
Normalize values list to only contain
``bool(value) == True`` values. Since values
is a list of strings, this means that it strips
out all empty strings.
"""
return [value for value in values if value]
def build_filter_string(self, slug, values):
"""
Build a filter string suitable for an URL from the given
``slug`` and ``values``.
Parameters:
slug: See :meth:`.AbstractFilter.get_slug`.
value: A list of values. All items in the list must be strings.
"""
return '{slug}{separator}{values}'.format(
slug=slug,
separator=self.slug_and_value_separator,
values=self.join_filter_values(values=values))
def build_filters_string(self, changed_filterobject):
"""
Build the ``filters_string`` for :meth:`.build_filter_url`.
"""
filters_strings = []
for slug, filterobject in self.filtermap.items():
if filterobject.get_slug() == changed_filterobject.get_slug():
values = changed_filterobject.values
else:
values = filterobject.values
values = self.normalize_values(values)
if values:
filters_strings.append(self.build_filter_string(slug=slug, values=values))
return self.filter_separator.join(filters_strings)
def build_filter_url(self, changed_filterobject):
"""
Build an URL that applies the change introduced by
``changed_filterobject`` while keeping any values
in all the other filters within the handler.
Parameters:
changed_filterobject: A :class:`.AbstractFilter` object.
"""
filters_string = self.build_filters_string(changed_filterobject=changed_filterobject)
return self.urlbuilder(filters_string=filters_string)
def filter(self, queryobject, exclude=None):
"""
Apply the filters to the given ``queryobject``.
Loops through all the registered filters (the filters added with :meth:`.add_filter`),
and run :meth:`.AbstractFilter.filter`.
Parameters:
queryobject: See :meth:`.AbstractFilter.filter`.
exclude: Set with the slugs of filters to ignore/exclude when filtering.
Defaults to ``None``, which means that all filters are applied.
"""
for filterobject in self.filtermap.values():
if exclude and filterobject.get_slug() in exclude:
continue
queryobject = filterobject.filter(queryobject=queryobject)
return queryobject
def get_label_for(self, slug):
"""
Get the label for the filter registered in the filterhandler with the given ``slug``.
Raises:
KeyError: If the ``slug`` is not registered.
"""
return self.filtermap[slug].get_label()
def get_cleaned_value_for(self, slug):
"""
Get the cleaned value for the filter registered in the filterhandler
with the given ``slug``.
Raises:
KeyError: If the ``slug`` is not registered.
"""
return self.filtermap[slug].get_cleaned_value()
def get_cleaned_values_for(self, slug):
"""
Get the cleaned values (list) for the filter registered in the filterhandler
with the given ``slug``.
Raises:
KeyError: If the ``slug`` is not registered.
"""
return self.filtermap[slug].get_cleaned_values()
| [
"future.standard_library.install_aliases",
"collections.OrderedDict",
"urllib.parse.unquote_plus",
"urllib.parse.quote_plus"
] | [((219, 253), 'future.standard_library.install_aliases', 'standard_library.install_aliases', ([], {}), '()\n', (251, 253), False, 'from future import standard_library\n'), ((1358, 1371), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1369, 1371), False, 'from collections import OrderedDict\n'), ((1825, 1862), 'urllib.parse.unquote_plus', 'urllib.parse.unquote_plus', (['raw_values'], {}), '(raw_values)\n', (1850, 1862), False, 'import urllib\n'), ((2495, 2530), 'urllib.parse.quote_plus', 'urllib.parse.quote_plus', (['raw_values'], {}), '(raw_values)\n', (2518, 2530), False, 'import urllib\n'), ((1879, 1911), 'urllib.parse.unquote_plus', 'urllib.parse.unquote_plus', (['value'], {}), '(value)\n', (1904, 1911), False, 'import urllib\n'), ((2364, 2394), 'urllib.parse.quote_plus', 'urllib.parse.quote_plus', (['value'], {}), '(value)\n', (2387, 2394), False, 'import urllib\n')] |
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from easygraphics.processing import *
import random
class MyProcessingWidget(ProcessingWidget):
def __init__(self):
self._t = 0
super().__init__()
def setup(self):
self.set_size(200, 150)
self.get_canvas().translate(0, 0)
def draw(self):
self._t = self._t + 2
self.get_canvas().ellipse(self._t, self._t, 20, 20)
class MyWindow(QWidget):
def __init__(self):
super().__init__()
imageWidget = MyProcessingWidget()
area = QScrollArea()
area.setWidget(imageWidget)
area.setAlignment(Qt.AlignCenter)
layout = QHBoxLayout(self)
layout.addWidget(area)
button = QPushButton("Add Circle")
button.clicked.connect(self.button_clicked)
layout.addWidget(button)
self.setLayout(layout)
def button_clicked(self):
pass
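        # Left unimplemented in the original. A plausible sketch, given that ``random`` is
        # imported and seeded in __main__ but never used, would draw a circle at a random
        # position on the widget, e.g. (hypothetical attribute name):
        #   canvas = self.image_widget.get_canvas()
        #   canvas.ellipse(random.randint(0, 180), random.randint(0, 130), 20, 20)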
if __name__ == "__main__":
app = QApplication([])
random.seed()
window = MyWindow()
window.show()
app.exec()
| [
"random.seed"
] | [((992, 1005), 'random.seed', 'random.seed', ([], {}), '()\n', (1003, 1005), False, 'import random\n')] |
# -*- coding: utf-8 -*-
import os
import shutil
from kaleidoscope import util
from git import Repo
import errno
from kal_task import KalTask
class CodebaseSync(KalTask):
def __init__(self, task_conf, app):
self.__conf = task_conf
self.__env = app['env']
self.__tmp_github_dir = os.path.join(self.__env['tmp_dir'], 'remote_git')
def run(self):
src_dir = self.__env['project_root']
remote_repository = self.__conf['repository']
message = self.__conf['commit_message']
project_name = os.path.basename(remote_repository).replace('.git', '')
repo = Repo.clone_from(remote_repository, self.__tmp_github_dir)
remote_dir = os.path.join(self.__tmp_github_dir, project_name)
try:
if os.path.exists(remote_dir):
shutil.rmtree(remote_dir)
shutil.copytree(src_dir, remote_dir, ignore=self.__ignore_path_common)
except OSError as exc: # python >2.5
if exc.errno == errno.ENOTDIR:
shutil.copy(src_dir, remote_dir)
else:
raise exc
util.use_official_maven(remote_dir)
project_module_gradle_files = util.find_all_library_module_gradle_files_in_project(remote_dir)
for build_gradle_file in project_module_gradle_files:
util.remove_publish_gradle(build_gradle_file)
repo.git.add(A=True)
repo.index.commit(message=message)
repo.remotes.origin.push()
if os.path.exists(self.__tmp_github_dir):
shutil.rmtree(self.__tmp_github_dir)
    def __ignore_path_common(self, dir, files):
        # shutil.copytree ignore callback: skip build output directories.
        build_fragment = os.sep + 'build' + os.sep
        if build_fragment in dir:
            # The directory itself sits inside a build tree; ignore all of its entries.
            return files
        ignore = []
        for file in files:
            full_path = os.path.join(dir, file)
            if build_fragment in full_path:
                ignore.append(file)
return ignore | [
"kaleidoscope.util.find_all_library_module_gradle_files_in_project",
"os.path.exists",
"git.Repo.clone_from",
"os.path.join",
"kaleidoscope.util.remove_publish_gradle",
"shutil.copytree",
"os.path.basename",
"shutil.copy",
"shutil.rmtree",
"kaleidoscope.util.use_official_maven"
] | [((308, 357), 'os.path.join', 'os.path.join', (["self.__env['tmp_dir']", '"""remote_git"""'], {}), "(self.__env['tmp_dir'], 'remote_git')\n", (320, 357), False, 'import os\n'), ((621, 678), 'git.Repo.clone_from', 'Repo.clone_from', (['remote_repository', 'self.__tmp_github_dir'], {}), '(remote_repository, self.__tmp_github_dir)\n', (636, 678), False, 'from git import Repo\n'), ((701, 750), 'os.path.join', 'os.path.join', (['self.__tmp_github_dir', 'project_name'], {}), '(self.__tmp_github_dir, project_name)\n', (713, 750), False, 'import os\n'), ((1123, 1158), 'kaleidoscope.util.use_official_maven', 'util.use_official_maven', (['remote_dir'], {}), '(remote_dir)\n', (1146, 1158), False, 'from kaleidoscope import util\n'), ((1197, 1261), 'kaleidoscope.util.find_all_library_module_gradle_files_in_project', 'util.find_all_library_module_gradle_files_in_project', (['remote_dir'], {}), '(remote_dir)\n', (1249, 1261), False, 'from kaleidoscope import util\n'), ((1503, 1540), 'os.path.exists', 'os.path.exists', (['self.__tmp_github_dir'], {}), '(self.__tmp_github_dir)\n', (1517, 1540), False, 'import os\n'), ((779, 805), 'os.path.exists', 'os.path.exists', (['remote_dir'], {}), '(remote_dir)\n', (793, 805), False, 'import os\n'), ((861, 931), 'shutil.copytree', 'shutil.copytree', (['src_dir', 'remote_dir'], {'ignore': 'self.__ignore_path_common'}), '(src_dir, remote_dir, ignore=self.__ignore_path_common)\n', (876, 931), False, 'import shutil\n'), ((1336, 1381), 'kaleidoscope.util.remove_publish_gradle', 'util.remove_publish_gradle', (['build_gradle_file'], {}), '(build_gradle_file)\n', (1362, 1381), False, 'from kaleidoscope import util\n'), ((1554, 1590), 'shutil.rmtree', 'shutil.rmtree', (['self.__tmp_github_dir'], {}), '(self.__tmp_github_dir)\n', (1567, 1590), False, 'import shutil\n'), ((1779, 1802), 'os.path.join', 'os.path.join', (['dir', 'file'], {}), '(dir, file)\n', (1791, 1802), False, 'import os\n'), ((549, 584), 'os.path.basename', 'os.path.basename', (['remote_repository'], {}), '(remote_repository)\n', (565, 584), False, 'import os\n'), ((823, 848), 'shutil.rmtree', 'shutil.rmtree', (['remote_dir'], {}), '(remote_dir)\n', (836, 848), False, 'import shutil\n'), ((1037, 1069), 'shutil.copy', 'shutil.copy', (['src_dir', 'remote_dir'], {}), '(src_dir, remote_dir)\n', (1048, 1069), False, 'import shutil\n')] |
# -*- coding: utf-8 -*-
# Copyright (c) 2018, Ansible Project
# Copyright (c) 2018, <NAME> <<EMAIL>>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible_collections.community.general.tests.unit.plugins.modules.utils import ModuleTestCase, set_module_args
from ansible_collections.community.general.tests.unit.compat.mock import patch
from ansible_collections.community.general.tests.unit.compat.mock import Mock
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.modules.system.java_keystore import create_jks, cert_changed, ArgumentSpec
class TestCreateJavaKeystore(ModuleTestCase):
"""Test the creation of a Java keystore."""
def setUp(self):
"""Setup."""
super(TestCreateJavaKeystore, self).setUp()
orig_exists = os.path.exists
self.spec = ArgumentSpec()
self.mock_create_file = patch('ansible_collections.community.general.plugins.modules.system.java_keystore.create_file',
side_effect=lambda path, content: path)
self.mock_run_commands = patch('ansible_collections.community.general.plugins.modules.system.java_keystore.run_commands')
self.mock_os_path_exists = patch('os.path.exists',
side_effect=lambda path: True if path == '/path/to/keystore.jks' else orig_exists(path))
self.mock_selinux_context = patch('ansible.module_utils.basic.AnsibleModule.selinux_context',
side_effect=lambda path: ['unconfined_u', 'object_r', 'user_home_t', 's0'])
self.mock_is_special_selinux_path = patch('ansible.module_utils.basic.AnsibleModule.is_special_selinux_path',
side_effect=lambda path: (False, None))
self.run_commands = self.mock_run_commands.start()
self.create_file = self.mock_create_file.start()
self.selinux_context = self.mock_selinux_context.start()
self.is_special_selinux_path = self.mock_is_special_selinux_path.start()
self.os_path_exists = self.mock_os_path_exists.start()
def tearDown(self):
"""Teardown."""
super(TestCreateJavaKeystore, self).tearDown()
self.mock_create_file.stop()
self.mock_run_commands.stop()
self.mock_selinux_context.stop()
self.mock_is_special_selinux_path.stop()
self.mock_os_path_exists.stop()
def test_create_jks_success(self):
set_module_args(dict(
certificate='cert-foo',
private_key='private-foo',
dest='/path/to/keystore.jks',
name='foo',
password='<PASSWORD>'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
module.exit_json = Mock()
with patch('os.remove', return_value=True):
self.run_commands.side_effect = lambda module, cmd, data: (0, '', '')
create_jks(module, "test", "openssl", "keytool", "/path/to/keystore.jks", "changeit", "")
module.exit_json.assert_called_once_with(
changed=True,
cmd=["keytool", "-importkeystore",
"-destkeystore", "/path/to/keystore.jks",
"-srckeystore", "/tmp/keystore.p12", "-srcstoretype", "pkcs12", "-alias", "test",
"-deststorepass", "changeit", "-srcstorepass", "changeit", "-noprompt"],
msg='',
rc=0,
stdout_lines=''
)
def test_create_jks_keypass_fail_export_pkcs12(self):
set_module_args(dict(
certificate='cert-foo',
private_key='private-foo',
private_key_passphrase='<PASSWORD>',
dest='/path/to/keystore.jks',
name='foo',
password='<PASSWORD>'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
module.fail_json = Mock()
with patch('os.remove', return_value=True):
self.run_commands.side_effect = [(1, '', ''), (0, '', '')]
create_jks(module, "test", "openssl", "keytool", "/path/to/keystore.jks", "changeit", "passphrase-foo")
module.fail_json.assert_called_once_with(
cmd=["openssl", "pkcs12", "-export", "-name", "test",
"-in", "/tmp/foo.crt", "-inkey", "/tmp/foo.key",
"-out", "/tmp/keystore.p12",
"-passout", "stdin",
"-passin", "stdin"],
msg='',
rc=1
)
def test_create_jks_fail_export_pkcs12(self):
set_module_args(dict(
certificate='cert-foo',
private_key='private-foo',
dest='/path/to/keystore.jks',
name='foo',
password='<PASSWORD>'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
module.fail_json = Mock()
with patch('os.remove', return_value=True):
self.run_commands.side_effect = [(1, '', ''), (0, '', '')]
create_jks(module, "test", "openssl", "keytool", "/path/to/keystore.jks", "changeit", "")
module.fail_json.assert_called_once_with(
cmd=["openssl", "pkcs12", "-export", "-name", "test",
"-in", "/tmp/foo.crt", "-inkey", "/tmp/foo.key",
"-out", "/tmp/keystore.p12",
"-passout", "stdin"],
msg='',
rc=1
)
def test_create_jks_fail_import_key(self):
set_module_args(dict(
certificate='cert-foo',
private_key='private-foo',
dest='/path/to/keystore.jks',
name='foo',
password='<PASSWORD>'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
module.fail_json = Mock()
with patch('os.remove', return_value=True):
self.run_commands.side_effect = [(0, '', ''), (1, '', '')]
create_jks(module, "test", "openssl", "keytool", "/path/to/keystore.jks", "changeit", "")
module.fail_json.assert_called_once_with(
cmd=["keytool", "-importkeystore",
"-destkeystore", "/path/to/keystore.jks",
"-srckeystore", "/tmp/keystore.p12", "-srcstoretype", "pkcs12", "-alias", "test",
"-deststorepass", "changeit", "-srcstorepass", "changeit", "-noprompt"],
msg='',
rc=1
)
class TestCertChanged(ModuleTestCase):
"""Test if the cert has changed."""
def setUp(self):
"""Setup."""
super(TestCertChanged, self).setUp()
self.spec = ArgumentSpec()
self.mock_create_file = patch('ansible_collections.community.general.plugins.modules.system.java_keystore.create_file',
side_effect=lambda path, content: path)
self.mock_run_commands = patch('ansible_collections.community.general.plugins.modules.system.java_keystore.run_commands')
self.run_commands = self.mock_run_commands.start()
self.create_file = self.mock_create_file.start()
def tearDown(self):
"""Teardown."""
super(TestCertChanged, self).tearDown()
self.mock_create_file.stop()
self.mock_run_commands.stop()
def test_cert_unchanged_same_fingerprint(self):
set_module_args(dict(
certificate='cert-foo',
private_key='private-foo',
dest='/path/to/keystore.jks',
name='foo',
password='<PASSWORD>'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
with patch('os.remove', return_value=True):
self.run_commands.side_effect = [(0, 'foo=abcd:1234:efgh', ''), (0, 'SHA256: abcd:1234:efgh', '')]
result = cert_changed(module, "openssl", "keytool", "/path/to/keystore.jks", "changeit", 'foo')
self.assertFalse(result, 'Fingerprint is identical')
def test_cert_changed_fingerprint_mismatch(self):
set_module_args(dict(
certificate='cert-foo',
private_key='private-foo',
dest='/path/to/keystore.jks',
name='foo',
password='<PASSWORD>'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
with patch('os.remove', return_value=True):
self.run_commands.side_effect = [(0, 'foo=abcd:1234:efgh', ''), (0, 'SHA256: wxyz:9876:stuv', '')]
result = cert_changed(module, "openssl", "keytool", "/path/to/keystore.jks", "changeit", 'foo')
self.assertTrue(result, 'Fingerprint mismatch')
def test_cert_changed_alias_does_not_exist(self):
set_module_args(dict(
certificate='cert-foo',
private_key='private-foo',
dest='/path/to/keystore.jks',
name='foo',
password='<PASSWORD>it'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
with patch('os.remove', return_value=True):
self.run_commands.side_effect = [(0, 'foo=abcd:1234:efgh', ''),
(1, 'keytool error: java.lang.Exception: Alias <foo> does not exist', '')]
result = cert_changed(module, "openssl", "keytool", "/path/to/keystore.jks", "changeit", 'foo')
self.assertTrue(result, 'Certificate does not exist')
def test_cert_changed_fail_read_cert(self):
set_module_args(dict(
certificate='cert-foo',
private_key='private-foo',
dest='/path/to/keystore.jks',
name='foo',
password='<PASSWORD>'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
module.fail_json = Mock()
with patch('os.remove', return_value=True):
self.run_commands.side_effect = [(1, '', 'Oops'), (0, 'SHA256: wxyz:9876:stuv', '')]
cert_changed(module, "openssl", "keytool", "/path/to/keystore.jks", "changeit", 'foo')
module.fail_json.assert_called_once_with(
cmd=["openssl", "x509", "-noout", "-in", "/tmp/foo.crt", "-fingerprint", "-sha256"],
msg='',
err='Oops',
rc=1
)
def test_cert_changed_fail_read_keystore(self):
set_module_args(dict(
certificate='cert-foo',
private_key='private-foo',
dest='/path/to/keystore.jks',
name='foo',
password='<PASSWORD>'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
module.fail_json = Mock(return_value=True)
with patch('os.remove', return_value=True):
self.run_commands.side_effect = [(0, 'foo: wxyz:9876:stuv', ''), (1, '', 'Oops')]
cert_changed(module, "openssl", "keytool", "/path/to/keystore.jks", "changeit", 'foo')
module.fail_json.assert_called_with(
cmd=["keytool", "-list", "-alias", "foo", "-keystore", "/path/to/keystore.jks", "-storepass", "changeit", "-v"],
msg='',
err='Oops',
rc=1
)
| [
"ansible_collections.community.general.plugins.modules.system.java_keystore.cert_changed",
"ansible.module_utils.basic.AnsibleModule",
"ansible_collections.community.general.plugins.modules.system.java_keystore.ArgumentSpec",
"ansible_collections.community.general.plugins.modules.system.java_keystore.create_jks",
"ansible_collections.community.general.tests.unit.compat.mock.Mock",
"ansible_collections.community.general.tests.unit.compat.mock.patch"
] | [((998, 1012), 'ansible_collections.community.general.plugins.modules.system.java_keystore.ArgumentSpec', 'ArgumentSpec', ([], {}), '()\n', (1010, 1012), False, 'from ansible_collections.community.general.plugins.modules.system.java_keystore import create_jks, cert_changed, ArgumentSpec\n'), ((1045, 1190), 'ansible_collections.community.general.tests.unit.compat.mock.patch', 'patch', (['"""ansible_collections.community.general.plugins.modules.system.java_keystore.create_file"""'], {'side_effect': '(lambda path, content: path)'}), "(\n 'ansible_collections.community.general.plugins.modules.system.java_keystore.create_file'\n , side_effect=lambda path, content: path)\n", (1050, 1190), False, 'from ansible_collections.community.general.tests.unit.compat.mock import patch\n'), ((1252, 1358), 'ansible_collections.community.general.tests.unit.compat.mock.patch', 'patch', (['"""ansible_collections.community.general.plugins.modules.system.java_keystore.run_commands"""'], {}), "(\n 'ansible_collections.community.general.plugins.modules.system.java_keystore.run_commands'\n )\n", (1257, 1358), False, 'from ansible_collections.community.general.tests.unit.compat.mock import patch\n'), ((1574, 1719), 'ansible_collections.community.general.tests.unit.compat.mock.patch', 'patch', (['"""ansible.module_utils.basic.AnsibleModule.selinux_context"""'], {'side_effect': "(lambda path: ['unconfined_u', 'object_r', 'user_home_t', 's0'])"}), "('ansible.module_utils.basic.AnsibleModule.selinux_context',\n side_effect=lambda path: ['unconfined_u', 'object_r', 'user_home_t', 's0'])\n", (1579, 1719), False, 'from ansible_collections.community.general.tests.unit.compat.mock import patch\n'), ((1802, 1919), 'ansible_collections.community.general.tests.unit.compat.mock.patch', 'patch', (['"""ansible.module_utils.basic.AnsibleModule.is_special_selinux_path"""'], {'side_effect': '(lambda path: (False, None))'}), "('ansible.module_utils.basic.AnsibleModule.is_special_selinux_path',\n side_effect=lambda path: (False, None))\n", (1807, 1919), False, 'from ansible_collections.community.general.tests.unit.compat.mock import patch\n'), ((2874, 2982), 'ansible.module_utils.basic.AnsibleModule', 'AnsibleModule', ([], {'argument_spec': 'self.spec.argument_spec', 'supports_check_mode': 'self.spec.supports_check_mode'}), '(argument_spec=self.spec.argument_spec, supports_check_mode=\n self.spec.supports_check_mode)\n', (2887, 2982), False, 'from ansible.module_utils.basic import AnsibleModule\n'), ((3040, 3046), 'ansible_collections.community.general.tests.unit.compat.mock.Mock', 'Mock', ([], {}), '()\n', (3044, 3046), False, 'from ansible_collections.community.general.tests.unit.compat.mock import Mock\n'), ((4113, 4221), 'ansible.module_utils.basic.AnsibleModule', 'AnsibleModule', ([], {'argument_spec': 'self.spec.argument_spec', 'supports_check_mode': 'self.spec.supports_check_mode'}), '(argument_spec=self.spec.argument_spec, supports_check_mode=\n self.spec.supports_check_mode)\n', (4126, 4221), False, 'from ansible.module_utils.basic import AnsibleModule\n'), ((4279, 4285), 'ansible_collections.community.general.tests.unit.compat.mock.Mock', 'Mock', ([], {}), '()\n', (4283, 4285), False, 'from ansible_collections.community.general.tests.unit.compat.mock import Mock\n'), ((5198, 5306), 'ansible.module_utils.basic.AnsibleModule', 'AnsibleModule', ([], {'argument_spec': 'self.spec.argument_spec', 'supports_check_mode': 'self.spec.supports_check_mode'}), '(argument_spec=self.spec.argument_spec, supports_check_mode=\n 
self.spec.supports_check_mode)\n', (5211, 5306), False, 'from ansible.module_utils.basic import AnsibleModule\n'), ((5364, 5370), 'ansible_collections.community.general.tests.unit.compat.mock.Mock', 'Mock', ([], {}), '()\n', (5368, 5370), False, 'from ansible_collections.community.general.tests.unit.compat.mock import Mock\n'), ((6225, 6333), 'ansible.module_utils.basic.AnsibleModule', 'AnsibleModule', ([], {'argument_spec': 'self.spec.argument_spec', 'supports_check_mode': 'self.spec.supports_check_mode'}), '(argument_spec=self.spec.argument_spec, supports_check_mode=\n self.spec.supports_check_mode)\n', (6238, 6333), False, 'from ansible.module_utils.basic import AnsibleModule\n'), ((6391, 6397), 'ansible_collections.community.general.tests.unit.compat.mock.Mock', 'Mock', ([], {}), '()\n', (6395, 6397), False, 'from ansible_collections.community.general.tests.unit.compat.mock import Mock\n'), ((7237, 7251), 'ansible_collections.community.general.plugins.modules.system.java_keystore.ArgumentSpec', 'ArgumentSpec', ([], {}), '()\n', (7249, 7251), False, 'from ansible_collections.community.general.plugins.modules.system.java_keystore import create_jks, cert_changed, ArgumentSpec\n'), ((7284, 7429), 'ansible_collections.community.general.tests.unit.compat.mock.patch', 'patch', (['"""ansible_collections.community.general.plugins.modules.system.java_keystore.create_file"""'], {'side_effect': '(lambda path, content: path)'}), "(\n 'ansible_collections.community.general.plugins.modules.system.java_keystore.create_file'\n , side_effect=lambda path, content: path)\n", (7289, 7429), False, 'from ansible_collections.community.general.tests.unit.compat.mock import patch\n'), ((7491, 7597), 'ansible_collections.community.general.tests.unit.compat.mock.patch', 'patch', (['"""ansible_collections.community.general.plugins.modules.system.java_keystore.run_commands"""'], {}), "(\n 'ansible_collections.community.general.plugins.modules.system.java_keystore.run_commands'\n )\n", (7496, 7597), False, 'from ansible_collections.community.general.tests.unit.compat.mock import patch\n'), ((8163, 8271), 'ansible.module_utils.basic.AnsibleModule', 'AnsibleModule', ([], {'argument_spec': 'self.spec.argument_spec', 'supports_check_mode': 'self.spec.supports_check_mode'}), '(argument_spec=self.spec.argument_spec, supports_check_mode=\n self.spec.supports_check_mode)\n', (8176, 8271), False, 'from ansible.module_utils.basic import AnsibleModule\n'), ((8927, 9035), 'ansible.module_utils.basic.AnsibleModule', 'AnsibleModule', ([], {'argument_spec': 'self.spec.argument_spec', 'supports_check_mode': 'self.spec.supports_check_mode'}), '(argument_spec=self.spec.argument_spec, supports_check_mode=\n self.spec.supports_check_mode)\n', (8940, 9035), False, 'from ansible.module_utils.basic import AnsibleModule\n'), ((9688, 9796), 'ansible.module_utils.basic.AnsibleModule', 'AnsibleModule', ([], {'argument_spec': 'self.spec.argument_spec', 'supports_check_mode': 'self.spec.supports_check_mode'}), '(argument_spec=self.spec.argument_spec, supports_check_mode=\n self.spec.supports_check_mode)\n', (9701, 9796), False, 'from ansible.module_utils.basic import AnsibleModule\n'), ((10532, 10640), 'ansible.module_utils.basic.AnsibleModule', 'AnsibleModule', ([], {'argument_spec': 'self.spec.argument_spec', 'supports_check_mode': 'self.spec.supports_check_mode'}), '(argument_spec=self.spec.argument_spec, supports_check_mode=\n self.spec.supports_check_mode)\n', (10545, 10640), False, 'from ansible.module_utils.basic import 
AnsibleModule\n'), ((10698, 10704), 'ansible_collections.community.general.tests.unit.compat.mock.Mock', 'Mock', ([], {}), '()\n', (10702, 10704), False, 'from ansible_collections.community.general.tests.unit.compat.mock import Mock\n'), ((11483, 11591), 'ansible.module_utils.basic.AnsibleModule', 'AnsibleModule', ([], {'argument_spec': 'self.spec.argument_spec', 'supports_check_mode': 'self.spec.supports_check_mode'}), '(argument_spec=self.spec.argument_spec, supports_check_mode=\n self.spec.supports_check_mode)\n', (11496, 11591), False, 'from ansible.module_utils.basic import AnsibleModule\n'), ((11649, 11672), 'ansible_collections.community.general.tests.unit.compat.mock.Mock', 'Mock', ([], {'return_value': '(True)'}), '(return_value=True)\n', (11653, 11672), False, 'from ansible_collections.community.general.tests.unit.compat.mock import Mock\n'), ((3061, 3098), 'ansible_collections.community.general.tests.unit.compat.mock.patch', 'patch', (['"""os.remove"""'], {'return_value': '(True)'}), "('os.remove', return_value=True)\n", (3066, 3098), False, 'from ansible_collections.community.general.tests.unit.compat.mock import patch\n'), ((3194, 3287), 'ansible_collections.community.general.plugins.modules.system.java_keystore.create_jks', 'create_jks', (['module', '"""test"""', '"""openssl"""', '"""keytool"""', '"""/path/to/keystore.jks"""', '"""changeit"""', '""""""'], {}), "(module, 'test', 'openssl', 'keytool', '/path/to/keystore.jks',\n 'changeit', '')\n", (3204, 3287), False, 'from ansible_collections.community.general.plugins.modules.system.java_keystore import create_jks, cert_changed, ArgumentSpec\n'), ((4300, 4337), 'ansible_collections.community.general.tests.unit.compat.mock.patch', 'patch', (['"""os.remove"""'], {'return_value': '(True)'}), "('os.remove', return_value=True)\n", (4305, 4337), False, 'from ansible_collections.community.general.tests.unit.compat.mock import patch\n'), ((4422, 4529), 'ansible_collections.community.general.plugins.modules.system.java_keystore.create_jks', 'create_jks', (['module', '"""test"""', '"""openssl"""', '"""keytool"""', '"""/path/to/keystore.jks"""', '"""changeit"""', '"""passphrase-foo"""'], {}), "(module, 'test', 'openssl', 'keytool', '/path/to/keystore.jks',\n 'changeit', 'passphrase-foo')\n", (4432, 4529), False, 'from ansible_collections.community.general.plugins.modules.system.java_keystore import create_jks, cert_changed, ArgumentSpec\n'), ((5385, 5422), 'ansible_collections.community.general.tests.unit.compat.mock.patch', 'patch', (['"""os.remove"""'], {'return_value': '(True)'}), "('os.remove', return_value=True)\n", (5390, 5422), False, 'from ansible_collections.community.general.tests.unit.compat.mock import patch\n'), ((5507, 5600), 'ansible_collections.community.general.plugins.modules.system.java_keystore.create_jks', 'create_jks', (['module', '"""test"""', '"""openssl"""', '"""keytool"""', '"""/path/to/keystore.jks"""', '"""changeit"""', '""""""'], {}), "(module, 'test', 'openssl', 'keytool', '/path/to/keystore.jks',\n 'changeit', '')\n", (5517, 5600), False, 'from ansible_collections.community.general.plugins.modules.system.java_keystore import create_jks, cert_changed, ArgumentSpec\n'), ((6412, 6449), 'ansible_collections.community.general.tests.unit.compat.mock.patch', 'patch', (['"""os.remove"""'], {'return_value': '(True)'}), "('os.remove', return_value=True)\n", (6417, 6449), False, 'from ansible_collections.community.general.tests.unit.compat.mock import patch\n'), ((6534, 6627), 
'ansible_collections.community.general.plugins.modules.system.java_keystore.create_jks', 'create_jks', (['module', '"""test"""', '"""openssl"""', '"""keytool"""', '"""/path/to/keystore.jks"""', '"""changeit"""', '""""""'], {}), "(module, 'test', 'openssl', 'keytool', '/path/to/keystore.jks',\n 'changeit', '')\n", (6544, 6627), False, 'from ansible_collections.community.general.plugins.modules.system.java_keystore import create_jks, cert_changed, ArgumentSpec\n'), ((8315, 8352), 'ansible_collections.community.general.tests.unit.compat.mock.patch', 'patch', (['"""os.remove"""'], {'return_value': '(True)'}), "('os.remove', return_value=True)\n", (8320, 8352), False, 'from ansible_collections.community.general.tests.unit.compat.mock import patch\n'), ((8486, 8576), 'ansible_collections.community.general.plugins.modules.system.java_keystore.cert_changed', 'cert_changed', (['module', '"""openssl"""', '"""keytool"""', '"""/path/to/keystore.jks"""', '"""changeit"""', '"""foo"""'], {}), "(module, 'openssl', 'keytool', '/path/to/keystore.jks',\n 'changeit', 'foo')\n", (8498, 8576), False, 'from ansible_collections.community.general.plugins.modules.system.java_keystore import create_jks, cert_changed, ArgumentSpec\n'), ((9079, 9116), 'ansible_collections.community.general.tests.unit.compat.mock.patch', 'patch', (['"""os.remove"""'], {'return_value': '(True)'}), "('os.remove', return_value=True)\n", (9084, 9116), False, 'from ansible_collections.community.general.tests.unit.compat.mock import patch\n'), ((9250, 9340), 'ansible_collections.community.general.plugins.modules.system.java_keystore.cert_changed', 'cert_changed', (['module', '"""openssl"""', '"""keytool"""', '"""/path/to/keystore.jks"""', '"""changeit"""', '"""foo"""'], {}), "(module, 'openssl', 'keytool', '/path/to/keystore.jks',\n 'changeit', 'foo')\n", (9262, 9340), False, 'from ansible_collections.community.general.plugins.modules.system.java_keystore import create_jks, cert_changed, ArgumentSpec\n'), ((9840, 9877), 'ansible_collections.community.general.tests.unit.compat.mock.patch', 'patch', (['"""os.remove"""'], {'return_value': '(True)'}), "('os.remove', return_value=True)\n", (9845, 9877), False, 'from ansible_collections.community.general.tests.unit.compat.mock import patch\n'), ((10096, 10186), 'ansible_collections.community.general.plugins.modules.system.java_keystore.cert_changed', 'cert_changed', (['module', '"""openssl"""', '"""keytool"""', '"""/path/to/keystore.jks"""', '"""changeit"""', '"""foo"""'], {}), "(module, 'openssl', 'keytool', '/path/to/keystore.jks',\n 'changeit', 'foo')\n", (10108, 10186), False, 'from ansible_collections.community.general.plugins.modules.system.java_keystore import create_jks, cert_changed, ArgumentSpec\n'), ((10719, 10756), 'ansible_collections.community.general.tests.unit.compat.mock.patch', 'patch', (['"""os.remove"""'], {'return_value': '(True)'}), "('os.remove', return_value=True)\n", (10724, 10756), False, 'from ansible_collections.community.general.tests.unit.compat.mock import patch\n'), ((10867, 10957), 'ansible_collections.community.general.plugins.modules.system.java_keystore.cert_changed', 'cert_changed', (['module', '"""openssl"""', '"""keytool"""', '"""/path/to/keystore.jks"""', '"""changeit"""', '"""foo"""'], {}), "(module, 'openssl', 'keytool', '/path/to/keystore.jks',\n 'changeit', 'foo')\n", (10879, 10957), False, 'from ansible_collections.community.general.plugins.modules.system.java_keystore import create_jks, cert_changed, ArgumentSpec\n'), ((11687, 11724), 
'ansible_collections.community.general.tests.unit.compat.mock.patch', 'patch', (['"""os.remove"""'], {'return_value': '(True)'}), "('os.remove', return_value=True)\n", (11692, 11724), False, 'from ansible_collections.community.general.tests.unit.compat.mock import patch\n'), ((11832, 11922), 'ansible_collections.community.general.plugins.modules.system.java_keystore.cert_changed', 'cert_changed', (['module', '"""openssl"""', '"""keytool"""', '"""/path/to/keystore.jks"""', '"""changeit"""', '"""foo"""'], {}), "(module, 'openssl', 'keytool', '/path/to/keystore.jks',\n 'changeit', 'foo')\n", (11844, 11922), False, 'from ansible_collections.community.general.plugins.modules.system.java_keystore import create_jks, cert_changed, ArgumentSpec\n')] |
import io
import torch
class TensorMessage():
"""A message containing a tensor and metadata."""
def __init__(self, tensor: torch.Tensor, name: str, ip: str):
"""
Args:
tensor (torch.Tensor): the tensor to send in the payload; it is
converted to and stored as a byte array
name (str): a name to distinguish the sending client from others
ip (str): the ip address of the client sending this message
"""
self.tensor = io.BytesIO()
torch.save(tensor, self.tensor)
self.tensor.seek(0) # We assume the downstream user will try a load.
self.name = name
self.ip = ip
| [
"io.BytesIO",
"torch.save"
] | [((510, 522), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (520, 522), False, 'import io\n'), ((531, 562), 'torch.save', 'torch.save', (['tensor', 'self.tensor'], {}), '(tensor, self.tensor)\n', (541, 562), False, 'import torch\n')] |
import os
from zipfile import ZipFile
def unzip(zip_file):
try:
# Get root name of zip file, use as directory name
contents_dir = os.path.splitext(zip_file)
# Create a ZipFile Object
with ZipFile(zip_file, 'r') as unzip_dir:
# Extract all the contents of zip file in FB directory
unzip_dir.extractall(contents_dir[0])
return (contents_dir[0])
except Exception as e:
print ("Not a valid zip file or location.")
print(e)
| [
"os.path.splitext",
"zipfile.ZipFile"
] | [((152, 178), 'os.path.splitext', 'os.path.splitext', (['zip_file'], {}), '(zip_file)\n', (168, 178), False, 'import os\n'), ((226, 248), 'zipfile.ZipFile', 'ZipFile', (['zip_file', '"""r"""'], {}), "(zip_file, 'r')\n", (233, 248), False, 'from zipfile import ZipFile\n')] |
# Copyright (c) 2020, DjaoDjin inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
URLs for the djaodjin-survey django app testsite.
"""
from django.views.generic import RedirectView, TemplateView
from survey.compat import reverse_lazy
from rules.urldecorators import include, url
import debug_toolbar
urlpatterns = [
url(r'^__debug__/', include(debug_toolbar.urls)),
url(r'^$', TemplateView.as_view(template_name='index.html'), name='home'),
url(r'^api/', include('survey.urls.api')),
url(r'^accounts/profile/',
RedirectView.as_view(url=reverse_lazy('survey_list'))),
url(r'^accounts/', include('django.contrib.auth.urls')),
url(r'^manager/', include('survey.urls.manager'),
decorators=['django.contrib.auth.decorators.login_required']),
url(r'^matrix/', include('survey.urls.matrix')),
url(r'^', include('survey.urls.sample')),
]
| [
"django.views.generic.TemplateView.as_view",
"rules.urldecorators.include",
"survey.compat.reverse_lazy"
] | [((1610, 1637), 'rules.urldecorators.include', 'include', (['debug_toolbar.urls'], {}), '(debug_toolbar.urls)\n', (1617, 1637), False, 'from rules.urldecorators import include, url\n'), ((1655, 1703), 'django.views.generic.TemplateView.as_view', 'TemplateView.as_view', ([], {'template_name': '"""index.html"""'}), "(template_name='index.html')\n", (1675, 1703), False, 'from django.views.generic import RedirectView, TemplateView\n'), ((1737, 1763), 'rules.urldecorators.include', 'include', (['"""survey.urls.api"""'], {}), "('survey.urls.api')\n", (1744, 1763), False, 'from rules.urldecorators import include, url\n'), ((1884, 1919), 'rules.urldecorators.include', 'include', (['"""django.contrib.auth.urls"""'], {}), "('django.contrib.auth.urls')\n", (1891, 1919), False, 'from rules.urldecorators import include, url\n'), ((1944, 1974), 'rules.urldecorators.include', 'include', (['"""survey.urls.manager"""'], {}), "('survey.urls.manager')\n", (1951, 1974), False, 'from rules.urldecorators import include, url\n'), ((2068, 2097), 'rules.urldecorators.include', 'include', (['"""survey.urls.matrix"""'], {}), "('survey.urls.matrix')\n", (2075, 2097), False, 'from rules.urldecorators import include, url\n'), ((2114, 2143), 'rules.urldecorators.include', 'include', (['"""survey.urls.sample"""'], {}), "('survey.urls.sample')\n", (2121, 2143), False, 'from rules.urldecorators import include, url\n'), ((1830, 1857), 'survey.compat.reverse_lazy', 'reverse_lazy', (['"""survey_list"""'], {}), "('survey_list')\n", (1842, 1857), False, 'from survey.compat import reverse_lazy\n')] |
# -*- coding: utf-8 -*-
import pytest
import torch
from eznlp.dataset import Dataset
from eznlp.model import BertLikeConfig, SpanAttrClassificationDecoderConfig, JointExtractionDecoderConfig, ExtractorConfig
from eznlp.training import Trainer
class TestModel(object):
def _assert_batch_consistency(self):
self.model.eval()
batch = [self.dataset[i] for i in range(4)]
batch012 = self.dataset.collate(batch[:3]).to(self.device)
batch123 = self.dataset.collate(batch[1:]).to(self.device)
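        # batches [0,1,2] and [1,2,3] overlap on dataset items 1 and 2, so the model outputs
        # for those shared samples should agree up to small numerical differences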
losses012, states012 = self.model(batch012, return_states=True)
losses123, states123 = self.model(batch123, return_states=True)
hidden012, hidden123 = states012['full_hidden'], states123['full_hidden']
min_step = min(hidden012.size(1), hidden123.size(1))
delta_hidden = hidden012[1:, :min_step] - hidden123[:-1, :min_step]
assert delta_hidden.abs().max().item() < 1e-4
delta_losses = losses012[1:] - losses123[:-1]
assert delta_losses.abs().max().item() < 2e-4
pred012 = self.model.decode(batch012, **states012)
pred123 = self.model.decode(batch123, **states123)
assert pred012[1:] == pred123[:-1]
def _assert_trainable(self):
optimizer = torch.optim.AdamW(self.model.parameters())
trainer = Trainer(self.model, optimizer=optimizer, device=self.device)
dataloader = torch.utils.data.DataLoader(self.dataset,
batch_size=4,
shuffle=True,
collate_fn=self.dataset.collate)
trainer.train_epoch(dataloader)
def _setup_case(self, data, device):
self.device = device
self.dataset = Dataset(data, self.config)
self.dataset.build_vocabs_and_dims()
self.model = self.config.instantiate().to(self.device)
assert isinstance(self.config.name, str) and len(self.config.name) > 0
@pytest.mark.parametrize("agg_mode", ['max_pooling', 'multiplicative_attention'])
@pytest.mark.parametrize("ck_label_emb_dim", [25, 0])
def test_model(self, agg_mode, ck_label_emb_dim, HwaMei_demo, device):
self.config = ExtractorConfig(decoder=SpanAttrClassificationDecoderConfig(agg_mode=agg_mode, ck_label_emb_dim=ck_label_emb_dim))
self._setup_case(HwaMei_demo, device)
self._assert_batch_consistency()
self._assert_trainable()
def test_model_with_bert_like(self, HwaMei_demo, bert_with_tokenizer, device):
bert, tokenizer = bert_with_tokenizer
self.config = ExtractorConfig('span_attr_classification', ohots=None,
bert_like=BertLikeConfig(tokenizer=tokenizer, bert_like=bert),
intermediate2=None)
self._setup_case(HwaMei_demo, device)
self._assert_batch_consistency()
self._assert_trainable()
def test_prediction_without_gold(self, HwaMei_demo, device):
self.config = ExtractorConfig('span_attr_classification')
self._setup_case(HwaMei_demo, device)
data_wo_gold = [{'tokens': entry['tokens'],
'chunks': entry['chunks']} for entry in HwaMei_demo]
dataset_wo_gold = Dataset(data_wo_gold, self.config, training=False)
trainer = Trainer(self.model, device=device)
set_attributes_pred = trainer.predict(dataset_wo_gold)
assert len(set_attributes_pred) == len(data_wo_gold)
@pytest.mark.parametrize("training", [True, False])
@pytest.mark.parametrize("building", [True, False])
def test_chunks_obj(EAR_data_demo, training, building):
entry = EAR_data_demo[0]
chunks, attributes = entry['chunks'], entry['attributes']
if building:
config = ExtractorConfig(decoder=SpanAttrClassificationDecoderConfig())
attr_decoder_config = config.decoder
else:
config = ExtractorConfig(decoder=JointExtractionDecoderConfig(attr_decoder=SpanAttrClassificationDecoderConfig(), rel_decoder=None))
attr_decoder_config = config.decoder.attr_decoder
dataset = Dataset(EAR_data_demo, config, training=training)
dataset.build_vocabs_and_dims()
chunks_obj = dataset[0]['chunks_obj']
assert chunks_obj.attributes == attributes
if building:
assert chunks_obj.chunks == chunks
assert chunks_obj.is_built
else:
assert chunks_obj.chunks == chunks if training else len(chunks_obj.chunks) == 0
assert not chunks_obj.is_built
chunks_pred = [('EntA', 0, 1), ('EntB', 1, 2), ('EntA', 2, 3)]
chunks_obj.inject_chunks(chunks_pred)
chunks_obj.build(attr_decoder_config)
assert len(chunks_obj.chunks) == len(chunks) + len(chunks_pred) if training else len(chunks_obj.chunks) == len(chunks_pred)
assert chunks_obj.is_built
assert (chunks_obj.span_size_ids+1).tolist() == [e-s for l, s, e in chunks_obj.chunks]
assert chunks_obj.attr_label_ids.size(0) == len(chunks_obj.chunks)
attributes_retr = []
for chunk, attr_label_ids in zip(chunks_obj.chunks, chunks_obj.attr_label_ids):
attr_labels = [attr_decoder_config.idx2attr_label[i] for i, l in enumerate(attr_label_ids.tolist()) if l > 0]
if attr_decoder_config.attr_none_label not in attr_labels:
attributes_retr.extend([(attr_label, chunk) for attr_label in attr_labels])
if training or building:
assert set(attributes_retr) == set(attributes)
else:
assert len(attributes_retr) == 0
| [
"eznlp.model.BertLikeConfig",
"eznlp.model.ExtractorConfig",
"eznlp.training.Trainer",
"pytest.mark.parametrize",
"torch.utils.data.DataLoader",
"eznlp.model.SpanAttrClassificationDecoderConfig",
"eznlp.dataset.Dataset"
] | [((3689, 3739), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""training"""', '[True, False]'], {}), "('training', [True, False])\n", (3712, 3739), False, 'import pytest\n'), ((3741, 3791), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""building"""', '[True, False]'], {}), "('building', [True, False])\n", (3764, 3791), False, 'import pytest\n'), ((2110, 2195), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""agg_mode"""', "['max_pooling', 'multiplicative_attention']"], {}), "('agg_mode', ['max_pooling', 'multiplicative_attention']\n )\n", (2133, 2195), False, 'import pytest\n'), ((2196, 2248), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""ck_label_emb_dim"""', '[25, 0]'], {}), "('ck_label_emb_dim', [25, 0])\n", (2219, 2248), False, 'import pytest\n'), ((4314, 4363), 'eznlp.dataset.Dataset', 'Dataset', (['EAR_data_demo', 'config'], {'training': 'training'}), '(EAR_data_demo, config, training=training)\n', (4321, 4363), False, 'from eznlp.dataset import Dataset\n'), ((1378, 1438), 'eznlp.training.Trainer', 'Trainer', (['self.model'], {'optimizer': 'optimizer', 'device': 'self.device'}), '(self.model, optimizer=optimizer, device=self.device)\n', (1385, 1438), False, 'from eznlp.training import Trainer\n'), ((1460, 1566), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['self.dataset'], {'batch_size': '(4)', 'shuffle': '(True)', 'collate_fn': 'self.dataset.collate'}), '(self.dataset, batch_size=4, shuffle=True,\n collate_fn=self.dataset.collate)\n', (1487, 1566), False, 'import torch\n'), ((1873, 1899), 'eznlp.dataset.Dataset', 'Dataset', (['data', 'self.config'], {}), '(data, self.config)\n', (1880, 1899), False, 'from eznlp.dataset import Dataset\n'), ((3192, 3235), 'eznlp.model.ExtractorConfig', 'ExtractorConfig', (['"""span_attr_classification"""'], {}), "('span_attr_classification')\n", (3207, 3235), False, 'from eznlp.model import BertLikeConfig, SpanAttrClassificationDecoderConfig, JointExtractionDecoderConfig, ExtractorConfig\n'), ((3448, 3498), 'eznlp.dataset.Dataset', 'Dataset', (['data_wo_gold', 'self.config'], {'training': '(False)'}), '(data_wo_gold, self.config, training=False)\n', (3455, 3498), False, 'from eznlp.dataset import Dataset\n'), ((3526, 3560), 'eznlp.training.Trainer', 'Trainer', (['self.model'], {'device': 'device'}), '(self.model, device=device)\n', (3533, 3560), False, 'from eznlp.training import Trainer\n'), ((2370, 2464), 'eznlp.model.SpanAttrClassificationDecoderConfig', 'SpanAttrClassificationDecoderConfig', ([], {'agg_mode': 'agg_mode', 'ck_label_emb_dim': 'ck_label_emb_dim'}), '(agg_mode=agg_mode, ck_label_emb_dim=\n ck_label_emb_dim)\n', (2405, 2464), False, 'from eznlp.model import BertLikeConfig, SpanAttrClassificationDecoderConfig, JointExtractionDecoderConfig, ExtractorConfig\n'), ((2855, 2906), 'eznlp.model.BertLikeConfig', 'BertLikeConfig', ([], {'tokenizer': 'tokenizer', 'bert_like': 'bert'}), '(tokenizer=tokenizer, bert_like=bert)\n', (2869, 2906), False, 'from eznlp.model import BertLikeConfig, SpanAttrClassificationDecoderConfig, JointExtractionDecoderConfig, ExtractorConfig\n'), ((4002, 4039), 'eznlp.model.SpanAttrClassificationDecoderConfig', 'SpanAttrClassificationDecoderConfig', ([], {}), '()\n', (4037, 4039), False, 'from eznlp.model import BertLikeConfig, SpanAttrClassificationDecoderConfig, JointExtractionDecoderConfig, ExtractorConfig\n'), ((4179, 4216), 'eznlp.model.SpanAttrClassificationDecoderConfig', 'SpanAttrClassificationDecoderConfig', ([], {}), '()\n', (4214, 
4216), False, 'from eznlp.model import BertLikeConfig, SpanAttrClassificationDecoderConfig, JointExtractionDecoderConfig, ExtractorConfig\n')] |
import torch
import numpy as np
from sklearn.metrics import average_precision_score
class TrainNYTMetric(object):
    def __init__(self, correct_prediction, exclude_na_flatten_output, order):
        self.correct_predictions = correct_prediction
        self.exclude_na_flatten_output = exclude_na_flatten_output
        self.order = order

    def accuracy(self, output, target):
        with torch.no_grad():
            pred = torch.argmax(output, dim=1)
            assert pred.shape[0] == len(target)
            correct = 0
            correct += torch.sum(pred == target).item()
        return correct / len(target)
def accuracy_all(self):
return torch.mean(self.correct_predictions.float())
def accuracy_sep(self, correct_predictions, label, tot1, tot2, s1, s2):
num = 0
s = 0
        for num in correct_predictions: # over these 160 examples in the batch
            if label[s] == 0: # if the prediction == 0
tot1 += 1.0
                if num: ## if == 0 and correct
s1 += 1.0
else:
tot2 += 1.0
                if num: ## if prediction != 0 and correct
s2 += 1.0
s = s + 1
return s1 / tot1, s2 / tot2
class EvalNYTMetric(object):
def __init__(self, id2rel, fewrel_100,fewrel_200, exclude_na_flatten_label, exclude_na_label, index_non_zero):
self.id2rel = id2rel
self.fewrel_100 = fewrel_100
self.fewrel_200 = fewrel_200
self.exclude_na_flatten_label = exclude_na_flatten_label
self.exclude_na_label = exclude_na_label
self.index_non_zero = index_non_zero
def mi_ma_100(self, exclude_na_output):
ss = 0
ss10 = 0
ss15 = 0
ss20 = 0
ss_rel = {}
ss10_rel = {}
ss15_rel = {}
ss20_rel = {}
for j, label in zip(exclude_na_output, self.exclude_na_label):
score = None
num = None
for ind, ll in enumerate(label):
if ll > 0:
score = j[ind]
num = ind
break
if num is None:
continue
if self.id2rel[num + 1] in self.fewrel_100:
ss += 1.0
mx = 0
for sc in j:
if sc > score:
mx = mx + 1
if not num in ss_rel:
ss_rel[num] = 0
ss10_rel[num] = 0
ss15_rel[num] = 0
ss20_rel[num] = 0
ss_rel[num] += 1.0
if mx < 10:
ss10 += 1.0
ss10_rel[num] += 1.0
if mx < 15:
ss15 += 1.0
ss15_rel[num] += 1.0
if mx < 20:
ss20 += 1.0
ss20_rel[num] += 1.0
mi_10 = (ss10 / ss)
mi_15 = (ss15 / ss)
mi_20 = (ss20 / ss)
ma_10 = (np.array([ss10_rel[i] / ss_rel[i] for i in ss_rel])).mean()
ma_15 = (np.array([ss15_rel[i] / ss_rel[i] for i in ss_rel])).mean()
ma_20 = (np.array([ss20_rel[i] / ss_rel[i] for i in ss_rel])).mean()
return {"mi_10": mi_10, "mi_15": mi_15, "mi_20": mi_20, "ma_10": ma_10, "ma_15": ma_15, "ma_20": ma_20}
def mi_ma_200(self, exclude_na_output):
ss = 0
ss10 = 0
ss15 = 0
ss20 = 0
ss_rel = {}
ss10_rel = {}
ss15_rel = {}
ss20_rel = {}
for j, label in zip(exclude_na_output, self.exclude_na_label):
score = None
num = None
for ind, ll in enumerate(label):
if ll > 0:
score = j[ind]
num = ind
break
if num is None:
continue
if self.id2rel[num + 1] in self.fewrel_200:
ss += 1.0
mx = 0
for sc in j:
if sc > score:
mx = mx + 1
if not num in ss_rel:
ss_rel[num] = 0
ss10_rel[num] = 0
ss15_rel[num] = 0
ss20_rel[num] = 0
ss_rel[num] += 1.0
if mx < 10:
ss10 += 1.0
ss10_rel[num] += 1.0
if mx < 15:
ss15 += 1.0
ss15_rel[num] += 1.0
if mx < 20:
ss20 += 1.0
ss20_rel[num] += 1.0
mi_10 = (ss10 / ss)
mi_15 = (ss15 / ss)
mi_20 = (ss20 / ss)
ma_10 = (np.array([ss10_rel[i] / ss_rel[i] for i in ss_rel])).mean()
ma_15 = (np.array([ss15_rel[i] / ss_rel[i] for i in ss_rel])).mean()
ma_20 = (np.array([ss20_rel[i] / ss_rel[i] for i in ss_rel])).mean()
return {"mi_10": mi_10, "mi_15": mi_15, "mi_20": mi_20, "ma_10": ma_10, "ma_15": ma_15, "ma_20": ma_20}
def pr(self, exclude_na_output, exclude_na_flatten_output):
m = average_precision_score(self.exclude_na_flatten_label, exclude_na_flatten_output)
M = average_precision_score(self.exclude_na_label[:, self.index_non_zero], exclude_na_output[:, self.index_non_zero],
average='macro')
return {"m":m, "M": M}
class EvalNYTMetricX(object):
def __init__(self, exclude_na_flatten_label):
self.exclude_na_flatten_label = exclude_na_flatten_label
def pone_two_all(self, exclude_na_flatten_output):
order = np.argsort(-exclude_na_flatten_output)
return np.mean(self.exclude_na_flatten_label[order[:100]]), np.mean(self.exclude_na_flatten_label[order[:200]]), np.mean(self.exclude_na_flatten_label[order[:300]])
| [
"numpy.mean",
"sklearn.metrics.average_precision_score",
"numpy.argsort",
"numpy.array",
"torch.sum",
"torch.no_grad",
"torch.argmax"
] | [((4999, 5084), 'sklearn.metrics.average_precision_score', 'average_precision_score', (['self.exclude_na_flatten_label', 'exclude_na_flatten_output'], {}), '(self.exclude_na_flatten_label,\n exclude_na_flatten_output)\n', (5022, 5084), False, 'from sklearn.metrics import average_precision_score\n'), ((5093, 5227), 'sklearn.metrics.average_precision_score', 'average_precision_score', (['self.exclude_na_label[:, self.index_non_zero]', 'exclude_na_output[:, self.index_non_zero]'], {'average': '"""macro"""'}), "(self.exclude_na_label[:, self.index_non_zero],\n exclude_na_output[:, self.index_non_zero], average='macro')\n", (5116, 5227), False, 'from sklearn.metrics import average_precision_score\n'), ((5510, 5548), 'numpy.argsort', 'np.argsort', (['(-exclude_na_flatten_output)'], {}), '(-exclude_na_flatten_output)\n', (5520, 5548), True, 'import numpy as np\n'), ((5564, 5615), 'numpy.mean', 'np.mean', (['self.exclude_na_flatten_label[order[:100]]'], {}), '(self.exclude_na_flatten_label[order[:100]])\n', (5571, 5615), True, 'import numpy as np\n'), ((5618, 5669), 'numpy.mean', 'np.mean', (['self.exclude_na_flatten_label[order[:200]]'], {}), '(self.exclude_na_flatten_label[order[:200]])\n', (5625, 5669), True, 'import numpy as np\n'), ((5671, 5722), 'numpy.mean', 'np.mean', (['self.exclude_na_flatten_label[order[:300]]'], {}), '(self.exclude_na_flatten_label[order[:300]])\n', (5678, 5722), True, 'import numpy as np\n'), ((254, 269), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (267, 269), False, 'import torch\n'), ((294, 321), 'torch.argmax', 'torch.argmax', (['output'], {'dim': '(1)'}), '(output, dim=1)\n', (306, 321), False, 'import torch\n'), ((2901, 2954), 'numpy.array', 'np.array', (['[(ss10_rel[i] / ss_rel[i]) for i in ss_rel]'], {}), '([(ss10_rel[i] / ss_rel[i]) for i in ss_rel])\n', (2909, 2954), True, 'import numpy as np\n'), ((2978, 3031), 'numpy.array', 'np.array', (['[(ss15_rel[i] / ss_rel[i]) for i in ss_rel]'], {}), '([(ss15_rel[i] / ss_rel[i]) for i in ss_rel])\n', (2986, 3031), True, 'import numpy as np\n'), ((3055, 3108), 'numpy.array', 'np.array', (['[(ss20_rel[i] / ss_rel[i]) for i in ss_rel]'], {}), '([(ss20_rel[i] / ss_rel[i]) for i in ss_rel])\n', (3063, 3108), True, 'import numpy as np\n'), ((4595, 4648), 'numpy.array', 'np.array', (['[(ss10_rel[i] / ss_rel[i]) for i in ss_rel]'], {}), '([(ss10_rel[i] / ss_rel[i]) for i in ss_rel])\n', (4603, 4648), True, 'import numpy as np\n'), ((4672, 4725), 'numpy.array', 'np.array', (['[(ss15_rel[i] / ss_rel[i]) for i in ss_rel]'], {}), '([(ss15_rel[i] / ss_rel[i]) for i in ss_rel])\n', (4680, 4725), True, 'import numpy as np\n'), ((4749, 4802), 'numpy.array', 'np.array', (['[(ss20_rel[i] / ss_rel[i]) for i in ss_rel]'], {}), '([(ss20_rel[i] / ss_rel[i]) for i in ss_rel])\n', (4757, 4802), True, 'import numpy as np\n'), ((429, 454), 'torch.sum', 'torch.sum', (['(pred == target)'], {}), '(pred == target)\n', (438, 454), False, 'import torch\n')] |
# Import Modules
from flask import Flask, render_template, request
from flask_bootstrap import Bootstrap
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, TimeField, SelectField
from wtforms.validators import DataRequired, URL
from datetime import datetime
import csv
# Create App
app = Flask(__name__)
app.config['SECRET_KEY'] = '8BYkEfBA6O6donzWlSihBXox7C0sKR6b'
Bootstrap(app)
# Choice template
rating = [('✘')]
# Cafe Form Class
class CafeForm(FlaskForm):
name = StringField('Cafe name', validators=[DataRequired()])
location = StringField('Location URL', validators=[DataRequired(), URL()])
open_time = TimeField('Open Time (HH:MM)', format='%H:%M',validators = [DataRequired()])
close_time = TimeField('Close Time (HH:MM)', format='%H:%M', validators = [DataRequired()])
coffee_rating = SelectField(
label='Coffee Rating',
validators=[DataRequired()],
choices= rating + [('☕️'*i) for i in range(1,6)]
)
wifi_rating = SelectField(
label='Wifi Rating',
validators=[DataRequired()],
choices= rating + [('💪'*i) for i in range(1,6)]
)
power_rating = SelectField(
label='Power Outlet Rating',
validators=[DataRequired()],
choices= rating+ [('🔌'*i) for i in range(1,6)]
)
submit = SubmitField('Submit')
# Homepage Route
@app.route("/")
def home():
return render_template("index.html")
# Add new Cafe Route
@app.route('/add', methods=['GET', 'POST'])
def add_cafe():
form = CafeForm()
if form.validate_on_submit():
data = [form.name.data, form.location.data, form.open_time.data.strftime('%H:%M'), form.close_time.data.strftime('%H:%M'), form.coffee_rating.data, form.wifi_rating.data, form.power_rating.data]
with open('cafe-data.csv', 'a', encoding='utf-8') as csv_file:
csv_writer = csv.writer(csv_file)
csv_writer.writerow(data)
return render_template('add.html', form=form)
# Show all cafes route
@app.route('/cafes')
def cafes():
with open('cafe-data.csv', encoding='utf-8') as csv_file:
csv_data = csv.reader(csv_file, delimiter=',')
list_of_rows = []
for row in csv_data:
list_of_rows.append(row)
return render_template('cafes.html', cafes=list_of_rows)
# Driver Code
if __name__ == '__main__':
app.run(debug=True)
| [
"flask.render_template",
"flask.Flask",
"wtforms.validators.DataRequired",
"csv.writer",
"wtforms.SubmitField",
"flask_bootstrap.Bootstrap",
"csv.reader",
"wtforms.validators.URL"
] | [((317, 332), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (322, 332), False, 'from flask import Flask, render_template, request\n'), ((395, 409), 'flask_bootstrap.Bootstrap', 'Bootstrap', (['app'], {}), '(app)\n', (404, 409), False, 'from flask_bootstrap import Bootstrap\n'), ((1331, 1352), 'wtforms.SubmitField', 'SubmitField', (['"""Submit"""'], {}), "('Submit')\n", (1342, 1352), False, 'from wtforms import StringField, SubmitField, TimeField, SelectField\n'), ((1410, 1439), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (1425, 1439), False, 'from flask import Flask, render_template, request\n'), ((1947, 1985), 'flask.render_template', 'render_template', (['"""add.html"""'], {'form': 'form'}), "('add.html', form=form)\n", (1962, 1985), False, 'from flask import Flask, render_template, request\n'), ((2264, 2313), 'flask.render_template', 'render_template', (['"""cafes.html"""'], {'cafes': 'list_of_rows'}), "('cafes.html', cafes=list_of_rows)\n", (2279, 2313), False, 'from flask import Flask, render_template, request\n'), ((2125, 2160), 'csv.reader', 'csv.reader', (['csv_file'], {'delimiter': '""","""'}), "(csv_file, delimiter=',')\n", (2135, 2160), False, 'import csv\n'), ((1877, 1897), 'csv.writer', 'csv.writer', (['csv_file'], {}), '(csv_file)\n', (1887, 1897), False, 'import csv\n'), ((540, 554), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (552, 554), False, 'from wtforms.validators import DataRequired, URL\n'), ((612, 626), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (624, 626), False, 'from wtforms.validators import DataRequired, URL\n'), ((628, 633), 'wtforms.validators.URL', 'URL', ([], {}), '()\n', (631, 633), False, 'from wtforms.validators import DataRequired, URL\n'), ((712, 726), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (724, 726), False, 'from wtforms.validators import DataRequired, URL\n'), ((808, 822), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (820, 822), False, 'from wtforms.validators import DataRequired, URL\n'), ((910, 924), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (922, 924), False, 'from wtforms.validators import DataRequired, URL\n'), ((1071, 1085), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (1083, 1085), False, 'from wtforms.validators import DataRequired, URL\n'), ((1240, 1254), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (1252, 1254), False, 'from wtforms.validators import DataRequired, URL\n')] |
import os
from chalice import Chalice
import requests
import boto3
# TODO: set this to the channel ID of the LINE Login channel
CLIENT_ID = ''
app = Chalice(app_name='backend')
def get_dynamodb_resource():
dynamo_endpoint = os.getenv('ENDPOINT_URL')
if dynamo_endpoint:
return boto3.resource('dynamodb', endpoint_url=dynamo_endpoint, region_name='us-east-1')
else:
return boto3.resource('dynamodb')
@app.route('/', cors=True, methods=['POST'])
def index():
    # Verify the ID token
# https://developers.line.biz/ja/reference/social-api/#verify-id-token
request = app.current_request
r = requests.post('https://api.line.me/oauth2/v2.1/verify', data={
'id_token': request.json_body['idToken'],
'client_id': CLIENT_ID
})
data = r.json()
    # Save the data to DynamoDB
dynamodb = get_dynamodb_resource()
table = dynamodb.Table(os.getenv('TABLE_NAME'))
table.put_item(Item=data)
    # Build the response
    # NOTE: the response below is just a sample
return {'message': 'こんにちは、{0}さん。'.format(data['name'])}
| [
"requests.post",
"boto3.resource",
"chalice.Chalice",
"os.getenv"
] | [((118, 145), 'chalice.Chalice', 'Chalice', ([], {'app_name': '"""backend"""'}), "(app_name='backend')\n", (125, 145), False, 'from chalice import Chalice\n'), ((198, 223), 'os.getenv', 'os.getenv', (['"""ENDPOINT_URL"""'], {}), "('ENDPOINT_URL')\n", (207, 223), False, 'import os\n'), ((591, 723), 'requests.post', 'requests.post', (['"""https://api.line.me/oauth2/v2.1/verify"""'], {'data': "{'id_token': request.json_body['idToken'], 'client_id': CLIENT_ID}"}), "('https://api.line.me/oauth2/v2.1/verify', data={'id_token':\n request.json_body['idToken'], 'client_id': CLIENT_ID})\n", (604, 723), False, 'import requests\n'), ((263, 349), 'boto3.resource', 'boto3.resource', (['"""dynamodb"""'], {'endpoint_url': 'dynamo_endpoint', 'region_name': '"""us-east-1"""'}), "('dynamodb', endpoint_url=dynamo_endpoint, region_name=\n 'us-east-1')\n", (277, 349), False, 'import boto3\n'), ((370, 396), 'boto3.resource', 'boto3.resource', (['"""dynamodb"""'], {}), "('dynamodb')\n", (384, 396), False, 'import boto3\n'), ((851, 874), 'os.getenv', 'os.getenv', (['"""TABLE_NAME"""'], {}), "('TABLE_NAME')\n", (860, 874), False, 'import os\n')] |
import sqlite3 as conector
conexao = None
try:
    # Open the connection and acquire a cursor
conexao = conector.connect("./meu_banco.db")
cursor = conexao.cursor()
    # Execute a command: CREATE TABLE...
comando = '''CREATE TABLE Veiculo(
placa CHARACTER(7) NOT NULL,
ano INTEGER NOT NULL,
cor TEXT NOT NULL,
proprietario INTEGER NOT NULL,
marca INTEGER NOT NULL,
PRIMARY KEY (placa),
FOREIGN KEY(proprietario) REFERENCES Pessoa(cpf),
FOREIGN KEY(marca) REFERENCES Marca(id)
);'''
cursor.execute(comando)
print("Tabela criada com sucesso!")
    # Commit the command
conexao.commit()
print("Commit realizado com sucesso!")
except conector.DatabaseError as err:
print("Erro de Banco de dados", err)
finally:
if conexao:
cursor.close()
conexao.close()
print("Conexão encerrada com sucesso!")
| [
"sqlite3.connect"
] | [((95, 129), 'sqlite3.connect', 'conector.connect', (['"""./meu_banco.db"""'], {}), "('./meu_banco.db')\n", (111, 129), True, 'import sqlite3 as conector\n')] |
import logging
import os
import wave
import mysql.connector
from bs4 import BeautifulSoup
from pydub import AudioSegment
from config import *
connection = mysql.connector.connect(
host=HOST,
database=DATABASE,
user=USER,
password=PASSWORD,
)
connection.autocommit = False
cursor = connection.cursor(dictionary=True)
logging.basicConfig(level=logging.INFO)
def get_last_insert_id(dict_cursor):
"""
return the last inserted id by this client/connection.
see also https://dev.mysql.com/doc/refman/5.7/en/mysql-insert-id.html
"""
dict_cursor.execute('select last_insert_id() as id')
return dict_cursor.fetchone()['id']
def replace_extension(file, new_extension):
root, _ = os.path.splitext(file)
if not new_extension.startswith('.'):
new_extension = '.' + new_extension
return root + new_extension
def search_directories():
try:
# init directories
os.makedirs(os.path.join(BASE_DIR, "text_audio"))
os.makedirs(os.path.join(BASE_DIR, "original_text"))
os.makedirs(os.path.join(BASE_DIR, "recording"))
except FileExistsError:
# directory already exists
pass
logging.info('Loading...')
entries = os.scandir(os.path.join(BASE_DIR, SOURCE_DIR))
for entry in entries:
for fileData in os.listdir(os.path.join(BASE_DIR, SOURCE_DIR, entry.name)):
if fileData.endswith(".xml"):
extract_data_to_db(entry.name)
logging.info('Done!')
def extract_data_to_db(folderNumber: str):
try:
logging.info('Loading ' + folderNumber)
text_path = os.path.join(SOURCE_DIR, folderNumber, 'indexes.xml')
audio_path = os.path.join(SOURCE_DIR, folderNumber, 'audio.wav')
cursor.execute('insert into source(user_id,user_group_id,path_to_raw_file,name,licence) VALUE(%s,%s,%s,%s,%s)',
[1, 1, audio_path, SOURCE_NAME, SOURCE_LICENCE])
audio_source_id = get_last_insert_id(cursor)
cursor.execute('insert into source(user_id,user_group_id,path_to_raw_file,name,licence) VALUE(%s,%s,%s,%s,%s)',
[1, 1, text_path, SOURCE_NAME, SOURCE_LICENCE])
text_source_id = get_last_insert_id(cursor)
text_path = os.path.join(BASE_DIR, text_path)
audio_path = os.path.join(BASE_DIR, audio_path)
with open(text_path, encoding='utf-8') as file:
soup = BeautifulSoup(file.read(), 'html.parser')
with wave.open(audio_path, 'rb') as f_wave:
times = {tli['id']: float(tli['time']) for tli in soup.find_all('tli')}
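            # map each timeline point id ('tli') to its time in seconds for the event slicing below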
for tier in soup.find_all('tier'):
for event in tier.find_all('event'):
start_time = times[event['start']]
end_time = times[event['end']]
duration_seconds = end_time - start_time
if duration_seconds > 0.0:
transcript_text = event.get_text()
cursor.execute(
"insert into data_element ( source_id, user_group_id, finished)values (%s, %s, %s)",
[text_source_id, 1, True]
)
element_id_1 = get_last_insert_id(cursor)
cursor.execute(
"insert into data_element ( source_id, user_group_id, finished)values (%s, %s, %s)",
[audio_source_id, 1, True]
)
element_id_2 = get_last_insert_id(cursor)
cursor.execute(
"insert into data_tuple (data_element_id_1,data_element_id_2,type)values (%s,%s,%s)",
[element_id_1, element_id_2, "TEXT_AUDIO"]
)
# TODO for now we just insert everything as standard german
cursor.execute(
"insert into text ( dialect_id, data_element_id, text)values (%s, %s, %s)",
[27, element_id_1, transcript_text]
)
cursor.execute(
"insert into audio (dialect_id,data_element_id,audio_start,audio_end,path)values (%s,%s,%s,%s,%s)",
[27, element_id_2, start_time, end_time, 'PLACEHOLDER']
)
text_audio_id = get_last_insert_id(cursor)
audio_path_to_file = os.path.join("text_audio", f'{text_audio_id}.flac')
cursor.execute('update audio set path = %s where id = %s',
[audio_path_to_file, text_audio_id])
f_wave.setpos(int(start_time * f_wave.getframerate()))
audio_bytes = f_wave.readframes(int(duration_seconds * f_wave.getframerate()))
audio_segment = AudioSegment(
data=audio_bytes,
sample_width=f_wave.getsampwidth(),
frame_rate=f_wave.getframerate(),
channels=f_wave.getnchannels(),
)
audio_segment = audio_segment.set_channels(1)
audio_segment.export(os.path.join(BASE_DIR, audio_path_to_file),
format='flac')
else:
logging.warning(
f"skipping event with start={event['start']} because its duration of {duration_seconds} is <= 0.0.")
connection.commit()
except Exception as e:
connection.rollback()
raise e
finally:
cursor.close()
connection.close()
if __name__ == '__main__':
search_directories()
| [
"logging.basicConfig",
"wave.open",
"os.path.splitext",
"os.path.join",
"logging.warning",
"logging.info"
] | [((336, 375), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (355, 375), False, 'import logging\n'), ((721, 743), 'os.path.splitext', 'os.path.splitext', (['file'], {}), '(file)\n', (737, 743), False, 'import os\n'), ((1182, 1208), 'logging.info', 'logging.info', (['"""Loading..."""'], {}), "('Loading...')\n", (1194, 1208), False, 'import logging\n'), ((1473, 1494), 'logging.info', 'logging.info', (['"""Done!"""'], {}), "('Done!')\n", (1485, 1494), False, 'import logging\n'), ((1234, 1268), 'os.path.join', 'os.path.join', (['BASE_DIR', 'SOURCE_DIR'], {}), '(BASE_DIR, SOURCE_DIR)\n', (1246, 1268), False, 'import os\n'), ((1557, 1596), 'logging.info', 'logging.info', (["('Loading ' + folderNumber)"], {}), "('Loading ' + folderNumber)\n", (1569, 1596), False, 'import logging\n'), ((1617, 1670), 'os.path.join', 'os.path.join', (['SOURCE_DIR', 'folderNumber', '"""indexes.xml"""'], {}), "(SOURCE_DIR, folderNumber, 'indexes.xml')\n", (1629, 1670), False, 'import os\n'), ((1692, 1743), 'os.path.join', 'os.path.join', (['SOURCE_DIR', 'folderNumber', '"""audio.wav"""'], {}), "(SOURCE_DIR, folderNumber, 'audio.wav')\n", (1704, 1743), False, 'import os\n'), ((2252, 2285), 'os.path.join', 'os.path.join', (['BASE_DIR', 'text_path'], {}), '(BASE_DIR, text_path)\n', (2264, 2285), False, 'import os\n'), ((2307, 2341), 'os.path.join', 'os.path.join', (['BASE_DIR', 'audio_path'], {}), '(BASE_DIR, audio_path)\n', (2319, 2341), False, 'import os\n'), ((946, 982), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""text_audio"""'], {}), "(BASE_DIR, 'text_audio')\n", (958, 982), False, 'import os\n'), ((1004, 1043), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""original_text"""'], {}), "(BASE_DIR, 'original_text')\n", (1016, 1043), False, 'import os\n'), ((1065, 1100), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""recording"""'], {}), "(BASE_DIR, 'recording')\n", (1077, 1100), False, 'import os\n'), ((1331, 1377), 'os.path.join', 'os.path.join', (['BASE_DIR', 'SOURCE_DIR', 'entry.name'], {}), '(BASE_DIR, SOURCE_DIR, entry.name)\n', (1343, 1377), False, 'import os\n'), ((2473, 2500), 'wave.open', 'wave.open', (['audio_path', '"""rb"""'], {}), "(audio_path, 'rb')\n", (2482, 2500), False, 'import wave\n'), ((4531, 4582), 'os.path.join', 'os.path.join', (['"""text_audio"""', 'f"""{text_audio_id}.flac"""'], {}), "('text_audio', f'{text_audio_id}.flac')\n", (4543, 4582), False, 'import os\n'), ((5505, 5631), 'logging.warning', 'logging.warning', (['f"""skipping event with start={event[\'start\']} because its duration of {duration_seconds} is <= 0.0."""'], {}), '(\n f"skipping event with start={event[\'start\']} because its duration of {duration_seconds} is <= 0.0."\n )\n', (5520, 5631), False, 'import logging\n'), ((5351, 5393), 'os.path.join', 'os.path.join', (['BASE_DIR', 'audio_path_to_file'], {}), '(BASE_DIR, audio_path_to_file)\n', (5363, 5393), False, 'import os\n')] |
import re
import csv
import random
emotion_map = {
'angry': set(),
'bored': set(),
'calm': set(),
'excited': set(),
'happy': set(),
'love': set(),
'sad': set(),
}
# titles = ",id,post_url,date,liked,type,timestamp,tags,photo,caption,search_query,img_file_name"
with open('data/train_set.csv', 'w+') as train_file:
train_writer = csv.writer(train_file, delimiter=',')
with open('data/test_set.csv', 'w+') as test_file:
test_writer = csv.writer(test_file, delimiter=',')
with open('data/val_set.csv', 'w+') as val_file:
val_writer = csv.writer(val_file, delimiter=',')
with open('data/dataset.csv') as f:
reader = csv.reader(f)
for line in reader:
emotion = line[-2]
post_id = line[1]
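                # first 12000 unique posts per emotion go to train, the next 2000 to test, the rest to validation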
if len(emotion_map[emotion]) < 12000:
train_writer.writerow(line)
emotion_map[emotion].add(post_id)
elif len(emotion_map[emotion]) < 14000:
test_writer.writerow(line)
emotion_map[emotion].add(post_id)
else:
val_writer.writerow(line)
emotion_map[emotion].add(post_id)
| [
"csv.writer",
"csv.reader"
] | [((338, 375), 'csv.writer', 'csv.writer', (['train_file'], {'delimiter': '""","""'}), "(train_file, delimiter=',')\n", (348, 375), False, 'import csv\n'), ((444, 480), 'csv.writer', 'csv.writer', (['test_file'], {'delimiter': '""","""'}), "(test_file, delimiter=',')\n", (454, 480), False, 'import csv\n'), ((548, 583), 'csv.writer', 'csv.writer', (['val_file'], {'delimiter': '""","""'}), "(val_file, delimiter=',')\n", (558, 583), False, 'import csv\n'), ((637, 650), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (647, 650), False, 'import csv\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2019-01-23 18:30
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('metadata', '0005_auto_20190111_0958'),
]
operations = [
migrations.AlterModelOptions(
name='catalogalias',
options={'verbose_name': 'Catalogs alias', 'verbose_name_plural': 'Catalogs Aliases'},
),
migrations.AlterModelOptions(
name='indexmetadatatask',
options={'verbose_name': 'Corrida de indexación de metadatos', 'verbose_name_plural': 'Corridas de indexación de metadatos'},
),
migrations.AlterModelOptions(
name='metadataconfig',
options={'verbose_name': 'Configuración de búsqueda de series por metadatos'},
),
migrations.AlterModelOptions(
name='synonym',
options={'verbose_name': 'Sinónimo de búsqueda', 'verbose_name_plural': 'Sinónimos de búsqueda'},
),
]
| [
"django.db.migrations.AlterModelOptions"
] | [((293, 433), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""catalogalias"""', 'options': "{'verbose_name': 'Catalogs alias', 'verbose_name_plural': 'Catalogs Aliases'}"}), "(name='catalogalias', options={'verbose_name':\n 'Catalogs alias', 'verbose_name_plural': 'Catalogs Aliases'})\n", (321, 433), False, 'from django.db import migrations\n'), ((474, 663), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""indexmetadatatask"""', 'options': "{'verbose_name': 'Corrida de indexación de metadatos',\n 'verbose_name_plural': 'Corridas de indexación de metadatos'}"}), "(name='indexmetadatatask', options={\n 'verbose_name': 'Corrida de indexación de metadatos',\n 'verbose_name_plural': 'Corridas de indexación de metadatos'})\n", (502, 663), False, 'from django.db import migrations\n'), ((699, 833), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""metadataconfig"""', 'options': "{'verbose_name': 'Configuración de búsqueda de series por metadatos'}"}), "(name='metadataconfig', options={'verbose_name':\n 'Configuración de búsqueda de series por metadatos'})\n", (727, 833), False, 'from django.db import migrations\n'), ((874, 1020), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""synonym"""', 'options': "{'verbose_name': 'Sinónimo de búsqueda', 'verbose_name_plural':\n 'Sinónimos de búsqueda'}"}), "(name='synonym', options={'verbose_name':\n 'Sinónimo de búsqueda', 'verbose_name_plural': 'Sinónimos de búsqueda'})\n", (902, 1020), False, 'from django.db import migrations\n')] |
import numpy as np
def sample_dist(stateCounts,hyperparams,Kextra):
#function dist_struct = sample_dist(stateCounts,hyperparams,Kextra)
numObj = (stateCounts.Ns).shape[2]
Kz_prev = (stateCounts.Ns).shape[0]
Kz = Kz_prev + Kextra
Ks = (stateCounts.Ns).shape[1]
# Define alpha0 and kappa0 in terms of alpha0+kappa0 and rho0:
alpha0 = hyperparams.alpha0
kappa0 = hyperparams.kappa0
sigma0 = hyperparams.sigma0
N = stateCounts.N # N(i,j) = # z_t = i to z_{t+1}=j transitions. N(Kz+1,i) = 1 for i=z_1.
Ns = stateCounts.Ns # Ns(i,j) = # s_t = j given z_t=i
    dist_struct = [{'pi_z': np.zeros((Kz, Kz)), 'pi_init': np.zeros((1, Kz)), 'pi_s': np.zeros((Kz, Ks))} for _ in range(numObj)]
beta_vec = np.ones((1,Kz))
Ntemp = np.zeros((Kz+1,Kz))
Nstemp = np.zeros((Kz,Ks))
for ii in range(numObj):
Ntemp[:Kz_prev,:Kz_prev] = N[:Kz_prev,:,ii]
Ntemp[-1,1:Kz_prev] = N[Kz_prev+1,:,ii]
Nstemp[:Kz_prev,:] = Ns[:,:,ii]
if Ks>1:
# Sample HMM-state-specific mixture weights \psi_j's with truncation
# level Ks given sampled s stats Ns:
sigma_vec = (sigma0/Ks)*np.ones((1,Ks))
else:
sigma_vec = sigma0
pi_z = np.zeros((Kz,Kz))
pi_s = np.zeros((Kz,Ks))
for j in range(Kz):
kappa_vec = np.zeros((1,Kz))
# Add an amount \kappa to Dirichlet parameter corresponding to a
# self-transition:
kappa_vec[j] = kappa0
# Sample \pi_j's given sampled \beta_vec and counts N, where
# DP(\alpha+\kappa,(\alpha\beta+\kappa\delta[j])/(\alpha+\kappa)) is
# Dirichlet distributed over the finite partition defined by beta_vec:
pi_z[j,:] = randdirichlet_unnorm([alpha0*beta_vec + kappa_vec + Ntemp[j,:]].T).T
# Sample HMM-state-specific mixture weights \psi_j's with truncation
# level Ks given sampled s stats Ns:
pi_s[j,:] = randdirichlet([Nstemp[j,:] + sigma_vec].T).T
pi_init = randdirichlet_unnorm([alpha0*beta_vec + Ntemp[Kz+1,:]].T).T
if stateCounts.Nr:
Nr = stateCounts.Nr[ii,:] # Nr(i) = # r_t = i
Kr = len(Nr)
eta0 = hyperparams.eta0
            dist_struct[ii]['pi_r'] = randdirichlet((Nr + eta0/Kr).T).T
        dist_struct[ii]['pi_z'] = pi_z
        dist_struct[ii]['pi_init'] = pi_init
        dist_struct[ii]['pi_s'] = pi_s
return dist_struct
| [
"numpy.zeros",
"numpy.ones"
] | [((710, 726), 'numpy.ones', 'np.ones', (['(1, Kz)'], {}), '((1, Kz))\n', (717, 726), True, 'import numpy as np\n'), ((738, 760), 'numpy.zeros', 'np.zeros', (['(Kz + 1, Kz)'], {}), '((Kz + 1, Kz))\n', (746, 760), True, 'import numpy as np\n'), ((769, 787), 'numpy.zeros', 'np.zeros', (['(Kz, Ks)'], {}), '((Kz, Ks))\n', (777, 787), True, 'import numpy as np\n'), ((625, 643), 'numpy.zeros', 'np.zeros', (['(Kz, Kz)'], {}), '((Kz, Kz))\n', (633, 643), True, 'import numpy as np\n'), ((653, 670), 'numpy.zeros', 'np.zeros', (['(1, Kz)'], {}), '((1, Kz))\n', (661, 670), True, 'import numpy as np\n'), ((677, 695), 'numpy.zeros', 'np.zeros', (['(Kz, Ks)'], {}), '((Kz, Ks))\n', (685, 695), True, 'import numpy as np\n'), ((1188, 1206), 'numpy.zeros', 'np.zeros', (['(Kz, Kz)'], {}), '((Kz, Kz))\n', (1196, 1206), True, 'import numpy as np\n'), ((1217, 1235), 'numpy.zeros', 'np.zeros', (['(Kz, Ks)'], {}), '((Kz, Ks))\n', (1225, 1235), True, 'import numpy as np\n'), ((1277, 1294), 'numpy.zeros', 'np.zeros', (['(1, Kz)'], {}), '((1, Kz))\n', (1285, 1294), True, 'import numpy as np\n'), ((1123, 1139), 'numpy.ones', 'np.ones', (['(1, Ks)'], {}), '((1, Ks))\n', (1130, 1139), True, 'import numpy as np\n')] |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0008_message_body'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='role',
field=models.IntegerField(choices=[(0, 'Reader'), (1, 'Moderator'), (2, 'Admin'), (3, 'Blog User'), (4, 'Spammer')], default=0),
),
]
| [
"django.db.models.IntegerField"
] | [((282, 407), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'choices': "[(0, 'Reader'), (1, 'Moderator'), (2, 'Admin'), (3, 'Blog User'), (4,\n 'Spammer')]", 'default': '(0)'}), "(choices=[(0, 'Reader'), (1, 'Moderator'), (2, 'Admin'),\n (3, 'Blog User'), (4, 'Spammer')], default=0)\n", (301, 407), False, 'from django.db import migrations, models\n')] |
from sqlalchemy.types import TypeDecorator, CHAR
import uuid
class GUIDField(TypeDecorator):
    # Platform-independent GUID implementation that uses little-endian byte order.
impl = CHAR
def load_dialect_impl(self, dialect):
return dialect.type_descriptor(CHAR(32))
def process_bind_param(self, value, dialect):
if value is None:
return value
else:
if isinstance(value, uuid.UUID):
return value.bytes_le
def process_result_value(self, value, dialect):
if value is None:
return value
else:
return uuid.UUID(bytes_le=value)
| [
"sqlalchemy.types.CHAR",
"uuid.UUID"
] | [((268, 276), 'sqlalchemy.types.CHAR', 'CHAR', (['(32)'], {}), '(32)\n', (272, 276), False, 'from sqlalchemy.types import TypeDecorator, CHAR\n'), ((614, 639), 'uuid.UUID', 'uuid.UUID', ([], {'bytes_le': 'value'}), '(bytes_le=value)\n', (623, 639), False, 'import uuid\n')] |
import json
import torch
import torch.nn.functional as F
from sklearn.utils import shuffle
from tqdm import tqdm
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
from common.training.batcher import Batcher, prepare, prepare_with_labels
from common.util.random import SimpleRandom
def evaluate(model,data,labels,batch_size):
predicted = predict(model,data,batch_size)
return accuracy_score(labels,predicted.data.numpy().reshape(-1))
def predict(model, data, batch_size):
batcher = Batcher(data, batch_size)
predicted = []
for batch, size, start, end in batcher:
d = prepare(batch)
model.eval()
logits = model(d).cpu()
predicted.extend(torch.max(logits, 1)[1])
return torch.stack(predicted)
def train(model, fs, batch_size, lr, epochs,dev=None, clip=None, early_stopping=None,name=None):
optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=1e-4)
data, labels = fs
if dev is not None:
dev_data,dev_labels = dev
for epoch in tqdm(range(epochs)):
epoch_loss = 0
epoch_data = 0
shuffle(data,labels)
batcher = Batcher(data, batch_size)
for batch, size, start, end in batcher:
d,gold = prepare_with_labels(batch,labels[start:end])
model.train()
optimizer.zero_grad()
logits = model(d)
loss = F.cross_entropy(logits, gold)
loss.backward()
epoch_loss += loss.cpu()
epoch_data += size
if clip is not None:
                torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
optimizer.step()
print("Average epoch loss: {0}".format((epoch_loss/epoch_data).data.numpy()))
#print("Epoch Train Accuracy {0}".format(evaluate(model, data, labels, batch_size)))
if dev is not None:
acc = evaluate(model,dev_data,dev_labels,batch_size)
print("Epoch Dev Accuracy {0}".format(acc))
if early_stopping is not None and early_stopping(model,acc):
break
if dev is not None and early_stopping is not None:
early_stopping.set_best_state(model)
def print_evaluation(model,data,ls,log=None):
features,actual = data
predictions = predict(model, features, 500).data.numpy().reshape(-1).tolist()
labels = [ls.idx[i] for i, _ in enumerate(ls.idx)]
actual = [labels[i] for i in actual]
predictions = [labels[i] for i in predictions]
print(accuracy_score(actual, predictions))
print(classification_report(actual, predictions))
print(confusion_matrix(actual, predictions))
data = zip(actual,predictions)
if log is not None:
f = open(log, "w+")
for a,p in data:
f.write(json.dumps({"actual": a, "predicted": p}) + "\n")
f.close()
| [
"common.training.batcher.prepare",
"common.training.batcher.prepare_with_labels",
"sklearn.metrics.classification_report",
"sklearn.utils.shuffle",
"torch.stack",
"torch.max",
"json.dumps",
"common.training.batcher.Batcher",
"torch.nn.functional.cross_entropy",
"sklearn.metrics.accuracy_score",
"sklearn.metrics.confusion_matrix"
] | [((532, 557), 'common.training.batcher.Batcher', 'Batcher', (['data', 'batch_size'], {}), '(data, batch_size)\n', (539, 557), False, 'from common.training.batcher import Batcher, prepare, prepare_with_labels\n'), ((764, 786), 'torch.stack', 'torch.stack', (['predicted'], {}), '(predicted)\n', (775, 786), False, 'import torch\n'), ((634, 648), 'common.training.batcher.prepare', 'prepare', (['batch'], {}), '(batch)\n', (641, 648), False, 'from common.training.batcher import Batcher, prepare, prepare_with_labels\n'), ((1139, 1160), 'sklearn.utils.shuffle', 'shuffle', (['data', 'labels'], {}), '(data, labels)\n', (1146, 1160), False, 'from sklearn.utils import shuffle\n'), ((1179, 1204), 'common.training.batcher.Batcher', 'Batcher', (['data', 'batch_size'], {}), '(data, batch_size)\n', (1186, 1204), False, 'from common.training.batcher import Batcher, prepare, prepare_with_labels\n'), ((2538, 2573), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['actual', 'predictions'], {}), '(actual, predictions)\n', (2552, 2573), False, 'from sklearn.metrics import accuracy_score, confusion_matrix, classification_report\n'), ((2585, 2627), 'sklearn.metrics.classification_report', 'classification_report', (['actual', 'predictions'], {}), '(actual, predictions)\n', (2606, 2627), False, 'from sklearn.metrics import accuracy_score, confusion_matrix, classification_report\n'), ((2639, 2676), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['actual', 'predictions'], {}), '(actual, predictions)\n', (2655, 2676), False, 'from sklearn.metrics import accuracy_score, confusion_matrix, classification_report\n'), ((1275, 1320), 'common.training.batcher.prepare_with_labels', 'prepare_with_labels', (['batch', 'labels[start:end]'], {}), '(batch, labels[start:end])\n', (1294, 1320), False, 'from common.training.batcher import Batcher, prepare, prepare_with_labels\n'), ((1431, 1460), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['logits', 'gold'], {}), '(logits, gold)\n', (1446, 1460), True, 'import torch.nn.functional as F\n'), ((728, 748), 'torch.max', 'torch.max', (['logits', '(1)'], {}), '(logits, 1)\n', (737, 748), False, 'import torch\n'), ((2811, 2852), 'json.dumps', 'json.dumps', (["{'actual': a, 'predicted': p}"], {}), "({'actual': a, 'predicted': p})\n", (2821, 2852), False, 'import json\n')] |
import numpy as np
import pybullet as pb
import time
from nuro_arm.robot.robot_arm import RobotArm
from nuro_arm.constants import CUBE_SIZE
robot = RobotArm('sim', headless=False)
# make GUI view better
pb.resetDebugVisualizerCamera(cameraDistance=1.5,
cameraYaw=50,
cameraPitch=-40,
cameraTargetPosition=(-0.45, 0.35, -0.4))
robot.set_gripper_state(0.5)
client = robot.controller._client
pb.setGravity(0,0,0,client)
# create a visual-only cube body (no collision shape is attached)
id_ = pb.createVisualShape(pb.GEOM_BOX,
halfExtents=3*[CUBE_SIZE/2],
rgbaColor=[0.1,0.1,0.8,0.5])
pos_body = [0, 0, 0]
body = pb.createMultiBody(1, -1, id_, pos_body)
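# debug sliders: x/y/z control the cube position; the pitch/roll sliders added
# below only take effect when the orientation toggle is switched on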
dbg_params = {
'x' : pb.addUserDebugParameter('cube_x', rangeMin=0., rangeMax= 0.25,
startValue= 0.15, physicsClientId=client),
'y' : pb.addUserDebugParameter('cube_y', rangeMin=-0.15, rangeMax= 0.15,
startValue= 0.0, physicsClientId=client),
'z' : pb.addUserDebugParameter('cube_z', rangeMin=CUBE_SIZE/2, rangeMax= 0.45,
startValue= 0.2, physicsClientId=client),
}
d_toggle = pb.addUserDebugParameter('toggle orientation specification',
1, 0, 0,
physicsClientId=client)
dbg_params.update({
'pitch': pb.addUserDebugParameter('gripper_pitch', rangeMin=0, rangeMax=np.pi,
startValue= 2*np.pi, physicsClientId=client),
'roll' : pb.addUserDebugParameter('gripper_roll', rangeMin=-np.pi/2, rangeMax= np.pi/2,
startValue= 0.0, physicsClientId=client),
})
dbg_values = {d:0 for d,i in dbg_params.items()}
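# main loop: poll the sliders, reset the cube pose when x/y/z change, and
# re-command the arm (optionally with an explicit gripper orientation)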
while True:
button_val = pb.readUserDebugParameter(d_toggle, physicsClientId=client)
reset_cube = False
move_arm = False
for name, prm in dbg_params.items():
new_val = pb.readUserDebugParameter(prm, physicsClientId=client)
if abs(new_val-dbg_values[name]) > 1e-4:
dbg_values[name] = new_val
if name in 'xyz':
reset_cube = True
move_arm = True
elif name in ('pitch', 'roll') and button_val % 2 == 1:
move_arm = True
pos = (dbg_values['x'],dbg_values['y'],dbg_values['z'])
if reset_cube:
pb.resetBasePositionAndOrientation(body, pos, (0,0,0,1),physicsClientId=client)
if move_arm:
        # first teleport to a good initial arm joint position; this should work
        # for most positions in the workspace
if button_val % 2 == 0:
robot.move_hand_to(pos)
else:
robot.move_hand_to(pos, (dbg_values['pitch'],dbg_values['roll']))
print('done')
time.sleep(0.1)
| [
"nuro_arm.robot.robot_arm.RobotArm",
"pybullet.resetDebugVisualizerCamera",
"pybullet.readUserDebugParameter",
"pybullet.createMultiBody",
"pybullet.addUserDebugParameter",
"pybullet.setGravity",
"time.sleep",
"pybullet.createVisualShape",
"pybullet.resetBasePositionAndOrientation"
] | [((150, 181), 'nuro_arm.robot.robot_arm.RobotArm', 'RobotArm', (['"""sim"""'], {'headless': '(False)'}), "('sim', headless=False)\n", (158, 181), False, 'from nuro_arm.robot.robot_arm import RobotArm\n'), ((206, 333), 'pybullet.resetDebugVisualizerCamera', 'pb.resetDebugVisualizerCamera', ([], {'cameraDistance': '(1.5)', 'cameraYaw': '(50)', 'cameraPitch': '(-40)', 'cameraTargetPosition': '(-0.45, 0.35, -0.4)'}), '(cameraDistance=1.5, cameraYaw=50, cameraPitch\n =-40, cameraTargetPosition=(-0.45, 0.35, -0.4))\n', (235, 333), True, 'import pybullet as pb\n'), ((483, 513), 'pybullet.setGravity', 'pb.setGravity', (['(0)', '(0)', '(0)', 'client'], {}), '(0, 0, 0, client)\n', (496, 513), True, 'import pybullet as pb\n'), ((527, 629), 'pybullet.createVisualShape', 'pb.createVisualShape', (['pb.GEOM_BOX'], {'halfExtents': '(3 * [CUBE_SIZE / 2])', 'rgbaColor': '[0.1, 0.1, 0.8, 0.5]'}), '(pb.GEOM_BOX, halfExtents=3 * [CUBE_SIZE / 2],\n rgbaColor=[0.1, 0.1, 0.8, 0.5])\n', (547, 629), True, 'import pybullet as pb\n'), ((701, 741), 'pybullet.createMultiBody', 'pb.createMultiBody', (['(1)', '(-1)', 'id_', 'pos_body'], {}), '(1, -1, id_, pos_body)\n', (719, 741), True, 'import pybullet as pb\n'), ((1228, 1325), 'pybullet.addUserDebugParameter', 'pb.addUserDebugParameter', (['"""toggle orientation specification"""', '(1)', '(0)', '(0)'], {'physicsClientId': 'client'}), "('toggle orientation specification', 1, 0, 0,\n physicsClientId=client)\n", (1252, 1325), True, 'import pybullet as pb\n'), ((768, 877), 'pybullet.addUserDebugParameter', 'pb.addUserDebugParameter', (['"""cube_x"""'], {'rangeMin': '(0.0)', 'rangeMax': '(0.25)', 'startValue': '(0.15)', 'physicsClientId': 'client'}), "('cube_x', rangeMin=0.0, rangeMax=0.25, startValue=\n 0.15, physicsClientId=client)\n", (792, 877), True, 'import pybullet as pb\n'), ((917, 1026), 'pybullet.addUserDebugParameter', 'pb.addUserDebugParameter', (['"""cube_y"""'], {'rangeMin': '(-0.15)', 'rangeMax': '(0.15)', 'startValue': '(0.0)', 'physicsClientId': 'client'}), "('cube_y', rangeMin=-0.15, rangeMax=0.15,\n startValue=0.0, physicsClientId=client)\n", (941, 1026), True, 'import pybullet as pb\n'), ((1068, 1185), 'pybullet.addUserDebugParameter', 'pb.addUserDebugParameter', (['"""cube_z"""'], {'rangeMin': '(CUBE_SIZE / 2)', 'rangeMax': '(0.45)', 'startValue': '(0.2)', 'physicsClientId': 'client'}), "('cube_z', rangeMin=CUBE_SIZE / 2, rangeMax=0.45,\n startValue=0.2, physicsClientId=client)\n", (1092, 1185), True, 'import pybullet as pb\n'), ((1831, 1890), 'pybullet.readUserDebugParameter', 'pb.readUserDebugParameter', (['d_toggle'], {'physicsClientId': 'client'}), '(d_toggle, physicsClientId=client)\n', (1856, 1890), True, 'import pybullet as pb\n'), ((2822, 2837), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (2832, 2837), False, 'import time\n'), ((1427, 1546), 'pybullet.addUserDebugParameter', 'pb.addUserDebugParameter', (['"""gripper_pitch"""'], {'rangeMin': '(0)', 'rangeMax': 'np.pi', 'startValue': '(2 * np.pi)', 'physicsClientId': 'client'}), "('gripper_pitch', rangeMin=0, rangeMax=np.pi,\n startValue=2 * np.pi, physicsClientId=client)\n", (1451, 1546), True, 'import pybullet as pb\n'), ((1592, 1718), 'pybullet.addUserDebugParameter', 'pb.addUserDebugParameter', (['"""gripper_roll"""'], {'rangeMin': '(-np.pi / 2)', 'rangeMax': '(np.pi / 2)', 'startValue': '(0.0)', 'physicsClientId': 'client'}), "('gripper_roll', rangeMin=-np.pi / 2, rangeMax=np.\n pi / 2, startValue=0.0, physicsClientId=client)\n", (1616, 1718), True, 'import pybullet as pb\n'), 
((1994, 2048), 'pybullet.readUserDebugParameter', 'pb.readUserDebugParameter', (['prm'], {'physicsClientId': 'client'}), '(prm, physicsClientId=client)\n', (2019, 2048), True, 'import pybullet as pb\n'), ((2421, 2509), 'pybullet.resetBasePositionAndOrientation', 'pb.resetBasePositionAndOrientation', (['body', 'pos', '(0, 0, 0, 1)'], {'physicsClientId': 'client'}), '(body, pos, (0, 0, 0, 1), physicsClientId\n =client)\n', (2455, 2509), True, 'import pybullet as pb\n')] |
"""add sub status info for orders
Revision ID: 2843d6469f25
Revises: <KEY>
Create Date: 2014-09-16 12:31:15.181380
"""
# revision identifiers, used by Alembic.
revision = '2843d6469f25'
down_revision = '<KEY>'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('orders', sa.Column('sub_status', sa.String(length=36), nullable=True))
op.add_column('orders', sa.Column('sub_status_message', sa.String(length=255), nullable=True))
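def downgrade():
    # minimal sketch of the reverse migration: drop the two columns added above
    op.drop_column('orders', 'sub_status_message')
    op.drop_column('orders', 'sub_status')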
| [
"sqlalchemy.String"
] | [((330, 350), 'sqlalchemy.String', 'sa.String', ([], {'length': '(36)'}), '(length=36)\n', (339, 350), True, 'import sqlalchemy as sa\n'), ((428, 449), 'sqlalchemy.String', 'sa.String', ([], {'length': '(255)'}), '(length=255)\n', (437, 449), True, 'import sqlalchemy as sa\n')] |
import threading
from collections import Counter
from datetime import datetime
import alpaca_trade_api as tradeapi
import yfinance as yf
import trading_constants
import utils.json_simplifier as json_simp
import yf_extender as yf_ext
from utils import alerts
import API_KEYS
# Alpaca Dashboard: https://app.alpaca.markets/paper/dashboard/overview
def initializeApAccount():
global api
api = tradeapi.REST(API_KEYS.TRADE_API_KEY_ID, API_KEYS.TRADE_API_SECRET_KEY,
base_url="https://paper-api.alpaca.markets")
purchased = {}
sold = {}
buying_power = trading_constants.starting_account_value
account_value = trading_constants.starting_account_value
lock = threading.Lock()
def buy_stock(ticker_symbol: str, quantity: int):
with lock:
global buying_power
json_simp.read_json()
purchased_copy = dict(purchased)
ticker = yf.Ticker(ticker_symbol)
stock_info = yf_ext.get_stock_state(ticker)
if ticker_symbol not in purchased_copy and buying_power > (quantity * stock_info['Close']):
api.submit_order(
symbol=ticker_symbol,
qty=quantity,
side='buy',
type='market',
time_in_force='day'
)
stock_info['Quantity'] = quantity
purchased[ticker_symbol] = stock_info
console_output = "Buying " + ticker_symbol + " Quantity: {0}".format(stock_info['Quantity']) + "\n"
print(console_output, end=' ')
buying_power -= (quantity * yf_ext.get_stock_state(ticker)['Close'])
alerts.say_beep(1)
json_simp.updated_purchased()
json_simp.read_json()
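# Close the position with the broker, record the realized difference in the
# sold ledger and release the cash back into buying power.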
def sell_stock(ticker_symbol: str):
api.close_position(ticker_symbol)
global buying_power
refresh_account_balance()
sold_copy = dict(sold)
ticker = yf.Ticker(ticker_symbol)
stock_info = Counter(yf_ext.get_stock_state(ticker))
purchased_copy = dict(purchased)
console_output = "Selling " + ticker_symbol + " Quantity: {0}".format(stock_info['Quantity']) + "\n"
if ticker_symbol not in sold_copy and ticker_symbol != "":
purchase_info = Counter(purchased.pop(ticker_symbol))
console_output = "Selling " + ticker_symbol + " Quantity: {0}".format(purchase_info['Quantity']) + "\n"
stock_info.pop('Time')
purchase_info.pop('Time')
stock_info.subtract(purchase_info)
stock_info['Time'] = datetime.now().strftime("%H:%M:%S")
sold[ticker_symbol] = stock_info
buying_power += stock_info['Close'] * abs(stock_info['Quantity'])
elif ticker_symbol in purchased_copy:
purchase_info = Counter(purchased.pop(ticker_symbol))
console_output = "Selling " + ticker_symbol + " Quantity: {0}".format(purchase_info['Quantity']) + "\n"
sold_info = Counter(sold.pop(ticker_symbol))
stock_info.pop('Time')
purchase_info.pop('Time')
sold_info.pop('Time')
stock_info.subtract(purchase_info)
        # accumulate this sale into the previously recorded totals for every key
        for i in set(stock_info) | set(sold_info):
stock_info[i] = stock_info[i] + sold_info[i]
stock_info['Time'] = datetime.now().strftime("%H:%M:%S")
sold[ticker_symbol] = stock_info
buying_power += stock_info['Close'] * abs(stock_info['Quantity'])
json_simp.updated_purchased()
json_simp.updated_sold()
json_simp.read_json()
print(console_output, end=' ')
alerts.say_beep(2)
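# Recompute buying power and account value from the starting balance using the
# open positions and the recorded sales.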
def refresh_account_balance():
with lock:
global buying_power
global account_value
json_simp.read_json()
buying_power = trading_constants.starting_account_value
account_value = trading_constants.starting_account_value
purchased_copy = dict(purchased)
sold_copy = dict(sold)
for ticker_symbol in purchased_copy:
current_ticker_price = yf_ext.get_stock_state(yf.Ticker(ticker_symbol))['Close']
purchased_ticker_price = purchased_copy[ticker_symbol]['Close']
purchased_ticker_quantity = purchased_copy[ticker_symbol]['Quantity']
account_value += current_ticker_price - purchased_ticker_price
buying_power -= purchased_ticker_price * purchased_ticker_quantity
for ticker_symbol in sold_copy:
temp = sold[ticker_symbol]['Close'] * abs(sold[ticker_symbol]['Quantity'])
buying_power += temp
account_value += temp
def print_account_status():
refresh_account_balance()
print("Buying Power {0}".format((buying_power * 1000) / 1000))
print("Account Value {0}".format((account_value * 1000) / 1000))
| [
"alpaca_trade_api.REST",
"utils.json_simplifier.updated_purchased",
"threading.Lock",
"utils.json_simplifier.updated_sold",
"utils.json_simplifier.read_json",
"utils.alerts.say_beep",
"datetime.datetime.now",
"yfinance.Ticker",
"yf_extender.get_stock_state"
] | [((692, 708), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (706, 708), False, 'import threading\n'), ((404, 524), 'alpaca_trade_api.REST', 'tradeapi.REST', (['API_KEYS.TRADE_API_KEY_ID', 'API_KEYS.TRADE_API_SECRET_KEY'], {'base_url': '"""https://paper-api.alpaca.markets"""'}), "(API_KEYS.TRADE_API_KEY_ID, API_KEYS.TRADE_API_SECRET_KEY,\n base_url='https://paper-api.alpaca.markets')\n", (417, 524), True, 'import alpaca_trade_api as tradeapi\n'), ((1879, 1903), 'yfinance.Ticker', 'yf.Ticker', (['ticker_symbol'], {}), '(ticker_symbol)\n', (1888, 1903), True, 'import yfinance as yf\n'), ((3323, 3352), 'utils.json_simplifier.updated_purchased', 'json_simp.updated_purchased', ([], {}), '()\n', (3350, 3352), True, 'import utils.json_simplifier as json_simp\n'), ((3357, 3381), 'utils.json_simplifier.updated_sold', 'json_simp.updated_sold', ([], {}), '()\n', (3379, 3381), True, 'import utils.json_simplifier as json_simp\n'), ((3386, 3407), 'utils.json_simplifier.read_json', 'json_simp.read_json', ([], {}), '()\n', (3405, 3407), True, 'import utils.json_simplifier as json_simp\n'), ((3447, 3465), 'utils.alerts.say_beep', 'alerts.say_beep', (['(2)'], {}), '(2)\n', (3462, 3465), False, 'from utils import alerts\n'), ((812, 833), 'utils.json_simplifier.read_json', 'json_simp.read_json', ([], {}), '()\n', (831, 833), True, 'import utils.json_simplifier as json_simp\n'), ((892, 916), 'yfinance.Ticker', 'yf.Ticker', (['ticker_symbol'], {}), '(ticker_symbol)\n', (901, 916), True, 'import yfinance as yf\n'), ((938, 968), 'yf_extender.get_stock_state', 'yf_ext.get_stock_state', (['ticker'], {}), '(ticker)\n', (960, 968), True, 'import yf_extender as yf_ext\n'), ((1649, 1678), 'utils.json_simplifier.updated_purchased', 'json_simp.updated_purchased', ([], {}), '()\n', (1676, 1678), True, 'import utils.json_simplifier as json_simp\n'), ((1687, 1708), 'utils.json_simplifier.read_json', 'json_simp.read_json', ([], {}), '()\n', (1706, 1708), True, 'import utils.json_simplifier as json_simp\n'), ((1929, 1959), 'yf_extender.get_stock_state', 'yf_ext.get_stock_state', (['ticker'], {}), '(ticker)\n', (1951, 1959), True, 'import yf_extender as yf_ext\n'), ((3579, 3600), 'utils.json_simplifier.read_json', 'json_simp.read_json', ([], {}), '()\n', (3598, 3600), True, 'import utils.json_simplifier as json_simp\n'), ((1621, 1639), 'utils.alerts.say_beep', 'alerts.say_beep', (['(1)'], {}), '(1)\n', (1636, 1639), False, 'from utils import alerts\n'), ((2478, 2492), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2490, 2492), False, 'from datetime import datetime\n'), ((1568, 1598), 'yf_extender.get_stock_state', 'yf_ext.get_stock_state', (['ticker'], {}), '(ticker)\n', (1590, 1598), True, 'import yf_extender as yf_ext\n'), ((3167, 3181), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3179, 3181), False, 'from datetime import datetime\n'), ((3907, 3931), 'yfinance.Ticker', 'yf.Ticker', (['ticker_symbol'], {}), '(ticker_symbol)\n', (3916, 3931), True, 'import yfinance as yf\n')] |
# ---------------------------------------------------------------------------
# HomeKit support for MTDA
# ---------------------------------------------------------------------------
#
# This software is a part of MTDA.
# Copyright (c) Mentor, a Siemens business, 2017-2020
#
# ---------------------------------------------------------------------------
# SPDX-License-Identifier: MIT
# ---------------------------------------------------------------------------
# System imports
import abc
import os
# HAP-Python imports
from pyhap.accessory import Accessory
from pyhap.accessory_driver import AccessoryDriver
import pyhap.const as Category
# Local imports
from mtda.assistant.assistant import Assistant
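# HomeKit outlet accessory that mirrors and controls the MTDA target power state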
class PowerSwitch(Accessory):
category = Category.CATEGORY_OUTLET
def __init__(self, mtda, *args, **kwargs):
super().__init__(*args, **kwargs)
self.mtda = mtda
serv_switch = self.add_preload_service('Outlet')
self.relay_on = serv_switch.configure_char(
'On', setter_callback=self.set_relay)
self.relay_in_use = serv_switch.configure_char(
'OutletInUse', setter_callback=self.get_relay_in_use)
def get_relay(self, status=None):
if status is None:
status = self.mtda.target_status()
return "1" if status == "ON" else 0
def relay_changed(self, status):
self.mtda.debug(3, "mtda.assistant.homekit.relay_changed(%s)" % status)
result = self.get_relay(status)
self.relay_on.set_value(result)
self.mtda.debug(3, "mtda.assistant.homekit.relay_changed(): "
"%s" % str(result))
return result
def set_relay(self, state):
self.mtda.debug(3, "mtda.assistant.homekit.set_relay()")
result = self.get_relay()
if result != state:
if state == 1:
self.mtda.target_on('homekit')
else:
self.mtda.target_off('homekit')
result = self.get_relay()
self.mtda.debug(3, "mtda.assistant.homekit.set_relay(): "
"%s" % str(result))
return result
def get_relay_in_use(self, state):
return True
def setup_message(self):
self.mtda.debug(3, "mtda.assistant.homekit.setup_message()")
pincode = self.driver.state.pincode.decode()
result = self.mtda.env_set('homekit-setup-code', pincode, 'homekit')
self.mtda.debug(3, "mtda.assistant.homekit.setup_message(): "
"%s" % str(result))
return result
class HomeKitAssistant(Assistant):
def __init__(self, mtda):
self.mtda = mtda
self.accessory = None
self.name = "MTDA"
self.port = 51826
self.state = "/var/lib/mtda/homekit.state"
def configure(self, conf):
self.mtda.debug(3, "mtda.assistant.homekit.configure()")
result = True
if 'name' in conf:
self.name = conf['name']
if 'port' in conf:
self.port = int(conf['port'], 10)
if 'state' in conf:
self.state = conf['state']
dir = os.path.dirname(self.state)
os.makedirs(dir, mode=0o755, exist_ok=True)
self.mtda.debug(3, "mtda.assistant.homekit.configure(): "
"%s" % str(result))
return result
def probe(self):
return True
def power_changed(self, status):
self.accessory.relay_changed(status)
def start(self):
drv = AccessoryDriver(persist_file=self.state, port=self.port)
self.accessory = PowerSwitch(self.mtda, drv, self.name)
drv.add_accessory(self.accessory)
drv.start_service()
def instantiate(mtda):
return HomeKitAssistant(mtda)
| [
"os.path.dirname",
"pyhap.accessory_driver.AccessoryDriver",
"os.makedirs"
] | [((3146, 3173), 'os.path.dirname', 'os.path.dirname', (['self.state'], {}), '(self.state)\n', (3161, 3173), False, 'import os\n'), ((3182, 3223), 'os.makedirs', 'os.makedirs', (['dir'], {'mode': '(493)', 'exist_ok': '(True)'}), '(dir, mode=493, exist_ok=True)\n', (3193, 3223), False, 'import os\n'), ((3523, 3579), 'pyhap.accessory_driver.AccessoryDriver', 'AccessoryDriver', ([], {'persist_file': 'self.state', 'port': 'self.port'}), '(persist_file=self.state, port=self.port)\n', (3538, 3579), False, 'from pyhap.accessory_driver import AccessoryDriver\n')] |
import io
from random import getrandbits
import sys
from lmfit import minimize, report_fit, Parameters
import numpy as np
from .saturation_calc import cw_spec, sat_residual
# Lists of fitting algorithms, separated by whether they are custom
# implementations or wrappers of lmfit functions
lmfit_algos = ["simplex", "levmar", "mcmc", "grid"]
custom_algos = ["montecarlo", "genetic"]
def fit_sat(params_fit=Parameters(),
params_nonfit={'shiftg': -3.9, 'nort': 20},
bgrid=np.reshape(np.linspace(-60, 60, 256) + 3360, (1, -1)),
spec_expt=np.zeros((1, 128)), b1_list=[0.1], weights=[1],
algo_choice="simplex", **fit_kws):
"""
Minimize the [sat_residual()] function using the specified
arguments and algorithm.
Calls on the [fit()] function to do so.
Args:
params_fit (dict, optional): Parameters to be varied and their initial
values.
params_nonfit (dict, optional): Parameters to be held constant and
their associated values.
bgrid (np.ndarray, optional): Grid of magnetic field values in Gauss,
need not be uniformly spaced.
spec_expt (np.ndarray, optional): Experimental data used to calculate
the residual.
b1_list (list, optional): List of b1 values associated with rows of
[bgrid] and [spec_expt].
weights (list, optional): List of weights to be applied to residuals.
algo_choice (str, optional): Name of algorithm used for minimization.
Possible values are as follows: "simplex", "levmar", "mcmc",
"grid", "montecarlo", or "genetic".
**fit_kws (dict, optional): Keyword arguments to be passed into the
chosen fitting algorithm. "montecarlo" will require argument
"montecarlo_trial_count" to dictate how many trials it conducts.
"genetic" will require arguments "genetic_generation_count",
"genetic_generation_size", and "genetic_survival_rate" to dictate
the number of generations produced, the number of trials in each
generation, and how many trials in each generation will be kept for
the next generation, respectively. Other fitting algorithms will
require arguments as documented by [lmfit].
Returns:
A mapping of parameters in [params] to the optimal values computed by
the selected fitting algorithm.
Return type:
[lmfit.Parameters]
"""
# Compile keyword arguments
params_nonfit["bgrid"] = bgrid
params_nonfit["spec_expt"] = spec_expt
params_nonfit["b1_list"] = b1_list
params_nonfit["weights"] = weights
# Run fitting
return fit(sat_residual_wrapper, params_fit, kws=params_nonfit,
algo_choice=algo_choice, **fit_kws)
def sat_residual_wrapper(params, **kws):
"""
[sat_residual()] restructured to conform with constraints
of [lmfit] residual functions.
See [sat_residual()] for full documentation of arguments.
Args:
params (lmfit.Parameters): all bindings that would be stored in
[params_fit] for [sat_residual()]
**kws (dict, optional): all bindings that would be stored in
[params_nonfit] for [sat_residual()], as well as
all other optional arguments of [sat_residual()].
Returns:
Residual values computed by [sat_residual()].
Return type:
[numpy.ndarray]
"""
params_fit = dict(params.valuesdict())
params_nonfit = kws
bgrid = params_nonfit.pop("bgrid")
spec_expt = params_nonfit.pop("spec_expt")
b1_list = params_nonfit.pop("b1_list")
weights = params_nonfit.pop("weights")
# Get output
return sat_residual(params_fit=params_fit,
params_nonfit=params_nonfit,
bgrid=bgrid,
spec_expt=spec_expt,
b1_list=b1_list,
weights=weights)
def dummy_sat_residual_wrapper(params, **kws):
"""
Toy version of [sat_residual_wrapper()] to be used for sanity checking.
Args:
params (lmfit.Parameters): bindings for floats "scale", "dx", "dy", and
"dz". Optimal value bindings are {"scale": 1.0, "dx": 7.2,
"dy": 7.2, "dz": 8}.
**kws (dict, optional): bindings for floats "nort", "b1", and "c20".
Best set to {"nort": 0, "b1": 0.5, "c20": 0}.
Returns:
Residual values.
Return type:
[numpy.ndarray]
"""
bgrid = np.linspace(-60, 60, 256) + 3360
# Construct fake data
spec_expt = cw_spec(bgrid=bgrid, params_in={**kws, **{'scale': 1.0,
'dx': 7.2,
'dy': 7.2,
'dz': 8}},
basis_file='xoxo', prune_on=0)[1]
# Construct the same model as [sat_residual_wrapper()], but with simple
# arguments
spec_simulated = cw_spec(bgrid=bgrid, params_in={**kws,
**{x: params[x].value
for x in params}},
basis_file='xoxo',
prune_on=False)[1]
# Return residual value
return spec_expt - spec_simulated
def fit(residual_function, params, args=None, kws=None, algo_choice="simplex",
**fit_kws):
"""
Minimize a residual function using the specified algorithm.
Args:
residual_function (function): The function to minimize. Must have a
signature compatible with the [lmfit] requirements,
[residual_function(params, *args, **kws)]. See [lmfit] for more
details.
params (lmfit.Parameters): Information on parameters to be passed into
[residual_function()]. Controls initial value, constraints on
potential values, whether or not to vary each parameter, and how to
do so. "montecarlo" and "genetic" fitting algorithms require [min]
and [max] values for parameters. Other fitting algorithms have
requirements as documented by [lmfit].
args (tuple, optional): Positional argument values to be passed into
[residual_function]. These values will not be varied as a part of
the fitting process.
kws (list, optional): Keyword argument values to be passed into
[residual_function]. These values will not be varied as a part of
the fitting process.
algo_choice (str, optional): Name of algorithm used for minimization.
Possible values are as follows: "simplex", "levmar", "mcmc",
"grid", "montecarlo", or "genetic".
**fit_kws (dict, optional): Keyword arguments to be passed into the
chosen fitting algorithm. "montecarlo" will require argument
"montecarlo_trial_count" to dictate how many trials it conducts.
"genetic" will require arguments "genetic_generation_count",
"genetic_generation_size", and "genetic_survival_rate" to dictate
the number of generations produced, the number of trials in each
generation, and how many trials in each generation will be kept for
the next generation, respectively. Other fitting algorithms will
require arguments as documented by [lmfit].
Returns:
A mapping of parameters in [params] to the optimal values computed by
the selected fitting algorithm.
Return type:
[lmfit.Parameters]
"""
# Differentiate between custom fitting and [lmfit] fitting
if algo_choice in custom_algos:
return __custom_fit(residual_function, params, algo_choice, args=args,
kws=kws, **fit_kws)
elif algo_choice in lmfit_algos:
return __lmfit_fit(residual_function, params, algo_choice, args=args,
kws=kws, **fit_kws)
else:
raise ValueError("algo_choice invalid")
def __lmfit_fit(residual_function, params, algo_choice, args=None, kws=None,
**fit_kws):
"""Process calls for [lmfit] fitting."""
method = "nelder" if algo_choice == "simplex" else \
"leastsq" if algo_choice == "levmar" else \
"emcee" if algo_choice == "mcmc" else \
"brute" if algo_choice == "grid" else None
if method is None:
raise ValueError("algo_choice invalid")
# Switch output channel to suppress printing during fitting process
old_stdout = sys.stdout
new_stdout = io.StringIO()
sys.stdout = new_stdout
# Call [lmfit.minimize()]
out = minimize(residual_function, params, method=method, nan_policy='omit',
args=args, kws=kws, **fit_kws)
# Return to original output channel
sys.stdout = old_stdout
# Print report of fitting results
report_fit(out.params)
# Return value bindings
return out.params
def __custom_fit(residual_function, params, algo_choice, args=None, kws=None,
**fit_kws):
"""Process calls for custom fitting."""
# Handle NoneType arguments
args = [] if args is None else args
kws = {} if kws is None else kws
# Run fitting according to method selection
# Switch output channel to suppress printing during fitting process
old_stdout = sys.stdout
new_stdout = io.StringIO()
sys.stdout = new_stdout
# Call relevant custom fitting function
if algo_choice == "montecarlo":
out = __montecarlo_fit(residual_function, params, args, kws,
fit_kws["montecarlo_trial_count"]
if "montecarlo_trial_count" in fit_kws
else 1000)
elif algo_choice == "genetic":
out = __genetic_fit(residual_function, params, args, kws,
fit_kws["genetic_generation_count"]
if "genetic_generation_count" in fit_kws
else 100,
fit_kws["genetic_generation_size"]
if "genetic_generation_size" in fit_kws
else 100,
fit_kws["genetic_survival_rate"]
if "genetic_survival_rate" in fit_kws
else 0.2)
else:
raise ValueError("algo_choice invalid")
# Return to original output channel
sys.stdout = old_stdout
# Print report of fitting results
print("[[Variables]]")
for param in params:
val = out[param].value
ini = params[param].value
print(" {0:7.7}{1:0<11.11} (init = {2})".format(
param + ":", (" " if val >= 0 else "") + str(val), ini))
# Return value bindings
return out
def __montecarlo_fit(residual_function, params, args, kws, trial_count):
"""Monte carlo fitting."""
# Record minimum residual sum of squares and corresponding parameters
min_params = params.copy()
min_rss = __rss(residual_function, min_params, args, kws)
# Generate random parameter sets and return optimal set
for trial in range(trial_count):
# Generate new parameters uniformly
trial_params = __random_param_values(params)
# Get corresponding residual value of new parameters
trial_rss = __rss(residual_function, trial_params, args, kws)
# Record new parameters and rss if better
if trial_rss < min_rss:
min_rss = trial_rss
min_params = trial_params
# Return best parameters
return min_params
def __genetic_fit(residual_function, params, args, kws, generation_count,
generation_size, survival_rate):
"""Genetic algorithm fitting."""
# FLAW: CANNOT MUTATE PARAMETER FROM ONE SIGN TO ANOTHER
# Uses rss as fitness to minimize
# Unclear if fitness values will have sufficient range
def new_generation(previous_population=None, previous_fitnesses=None):
# Generate original population
if previous_population is None or previous_fitnesses is None:
population = [__random_param_values(params)
for i in range(generation_size)]
# Generate descendant population
else:
# Use normalized fitness to calculate survival and breeding odds
normfit = -1 * (previous_fitnesses - np.amax(previous_fitnesses))
normfit = normfit / np.sum(normfit)
survivor_count = int(survival_rate * generation_size)
children_count = generation_size - survivor_count
# Get survivors and parents
survivors = list(np.random.choice(previous_population,
size=survivor_count, p=normfit,
replace=False))
parents = list(np.random.choice(previous_population,
size=children_count * 2,
p=normfit))
# Produce children via crossover between parents, then mutate
children = [parents[i * 2].copy() for i in range(children_count)]
for i in range(0, children_count):
for param in params:
# Verify that param can be modified
if params[param].vary:
# Crossover
children[i][param] = (children[i][param]
if bool(getrandbits(1))
else parents[2 * i + 1][param])
# Mutation
children[i][param].value = (children[i][param].value
* np.random.uniform(0.75,
1.25))
population = survivors + children
# Calculate fitnesses
fitnesses = np.array([__rss(residual_function, chromosome, args, kws)
for chromosome in population])
# Return
return population, fitnesses
# Store generation and fitnesses
population = fitnesses = None
# Main generation loop
for generation in range(generation_count):
population, fitnesses = new_generation(population, fitnesses)
# Return best parameters from final generation
return population[np.argmin(fitnesses)]
def __random_param_values(params):
"""Generate new parameters uniformly."""
rand_params = params.copy()
for param in params:
# Only modify parameter if [vary == True]
if params[param].vary:
rand_params[param].value = np.random.uniform(params[param].min,
params[param].max)
# Explicitly re-apply parameter [expr] constraints
rand_params.update_constraints()
return rand_params
def __rss(residual_function, param_values, args, kws):
"""Compute residual sum of squares"""
return np.sum(np.square(np.array(residual_function(param_values,
*args,
**kws))))
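# A minimal usage sketch (hypothetical parameter names and bounds; assumes the
# spectral basis files required by cw_spec are available):
#
#     params = Parameters()
#     params.add('scale', value=1.0, min=0.1, max=10.0)
#     params.add('dx', value=7.0, min=1.0, max=20.0)
#     best_params = fit_sat(params_fit=params, algo_choice="simplex")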
| [
"numpy.amax",
"numpy.random.choice",
"numpy.sum",
"numpy.zeros",
"numpy.linspace",
"lmfit.report_fit",
"random.getrandbits",
"numpy.random.uniform",
"numpy.argmin",
"io.StringIO",
"lmfit.Parameters",
"lmfit.minimize"
] | [((426, 438), 'lmfit.Parameters', 'Parameters', ([], {}), '()\n', (436, 438), False, 'from lmfit import minimize, report_fit, Parameters\n'), ((594, 612), 'numpy.zeros', 'np.zeros', (['(1, 128)'], {}), '((1, 128))\n', (602, 612), True, 'import numpy as np\n'), ((8898, 8911), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (8909, 8911), False, 'import io\n'), ((8983, 9088), 'lmfit.minimize', 'minimize', (['residual_function', 'params'], {'method': 'method', 'nan_policy': '"""omit"""', 'args': 'args', 'kws': 'kws'}), "(residual_function, params, method=method, nan_policy='omit', args=\n args, kws=kws, **fit_kws)\n", (8991, 9088), False, 'from lmfit import minimize, report_fit, Parameters\n'), ((9218, 9240), 'lmfit.report_fit', 'report_fit', (['out.params'], {}), '(out.params)\n', (9228, 9240), False, 'from lmfit import minimize, report_fit, Parameters\n'), ((9732, 9745), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (9743, 9745), False, 'import io\n'), ((4681, 4706), 'numpy.linspace', 'np.linspace', (['(-60)', '(60)', '(256)'], {}), '(-60, 60, 256)\n', (4692, 4706), True, 'import numpy as np\n'), ((14924, 14944), 'numpy.argmin', 'np.argmin', (['fitnesses'], {}), '(fitnesses)\n', (14933, 14944), True, 'import numpy as np\n'), ((527, 552), 'numpy.linspace', 'np.linspace', (['(-60)', '(60)', '(256)'], {}), '(-60, 60, 256)\n', (538, 552), True, 'import numpy as np\n'), ((15214, 15269), 'numpy.random.uniform', 'np.random.uniform', (['params[param].min', 'params[param].max'], {}), '(params[param].min, params[param].max)\n', (15231, 15269), True, 'import numpy as np\n'), ((12895, 12910), 'numpy.sum', 'np.sum', (['normfit'], {}), '(normfit)\n', (12901, 12910), True, 'import numpy as np\n'), ((13112, 13200), 'numpy.random.choice', 'np.random.choice', (['previous_population'], {'size': 'survivor_count', 'p': 'normfit', 'replace': '(False)'}), '(previous_population, size=survivor_count, p=normfit,\n replace=False)\n', (13128, 13200), True, 'import numpy as np\n'), ((13320, 13393), 'numpy.random.choice', 'np.random.choice', (['previous_population'], {'size': '(children_count * 2)', 'p': 'normfit'}), '(previous_population, size=children_count * 2, p=normfit)\n', (13336, 13393), True, 'import numpy as np\n'), ((12833, 12860), 'numpy.amax', 'np.amax', (['previous_fitnesses'], {}), '(previous_fitnesses)\n', (12840, 12860), True, 'import numpy as np\n'), ((14248, 14277), 'numpy.random.uniform', 'np.random.uniform', (['(0.75)', '(1.25)'], {}), '(0.75, 1.25)\n', (14265, 14277), True, 'import numpy as np\n'), ((13984, 13998), 'random.getrandbits', 'getrandbits', (['(1)'], {}), '(1)\n', (13995, 13998), False, 'from random import getrandbits\n')] |
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from datetime import datetime, timedelta
import requests
from azure.common import (
AzureMissingResourceHttpError,
AzureException,
)
from azure.storage.blob import (
BlockBlobService,
BlobPermissions,
ContainerPermissions,
)
from azure.storage.common import (
AccessPolicy,
ResourceTypes,
AccountPermissions,
TokenCredential,
)
from tests.testcase import (
StorageTestCase,
TestMode,
record,
)
class StorageBlobSASTest(StorageTestCase):
def setUp(self):
super(StorageBlobSASTest, self).setUp()
self.bs = self._create_storage_service(BlockBlobService, self.settings)
self.container_name = self.get_resource_name('utcontainer')
if not self.is_playback():
self.bs.create_container(self.container_name)
self.byte_data = self.get_random_bytes(1024)
def tearDown(self):
if not self.is_playback():
self.bs.delete_container(self.container_name)
return super(StorageBlobSASTest, self).tearDown()
def _get_container_reference(self):
return self.get_resource_name("sastestcontainer")
def _get_blob_reference(self):
return self.get_resource_name("sastestblob")
def _create_block_blob(self):
blob_name = self._get_blob_reference()
self.bs.create_blob_from_bytes(self.container_name, blob_name, self.byte_data)
return blob_name
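    # Acquire a user delegation key with an OAuth token credential; it is used
    # below to sign user-delegation SAS tokens without the account key.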
def _get_user_delegation_key(self, key_start_time, key_expiry_time):
token_credential = TokenCredential(self.generate_oauth_token())
service = BlockBlobService(self.settings.STORAGE_ACCOUNT_NAME, token_credential=token_credential)
return service.get_user_delegation_key(key_start_time, key_expiry_time)
@record
def test_get_user_delegation_key(self):
# SAS URL is calculated from storage key, so this test runs live only
if TestMode.need_recording_file(self.test_mode):
return
# Act
start = datetime.utcnow()
expiry = datetime.utcnow() + timedelta(hours=1)
user_delegation_key_1 = self._get_user_delegation_key(key_start_time=start, key_expiry_time=expiry)
user_delegation_key_2 = self._get_user_delegation_key(key_start_time=start, key_expiry_time=expiry)
# Assert key1 is valid
self.assertIsNotNone(user_delegation_key_1.signed_oid)
self.assertIsNotNone(user_delegation_key_1.signed_tid)
self.assertIsNotNone(user_delegation_key_1.signed_start)
self.assertIsNotNone(user_delegation_key_1.signed_expiry)
self.assertIsNotNone(user_delegation_key_1.signed_version)
self.assertIsNotNone(user_delegation_key_1.signed_service)
self.assertIsNotNone(user_delegation_key_1.value)
# Assert key1 and key2 are equal, since they have the exact same start and end times
self.assertEqual(user_delegation_key_1.signed_oid, user_delegation_key_2.signed_oid)
self.assertEqual(user_delegation_key_1.signed_tid, user_delegation_key_2.signed_tid)
self.assertEqual(user_delegation_key_1.signed_start, user_delegation_key_2.signed_start)
self.assertEqual(user_delegation_key_1.signed_expiry, user_delegation_key_2.signed_expiry)
self.assertEqual(user_delegation_key_1.signed_version, user_delegation_key_2.signed_version)
self.assertEqual(user_delegation_key_1.signed_service, user_delegation_key_2.signed_service)
self.assertEqual(user_delegation_key_1.value, user_delegation_key_2.value)
def test_user_delegation_sas_for_blob(self):
# SAS URL is calculated from storage key, so this test runs live only
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
blob_name = self._create_block_blob()
user_delegation_key = self._get_user_delegation_key(datetime.utcnow(), datetime.utcnow() + timedelta(hours=1))
# create a new service object without any key, to make sure the sas is truly generated from the delegation key
service = BlockBlobService(self.settings.STORAGE_ACCOUNT_NAME)
token = service.generate_blob_shared_access_signature(
self.container_name,
blob_name,
permission=BlobPermissions.READ,
expiry=datetime.utcnow() + timedelta(hours=1),
user_delegation_key=user_delegation_key,
)
# Act
# Use the generated identity sas
service = BlockBlobService(
self.settings.STORAGE_ACCOUNT_NAME,
sas_token=token,
request_session=requests.Session(),
)
result = service.get_blob_to_bytes(self.container_name, blob_name)
# Assert
self.assertEqual(self.byte_data, result.content)
@record
def test_user_delegation_sas_for_container(self):
# SAS URL is calculated from storage key, so this test runs live only
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
blob_name = self._create_block_blob()
user_delegation_key = self._get_user_delegation_key(datetime.utcnow(), datetime.utcnow() + timedelta(hours=1))
# create a new service object without any key, to make sure the sas is truly generated from the delegation key
service = BlockBlobService(self.settings.STORAGE_ACCOUNT_NAME)
token = service.generate_container_shared_access_signature(
self.container_name,
expiry=datetime.utcnow() + timedelta(hours=1),
permission=ContainerPermissions.READ,
user_delegation_key=user_delegation_key,
)
# Act
service = BlockBlobService(
self.settings.STORAGE_ACCOUNT_NAME,
sas_token=token,
request_session=requests.Session(),
)
result = service.get_blob_to_bytes(self.container_name, blob_name)
# Assert
self.assertEqual(self.byte_data, result.content)
@record
def test_sas_access_blob(self):
# SAS URL is calculated from storage key, so this test runs live only
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
blob_name = self._create_block_blob()
token = self.bs.generate_blob_shared_access_signature(
self.container_name,
blob_name,
permission=BlobPermissions.READ,
expiry=datetime.utcnow() + timedelta(hours=1),
)
# Act
service = BlockBlobService(
self.settings.STORAGE_ACCOUNT_NAME,
sas_token=token,
request_session=requests.Session(),
)
self._set_test_proxy(service, self.settings)
result = service.get_blob_to_bytes(self.container_name, blob_name)
# Assert
self.assertEqual(self.byte_data, result.content)
@record
def test_sas_access_blob_snapshot(self):
# SAS URL is calculated from storage key, so this test runs live only
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
blob_name = self._create_block_blob()
blob_snapshot = self.bs.snapshot_blob(self.container_name, blob_name)
token = self.bs.generate_blob_shared_access_signature(
self.container_name,
blob_name,
permission=BlobPermissions.READ + BlobPermissions.DELETE,
expiry=datetime.utcnow() + timedelta(hours=1),
snapshot=blob_snapshot.snapshot
)
service = BlockBlobService(
self.settings.STORAGE_ACCOUNT_NAME,
sas_token=token,
request_session=requests.Session(),
)
# Read from the snapshot
result = service.get_blob_to_bytes(self.container_name, blob_name, snapshot=blob_snapshot.snapshot)
# Assert
self.assertEqual(self.byte_data, result.content)
# Delete the snapshot
service.delete_blob(self.container_name, blob_name, snapshot=blob_snapshot.snapshot)
# Assert
self.assertFalse(service.exists(self.container_name, blob_name, snapshot=blob_snapshot.snapshot))
# Accessing the blob with a snapshot sas should fail
with self.assertRaises(AzureException):
service.get_blob_to_bytes(self.container_name, blob_name)
@record
def test_sas_signed_identifier(self):
# SAS URL is calculated from storage key, so this test runs live only
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
blob_name = self._create_block_blob()
access_policy = AccessPolicy()
access_policy.start = '2011-10-11'
access_policy.expiry = '2088-10-12'
access_policy.permission = BlobPermissions.READ
identifiers = {'testid': access_policy}
resp = self.bs.set_container_acl(self.container_name, identifiers)
token = self.bs.generate_blob_shared_access_signature(
self.container_name,
blob_name,
id='testid'
)
# Act
service = BlockBlobService(
self.settings.STORAGE_ACCOUNT_NAME,
sas_token=token,
request_session=requests.Session(),
)
self._set_test_proxy(service, self.settings)
result = service.get_blob_to_bytes(self.container_name, blob_name)
# Assert
self.assertEqual(self.byte_data, result.content)
@record
def test_account_sas(self):
# SAS URL is calculated from storage key, so this test runs live only
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
blob_name = self._create_block_blob()
token = self.bs.generate_account_shared_access_signature(
ResourceTypes.OBJECT + ResourceTypes.CONTAINER,
AccountPermissions.READ,
datetime.utcnow() + timedelta(hours=1),
)
# Act
blob_url = self.bs.make_blob_url(
self.container_name,
blob_name,
sas_token=token,
)
container_url = self.bs.make_container_url(
self.container_name,
sas_token=token,
)
blob_response = requests.get(blob_url)
container_response = requests.get(container_url)
# Assert
self.assertTrue(blob_response.ok)
self.assertEqual(self.byte_data, blob_response.content)
self.assertTrue(container_response.ok)
@record
def test_shared_read_access_blob(self):
# SAS URL is calculated from storage key, so this test runs live only
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
blob_name = self._create_block_blob()
token = self.bs.generate_blob_shared_access_signature(
self.container_name,
blob_name,
permission=BlobPermissions.READ,
expiry=datetime.utcnow() + timedelta(hours=1),
)
# Act
url = self.bs.make_blob_url(
self.container_name,
blob_name,
sas_token=token,
)
response = requests.get(url)
# Assert
self.assertTrue(response.ok)
self.assertEqual(self.byte_data, response.content)
@record
def test_shared_read_access_blob_with_content_query_params(self):
# SAS URL is calculated from storage key, so this test runs live only
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
blob_name = self._create_block_blob()
token = self.bs.generate_blob_shared_access_signature(
self.container_name,
blob_name,
permission=BlobPermissions.READ,
expiry=datetime.utcnow() + timedelta(hours=1),
cache_control='no-cache',
content_disposition='inline',
content_encoding='utf-8',
content_language='fr',
content_type='text',
)
url = self.bs.make_blob_url(
self.container_name,
blob_name,
sas_token=token,
)
# Act
response = requests.get(url)
# Assert
self.assertEqual(self.byte_data, response.content)
self.assertEqual(response.headers['cache-control'], 'no-cache')
self.assertEqual(response.headers['content-disposition'], 'inline')
self.assertEqual(response.headers['content-encoding'], 'utf-8')
self.assertEqual(response.headers['content-language'], 'fr')
self.assertEqual(response.headers['content-type'], 'text')
@record
def test_shared_write_access_blob(self):
# SAS URL is calculated from storage key, so this test runs live only
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
updated_data = b'updated blob data'
blob_name = self._create_block_blob()
token = self.bs.generate_blob_shared_access_signature(
self.container_name,
blob_name,
permission=BlobPermissions.WRITE,
expiry=datetime.utcnow() + timedelta(hours=1),
)
url = self.bs.make_blob_url(
self.container_name,
blob_name,
sas_token=token,
)
# Act
headers = {'x-ms-blob-type': self.bs.blob_type}
response = requests.put(url, headers=headers, data=updated_data)
# Assert
self.assertTrue(response.ok)
blob = self.bs.get_blob_to_bytes(self.container_name, blob_name)
self.assertEqual(updated_data, blob.content)
@record
def test_shared_delete_access_blob(self):
# SAS URL is calculated from storage key, so this test runs live only
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
blob_name = self._create_block_blob()
token = self.bs.generate_blob_shared_access_signature(
self.container_name,
blob_name,
permission=BlobPermissions.DELETE,
expiry=datetime.utcnow() + timedelta(hours=1),
)
url = self.bs.make_blob_url(
self.container_name,
blob_name,
sas_token=token,
)
# Act
response = requests.delete(url)
# Assert
self.assertTrue(response.ok)
with self.assertRaises(AzureMissingResourceHttpError):
blob = self.bs.get_blob_to_bytes(self.container_name, blob_name)
@record
def test_shared_access_container(self):
# SAS URL is calculated from storage key, so this test runs live only
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
blob_name = self._create_block_blob()
token = self.bs.generate_container_shared_access_signature(
self.container_name,
expiry=datetime.utcnow() + timedelta(hours=1),
permission=ContainerPermissions.READ,
)
url = self.bs.make_blob_url(
self.container_name,
blob_name,
sas_token=token,
)
# Act
response = requests.get(url)
# Assert
self.assertTrue(response.ok)
self.assertEqual(self.byte_data, response.content)
| [
"requests.Session",
"azure.storage.common.AccessPolicy",
"datetime.datetime.utcnow",
"requests.get",
"requests.delete",
"tests.testcase.TestMode.need_recording_file",
"requests.put",
"datetime.timedelta",
"azure.storage.blob.BlockBlobService"
] | [((1890, 1982), 'azure.storage.blob.BlockBlobService', 'BlockBlobService', (['self.settings.STORAGE_ACCOUNT_NAME'], {'token_credential': 'token_credential'}), '(self.settings.STORAGE_ACCOUNT_NAME, token_credential=\n token_credential)\n', (1906, 1982), False, 'from azure.storage.blob import BlockBlobService, BlobPermissions, ContainerPermissions\n'), ((2204, 2248), 'tests.testcase.TestMode.need_recording_file', 'TestMode.need_recording_file', (['self.test_mode'], {}), '(self.test_mode)\n', (2232, 2248), False, 'from tests.testcase import StorageTestCase, TestMode, record\n'), ((2300, 2317), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (2315, 2317), False, 'from datetime import datetime, timedelta\n'), ((3971, 4015), 'tests.testcase.TestMode.need_recording_file', 'TestMode.need_recording_file', (['self.test_mode'], {}), '(self.test_mode)\n', (3999, 4015), False, 'from tests.testcase import StorageTestCase, TestMode, record\n'), ((4358, 4410), 'azure.storage.blob.BlockBlobService', 'BlockBlobService', (['self.settings.STORAGE_ACCOUNT_NAME'], {}), '(self.settings.STORAGE_ACCOUNT_NAME)\n', (4374, 4410), False, 'from azure.storage.blob import BlockBlobService, BlobPermissions, ContainerPermissions\n'), ((5230, 5274), 'tests.testcase.TestMode.need_recording_file', 'TestMode.need_recording_file', (['self.test_mode'], {}), '(self.test_mode)\n', (5258, 5274), False, 'from tests.testcase import StorageTestCase, TestMode, record\n'), ((5617, 5669), 'azure.storage.blob.BlockBlobService', 'BlockBlobService', (['self.settings.STORAGE_ACCOUNT_NAME'], {}), '(self.settings.STORAGE_ACCOUNT_NAME)\n', (5633, 5669), False, 'from azure.storage.blob import BlockBlobService, BlobPermissions, ContainerPermissions\n'), ((6417, 6461), 'tests.testcase.TestMode.need_recording_file', 'TestMode.need_recording_file', (['self.test_mode'], {}), '(self.test_mode)\n', (6445, 6461), False, 'from tests.testcase import StorageTestCase, TestMode, record\n'), ((7317, 7361), 'tests.testcase.TestMode.need_recording_file', 'TestMode.need_recording_file', (['self.test_mode'], {}), '(self.test_mode)\n', (7345, 7361), False, 'from tests.testcase import StorageTestCase, TestMode, record\n'), ((8788, 8832), 'tests.testcase.TestMode.need_recording_file', 'TestMode.need_recording_file', (['self.test_mode'], {}), '(self.test_mode)\n', (8816, 8832), False, 'from tests.testcase import StorageTestCase, TestMode, record\n'), ((8943, 8957), 'azure.storage.common.AccessPolicy', 'AccessPolicy', ([], {}), '()\n', (8955, 8957), False, 'from azure.storage.common import AccessPolicy, ResourceTypes, AccountPermissions, TokenCredential\n'), ((9902, 9946), 'tests.testcase.TestMode.need_recording_file', 'TestMode.need_recording_file', (['self.test_mode'], {}), '(self.test_mode)\n', (9930, 9946), False, 'from tests.testcase import StorageTestCase, TestMode, record\n'), ((10559, 10581), 'requests.get', 'requests.get', (['blob_url'], {}), '(blob_url)\n', (10571, 10581), False, 'import requests\n'), ((10611, 10638), 'requests.get', 'requests.get', (['container_url'], {}), '(container_url)\n', (10623, 10638), False, 'import requests\n'), ((10956, 11000), 'tests.testcase.TestMode.need_recording_file', 'TestMode.need_recording_file', (['self.test_mode'], {}), '(self.test_mode)\n', (10984, 11000), False, 'from tests.testcase import StorageTestCase, TestMode, record\n'), ((11486, 11503), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (11498, 11503), False, 'import requests\n'), ((11790, 11834), 
'tests.testcase.TestMode.need_recording_file', 'TestMode.need_recording_file', (['self.test_mode'], {}), '(self.test_mode)\n', (11818, 11834), False, 'from tests.testcase import StorageTestCase, TestMode, record\n'), ((12506, 12523), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (12518, 12523), False, 'import requests\n'), ((13104, 13148), 'tests.testcase.TestMode.need_recording_file', 'TestMode.need_recording_file', (['self.test_mode'], {}), '(self.test_mode)\n', (13132, 13148), False, 'from tests.testcase import StorageTestCase, TestMode, record\n'), ((13735, 13788), 'requests.put', 'requests.put', (['url'], {'headers': 'headers', 'data': 'updated_data'}), '(url, headers=headers, data=updated_data)\n', (13747, 13788), False, 'import requests\n'), ((14118, 14162), 'tests.testcase.TestMode.need_recording_file', 'TestMode.need_recording_file', (['self.test_mode'], {}), '(self.test_mode)\n', (14146, 14162), False, 'from tests.testcase import StorageTestCase, TestMode, record\n'), ((14650, 14670), 'requests.delete', 'requests.delete', (['url'], {}), '(url)\n', (14665, 14670), False, 'import requests\n'), ((15012, 15056), 'tests.testcase.TestMode.need_recording_file', 'TestMode.need_recording_file', (['self.test_mode'], {}), '(self.test_mode)\n', (15040, 15056), False, 'from tests.testcase import StorageTestCase, TestMode, record\n'), ((15528, 15545), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (15540, 15545), False, 'import requests\n'), ((2335, 2352), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (2350, 2352), False, 'from datetime import datetime, timedelta\n'), ((2355, 2373), 'datetime.timedelta', 'timedelta', ([], {'hours': '(1)'}), '(hours=1)\n', (2364, 2373), False, 'from datetime import datetime, timedelta\n'), ((4161, 4178), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (4176, 4178), False, 'from datetime import datetime, timedelta\n'), ((5420, 5437), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (5435, 5437), False, 'from datetime import datetime, timedelta\n'), ((4180, 4197), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (4195, 4197), False, 'from datetime import datetime, timedelta\n'), ((4200, 4218), 'datetime.timedelta', 'timedelta', ([], {'hours': '(1)'}), '(hours=1)\n', (4209, 4218), False, 'from datetime import datetime, timedelta\n'), ((4894, 4912), 'requests.Session', 'requests.Session', ([], {}), '()\n', (4910, 4912), False, 'import requests\n'), ((5439, 5456), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (5454, 5456), False, 'from datetime import datetime, timedelta\n'), ((5459, 5477), 'datetime.timedelta', 'timedelta', ([], {'hours': '(1)'}), '(hours=1)\n', (5468, 5477), False, 'from datetime import datetime, timedelta\n'), ((6099, 6117), 'requests.Session', 'requests.Session', ([], {}), '()\n', (6115, 6117), False, 'import requests\n'), ((6937, 6955), 'requests.Session', 'requests.Session', ([], {}), '()\n', (6953, 6955), False, 'import requests\n'), ((7969, 7987), 'requests.Session', 'requests.Session', ([], {}), '()\n', (7985, 7987), False, 'import requests\n'), ((9535, 9553), 'requests.Session', 'requests.Session', ([], {}), '()\n', (9551, 9553), False, 'import requests\n'), ((10208, 10225), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (10223, 10225), False, 'from datetime import datetime, timedelta\n'), ((10228, 10246), 'datetime.timedelta', 'timedelta', ([], {'hours': '(1)'}), '(hours=1)\n', (10237, 10246), False, 'from 
datetime import datetime, timedelta\n'), ((4594, 4611), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (4609, 4611), False, 'from datetime import datetime, timedelta\n'), ((4614, 4632), 'datetime.timedelta', 'timedelta', ([], {'hours': '(1)'}), '(hours=1)\n', (4623, 4632), False, 'from datetime import datetime, timedelta\n'), ((5790, 5807), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (5805, 5807), False, 'from datetime import datetime, timedelta\n'), ((5810, 5828), 'datetime.timedelta', 'timedelta', ([], {'hours': '(1)'}), '(hours=1)\n', (5819, 5828), False, 'from datetime import datetime, timedelta\n'), ((6731, 6748), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (6746, 6748), False, 'from datetime import datetime, timedelta\n'), ((6751, 6769), 'datetime.timedelta', 'timedelta', ([], {'hours': '(1)'}), '(hours=1)\n', (6760, 6769), False, 'from datetime import datetime, timedelta\n'), ((7734, 7751), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (7749, 7751), False, 'from datetime import datetime, timedelta\n'), ((7754, 7772), 'datetime.timedelta', 'timedelta', ([], {'hours': '(1)'}), '(hours=1)\n', (7763, 7772), False, 'from datetime import datetime, timedelta\n'), ((11270, 11287), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (11285, 11287), False, 'from datetime import datetime, timedelta\n'), ((11290, 11308), 'datetime.timedelta', 'timedelta', ([], {'hours': '(1)'}), '(hours=1)\n', (11299, 11308), False, 'from datetime import datetime, timedelta\n'), ((12104, 12121), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (12119, 12121), False, 'from datetime import datetime, timedelta\n'), ((12124, 12142), 'datetime.timedelta', 'timedelta', ([], {'hours': '(1)'}), '(hours=1)\n', (12133, 12142), False, 'from datetime import datetime, timedelta\n'), ((13463, 13480), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (13478, 13480), False, 'from datetime import datetime, timedelta\n'), ((13483, 13501), 'datetime.timedelta', 'timedelta', ([], {'hours': '(1)'}), '(hours=1)\n', (13492, 13501), False, 'from datetime import datetime, timedelta\n'), ((14434, 14451), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (14449, 14451), False, 'from datetime import datetime, timedelta\n'), ((14454, 14472), 'datetime.timedelta', 'timedelta', ([], {'hours': '(1)'}), '(hours=1)\n', (14463, 14472), False, 'from datetime import datetime, timedelta\n'), ((15262, 15279), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (15277, 15279), False, 'from datetime import datetime, timedelta\n'), ((15282, 15300), 'datetime.timedelta', 'timedelta', ([], {'hours': '(1)'}), '(hours=1)\n', (15291, 15300), False, 'from datetime import datetime, timedelta\n')] |
import unittest
from main import AADatabase
from unittest.mock import patch
class TestAADatabase(unittest.TestCase):
def test_database_inaccessible(self):
with patch.object(AADatabase, 'is_primary') as is_primary_mocked:
is_primary_mocked.return_value = True
res = AADatabase.run()
self.assertTrue(res)
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"main.AADatabase.run",
"unittest.mock.patch.object"
] | [((386, 401), 'unittest.main', 'unittest.main', ([], {}), '()\n', (399, 401), False, 'import unittest\n'), ((174, 212), 'unittest.mock.patch.object', 'patch.object', (['AADatabase', '"""is_primary"""'], {}), "(AADatabase, 'is_primary')\n", (186, 212), False, 'from unittest.mock import patch\n'), ((303, 319), 'main.AADatabase.run', 'AADatabase.run', ([], {}), '()\n', (317, 319), False, 'from main import AADatabase\n')] |
import pytest
import numpy as np
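# With every cell equal to 3 and all row/column targets 6, a solution keeps
# exactly two cells per row and column: the six masks below (complements of the
# 3x3 permutation matrices).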
@pytest.mark.parametrize('content,row_constraints,column_constraints,expected', [
([[3,3,3],
[3,3,3],
[3,3,3]],
[6,6,6],
[6,6,6],
[
np.array([
[0,1,1],
[1,0,1],
[1,1,0],
]),
np.array([
[0,1,1],
[1,1,0],
[1,0,1],
]),
np.array([
[1,0,1],
[0,1,1],
[1,1,0],
]),
np.array([
[1,0,1],
[1,1,0],
[0,1,1],
]),
np.array([
[1,1,0],
[0,1,1],
[1,0,1],
]),
np.array([
[1,1,0],
[1,0,1],
[0,1,1],
]),
]),
])
def test_solve(content, row_constraints, column_constraints, expected, sort):
from rullo.rullo import Rullo
from rullo.solver import solve
assert np.all(sort(expected) == sort(solve(Rullo(
content,
row_constraints,
column_constraints,
)))) | [
"numpy.array",
"rullo.rullo.Rullo"
] | [((207, 250), 'numpy.array', 'np.array', (['[[0, 1, 1], [1, 0, 1], [1, 1, 0]]'], {}), '([[0, 1, 1], [1, 0, 1], [1, 1, 0]])\n', (215, 250), True, 'import numpy as np\n'), ((306, 349), 'numpy.array', 'np.array', (['[[0, 1, 1], [1, 1, 0], [1, 0, 1]]'], {}), '([[0, 1, 1], [1, 1, 0], [1, 0, 1]])\n', (314, 349), True, 'import numpy as np\n'), ((405, 448), 'numpy.array', 'np.array', (['[[1, 0, 1], [0, 1, 1], [1, 1, 0]]'], {}), '([[1, 0, 1], [0, 1, 1], [1, 1, 0]])\n', (413, 448), True, 'import numpy as np\n'), ((504, 547), 'numpy.array', 'np.array', (['[[1, 0, 1], [1, 1, 0], [0, 1, 1]]'], {}), '([[1, 0, 1], [1, 1, 0], [0, 1, 1]])\n', (512, 547), True, 'import numpy as np\n'), ((603, 646), 'numpy.array', 'np.array', (['[[1, 1, 0], [0, 1, 1], [1, 0, 1]]'], {}), '([[1, 1, 0], [0, 1, 1], [1, 0, 1]])\n', (611, 646), True, 'import numpy as np\n'), ((702, 745), 'numpy.array', 'np.array', (['[[1, 1, 0], [1, 0, 1], [0, 1, 1]]'], {}), '([[1, 1, 0], [1, 0, 1], [0, 1, 1]])\n', (710, 745), True, 'import numpy as np\n'), ((1004, 1055), 'rullo.rullo.Rullo', 'Rullo', (['content', 'row_constraints', 'column_constraints'], {}), '(content, row_constraints, column_constraints)\n', (1009, 1055), False, 'from rullo.rullo import Rullo\n')] |
from __future__ import print_function, division, absolute_import
# Std. lib imports
from math import pi
# Non-std. lib imports
from PySide.QtCore import Signal, QObject
from PySide.QtGui import QGroupBox, QHBoxLayout, QVBoxLayout, QLabel, \
QComboBox, QRadioButton, QStringListModel, \
QLineEdit, QDoubleValidator, QGridLayout
from numpy.testing import assert_approx_equal
# Local imports
from rapid.gui.guicommon import error
from rapid.gui.guicommon import toolTipText as ttt
HZ2WAVENUM = 1 / ( 100 * 2.99792458E8 * 2 * pi )
class Rate(QObject):
'''Class to hold all information about the function'''
def __init__(self, parent = None):
'''Initialize the function class'''
super(Rate, self).__init__(parent)
self.converter = lambda x: x
self.lunits = QStringListModel('s ns ps fs'.split(' '))
self.runits = QStringListModel('Hz GHz THz PHz'.split(' '))
self.method = ''
def setConverter(self, unit):
'''Sets the function to perform rate conversion to cm^{-1}'''
self.unit = str(unit)
conv = {
'fs' : lambda x : HZ2WAVENUM / ( 1E-15 * x ),
'ps' : lambda x : HZ2WAVENUM / ( 1E-12 * x ),
'ns' : lambda x : HZ2WAVENUM / ( 1E-9 * x ),
's' : lambda x : HZ2WAVENUM / ( x ),
'PHz' : lambda x : HZ2WAVENUM * 1E15 * x,
'THz' : lambda x : HZ2WAVENUM * 1E12 * x,
'GHz' : lambda x : HZ2WAVENUM * 1E9 * x,
'Hz' : lambda x : HZ2WAVENUM * x,
}
try:
self.converter = conv[self.unit]
except KeyError:
pass # This happens when we set a new model. Ignore
def getParams(self):
'''Returns the current rate parameters'''
# Return None if rate is not yet defined
try:
return self.rate, self.unit
except AttributeError:
return None, None
def getConvertedRate(self):
'''Returns the rate in wavenumbers'''
# Return None if rate is not yet defined
try:
return self.converter(self.rate)
except AttributeError:
return None
#######
# SLOTS
#######
def setRate(self, rate):
'''Sets the rate and emits the result'''
self.rate = rate
self.rateChanged.emit()
#########
# SIGNALS
#########
# The rate changed
rateChanged = Signal()
#/\/\/\/\/\/\/\
# The rate view
#/\/\/\/\/\/\/\
class RateView(QGroupBox):
'''The box containing the rate value'''
def __init__(self, title = 'Rate', parent = None):
'''Initialize'''
super(RateView, self).__init__(parent)
self.setTitle(title)
self._createWidgets()
def _createWidgets(self):
'''Create the widgets contained in this box'''
# Rate or lifetime chooser
self.rate = QRadioButton('Rate', self)
self.rate.setToolTip(ttt('Choose this to express exchange as rate'))
self.lifetime = QRadioButton('Lifetime', self)
self.lifetime.setToolTip(ttt('Choose this to express exchange as '
'lifetime'))
# Box containing value
self.rate_value = QLineEdit(self)
validate = QDoubleValidator(self.rate_value)
validate.setDecimals(3)
validate.setBottom(0.0)
self.rate_value.setValidator(validate)
self.rate_value.setToolTip(ttt('The rate or lifetime value'))
# Unit
self.unit = QComboBox(self)
self.unit.setToolTip(ttt('Selects the input unit for the rate '
'or lifetime'))
def initUI(self):
'''Lays out the widgets'''
radios = QVBoxLayout()
radios.addWidget(self.rate)
radios.addWidget(self.lifetime)
rate = QGridLayout()
rate.addWidget(QLabel("Unit: "), 1, 1)
rate.addWidget(self.unit, 1, 2)
rate.addWidget(QLabel("Value: "), 2, 1)
rate.addWidget(self.rate_value, 2, 2)
total = QHBoxLayout()
total.addLayout(radios)
total.addStretch()
total.addLayout(rate)
self.setLayout(total)
def makeConnections(self):
'''Connect the widgets together'''
# When one radio button is checked, change the combo box model
# and un-check the other radio button
self.rate.clicked.connect(self.setRateModel)
self.lifetime.clicked.connect(self.setLifetimeModel)
# If the text changes, emit that rate
self.rate_value.editingFinished.connect(self.emitRate)
# If the underlying model changes, adjust the text
self.model.rateChanged.connect(self.updateRate)
# If the unit changes, update rate
self.unit.currentIndexChanged.connect(self.updateUnit)
def setModel(self, model):
'''Attaches models to the views'''
self.model = model
def setRate(self, rate):
'''Set the rate manually'''
self.rate_value.setText(str(rate))
self.rate_value.editingFinished.emit()
def setUnit(self, unit):
'''Set the unit manually'''
if unit == 's':
self.lifetime.click()
self.unit.setCurrentIndex(0)
elif unit == 'ns':
self.lifetime.click()
self.unit.setCurrentIndex(1)
elif unit == 'ps':
self.lifetime.click()
self.unit.setCurrentIndex(2)
elif unit == 'fs':
self.lifetime.click()
self.unit.setCurrentIndex(3)
elif unit in ('Hz', 'hz'):
self.rate.click()
self.unit.setCurrentIndex(0)
elif unit in ('GHz', 'ghz'):
self.rate.click()
self.unit.setCurrentIndex(1)
elif unit in ('THz', 'thz'):
self.rate.click()
self.unit.setCurrentIndex(2)
elif unit in ('PHz', 'phz'):
self.rate.click()
self.unit.setCurrentIndex(3)
else:
error.showMessage('Invalid unit: {0}'.format(unit))
#######
# SLOTS
#######
def updateRate(self):
'''Updates the rate of the text box'''
# Do nothing if rate is not yet defined
try:
rate = self.model.rate
except AttributeError:
return
if 0.1 > rate or rate > 100:
self.rate_value.setText('{0:.3E}'.format(rate))
else:
self.rate_value.setText('{0:.3F}'.format(rate))
def emitRate(self):
'''Converts the text to a float and emits'''
# Do nothing if there is no number
try:
self.model.setRate(float(self.rate_value.text()))
except ValueError:
pass
def updateUnit(self):
'''Update for a change of unit'''
# If there is no unit yet, just set it
try:
unit = self.model.unit
except AttributeError:
self.model.setConverter(str(self.unit.currentText()))
try:
self.model.setRate(float(self.rate_value.text()))
except ValueError:
pass
return
# Convert unit appropriately
if self.rate.isChecked():
if unit == 'Hz':
conv = { 'GHz' : 1E-9,
'THz' : 1E-12,
'PHz' : 1E-15 }
elif unit == 'GHz':
conv = { 'Hz' : 1E9,
'THz' : 1E-3,
'PHz' : 1E-6 }
elif unit == 'THz':
conv = { 'Hz' : 1E12,
'GHz' : 1E3,
'PHz' : 1E-3 }
elif unit == 'PHz':
conv = { 'Hz' : 1E15,
'GHz' : 1E6,
'THz' : 1E3 }
else:
conv = { '' : 1,
'Hz' : 1,
'GHz' : 1,
'THz' : 1,
'PHz' : 1, }
else:
if unit == 's':
conv = { 'ns' : 1E9,
'ps' : 1E12,
'fs' : 1E15 }
elif unit == 'ns':
conv = { 's' : 1E-9,
'ps' : 1E3,
'fs' : 1E6 }
elif unit == 'ps':
conv = { 's' : 1E-12,
'ns' : 1E-3,
'fs' : 1E3 }
elif unit == 'fs':
conv = { 's' : 1E-15,
'ns' : 1E-6,
'ps' : 1E-3 }
else:
conv = { '' : 1,
's' : 1,
'ns' : 1,
'ps' : 1,
'fs' : 1, }
try:
# Set the new converter, then change the rate
self.model.setConverter(str(self.unit.currentText()))
try:
self.model.setRate(float(self.rate_value.text())
* conv[str(self.unit.currentText())])
except ValueError:
pass
except KeyError:
pass
# Save the new unit
self.model.unit = str(self.unit.currentText())
def setRateModel(self):
'''Change the model to use the rate'''
if self.model.method == 'rate':
return
self.model.method = 'rate'
indx = self.unit.currentIndex()
self.unit.setModel(self.model.runits)
self.model.unit = str(self.unit.itemText(indx))
self.unit.setCurrentIndex(indx)
self.model.setConverter(self.model.unit)
try:
self.model.setRate(1 / float(self.rate_value.text()))
except (ZeroDivisionError, ValueError):
pass
def setLifetimeModel(self):
'''Change the model to use the lifetime'''
if self.model.method == 'lifetime':
return
self.model.method = 'lifetime'
indx = self.unit.currentIndex()
self.unit.setModel(self.model.lunits)
self.model.unit = str(self.unit.itemText(indx))
self.unit.setCurrentIndex(indx)
self.model.setConverter(self.model.unit)
try:
self.model.setRate(1 / float(self.rate_value.text()))
except (ZeroDivisionError, ValueError):
pass
| [
"PySide.QtGui.QGridLayout",
"PySide.QtGui.QHBoxLayout",
"PySide.QtGui.QComboBox",
"PySide.QtGui.QVBoxLayout",
"rapid.gui.guicommon.toolTipText",
"PySide.QtGui.QLineEdit",
"PySide.QtCore.Signal",
"PySide.QtGui.QLabel",
"PySide.QtGui.QRadioButton",
"PySide.QtGui.QDoubleValidator"
] | [((2539, 2547), 'PySide.QtCore.Signal', 'Signal', ([], {}), '()\n', (2545, 2547), False, 'from PySide.QtCore import Signal, QObject\n'), ((2999, 3025), 'PySide.QtGui.QRadioButton', 'QRadioButton', (['"""Rate"""', 'self'], {}), "('Rate', self)\n", (3011, 3025), False, 'from PySide.QtGui import QGroupBox, QHBoxLayout, QVBoxLayout, QLabel, QComboBox, QRadioButton, QStringListModel, QLineEdit, QDoubleValidator, QGridLayout\n'), ((3127, 3157), 'PySide.QtGui.QRadioButton', 'QRadioButton', (['"""Lifetime"""', 'self'], {}), "('Lifetime', self)\n", (3139, 3157), False, 'from PySide.QtGui import QGroupBox, QHBoxLayout, QVBoxLayout, QLabel, QComboBox, QRadioButton, QStringListModel, QLineEdit, QDoubleValidator, QGridLayout\n'), ((3341, 3356), 'PySide.QtGui.QLineEdit', 'QLineEdit', (['self'], {}), '(self)\n', (3350, 3356), False, 'from PySide.QtGui import QGroupBox, QHBoxLayout, QVBoxLayout, QLabel, QComboBox, QRadioButton, QStringListModel, QLineEdit, QDoubleValidator, QGridLayout\n'), ((3376, 3409), 'PySide.QtGui.QDoubleValidator', 'QDoubleValidator', (['self.rate_value'], {}), '(self.rate_value)\n', (3392, 3409), False, 'from PySide.QtGui import QGroupBox, QHBoxLayout, QVBoxLayout, QLabel, QComboBox, QRadioButton, QStringListModel, QLineEdit, QDoubleValidator, QGridLayout\n'), ((3627, 3642), 'PySide.QtGui.QComboBox', 'QComboBox', (['self'], {}), '(self)\n', (3636, 3642), False, 'from PySide.QtGui import QGroupBox, QHBoxLayout, QVBoxLayout, QLabel, QComboBox, QRadioButton, QStringListModel, QLineEdit, QDoubleValidator, QGridLayout\n'), ((3839, 3852), 'PySide.QtGui.QVBoxLayout', 'QVBoxLayout', ([], {}), '()\n', (3850, 3852), False, 'from PySide.QtGui import QGroupBox, QHBoxLayout, QVBoxLayout, QLabel, QComboBox, QRadioButton, QStringListModel, QLineEdit, QDoubleValidator, QGridLayout\n'), ((3944, 3957), 'PySide.QtGui.QGridLayout', 'QGridLayout', ([], {}), '()\n', (3955, 3957), False, 'from PySide.QtGui import QGroupBox, QHBoxLayout, QVBoxLayout, QLabel, QComboBox, QRadioButton, QStringListModel, QLineEdit, QDoubleValidator, QGridLayout\n'), ((4155, 4168), 'PySide.QtGui.QHBoxLayout', 'QHBoxLayout', ([], {}), '()\n', (4166, 4168), False, 'from PySide.QtGui import QGroupBox, QHBoxLayout, QVBoxLayout, QLabel, QComboBox, QRadioButton, QStringListModel, QLineEdit, QDoubleValidator, QGridLayout\n'), ((3055, 3101), 'rapid.gui.guicommon.toolTipText', 'ttt', (['"""Choose this to express exchange as rate"""'], {}), "('Choose this to express exchange as rate')\n", (3058, 3101), True, 'from rapid.gui.guicommon import toolTipText as ttt\n'), ((3191, 3241), 'rapid.gui.guicommon.toolTipText', 'ttt', (['"""Choose this to express exchange as lifetime"""'], {}), "('Choose this to express exchange as lifetime')\n", (3194, 3241), True, 'from rapid.gui.guicommon import toolTipText as ttt\n'), ((3556, 3589), 'rapid.gui.guicommon.toolTipText', 'ttt', (['"""The rate or lifetime value"""'], {}), "('The rate or lifetime value')\n", (3559, 3589), True, 'from rapid.gui.guicommon import toolTipText as ttt\n'), ((3672, 3726), 'rapid.gui.guicommon.toolTipText', 'ttt', (['"""Selects the input unit for the rate or lifetime"""'], {}), "('Selects the input unit for the rate or lifetime')\n", (3675, 3726), True, 'from rapid.gui.guicommon import toolTipText as ttt\n'), ((3981, 3997), 'PySide.QtGui.QLabel', 'QLabel', (['"""Unit: """'], {}), "('Unit: ')\n", (3987, 3997), False, 'from PySide.QtGui import QGroupBox, QHBoxLayout, QVBoxLayout, QLabel, QComboBox, QRadioButton, QStringListModel, QLineEdit, QDoubleValidator, QGridLayout\n'), 
((4068, 4085), 'PySide.QtGui.QLabel', 'QLabel', (['"""Value: """'], {}), "('Value: ')\n", (4074, 4085), False, 'from PySide.QtGui import QGroupBox, QHBoxLayout, QVBoxLayout, QLabel, QComboBox, QRadioButton, QStringListModel, QLineEdit, QDoubleValidator, QGridLayout\n')] |
import pathlib
from yaml_pyconf.base import BaseConfig
class SimpleConfig(BaseConfig):
def __new__(
cls,
yaml_path=pathlib.Path(__file__).parent.joinpath("samples").joinpath("sample-yaml").joinpath("simple.yaml")
):
return super(SimpleConfig, cls).__new__(cls, yaml_path)
@classmethod
def select_config(cls, conf):
return super(SimpleConfig, cls).select_config(conf)
@classmethod
def set_dotenv_var_from_yaml_conf(cls, instance, conf, list_name=None):
return super(SimpleConfig, cls).set_dotenv_var_from_yaml_conf(instance, conf)
| [
"pathlib.Path"
] | [((147, 169), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (159, 169), False, 'import pathlib\n')] |
# Generated by Django 3.2 on 2022-04-08 12:00
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('travellifestyleblog22', '0003_category_cat_image'),
]
operations = [
migrations.RemoveField(
model_name='category',
name='cat_image',
),
]
| [
"django.db.migrations.RemoveField"
] | [((239, 302), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""category"""', 'name': '"""cat_image"""'}), "(model_name='category', name='cat_image')\n", (261, 302), False, 'from django.db import migrations\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of f4
# https://github.com/scorphus/f4
# Licensed under the BSD-3-Clause license:
# https://opensource.org/licenses/BSD-3-Clause
# Copyright (c) 2020, <NAME> <<EMAIL>>
from math import log, sqrt
def repr():
return "Iterative using the golden ratio"
def setup():
pass
def header():
return "from math import log, sqrt\n\n\n"
# ---8<---
def even_fib_sum(n):
if n < 1:
return 0
phi = (1 + sqrt(5)) / 2
N = (log(n) + log(5) / 2) // log(phi) + 1
num = (pow(phi, N) - pow(1 - phi, N)) // sqrt(5)
if num > n:
N -= 1
N += 2 - (N % 3)
return ((pow(phi, N) - pow(1 - phi, N)) // sqrt(5) - 1) / 2
| [
"math.sqrt",
"math.log"
] | [((594, 601), 'math.sqrt', 'sqrt', (['(5)'], {}), '(5)\n', (598, 601), False, 'from math import log, sqrt\n'), ((490, 497), 'math.sqrt', 'sqrt', (['(5)'], {}), '(5)\n', (494, 497), False, 'from math import log, sqrt\n'), ((536, 544), 'math.log', 'log', (['phi'], {}), '(phi)\n', (539, 544), False, 'from math import log, sqrt\n'), ((512, 518), 'math.log', 'log', (['n'], {}), '(n)\n', (515, 518), False, 'from math import log, sqrt\n'), ((701, 708), 'math.sqrt', 'sqrt', (['(5)'], {}), '(5)\n', (705, 708), False, 'from math import log, sqrt\n'), ((521, 527), 'math.log', 'log', (['(5)'], {}), '(5)\n', (524, 527), False, 'from math import log, sqrt\n')] |
from django.urls import path
from .consumer import ChatConsumer
websocket_urlpatterns = [
# path('chat/<int:room_name>/',ChatConsumer),
# path('ws/heartbeat/<int:clinic_id>/', HeartbeatConsumer)
path('', ChatConsumer)
] | [
"django.urls.path"
] | [((208, 230), 'django.urls.path', 'path', (['""""""', 'ChatConsumer'], {}), "('', ChatConsumer)\n", (212, 230), False, 'from django.urls import path\n')] |
#!/usr/bin/python
# build.py
#
# Created by <NAME>.
# http://jwsu.ch/ow
import os
import sys
scripts_dir = '/scripts'
chapters_dir = '/chapters'
templates_dir = '/templates'
main_dir = sys.path[0].replace(scripts_dir,'')
# find the highest chapter number
chapter_list = os.listdir(main_dir + chapters_dir + '/')
chapter_list.remove('.DS_Store')
highest_chapter = max([int(chapter.replace('chapter', '').replace('.tex','')) for chapter in chapter_list])
# copy the template chapter to the chapters directory
os.chdir(main_dir + templates_dir)
high_chapter_string = str((highest_chapter))
new_chapter_string = str((highest_chapter + 1))
os.system('cp -R chapter.tex ../chapters/chapter' + new_chapter_string + '.tex')
# add the chapter to the thesis
os.system('sed -i -e \'s/' + high_chapter_string + '}/' + high_chapter_string + '}\\\include{chapters\/chapter' + new_chapter_string + '}/g\' ' + '../thesis.tex')
# exit terminal
os.system('exit')
| [
"os.chdir",
"os.system",
"os.listdir"
] | [((275, 316), 'os.listdir', 'os.listdir', (["(main_dir + chapters_dir + '/')"], {}), "(main_dir + chapters_dir + '/')\n", (285, 316), False, 'import os\n'), ((514, 548), 'os.chdir', 'os.chdir', (['(main_dir + templates_dir)'], {}), '(main_dir + templates_dir)\n', (522, 548), False, 'import os\n'), ((642, 727), 'os.system', 'os.system', (["('cp -R chapter.tex ../chapters/chapter' + new_chapter_string + '.tex')"], {}), "('cp -R chapter.tex ../chapters/chapter' + new_chapter_string + '.tex'\n )\n", (651, 727), False, 'import os\n'), ((756, 926), 'os.system', 'os.system', (['("sed -i -e \'s/" + high_chapter_string + \'}/\' + high_chapter_string +\n \'}\\\\\\\\include{chapters\\\\/chapter\' + new_chapter_string + "}/g\' " +\n \'../thesis.tex\')'], {}), '("sed -i -e \'s/" + high_chapter_string + \'}/\' +\n high_chapter_string + \'}\\\\\\\\include{chapters\\\\/chapter\' +\n new_chapter_string + "}/g\' " + \'../thesis.tex\')\n', (765, 926), False, 'import os\n'), ((935, 952), 'os.system', 'os.system', (['"""exit"""'], {}), "('exit')\n", (944, 952), False, 'import os\n')] |
from frontend.classes.Token import TokenType, Token
# global dict
operator_table = {
'0': 0,
'1': 1,
'eqv': TokenType.EQV,
'le': TokenType.LE,
'ge': TokenType.GE,
'or': TokenType.OR,
'xor': TokenType.XOR,
'nor': TokenType.NOR,
'and': TokenType.AND,
'nand': TokenType.NAND,
'(': TokenType.LPAREN,
')': TokenType.RPAREN,
'not': TokenType.NOT,
}
class Lexer:
"""
Create object-tokens for parser
"""
def __init__(self, word_tokens):
self.tokens = iter(word_tokens)
self.current_token = None
self.next_token()
def next_token(self):
try:
self.current_token = next(self.tokens)
except StopIteration:
self.current_token = None
def generate_tokens(self):
global operator_table
while self.current_token is not None:
if self.current_token in {'0', '1'}:
yield Token(TokenType.BOOL_VAR, operator_table[self.current_token])
self.next_token()
elif self.current_token in operator_table:
yield Token(operator_table[self.current_token])
self.next_token()
else:
raise Exception('Illegal token! {}'.format(self.current_token))
| [
"frontend.classes.Token.Token"
] | [((1042, 1103), 'frontend.classes.Token.Token', 'Token', (['TokenType.BOOL_VAR', 'operator_table[self.current_token]'], {}), '(TokenType.BOOL_VAR, operator_table[self.current_token])\n', (1047, 1103), False, 'from frontend.classes.Token import TokenType, Token\n'), ((1215, 1256), 'frontend.classes.Token.Token', 'Token', (['operator_table[self.current_token]'], {}), '(operator_table[self.current_token])\n', (1220, 1256), False, 'from frontend.classes.Token import TokenType, Token\n')] |
print('=ˆ= ' * 8)
print('{:^32}'.format('Adivinha o Número'))
print('=ˆ= ' * 8)
from random import randint
pc = randint(1, 10)
acerto = False
c = 0
while not acerto:
player = int(input('Tenta seu palpite: '))
c += 1
if player == pc:
acerto = True
else:
if player < pc:
print('tente maior...')
elif player > pc:
print('tente menor...')
print('vc tentou {} vezes até acertar!!!'.format(c))
| [
"random.randint"
] | [((112, 126), 'random.randint', 'randint', (['(1)', '(10)'], {}), '(1, 10)\n', (119, 126), False, 'from random import randint\n')] |
from rpython.rtyper.lltypesystem import rffi, lltype
from rpython.rlib import jit
import reflex_capi as backend
#import cint_capi as backend
identify = backend.identify
pythonize = backend.pythonize
register_pythonizations = backend.register_pythonizations
ts_reflect = backend.ts_reflect
ts_call = backend.ts_call
ts_memory = backend.ts_memory
ts_helper = backend.ts_helper
_C_OPAQUE_PTR = rffi.LONG
_C_OPAQUE_NULL = lltype.nullptr(rffi.LONGP.TO)# ALT: _C_OPAQUE_PTR.TO
C_SCOPE = _C_OPAQUE_PTR
C_NULL_SCOPE = rffi.cast(C_SCOPE, _C_OPAQUE_NULL)
C_TYPE = C_SCOPE
C_NULL_TYPE = C_NULL_SCOPE
C_OBJECT = _C_OPAQUE_PTR
C_NULL_OBJECT = rffi.cast(C_OBJECT, _C_OPAQUE_NULL)
C_METHOD = _C_OPAQUE_PTR
C_INDEX = rffi.LONG
C_INDEX_ARRAY = rffi.LONGP
WLAVC_INDEX = rffi.LONG
C_METHPTRGETTER = lltype.FuncType([C_OBJECT], rffi.VOIDP)
C_METHPTRGETTER_PTR = lltype.Ptr(C_METHPTRGETTER)
def direct_ptradd(ptr, offset):
offset = rffi.cast(rffi.SIZE_T, offset)
jit.promote(offset)
assert lltype.typeOf(ptr) == C_OBJECT
address = rffi.cast(rffi.CCHARP, ptr)
return rffi.cast(C_OBJECT, lltype.direct_ptradd(address, offset))
def exchange_address(ptr, cif_descr, index):
return rffi.ptradd(ptr, cif_descr.exchange_args[index])
c_load_dictionary = backend.c_load_dictionary
# name to opaque C++ scope representation ------------------------------------
_c_num_scopes = rffi.llexternal(
"cppyy_num_scopes",
[C_SCOPE], rffi.INT,
threadsafe=ts_reflect,
compilation_info=backend.eci)
def c_num_scopes(cppscope):
return _c_num_scopes(cppscope.handle)
_c_scope_name = rffi.llexternal(
"cppyy_scope_name",
[C_SCOPE, rffi.INT], rffi.CCHARP,
compilation_info = backend.eci)
def c_scope_name(cppscope, iscope):
return charp2str_free(_c_scope_name(cppscope.handle, iscope))
_c_resolve_name = rffi.llexternal(
"cppyy_resolve_name",
[rffi.CCHARP], rffi.CCHARP,
threadsafe=ts_reflect,
compilation_info=backend.eci)
def c_resolve_name(name):
return charp2str_free(_c_resolve_name(name))
c_get_scope_opaque = rffi.llexternal(
"cppyy_get_scope",
[rffi.CCHARP], C_SCOPE,
threadsafe=ts_reflect,
compilation_info=backend.eci)
c_get_template = rffi.llexternal(
"cppyy_get_template",
[rffi.CCHARP], C_TYPE,
threadsafe=ts_reflect,
compilation_info=backend.eci)
_c_actual_class = rffi.llexternal(
"cppyy_actual_class",
[C_TYPE, C_OBJECT], C_TYPE,
threadsafe=ts_reflect,
compilation_info=backend.eci)
def c_actual_class(cppclass, cppobj):
return _c_actual_class(cppclass.handle, cppobj)
# memory management ----------------------------------------------------------
_c_allocate = rffi.llexternal(
"cppyy_allocate",
[C_TYPE], C_OBJECT,
threadsafe=ts_memory,
compilation_info=backend.eci)
def c_allocate(cppclass):
return _c_allocate(cppclass.handle)
_c_deallocate = rffi.llexternal(
"cppyy_deallocate",
[C_TYPE, C_OBJECT], lltype.Void,
threadsafe=ts_memory,
compilation_info=backend.eci)
def c_deallocate(cppclass, cppobject):
_c_deallocate(cppclass.handle, cppobject)
_c_destruct = rffi.llexternal(
"cppyy_destruct",
[C_TYPE, C_OBJECT], lltype.Void,
threadsafe=ts_call,
compilation_info=backend.eci)
def c_destruct(cppclass, cppobject):
_c_destruct(cppclass.handle, cppobject)
# method/function dispatching ------------------------------------------------
c_call_v = rffi.llexternal(
"cppyy_call_v",
[C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP], lltype.Void,
threadsafe=ts_call,
compilation_info=backend.eci)
c_call_b = rffi.llexternal(
"cppyy_call_b",
[C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP], rffi.UCHAR,
threadsafe=ts_call,
compilation_info=backend.eci)
c_call_c = rffi.llexternal(
"cppyy_call_c",
[C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP], rffi.CHAR,
threadsafe=ts_call,
compilation_info=backend.eci)
c_call_h = rffi.llexternal(
"cppyy_call_h",
[C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP], rffi.SHORT,
threadsafe=ts_call,
compilation_info=backend.eci)
c_call_i = rffi.llexternal(
"cppyy_call_i",
[C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP], rffi.INT,
threadsafe=ts_call,
compilation_info=backend.eci)
c_call_l = rffi.llexternal(
"cppyy_call_l",
[C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP], rffi.LONG,
threadsafe=ts_call,
compilation_info=backend.eci)
c_call_ll = rffi.llexternal(
"cppyy_call_ll",
[C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP], rffi.LONGLONG,
threadsafe=ts_call,
compilation_info=backend.eci)
c_call_f = rffi.llexternal(
"cppyy_call_f",
[C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP], rffi.FLOAT,
threadsafe=ts_call,
compilation_info=backend.eci)
c_call_d = rffi.llexternal(
"cppyy_call_d",
[C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP], rffi.DOUBLE,
threadsafe=ts_call,
compilation_info=backend.eci)
c_call_r = rffi.llexternal(
"cppyy_call_r",
[C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP], rffi.VOIDP,
threadsafe=ts_call,
compilation_info=backend.eci)
c_call_s = rffi.llexternal(
"cppyy_call_s",
[C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP], rffi.CCHARP,
threadsafe=ts_call,
compilation_info=backend.eci)
c_constructor = rffi.llexternal(
"cppyy_constructor",
[C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP], lltype.Void,
threadsafe=ts_call,
compilation_info=backend.eci)
_c_call_o = rffi.llexternal(
"cppyy_call_o",
[C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP, C_TYPE], rffi.LONG,
threadsafe=ts_call,
compilation_info=backend.eci)
def c_call_o(method, cppobj, nargs, args, cppclass):
return _c_call_o(method, cppobj, nargs, args, cppclass.handle)
_c_get_methptr_getter = rffi.llexternal(
"cppyy_get_methptr_getter",
[C_SCOPE, C_INDEX], C_METHPTRGETTER_PTR,
threadsafe=ts_reflect,
compilation_info=backend.eci,
elidable_function=True)
def c_get_methptr_getter(cppscope, index):
return _c_get_methptr_getter(cppscope.handle, index)
# handling of function argument buffer ---------------------------------------
c_allocate_function_args = rffi.llexternal(
"cppyy_allocate_function_args",
[rffi.SIZE_T], rffi.VOIDP,
threadsafe=ts_memory,
compilation_info=backend.eci)
c_deallocate_function_args = rffi.llexternal(
"cppyy_deallocate_function_args",
[rffi.VOIDP], lltype.Void,
threadsafe=ts_memory,
compilation_info=backend.eci)
c_function_arg_sizeof = rffi.llexternal(
"cppyy_function_arg_sizeof",
[], rffi.SIZE_T,
threadsafe=ts_memory,
compilation_info=backend.eci,
elidable_function=True)
c_function_arg_typeoffset = rffi.llexternal(
"cppyy_function_arg_typeoffset",
[], rffi.SIZE_T,
threadsafe=ts_memory,
compilation_info=backend.eci,
elidable_function=True)
# scope reflection information -----------------------------------------------
c_is_namespace = rffi.llexternal(
"cppyy_is_namespace",
[C_SCOPE], rffi.INT,
threadsafe=ts_reflect,
compilation_info=backend.eci)
c_is_enum = rffi.llexternal(
"cppyy_is_enum",
[rffi.CCHARP], rffi.INT,
threadsafe=ts_reflect,
compilation_info=backend.eci)
# type/class reflection information ------------------------------------------
_c_final_name = rffi.llexternal(
"cppyy_final_name",
[C_TYPE], rffi.CCHARP,
threadsafe=ts_reflect,
compilation_info=backend.eci)
def c_final_name(cpptype):
return charp2str_free(_c_final_name(cpptype))
_c_scoped_final_name = rffi.llexternal(
"cppyy_scoped_final_name",
[C_TYPE], rffi.CCHARP,
threadsafe=ts_reflect,
compilation_info=backend.eci)
def c_scoped_final_name(cpptype):
return charp2str_free(_c_scoped_final_name(cpptype))
c_has_complex_hierarchy = rffi.llexternal(
"cppyy_has_complex_hierarchy",
[C_TYPE], rffi.INT,
threadsafe=ts_reflect,
compilation_info=backend.eci)
_c_num_bases = rffi.llexternal(
"cppyy_num_bases",
[C_TYPE], rffi.INT,
threadsafe=ts_reflect,
compilation_info=backend.eci)
def c_num_bases(cppclass):
return _c_num_bases(cppclass.handle)
_c_base_name = rffi.llexternal(
"cppyy_base_name",
[C_TYPE, rffi.INT], rffi.CCHARP,
threadsafe=ts_reflect,
compilation_info=backend.eci)
def c_base_name(cppclass, base_index):
return charp2str_free(_c_base_name(cppclass.handle, base_index))
_c_is_subtype = rffi.llexternal(
"cppyy_is_subtype",
[C_TYPE, C_TYPE], rffi.INT,
threadsafe=ts_reflect,
compilation_info=backend.eci,
elidable_function=True)
@jit.elidable_promote()
def c_is_subtype(derived, base):
if derived == base:
return 1
return _c_is_subtype(derived.handle, base.handle)
_c_base_offset = rffi.llexternal(
"cppyy_base_offset",
[C_TYPE, C_TYPE, C_OBJECT, rffi.INT], rffi.SIZE_T,
threadsafe=ts_reflect,
compilation_info=backend.eci,
elidable_function=True)
@jit.elidable_promote()
def c_base_offset(derived, base, address, direction):
if derived == base:
return 0
return _c_base_offset(derived.handle, base.handle, address, direction)
# method/function reflection information -------------------------------------
_c_num_methods = rffi.llexternal(
"cppyy_num_methods",
[C_SCOPE], rffi.INT,
threadsafe=ts_reflect,
compilation_info=backend.eci)
def c_num_methods(cppscope):
return _c_num_methods(cppscope.handle)
_c_method_index_at = rffi.llexternal(
"cppyy_method_index_at",
[C_SCOPE, rffi.INT], C_INDEX,
threadsafe=ts_reflect,
compilation_info=backend.eci)
def c_method_index_at(cppscope, imethod):
return _c_method_index_at(cppscope.handle, imethod)
_c_method_indices_from_name = rffi.llexternal(
"cppyy_method_indices_from_name",
[C_SCOPE, rffi.CCHARP], C_INDEX_ARRAY,
threadsafe=ts_reflect,
compilation_info=backend.eci)
def c_method_indices_from_name(cppscope, name):
indices = _c_method_indices_from_name(cppscope.handle, name)
if not indices:
return []
py_indices = []
i = 0
index = indices[i]
while index != -1:
i += 1
py_indices.append(index)
index = indices[i]
c_free(rffi.cast(rffi.VOIDP, indices)) # c_free defined below
return py_indices
_c_method_name = rffi.llexternal(
"cppyy_method_name",
[C_SCOPE, C_INDEX], rffi.CCHARP,
threadsafe=ts_reflect,
compilation_info=backend.eci)
def c_method_name(cppscope, index):
return charp2str_free(_c_method_name(cppscope.handle, index))
_c_method_result_type = rffi.llexternal(
"cppyy_method_result_type",
[C_SCOPE, C_INDEX], rffi.CCHARP,
threadsafe=ts_reflect,
compilation_info=backend.eci)
def c_method_result_type(cppscope, index):
return charp2str_free(_c_method_result_type(cppscope.handle, index))
_c_method_num_args = rffi.llexternal(
"cppyy_method_num_args",
[C_SCOPE, C_INDEX], rffi.INT,
threadsafe=ts_reflect,
compilation_info=backend.eci)
def c_method_num_args(cppscope, index):
return _c_method_num_args(cppscope.handle, index)
_c_method_req_args = rffi.llexternal(
"cppyy_method_req_args",
[C_SCOPE, C_INDEX], rffi.INT,
threadsafe=ts_reflect,
compilation_info=backend.eci)
def c_method_req_args(cppscope, index):
return _c_method_req_args(cppscope.handle, index)
_c_method_arg_type = rffi.llexternal(
"cppyy_method_arg_type",
[C_SCOPE, C_INDEX, rffi.INT], rffi.CCHARP,
threadsafe=ts_reflect,
compilation_info=backend.eci)
def c_method_arg_type(cppscope, index, arg_index):
return charp2str_free(_c_method_arg_type(cppscope.handle, index, arg_index))
_c_method_arg_default = rffi.llexternal(
"cppyy_method_arg_default",
[C_SCOPE, C_INDEX, rffi.INT], rffi.CCHARP,
threadsafe=ts_reflect,
compilation_info=backend.eci)
def c_method_arg_default(cppscope, index, arg_index):
return charp2str_free(_c_method_arg_default(cppscope.handle, index, arg_index))
_c_method_signature = rffi.llexternal(
"cppyy_method_signature",
[C_SCOPE, C_INDEX], rffi.CCHARP,
threadsafe=ts_reflect,
compilation_info=backend.eci)
def c_method_signature(cppscope, index):
return charp2str_free(_c_method_signature(cppscope.handle, index))
_c_get_method = rffi.llexternal(
"cppyy_get_method",
[C_SCOPE, C_INDEX], C_METHOD,
threadsafe=ts_reflect,
compilation_info=backend.eci)
def c_get_method(cppscope, index):
return _c_get_method(cppscope.handle, index)
_c_get_global_operator = rffi.llexternal(
"cppyy_get_global_operator",
[C_SCOPE, C_SCOPE, C_SCOPE, rffi.CCHARP], WLAVC_INDEX,
threadsafe=ts_reflect,
compilation_info=backend.eci)
def c_get_global_operator(nss, lc, rc, op):
if nss is not None:
return _c_get_global_operator(nss.handle, lc.handle, rc.handle, op)
return rffi.cast(WLAVC_INDEX, -1)
# method properties ----------------------------------------------------------
_c_is_constructor = rffi.llexternal(
"cppyy_is_constructor",
[C_TYPE, C_INDEX], rffi.INT,
threadsafe=ts_reflect,
compilation_info=backend.eci)
def c_is_constructor(cppclass, index):
return _c_is_constructor(cppclass.handle, index)
_c_is_staticmethod = rffi.llexternal(
"cppyy_is_staticmethod",
[C_TYPE, C_INDEX], rffi.INT,
threadsafe=ts_reflect,
compilation_info=backend.eci)
def c_is_staticmethod(cppclass, index):
return _c_is_staticmethod(cppclass.handle, index)
# data member reflection information -----------------------------------------
_c_num_datamembers = rffi.llexternal(
"cppyy_num_datamembers",
[C_SCOPE], rffi.INT,
threadsafe=ts_reflect,
compilation_info=backend.eci)
def c_num_datamembers(cppscope):
return _c_num_datamembers(cppscope.handle)
_c_datamember_name = rffi.llexternal(
"cppyy_datamember_name",
[C_SCOPE, rffi.INT], rffi.CCHARP,
threadsafe=ts_reflect,
compilation_info=backend.eci)
def c_datamember_name(cppscope, datamember_index):
return charp2str_free(_c_datamember_name(cppscope.handle, datamember_index))
_c_datamember_type = rffi.llexternal(
"cppyy_datamember_type",
[C_SCOPE, rffi.INT], rffi.CCHARP,
threadsafe=ts_reflect,
compilation_info=backend.eci)
def c_datamember_type(cppscope, datamember_index):
return charp2str_free(_c_datamember_type(cppscope.handle, datamember_index))
_c_datamember_offset = rffi.llexternal(
"cppyy_datamember_offset",
[C_SCOPE, rffi.INT], rffi.SIZE_T,
threadsafe=ts_reflect,
compilation_info=backend.eci)
def c_datamember_offset(cppscope, datamember_index):
return _c_datamember_offset(cppscope.handle, datamember_index)
_c_datamember_index = rffi.llexternal(
"cppyy_datamember_index",
[C_SCOPE, rffi.CCHARP], rffi.INT,
threadsafe=ts_reflect,
compilation_info=backend.eci)
def c_datamember_index(cppscope, name):
return _c_datamember_index(cppscope.handle, name)
# data member properties -----------------------------------------------------
_c_is_publicdata = rffi.llexternal(
"cppyy_is_publicdata",
[C_SCOPE, rffi.INT], rffi.INT,
threadsafe=ts_reflect,
compilation_info=backend.eci)
def c_is_publicdata(cppscope, datamember_index):
return _c_is_publicdata(cppscope.handle, datamember_index)
_c_is_staticdata = rffi.llexternal(
"cppyy_is_staticdata",
[C_SCOPE, rffi.INT], rffi.INT,
threadsafe=ts_reflect,
compilation_info=backend.eci)
def c_is_staticdata(cppscope, datamember_index):
return _c_is_staticdata(cppscope.handle, datamember_index)
# misc helpers ---------------------------------------------------------------
c_strtoll = rffi.llexternal(
"cppyy_strtoll",
[rffi.CCHARP], rffi.LONGLONG,
threadsafe=ts_helper,
compilation_info=backend.eci)
c_strtoull = rffi.llexternal(
"cppyy_strtoull",
[rffi.CCHARP], rffi.ULONGLONG,
threadsafe=ts_helper,
compilation_info=backend.eci)
c_free = rffi.llexternal(
"cppyy_free",
[rffi.VOIDP], lltype.Void,
threadsafe=ts_memory,
compilation_info=backend.eci)
def charp2str_free(charp):
string = rffi.charp2str(charp)
voidp = rffi.cast(rffi.VOIDP, charp)
c_free(voidp)
return string
c_charp2stdstring = rffi.llexternal(
"cppyy_charp2stdstring",
[rffi.CCHARP], C_OBJECT,
threadsafe=ts_helper,
compilation_info=backend.eci)
c_stdstring2stdstring = rffi.llexternal(
"cppyy_stdstring2stdstring",
[C_OBJECT], C_OBJECT,
threadsafe=ts_helper,
compilation_info=backend.eci)
c_assign2stdstring = rffi.llexternal(
"cppyy_assign2stdstring",
[C_OBJECT, rffi.CCHARP], lltype.Void,
threadsafe=ts_helper,
compilation_info=backend.eci)
c_free_stdstring = rffi.llexternal(
"cppyy_free_stdstring",
[C_OBJECT], lltype.Void,
threadsafe=ts_helper,
compilation_info=backend.eci)
| [
"rpython.rtyper.lltypesystem.rffi.ptradd",
"rpython.rtyper.lltypesystem.lltype.typeOf",
"rpython.rtyper.lltypesystem.lltype.direct_ptradd",
"rpython.rtyper.lltypesystem.lltype.Ptr",
"rpython.rlib.jit.elidable_promote",
"rpython.rtyper.lltypesystem.rffi.charp2str",
"rpython.rlib.jit.promote",
"rpython.rtyper.lltypesystem.rffi.llexternal",
"rpython.rtyper.lltypesystem.lltype.FuncType",
"rpython.rtyper.lltypesystem.lltype.nullptr",
"rpython.rtyper.lltypesystem.rffi.cast"
] | [((428, 457), 'rpython.rtyper.lltypesystem.lltype.nullptr', 'lltype.nullptr', (['rffi.LONGP.TO'], {}), '(rffi.LONGP.TO)\n', (442, 457), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((521, 555), 'rpython.rtyper.lltypesystem.rffi.cast', 'rffi.cast', (['C_SCOPE', '_C_OPAQUE_NULL'], {}), '(C_SCOPE, _C_OPAQUE_NULL)\n', (530, 555), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((643, 678), 'rpython.rtyper.lltypesystem.rffi.cast', 'rffi.cast', (['C_OBJECT', '_C_OPAQUE_NULL'], {}), '(C_OBJECT, _C_OPAQUE_NULL)\n', (652, 678), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((795, 834), 'rpython.rtyper.lltypesystem.lltype.FuncType', 'lltype.FuncType', (['[C_OBJECT]', 'rffi.VOIDP'], {}), '([C_OBJECT], rffi.VOIDP)\n', (810, 834), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((857, 884), 'rpython.rtyper.lltypesystem.lltype.Ptr', 'lltype.Ptr', (['C_METHPTRGETTER'], {}), '(C_METHPTRGETTER)\n', (867, 884), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((1389, 1503), 'rpython.rtyper.lltypesystem.rffi.llexternal', 'rffi.llexternal', (['"""cppyy_num_scopes"""', '[C_SCOPE]', 'rffi.INT'], {'threadsafe': 'ts_reflect', 'compilation_info': 'backend.eci'}), "('cppyy_num_scopes', [C_SCOPE], rffi.INT, threadsafe=\n ts_reflect, compilation_info=backend.eci)\n", (1404, 1503), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((1602, 1705), 'rpython.rtyper.lltypesystem.rffi.llexternal', 'rffi.llexternal', (['"""cppyy_scope_name"""', '[C_SCOPE, rffi.INT]', 'rffi.CCHARP'], {'compilation_info': 'backend.eci'}), "('cppyy_scope_name', [C_SCOPE, rffi.INT], rffi.CCHARP,\n compilation_info=backend.eci)\n", (1617, 1705), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((1838, 1960), 'rpython.rtyper.lltypesystem.rffi.llexternal', 'rffi.llexternal', (['"""cppyy_resolve_name"""', '[rffi.CCHARP]', 'rffi.CCHARP'], {'threadsafe': 'ts_reflect', 'compilation_info': 'backend.eci'}), "('cppyy_resolve_name', [rffi.CCHARP], rffi.CCHARP,\n threadsafe=ts_reflect, compilation_info=backend.eci)\n", (1853, 1960), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((2070, 2186), 'rpython.rtyper.lltypesystem.rffi.llexternal', 'rffi.llexternal', (['"""cppyy_get_scope"""', '[rffi.CCHARP]', 'C_SCOPE'], {'threadsafe': 'ts_reflect', 'compilation_info': 'backend.eci'}), "('cppyy_get_scope', [rffi.CCHARP], C_SCOPE, threadsafe=\n ts_reflect, compilation_info=backend.eci)\n", (2085, 2186), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((2216, 2334), 'rpython.rtyper.lltypesystem.rffi.llexternal', 'rffi.llexternal', (['"""cppyy_get_template"""', '[rffi.CCHARP]', 'C_TYPE'], {'threadsafe': 'ts_reflect', 'compilation_info': 'backend.eci'}), "('cppyy_get_template', [rffi.CCHARP], C_TYPE, threadsafe=\n ts_reflect, compilation_info=backend.eci)\n", (2231, 2334), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((2365, 2487), 'rpython.rtyper.lltypesystem.rffi.llexternal', 'rffi.llexternal', (['"""cppyy_actual_class"""', '[C_TYPE, C_OBJECT]', 'C_TYPE'], {'threadsafe': 'ts_reflect', 'compilation_info': 'backend.eci'}), "('cppyy_actual_class', [C_TYPE, C_OBJECT], C_TYPE,\n threadsafe=ts_reflect, compilation_info=backend.eci)\n", (2380, 2487), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((2685, 2794), 'rpython.rtyper.lltypesystem.rffi.llexternal', 'rffi.llexternal', (['"""cppyy_allocate"""', '[C_TYPE]', 'C_OBJECT'], {'threadsafe': 'ts_memory', 
'compilation_info': 'backend.eci'}), "('cppyy_allocate', [C_TYPE], C_OBJECT, threadsafe=ts_memory,\n compilation_info=backend.eci)\n", (2700, 2794), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((2890, 3014), 'rpython.rtyper.lltypesystem.rffi.llexternal', 'rffi.llexternal', (['"""cppyy_deallocate"""', '[C_TYPE, C_OBJECT]', 'lltype.Void'], {'threadsafe': 'ts_memory', 'compilation_info': 'backend.eci'}), "('cppyy_deallocate', [C_TYPE, C_OBJECT], lltype.Void,\n threadsafe=ts_memory, compilation_info=backend.eci)\n", (2905, 3014), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((3127, 3247), 'rpython.rtyper.lltypesystem.rffi.llexternal', 'rffi.llexternal', (['"""cppyy_destruct"""', '[C_TYPE, C_OBJECT]', 'lltype.Void'], {'threadsafe': 'ts_call', 'compilation_info': 'backend.eci'}), "('cppyy_destruct', [C_TYPE, C_OBJECT], lltype.Void,\n threadsafe=ts_call, compilation_info=backend.eci)\n", (3142, 3247), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((3433, 3575), 'rpython.rtyper.lltypesystem.rffi.llexternal', 'rffi.llexternal', (['"""cppyy_call_v"""', '[C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP]', 'lltype.Void'], {'threadsafe': 'ts_call', 'compilation_info': 'backend.eci'}), "('cppyy_call_v', [C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP],\n lltype.Void, threadsafe=ts_call, compilation_info=backend.eci)\n", (3448, 3575), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((3600, 3741), 'rpython.rtyper.lltypesystem.rffi.llexternal', 'rffi.llexternal', (['"""cppyy_call_b"""', '[C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP]', 'rffi.UCHAR'], {'threadsafe': 'ts_call', 'compilation_info': 'backend.eci'}), "('cppyy_call_b', [C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP],\n rffi.UCHAR, threadsafe=ts_call, compilation_info=backend.eci)\n", (3615, 3741), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((3766, 3906), 'rpython.rtyper.lltypesystem.rffi.llexternal', 'rffi.llexternal', (['"""cppyy_call_c"""', '[C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP]', 'rffi.CHAR'], {'threadsafe': 'ts_call', 'compilation_info': 'backend.eci'}), "('cppyy_call_c', [C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP],\n rffi.CHAR, threadsafe=ts_call, compilation_info=backend.eci)\n", (3781, 3906), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((3931, 4072), 'rpython.rtyper.lltypesystem.rffi.llexternal', 'rffi.llexternal', (['"""cppyy_call_h"""', '[C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP]', 'rffi.SHORT'], {'threadsafe': 'ts_call', 'compilation_info': 'backend.eci'}), "('cppyy_call_h', [C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP],\n rffi.SHORT, threadsafe=ts_call, compilation_info=backend.eci)\n", (3946, 4072), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((4097, 4236), 'rpython.rtyper.lltypesystem.rffi.llexternal', 'rffi.llexternal', (['"""cppyy_call_i"""', '[C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP]', 'rffi.INT'], {'threadsafe': 'ts_call', 'compilation_info': 'backend.eci'}), "('cppyy_call_i', [C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP],\n rffi.INT, threadsafe=ts_call, compilation_info=backend.eci)\n", (4112, 4236), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((4261, 4401), 'rpython.rtyper.lltypesystem.rffi.llexternal', 'rffi.llexternal', (['"""cppyy_call_l"""', '[C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP]', 'rffi.LONG'], {'threadsafe': 'ts_call', 'compilation_info': 'backend.eci'}), "('cppyy_call_l', [C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP],\n rffi.LONG, threadsafe=ts_call, 
compilation_info=backend.eci)\n", (4276, 4401), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((4427, 4572), 'rpython.rtyper.lltypesystem.rffi.llexternal', 'rffi.llexternal', (['"""cppyy_call_ll"""', '[C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP]', 'rffi.LONGLONG'], {'threadsafe': 'ts_call', 'compilation_info': 'backend.eci'}), "('cppyy_call_ll', [C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP],\n rffi.LONGLONG, threadsafe=ts_call, compilation_info=backend.eci)\n", (4442, 4572), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((4597, 4738), 'rpython.rtyper.lltypesystem.rffi.llexternal', 'rffi.llexternal', (['"""cppyy_call_f"""', '[C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP]', 'rffi.FLOAT'], {'threadsafe': 'ts_call', 'compilation_info': 'backend.eci'}), "('cppyy_call_f', [C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP],\n rffi.FLOAT, threadsafe=ts_call, compilation_info=backend.eci)\n", (4612, 4738), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((4763, 4905), 'rpython.rtyper.lltypesystem.rffi.llexternal', 'rffi.llexternal', (['"""cppyy_call_d"""', '[C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP]', 'rffi.DOUBLE'], {'threadsafe': 'ts_call', 'compilation_info': 'backend.eci'}), "('cppyy_call_d', [C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP],\n rffi.DOUBLE, threadsafe=ts_call, compilation_info=backend.eci)\n", (4778, 4905), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((4931, 5072), 'rpython.rtyper.lltypesystem.rffi.llexternal', 'rffi.llexternal', (['"""cppyy_call_r"""', '[C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP]', 'rffi.VOIDP'], {'threadsafe': 'ts_call', 'compilation_info': 'backend.eci'}), "('cppyy_call_r', [C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP],\n rffi.VOIDP, threadsafe=ts_call, compilation_info=backend.eci)\n", (4946, 5072), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((5097, 5239), 'rpython.rtyper.lltypesystem.rffi.llexternal', 'rffi.llexternal', (['"""cppyy_call_s"""', '[C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP]', 'rffi.CCHARP'], {'threadsafe': 'ts_call', 'compilation_info': 'backend.eci'}), "('cppyy_call_s', [C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP],\n rffi.CCHARP, threadsafe=ts_call, compilation_info=backend.eci)\n", (5112, 5239), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((5270, 5418), 'rpython.rtyper.lltypesystem.rffi.llexternal', 'rffi.llexternal', (['"""cppyy_constructor"""', '[C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP]', 'lltype.Void'], {'threadsafe': 'ts_call', 'compilation_info': 'backend.eci'}), "('cppyy_constructor', [C_METHOD, C_OBJECT, rffi.INT, rffi.\n VOIDP], lltype.Void, threadsafe=ts_call, compilation_info=backend.eci)\n", (5285, 5418), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((5443, 5591), 'rpython.rtyper.lltypesystem.rffi.llexternal', 'rffi.llexternal', (['"""cppyy_call_o"""', '[C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP, C_TYPE]', 'rffi.LONG'], {'threadsafe': 'ts_call', 'compilation_info': 'backend.eci'}), "('cppyy_call_o', [C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP,\n C_TYPE], rffi.LONG, threadsafe=ts_call, compilation_info=backend.eci)\n", (5458, 5591), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((5750, 5920), 'rpython.rtyper.lltypesystem.rffi.llexternal', 'rffi.llexternal', (['"""cppyy_get_methptr_getter"""', '[C_SCOPE, C_INDEX]', 'C_METHPTRGETTER_PTR'], {'threadsafe': 'ts_reflect', 'compilation_info': 'backend.eci', 'elidable_function': '(True)'}), "('cppyy_get_methptr_getter', [C_SCOPE, C_INDEX],\n 
C_METHPTRGETTER_PTR, threadsafe=ts_reflect, compilation_info=backend.\n eci, elidable_function=True)\n", (5765, 5920), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((6140, 6270), 'rpython.rtyper.lltypesystem.rffi.llexternal', 'rffi.llexternal', (['"""cppyy_allocate_function_args"""', '[rffi.SIZE_T]', 'rffi.VOIDP'], {'threadsafe': 'ts_memory', 'compilation_info': 'backend.eci'}), "('cppyy_allocate_function_args', [rffi.SIZE_T], rffi.VOIDP,\n threadsafe=ts_memory, compilation_info=backend.eci)\n", (6155, 6270), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((6313, 6445), 'rpython.rtyper.lltypesystem.rffi.llexternal', 'rffi.llexternal', (['"""cppyy_deallocate_function_args"""', '[rffi.VOIDP]', 'lltype.Void'], {'threadsafe': 'ts_memory', 'compilation_info': 'backend.eci'}), "('cppyy_deallocate_function_args', [rffi.VOIDP], lltype.Void,\n threadsafe=ts_memory, compilation_info=backend.eci)\n", (6328, 6445), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((6483, 6625), 'rpython.rtyper.lltypesystem.rffi.llexternal', 'rffi.llexternal', (['"""cppyy_function_arg_sizeof"""', '[]', 'rffi.SIZE_T'], {'threadsafe': 'ts_memory', 'compilation_info': 'backend.eci', 'elidable_function': '(True)'}), "('cppyy_function_arg_sizeof', [], rffi.SIZE_T, threadsafe=\n ts_memory, compilation_info=backend.eci, elidable_function=True)\n", (6498, 6625), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((6670, 6815), 'rpython.rtyper.lltypesystem.rffi.llexternal', 'rffi.llexternal', (['"""cppyy_function_arg_typeoffset"""', '[]', 'rffi.SIZE_T'], {'threadsafe': 'ts_memory', 'compilation_info': 'backend.eci', 'elidable_function': '(True)'}), "('cppyy_function_arg_typeoffset', [], rffi.SIZE_T,\n threadsafe=ts_memory, compilation_info=backend.eci, elidable_function=True)\n", (6685, 6815), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((6930, 7046), 'rpython.rtyper.lltypesystem.rffi.llexternal', 'rffi.llexternal', (['"""cppyy_is_namespace"""', '[C_SCOPE]', 'rffi.INT'], {'threadsafe': 'ts_reflect', 'compilation_info': 'backend.eci'}), "('cppyy_is_namespace', [C_SCOPE], rffi.INT, threadsafe=\n ts_reflect, compilation_info=backend.eci)\n", (6945, 7046), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((7071, 7186), 'rpython.rtyper.lltypesystem.rffi.llexternal', 'rffi.llexternal', (['"""cppyy_is_enum"""', '[rffi.CCHARP]', 'rffi.INT'], {'threadsafe': 'ts_reflect', 'compilation_info': 'backend.eci'}), "('cppyy_is_enum', [rffi.CCHARP], rffi.INT, threadsafe=\n ts_reflect, compilation_info=backend.eci)\n", (7086, 7186), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((7295, 7411), 'rpython.rtyper.lltypesystem.rffi.llexternal', 'rffi.llexternal', (['"""cppyy_final_name"""', '[C_TYPE]', 'rffi.CCHARP'], {'threadsafe': 'ts_reflect', 'compilation_info': 'backend.eci'}), "('cppyy_final_name', [C_TYPE], rffi.CCHARP, threadsafe=\n ts_reflect, compilation_info=backend.eci)\n", (7310, 7411), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((7524, 7646), 'rpython.rtyper.lltypesystem.rffi.llexternal', 'rffi.llexternal', (['"""cppyy_scoped_final_name"""', '[C_TYPE]', 'rffi.CCHARP'], {'threadsafe': 'ts_reflect', 'compilation_info': 'backend.eci'}), "('cppyy_scoped_final_name', [C_TYPE], rffi.CCHARP,\n threadsafe=ts_reflect, compilation_info=backend.eci)\n", (7539, 7646), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((7777, 7900), 'rpython.rtyper.lltypesystem.rffi.llexternal', 
'rffi.llexternal', (['"""cppyy_has_complex_hierarchy"""', '[C_TYPE]', 'rffi.INT'], {'threadsafe': 'ts_reflect', 'compilation_info': 'backend.eci'}), "('cppyy_has_complex_hierarchy', [C_TYPE], rffi.INT,\n threadsafe=ts_reflect, compilation_info=backend.eci)\n", (7792, 7900), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((7929, 8041), 'rpython.rtyper.lltypesystem.rffi.llexternal', 'rffi.llexternal', (['"""cppyy_num_bases"""', '[C_TYPE]', 'rffi.INT'], {'threadsafe': 'ts_reflect', 'compilation_info': 'backend.eci'}), "('cppyy_num_bases', [C_TYPE], rffi.INT, threadsafe=\n ts_reflect, compilation_info=backend.eci)\n", (7944, 8041), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((8137, 8261), 'rpython.rtyper.lltypesystem.rffi.llexternal', 'rffi.llexternal', (['"""cppyy_base_name"""', '[C_TYPE, rffi.INT]', 'rffi.CCHARP'], {'threadsafe': 'ts_reflect', 'compilation_info': 'backend.eci'}), "('cppyy_base_name', [C_TYPE, rffi.INT], rffi.CCHARP,\n threadsafe=ts_reflect, compilation_info=backend.eci)\n", (8152, 8261), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((8399, 8544), 'rpython.rtyper.lltypesystem.rffi.llexternal', 'rffi.llexternal', (['"""cppyy_is_subtype"""', '[C_TYPE, C_TYPE]', 'rffi.INT'], {'threadsafe': 'ts_reflect', 'compilation_info': 'backend.eci', 'elidable_function': '(True)'}), "('cppyy_is_subtype', [C_TYPE, C_TYPE], rffi.INT, threadsafe=\n ts_reflect, compilation_info=backend.eci, elidable_function=True)\n", (8414, 8544), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((8562, 8584), 'rpython.rlib.jit.elidable_promote', 'jit.elidable_promote', ([], {}), '()\n', (8582, 8584), False, 'from rpython.rlib import jit\n'), ((8731, 8903), 'rpython.rtyper.lltypesystem.rffi.llexternal', 'rffi.llexternal', (['"""cppyy_base_offset"""', '[C_TYPE, C_TYPE, C_OBJECT, rffi.INT]', 'rffi.SIZE_T'], {'threadsafe': 'ts_reflect', 'compilation_info': 'backend.eci', 'elidable_function': '(True)'}), "('cppyy_base_offset', [C_TYPE, C_TYPE, C_OBJECT, rffi.INT],\n rffi.SIZE_T, threadsafe=ts_reflect, compilation_info=backend.eci,\n elidable_function=True)\n", (8746, 8903), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((8918, 8940), 'rpython.rlib.jit.elidable_promote', 'jit.elidable_promote', ([], {}), '()\n', (8938, 8940), False, 'from rpython.rlib import jit\n'), ((9208, 9323), 'rpython.rtyper.lltypesystem.rffi.llexternal', 'rffi.llexternal', (['"""cppyy_num_methods"""', '[C_SCOPE]', 'rffi.INT'], {'threadsafe': 'ts_reflect', 'compilation_info': 'backend.eci'}), "('cppyy_num_methods', [C_SCOPE], rffi.INT, threadsafe=\n ts_reflect, compilation_info=backend.eci)\n", (9223, 9323), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((9429, 9556), 'rpython.rtyper.lltypesystem.rffi.llexternal', 'rffi.llexternal', (['"""cppyy_method_index_at"""', '[C_SCOPE, rffi.INT]', 'C_INDEX'], {'threadsafe': 'ts_reflect', 'compilation_info': 'backend.eci'}), "('cppyy_method_index_at', [C_SCOPE, rffi.INT], C_INDEX,\n threadsafe=ts_reflect, compilation_info=backend.eci)\n", (9444, 9556), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((9698, 9843), 'rpython.rtyper.lltypesystem.rffi.llexternal', 'rffi.llexternal', (['"""cppyy_method_indices_from_name"""', '[C_SCOPE, rffi.CCHARP]', 'C_INDEX_ARRAY'], {'threadsafe': 'ts_reflect', 'compilation_info': 'backend.eci'}), "('cppyy_method_indices_from_name', [C_SCOPE, rffi.CCHARP],\n C_INDEX_ARRAY, threadsafe=ts_reflect, compilation_info=backend.eci)\n", (9713, 
9843), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((10267, 10393), 'rpython.rtyper.lltypesystem.rffi.llexternal', 'rffi.llexternal', (['"""cppyy_method_name"""', '[C_SCOPE, C_INDEX]', 'rffi.CCHARP'], {'threadsafe': 'ts_reflect', 'compilation_info': 'backend.eci'}), "('cppyy_method_name', [C_SCOPE, C_INDEX], rffi.CCHARP,\n threadsafe=ts_reflect, compilation_info=backend.eci)\n", (10282, 10393), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((10533, 10666), 'rpython.rtyper.lltypesystem.rffi.llexternal', 'rffi.llexternal', (['"""cppyy_method_result_type"""', '[C_SCOPE, C_INDEX]', 'rffi.CCHARP'], {'threadsafe': 'ts_reflect', 'compilation_info': 'backend.eci'}), "('cppyy_method_result_type', [C_SCOPE, C_INDEX], rffi.CCHARP,\n threadsafe=ts_reflect, compilation_info=backend.eci)\n", (10548, 10666), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((10817, 10944), 'rpython.rtyper.lltypesystem.rffi.llexternal', 'rffi.llexternal', (['"""cppyy_method_num_args"""', '[C_SCOPE, C_INDEX]', 'rffi.INT'], {'threadsafe': 'ts_reflect', 'compilation_info': 'backend.eci'}), "('cppyy_method_num_args', [C_SCOPE, C_INDEX], rffi.INT,\n threadsafe=ts_reflect, compilation_info=backend.eci)\n", (10832, 10944), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((11073, 11200), 'rpython.rtyper.lltypesystem.rffi.llexternal', 'rffi.llexternal', (['"""cppyy_method_req_args"""', '[C_SCOPE, C_INDEX]', 'rffi.INT'], {'threadsafe': 'ts_reflect', 'compilation_info': 'backend.eci'}), "('cppyy_method_req_args', [C_SCOPE, C_INDEX], rffi.INT,\n threadsafe=ts_reflect, compilation_info=backend.eci)\n", (11088, 11200), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((11329, 11470), 'rpython.rtyper.lltypesystem.rffi.llexternal', 'rffi.llexternal', (['"""cppyy_method_arg_type"""', '[C_SCOPE, C_INDEX, rffi.INT]', 'rffi.CCHARP'], {'threadsafe': 'ts_reflect', 'compilation_info': 'backend.eci'}), "('cppyy_method_arg_type', [C_SCOPE, C_INDEX, rffi.INT], rffi\n .CCHARP, threadsafe=ts_reflect, compilation_info=backend.eci)\n", (11344, 11470), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((11639, 11782), 'rpython.rtyper.lltypesystem.rffi.llexternal', 'rffi.llexternal', (['"""cppyy_method_arg_default"""', '[C_SCOPE, C_INDEX, rffi.INT]', 'rffi.CCHARP'], {'threadsafe': 'ts_reflect', 'compilation_info': 'backend.eci'}), "('cppyy_method_arg_default', [C_SCOPE, C_INDEX, rffi.INT],\n rffi.CCHARP, threadsafe=ts_reflect, compilation_info=backend.eci)\n", (11654, 11782), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((11956, 12087), 'rpython.rtyper.lltypesystem.rffi.llexternal', 'rffi.llexternal', (['"""cppyy_method_signature"""', '[C_SCOPE, C_INDEX]', 'rffi.CCHARP'], {'threadsafe': 'ts_reflect', 'compilation_info': 'backend.eci'}), "('cppyy_method_signature', [C_SCOPE, C_INDEX], rffi.CCHARP,\n threadsafe=ts_reflect, compilation_info=backend.eci)\n", (11971, 12087), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((12230, 12352), 'rpython.rtyper.lltypesystem.rffi.llexternal', 'rffi.llexternal', (['"""cppyy_get_method"""', '[C_SCOPE, C_INDEX]', 'C_METHOD'], {'threadsafe': 'ts_reflect', 'compilation_info': 'backend.eci'}), "('cppyy_get_method', [C_SCOPE, C_INDEX], C_METHOD,\n threadsafe=ts_reflect, compilation_info=backend.eci)\n", (12245, 12352), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((12475, 12636), 'rpython.rtyper.lltypesystem.rffi.llexternal', 'rffi.llexternal', 
(['"""cppyy_get_global_operator"""', '[C_SCOPE, C_SCOPE, C_SCOPE, rffi.CCHARP]', 'WLAVC_INDEX'], {'threadsafe': 'ts_reflect', 'compilation_info': 'backend.eci'}), "('cppyy_get_global_operator', [C_SCOPE, C_SCOPE, C_SCOPE,\n rffi.CCHARP], WLAVC_INDEX, threadsafe=ts_reflect, compilation_info=\n backend.eci)\n", (12490, 12636), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((12927, 13052), 'rpython.rtyper.lltypesystem.rffi.llexternal', 'rffi.llexternal', (['"""cppyy_is_constructor"""', '[C_TYPE, C_INDEX]', 'rffi.INT'], {'threadsafe': 'ts_reflect', 'compilation_info': 'backend.eci'}), "('cppyy_is_constructor', [C_TYPE, C_INDEX], rffi.INT,\n threadsafe=ts_reflect, compilation_info=backend.eci)\n", (12942, 13052), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((13179, 13305), 'rpython.rtyper.lltypesystem.rffi.llexternal', 'rffi.llexternal', (['"""cppyy_is_staticmethod"""', '[C_TYPE, C_INDEX]', 'rffi.INT'], {'threadsafe': 'ts_reflect', 'compilation_info': 'backend.eci'}), "('cppyy_is_staticmethod', [C_TYPE, C_INDEX], rffi.INT,\n threadsafe=ts_reflect, compilation_info=backend.eci)\n", (13194, 13305), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((13514, 13633), 'rpython.rtyper.lltypesystem.rffi.llexternal', 'rffi.llexternal', (['"""cppyy_num_datamembers"""', '[C_SCOPE]', 'rffi.INT'], {'threadsafe': 'ts_reflect', 'compilation_info': 'backend.eci'}), "('cppyy_num_datamembers', [C_SCOPE], rffi.INT, threadsafe=\n ts_reflect, compilation_info=backend.eci)\n", (13529, 13633), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((13747, 13878), 'rpython.rtyper.lltypesystem.rffi.llexternal', 'rffi.llexternal', (['"""cppyy_datamember_name"""', '[C_SCOPE, rffi.INT]', 'rffi.CCHARP'], {'threadsafe': 'ts_reflect', 'compilation_info': 'backend.eci'}), "('cppyy_datamember_name', [C_SCOPE, rffi.INT], rffi.CCHARP,\n threadsafe=ts_reflect, compilation_info=backend.eci)\n", (13762, 13878), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((14045, 14176), 'rpython.rtyper.lltypesystem.rffi.llexternal', 'rffi.llexternal', (['"""cppyy_datamember_type"""', '[C_SCOPE, rffi.INT]', 'rffi.CCHARP'], {'threadsafe': 'ts_reflect', 'compilation_info': 'backend.eci'}), "('cppyy_datamember_type', [C_SCOPE, rffi.INT], rffi.CCHARP,\n threadsafe=ts_reflect, compilation_info=backend.eci)\n", (14060, 14176), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((14345, 14478), 'rpython.rtyper.lltypesystem.rffi.llexternal', 'rffi.llexternal', (['"""cppyy_datamember_offset"""', '[C_SCOPE, rffi.INT]', 'rffi.SIZE_T'], {'threadsafe': 'ts_reflect', 'compilation_info': 'backend.eci'}), "('cppyy_datamember_offset', [C_SCOPE, rffi.INT], rffi.SIZE_T,\n threadsafe=ts_reflect, compilation_info=backend.eci)\n", (14360, 14478), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((14635, 14767), 'rpython.rtyper.lltypesystem.rffi.llexternal', 'rffi.llexternal', (['"""cppyy_datamember_index"""', '[C_SCOPE, rffi.CCHARP]', 'rffi.INT'], {'threadsafe': 'ts_reflect', 'compilation_info': 'backend.eci'}), "('cppyy_datamember_index', [C_SCOPE, rffi.CCHARP], rffi.INT,\n threadsafe=ts_reflect, compilation_info=backend.eci)\n", (14650, 14767), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((14974, 15100), 'rpython.rtyper.lltypesystem.rffi.llexternal', 'rffi.llexternal', (['"""cppyy_is_publicdata"""', '[C_SCOPE, rffi.INT]', 'rffi.INT'], {'threadsafe': 'ts_reflect', 'compilation_info': 'backend.eci'}), "('cppyy_is_publicdata', 
[C_SCOPE, rffi.INT], rffi.INT,\n threadsafe=ts_reflect, compilation_info=backend.eci)\n", (14989, 15100), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((15245, 15371), 'rpython.rtyper.lltypesystem.rffi.llexternal', 'rffi.llexternal', (['"""cppyy_is_staticdata"""', '[C_SCOPE, rffi.INT]', 'rffi.INT'], {'threadsafe': 'ts_reflect', 'compilation_info': 'backend.eci'}), "('cppyy_is_staticdata', [C_SCOPE, rffi.INT], rffi.INT,\n threadsafe=ts_reflect, compilation_info=backend.eci)\n", (15260, 15371), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((15589, 15708), 'rpython.rtyper.lltypesystem.rffi.llexternal', 'rffi.llexternal', (['"""cppyy_strtoll"""', '[rffi.CCHARP]', 'rffi.LONGLONG'], {'threadsafe': 'ts_helper', 'compilation_info': 'backend.eci'}), "('cppyy_strtoll', [rffi.CCHARP], rffi.LONGLONG, threadsafe=\n ts_helper, compilation_info=backend.eci)\n", (15604, 15708), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((15734, 15855), 'rpython.rtyper.lltypesystem.rffi.llexternal', 'rffi.llexternal', (['"""cppyy_strtoull"""', '[rffi.CCHARP]', 'rffi.ULONGLONG'], {'threadsafe': 'ts_helper', 'compilation_info': 'backend.eci'}), "('cppyy_strtoull', [rffi.CCHARP], rffi.ULONGLONG, threadsafe\n =ts_helper, compilation_info=backend.eci)\n", (15749, 15855), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((15877, 15990), 'rpython.rtyper.lltypesystem.rffi.llexternal', 'rffi.llexternal', (['"""cppyy_free"""', '[rffi.VOIDP]', 'lltype.Void'], {'threadsafe': 'ts_memory', 'compilation_info': 'backend.eci'}), "('cppyy_free', [rffi.VOIDP], lltype.Void, threadsafe=\n ts_memory, compilation_info=backend.eci)\n", (15892, 15990), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((16164, 16285), 'rpython.rtyper.lltypesystem.rffi.llexternal', 'rffi.llexternal', (['"""cppyy_charp2stdstring"""', '[rffi.CCHARP]', 'C_OBJECT'], {'threadsafe': 'ts_helper', 'compilation_info': 'backend.eci'}), "('cppyy_charp2stdstring', [rffi.CCHARP], C_OBJECT,\n threadsafe=ts_helper, compilation_info=backend.eci)\n", (16179, 16285), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((16323, 16445), 'rpython.rtyper.lltypesystem.rffi.llexternal', 'rffi.llexternal', (['"""cppyy_stdstring2stdstring"""', '[C_OBJECT]', 'C_OBJECT'], {'threadsafe': 'ts_helper', 'compilation_info': 'backend.eci'}), "('cppyy_stdstring2stdstring', [C_OBJECT], C_OBJECT,\n threadsafe=ts_helper, compilation_info=backend.eci)\n", (16338, 16445), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((16480, 16616), 'rpython.rtyper.lltypesystem.rffi.llexternal', 'rffi.llexternal', (['"""cppyy_assign2stdstring"""', '[C_OBJECT, rffi.CCHARP]', 'lltype.Void'], {'threadsafe': 'ts_helper', 'compilation_info': 'backend.eci'}), "('cppyy_assign2stdstring', [C_OBJECT, rffi.CCHARP], lltype.\n Void, threadsafe=ts_helper, compilation_info=backend.eci)\n", (16495, 16616), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((16648, 16769), 'rpython.rtyper.lltypesystem.rffi.llexternal', 'rffi.llexternal', (['"""cppyy_free_stdstring"""', '[C_OBJECT]', 'lltype.Void'], {'threadsafe': 'ts_helper', 'compilation_info': 'backend.eci'}), "('cppyy_free_stdstring', [C_OBJECT], lltype.Void, threadsafe\n =ts_helper, compilation_info=backend.eci)\n", (16663, 16769), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((931, 961), 'rpython.rtyper.lltypesystem.rffi.cast', 'rffi.cast', (['rffi.SIZE_T', 'offset'], {}), '(rffi.SIZE_T, offset)\n', (940, 961), 
False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((966, 985), 'rpython.rlib.jit.promote', 'jit.promote', (['offset'], {}), '(offset)\n', (977, 985), False, 'from rpython.rlib import jit\n'), ((1042, 1069), 'rpython.rtyper.lltypesystem.rffi.cast', 'rffi.cast', (['rffi.CCHARP', 'ptr'], {}), '(rffi.CCHARP, ptr)\n', (1051, 1069), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((1197, 1245), 'rpython.rtyper.lltypesystem.rffi.ptradd', 'rffi.ptradd', (['ptr', 'cif_descr.exchange_args[index]'], {}), '(ptr, cif_descr.exchange_args[index])\n', (1208, 1245), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((12800, 12826), 'rpython.rtyper.lltypesystem.rffi.cast', 'rffi.cast', (['WLAVC_INDEX', '(-1)'], {}), '(WLAVC_INDEX, -1)\n', (12809, 12826), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((16044, 16065), 'rpython.rtyper.lltypesystem.rffi.charp2str', 'rffi.charp2str', (['charp'], {}), '(charp)\n', (16058, 16065), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((16078, 16106), 'rpython.rtyper.lltypesystem.rffi.cast', 'rffi.cast', (['rffi.VOIDP', 'charp'], {}), '(rffi.VOIDP, charp)\n', (16087, 16106), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((997, 1015), 'rpython.rtyper.lltypesystem.lltype.typeOf', 'lltype.typeOf', (['ptr'], {}), '(ptr)\n', (1010, 1015), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((1101, 1138), 'rpython.rtyper.lltypesystem.lltype.direct_ptradd', 'lltype.direct_ptradd', (['address', 'offset'], {}), '(address, offset)\n', (1121, 1138), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n'), ((10170, 10200), 'rpython.rtyper.lltypesystem.rffi.cast', 'rffi.cast', (['rffi.VOIDP', 'indices'], {}), '(rffi.VOIDP, indices)\n', (10179, 10200), False, 'from rpython.rtyper.lltypesystem import rffi, lltype\n')] |
import argparse
import pathlib
import os
import trafaret as T
from trafaret_config import commandline
BASE_DIR = pathlib.Path(__file__).parent.parent
DEFAULT_CONFIG_PATH = os.path.join(BASE_DIR, 'config', 'info.yaml')
TRAFARET = T.Dict({
T.Key('postgres'):
T.Dict({
'database': T.String(),
'user': T.String(),
'password': T.String(),
'host': T.String(),
'port': T.Int(),
}),
})
def get_config(argv=None) -> dict:
ap = argparse.ArgumentParser()
commandline.standard_argparse_options(
ap,
default_config=DEFAULT_CONFIG_PATH
)
# ignore unknown options
options, unknown = ap.parse_known_args(argv)
config = commandline.config_from_options(options, TRAFARET)
return config
| [
"trafaret.String",
"argparse.ArgumentParser",
"pathlib.Path",
"os.path.join",
"trafaret.Key",
"trafaret_config.commandline.standard_argparse_options",
"trafaret.Int",
"trafaret_config.commandline.config_from_options"
] | [((175, 220), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""config"""', '"""info.yaml"""'], {}), "(BASE_DIR, 'config', 'info.yaml')\n", (187, 220), False, 'import os\n'), ((508, 533), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (531, 533), False, 'import argparse\n'), ((538, 615), 'trafaret_config.commandline.standard_argparse_options', 'commandline.standard_argparse_options', (['ap'], {'default_config': 'DEFAULT_CONFIG_PATH'}), '(ap, default_config=DEFAULT_CONFIG_PATH)\n', (575, 615), False, 'from trafaret_config import commandline\n'), ((730, 780), 'trafaret_config.commandline.config_from_options', 'commandline.config_from_options', (['options', 'TRAFARET'], {}), '(options, TRAFARET)\n', (761, 780), False, 'from trafaret_config import commandline\n'), ((116, 138), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (128, 138), False, 'import pathlib\n'), ((246, 263), 'trafaret.Key', 'T.Key', (['"""postgres"""'], {}), "('postgres')\n", (251, 263), True, 'import trafaret as T\n'), ((306, 316), 'trafaret.String', 'T.String', ([], {}), '()\n', (314, 316), True, 'import trafaret as T\n'), ((338, 348), 'trafaret.String', 'T.String', ([], {}), '()\n', (346, 348), True, 'import trafaret as T\n'), ((374, 384), 'trafaret.String', 'T.String', ([], {}), '()\n', (382, 384), True, 'import trafaret as T\n'), ((406, 416), 'trafaret.String', 'T.String', ([], {}), '()\n', (414, 416), True, 'import trafaret as T\n'), ((438, 445), 'trafaret.Int', 'T.Int', ([], {}), '()\n', (443, 445), True, 'import trafaret as T\n')] |
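A minimal usage sketch for the loader above (not part of the original record): the YAML layout simply mirrors the TRAFARET schema, and calling get_config with an empty argument list falls back to DEFAULT_CONFIG_PATH; the file contents shown are assumptions.

# config/info.yaml (hypothetical):
#   postgres:
#     database: mydb
#     user: admin
#     password: secret
#     host: localhost
#     port: 5432
config = get_config([])                 # no CLI args, so DEFAULT_CONFIG_PATH is used
print(config['postgres']['port'])      # -> 5432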
import yaml
import os
import socket
import logging
from base_plugin import CascadePlugin
from annex import Annex
CFG = {
'SELF_FQDN': socket.getfqdn(),
'SELF_IP': socket.gethostbyname(socket.getfqdn()),
'ROLE': 'leaf',
'PLUGINS': [],
'REDIS_BINARY': '/usr/local/bin/redis-server',
'REDIS_CONFIGS': {},
'BOOTSTRAP_NODES': [],
}
def load_config(cfg_file):
'''
Given a file, try to load it as a YAML dictionary. If it isn't one, then it's probably not
a valid cascade configuration.
'''
if not os.path.exists(cfg_file):
return False
logging.info('Loading configuration file: %s' % cfg_file)
cfg = yaml.load(open(cfg_file).read())
if type(cfg) != dict:
return False
if 'bootstrapfile' in cfg:
with open(cfg['bootstrapfile']) as nodes:
CFG['BOOTSTRAP_NODES'] = nodes.read().split('\n')
if 'redis' in cfg:
if 'configs' in cfg['redis']:
CFG['REDIS_CONFIGS'] = cfg['redis']['configs']
if 'binary' in cfg['redis']:
assert os.path.exists(cfg['redis']['binary']), "redis.binary needs to exist"
CFG['REDIS_BINARY'] = cfg['redis']['binary']
if 'plugindir' in cfg:
CFG['PLUGINS'] = Annex(CascadePlugin, [cfg['plugindir']])
return True
def get_bootstrap_nodes():
'''
Returns a list of hosts that are the bootstrap nodes: defaults that may or may not be
good, but we expect at least a few to be alive so that machines joining the tree can
have a starting place.
'''
return CFG['BOOTSTRAP_NODES']
def get_self_fqdn():
'''
We use our own hostname in various places and this is what we put into the tree to tell
people how to connect to us.
'''
return CFG['SELF_FQDN']
def get_role():
'''
Returns the current role of this instance.
'''
return CFG['ROLE']
def set_role(role):
'''
    Changes the existing instance's role.
'''
CFG['ROLE'] = role or CFG['ROLE']
def get_redis_binary():
'''
Return location of our Redis binary.
'''
return CFG['REDIS_BINARY']
def get_self_ip():
'''
Returns our IP address, as resolved by resolving our FQDN.
'''
return CFG['SELF_IP']
def get_redis_configs():
'''
Return our dict containing Redis configurations.
'''
return CFG['REDIS_CONFIGS']
def get_plugins():
'''
Return list of plugins that are loaded.
'''
return CFG['PLUGINS']
| [
"os.path.exists",
"annex.Annex",
"logging.info",
"socket.getfqdn"
] | [((141, 157), 'socket.getfqdn', 'socket.getfqdn', ([], {}), '()\n', (155, 157), False, 'import socket\n'), ((596, 653), 'logging.info', 'logging.info', (["('Loading configuration file: %s' % cfg_file)"], {}), "('Loading configuration file: %s' % cfg_file)\n", (608, 653), False, 'import logging\n'), ((195, 211), 'socket.getfqdn', 'socket.getfqdn', ([], {}), '()\n', (209, 211), False, 'import socket\n'), ((545, 569), 'os.path.exists', 'os.path.exists', (['cfg_file'], {}), '(cfg_file)\n', (559, 569), False, 'import os\n'), ((1245, 1285), 'annex.Annex', 'Annex', (['CascadePlugin', "[cfg['plugindir']]"], {}), "(CascadePlugin, [cfg['plugindir']])\n", (1250, 1285), False, 'from annex import Annex\n'), ((1065, 1103), 'os.path.exists', 'os.path.exists', (["cfg['redis']['binary']"], {}), "(cfg['redis']['binary'])\n", (1079, 1103), False, 'import os\n')] |
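An illustrative configuration file and call for the cascade module above; the path and key values are placeholders inferred from the keys load_config reads, not taken from the original record.

# /etc/cascade/cascade.yaml (hypothetical):
#   bootstrapfile: /etc/cascade/bootstrap_nodes   # newline-separated host list
#   redis:
#     binary: /usr/local/bin/redis-server
#     configs:
#       main: {port: 6379}
#   plugindir: /usr/lib/cascade/plugins
if load_config('/etc/cascade/cascade.yaml'):
    print(get_role(), get_self_fqdn(), len(get_bootstrap_nodes()))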
##
# This software was developed and / or modified by Raytheon Company,
# pursuant to Contract DG133W-05-CQ-1067 with the US Government.
#
# U.S. EXPORT CONTROLLED TECHNICAL DATA
# This software product contains export-restricted data whose
# export/transfer/disclosure is restricted by U.S. law. Dissemination
# to non-U.S. persons whether in the United States or abroad requires
# an export license or other authorization.
#
# Contractor Name: <NAME>
# Contractor Address: 6825 Pine Street, Suite 340
# Mail Stop B8
# Omaha, NE 68106
# 402.291.0100
#
# See the AWIPS II Master Rights File ("Master Rights File.pdf") for
# further licensing information.
##
# ----------------------------------------------------------------------------
# This software is in the public domain, furnished "as is", without technical
# support, and with no warranty, express or implied, as to its usefulness for
# any purpose.
#
# Headline UPG phrasing for tropical events
#
# Author:
# ----------------------------------------------------------------------------
scripts = [
{
"commentary": "Clear out all Hazards Table and Grids.",
"name": "HeadlineUPG_Init",
"productType": None,
"clearHazardsTable": 1,
"checkStrings": [],
},
#scenario SC.Y ---> GL.W (no UPG headline phrase present)
{
"commentary": "Initial setup of SC.Y for SC.Y--->GL.W (no UPG headline phrase present)",
"name": "HeadlineUPG_1a",
"drtTime": "20101203_0200",
"gridsStartTime": "20101203_0000",
"productType": "Hazard_MWW_Local",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 36, "SC.Y", ["GMZ870"]),
],
"checkStrings": [
"WHUS72 KTBW 030200",
"MWWTBW",
"URGENT - MARINE WEATHER MESSAGE",
"National Weather Service Tampa Bay Ruskin FL",
"900 PM EST Thu Dec 2 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"GMZ870-031000-",
"/O.NEW.KTBW.SC.Y.0001.101203T0200Z-101204T1200Z/",
"Waters from Tarpon Springs to Suwannee River FL out 20 to 60 NM-",
"900 PM EST Thu Dec 2 2010",
"...SMALL CRAFT ADVISORY IN EFFECT UNTIL 7 AM EST SATURDAY...",
"The National Weather Service in Tampa Bay Ruskin has issued a Small Craft Advisory, which is in effect until 7 AM EST Saturday.",
# "|* SEGMENT TEXT GOES HERE *|.",
"$$",
],
},
{
"commentary": "SC.Y ---> GL.W for SC.Y--->GL.W (no UPG headline phrase present)",
"name": "HeadlineUPG_1b",
"drtTime": "20101203_0200",
"gridsStartTime": "20101203_0000",
"productType": "Hazard_MWW_Local",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 36, "GL.W", ["GMZ870"]),
],
"notCheckStrings": ["...Small Craft Advisory no longer in effect"],
"checkStrings": [
"WHUS72 KTBW 030200",
"MWWTBW",
"URGENT - MARINE WEATHER MESSAGE",
"National Weather Service Tampa Bay Ruskin FL",
"900 PM EST Thu Dec 2 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"GMZ870-031000-",
"/O.UPG.KTBW.SC.Y.0001.000000T0000Z-101204T1200Z/",
"/O.NEW.KTBW.GL.W.0001.101203T0200Z-101204T1200Z/",
"Waters from Tarpon Springs to Suwannee River FL out 20 to 60 NM-",
"900 PM EST Thu Dec 2 2010",
"...GALE WARNING IN EFFECT UNTIL 7 AM EST SATURDAY...",
"The National Weather Service in Tampa Bay Ruskin has issued a Gale Warning, which is in effect until 7 AM EST Saturday. The Small Craft Advisory is no longer in effect.",
"$$",
],
},
{
"commentary": "Deleting hazard grids.",
"name": "HeadlineUPG_1c",
"productType": None,
"clearHazardsTable": 1,
"checkStrings": [],
},
]
import TestScript
def testScript(self, dataMgr):
defaults = {
"database": "<site>_GRID__Fcst_00000000_0000",
"publishGrids": 0,
"decodeVTEC": 1,
"gridsStartTime": "20100101_0500",
"orderStrings": 1,
"vtecMode": "O",
"deleteGrids": [("Fcst", "Hazards", "SFC", "all", "all")],
}
return TestScript.generalTestScript(self, dataMgr, scripts, defaults)
| [
"TestScript.generalTestScript"
] | [((4386, 4448), 'TestScript.generalTestScript', 'TestScript.generalTestScript', (['self', 'dataMgr', 'scripts', 'defaults'], {}), '(self, dataMgr, scripts, defaults)\n', (4414, 4448), False, 'import TestScript\n')] |
from setuptools import setup
setup(
name='titanic',
packages=['titanic'],
package_dir={'': 'src'},
use_scm_version=True,
setup_requires=['setuptools_scm'],
description='Demo data science project using Titanic dataset from Kaggle.',
author='<NAME>',
license='MIT',
entry_points={
'console_scripts': ['titanic=titanic.cli:cli']
}
)
| [
"setuptools.setup"
] | [((32, 347), 'setuptools.setup', 'setup', ([], {'name': '"""titanic"""', 'packages': "['titanic']", 'package_dir': "{'': 'src'}", 'use_scm_version': '(True)', 'setup_requires': "['setuptools_scm']", 'description': '"""Demo data science project using Titanic dataset from Kaggle."""', 'author': '"""<NAME>"""', 'license': '"""MIT"""', 'entry_points': "{'console_scripts': ['titanic=titanic.cli:cli']}"}), "(name='titanic', packages=['titanic'], package_dir={'': 'src'},\n use_scm_version=True, setup_requires=['setuptools_scm'], description=\n 'Demo data science project using Titanic dataset from Kaggle.', author=\n '<NAME>', license='MIT', entry_points={'console_scripts': [\n 'titanic=titanic.cli:cli']})\n", (37, 347), False, 'from setuptools import setup\n')] |
#!/usr/bin/env python3
"""Project 3D WKT/WKB to 2D."""
import argparse
import logging
import pathlib
import sys
root = pathlib.Path(__file__).resolve().parent.parent
sys.path.insert(0, str(root))
from generative.flatten import flatten, unflatten
from generative.projection import project
from generative.wkio import (
deserialize_flat,
deserialize_geometries,
serialize_flat,
serialize_geometries,
)
LOG_LEVELS = {
"CRITICAL": logging.CRITICAL,
"ERROR": logging.ERROR,
"WARNING": logging.WARNING,
"INFO": logging.INFO,
"DEBUG": logging.DEBUG,
}
DEFAULT_LEVEL = "WARNING"
def parse_args():
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument(
"--input",
"-i",
type=argparse.FileType("r"),
default=sys.stdin,
help="The WKT/WKB input to parse. Defaults to stdin.",
)
parser.add_argument(
"--output",
"-o",
type=argparse.FileType("w"),
default=sys.stdout,
help="Where to output the WKT/WKB. Defaults to stdout.",
)
parser.add_argument(
"--input-format",
"-I",
default="wkt",
choices=["wkt", "wkb", "flat"],
help="The input geometry format. Defaults to WKT. Use 'flat' for better performance.",
)
parser.add_argument(
"--output-format",
"-O",
default="wkt",
choices=["wkt", "wkb", "flat"],
help="The output geometry format. Defaults to WKT. Use 'flat' for better performance.",
)
parser.add_argument(
"-l",
"--log-level",
type=str,
default=DEFAULT_LEVEL,
choices=LOG_LEVELS.keys(),
help=f"Set the logging output level. Defaults to {DEFAULT_LEVEL}.",
)
parser.add_argument(
"--kind",
"-k",
default="pca",
choices=["xy", "xz", "yz", "pca", "svd", "I", "isometric", "auto"],
help="What kind of projection to use. Defaults to using PCA to pick a 2D basis.",
)
parser.add_argument(
"--dimensions",
"-n",
type=int,
default=2,
choices=[2, 3],
help="The target dimensionality for the PCA, SVD, or Isometric projections.",
)
parser.add_argument(
"--scale", "-s", type=float, default=1, help="A multiplicative scale factor"
)
return parser.parse_args()
def main(args):
if args.input_format != "flat":
geometries = deserialize_geometries(args.input, args.input_format)
tagged_points = flatten(geometries)
else:
tagged_points = deserialize_flat(args.input)
transformed_points = project(tagged_points, args.kind, args.dimensions, args.scale)
if args.output_format != "flat":
transformed_geoms = unflatten(transformed_points)
serialize_geometries(transformed_geoms, args.output, args.output_format)
else:
serialize_flat(transformed_points, args.output)
if __name__ == "__main__":
args = parse_args()
logging.basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
level=LOG_LEVELS.get(args.log_level),
stream=sys.stderr,
)
logger = logging.getLogger(name=__file__)
main(args)
| [
"logging.getLogger",
"generative.wkio.deserialize_geometries",
"argparse.FileType",
"argparse.ArgumentParser",
"pathlib.Path",
"generative.wkio.serialize_geometries",
"generative.flatten.unflatten",
"generative.wkio.serialize_flat",
"generative.flatten.flatten",
"generative.wkio.deserialize_flat",
"generative.projection.project"
] | [((642, 745), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__', 'formatter_class': 'argparse.RawDescriptionHelpFormatter'}), '(description=__doc__, formatter_class=argparse.\n RawDescriptionHelpFormatter)\n', (665, 745), False, 'import argparse\n'), ((2707, 2769), 'generative.projection.project', 'project', (['tagged_points', 'args.kind', 'args.dimensions', 'args.scale'], {}), '(tagged_points, args.kind, args.dimensions, args.scale)\n', (2714, 2769), False, 'from generative.projection import project\n'), ((3254, 3286), 'logging.getLogger', 'logging.getLogger', ([], {'name': '__file__'}), '(name=__file__)\n', (3271, 3286), False, 'import logging\n'), ((2521, 2574), 'generative.wkio.deserialize_geometries', 'deserialize_geometries', (['args.input', 'args.input_format'], {}), '(args.input, args.input_format)\n', (2543, 2574), False, 'from generative.wkio import deserialize_flat, deserialize_geometries, serialize_flat, serialize_geometries\n'), ((2599, 2618), 'generative.flatten.flatten', 'flatten', (['geometries'], {}), '(geometries)\n', (2606, 2618), False, 'from generative.flatten import flatten, unflatten\n'), ((2653, 2681), 'generative.wkio.deserialize_flat', 'deserialize_flat', (['args.input'], {}), '(args.input)\n', (2669, 2681), False, 'from generative.wkio import deserialize_flat, deserialize_geometries, serialize_flat, serialize_geometries\n'), ((2836, 2865), 'generative.flatten.unflatten', 'unflatten', (['transformed_points'], {}), '(transformed_points)\n', (2845, 2865), False, 'from generative.flatten import flatten, unflatten\n'), ((2874, 2946), 'generative.wkio.serialize_geometries', 'serialize_geometries', (['transformed_geoms', 'args.output', 'args.output_format'], {}), '(transformed_geoms, args.output, args.output_format)\n', (2894, 2946), False, 'from generative.wkio import deserialize_flat, deserialize_geometries, serialize_flat, serialize_geometries\n'), ((2965, 3012), 'generative.wkio.serialize_flat', 'serialize_flat', (['transformed_points', 'args.output'], {}), '(transformed_points, args.output)\n', (2979, 3012), False, 'from generative.wkio import deserialize_flat, deserialize_geometries, serialize_flat, serialize_geometries\n'), ((827, 849), 'argparse.FileType', 'argparse.FileType', (['"""r"""'], {}), "('r')\n", (844, 849), False, 'import argparse\n'), ((1019, 1041), 'argparse.FileType', 'argparse.FileType', (['"""w"""'], {}), "('w')\n", (1036, 1041), False, 'import argparse\n'), ((120, 142), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (132, 142), False, 'import pathlib\n')] |
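A sketch of driving main() above programmatically; it mirrors the CLI defaults from parse_args, and the input file name is a placeholder (the generative package must be importable for this to run).

# Equivalent to: python project.py -i shapes.wkt -k pca -n 2
import argparse, sys
args = argparse.Namespace(
    input=open('shapes.wkt'), output=sys.stdout,
    input_format='wkt', output_format='wkt',
    kind='pca', dimensions=2, scale=1.0)
main(args)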
# Generated by Django 2.2.15 on 2020-08-23 05:10
from django.db import migrations, models
import edivorce.apps.core.redis
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Document',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('file', models.FileField(storage=edivorce.apps.core.redis.RedisStorage, upload_to='docs_%Y_%m_%d_%H_%M_%S')),
],
),
]
| [
"django.db.models.AutoField",
"django.db.models.FileField"
] | [((337, 430), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (353, 430), False, 'from django.db import migrations, models\n'), ((454, 558), 'django.db.models.FileField', 'models.FileField', ([], {'storage': 'edivorce.apps.core.redis.RedisStorage', 'upload_to': '"""docs_%Y_%m_%d_%H_%M_%S"""'}), "(storage=edivorce.apps.core.redis.RedisStorage, upload_to=\n 'docs_%Y_%m_%d_%H_%M_%S')\n", (470, 558), False, 'from django.db import migrations, models\n')] |
"""
migro.uploader.worker
~~~~~~~~~~~~~~~~~~~~~
File uploader worker.
"""
import asyncio
import time
from collections import defaultdict
from enum import Enum
from uuid import uuid4
from migro import settings
from migro.uploader.utils import request
class Events(Enum):
"""Available events."""
UPLOAD_ERROR = 'UPLOAD_ERROR'
UPLOAD_THROTTLED = 'UPLOAD_THROTTLED'
UPLOAD_COMPLETE = 'UPLOAD_COMPLETE'
# Download on Uploadcare side.
DOWNLOAD_ERROR = 'DOWNLOAD_ERROR'
DOWNLOAD_COMPLETE = 'DOWNLOAD_COMPLETE'
def __str__(self):
return self.value
class File:
"""An uploading file instance.
:param error: Current file migration error.
    :param uuid: Uploaded to uploadcare file id.
:param upload_token: `from_url` upload token.
:param data: Uploaded to uploadcare file data.
:param url: `from_url` file url - from where to download it.
:param id: local file id.
"""
def __init__(self, url):
self.error = None
self.uuid = None
self.upload_token = None
self.data = None
self.url = url
self.id = uuid4()
@property
def status(self):
if self.error:
return 'error'
elif self.upload_token and not self.uuid:
return 'uploaded'
elif self.upload_token and self.uuid:
return 'complete'
class Uploader:
"""An uploader worker.
:param loop: Uploader event loop.
:param EVENTS: Set of available events to listen.
:param events_callbacks: Registry of events callbacks.
:param upload_semaphore: Semaphore for upload tasks.
:param status_check_semaphore: Semaphore for status check tasks.
:param event_queue: Events queue.
:param upload_queue: Upload queue.
"""
EVENTS = (Events.UPLOAD_ERROR,
Events.UPLOAD_THROTTLED,
Events.UPLOAD_COMPLETE,
Events.DOWNLOAD_ERROR,
Events.DOWNLOAD_COMPLETE)
def __init__(self, loop=None):
if loop is None:
loop = asyncio.get_event_loop()
self._events_callbacks = defaultdict(list)
self.loop = loop
# Semaphores to avoid too much 'parallel' requests.
self._upload_semaphore = asyncio.Semaphore(
settings.MAX_CONCURRENT_UPLOADS, loop=self.loop)
self.event_queue = asyncio.Queue(loop=self.loop)
self.upload_queue = asyncio.Queue(loop=self.loop)
async def upload(self, file):
"""Upload file using `from_url` feature.
:param file: `File` instance.
"""
async with self._upload_semaphore:
data = {'source_url': file.url, 'store': 'auto'}
response = await request('from_url/', data)
event = {'file': file}
if response.status == 429:
event['type'] = Events.UPLOAD_THROTTLED
timeout = response.headers.get('Retry-After',
settings.THROTTLING_TIMEOUT)
await asyncio.sleep(float(timeout), loop=self.loop)
elif response.status != 200:
file.error = 'UPLOAD_ERROR: {0}'.format(await response.text())
event['type'] = Events.UPLOAD_ERROR
else:
file.upload_token = (await response.json())['token']
event['type'] = Events.UPLOAD_COMPLETE
# Create event.
asyncio.ensure_future(self.event_queue.put(event), loop=self.loop)
if event['type'] == Events.UPLOAD_THROTTLED:
# Put item back to queue since it need to be retried
await self.upload_queue.put(file)
elif event['type'] != Events.UPLOAD_ERROR:
await self.wait_for_status(file)
# Mark file as processed from upload queue.
self.upload_queue.task_done()
return None
async def wait_for_status(self, file):
"""Wait till `file` will be processed by Uploadcare or
`settings.FROM_URL_TIMEOUT` seconds before timeout.
:param file: `File` instance.
"""
start = time.time()
event = {'file': file}
data = {'token': file.upload_token}
while time.time() - start <= settings.FROM_URL_TIMEOUT:
response = await request('from_url/status/', data)
if response.status != 200:
event['type'] = Events.DOWNLOAD_ERROR
file.error = 'Request error: {0}'.format(response.status)
break
else:
result = await response.json()
if result['status'] == 'error':
event['type'] = Events.DOWNLOAD_ERROR
file.error = result.get('error', 'unknown')
break
elif result['status'] == 'success':
event['type'] = Events.DOWNLOAD_COMPLETE
file.data = result
file.uuid = result['uuid']
break
else:
await asyncio.sleep(settings.STATUS_CHECK_INTERVAL,
loop=self.loop)
else:
# `from_url` timeout.
event['type'] = Events.DOWNLOAD_ERROR
file.error = 'Status check timeout.'
# Mark file as processed from status check queue.
asyncio.ensure_future(self.event_queue.put(event), loop=self.loop)
return None
async def process_upload_queue(self):
"""Upload queue process coroutine."""
while True:
file = await self.upload_queue.get()
asyncio.ensure_future(self.upload(file), loop=self.loop)
return None
async def process(self, urls):
"""Process `urls` - upload specified urls to Uploadcare.
:param urls: List of URL's to upload to Uploadcare.
"""
self._consumers = [
asyncio.ensure_future(self.process_events(), loop=self.loop),
asyncio.ensure_future(self.process_upload_queue(), loop=self.loop),
]
for url in urls:
# Put jobs into upload queue.
await self.upload_queue.put(File(url))
# Wait till all queues are processed
await self.upload_queue.join()
return None
def shutdown(self):
"""Shutdown uploader.
Stop all consumers, wait till they stop.
"""
for consumer in self._consumers:
consumer.cancel()
try:
# Wait till started consumers tasks will finish.
self.loop.run_until_complete(asyncio.gather(*self._consumers,
loop=self.loop))
except asyncio.CancelledError:
pass
# Remove all the queues consumers.
self._consumers = []
return None
async def process_events(self):
"""Events process coroutine."""
while True:
event = await self.event_queue.get()
event_type = event['type']
callbacks = self._events_callbacks[event_type]
for callback in callbacks:
if asyncio.iscoroutinefunction(callback):
asyncio.ensure_future(callback(event), loop=self.loop)
else:
callback(event)
return None
def on(self, *events, callback):
"""Register `callback` for `event`.
The callbacks will be executed in registration order.
        If the callback is a coroutine, it will be scheduled to run in the
        `self.loop` event loop.
:param event: Event instance.
:param callback: Callback to register.
"""
for event in events:
if event not in self.EVENTS:
raise TypeError('Unknown event')
self._events_callbacks[event].append(callback)
return None
def off(self, *events, callback=None):
"""Unregister specific callback or all callbacks for event.
:param event: Event instance.
:param callback: Callback to unregister.
"""
for event in events:
if event not in self.EVENTS:
raise TypeError('Unknown event')
if event in self._events_callbacks:
if callback:
callbacks = self._events_callbacks[event]
if callback in callbacks:
callbacks.remove(callback)
else:
del self._events_callbacks[event]
return None
| [
"migro.uploader.utils.request",
"asyncio.iscoroutinefunction",
"asyncio.sleep",
"asyncio.Queue",
"uuid.uuid4",
"collections.defaultdict",
"asyncio.Semaphore",
"asyncio.gather",
"asyncio.get_event_loop",
"time.time"
] | [((1135, 1142), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (1140, 1142), False, 'from uuid import uuid4\n'), ((2126, 2143), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2137, 2143), False, 'from collections import defaultdict\n'), ((2262, 2328), 'asyncio.Semaphore', 'asyncio.Semaphore', (['settings.MAX_CONCURRENT_UPLOADS'], {'loop': 'self.loop'}), '(settings.MAX_CONCURRENT_UPLOADS, loop=self.loop)\n', (2279, 2328), False, 'import asyncio\n'), ((2369, 2398), 'asyncio.Queue', 'asyncio.Queue', ([], {'loop': 'self.loop'}), '(loop=self.loop)\n', (2382, 2398), False, 'import asyncio\n'), ((2427, 2456), 'asyncio.Queue', 'asyncio.Queue', ([], {'loop': 'self.loop'}), '(loop=self.loop)\n', (2440, 2456), False, 'import asyncio\n'), ((4179, 4190), 'time.time', 'time.time', ([], {}), '()\n', (4188, 4190), False, 'import time\n'), ((2068, 2092), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (2090, 2092), False, 'import asyncio\n'), ((2742, 2768), 'migro.uploader.utils.request', 'request', (['"""from_url/"""', 'data'], {}), "('from_url/', data)\n", (2749, 2768), False, 'from migro.uploader.utils import request\n'), ((4280, 4291), 'time.time', 'time.time', ([], {}), '()\n', (4289, 4291), False, 'import time\n'), ((4359, 4392), 'migro.uploader.utils.request', 'request', (['"""from_url/status/"""', 'data'], {}), "('from_url/status/', data)\n", (4366, 4392), False, 'from migro.uploader.utils import request\n'), ((6692, 6740), 'asyncio.gather', 'asyncio.gather', (['*self._consumers'], {'loop': 'self.loop'}), '(*self._consumers, loop=self.loop)\n', (6706, 6740), False, 'import asyncio\n'), ((7249, 7286), 'asyncio.iscoroutinefunction', 'asyncio.iscoroutinefunction', (['callback'], {}), '(callback)\n', (7276, 7286), False, 'import asyncio\n'), ((5116, 5177), 'asyncio.sleep', 'asyncio.sleep', (['settings.STATUS_CHECK_INTERVAL'], {'loop': 'self.loop'}), '(settings.STATUS_CHECK_INTERVAL, loop=self.loop)\n', (5129, 5177), False, 'import asyncio\n')] |
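A minimal driver sketch for the Uploader above; the URL is a placeholder and migro.settings must point at a real Uploadcare project for the upload to succeed.

import asyncio
from migro.uploader.worker import Events, Uploader

def report(event):
    f = event['file']
    print(event['type'], f.url, f.status, f.error or f.uuid)

loop = asyncio.get_event_loop()
uploader = Uploader(loop=loop)
uploader.on(Events.UPLOAD_ERROR, Events.DOWNLOAD_ERROR,
            Events.DOWNLOAD_COMPLETE, callback=report)
loop.run_until_complete(uploader.process(['https://example.com/a.jpg']))
uploader.shutdown()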
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2018-02-20 06:47
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('landing', '0009_auto_20180220_0745'),
]
operations = [
migrations.RemoveField(
model_name='presentationcover',
name='default',
),
migrations.AddField(
model_name='presentationcover',
name='active',
field=models.BooleanField(default=False, verbose_name='active'),
),
migrations.AlterField(
model_name='presentationcover',
name='section',
field=models.CharField(choices=[('DES', 'description'), ('FEA', 'features')], default='DES', max_length=3, verbose_name='section'),
),
]
| [
"django.db.migrations.RemoveField",
"django.db.models.CharField",
"django.db.models.BooleanField"
] | [((300, 370), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""presentationcover"""', 'name': '"""default"""'}), "(model_name='presentationcover', name='default')\n", (322, 370), False, 'from django.db import migrations, models\n'), ((525, 582), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'verbose_name': '"""active"""'}), "(default=False, verbose_name='active')\n", (544, 582), False, 'from django.db import migrations, models\n'), ((716, 844), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('DES', 'description'), ('FEA', 'features')]", 'default': '"""DES"""', 'max_length': '(3)', 'verbose_name': '"""section"""'}), "(choices=[('DES', 'description'), ('FEA', 'features')],\n default='DES', max_length=3, verbose_name='section')\n", (732, 844), False, 'from django.db import migrations, models\n')] |
from .base import ParticleFilter
from ..utils import loglikelihood, choose
from ..normalization import normalize
import torch
class APF(ParticleFilter):
"""
Implements the Auxiliary Particle Filter of Pitt and Shephard.
"""
def _filter(self, y):
# ===== Perform auxiliary sampling ===== #
self.proposal.construct(y, self._x_cur)
pre_weights = self._proposal.pre_weight(y)
resamp_w = pre_weights + self._w_old
normalized = normalize(self._w_old)
# ===== Resample and propagate ===== #
resampled_indices = self._resampler(resamp_w)
resampled_x = choose(self._x_cur, resampled_indices)
self.proposal = self.proposal.resample(resampled_indices)
self._x_cur = self._proposal.draw(self._rsample)
weights = self.proposal.weight(y, self._x_cur, resampled_x)
self._w_old = weights - choose(pre_weights, resampled_indices)
# ===== Calculate log likelihood ===== #
ll = loglikelihood(self._w_old) + torch.log((normalized * torch.exp(pre_weights)).sum(-1))
normw = normalize(self._w_old) if weights.dim() == self._x_cur.dim() else normalize(self._w_old).unsqueeze(-1)
        return (normw * self._x_cur).sum(self._sumaxis), ll
 | [
"torch.exp"
] | [((1048, 1070), 'torch.exp', 'torch.exp', (['pre_weights'], {}), '(pre_weights)\n', (1057, 1070), False, 'import torch\n')] |
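Reading the log-likelihood line in _filter above: under the common convention that loglikelihood(w) computes a log-mean-exp over particle log-weights (an assumption, since the helper is not shown here), the quantity accumulated each step is

\log \hat{p}(y_t \mid y_{1:t-1}) = \log \frac{1}{N} \sum_{i=1}^{N} e^{w_t^{(i)}} + \log \sum_{i=1}^{N} \bar{w}_{t-1}^{(i)} \, e^{\hat{w}_t^{(i)}}

where \hat{w}_t^{(i)} are the proposal pre-weights, \bar{w}_{t-1}^{(i)} the normalized previous weights, and w_t^{(i)} the corrected weights stored in self._w_old, i.e. the standard two-stage likelihood estimator of the auxiliary particle filter.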
__author__ = ["<NAME>", "<NAME>", "<NAME>"]
__email__ = ["<EMAIL>", "<EMAIL>", "<EMAIL>"]
from struct import pack, unpack
import message_d.message_exception as msgexcp
class Choke(object):
# CHOKE = <length><message_id>
# - payload length = 1 (4 bytes)
# - message id = 0 (1 byte)
message_id = 0
chokes_me = True
payload_length = 1
total_length = 5
def __init__(self):
super(Choke, self).__init__()
def to_bytes(self):
return pack(">IB", self.payload_length, self.message_id)
@classmethod
def from_bytes(cls, payload):
payload_length, message_id = unpack(">IB", payload[:cls.total_length])
if message_id != cls.message_id:
            raise msgexcp.Message_Exception("Not a Choke message")
return Choke()
| [
"struct.unpack",
"struct.pack"
] | [((510, 559), 'struct.pack', 'pack', (['""">IB"""', 'self.payload_length', 'self.message_id'], {}), "('>IB', self.payload_length, self.message_id)\n", (514, 559), False, 'from struct import pack, unpack\n'), ((649, 690), 'struct.unpack', 'unpack', (['""">IB"""', 'payload[:cls.total_length]'], {}), "('>IB', payload[:cls.total_length])\n", (655, 690), False, 'from struct import pack, unpack\n')] |
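A round trip of the 5-byte wire format produced by the Choke message above; the byte values follow directly from pack(">IB", 1, 0).

msg = Choke()
raw = msg.to_bytes()                 # b'\x00\x00\x00\x01\x00': length=1, id=0
same = Choke.from_bytes(raw)
assert isinstance(same, Choke) and same.to_bytes() == raw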
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options
from selenium.common.exceptions import NoSuchElementException
import time
import pickle
import re
from tqdm import tqdm
def createTopicLinkDict(dictPath, driverPath):
options = Options()
options.headless = True
s = Service(driverPath)
driver = webdriver.Chrome(options=options, service=s)
print('Dictionary creation begins.')
driver.get("https://dergipark.org.tr/tr/search?section=articles")
time.sleep(5)
lastException = 0
i = 2
topicLinkDict = {}
numberOfTopics = len(
driver.find_elements(By.XPATH, '//*[@id="collapsible_portlet_2"]/div[2]/div/div'))
numberOfGroups = len(driver.find_elements(By.XPATH,'//*[@id="collapsible_portlet_2"]/div[2]/div/div[contains(@class, "kt-widget-18__item bucket-group-title ")]'))
pbar = tqdm(total=numberOfTopics-numberOfGroups)
while True:
try:
topic = re.sub(r'[^\w\s]', '', "".join(driver.find_element(By.XPATH,
"/html/body/div[2]/div/div/div/div/div/div/div[2]/div[2]/div[1]/div[3]/div[2]/div/div[{order:d}]/div[1]/a/div".format(
order=i)).get_attribute(
"textContent").lower().split()))
link = driver.find_element(By.XPATH,
"/html/body/div[2]/div/div/div/div/div/div/div[2]/div[2]/div[1]/div[3]/div[2]/div/div[{order:d}]/a".format(
order=i)).get_attribute("href")
topicLinkDict[topic] = link
i += 1
except NoSuchElementException:
if (i - lastException) == 1:
with open(dictPath, 'wb') as handle:
pickle.dump(topicLinkDict, handle, protocol=pickle.HIGHEST_PROTOCOL)
pbar.update(1)
break
lastException = i
i += 1
continue
pbar.update(1)
print('Dictionary created.')
pbar.close()
driver.quit()
| [
"selenium.webdriver.chrome.options.Options",
"pickle.dump",
"selenium.webdriver.Chrome",
"selenium.webdriver.chrome.service.Service",
"tqdm.tqdm",
"time.sleep"
] | [((365, 374), 'selenium.webdriver.chrome.options.Options', 'Options', ([], {}), '()\n', (372, 374), False, 'from selenium.webdriver.chrome.options import Options\n'), ((411, 430), 'selenium.webdriver.chrome.service.Service', 'Service', (['driverPath'], {}), '(driverPath)\n', (418, 430), False, 'from selenium.webdriver.chrome.service import Service\n'), ((444, 488), 'selenium.webdriver.Chrome', 'webdriver.Chrome', ([], {'options': 'options', 'service': 's'}), '(options=options, service=s)\n', (460, 488), False, 'from selenium import webdriver\n'), ((605, 618), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (615, 618), False, 'import time\n'), ((973, 1016), 'tqdm.tqdm', 'tqdm', ([], {'total': '(numberOfTopics - numberOfGroups)'}), '(total=numberOfTopics - numberOfGroups)\n', (977, 1016), False, 'from tqdm import tqdm\n'), ((1949, 2017), 'pickle.dump', 'pickle.dump', (['topicLinkDict', 'handle'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(topicLinkDict, handle, protocol=pickle.HIGHEST_PROTOCOL)\n', (1960, 2017), False, 'import pickle\n')] |
"""
This library has some useful types and functions used to get types of iterables.
Whilst iterable types were purposely avoided by the types library, iterables offer
unique types which could sometimes be useful to obtain.
"""
from collections import OrderedDict, defaultdict, deque
from typing import Any, Iterable, Iterator
BytearrayIteratorType = type(iter(bytearray()))
ByteIteratorType = type(iter(bytes()))
DefaultDictIteratorType = type(iter(defaultdict()))
DequeIteratorType = type(iter(deque()))
DictItemIteratorType = type(iter({}.items()))
DictIteratorType = type(iter({})) # same as dict.keys()
DictKeyIteratorType = type(iter({}.keys()))
DictReversedItemIteratorType = type(iter(reversed({}.items())))
DictReversedIteratorType = type(iter(reversed({}))) # same as dict.keys()
DictReversedKeyIteratorType = type(iter(reversed({}.keys())))
DictReversedValueIteratorType = type(iter(reversed({}.values())))
DictValueIteratorType = type(iter({}.values()))
EnumerateIteratorType = type(iter(enumerate([])))
FrozensetIteratorType = type(iter(frozenset())) # same as set
ListIteratorType = type(iter([]))
ListReversedIteratorType = type(iter(reversed([])))
LongRangeIteratorType = type(iter(range(1 << 1000)))
MapIteratorType = type(iter(map([],[])))
MemoryviewIteratorType = type(iter(memoryview(bytes()))) # generic
OrderedDictIteratorType = type(iter(OrderedDict()))
RangeIteratorType = type(iter(range(0)))
SetIteratorType = type(iter({1,}))
StringIteratorType = type(iter(""))
TupleIteratorType = type(iter((1,)))
ZipIteratorType = type(iter(zip()))
def isiterable(object: Any) -> bool:
"""Returns True or False based on whether the given object is iterable.
Parameters
----------
object: Any
The object to see if it's iterable.
Returns
-------
bool
Whether the given object is iterable.
"""
try:
iter(object)
except TypeError:
return False
else:
return True
def isstringiterator(object: Iterator[Any]) -> bool:
"""Returns True or False based on whether the given object is a string iterator.
Parameters
----------
object: Any
The object to see if it's a string iterator.
Returns
-------
bool
Whether the given object is a string iterator.
"""
if not isiterable(object):
return False
return isinstance(object, StringIteratorType)
def istupleiterator(object: Iterator[Any]) -> bool:
"""Returns True or False based on whether the given object is a tuple iterator.
Parameters
----------
object: Any
The object to see if it's a tuple iterator.
Returns
-------
bool
Whether the given object is a tuple iterator.
"""
if not isiterable(object):
return False
return isinstance(object, TupleIteratorType)
def islistiterator(object: Iterator[Any]) -> bool:
"""Returns True or False based on whether the given object is a list iterator.
Parameters
----------
object: Any
The object to see if it's a list iterator.
Returns
-------
bool
Whether the given object is a list iterator.
"""
if not isiterable(object):
return False
    return isinstance(object, ListIteratorType)
def issetiterator(object: Iterator[Any]) -> bool:
"""Returns True or False based on whether the given object is a set iterator.
Parameters
----------
object: Any
The object to see if it's a set iterator.
Returns
-------
bool
Whether the given object is a set iterator.
"""
if not isiterable(object):
return False
return isinstance(object, SetIteratorType)
def isfrozensetiterator(object: Iterator[Any]) -> bool:
"""Returns True or False based on whether the given object is a frozenset iterator.
Parameters
----------
object: Any
The object to see if it's a frozenset iterator.
Returns
-------
bool
Whether the given object is a frozenset iterator.
"""
if not isiterable(object):
return False
return isinstance(object, FrozensetIteratorType)
def isdictiterator(object: Iterator[Any]) -> bool:
"""Returns True or False based on whether the given object is a dict iterator.
Parameters
----------
object: Any
The object to see if it's a dict iterator.
Returns
-------
bool
Whether the given object is a dict iterator.
"""
return isdictkeyiterator(object)
def isdictkeyiterator(object: Iterator[Any]) -> bool:
"""Returns True or False based on whether the given object is a dict key iterator.
Parameters
----------
object: Any
The object to see if it's a dict key iterator.
Returns
-------
bool
Whether the given object is a dict key iterator.
"""
if not isiterable(object):
return False
return isinstance(object, DictKeyIteratorType)
def isdictitemiterator(object: Iterator[Any]) -> bool:
"""Returns True or False based on whether the given object is a dict item iterator.
Parameters
----------
object: Any
The object to see if it's a dict item iterator.
Returns
-------
bool
Whether the given object is a dict item iterator.
"""
if not isiterable(object):
return False
return isinstance(object, DictItemIteratorType)
def isdictvalueiterator(object: Iterator[Any]) -> bool:
"""Returns True or False based on whether the given object is a dict value iterator.
Parameters
----------
object: Any
The object to see if it's a dict value iterator.
Returns
-------
bool
Whether the given object is a dict value iterator.
"""
if not isiterable(object):
return False
return isinstance(object, DictValueIteratorType)
def isdictreversediterator(object: Iterator[Any]) -> bool:
"""Returns True or False based on whether the given object is a reversed dict iterator.
Parameters
----------
object: Any
The object to see if it's a reversed dict iterator.
Returns
-------
bool
Whether the given object is a reversed dict iterator.
"""
return isdictreversedkeyiterator(object)
def isdictreversedkeyiterator(object: Iterator[Any]) -> bool:
"""Returns True or False based on whether the given object is a reversed dict key iterator.
Parameters
----------
object: Any
The object to see if it's a reversed dict key iterator.
Returns
-------
bool
Whether the given object is a reversed dict key iterator.
"""
if not isiterable(object):
return False
return isinstance(object, DictReversedKeyIteratorType)
def isdictreverseditemiterator(object: Iterator[Any]) -> bool:
"""Returns True or False based on whether the given object is a reversed dict item iterator.
Parameters
----------
object: Any
The object to see if it's a reversed dict item iterator.
Returns
-------
bool
Whether the given object is a reversed dict item iterator.
"""
if not isiterable(object):
return False
return isinstance(object, DictReversedItemIteratorType)
def isdictreversedvalueiterator(object: Iterator[Any]) -> bool:
"""Returns True or False based on whether the given object is a reversed dict value iterator.
Parameters
----------
object: Any
The object to see if it's a reversed dict value iterator.
Returns
-------
bool
Whether the given object is a reversed dict value iterator.
"""
if not isiterable(object):
return False
return isinstance(object, DictReversedValueIteratorType)
def israngeiterator(object: Iterator[Any]) -> bool:
"""Returns True or False based on whether the given object is a range iterator.
Parameters
----------
object: Any
The object to see if it's a range iterator.
Returns
-------
bool
Whether the given object is a range iterator.
"""
if not isiterable(object):
return False
return isinstance(object, RangeIteratorType)
def islongrangeiterator(object: Iterator[Any]) -> bool:
"""Returns True or False based on whether the given object is a long range iterator.
Parameters
----------
object: Any
The object to see if it's a long range iterator.
Returns
-------
bool
Whether the given object is a long range iterator.
"""
if not isiterable(object):
return False
return isinstance(object, LongRangeIteratorType)
def isbytearrayiterator(object: Iterator[Any]) -> bool:
"""Returns True or False based on whether the given object is a bytearray iterator.
Parameters
----------
object: Any
The object to see if it's a bytearray iterator.
Returns
-------
bool
Whether the given object is a bytearray iterator.
"""
if not isiterable(object):
return False
return isinstance(object, BytearrayIteratorType)
def isbytesiterator(object: Iterator[Any]) -> bool:
"""Returns True or False based on whether the given object is a bytes iterator.
Parameters
----------
object: Any
The object to see if it's a bytes iterator.
Returns
-------
bool
Whether the given object is a bytes iterator.
"""
if not isiterable(object):
return False
return isinstance(object, ByteIteratorType)
def islistreversediterator(object: Iterator[Any]) -> bool:
"""Returns True or False based on whether the given object is a reversed list iterator.
Parameters
----------
object: Any
The object to see if it's a reversed list iterator.
Returns
-------
bool
Whether the given object is a reversed list iterator.
"""
if not isiterable(object):
return False
return isinstance(object, ListReversedIteratorType)
def iszipiterator(object: Iterator[Any]) -> bool:
"""Returns True or False based on whether the given object is a zip iterator.
Parameters
----------
object: Any
The object to see if it's a zip iterator.
Returns
-------
bool
Whether the given object is a zip iterator.
"""
if not isiterable(object):
return False
return isinstance(object, ZipIteratorType)
def ismapiterator(object: Iterator[Any]) -> bool:
"""Returns True or False based on whether the given object is a map iterator.
Parameters
----------
object: Any
The object to see if it's a map iterator.
Returns
-------
bool
Whether the given object is a map iterator.
"""
if not isiterable(object):
return False
return isinstance(object, MapIteratorType)
def ismemoryviewiterator(object: Iterator[Any]) -> bool:
"""Returns True or False based on whether the given object is a memoryview iterator.
Parameters
----------
object: Any
The object to see if it's a memoryview iterator.
Returns
-------
bool
Whether the given object is a memoryview iterator.
"""
if not isiterable(object):
return False
return isinstance(object, MemoryviewIteratorType)
def isordereddictiterator(object: Iterator[Any]) -> bool:
"""Returns True or False based on whether the given object is a collections.OrderedDict() iterator.
Parameters
----------
object: Any
The object to see if it's a OrderedDict iterator.
Returns
-------
bool
Whether the given object is a OrderedDict iterator.
"""
if not isiterable(object):
return False
return isinstance(object, OrderedDictIteratorType)
def isdefaultdictiterator(object: Iterator[Any]) -> bool:
"""Returns True or False based on whether the given object is a collections.defaultdict() iterator.
Parameters
----------
object: Any
The object to see if it's a defaultdict iterator.
Returns
-------
bool
Whether the given object is a defaultdict iterator.
"""
if not isiterable(object):
return False
return isinstance(object, DefaultDictIteratorType)
def isenumerateiterator(object: Iterator[Any]) -> bool:
"""Returns True or False based on whether the given object is an enumerate iterator.
Parameters
----------
object: Any
The object to see if it's an enumerate iterator.
Returns
-------
bool
Whether the given object is an enumerate iterator.
"""
if not isiterable(object):
return False
return isinstance(object, EnumerateIteratorType)
def isdequeiterator(object: Iterator[Any]) -> bool:
"""Returns True or False based on whether the given object is a collections.deque() iterator.
Parameters
----------
object: Any
The object to see if it's a deque iterator.
Returns
-------
bool
Whether the given object is a deque iterator.
"""
if not isiterable(object):
return False
return isinstance(object, DequeIteratorType)
def filter_array(iterable: Iterable[Any], *types) -> Iterable[Any]:
"""Return the iterable with only the select types inside.
Parameters
----------
iterable : Iterable[Any]
The iterable to filter.
types: tuple
The types to filter for.
Returns
-------
list
The filtered iterable.
"""
return list(filter(lambda x: type(x) in types, iterable))
def filter_iterable(iterable: Iterable[Any]) -> Iterable[Iterable[Any]]:
"""Returns only iterables from an iterable.
Parameters
----------
iterable : Iterable[Any]
The iterable to filter.
Returns
-------
list
The filtered iterable.
"""
return list(filter(lambda x: isiterable(x), iterable))
def filter_noniterable(iterable: Iterable[Any]) -> Iterable[Any]:
"""Returns only non-iterables from an iterable.
Parameters
----------
iterable : Iterable[Any]
The iterable to filter.
Returns
-------
list
The filtered iterable.
"""
return list(filter(lambda x: not isiterable(x), iterable))
def filter_type(iterable: Iterable[Any], types: Iterable[type]) -> Iterable[Any]:
"""Filter certain types from an iterable.
Parameters
----------
iterable : Iterable[Any]
The iterable to filter.
types : Iterable[Any]
The types to filter from the iterable.
Returns
-------
list
A list of filtered types from the iterable.
"""
return list(filter(lambda x: type(x) in types, iterable))
def filter_remove_type(iterable: Iterable[Any], types: Iterable[type]) -> Iterable[Any]:
"""Filter certain types out of an iterable.
Parameters
----------
iterable : Iterable[Any]
The iterable to filter.
types : Iterable[Any]
The types to filter out of the iterable.
Returns
-------
list
A list of filtered types from the iterable.
"""
return list(filter(lambda x: type(x) not in types, iterable))
| [
"collections.deque",
"collections.defaultdict",
"collections.OrderedDict"
] | [((479, 492), 'collections.defaultdict', 'defaultdict', ([], {}), '()\n', (490, 492), False, 'from collections import OrderedDict, defaultdict, deque\n'), ((537, 544), 'collections.deque', 'deque', ([], {}), '()\n', (542, 544), False, 'from collections import OrderedDict, defaultdict, deque\n'), ((1523, 1536), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1534, 1536), False, 'from collections import OrderedDict, defaultdict, deque\n')] |
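A short demonstration of the helpers defined above, with expected output noted in comments; it uses only functions present in the module and standard built-ins.

d = {'a': 1, 'b': 2}
print(isiterable(d), isiterable(42))             # True False
print(isdictkeyiterator(iter(d)))                 # True, iter(dict) yields keys
print(isdictitemiterator(iter(d.items())))        # True
print(islistreversediterator(reversed([1, 2])))  # True
mixed = [1, 'x', (2, 3), [4], {5}]
print(filter_iterable(mixed))                     # ['x', (2, 3), [4], {5}]
print(filter_type(mixed, [int, tuple]))           # [1, (2, 3)]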
import pytesseract
from PIL import Image
from multiprocessing import Pool, Manager
import sys
import json
from utils import parse_metamodel_keywords
import time
import os
import pyprind
import numpy as np
mng = Manager()
out_path = sys.argv[1]
lang_creation_time = mng.list()
ocr_times = mng.list()
def do_recognize_in_process(index, ecore_file):
#os.environ['OMP_THREAD_LIMIT'] = "1"
names = parse_metamodel_keywords(ecore_file)
with open("wordlistfile.bak") as f:
out_file_str = f.read() + "\n"
out_file_str += "\n".join(names)
with open("wordlistfile%d" % index, "w") as f:
f.write(out_file_str)
#print("Processing image #%d" % index)
#os.system("./create_tessdata_no_lstm.sh %d" % index)
start_time = time.time()
os.system("./create_tessdata.sh %d > /dev/null" % index)
lang_creation_time.append(time.time() - start_time)
#os.system("rm wordlistfile%d" % index)
#recognized_text = pytesseract.image_to_string(Image.open(out_path + '/images/%d.png' % index), lang='ocl%d' % index, config="strict")
#recognized_text = pytesseract.image_to_string(Image.open(out_path + '/images/%d.png' % index), lang='ocl%d' % index, config="-c language_model_penalty_non_dict_word=1")
#recognized_text = pytesseract.image_to_string(Image.open(out_path + '/images/%d.png' % index), lang='ocl%d' % index, config='--oem 0 -c tessedit_enable_dict_correction=1')
#recognized_text = pytesseract.image_to_string(Image.open(out_path + '/images/%d.png' % index), lang='eng2', config='--oem 0 -c tessedit_enable_dict_correction=1 -c user_words_file="wordlistfile%d" -c load_system_dawg=0' % index)
start_time = time.time()
recognized_text = pytesseract.image_to_string(Image.open(out_path + '/images/%d.png' % index), lang='ocl%d' % index)
ocr_times.append(time.time() - start_time)
os.system("rm /usr/local/share/tessdata/ocl%d.traineddata" % index)
#os.system("rm wordlistfile%d" % index)
with open(out_path + "/recognized_text/%d.txt" % index, "w") as f:
f.write(recognized_text)
matches = json.load(open("loaded_no_duplicated_filtered_expressions.json"))
pool = Pool(1)
bar = pyprind.ProgBar(len(matches), track_time=True, title='Recognizing expressions from images', bar_char='█', update_interval=1.)
for i, match in enumerate(matches):
pool.apply_async(do_recognize_in_process,
args=(i, match['file'],),
callback=lambda x: bar.update(),
error_callback=lambda x: print(x))
pool.close()
pool.join()
print("\t\t*** Performance in font '%s' ***" % out_path)
print("\t\t\t*** Average time spent creating language: %s ±%s ***" % (np.mean(lang_creation_time), np.std(lang_creation_time)))
print("\t\t\t*** Average time spent on OCR recognition: %s ±%s ***" % (np.mean(ocr_times), np.std(ocr_times)))
| [
"numpy.mean",
"PIL.Image.open",
"multiprocessing.Pool",
"numpy.std",
"multiprocessing.Manager",
"os.system",
"time.time",
"utils.parse_metamodel_keywords"
] | [((212, 221), 'multiprocessing.Manager', 'Manager', ([], {}), '()\n', (219, 221), False, 'from multiprocessing import Pool, Manager\n'), ((2170, 2177), 'multiprocessing.Pool', 'Pool', (['(1)'], {}), '(1)\n', (2174, 2177), False, 'from multiprocessing import Pool, Manager\n'), ((404, 440), 'utils.parse_metamodel_keywords', 'parse_metamodel_keywords', (['ecore_file'], {}), '(ecore_file)\n', (428, 440), False, 'from utils import parse_metamodel_keywords\n'), ((768, 779), 'time.time', 'time.time', ([], {}), '()\n', (777, 779), False, 'import time\n'), ((784, 840), 'os.system', 'os.system', (["('./create_tessdata.sh %d > /dev/null' % index)"], {}), "('./create_tessdata.sh %d > /dev/null' % index)\n", (793, 840), False, 'import os\n'), ((1683, 1694), 'time.time', 'time.time', ([], {}), '()\n', (1692, 1694), False, 'import time\n'), ((1867, 1934), 'os.system', 'os.system', (["('rm /usr/local/share/tessdata/ocl%d.traineddata' % index)"], {}), "('rm /usr/local/share/tessdata/ocl%d.traineddata' % index)\n", (1876, 1934), False, 'import os\n'), ((1745, 1792), 'PIL.Image.open', 'Image.open', (["(out_path + '/images/%d.png' % index)"], {}), "(out_path + '/images/%d.png' % index)\n", (1755, 1792), False, 'from PIL import Image\n'), ((871, 882), 'time.time', 'time.time', ([], {}), '()\n', (880, 882), False, 'import time\n'), ((1837, 1848), 'time.time', 'time.time', ([], {}), '()\n', (1846, 1848), False, 'import time\n'), ((2703, 2730), 'numpy.mean', 'np.mean', (['lang_creation_time'], {}), '(lang_creation_time)\n', (2710, 2730), True, 'import numpy as np\n'), ((2732, 2758), 'numpy.std', 'np.std', (['lang_creation_time'], {}), '(lang_creation_time)\n', (2738, 2758), True, 'import numpy as np\n'), ((2832, 2850), 'numpy.mean', 'np.mean', (['ocr_times'], {}), '(ocr_times)\n', (2839, 2850), True, 'import numpy as np\n'), ((2852, 2869), 'numpy.std', 'np.std', (['ocr_times'], {}), '(ocr_times)\n', (2858, 2869), True, 'import numpy as np\n')] |
# Script to fix encoding issue with M10 Base-NoCrowd input files
import numpy as np
import pandas as pd
# Only need to read in one header df
header = pd.read_csv('./input_files/M10__output_file.csv', header = 0, nrows = 1)#.drop(columns = 'Unnamed: 0', axis = 1)
# Reading in binary population dfs (both files)
dat1 = pd.read_csv('./input_files/M10__output_file.csv', skiprows = [0,1])#.drop(columns = 'Unnamed: 0', axis = 1)
dat2 = pd.read_csv('./input_files/M10_2__output_file.csv', skiprows = [0,1])#.drop(columns = 'Unnamed: 0', axis = 1) #header = 2,
print(dat2.columns)
# Dropping the unnecessary index column from the header and binary dataframes
header = header.drop('Unnamed: 0', axis = 1)
dat1 = dat1.drop('Unnamed: 0', axis = 1)
# dat2 = dat2.drop('Unnamed: 0', axis = 1)
print('writing files...')
# writing df 1 to a file
with open('./input_files/M10__new_output_file.csv', 'w') as f:
header.to_csv(f, index = False)
with open('./input_files/M10__new_output_file.csv', 'a') as f: # .replace('/input_files', '')
dat1.to_csv(f, index = False)
# Writing df 2 to a file
with open('./input_files/M10_2__new_output_file.csv', 'w') as f:
header.to_csv(f, index = False)
with open('./input_files/M10_2__new_output_file.csv', 'a') as f:
dat2.to_csv(f, index = False)
| [
"pandas.read_csv"
] | [((153, 221), 'pandas.read_csv', 'pd.read_csv', (['"""./input_files/M10__output_file.csv"""'], {'header': '(0)', 'nrows': '(1)'}), "('./input_files/M10__output_file.csv', header=0, nrows=1)\n", (164, 221), True, 'import pandas as pd\n'), ((322, 388), 'pandas.read_csv', 'pd.read_csv', (['"""./input_files/M10__output_file.csv"""'], {'skiprows': '[0, 1]'}), "('./input_files/M10__output_file.csv', skiprows=[0, 1])\n", (333, 388), True, 'import pandas as pd\n'), ((438, 506), 'pandas.read_csv', 'pd.read_csv', (['"""./input_files/M10_2__output_file.csv"""'], {'skiprows': '[0, 1]'}), "('./input_files/M10_2__output_file.csv', skiprows=[0, 1])\n", (449, 506), True, 'import pandas as pd\n')] |
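# Minimal sketch of the header-then-append CSV pattern used above (illustrative only;
# the frames and file name below are hypothetical).
import pandas as pd
header = pd.DataFrame([{"meta": "example header row"}])
dat = pd.DataFrame({"m1": [1.0, 2.0], "m2": [0.5, 0.7]})
with open("example__new_output_file.csv", "w") as f:
    header.to_csv(f, index=False)
with open("example__new_output_file.csv", "a") as f:
    dat.to_csv(f, index=False)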
from django import forms
from django.utils import timezone
import subprocess
# I don't know how or why but the first few selections are local files....
CowsaySubProcess = subprocess.check_output(
["cowsay", '-l']).decode('utf-8').split()
activeAvatar = list(zip(CowsaySubProcess, CowsaySubProcess))
# Found out cowsay has a ton of avatars and implemented them into the app with a choice field
class CowsayDisplayForm(forms.Form):
userInputText = forms.CharField(max_length = 100, label='What would you like to say?')
cowsayDisplay = forms.CharField(widget = forms.Select(choices=activeAvatar), label='Choose your Avatar')
# publishDate = forms.DateTimeField()
| [
"subprocess.check_output",
"django.forms.Select",
"django.forms.CharField"
] | [((457, 525), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(100)', 'label': '"""What would you like to say?"""'}), "(max_length=100, label='What would you like to say?')\n", (472, 525), False, 'from django import forms\n'), ((573, 607), 'django.forms.Select', 'forms.Select', ([], {'choices': 'activeAvatar'}), '(choices=activeAvatar)\n', (585, 607), False, 'from django import forms\n'), ((173, 214), 'subprocess.check_output', 'subprocess.check_output', (["['cowsay', '-l']"], {}), "(['cowsay', '-l'])\n", (196, 214), False, 'import subprocess\n')] |
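# Illustrative follow-up (not from the original app): the selected avatar and text
# could be rendered back through cowsay; the helper name is an assumption and the
# cowsay binary must be installed.
import subprocess
def render_cowsay(avatar: str, text: str) -> str:
    return subprocess.check_output(["cowsay", "-f", avatar, text]).decode("utf-8")
# print(render_cowsay("tux", "Hello"))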
import pandas as pd
from data_loader import is_file_exist
def load_data(path: str):
"""
:param path: str
:return: DataFrame
"""
is_file_exist(path)
data = pd.read_csv(path)
return data
| [
"pandas.read_csv",
"data_loader.is_file_exist"
] | [((151, 170), 'data_loader.is_file_exist', 'is_file_exist', (['path'], {}), '(path)\n', (164, 170), False, 'from data_loader import is_file_exist\n'), ((182, 199), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (193, 199), True, 'import pandas as pd\n')] |
# Copyright 2019-2020 the ProGraML authors.
#
# Contact <NAME> <<EMAIL>>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The graph transform ops are used to modify or convert Program Graphs to
another representation.
"""
import json
import subprocess
from typing import Any, Dict, Iterable, Optional, Union
import dgl
import networkx as nx
from dgl.heterograph import DGLHeteroGraph
from networkx.readwrite import json_graph as nx_json
from programl.exceptions import GraphTransformError
from programl.proto import ProgramGraph
from programl.util.py.executor import ExecutorLike, execute
from programl.util.py.runfiles_path import runfiles_path
GRAPH2DOT = str(runfiles_path("programl/bin/graph2dot"))
GRAPH2JSON = str(runfiles_path("programl/bin/graph2json"))
JsonDict = Dict[str, Any]
def _run_graph_transform_binary(
binary: str,
graph: ProgramGraph,
timeout: int = 300,
) -> Iterable[bytes]:
process = subprocess.Popen(
[binary, "--stdin_fmt=pb"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
try:
stdout, stderr = process.communicate(graph.SerializeToString(), timeout=timeout)
except subprocess.TimeoutExpired as e:
raise TimeoutError(str(e)) from e
if process.returncode:
try:
raise GraphTransformError(stderr.decode("utf-8"))
except UnicodeDecodeError as e:
raise GraphTransformError("Unknown error in graph transformation") from e
return stdout
def to_json(
graphs: Union[ProgramGraph, Iterable[ProgramGraph]],
timeout: int = 300,
executor: Optional[ExecutorLike] = None,
chunksize: Optional[int] = None,
) -> Union[JsonDict, Iterable[JsonDict]]:
"""Convert one or more Program Graphs to JSON node-link data.
:param graphs: A Program Graph, or a sequence of Program Graphs.
:param timeout: The maximum number of seconds to wait for an individual
graph conversion before raising an error. If multiple inputs are
provided, this timeout is per-input.
:param executor: An executor object, with method :code:`submit(callable,
*args, **kwargs)` and returning a Future-like object with methods
:code:`done() -> bool` and :code:`result() -> float`. The executor role
is to dispatch the execution of the jobs locally/on a cluster/with
multithreading depending on the implementation. Eg:
:code:`concurrent.futures.ThreadPoolExecutor`. Defaults to single
threaded execution. This is only used when multiple inputs are given.
:param chunksize: The number of inputs to read and process at a time. A
larger chunksize improves parallelism but increases memory consumption
as more inputs must be stored in memory. This is only used when multiple
inputs are given.
:return: If a single input is provided, return a single JSON dictionary.
Else returns an iterable sequence of JSON dictionaries.
:raises GraphTransformError: If graph conversion fails.
:raises TimeoutError: If the specified timeout is reached.
"""
def _run_one(graph: ProgramGraph):
try:
return json.loads(
_run_graph_transform_binary(
GRAPH2JSON,
graph,
timeout,
)
)
except json.JSONDecodeError as e:
raise GraphTransformError(str(e)) from e
if isinstance(graphs, ProgramGraph):
return _run_one(graphs)
return execute(_run_one, graphs, executor, chunksize)
def to_networkx(
graphs: Union[ProgramGraph, Iterable[ProgramGraph]],
timeout: int = 300,
executor: Optional[ExecutorLike] = None,
chunksize: Optional[int] = None,
) -> Union[nx.MultiDiGraph, Iterable[nx.MultiDiGraph]]:
"""Convert one or more Program Graphs to `NetworkX MultiDiGraphs
<https://networkx.org/documentation/stable/reference/classes/multidigraph.html>`_.
:param graphs: A Program Graph, or a sequence of Program Graphs.
:param timeout: The maximum number of seconds to wait for an individual
graph conversion before raising an error. If multiple inputs are
provided, this timeout is per-input.
:param executor: An executor object, with method :code:`submit(callable,
*args, **kwargs)` and returning a Future-like object with methods
:code:`done() -> bool` and :code:`result() -> float`. The executor role
is to dispatch the execution of the jobs locally/on a cluster/with
multithreading depending on the implementation. Eg:
:code:`concurrent.futures.ThreadPoolExecutor`. Defaults to single
threaded execution. This is only used when multiple inputs are given.
:param chunksize: The number of inputs to read and process at a time. A
larger chunksize improves parallelism but increases memory consumption
as more inputs must be stored in memory. This is only used when multiple
inputs are given.
:return: If a single input is provided, return a single :code:`nx.MultiDiGraph`.
Else returns an iterable sequence of :code:`nx.MultiDiGraph` instances.
:raises GraphTransformError: If graph conversion fails.
:raises TimeoutError: If the specified timeout is reached.
"""
def _run_one(json_data):
return nx_json.node_link_graph(json_data, multigraph=True, directed=True)
if isinstance(graphs, ProgramGraph):
return _run_one(to_json(graphs, timeout=timeout))
return execute(
_run_one,
to_json(graphs, timeout=timeout, executor=executor, chunksize=chunksize),
executor,
chunksize,
)
def to_dgl(
graphs: Union[ProgramGraph, Iterable[ProgramGraph]],
timeout: int = 300,
executor: Optional[ExecutorLike] = None,
chunksize: Optional[int] = None,
) -> Union[DGLHeteroGraph, Iterable[DGLHeteroGraph]]:
"""Convert one or more Program Graphs to `DGLGraphs
<https://docs.dgl.ai/en/latest/api/python/dgl.DGLGraph.html#dgl.DGLGraph>`_.
:param graphs: A Program Graph, or a sequence of Program Graphs.
:param timeout: The maximum number of seconds to wait for an individual
graph conversion before raising an error. If multiple inputs are
provided, this timeout is per-input.
:param executor: An executor object, with method :code:`submit(callable,
*args, **kwargs)` and returning a Future-like object with methods
:code:`done() -> bool` and :code:`result() -> float`. The executor role
is to dispatch the execution of the jobs locally/on a cluster/with
multithreading depending on the implementation. Eg:
:code:`concurrent.futures.ThreadPoolExecutor`. Defaults to single
threaded execution. This is only used when multiple inputs are given.
:param chunksize: The number of inputs to read and process at a time. A
larger chunksize improves parallelism but increases memory consumption
as more inputs must be stored in memory. This is only used when multiple
inputs are given.
:return: If a single input is provided, return a single
:code:`dgl.DGLGraph`. Else returns an iterable sequence of
:code:`dgl.DGLGraph` instances.
:raises GraphTransformError: If graph conversion fails.
:raises TimeoutError: If the specified timeout is reached.
"""
def _run_one(nx_graph):
return dgl.DGLGraph(nx_graph)
if isinstance(graphs, ProgramGraph):
return _run_one(to_networkx(graphs))
return execute(
_run_one,
to_networkx(graphs, timeout=timeout, executor=executor, chunksize=chunksize),
executor,
chunksize,
)
def to_dot(
graphs: Union[ProgramGraph, Iterable[ProgramGraph]],
timeout: int = 300,
executor: Optional[ExecutorLike] = None,
chunksize: Optional[int] = None,
) -> Union[str, Iterable[str]]:
"""Convert one or more Program Graphs to DOT Graph Description Language.
This produces a DOT source string representing the input graph. This can
then be rendered using the graphviz command line tools, or parsed using
`pydot <https://pypi.org/project/pydot/>`_.
:param graphs: A Program Graph, or a sequence of Program Graphs.
:param timeout: The maximum number of seconds to wait for an individual
graph conversion before raising an error. If multiple inputs are
provided, this timeout is per-input.
:param executor: An executor object, with method :code:`submit(callable,
*args, **kwargs)` and returning a Future-like object with methods
:code:`done() -> bool` and :code:`result() -> float`. The executor role
is to dispatch the execution of the jobs locally/on a cluster/with
multithreading depending on the implementation. Eg:
:code:`concurrent.futures.ThreadPoolExecutor`. Defaults to single
threaded execution. This is only used when multiple inputs are given.
:param chunksize: The number of inputs to read and process at a time. A
larger chunksize improves parallelism but increases memory consumption
as more inputs must be stored in memory. This is only used when multiple
inputs are given.
:return: A graphviz dot string when a single input is provided, else an
iterable sequence of graphviz dot strings.
:raises GraphTransformError: If graph conversion fails.
:raises TimeoutError: If the specified timeout is reached.
"""
def _run_one(graph: ProgramGraph) -> str:
return _run_graph_transform_binary(GRAPH2DOT, graph, timeout).decode("utf-8")
if isinstance(graphs, ProgramGraph):
return _run_one(graphs)
return execute(_run_one, graphs, executor, chunksize)
| [
"networkx.readwrite.json_graph.node_link_graph",
"subprocess.Popen",
"dgl.DGLGraph",
"programl.exceptions.GraphTransformError",
"programl.util.py.runfiles_path.runfiles_path",
"programl.util.py.executor.execute"
] | [((1165, 1204), 'programl.util.py.runfiles_path.runfiles_path', 'runfiles_path', (['"""programl/bin/graph2dot"""'], {}), "('programl/bin/graph2dot')\n", (1178, 1204), False, 'from programl.util.py.runfiles_path import runfiles_path\n'), ((1223, 1263), 'programl.util.py.runfiles_path.runfiles_path', 'runfiles_path', (['"""programl/bin/graph2json"""'], {}), "('programl/bin/graph2json')\n", (1236, 1263), False, 'from programl.util.py.runfiles_path import runfiles_path\n'), ((1429, 1549), 'subprocess.Popen', 'subprocess.Popen', (["[binary, '--stdin_fmt=pb']"], {'stdin': 'subprocess.PIPE', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), "([binary, '--stdin_fmt=pb'], stdin=subprocess.PIPE, stdout=\n subprocess.PIPE, stderr=subprocess.PIPE)\n", (1445, 1549), False, 'import subprocess\n'), ((4053, 4099), 'programl.util.py.executor.execute', 'execute', (['_run_one', 'graphs', 'executor', 'chunksize'], {}), '(_run_one, graphs, executor, chunksize)\n', (4060, 4099), False, 'from programl.util.py.executor import ExecutorLike, execute\n'), ((10266, 10312), 'programl.util.py.executor.execute', 'execute', (['_run_one', 'graphs', 'executor', 'chunksize'], {}), '(_run_one, graphs, executor, chunksize)\n', (10273, 10312), False, 'from programl.util.py.executor import ExecutorLike, execute\n'), ((5885, 5951), 'networkx.readwrite.json_graph.node_link_graph', 'nx_json.node_link_graph', (['json_data'], {'multigraph': '(True)', 'directed': '(True)'}), '(json_data, multigraph=True, directed=True)\n', (5908, 5951), True, 'from networkx.readwrite import json_graph as nx_json\n'), ((7975, 7997), 'dgl.DGLGraph', 'dgl.DGLGraph', (['nx_graph'], {}), '(nx_graph)\n', (7987, 7997), False, 'import dgl\n'), ((1929, 1989), 'programl.exceptions.GraphTransformError', 'GraphTransformError', (['"""Unknown error in graph transformation"""'], {}), "('Unknown error in graph transformation')\n", (1948, 1989), False, 'from programl.exceptions import GraphTransformError\n')] |
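# Usage sketch for the converters above (added for illustration; the `graphs`
# argument and executor settings are assumptions, not from the original file).
from concurrent.futures import ThreadPoolExecutor

def convert_all(graphs):
    # An iterable of ProgramGraphs plus an executor streams conversions through a
    # thread pool; a single ProgramGraph input would return a single result instead.
    with ThreadPoolExecutor() as executor:
        return list(to_networkx(graphs, timeout=60, executor=executor, chunksize=16))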
"""
planet.py
Copyright 2016 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import random
class Planet:
def __init__(self, ID, planetcode):
self.ID = ID
self.PlanetCode = planetcode
self.x = None
self.y = None
self.PlayerID = None
self.Production = float(random.randint(5, 100)) / 10.
def ToString(self, visibility="None"):
s = "ID=%i;PlanetCode=%s;x=%.1f;y=%.1f" % (self.ID, self.PlanetCode, self.x, self.y)
if visibility == "None":
return s
s = s + ";PlayerID=%s" % (str(self.PlayerID),)
if visibility == "Planet":
return s
assert (visibility == 'Ship')
if self.Production is not None:
s = s + ";Production=%.1f" % (self.Production,)
return s
# static method
@staticmethod
def MergeDataStrings(new_s, old_s):
new = new_s.split(";")
old = old_s.split(";")
if len(old) > len(new):
M = len(new)
out = new + old[M:]
else:
out = new
return ";".join(out)
@staticmethod
def FromString(s):
self = Planet(-1, "-1")
self.Production = None
info = s.split(";")
for i in info:
if "=" not in i:
continue
(name, val) = i.split("=")
if name in ('x', 'y', 'Production'):
setattr(self, name, (float(val)))
continue
if name in ('PlayerID', 'ID'):
setattr(self, name, (int(val)))
continue
setattr(self, name, val)
return self
# class Player:
# def __init__(self, ID, name):
# self.ID = ID
# self.Name = name
# self.SensorRangeShip = 50.
# self.ShipRange = 50.
# self.ShipSpeed = 25.
# self.SensorRangePlanet = 100.
# self.ControlledPlanets = []
# self.VisiblePlanets = {}
# self.PreviouslyVisiblePlanets = {}
# self.VisibleFleets = []
#
# def Dump(self):
# pprint("Player")
# for i in dir(self):
# if "__" in i:
# continue
# if str(type(getattr(self, i))) == "<type 'instancemethod'>":
# continue
# if str(type(getattr(self, i))) == "<type 'instance'>":
# getattr(self, i).Dump()
# continue
# pprint((i, getattr(self, i)))
| [
"random.randint"
] | [((802, 824), 'random.randint', 'random.randint', (['(5)', '(100)'], {}), '(5, 100)\n', (816, 824), False, 'import random\n')] |
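# Illustrative round trip for the string serialization above (added example).
p = Planet(7, "P7")
p.x, p.y, p.PlayerID = 10.0, 20.0, 2
s = p.ToString(visibility="Ship")
q = Planet.FromString(s)
assert q.ID == 7 and q.PlayerID == 2 and q.x == 10.0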
'''
Plaw.py
'''
from datetime import datetime
from requests import request
import json
class InvalidGrant(Exception):
pass
class InvalidToken(Exception):
pass
class Plaw():
AUTH_URL = 'https://cloud.lightspeedapp.com/oauth/access_token.php'
BASE_URL = 'https://api.lightspeedapp.com/'
def __init__(self, client_id, client_secret,
account_id=None, refresh_token=None, access_token=None):
self.client_id = client_id
self.client_secret = client_secret
self.account_id = account_id
self.refresh_token = refresh_token
self.access_token = access_token
def _refresh_access_token(self):
'''
uses refresh token to retrieve a new access token
:return: new access token
'''
payload = {
'refresh_token': self.refresh_token,
'client_id': self.client_id,
'client_secret': self.client_secret,
'grant_type': 'refresh_token',
}
response = request('POST', self.AUTH_URL, data=payload)
if response.status_code == 400:
raise InvalidGrant('Refresh token is invalid. Perhaps it was revoked?')
return response.json()['access_token']
def _call(self, endpoint, params):
'''
just calls the API with the parameters given. used exclusively by _call_api()
:param endpoint: string of the endpoint being called.
:param params: dict of query parameters used in api call
:return: the decoded JSON from response
'''
endpoint = self.BASE_URL + endpoint
bearer = {
'Authorization': 'Bearer ' + self.access_token
}
response = request('GET', endpoint, headers=bearer, params=params)
if response.status_code == 401 or response.status_code == 400:
raise InvalidToken('Access Token is Expired.')
return response.json()
def _call_api(self, endpoint, params=None):
'''
utility function for calling API. this is the one that other functions
of the class will use. handles:
Token refreshes
Converting datetimes to iso format
Pagination
Rate Limiting (soon)
:param endpoint: string of the endpoint being called.
passed on to _call()
:param params: dict of query parameters used in the api call
:return: a generator for each page of the decoded JSON from response
'''
if params:
# look for datetimes to convert and query ops
for key, param in params.items():
# datetimes may not have query op passed in
if isinstance(param, datetime):
params[key] = param.isoformat()
# datetimes may be passed in with query op
if isinstance(param, list):
if len(param) > 1:
if isinstance(param[1], datetime):
params[key][1] = param[1].isoformat()
# necessary for between date lookups
if len(param) == 3:
if isinstance(param[2], datetime):
params[key][2] = param[2].isoformat()
# also, join the list
params[key] = ','.join(params[key])
else:
# we make an empty params dict to make pagination simpler
params = dict()
while True:
try:
response = self._call(endpoint, params)
yield response
except InvalidToken: # refreshing access token when necessary
self.access_token = self._refresh_access_token()
response = self._call(endpoint, params)
yield response
if 'offset' in response['@attributes']:
count = int(response['@attributes']['count'])
offset = int(response['@attributes']['offset'])
if count - offset > 100:
params['offset'] = str(offset + 100)
else:
break
else:
break
def get_tokens(self, code):
'''
uses temp code from lightspeed to request and save initial tokens
:param code: temporary code from LS
:return: Nothing - just updates the codes
'''
# TODO throw an exception on timeout
payload = {
'client_id': self.client_id,
'client_secret': self.client_secret,
'code': code,
'grant_type': 'authorization_code'
}
response = request('POST', self.AUTH_URL, data=payload)
self.access_token = response.json()['access_token']
self.refresh_token = response.json()['refresh_token']
def fetch_account_id(self):
'''
likely used right after get_tokens() - adds the account id to
the object so you can actually call the API
:return: Nothing - just updates account_id
'''
account_info = self.account()
self.account_id = account_info['accountID']
def account(self):
'''
interface for the account endpoint
:return: dict of account info - accountID and name
'''
# account is special in that we don't return a generator, we just return a dict
return next(self._call_api('API/Account.json'))
def shop(self, params=None):
'''
interface for the shop endpoint
:return: generator where each item is a page of response
'''
return self._call_api(f'API/Account/{self.account_id}/Shop.json',
params)
def employee(self, params=None, load_contact=False):
'''
interface for the employee endpoint
:return: generator where each item is a page of response
'''
if load_contact:
if not params:
params = dict()
params['load_relations'] = json.dumps(['Contact'])
return self._call_api(f'API/Account/{self.account_id}/Employee.json',
params)
def employee_hours(self, params=None):
'''
interface for the employeeHours endpoint
:return: generator where each item is a page of response
'''
return self._call_api(f'API/Account/{self.account_id}/EmployeeHours.json',
params)
| [
"json.dumps",
"requests.request"
] | [((1015, 1059), 'requests.request', 'request', (['"""POST"""', 'self.AUTH_URL'], {'data': 'payload'}), "('POST', self.AUTH_URL, data=payload)\n", (1022, 1059), False, 'from requests import request\n'), ((1709, 1764), 'requests.request', 'request', (['"""GET"""', 'endpoint'], {'headers': 'bearer', 'params': 'params'}), "('GET', endpoint, headers=bearer, params=params)\n", (1716, 1764), False, 'from requests import request\n'), ((4716, 4760), 'requests.request', 'request', (['"""POST"""', 'self.AUTH_URL'], {'data': 'payload'}), "('POST', self.AUTH_URL, data=payload)\n", (4723, 4760), False, 'from requests import request\n'), ((6101, 6124), 'json.dumps', 'json.dumps', (["['Contact']"], {}), "(['Contact'])\n", (6111, 6124), False, 'import json\n')] |
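# Usage sketch for the client above (illustrative; the credentials, account id and
# the query field name are placeholders/assumptions, not from the original file).
from datetime import datetime
ls = Plaw(client_id="...", client_secret="...", account_id="12345",
     refresh_token="...", access_token="...")
# Each call yields one decoded JSON page at a time, refreshing tokens and
# following pagination offsets automatically.
for page in ls.employee_hours(params={"checkIn": [">", datetime(2021, 1, 1)]}):
    print(page["@attributes"]["count"])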
# Generated by Django 3.2.5 on 2021-09-27 21:41
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lancamentos', '0014_auto_20191226_1319'),
]
operations = [
migrations.AlterField(
model_name='conta',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='journal',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='lancamento',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
]
| [
"django.db.models.BigAutoField"
] | [((335, 431), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (354, 431), False, 'from django.db import migrations, models\n'), ((546, 642), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (565, 642), False, 'from django.db import migrations, models\n'), ((760, 856), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (779, 856), False, 'from django.db import migrations, models\n')] |
#!/usr/bin/env python
import socket
from mercury import logger
def low_high_byte(integer):
low_byte = integer & 0xFF
high_byte = integer >> 8
return low_byte, high_byte
def byte_to_variable(low_byte1, byte2, byte3=None, high_byte4=None):
if byte3 == None and high_byte4 == None:
return (low_byte1 & 0xFF) + ((byte2 & 0xFF) << 8)
elif not byte3 == None and high_byte4 == None:
return (low_byte1 & 0xFF) + (byte2 << 8) + (byte3 << 16)
elif not byte3 == None and not high_byte4 == None:
return (low_byte1 & 0xFF) + (byte2 << 8) + (byte3 << 16) + (high_byte4 << 24)
class EposFault():
def __init__(self):
self.traction_right = 0
self.traction_left = 0
self.front_arm_fault = 0
self.rear_arm_fault = 0
self.manip_joint1 = 0
self.manip_joint2 = 0
self.manip_joint3 = 0
class EposPosition:
def __init__(self):
self.front_arm = 0.0
self.rear_arm = 0.0
self.manip_joint1 = 0.0
self.manip_joint2 = 0.0
self.manip_joint3 = 0.0
self.reserve = 0.0
class Battery:
def __init__(self):
self.power_battery = 3
self.signal_battery = 0
class Current:
def __init__(self):
self.right_traction = 0
self.left_traction = 0
self.manip_joint1 = 0
self.manip_joint2 = 0
self.manip_joint3 = 0
class Torque:
def __init__(self):
self.right_traction = 0
self.left_traction = 0
self.manip_joint1 = 0
self.manip_joint2 = 0
self.manip_joint3 = 0
class MotorRPM:
def __init__(self):
self.left = 0
self.right = 0
class SensorBoard:
def __init__(self):
self.overCurrent_joint4 = 0
self.overCurrent_joint5 = 0
self.overCurrent_joint6 = 0
self.position_joint4 = 0
self.co2_gas = 0
self.direction_joint4 = 0
class FeedBackProtocol:
def __init__(self, reciver_main_board_port=3031, reciver_sensor_board_port=3033):
self.epos_fault = EposFault()
self.epos_position = EposPosition()
self.battery = Battery()
self.current = Current()
self.torque = Torque()
self.sensor_board = SensorBoard()
self.motor_rpm = MotorRPM()
self.socket_main_board = socket.socket(
socket.AF_INET, socket.SOCK_DGRAM)
self.socket_main_board.bind(('0.0.0.0', reciver_main_board_port))
self.socket_sensor_board = socket.socket(
socket.AF_INET, socket.SOCK_DGRAM)
self.socket_sensor_board.bind(('0.0.0.0', reciver_sensor_board_port))
def deserilise_main_board_data(self):
try:
data, _ = self.socket_main_board.recvfrom(1024)
except socket.error as err:
logger.log_warn(err)
return
data_decimal = map(ord, data)
self.epos_fault.traction_right = byte_to_variable(
data_decimal[0], data_decimal[1])
self.epos_fault.traction_left = byte_to_variable(
data_decimal[2], data_decimal[3])
self.epos_fault.front_arm_fault = byte_to_variable(
data_decimal[4], data_decimal[5])
self.epos_fault.rear_arm_fault = byte_to_variable(
data_decimal[6], data_decimal[7])
self.epos_fault.manip_joint1 = byte_to_variable(
data_decimal[8], data_decimal[9])
self.epos_fault.manip_joint2 = byte_to_variable(
data_decimal[10], data_decimal[11])
self.epos_fault.manip_joint3 = byte_to_variable(
data_decimal[12], data_decimal[13])
self.epos_position.front_arm = data_decimal[14]
self.epos_position.rear_arm = data_decimal[15]
self.epos_position.manip_joint1 = byte_to_variable(
data_decimal[18], data_decimal[19])
self.epos_position.manip_joint2 = byte_to_variable(
data_decimal[20], data_decimal[21])
self.epos_position.manip_joint3 = byte_to_variable(
data_decimal[22], data_decimal[23])
self.battery.power_battery = data_decimal[26]
self.battery.signal_battery = data_decimal[27]
self.current.right_traction = byte_to_variable(
data_decimal[28], data_decimal[29])
self.current.left_traction = byte_to_variable(
data_decimal[30], data_decimal[31])
self.motor_rpm.left = byte_to_variable(
data_decimal[32], data_decimal[33])
self.motor_rpm.right = byte_to_variable(
data_decimal[34], data_decimal[35])
return data_decimal
def deserilise_sensor_board_data(self):
try:
data, _ = self.socket_sensor_board.recvfrom(1024)
except socket.error as err:
logger.log_warn(err)
return
data_decimal = map(ord, data)
self.sensor_board.overCurrent_joint4 = data_decimal[0]
self.sensor_board.overCurrent_joint5 = data_decimal[1]
self.sensor_board.overCurrent_joint6 = data_decimal[2]
self.sensor_board.position_joint4 = byte_to_variable(
data_decimal[3], data_decimal[4], data_decimal[5], data_decimal[6])
self.sensor_board.co2_gas = byte_to_variable(
data_decimal[7], data_decimal[8])
self.sensor_board.direction_joint4 = data_decimal[9]
| [
"socket.socket",
"mercury.logger.log_warn"
] | [((2329, 2377), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (2342, 2377), False, 'import socket\n'), ((2500, 2548), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (2513, 2548), False, 'import socket\n'), ((2804, 2824), 'mercury.logger.log_warn', 'logger.log_warn', (['err'], {}), '(err)\n', (2819, 2824), False, 'from mercury import logger\n'), ((4768, 4788), 'mercury.logger.log_warn', 'logger.log_warn', (['err'], {}), '(err)\n', (4783, 4788), False, 'from mercury import logger\n')] |
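# Worked example for the byte-packing helpers above (added for illustration).
value = 0x1234                              # 4660
low, high = low_high_byte(value)            # low = 0x34, high = 0x12
assert byte_to_variable(low, high) == value
# Four-byte variant, reassembled little-endian (lowest byte first):
assert byte_to_variable(0x0D, 0x0C, 0x0B, 0x0A) == 0x0A0B0C0D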
from functools import reduce
from typing import List
import regex as re
def read_lines(file_path: str) -> List[str]:
lines = []
with open(file_path, 'r') as inf:
for line in inf:
lines.append(line.rstrip('\n'))
return lines
def write_lines(lines: List[str], file_path: str) -> None:
with open(file_path, 'w') as outf:
for line in lines:
outf.write(f'{line}\n')
def split_keep_multiple_separators(string: str, separators: List[str]) -> List[str]:
'''
Split `string` on separator(s) but also keep the separator(s)
Modified from `http://programmaticallyspeaking.com/split-on-separator-but-keep-the-separator-in-python.html`
to extend to multiple separators.
'''
rgx_multiple_separators = '|'.join([re.escape(sep) for sep in separators])
rgx_multiple_separators = '(' + rgx_multiple_separators + ')'
return reduce(lambda acc, elem: acc[:-1] + [acc[-1] + elem] if (elem in separators) else acc + [elem], re.split(rgx_multiple_separators, string), [])
| [
"regex.escape",
"regex.split"
] | [((994, 1035), 'regex.split', 're.split', (['rgx_multiple_separators', 'string'], {}), '(rgx_multiple_separators, string)\n', (1002, 1035), True, 'import regex as re\n'), ((782, 796), 'regex.escape', 're.escape', (['sep'], {}), '(sep)\n', (791, 796), True, 'import regex as re\n')] |
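# Illustrative examples for split_keep_multiple_separators() above (added example).
split_keep_multiple_separators("a+b-c", ["+", "-"])   # -> ['a+', 'b-', 'c']
split_keep_multiple_separators("abc", ["+"])          # -> ['abc']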
from setuptools import setup, find_packages
setup(
name = 'spamm',
version = '0.0.1',
description = 'AGN spectral Bayesian decomposition',
keywords = ['astronomy'],
classifiers = ['Programming Language :: Python',
'Programming Language :: Python :: 3',
'Development Status :: 1 - Planning',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Astronomy',
'Topic :: Scientific/Engineering :: Physics',
'Topic :: Software Development :: Libraries :: Python Modules'],
packages = find_packages(),
install_requires = ['setuptools',
'numpy',
'astropy',
'matplotlib',
'scipy>=0.17.1',
'emcee==2.2.1',
'pysynphot']
)
| [
"setuptools.find_packages"
] | [((641, 656), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (654, 656), False, 'from setuptools import setup, find_packages\n')] |
"""Hello World API implemented using Google Cloud Endpoints.
Defined here are the ProtoRPC messages needed to define Schemas for methods
as well as those methods defined in an API.
"""
import endpoints
from google.appengine.ext import ndb
from protorpc import messages
from protorpc import remote
from protorpc.message_types import VoidMessage
# TODO: Replace the following lines with client IDs obtained from the APIs
# Console or Cloud Console.
WEB_CLIENT_ID = '140298350420-qvbr5c50mmualf39mlmiv767sqjikg1p.apps.googleusercontent.com'
class Greeting(ndb.Model):
message = ndb.StringProperty(required=True, indexed=False)
class GreetingMessage(messages.Message):
"""Greeting that stores a message."""
message = messages.StringField(1)
MULTIPLY_METHOD_RESOURCE = endpoints.ResourceContainer(
GreetingMessage,
times=messages.IntegerField(2, variant=messages.Variant.INT32,
required=True))
@endpoints.api(name='shouter', version='v1', owner_name='dcifuen',
allowed_client_ids=[WEB_CLIENT_ID,
endpoints.API_EXPLORER_CLIENT_ID], )
class ShouterApi(remote.Service):
"""Shouter API v1."""
@endpoints.method(GreetingMessage, GreetingMessage,
path='shouter', http_method='POST',
name='greeting.update')
def greeting_update(self, request):
greeting = Greeting.get_by_id(1)
if not greeting:
greeting = Greeting(id=1)
greeting.message = request.message
greeting.put()
return GreetingMessage(message=request.message)
@endpoints.method(VoidMessage, GreetingMessage,
path='shouter', http_method='GET',
name='greeting.get')
def greeting_get(self, request):
greeting = Greeting.get_by_id(1)
if not greeting:
raise endpoints.NotFoundException('Greeting not found')
return GreetingMessage(message=greeting.message)
APPLICATION = endpoints.api_server([ShouterApi])
| [
"endpoints.method",
"endpoints.api",
"endpoints.NotFoundException",
"protorpc.messages.StringField",
"endpoints.api_server",
"protorpc.messages.IntegerField",
"google.appengine.ext.ndb.StringProperty"
] | [((953, 1092), 'endpoints.api', 'endpoints.api', ([], {'name': '"""shouter"""', 'version': '"""v1"""', 'owner_name': '"""dcifuen"""', 'allowed_client_ids': '[WEB_CLIENT_ID, endpoints.API_EXPLORER_CLIENT_ID]'}), "(name='shouter', version='v1', owner_name='dcifuen',\n allowed_client_ids=[WEB_CLIENT_ID, endpoints.API_EXPLORER_CLIENT_ID])\n", (966, 1092), False, 'import endpoints\n'), ((2025, 2059), 'endpoints.api_server', 'endpoints.api_server', (['[ShouterApi]'], {}), '([ShouterApi])\n', (2045, 2059), False, 'import endpoints\n'), ((584, 632), 'google.appengine.ext.ndb.StringProperty', 'ndb.StringProperty', ([], {'required': '(True)', 'indexed': '(False)'}), '(required=True, indexed=False)\n', (602, 632), False, 'from google.appengine.ext import ndb\n'), ((732, 755), 'protorpc.messages.StringField', 'messages.StringField', (['(1)'], {}), '(1)\n', (752, 755), False, 'from protorpc import messages\n'), ((1207, 1321), 'endpoints.method', 'endpoints.method', (['GreetingMessage', 'GreetingMessage'], {'path': '"""shouter"""', 'http_method': '"""POST"""', 'name': '"""greeting.update"""'}), "(GreetingMessage, GreetingMessage, path='shouter',\n http_method='POST', name='greeting.update')\n", (1223, 1321), False, 'import endpoints\n'), ((1634, 1741), 'endpoints.method', 'endpoints.method', (['VoidMessage', 'GreetingMessage'], {'path': '"""shouter"""', 'http_method': '"""GET"""', 'name': '"""greeting.get"""'}), "(VoidMessage, GreetingMessage, path='shouter', http_method=\n 'GET', name='greeting.get')\n", (1650, 1741), False, 'import endpoints\n'), ((845, 916), 'protorpc.messages.IntegerField', 'messages.IntegerField', (['(2)'], {'variant': 'messages.Variant.INT32', 'required': '(True)'}), '(2, variant=messages.Variant.INT32, required=True)\n', (866, 916), False, 'from protorpc import messages\n'), ((1902, 1951), 'endpoints.NotFoundException', 'endpoints.NotFoundException', (['"""Greeting not found"""'], {}), "('Greeting not found')\n", (1929, 1951), False, 'import endpoints\n')] |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import music21
from music21 import converter, corpus, instrument, midi, note, chord, pitch
import music21.midi as midi21
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
from midicom import *
#In ubuntu you must manually set the default program to open *Path by specifying the path to that program
music21.environment.set('graphicsPath', '/usr/bin/eog') #Eye of Gnome, Ubuntu's default image viewer
music21.environment.set('midiPath', '/usr/bin/timidity') #timidity, installed midi player
from music21 import stream
from music21 import roman
import os
import pickle
import pprint
import gensim, logging
def main():
sentences = load_w2v_train()
model = gensim.models.Word2Vec(sentences, min_count=2, window=4, size=50, iter=100)
print("List of chords found:")
print(model.wv.vocab.keys())
print("Number of chords considered by model: {0}".format(len(model.wv.vocab)))
mid0 = "good-samples/output_0.mid"
mid1 = "good-samples/output_1.mid"
mid2 = "good-samples/output_2.mid"
mid3 = "good-samples/output_3.mid"
res = calculate_similarity_aux(model, mid0, [mid0, mid1, mid2, mid3], threshold = -1)
print(res)
def vectorize_harmony(model, harmonic_reduction):
# Gets the model vector values for each chord from the reduction.
word_vecs = []
for word in harmonic_reduction:
try:
vec = model[word]
word_vecs.append(vec)
except KeyError:
# Ignore, if the word doesn't exist in the vocabulary
pass
# Assuming that document vector is the mean of all the word vectors.
return np.mean(word_vecs, axis=0)
def cosine_similarity(vecA, vecB):
# Find the similarity between two vectors based on the dot product.
csim = np.dot(vecA, vecB) / (np.linalg.norm(vecA) * np.linalg.norm(vecB))
if np.isnan(np.sum(csim)):
return 0
return csim
def calculate_similarity_aux(model, source_name, target_names=[], threshold=0):
source_midi = open_midi(source_name, True)
source_harmo = harmonic_reduction(source_midi)
source_vec = vectorize_harmony(model, source_harmo)
results = []
for name in target_names:
print(f"loading {name}")
target_midi = open_midi(name, True)
target_harmo = harmonic_reduction(target_midi)
if (len(target_harmo) == 0):
continue
target_vec = vectorize_harmony(model, target_harmo)
sim_score = cosine_similarity(source_vec, target_vec)
if sim_score > threshold:
results.append({
'score' : sim_score,
'name' : name
})
# Sort results by score in desc order
results.sort(key=lambda k : k['score'] , reverse=True)
return results
def get_related_chords(model, token, topn=100):  # default: return the top 100 most similar chords
print("Similar chords with " + token)
for word, similarity in model.wv.most_similar(positive=[token], topn=topn):
print (word, round(similarity, 5))
def get_chord_similarity(model, chordA, chordB):
print("Similarity between {0} and {1}: {2}".format(
chordA, chordB, model.wv.similarity(chordA, chordB)))
def load_w2v_train():
load_dir = "./word2vec/cache/"
sentences = []
for filename in sorted(os.listdir(load_dir)):
if filename.endswith('pickle'):
#pickle
infile = open(load_dir+filename, 'rb')
red_midi = pickle.load(infile)
infile.close()
sentences.append(red_midi)
return sentences
def save_w2v_train():
directory = "./word2vec/data/"
save_dir = "./word2vec/cache/"
for filename in sorted(os.listdir(directory)):
print(filename)
savename = filename[:-4]+"pickle"
if savename in os.listdir(save_dir):
continue
elif filename.endswith('.mid'):
base_midi = open_midi(directory + filename, True)
red_midi = harmonic_reduction(base_midi)
#pickle
outfile = open(save_dir+savename, 'wb')
pickle.dump(red_midi, outfile)
outfile.close()
def note_count(measure, count_dict):
bass_note = None
for chord in measure.recurse().getElementsByClass('Chord'):
# All notes have the same length of its chord parent.
note_length = chord.quarterLength
for note in chord.pitches:
# If note is "C5", note.name is "C". We use "C5"
# style to be able to detect more precise inversions.
note_name = str(note)
if (bass_note is None or bass_note.ps > note.ps):
bass_note = note
if note_name in count_dict:
count_dict[note_name] += note_length
else:
count_dict[note_name] = note_length
return bass_note
def simplify_roman_name(roman_numeral):
# Chords can get nasty names as "bII#86#6#5",
# in this method we try to simplify names, even if it ends in
# a different chord to reduce the chord vocabulary and display
# chord function clearer.
ret = roman_numeral.romanNumeral
inversion_name = None
inversion = roman_numeral.inversion()
# Checking valid inversions.
if ((roman_numeral.isTriad() and inversion < 3) or
(inversion < 4 and
(roman_numeral.seventh is not None or roman_numeral.isSeventh()))):
inversion_name = roman_numeral.inversionName()
if (inversion_name is not None):
ret = ret + str(inversion_name)
elif (roman_numeral.isDominantSeventh()): ret = ret + "M7"
elif (roman_numeral.isDiminishedSeventh()): ret = ret + "o7"
return ret
def harmonic_reduction(midi_file):
ret = []
temp_midi = stream.Score()
temp_midi_chords = midi_file.chordify()
temp_midi.insert(0, temp_midi_chords)
music_key = temp_midi.analyze('key')
max_notes_per_chord = 4
for m in temp_midi_chords.measures(0, None): # None = get all measures.
if (type(m) != stream.Measure):
continue
# Here we count all notes length in each measure,
# get the most frequent ones and try to create a chord with them.
count_dict = dict()
bass_note = note_count(m, count_dict)
if (len(count_dict) < 1):
ret.append("-") # Empty measure
continue
sorted_items = sorted(count_dict.items(), key=lambda x:x[1])
sorted_notes = [item[0] for item in sorted_items[-max_notes_per_chord:]]
measure_chord = chord.Chord(sorted_notes)
# Convert the chord to the functional roman representation
# to make its information independent of the music key.
roman_numeral = roman.romanNumeralFromChord(measure_chord, music_key)
ret.append(simplify_roman_name(roman_numeral))
return ret
if __name__ == "__main__":
main()
| [
"numpy.mean",
"os.listdir",
"music21.chord.Chord",
"pickle.dump",
"pickle.load",
"gensim.models.Word2Vec",
"numpy.dot",
"music21.stream.Score",
"music21.environment.set",
"numpy.sum",
"numpy.linalg.norm",
"music21.roman.romanNumeralFromChord"
] | [((427, 482), 'music21.environment.set', 'music21.environment.set', (['"""graphicsPath"""', '"""/usr/bin/eog"""'], {}), "('graphicsPath', '/usr/bin/eog')\n", (450, 482), False, 'import music21\n'), ((528, 584), 'music21.environment.set', 'music21.environment.set', (['"""midiPath"""', '"""/usr/bin/timidity"""'], {}), "('midiPath', '/usr/bin/timidity')\n", (551, 584), False, 'import music21\n'), ((793, 868), 'gensim.models.Word2Vec', 'gensim.models.Word2Vec', (['sentences'], {'min_count': '(2)', 'window': '(4)', 'size': '(50)', 'iter': '(100)'}), '(sentences, min_count=2, window=4, size=50, iter=100)\n', (815, 868), False, 'import gensim, logging\n'), ((1733, 1759), 'numpy.mean', 'np.mean', (['word_vecs'], {'axis': '(0)'}), '(word_vecs, axis=0)\n', (1740, 1759), True, 'import numpy as np\n'), ((5943, 5957), 'music21.stream.Score', 'stream.Score', ([], {}), '()\n', (5955, 5957), False, 'from music21 import stream\n'), ((1879, 1897), 'numpy.dot', 'np.dot', (['vecA', 'vecB'], {}), '(vecA, vecB)\n', (1885, 1897), True, 'import numpy as np\n'), ((1962, 1974), 'numpy.sum', 'np.sum', (['csim'], {}), '(csim)\n', (1968, 1974), True, 'import numpy as np\n'), ((3435, 3455), 'os.listdir', 'os.listdir', (['load_dir'], {}), '(load_dir)\n', (3445, 3455), False, 'import os\n'), ((3821, 3842), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (3831, 3842), False, 'import os\n'), ((6754, 6779), 'music21.chord.Chord', 'chord.Chord', (['sorted_notes'], {}), '(sorted_notes)\n', (6765, 6779), False, 'from music21 import converter, corpus, instrument, midi, note, chord, pitch\n'), ((6944, 6997), 'music21.roman.romanNumeralFromChord', 'roman.romanNumeralFromChord', (['measure_chord', 'music_key'], {}), '(measure_chord, music_key)\n', (6971, 6997), False, 'from music21 import roman\n'), ((1901, 1921), 'numpy.linalg.norm', 'np.linalg.norm', (['vecA'], {}), '(vecA)\n', (1915, 1921), True, 'import numpy as np\n'), ((1924, 1944), 'numpy.linalg.norm', 'np.linalg.norm', (['vecB'], {}), '(vecB)\n', (1938, 1944), True, 'import numpy as np\n'), ((3592, 3611), 'pickle.load', 'pickle.load', (['infile'], {}), '(infile)\n', (3603, 3611), False, 'import pickle\n'), ((3934, 3954), 'os.listdir', 'os.listdir', (['save_dir'], {}), '(save_dir)\n', (3944, 3954), False, 'import os\n'), ((4217, 4247), 'pickle.dump', 'pickle.dump', (['red_midi', 'outfile'], {}), '(red_midi, outfile)\n', (4228, 4247), False, 'import pickle\n')] |
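# Worked example for cosine_similarity() above (added for illustration).
import numpy as np
a = np.array([1.0, 0.0])
b = np.array([1.0, 1.0])
cosine_similarity(a, a)    # -> 1.0 (identical direction)
cosine_similarity(a, b)    # -> ~0.7071 (vectors 45 degrees apart)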
# Normal tuple
# The index values are easy to remember when the number of elements is small,
# but when the tuple is large they cause confusion while fetching elements
# by index.
t1 = (21,34,5,6,3,3,3,3,4,6)
print(t1)
print(t1[0])
print(t1[2])
# NamedTuples: along with the index values, a name is also assigned to each element.
# namedtuple() creates a new object type.
from collections import namedtuple
# It is simlar to creating a class in this context 1st value in brackets is like class_name
# and attr_1 .. attr3 are like attributes
cat = namedtuple('cat','nails eyes color')
john = cat(nails='pointy',eyes="blue",color='grey')
# We can use the tuple values like we use in class
print(john.nails)
# We can also print multiple attributes
print(john.nails,john.eyes, john.color )
print(john)
# We can also use indexing like a normal tuple;
# it will return the value present at that index
print("john[1]: ",john[1])
# OP: john[1]: blue
# Example 2
emp = namedtuple('emp', 'id name email addr')
dave = emp(id =101, name='dave',email='<EMAIL>',addr='india' )
# We can also print whole tuple value
print(dave)
| [
"collections.namedtuple"
] | [((560, 597), 'collections.namedtuple', 'namedtuple', (['"""cat"""', '"""nails eyes color"""'], {}), "('cat', 'nails eyes color')\n", (570, 597), False, 'from collections import namedtuple\n'), ((972, 1011), 'collections.namedtuple', 'namedtuple', (['"""emp"""', '"""id name email addr"""'], {}), "('emp', 'id name email addr')\n", (982, 1011), False, 'from collections import namedtuple\n')] |
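# A couple more illustrative lookups (added example): namedtuples still behave like
# plain tuples, so unpacking and conversion to a dict both work.
emp_id, name, email, addr = dave
print(emp_id, addr)        # 101 india
print(dave._asdict())      # mapping of field names to values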
class GmshMeshReceiver:
def __init__(self):
pass
def add_node(self, node_nr, point):
pass
def finalize_nodes(self):
pass
def add_element(self, element_nr, element_type, vertex_nrs,
lexicographic_nodes, tag_numbers):
pass
def finalize_elements(self):
pass
def add_tag(self, name, index, dimension):
pass
def finalize_tags(self):
pass
def main():
mr = GmshMeshReceiver()
import sys
from meshpy.gmsh_reader import read_gmsh
read_gmsh(mr, sys.argv[1])
if __name__ == "__main__":
main()
| [
"meshpy.gmsh_reader.read_gmsh"
] | [((542, 568), 'meshpy.gmsh_reader.read_gmsh', 'read_gmsh', (['mr', 'sys.argv[1]'], {}), '(mr, sys.argv[1])\n', (551, 568), False, 'from meshpy.gmsh_reader import read_gmsh\n')] |
import oscn
def test_parse_string_response():
case = oscn.request.Case("cleveland-CF-2016-84")
assert case.judge == "<NAME>"
assert case.filed == "01/19/2016"
assert case.closed == "04/28/2016"
assert case.county == "cleveland"
assert case.year == "2016"
assert case.type == "CF"
def test_live_counts():
case1 = oscn.request.Case("tulsa-CF-2012-255")
counts = case1.counts
assert len(counts) == 2
assert counts[0]["offense"] == "01/09/2012"
assert counts[1]["description"] == "CHILD ABUSE BY INJURY(CHAB)"
assert counts[1]["violation"] == "21 O.S. 843.5 (A)"
assert counts[1]["party"] == "COTTON, <NAME>"
assert counts[1]["disposed"] == "CONVICTION, 06/25/2013. <NAME>"
assert counts.text != ""
def test_live_counts_list():
cases = oscn.request.CaseList(start=15, stop=17)
for case in cases:
assert case.counts[0]["party"]
def test_docket():
case1 = oscn.request.Case("tulsa-CF-2019-03")
docket = case1.docket
assert isinstance(docket, list)
assert "FELONY INITIAL FILING" in docket.text
for minute in docket:
assert isinstance(minute, dict)
assert minute["date"] is not ""
def test_issues():
case1 = oscn.request.Case("tulsa-CJ-2017-5021")
issues = oscn.parse.issues(case1.text)
assert isinstance(issues, list)
assert "Disposition" in issues.text
for issue in issues:
assert isinstance(issue, dict)
def test_get_parties():
case1 = oscn.request.Case("tulsa-CJ-2020-299")
parties = oscn.parse.parties(case1.text)
assert isinstance(parties, list)
assert parties != []
assert "DISCOVER BANK" in parties.text
def test_parties():
case1 = oscn.request.Case("tulsa-CJ-2020-299")
issues = oscn.parse.issues(case1.text)
assert isinstance(issues, list)
for issue in issues:
assert isinstance(issue, dict)
assert isinstance(issue["parties"], list)
for party in issue["parties"]:
assert isinstance(party, dict)
assert "name" in party.keys()
assert "disposed" in party.keys()
def test_attorneys():
case1 = oscn.request.Case("tulsa-CJ-2016-143")
attorneys1 = oscn.parse.attorneys(case1.text)
assert isinstance(attorneys1, list)
assert len(attorneys1) == 1
assert attorneys1[0]["representing"] == "BANK OF AMERICA NA,"
assert "KOZENY & MCCUBBIN" in case1.attorneys.text
case2 = oscn.request.Case("mayes-PO-2015-1")
attorneys2 = oscn.parse.attorneys(case2.text)
assert isinstance(attorneys2, list)
assert len(attorneys2) == 0
assert attorneys2.text == ""
def test_issue_list():
case_list = oscn.request.CaseList(
counties=["tulsa", "oklahoma" "mayes"], types=["CJ", "PB", "CV"], stop=20
)
for case in case_list:
assert isinstance(case.issues, list)
for issue in case.issues:
assert isinstance(issue, dict)
assert isinstance(issue["parties"], list)
for party in issue["parties"]:
assert isinstance(party, dict)
assert "name" in party.keys()
assert "disposed" in party.keys()
def test_events():
case = oscn.request.Case("oklahoma-FD-2018-5")
events = oscn.parse.events(case.text)
assert events == []
case = oscn.request.Case("oklahoma-FD-2012-5")
events = oscn.parse.events(case.text)
assert len(events) == 9
assert "PETITIONER'S APPLICATION" in events.text
| [
"oscn.parse.parties",
"oscn.parse.events",
"oscn.parse.attorneys",
"oscn.request.CaseList",
"oscn.parse.issues",
"oscn.request.Case"
] | [((59, 100), 'oscn.request.Case', 'oscn.request.Case', (['"""cleveland-CF-2016-84"""'], {}), "('cleveland-CF-2016-84')\n", (76, 100), False, 'import oscn\n'), ((348, 386), 'oscn.request.Case', 'oscn.request.Case', (['"""tulsa-CF-2012-255"""'], {}), "('tulsa-CF-2012-255')\n", (365, 386), False, 'import oscn\n'), ((806, 846), 'oscn.request.CaseList', 'oscn.request.CaseList', ([], {'start': '(15)', 'stop': '(17)'}), '(start=15, stop=17)\n', (827, 846), False, 'import oscn\n'), ((942, 979), 'oscn.request.Case', 'oscn.request.Case', (['"""tulsa-CF-2019-03"""'], {}), "('tulsa-CF-2019-03')\n", (959, 979), False, 'import oscn\n'), ((1231, 1270), 'oscn.request.Case', 'oscn.request.Case', (['"""tulsa-CJ-2017-5021"""'], {}), "('tulsa-CJ-2017-5021')\n", (1248, 1270), False, 'import oscn\n'), ((1284, 1313), 'oscn.parse.issues', 'oscn.parse.issues', (['case1.text'], {}), '(case1.text)\n', (1301, 1313), False, 'import oscn\n'), ((1492, 1530), 'oscn.request.Case', 'oscn.request.Case', (['"""tulsa-CJ-2020-299"""'], {}), "('tulsa-CJ-2020-299')\n", (1509, 1530), False, 'import oscn\n'), ((1545, 1575), 'oscn.parse.parties', 'oscn.parse.parties', (['case1.text'], {}), '(case1.text)\n', (1563, 1575), False, 'import oscn\n'), ((1715, 1753), 'oscn.request.Case', 'oscn.request.Case', (['"""tulsa-CJ-2020-299"""'], {}), "('tulsa-CJ-2020-299')\n", (1732, 1753), False, 'import oscn\n'), ((1767, 1796), 'oscn.parse.issues', 'oscn.parse.issues', (['case1.text'], {}), '(case1.text)\n', (1784, 1796), False, 'import oscn\n'), ((2153, 2191), 'oscn.request.Case', 'oscn.request.Case', (['"""tulsa-CJ-2016-143"""'], {}), "('tulsa-CJ-2016-143')\n", (2170, 2191), False, 'import oscn\n'), ((2209, 2241), 'oscn.parse.attorneys', 'oscn.parse.attorneys', (['case1.text'], {}), '(case1.text)\n', (2229, 2241), False, 'import oscn\n'), ((2449, 2485), 'oscn.request.Case', 'oscn.request.Case', (['"""mayes-PO-2015-1"""'], {}), "('mayes-PO-2015-1')\n", (2466, 2485), False, 'import oscn\n'), ((2503, 2535), 'oscn.parse.attorneys', 'oscn.parse.attorneys', (['case2.text'], {}), '(case2.text)\n', (2523, 2535), False, 'import oscn\n'), ((2682, 2779), 'oscn.request.CaseList', 'oscn.request.CaseList', ([], {'counties': "['tulsa', 'oklahomamayes']", 'types': "['CJ', 'PB', 'CV']", 'stop': '(20)'}), "(counties=['tulsa', 'oklahomamayes'], types=['CJ',\n 'PB', 'CV'], stop=20)\n", (2703, 2779), False, 'import oscn\n'), ((3215, 3254), 'oscn.request.Case', 'oscn.request.Case', (['"""oklahoma-FD-2018-5"""'], {}), "('oklahoma-FD-2018-5')\n", (3232, 3254), False, 'import oscn\n'), ((3268, 3296), 'oscn.parse.events', 'oscn.parse.events', (['case.text'], {}), '(case.text)\n', (3285, 3296), False, 'import oscn\n'), ((3333, 3372), 'oscn.request.Case', 'oscn.request.Case', (['"""oklahoma-FD-2012-5"""'], {}), "('oklahoma-FD-2012-5')\n", (3350, 3372), False, 'import oscn\n'), ((3386, 3414), 'oscn.parse.events', 'oscn.parse.events', (['case.text'], {}), '(case.text)\n', (3403, 3414), False, 'import oscn\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2019-03-11 02:44
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('comment', '0002_auto_20190308_1705'),
]
operations = [
migrations.AddField(
model_name='comment',
name='content_markdown',
field=models.CharField(default=None, max_length=2000, verbose_name='markdown内容'),
preserve_default=False,
),
]
| [
"django.db.models.CharField"
] | [((411, 485), 'django.db.models.CharField', 'models.CharField', ([], {'default': 'None', 'max_length': '(2000)', 'verbose_name': '"""markdown内容"""'}), "(default=None, max_length=2000, verbose_name='markdown内容')\n", (427, 485), False, 'from django.db import migrations, models\n')] |